diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/autotune_process.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/autotune_process.py new file mode 100644 index 0000000000000000000000000000000000000000..66d73749ea757aeb1782a179cde4801e19379661 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/autotune_process.py @@ -0,0 +1,273 @@ +import dataclasses +import queue +import time +import warnings +from multiprocessing.process import BaseProcess +from multiprocessing.queues import Queue +from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union + +import torch +from torch import multiprocessing +from torch._dynamo.testing import rand_strided + +from torch._inductor import ir +from torch._inductor.codecache import PyCodeCache + +if TYPE_CHECKING: + from torch._inductor.select_algorithm import TritonTemplateCaller + +from .utils import do_bench +from .virtualized import V + +DEBUG = False +EXIT_HANDLER_REGISTERED = False + + +# Used to synchronize between parent and child processes +class Ping: + pass + + +class Pong: + pass + + +@dataclasses.dataclass +class TuningProcess: + process: Optional[BaseProcess] = None + request_queue: Optional["Queue[Any]"] = None + response_queue: Optional["Queue[Any]"] = None + + @staticmethod + def process_main( + request_queue: "Queue[Any]", + response_queue: "Queue[Any]", + ) -> None: + print("enter child process main") + while True: + obj = request_queue.get() + + if obj is None: + break # None is a sentinel for the child to terminate + elif isinstance(obj, Ping): + response_queue.put(Pong()) + elif isinstance(obj, BenchmarkRequest): + response_queue.put(obj.benchmark()) + else: + raise RuntimeError(f"Invalid request type {type(obj)}") + + def valid(self) -> bool: + return ( + self.process is not None + and self.request_queue is not None + and self.response_queue is not None + ) + + def clear(self) -> None: + self.process = self.request_queue = self.response_queue 
= None + + def initialize(self) -> None: + """ + Create child process, request/response queues and do the warm up. + """ + if self.valid(): + return + + # cuda runtime does not work with "fork", use "spawn" to start processes. + ctx = multiprocessing.get_context("spawn") + request_queue = self.request_queue = ctx.Queue() + response_queue = self.response_queue = ctx.Queue() + + process = self.process = ctx.Process( + target=self.process_main, + args=( + self.request_queue, + self.response_queue, + ), + ) + process.start() + + # register the exit handler for the parent process so it will terminate + # the child processes + global EXIT_HANDLER_REGISTERED + if not EXIT_HANDLER_REGISTERED: + EXIT_HANDLER_REGISTERED = True + import atexit + + atexit.register(lambda: self.terminate()) + + # wait for the initialization to be done + request_queue.put(Ping()) + resp = response_queue.get() + assert isinstance(resp, Pong) + + def terminate(self) -> None: + if self.valid(): + request_queue = self.request_queue + assert request_queue is not None + request_queue.put(None) + process = self.process + assert process is not None + process.join() + + +tuning_process = TuningProcess() + + +LayoutOrBuffer = Union[ir.Layout, ir.Buffer] + + +@dataclasses.dataclass +class TensorMeta: + device: torch.device + dtype: torch.dtype + sizes: List[int] + strides: List[int] + offset: int + + @classmethod + def from_irnodes( + cls, irnodes: Union[LayoutOrBuffer, Tuple[LayoutOrBuffer], List[LayoutOrBuffer]] + ) -> Union["TensorMeta", List["TensorMeta"]]: + if isinstance(irnodes, (tuple, list)): + result: List[Any] = [cls.from_irnodes(x) for x in irnodes] + assert all(isinstance(x, TensorMeta) for x in result) + return result + + node = irnodes + if isinstance(node, ir.Layout): + node = ir.Buffer("fake", node) + + dtype = node.get_dtype() + assert dtype is not None + + return TensorMeta( + device=node.get_device(), + dtype=dtype, + sizes=V.graph.sizevars.size_hints(node.get_size()), + 
strides=V.graph.sizevars.size_hints(node.get_stride()), + offset=V.graph.sizevars.size_hint(node.get_layout().offset), + ) + + def to_tensor(self) -> torch.Tensor: + return rand_strided( + self.sizes, + self.strides, + device=self.device, + dtype=self.dtype, + extra_size=self.offset, + ) + + +@dataclasses.dataclass +class BenchmarkRequest: + """ + Only handle triton template benchmark for now. The extern kernel benchmark + can be done inside the same process since they usually don't cause crash. + """ + + module_path: str # the path of the module defining the triton kernel + module_cache_key: str + kernel_name: str # the kernel name defined in the module + grid: List[int] + extra_args: Dict[str, Any] + num_stages: int + num_warps: int + + input_tensors: Union["TensorMeta", List["TensorMeta"]] + output_tensor: Union["TensorMeta", List["TensorMeta"]] + + def benchmark( + self, *input_tensors: torch.Tensor, output_tensor: Optional[torch.Tensor] = None + ) -> float: + if DEBUG: + start_ts = time.time() + + mod = PyCodeCache.load_by_key_path(self.module_cache_key, self.module_path) + if DEBUG: + print( + f"benchmark module key: {self.module_cache_key}, path: {self.module_path}" + ) + + run = getattr(mod, self.kernel_name).run + + if DEBUG: + load_elapse = time.time() - start_ts + start_ts = time.time() + + # create args and out tensor + if output_tensor is None: + assert len(input_tensors) == 0 + if isinstance(self.input_tensors, List): + input_tensors = tuple(x.to_tensor() for x in self.input_tensors) + if isinstance(self.input_tensors, TensorMeta): + input_tensors = tuple(self.input_tensors.to_tensor()) + assert isinstance(self.output_tensor, TensorMeta) + output_tensor = self.output_tensor.to_tensor() + + if DEBUG: + create_tensor_elapse = time.time() - start_ts + start_ts = time.time() + + def worker() -> float: + return run( + *input_tensors, + output_tensor, + *self.extra_args, + grid=self.grid, + num_stages=self.num_stages, + num_warps=self.num_warps, + ) + + out 
= do_bench(worker) + torch.cuda.synchronize() # shake out any CUDA errors + + if DEBUG: + bench_elapse = time.time() - start_ts + print( + f"InChidProcess {self.module_cache_key}: load {load_elapse}, " + + f"create tensor {create_tensor_elapse}, bench {bench_elapse}" + ) + return out + + +def benchmark_in_sub_process( + choice: "TritonTemplateCaller", +) -> float: + """ + Do benchmarking in subprocess and return the perf number (latency). + """ + assert choice.bmreq is not None + tuning_process.initialize() + assert tuning_process.valid() + process, request_queue, response_queue = ( + tuning_process.process, + tuning_process.request_queue, + tuning_process.response_queue, + ) + assert ( + process is not None and request_queue is not None and response_queue is not None + ) + + request_queue.put(choice.bmreq) + while True: + try: + timing = response_queue.get(timeout=1.0) + except queue.Empty: + status = process.exitcode + if status is None: + # child process is still running + continue + # child process fail + assert status != 0 + + warnings.warn( + f"Fail to benchmark choice '{choice}'. It will be ignored. Please debug the root cause in case the choice can bring perf gains." 
# noqa: B950 line too long + ) + + tuning_process.clear() + + # return INF so this choice will be ignored + return float("inf") + + return timing diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__init__.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/common.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d870f5734805d88d343507b56e2b69347385f1f Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/common.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e26516af6dbbbc043a0d1708e54ab557b4462240 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5431fe2314cebf07a16c7da49f70ad59a28a216f Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_utils.cpython-310.pyc 
b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8c75feb3428e2e82ce88fca73155609724d061c Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_utils.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/wrapper.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b251dcf1d64e0df0b3be44880cc61afe67923c9 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/wrapper.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/aot_inductor_interface.cpp b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/aot_inductor_interface.cpp new file mode 100644 index 0000000000000000000000000000000000000000..23a609106dd707eaf8584ff04267b492af0be5d1 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/aot_inductor_interface.cpp @@ -0,0 +1,171 @@ +#include +#include +#include +#include +#include +#include + +#define CONVERT_EXCEPTION_TO_ERROR_CODE(...) \ + try { \ + __VA_ARGS__ \ + } catch (const std::exception& e) { \ + std::cerr << "Error: " << e.what() << std::endl; \ + return AOTInductorError::Failure; \ + } catch (...) { \ + std::cerr << "Unknown exception occurred." 
<< std::endl; \ + return AOTInductorError::Failure; \ + } \ + return AOTInductorError::Success; + +extern "C" { + +AOTInductorError AOTInductorModelContainerCreate( + AOTInductorModelContainerHandle* container_handle, + size_t num_models) { + if (num_models == 0) { + LOG(ERROR) << "num_models must be positive, but got 0"; + return AOTInductorError::Failure; + } + CONVERT_EXCEPTION_TO_ERROR_CODE({ + auto* container = + new torch::aot_inductor::AOTInductorModelContainer(num_models); + *container_handle = + reinterpret_cast(container); + }) +} + +AOTInductorError AOTInductorModelContainerDelete( + AOTInductorModelContainerHandle container_handle) { + CONVERT_EXCEPTION_TO_ERROR_CODE({ + auto* container = + reinterpret_cast( + container_handle); + delete container; + }); +} + +AOTInductorError AOTInductorModelContainerRun( + AOTInductorModelContainerHandle container_handle, + const AOTInductorTensorHandle inputs_handle, + size_t num_inputs, + AOTInductorTensorHandle outputs_handle, + size_t num_outputs, + AOTInductorStreamHandle stream_handle) { + auto* container = + reinterpret_cast( + container_handle); + + const auto* inputs = reinterpret_cast(inputs_handle); + std::vector input_tensors; + input_tensors.reserve(num_inputs); + for (size_t i = 0; i < num_inputs; i++) { + input_tensors.push_back(inputs[i]); + } + + auto* outputs = reinterpret_cast(outputs_handle); + std::vector output_tensors; + output_tensors.reserve(num_outputs); + for (size_t i = 0; i < num_outputs; i++) { + output_tensors.push_back(outputs[i]); + } + + auto stream = reinterpret_cast(stream_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { container->run(input_tensors, output_tensors, stream); }) +} + +AOTInductorError AOTInductorModelContainerGetNumInputs( + AOTInductorModelContainerHandle container_handle, + size_t* num_inputs_out) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *num_inputs_out = container->num_inputs(); }) +} + +AOTInductorError 
AOTInductorModelContainerGetInputName( + AOTInductorModelContainerHandle container_handle, + size_t input_idx, + const char** input_name_out) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *input_name_out = container->input_name(input_idx); }) +} + +AOTInductorError AOTInductorModelContainerGetInputDtype( + AOTInductorModelContainerHandle container_handle, + size_t input_idx, + const char** input_dtype_out) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *input_dtype_out = container->get_input_dtype(input_idx); }) +} + +AOTInductorError AOTInductorModelContainerGetNumOutputs( + AOTInductorModelContainerHandle container_handle, + size_t* num_outputs_out) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *num_outputs_out = container->num_outputs(); }) +} + +AOTInductorError AOTInductorModelContainerGetOutputName( + AOTInductorModelContainerHandle container_handle, + size_t output_idx, + const char** output_name_out) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *output_name_out = container->output_name(output_idx); }) +} + +AOTInductorError AOTInductorModelContainerGetOutputDtype( + AOTInductorModelContainerHandle container_handle, + size_t output_idx, + const char** output_dtype_out) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *output_dtype_out = container->get_output_dtype(output_idx); }) +} + +AOTInductorError AOTInductorModelContainerGetMaxInputShape( + AOTInductorModelContainerHandle container_handle, + size_t input_idx, + AOTInductorParamShape* input_shape) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE({ + const std::vector& max_input_shape = + container->max_input_shape(input_idx); + *input_shape = + 
AOTInductorParamShape(max_input_shape.data(), max_input_shape.size()); + }) +} + +AOTInductorError AOTInductorModelContainerGetMaxOutputShape( + AOTInductorModelContainerHandle container_handle, + size_t output_idx, + AOTInductorParamShape* output_shape) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE({ + const std::vector& max_output_shape = + container->max_output_shape(output_idx); + *output_shape = + AOTInductorParamShape(max_output_shape.data(), max_output_shape.size()); + }) +} + +} // extern "C" diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/common.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/common.py new file mode 100644 index 0000000000000000000000000000000000000000..a4611610f7b0a4dd0ea1c76d00095002f2feb5e4 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/common.py @@ -0,0 +1,1031 @@ +import contextlib +import dataclasses +import functools +import itertools +import logging +import operator +import re +from collections import namedtuple +from itertools import chain +from typing import Any, Callable, ClassVar, Dict, List, NamedTuple, Optional, Set, Union + +import sympy +from sympy.printing.printer import Printer + +import torch +import torch.fx +from torch.utils._sympy.value_ranges import ValueRanges + +from .. 
import metrics +from ..utils import ( + DeferredLineBase, + free_symbol_startswith, + get_sympy_Expr_dtype, + IndentedBuffer, + sympy_dot, + sympy_subs, + unique, +) +from ..virtualized import ops, OpsValue, V + +schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") + + +def data_type_logger(msg): + if schedule_log.isEnabledFor(logging.DEBUG): + schedule_log.debug("Data type propagation: %s", msg) + + +TensorArg = namedtuple("TensorArg", ["name", "buffer", "dtype"]) +SizeArg = namedtuple("SizeArg", ["name", "expr"]) + +DeviceCodegen = namedtuple("DeviceCodegen", ["scheduling", "wrapper_codegen"]) +device_codegens: Dict[str, DeviceCodegen] = {} + + +# The code generated by Inductor consists of two main parts: kernel code and wrapper code. +# For any new backend looking to integrate with Inductor, customization of these two main +# parts are necessary to generate its specific code. +# +# Kernel code generation is determined by different Scheduling. Consequently, a new +# backend needs to provide a custom Scheduling for its unique kernel code generation. Currently, +# CppScheduling and TritonScheduling serve the C++/OpenMP and Triton backends, respectively. +# +# For the Wrapper, Inductor provides a WrapperCodeGen class to generate the Python wrapper code +# that bridges kernels. This allows out-of-tree backends to inherit from WrapperCodeGen, +# and override specific member functions to create backend-specific Python wrapper code. +# +# Other classes, such as CppKernel and TritonKernel, used for code generation, typically form part +# of the logic for either Scheduling or WrapperCodeGen. So the Scheduling and WrapperCodeGen interfaces +# provide flexibility to the backend. A backend can choose to implement these classes from scratch, +# or reuse them by extending and overriding as necessary. And Inductor provides the registration API, +# register_backend_for_device, to equip a new backend at runtime. 
+# +# Intel has developed a new backend on top of Triton to support Intel GPUs, leveraging these interfaces. +# This backend can be used as a reference: +# https://github.com/intel/intel-extension-for-pytorch/blob/5dcc9d57e5422cf295e1a1ee97896d6b6a554a85/intel_extension_for_pytorch/_inductor/__init__.py#L9 +def register_backend_for_device( + device: str, device_scheduling: type, device_wrapper_codegen: type +): + device_codegens[device] = DeviceCodegen(device_scheduling, device_wrapper_codegen) + + +def get_scheduling_for_device(device: str): + return device_codegens[device].scheduling if device in device_codegens else None + + +def get_wrapper_codegen_for_device(device: str): + return ( + device_codegens[device].wrapper_codegen if device in device_codegens else None + ) + + +def index_prevent_reordering(index: List[sympy.Expr], index_vars, sizes): + from ..ir import FlexibleLayout + + # added contiguous index prevents reordering + return [*index, sympy_dot(index_vars, FlexibleLayout.contiguous_strides(sizes))] + + +@functools.lru_cache(None) +def boolean_ops(): + return ( + "is_inf", + "is_nan", + "bitwise_xor", + "logical_not", + "signbit", + "le", + "lt", + "ge", + "gt", + "eq", + "ne", + ) + + +DTYPE_TO_COMPUTATION_DTYPE = { + torch.bfloat16: torch.float, + torch.float16: torch.float, + **{ + dtype: dtype + for dtype in [ + torch.bool, + torch.float32, + torch.float64, + torch.int8, + torch.int16, + torch.int32, + torch.int64, + torch.uint8, + ] + }, +} + + +class DataTypePropagation: + def __init__(self, body) -> None: + self.body = body + self.graphs: Dict[Union[Callable[..., Any], str], Any] = { + "root": body.root_block.graph + } + for k, v in body.subblocks.items(): + self.graphs[k] = v.graph + + def deduce_node_dtype_by_inputs(self, node: torch.fx.Node): + inputs = node.all_input_nodes + input_nodes = [ + n for n in inputs if isinstance(n, torch.fx.Node) and n.op != "placeholder" + ] + if len(input_nodes) == 0: + return None + + all_input_nodes_propogated 
= all( + OptimizationContext.key in n.meta + and n.meta[OptimizationContext.key].dtype is not None + for n in input_nodes + ) + if not all_input_nodes_propogated: + return None + + return functools.reduce( + torch.promote_types, + [n.meta[OptimizationContext.key].dtype for n in input_nodes], + ) + + def deduce_node_dtype_by_subgraph(self, node: torch.fx.Node): + sub_graph = self.graphs[node.target] + dtype = self.propagate_graph(sub_graph) + assert dtype + return dtype + + def deduce_node_dtype(self, node: torch.fx.Node): + if node.target in boolean_ops(): + return torch.bool + + if node.op == "placeholder": + return None + + if node.target == "output": + # we can infer output node if it only have 1 arg + if len(node.args) != 1: + return None + + if node.target in ( + "to_dtype", + "index_expr", + ): + return node.args[-1] + + if node.target in ( + "rand", + "randn", + ): + return torch.float + + if node.target in ( + "get_index", + "index_expr", + ): + return torch.int64 + + if node.target in ( + "load", + "store", + "store_reduction", + ): + buf_name = node.args[1] + return V.graph.get_dtype(buf_name) + + if node.target == operator.getitem: + return self.deduce_node_dtype(node.args[0]) + + assert isinstance(node.target, str) + + if node.target == "reduction": + return node.args[1] + + if node.target == "constant": + return DTYPE_TO_COMPUTATION_DTYPE[node.args[-1]] + + if node.target.startswith("masked_subblock"): + return self.deduce_node_dtype_by_subgraph(node) + + return self.deduce_node_dtype_by_inputs(node) + + def propagate_graph(self, graph: torch.fx.Graph): + assert graph.nodes + graph_dtype = None + # For masked_subblock, we use output's dtype to represent + # the dtype of this subgraph. 
For other cases, graph_dtype + # might be None + for node in graph.nodes: + if OptimizationContext.key in node.meta: + opt_ctx = node.meta[OptimizationContext.key] + else: + opt_ctx = OptimizationContext() + + opt_ctx.dtype = self.deduce_node_dtype(node) + node.meta[OptimizationContext.key] = opt_ctx + if node.target == "output": + graph_dtype = opt_ctx.dtype + return graph_dtype + + def propagate(self): + self.propagate_graph(self.graphs["root"]) + + @classmethod + def propagate_loopbody(cls, body): + return cls(body).propagate() + + @classmethod + def propagate_scheduler_node(cls, node): + from ..ir import LoopBody + from ..scheduler import SchedulerNode + + assert isinstance(node, SchedulerNode) + assert isinstance(node._body, LoopBody) + DataTypePropagation.propagate_loopbody(node._body) + + +class ExprPrinter(Printer): + @staticmethod + def paren(string): + def all_in_parens(string): + if string[0] != "(" or len(string) < 2: + return False + count = 1 + for i, char in enumerate(string[1:]): + if char == "(": + count += 1 + elif char == ")": + count -= 1 + if count == 0 and i != len(string) - 2: + return False + assert count == 0 + return True + + if ( + isinstance(string, CSEVariable) + or re.match(r"^[a-z0-9_.]+$", string, re.I) + or re.match(r"^\([^)]*\)$", string, re.I) + or string == "" + ): + return string + # don't put extra parens for strings that are already wrapped in parens + if all_in_parens(string): + return string + return f"({string})" + + def _print_Pow(self, expr): + # Pow() confuses triton + base, exp = expr.args + # NB: Remember this is sizevar computation! You don't typically + # expect to have to do floating point computation including exponents + # in sizevar compute. Instead of adding support for floating + # point pow, you should make upstream retranslate the Sympy expression + # into Tensor expressions earlier and do that instead. 
+ if exp == 0.5: + return self._helper_sqrt(base) # type: ignore[attr-defined] + elif exp == -0.5: + return "1/" + self._helper_sqrt(base) # type: ignore[attr-defined] + base = self._print(base) + assert exp == int(exp), exp + exp = int(exp) + if exp > 0: + return "*".join([self.paren(base)] * exp) + elif exp < 0: + return "1/" + self.paren("*".join([self.paren(base)] * abs(exp))) + else: # exp == 0 + return "1" + + def _print_Unequality(self, expr): + return " != ".join(map(self.paren, map(self._print, expr.args))) + + def _print_Mul(self, expr): + return "*".join(map(self.paren, map(self._print, expr.args))) + + def _print_Add(self, expr): + return " + ".join(map(self.paren, map(self._print, expr.args))) + + def _print_Mod(self, expr): + return " % ".join(map(self.paren, map(self._print, expr.args))) + + def _print_CleanDiv(self, expr): + return self._print_FloorDiv(expr) # type: ignore[attr-defined] + + def _print_GreaterThan(self, expr): + # GreaterThan: >= + # StrictlyGreaterThan: > + # Go figure... 
+ return " >= ".join(map(self.paren, map(self._print, expr.args))) + + +class PythonPrinter(ExprPrinter): + def _print_ModularIndexing(self, expr): + x, div, mod = expr.args + x = self.paren(self.doprint(x)) + div = self.paren(self.doprint(div)) + mod = self.paren(self.doprint(mod)) + if div != "1": + x = f"({x} // {div})" + return f"{x} % {mod}" + + def _print_FloorDiv(self, expr): + x, div = expr.args + x = self.paren(self.doprint(x)) + div = self.paren(self.doprint(div)) + return f"({x} // {div})" + + def _helper_sqrt(self, expr): + return f"math.sqrt({self._print(expr)})" + + def _print_floor(self, expr): + assert len(expr.args) == 1 + return f"math.floor({self._print(expr.args[0])})" + + def _print_ceiling(self, expr): + assert len(expr.args) == 1 + return f"math.ceil({self._print(expr.args[0])})" + + +class OpOverrides: + def __init__(self, parent): + super().__init__() + self._parent = parent + + def __getattr__(self, item): + return getattr(self._parent, item) + + @staticmethod + def identity(value): + # used to trigger cse + return value + + @staticmethod + def constant(value, dtype): + return repr(value) + + @staticmethod + def reciprocal(x): + return ops.div("1", x) + + @staticmethod + def square(x): + return ops.mul(x, x) + + @staticmethod + def bitwise_not(x): + return f"~{ExprPrinter.paren(x)}" + + @staticmethod + def logical_not(a): + return f"{ExprPrinter.paren(a)} == 0" + + @staticmethod + def bitwise_and(x, y): + return f"{ExprPrinter.paren(x)} & {ExprPrinter.paren(y)}" + + @staticmethod + def bitwise_or(x, y): + return f"{ExprPrinter.paren(x)} | {ExprPrinter.paren(y)}" + + @staticmethod + def bitwise_xor(x, y): + return f"{ExprPrinter.paren(x)} ^ {ExprPrinter.paren(y)}" + + @staticmethod + def bitwise_left_shift(x, y): + return f"{ExprPrinter.paren(x)} << {ExprPrinter.paren(y)}" + + # TODO(fdrocha): this is currently not being used anywhere, + # pending on moving triton pin past 972b761 + @staticmethod + def bitwise_right_shift(x, y): + return 
f"{ExprPrinter.paren(x)} >> {ExprPrinter.paren(y)}" + + @staticmethod + def remainder(a, b): + r = ops.mod(a, b) + return ops.where(f"(({r} != 0) & (({r} < 0) != ({b} < 0)))", ops.add(r, b), r) + + @staticmethod + def load_seed(name, offset): + return ops.load(name, sympy.Integer(offset)) + + +class DeferredLine(DeferredLineBase): + """A line that can be 'unwritten' by adding name to V.graph.removed_buffers""" + + def __init__(self, name, line): + super().__init__(line) + self.name = name + + def __call__(self): + if ( + self.name not in V.graph.removed_buffers + and self.name not in V.graph.inplaced_to_remove + ): + return self.line + return None + + def _new_line(self, line): + return DeferredLine(self.name, line) + + +class BracesBuffer(IndentedBuffer): + def indent(self, offset=1): + @contextlib.contextmanager + def ctx(): + for _ in range(offset): + self.writeline("{") + self._indent += 1 + for _ in range(-offset): + self._indent -= 1 + self.writeline("}") + yield + for _ in range(-offset): + self.writeline("{") + self._indent += 1 + for _ in range(offset): + self._indent -= 1 + self.writeline("}") + + return ctx() + + +class InplacedBuffer(NamedTuple): + inner_name: str + other_names: List[str] + + +class KernelArgs: + @staticmethod + def _lookup(prefix, odict, name): + assert isinstance(name, (str, sympy.Symbol)) + if name not in odict: + odict[name] = f"{prefix}{len(odict)}" + return odict[name] + + def __init__(self, sizevars=None): + self.input_buffers = dict() + self.output_buffers = dict() + self.inplace_buffers = dict() + self.sizevars = sizevars or dict() + + def __repr__(self): + return "KernelArgs({})".format( + ", ".join( + map( + repr, + [ + self.input_buffers, + self.output_buffers, + self.inplace_buffers, + self.sizevars, + ], + ) + ) + ) + + def _buffer_is_marked_removed(self, name): + return isinstance(name, str) and name.startswith("REMOVED") + + def input(self, name): + if V.graph.scheduler: + name = 
V.graph.scheduler.mutation_real_name.get(name, name) + assert name not in V.graph.removed_buffers, name + if name in self.output_buffers: + return self.output_buffers[name] + if name in self.inplace_buffers: + return self.inplace_buffers[name].inner_name + if name.startswith("seed"): + return self._lookup("seed", self.input_buffers, name) + return self._lookup("in_ptr", self.input_buffers, name) + + def output(self, name): + if V.graph.scheduler: + name = V.graph.scheduler.mutation_real_name.get(name, name) + assert name not in V.graph.removed_buffers, name + if name in self.inplace_buffers: + return self.inplace_buffers[name].inner_name + return self._lookup("out_ptr", self.output_buffers, name) + + def make_inplace(self, input_name, output_name): + assert output_name not in self.inplace_buffers + if input_name in self.inplace_buffers: + buf = self.inplace_buffers[input_name] + buf.other_names.append(output_name) + self.inplace_buffers[output_name] = buf + else: + buf = InplacedBuffer( + f"in_out_ptr{len(unique(self.inplace_buffers.values()))}", + [input_name, output_name], + ) + self.inplace_buffers[input_name] = buf + self.inplace_buffers[output_name] = buf + + def seed_offset(self, name, value): + if value in self.sizevars: + return self.sizevars[value] + if name in self.sizevars.values(): + name = ( + f"{name}{sum(1 for v in self.sizevars.values() if v.startswith(name))}" + ) + self.sizevars[value] = name + return name + + def size(self, name): + if str(name) == "seed": + self.sizevars["seed"] = "seed" + return "seed" + return self._lookup("ks", self.sizevars, name) + + def call_names(self): + return chain( + self.input_buffers.keys(), self.output_buffers.keys(), self.sizevars.keys() + ) + + def wrap_ptr_arg(self, buf, dtype): + return f"c_void_p({buf}.data_ptr())" + + def wrap_size_arg(self, size): + return f"c_long({size})" + + def cpp_argdefs(self): + from .cpp import DTYPE_TO_CPP, INDEX_TYPE + + # TODO(jansel): replace this with data from scheduler + 
buffer_types = {x.get_name(): x.get_dtype() for x in V.graph.buffers} + for name, val in V.graph.graph_inputs.items(): + if isinstance(val, sympy.Expr): + buffer_types[name] = get_sympy_Expr_dtype(val) + else: + buffer_types[name] = val.get_dtype() + buffer_types.update( + {name: val.dtype for name, val in V.graph.constants.items()} + ) + + call_args = [] + arg_defs = [] + arg_types = [] + for inplaced in unique(self.inplace_buffers.values()): + if self._buffer_is_marked_removed(inplaced): + continue + outer = inplaced.other_names[-1] + inner = inplaced.inner_name + dtype = buffer_types[outer] + cpp_dtype = DTYPE_TO_CPP[dtype] + arg_defs.append(f"{cpp_dtype}* {inner}") + call_args.append(self.wrap_ptr_arg(outer, dtype)) + arg_types.append(f"{cpp_dtype}*") + for outer, inner in self.input_buffers.items(): + if outer in self.inplace_buffers: + continue + dtype = buffer_types[outer] + cpp_dtype = DTYPE_TO_CPP[dtype] + arg_defs.append(f"const {cpp_dtype}* {inner}") + call_args.append(self.wrap_ptr_arg(outer, dtype)) + arg_types.append(f"const {cpp_dtype}*") + for outer, inner in self.output_buffers.items(): + if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner): + continue + dtype = buffer_types[outer] + cpp_dtype = DTYPE_TO_CPP[dtype] + arg_defs.append(f"{cpp_dtype}* {inner}") + call_args.append(self.wrap_ptr_arg(outer, dtype)) + arg_types.append(f"{cpp_dtype}*") + for outer, inner in self.sizevars.items(): + arg_defs.append(f"const {INDEX_TYPE} {inner}") + call_args.append(self.wrap_size_arg(outer)) + arg_types.append(f"const {INDEX_TYPE}") + return arg_defs, call_args, arg_types + + def python_argdefs(self): + arg_defs = [] + call_args = [] + precompile_args: List[Union[TensorArg, SizeArg]] = [] + for inplaced in unique(self.inplace_buffers.values()): + if self._buffer_is_marked_removed(inplaced): + continue + arg_defs.append(inplaced.inner_name) + call_args.append(inplaced.other_names[-1]) + precompile_args.append( + TensorArg( + 
inplaced.inner_name, + inplaced.other_names[-1], + V.graph.get_dtype(inplaced.other_names[-1]), + ) + ) + for outer, inner in chain( + self.input_buffers.items(), self.output_buffers.items() + ): + if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner): + continue + arg_defs.append(inner) + call_args.append(outer) + precompile_args.append(TensorArg(inner, outer, V.graph.get_dtype(outer))) + for outer, inner in self.sizevars.items(): + arg_defs.append(inner) + call_args.append(outer) + precompile_args.append(SizeArg(inner, outer)) + + return arg_defs, call_args, precompile_args + + def aliases(self): + for inplaced in unique(self.inplace_buffers.values()): + if self._buffer_is_marked_removed(inplaced): + continue + for other in inplaced.other_names: + if other in V.graph.inplaced_to_remove: + continue + if other in self.input_buffers: + yield self.input_buffers[other], inplaced.inner_name + if other in self.output_buffers: + yield self.output_buffers[other], inplaced.inner_name + + def is_removed(self, name): + def _is_removed(name, buffers): + return name not in buffers or self._buffer_is_marked_removed(buffers[name]) + + return _is_removed(name, self.output_buffers) and _is_removed( + name, self.inplace_buffers + ) + + # Includes inplace buffers, excludes removed buffers. Essentially, + # after you do a call into this kernel, which buffers actually contain + # updated data? Modeled off of python_argdefs. + def live_output_buffers(self): + live_outs = set() + for inplaced in unique(self.inplace_buffers.values()): + if self._buffer_is_marked_removed(inplaced): + continue + live_outs.add(inplaced.other_names[-1]) + for outer, inner in self.output_buffers.items(): + if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner): + continue + live_outs.add(outer) + return live_outs + + +class CSEVariable: + """A CSEVariable is just a name for an expression but it is useful to be able to annotate them on a backend dependent basis. 
+ To do so, the backends can simply overload `Kernel.create_cse_var` + The "CSEVariable.update_on_args" method gives you a hook for annotations + See example of TritonCSEVariable in triton.py + """ + + def __init__(self, name, bounds: ValueRanges): + assert isinstance(bounds, ValueRanges) + self.name = name + self.bounds = bounds + + def __str__(self): + return self.name + + def __hash__(self) -> int: + return hash(self.name) + + def __eq__(self, other) -> bool: + return type(other) == type(self) and other.name == self.name + + def update_on_args(self, name, args, kwargs): + pass + + +class CppWrapperKernelArgs(KernelArgs): + def wrap_ptr_arg(self, buf, dtype): + from .cpp import DTYPE_TO_CPP + + return f"({DTYPE_TO_CPP[dtype]}*)({buf}.data_ptr())" + + def wrap_size_arg(self, size): + return f"{size}" + + +class CSE: + """Common subexpression elimination""" + + def __init__( + self, + prefix="", + suffix="", + name_prefix="tmp", + iter_buffers=None, + store_cache=None, + reduction_cache=None, + varname_map=None, + ): + self.prefix = prefix + self.suffix = suffix + self.cache = {} + self.name_prefix = name_prefix + self.store_cache = store_cache or {} + self.reduction_cache = reduction_cache or {} + self.iter_buffer_ids = iter_buffers or itertools.count() + self.invalidated_stores = set() + self.varname_map = varname_map or {} + + def invalidate(self, keep_vars: Set[str]): + for name, tmp in list(self.store_cache.items()): + if tmp not in keep_vars: + del self.store_cache[name] + self.invalidated_stores.add(name) + self.cache = {k: v for k, v in self.cache.items() if v in keep_vars} + + def clone(self): + # Note(fdrocha): reduction_cache is not being cloned, not sure if this is intentional + return CSE( + prefix=self.prefix, + suffix=self.suffix, + name_prefix=self.name_prefix, + iter_buffers=self.iter_buffer_ids, + store_cache=self.store_cache, + varname_map=self.varname_map, + ) + + def generate( + self, + buffer: IndentedBuffer, + expr: Union[str, CSEVariable, 
OpsValue], + *, + bounds: ValueRanges = ValueRanges.unknown(), + write=True, + assignment=True, + ) -> CSEVariable: + if isinstance(expr, OpsValue): + expr = expr.value + + assert isinstance(expr, (str, CSEVariable)), type(expr) + assert write or assignment + if isinstance(expr, CSEVariable): + # If the expressions were always created with all the information, we could + # assert expr.bounds == bounds, but sometimes the expression is created + # with the loose ValueRanges.unknown(), so we need to tighten the bounds + expr.bounds = expr.bounds.tighten(bounds) + return expr + cache_key = expr + var = self.cache.get(cache_key, None) + if not var: + var = self.newvar(bounds) if assignment else None + self.cache[cache_key] = var + if write: + if V.kernel.current_node: + V.kernel.current_node.codegen_originating_info( + buffer, only_once=True + ) + if assignment: + line = f"{self.prefix}{var} = {expr}{self.suffix}" + else: + line = f"{expr}{self.suffix}" + buffer.writeline(line) + else: + var.bounds = var.bounds.tighten(bounds) + + return var + + def newvar(self, bounds: ValueRanges = ValueRanges.unknown()) -> CSEVariable: + var_name = f"{self.name_prefix}{next(self.iter_buffer_ids)}" + var = V.kernel.create_cse_var(var_name, bounds) + self.varname_map[var_name] = var + return var + + +class CodeGen: + def __init__(self): + super().__init__() + self.exit_stack = contextlib.ExitStack() + + def __enter__(self): + self.exit_stack.__enter__() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.exit_stack.__exit__(exc_type, exc_val, exc_tb) + + +class Kernel(CodeGen): + newvar_prefix = "" + suffix = "" + overrides = None + load_format = None + store_format = None + + def __init__(self, args=None): + super().__init__() + metrics.generated_kernel_count += 1 + self.args = args or KernelArgs() + self.loads = IndentedBuffer() + self.compute = IndentedBuffer() + self.stores = IndentedBuffer() + self.cse = CSE(self.newvar_prefix, self.suffix) + 
self.must_keep_buffers = set() + self.store_buffer_names = set() + # set in set_current_node + self.current_node = None + self.node_to_bounds: Optional[Dict[torch.fx.Node, ValueRanges]] = None + + @contextlib.contextmanager + def set_current_node(self, node): + prior = self.current_node + self.current_node = node + self.node_to_bounds = node._body.bounds().get_bounds() + try: + yield + finally: + self.current_node = prior + + @contextlib.contextmanager + def swap_buffers(self, lb, cb=None, sb=None): + if cb is None: + cb = lb + loads = self.loads + compute = self.compute + stores = self.stores + cse = self.cse + self.loads = lb + self.compute = cb + self.stores = sb + self.cse = cse.clone() + try: + yield + finally: + self.loads = loads + self.compute = compute + self.stores = stores + self.cse = cse + + def load(self, name: str, index: sympy.Expr): + raise NotImplementedError() + + def indirect_load(self, name: str, index: sympy.Expr): + """A load the depends on an index we have read""" + prior = self.loads + try: + # put the load in the compute section as it might have deps + self.loads = self.compute + return self.load(name, index) + finally: + self.loads = prior + + def store_reduction(self, name, index, value): + raise NotImplementedError() + + def store(self, name, index, value, mode=None): + raise NotImplementedError() + + def reduction(self, dtype, src_dtype, reduction_type, value): + raise NotImplementedError() + + def bucketize( + self, + values, + offsets_name: str, + offsets_size: sympy.Expr, + indexing_dtype: torch.dtype, + right: bool, + ): + """ + See [Note: Inductor bucketize op] + """ + raise NotImplementedError() + + def __enter__(self): + class CSEProxy: + self.name = "CSEProxy" + + @staticmethod + def __getattr__(name: str) -> Callable[..., CSEVariable]: # type: ignore[misc] + def inner(*args, **kwargs): + # TritonTemplateKernel has no current_node + buf_bounds = ValueRanges.unknown() + if hasattr(V.interpreter, "current_node"): + fx_node = 
V.interpreter.current_node + assert isinstance(self.node_to_bounds, dict) + buf_bounds = self.node_to_bounds.get( + fx_node, ValueRanges.unknown() + ) + + csevar = self.cse.generate( + self.compute, + getattr(parent_handler, name)(*args, **kwargs), # type: ignore[has-type] + bounds=buf_bounds, + ) + csevar.update_on_args(name, args, kwargs) + return csevar + + return inner + + @staticmethod + def indirect_indexing(index_var, size, check=True): + # Skip CSE since this doesn't return an expression + return self.indirect_indexing(index_var, size, check) # type: ignore[attr-defined] + + @staticmethod + def load(name: str, index: sympy.Expr): + if name in self.cse.invalidated_stores: + # A load from an invalidated store requires us to + # keep the actual buffer around + V.kernel.must_keep_buffers.add(name) + if free_symbol_startswith(index, "tmp"): + return self.indirect_load(name, index) + store_cache = self.cse.store_cache + if name in store_cache: + return store_cache[name] + return self.load(name, index) + + @staticmethod + def store(name, index, value, mode=None): + self.store_buffer_names.add(name) + if mode is None: + self.cse.store_cache[name] = value + if self.current_node: + for other_name in self.current_node.get_mutations(): + self.cse.store_cache[other_name] = value + if name not in V.graph.removed_buffers: + return self.store(name, index, value, mode=mode) + + @staticmethod + def store_reduction(name, index, value): + self.store_buffer_names.add(name) + self.cse.store_cache[name] = value + if self.current_node: + for other_name in self.current_node.get_mutations(): + self.cse.store_cache[other_name] = value + + if name not in V.graph.removed_buffers: + return self.store_reduction(name, index, value) + + @staticmethod + def reduction(dtype, src_dtype, reduction_type, value): + return self.reduction(dtype, src_dtype, reduction_type, value) + + @staticmethod + def bucketize( + values, + offsets_name: str, + offsets_size: sympy.Expr, + indexing_dtype: 
torch.dtype, + right: bool, + ): + """ + [Note: Inductor bucketize op] + + Given values (tensor) and offsets_name (reference to the name of a 1D + tensor), calculate the bucket that each value belongs to. + + e.g. for values [-1, 0, 1, 2, 3, 4, 5, 9], offsets [0, 4, 4, 8], right=True + return = [ 0, 1, 1, 1, 1, 3, 3, 4]. + + When right == False, bucket i refers to range (offsets[i], offsets[i+1]]. + When right == True, bucket i refers to range [offsets[i], offsets[i+1]). + + Offsets must be non-decreasing or the result is undefined. + """ + return self.bucketize( + values, offsets_name, offsets_size, indexing_dtype, right + ) + + super().__enter__() + assert self.overrides + parent_handler = self.overrides(V.get_ops_handler()) + self.exit_stack.enter_context(V.set_ops_handler(CSEProxy())) + self.exit_stack.enter_context(V.set_kernel_handler(self)) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if V.graph.scheduler: + V.graph.scheduler.remove_kernel_local_buffers() + super().__exit__(exc_type, exc_val, exc_tb) + + def rename_indexing(self, index) -> sympy.Expr: + # adds the necessary kernel args for index expressions + # and renames variables in index expressions to kernel arg names + if isinstance(index, (list, tuple)): + return [self.rename_indexing(x) for x in index] + index = V.graph.sizevars.simplify(index) + sorted_symbols = sorted(index.free_symbols, key=lambda s: s.name) + replacements = { + x: self.args.size(x) + for x in sorted_symbols + if x.name.startswith("s") or x.name.startswith("ps") + } + return sympy_subs(index, replacements) + + def create_cse_var(self, *args, **kwargs): + return CSEVariable(*args, **kwargs) + + +@dataclasses.dataclass +class OptimizationContext: + key: ClassVar[str] = "opt_ctx" + + # Load value as mask + is_load_as_mask: bool = False + + dtype: torch.dtype = None + ops_name: str = "" + is_most_inner_loop_irrevelant: bool = False + + # Load uint8 value as float32 + is_load_uint8_as_float: bool = False diff 
--git a/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py new file mode 100644 index 0000000000000000000000000000000000000000..f00f55e509b4ae426d1c239a632f284c675a3024 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py @@ -0,0 +1,3198 @@ +import contextlib +import dataclasses +import functools +import itertools +import logging +import math +import re +import sys +from copy import copy, deepcopy +from typing import Dict, List + +import sympy + +import torch +import torch.fx +from torch._inductor import dependencies +from torch._inductor.ir import StorageBox, TensorBox +from torch._prims_common import is_float_dtype +from torch.utils._sympy.functions import FloorDiv +from torch.utils._sympy.value_ranges import bound_sympy, ValueRanges + +from .. import codecache, config, ir, metrics +from ..codegen.wrapper import WrapperCodeGen +from ..optimize_indexing import range_expressable_in_32_bits +from ..scheduler import BaseScheduling, SchedulerNode +from ..utils import ( + cache_on_self, + get_fused_kernel_name, + is_welford_reduction, + sympy_product, + sympy_subs, + sympy_symbol, +) + +from ..virtualized import ops, V +from .common import ( + BracesBuffer, + CppWrapperKernelArgs, + CSE, + DataTypePropagation, + DeferredLine, + DTYPE_TO_COMPUTATION_DTYPE, + ExprPrinter, + IndentedBuffer, + Kernel, + KernelArgs, + OpOverrides, + OptimizationContext, +) + +schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") + +DTYPE_TO_CPP = { + torch.float32: "float", + torch.float64: "double", + torch.float16: "half", + torch.int64: "long", + torch.int32: "int", + torch.int16: "short", + torch.int8: "signed char", + torch.uint8: "unsigned char", + torch.bool: "bool", + torch.bfloat16: "bfloat16", + torch.complex64: "complex64", +} + +DTYPE_TO_ATEN = { + torch.float32: "at::kFloat", + torch.float64: "at::kDouble", + torch.float16: 
"at::kHalf", + torch.int64: "at::kLong", + torch.int32: "at::kInt", + torch.int16: "at::kShort", + torch.int8: "at::kChar", + torch.uint8: "at::kByte", + torch.bool: "at::kBool", + torch.bfloat16: "at::kBFloat16", + torch.complex64: "at::kComplexFloat", +} + +DEVICE_TO_ATEN = { + "cpu": "at::kCPU", + "cuda": "at::kCUDA", +} + +INDEX_TYPE = "long" + +NATIVE_OMP_RTYPES = {"+", "*", "^", "||", "min", "max"} +RTYPE_TO_CPP = { + "sum": "+", + "prod": "*", + "xor_sum": "^", + "min": "min", + "max": "max", + "argmin": "argmin", + "argmax": "argmax", + "any": "||", + "welford_reduce": "welford", + "welford_combine": "welford", +} +VECTORIZABLE_RTYPES = { + "max", + "min", + "sum", + "prod", + "xor_sum", + "welford_reduce", + "welford_combine", +} + +PYTHON_TO_CPP = { + "int": "long", + "float": "double", + "bool": "bool", + "ScalarType": "c10::ScalarType", +} + +CONTAINER_PYTHON_TO_CPP = { + "List": "std::vector", + "Optional": "c10::optional", +} + +DTYPE_LOWP_FP = [ + torch.bfloat16, + torch.float16, +] + + +def reduction_init(reduction_type, dtype): + if dtype in DTYPE_LOWP_FP: + # Since load promotes all half-precision inputs to float, the initial + # constant for reduction must be promoted as well + dtype = torch.float32 + if reduction_type in ("xor_sum", "sum", "any"): + return 0 + if reduction_type == "prod": + return 1 + if reduction_type in {"max", "argmax"}: + return ( + f"-std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::infinity()" + if is_float_dtype(dtype) + else f"std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::min()" + ) + if reduction_type in {"min", "argmin"}: + return ( + f"std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::infinity()" + if is_float_dtype(dtype) + else f"std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::max()" + ) + if is_welford_reduction(reduction_type): + return f"Welford<{DTYPE_TO_CPP[dtype]}>()" + raise AssertionError(reduction_type) + + +def reduction_init_vec(reduction_type, dtype): + scalar_type = DTYPE_TO_CPP[DTYPE_TO_COMPUTATION_DTYPE[dtype]] + 
vec_type = f"at::vec::Vectorized<{scalar_type}>" + + if is_welford_reduction(reduction_type): + return f"Welford<{vec_type}>()" + + scalar_init = reduction_init(reduction_type, dtype) + return f"{vec_type}({scalar_init})" + + +def reduction_acc_type(reduction_type, dtype): + assert reduction_type not in {"argmin", "argmax"} + scalar_type = DTYPE_TO_CPP[DTYPE_TO_COMPUTATION_DTYPE[dtype]] + if is_welford_reduction(reduction_type): + return f"Welford<{scalar_type}>" + + return scalar_type + + +def reduction_acc_type_vec(reduction_type, dtype): + assert reduction_type not in {"argmin", "argmax"} + scalar_type = DTYPE_TO_CPP[DTYPE_TO_COMPUTATION_DTYPE[dtype]] + vec_type = f"at::vec::Vectorized<{scalar_type}>" + if is_welford_reduction(reduction_type): + return f"Welford<{vec_type}>" + + return vec_type + + +def reduction_combine(reduction_type, var, next_value): + if reduction_type == "sum": + return f"{var} + {next_value}" + if reduction_type == "prod": + return f"{var} * {next_value}" + if reduction_type == "xor_sum": + return f"{var} ^ {next_value}" + if reduction_type == "any": + return f"{var} || {next_value}" + if reduction_type in ("min", "max"): + return f"{reduction_type}_propagate_nan({var}, {next_value})" + if reduction_type == "welford_reduce": + return f"welford_combine({var}, {next_value})" + if reduction_type == "welford_combine": + if isinstance(next_value, tuple): + mean, m2, weight = next_value + else: + mean, m2, weight = reduction_project(reduction_type, next_value) + return f"welford_combine({var}, {{{mean}, {m2}, {weight}}})" + raise AssertionError(reduction_type) + + +def reduction_combine_vec(reduction_type, var, next_value): + if reduction_type == "max": + return f"at::vec::maximum({var}, {next_value})" + elif reduction_type == "min": + return f"at::vec::minimum({var}, {next_value})" + elif reduction_type == "sum": + return f"{var} + {next_value}" + elif reduction_type == "prod": + return f"{var} * {next_value}" + elif reduction_type == 
"xor_sum": + return f"{var} ^ {next_value}" + elif reduction_type == "welford_reduce": + return f"welford_combine({var}, {next_value})" + elif reduction_type == "welford_combine": + if isinstance(next_value, tuple): + # When reading a value from Inductor IR we have a tuple of variable names + mean, m2, weight = next_value + else: + # When combining intermediate accumulators we have a Welford struct + mean, m2, weight = reduction_project(reduction_type, next_value) + return f"welford_combine({var}, {{{mean}, {m2}, {weight}}})" + else: + raise NotImplementedError() + + +def reduction_project(reduction_type, acc): + if is_welford_reduction(reduction_type): + return f"{acc}.mean", f"{acc}.m2", f"{acc}.weight" + elif reduction_type in {"argmin", "argmax"}: + return f"{acc}.index" + return acc + + +index_value_name_counter = 1 + + +def argmax_argmin_prefix(reduction_type, src_dtype, tmpvar): + global index_value_name_counter + struct_name = f"IndexValue_{index_value_name_counter}" + index_value_name_counter += 1 + + # A small annoyance, due to it being a little cumbersome to just throw {} into strings + prefix = [ + f"struct {struct_name} {{size_t index; {DTYPE_TO_CPP[src_dtype]} value;}};", + f"{struct_name} {tmpvar}{{0, {reduction_init(reduction_type, src_dtype)}}};", + ] + if reduction_type == "argmax": + prefix.extend( + [ + "#if !defined(__clang_major__) || __clang_major__ > 9", + f"#pragma omp declare reduction(argmax : {struct_name} :\\", + " omp_out.value = omp_in.value < omp_out.value ? omp_out.value : omp_in.value,\\", + " omp_out.index = omp_in.value < omp_out.value ? omp_out.index : omp_in.index)\\", + f"\tinitializer(omp_priv = {{0, {reduction_init(reduction_type, src_dtype)}}})", + "#endif", + ] + ) + elif reduction_type == "argmin": + prefix.extend( + [ + "#if !defined(__clang_major__) || __clang_major__ > 9", + f"#pragma omp declare reduction(argmin : {struct_name} :\\", + " omp_out.value = omp_in.value > omp_out.value ? 
omp_out.value : omp_in.value,\\", + " omp_out.index = omp_in.value > omp_out.value ? omp_out.index : omp_in.index)\\", + f"\tinitializer(omp_priv = {{0, {reduction_init(reduction_type, src_dtype)}}})", + "#endif", + ] + ) + return prefix + + +def parallel_num_threads(): + threads = config.cpp.threads + if threads < 1: + threads = torch.get_num_threads() + return threads + + +@functools.lru_cache +def stride_at(var: sympy.Symbol, index: sympy.Expr): + replacement = {var: var + 1} + new_index = sympy_subs(index, replacement) + return sympy.simplify(new_index - index) + + +class CppPrinter(ExprPrinter): + def _print_Integer(self, expr): + return f"{int(expr)}L" + + def _print_Where(self, expr): + c = self.paren(self.doprint(expr.args[0])) + p = self.paren(self.doprint(expr.args[1])) + q = self.paren(self.doprint(expr.args[2])) + return f"{c} ? {p} : {q}" + + def _print_ModularIndexing(self, expr): + x, div, mod = expr.args + x = self.paren(self.doprint(x)) + if div != 1: + div = self.paren(self.doprint(div)) + if expr.is_integer: + x = f"at::native::div_floor_integer({x}, {div})" + else: + x = f"at::native::div_floor_floating(static_cast({x}), static_cast({div}))" + mod = self.paren(self.doprint(mod)) + return f"static_cast<{INDEX_TYPE}>({x}) % static_cast<{INDEX_TYPE}>({mod})" + + def _print_FloorDiv(self, expr): + x, div = expr.args + x = self.paren(self.doprint(x)) + div = self.paren(self.doprint(div)) + if expr.is_integer: + return f"at::native::div_floor_integer({x}, {div})" + return f"at::native::div_floor_floating(static_cast({x}), static_cast({div}))" + + def _print_floor(self, expr): + assert len(expr.args) == 1 + r = f"std::floor({self._print(expr.args[0])})" + return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r + + def _print_Pow(self, expr): + # Uses float constants to perform FP div + base, exp = expr.args + base = self._print(base) + + if exp == 0.5 or exp == -0.5: + r = f"std::sqrt({base})" if exp == 0.5 else f"1.0/std::sqrt({base})" + 
return f"static_cast({r})" if expr.is_integer else r + assert exp.is_integer + exp = int(exp) + if exp > 0: + r = "*".join([self.paren(base)] * exp) + elif exp < 0: + r = "1.0/" + self.paren("*".join([self.paren(base)] * abs(exp))) + else: # exp == 0 + r = "1.0" + + return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r + + def _print_Rational(self, expr): + # Uses float constants to perform FP div + if expr.q == 1: + r = f"{expr.p}" + else: + r = f"{expr.p}.0/{expr.q}.0" + return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r + + def _print_ceiling(self, expr): + assert len(expr.args) == 1 + r = f"std::ceil({self._print(expr.args[0])})" + return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r + + def _print_Min(self, expr): + args = [self._print(a) for a in expr.args] + if len(args) == 2: + return f"std::min({args[0]}, {args[1]})" + else: + # Initializer list overload + il = "{" + ", ".join(args) + "}" + return f"std::min({il})" + + def _print_Max(self, expr): + args = [self._print(a) for a in expr.args] + if len(args) == 2: + return f"std::max({args[0]}, {args[1]})" + else: + # Initializer list overload + il = "{" + ", ".join(args) + "}" + return f"std::max({il})" + + +cexpr = CppPrinter().doprint + + +def cexpr_index(index): + return f"static_cast<{INDEX_TYPE}>({cexpr(index)})" + + +class RecordOptimizationContext: + def __init__(self, func_name: str = ""): + self.func_name = func_name + self.current_node: torch.fx.Node = None + self.opt_ctx: OptimizationContext = None + + def __enter__(self): + assert V.interpreter + assert V.interpreter.current_node + + self.current_node: torch.fx.Node = V.interpreter.current_node + if OptimizationContext.key in self.current_node.meta: + self.opt_ctx = self.current_node.meta[OptimizationContext.key] + else: + self.opt_ctx = OptimizationContext() + self.opt_ctx.ops_name = self.func_name + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + assert self.current_node + assert 
self.opt_ctx + self.current_node.meta[OptimizationContext.key] = self.opt_ctx + + def get_opt_ctx(self): + return self.opt_ctx + + def get_fx_node(self): + assert self.current_node + return self.current_node + + +def get_opt_ctx(node: torch.fx.Node) -> OptimizationContext: + return node.meta.get(OptimizationContext.key, None) + + +def get_current_node_opt_ctx() -> OptimizationContext: + assert V.interpreter.current_node + return get_opt_ctx(V.interpreter.current_node) + + +class CppVecOverrides(OpOverrides): + """Map element-wise ops to aten vectorization C++""" + + @staticmethod + def add(a, b): + return f"{a} + {b}" + + @staticmethod + def sub(a, b): + return f"{a} - {b}" + + @staticmethod + def mul(a, b): + return f"{a} * {b}" + + @staticmethod + def div(a, b): + return f"{a} / {b}" + + @staticmethod + def abs(x): + return f"{x}.abs()" + + @staticmethod + def sin(x): + return f"{x}.sin()" + + @staticmethod + def cos(x): + return f"{x}.cos()" + + @staticmethod + def exp(x): + return f"{x}.exp()" + + @staticmethod + def exp2(x): + return f"{x}.exp2()" + + @staticmethod + def expm1(x): + # decompose for a better performance + vec_one = f"decltype({x})(1)" + return f"{x}.exp() - {vec_one}" + + @staticmethod + def erf(x): + return f"{x}.erf()" + + @staticmethod + def erfc(x): + return f"{x}.erfc()" + + @staticmethod + def erfinv(x): + return f"{x}.erfinv()" + + @staticmethod + def sqrt(x): + return f"{x}.sqrt()" + + @staticmethod + def eq(x, y): + return f"to_float_mask({x} == {y})" + + @staticmethod + def ne(x, y): + return f"to_float_mask({x} != {y})" + + @staticmethod + def lt(x, y): + return f"to_float_mask({x} < {y})" + + @staticmethod + def gt(x, y): + return f"to_float_mask({x} > {y})" + + @staticmethod + def le(x, y): + return f"to_float_mask({x} <= {y})" + + @staticmethod + def ge(x, y): + return f"to_float_mask({x} >= {y})" + + @staticmethod + def and_(x, y): + return f"{x} & {y}" + + @staticmethod + def rsqrt(x): + return f"{x}.rsqrt()" + + @staticmethod + 
def pow(a, b): + return f"{a}.pow({b})" + + @staticmethod + def log(x): + return f"{x}.log()" + + @staticmethod + def round(x): + return f"{x}.round()" + + @staticmethod + def floor(x): + return f"{x}.floor()" + + @staticmethod + def ceil(x): + return f"{x}.ceil()" + + @staticmethod + def trunc(x): + return f"{x}.trunc()" + + @staticmethod + def fmod(a, b): + return f"{a}.fmod({b})" + + @staticmethod + def lgamma(x): + return f"{x}.lgamma()" + + @staticmethod + def logical_and(a, b): + return f"({a} != 0) & ({b} != 0)" + + @staticmethod + def logical_not(a): + return f"{a} == 0" + + @staticmethod + def logical_or(a, b): + return f"({a} != 0) | ({b} != 0)" + + @staticmethod + def logical_xor(a, b): + return f"({a} != 0) ^ ({b} != 0)" + + @staticmethod + def tan(a): + return f"{a}.tan()" + + @staticmethod + def tanh(a): + vec_one = f"decltype({a})(1)" + vec_two = f"decltype({a})(2)" + vec_minus_two = f"decltype({a})(-2)" + return f"{vec_two} / ({vec_one} + ({vec_minus_two} * {a}).exp()) - {vec_one}" + + @staticmethod + def reciprocal(a): + return f"{a}.reciprocal()" + + @staticmethod + def atan(x): + return f"{x}.atan()" + + @staticmethod + def acos(x): + return f"{x}.acos()" + + @staticmethod + def asin(x): + return f"{x}.asin()" + + @staticmethod + def cosh(x): + return f"{x}.cosh()" + + @staticmethod + def sinh(x): + return f"{x}.sinh()" + + @staticmethod + def log10(x): + return f"{x}.log10()" + + @staticmethod + def nextafter(x): + return f"{x}.nextafter()" + + @staticmethod + def copysign(a, b): + return f"{a}.copysign({b})" + + @staticmethod + def atan2(a, b): + return f"{a}.atan2({b})" + + @staticmethod + def hypot(a, b): + return f"{a}.hypot({b})" + + @staticmethod + def atanh(x): + # For real x, atanh(x) = 1/2 * log((1+x)/(1-x)) + vec_one = f"decltype({x})(1)" + vec_one_half = f"decltype({x})(0.5)" + return f"{vec_one_half} * (({vec_one} + {x})/({vec_one} - {x})).log()" + + @staticmethod + def asinh(x): + # For real x, asinh(x) = log(x + sqrt(1 + x**2)) + 
vec_one = f"decltype({x})(1)" + return f"({x} + ({vec_one} + {x}*{x}).sqrt()).log()" + + @staticmethod + def acosh(x): + # For real x, acosh(x) = log(x + sqrt(x**2 -1)) + vec_one = f"decltype({x})(1)" + return f"({x} + ({x}*{x} - {vec_one}).sqrt()).log()" + + @staticmethod + def constant(val, dtype): + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + assert opt_ctx + proposed_dtype = opt_ctx.dtype + assert proposed_dtype in [ + torch.float, + torch.int32, + ] + if val == float("inf"): + quote = f"std::numeric_limits<{DTYPE_TO_CPP[proposed_dtype]}>::infinity()" + elif val == float("-inf"): + quote = f"-std::numeric_limits<{DTYPE_TO_CPP[proposed_dtype]}>::infinity()" + elif math.isnan(val): + quote = f"std::numeric_limits<{DTYPE_TO_CPP[proposed_dtype]}>::quiet_NaN()" + elif val is True or val is False: + quote = f"static_cast<{DTYPE_TO_CPP[proposed_dtype]}>({str(val).lower()})" + else: + quote = f"static_cast<{DTYPE_TO_CPP[proposed_dtype]}>({repr(val)})" + + return f"at::vec::Vectorized<{DTYPE_TO_CPP[proposed_dtype]}>({quote})" + + @staticmethod + def relu(x): + bug = config.cpp.inject_relu_bug_TESTING_ONLY + if bug == "compile_error": + return "compile error!" + elif bug == "runtime_error": + return f"{x}; throw 1" + elif bug == "accuracy": + return f"{x} + decltype({x})(1)" + elif bug is None: + return f"at::vec::clamp_min({x}, decltype({x})(0))" + else: + raise AssertionError( + f"unrecognized config cpp.inject_relu_bug_TESTING_ONLY = {bug!r}" + ) + + # TODO: this seems to be dead + @staticmethod + def sigmoid(x): + return f"decltype({x})(1)/(decltype({x})(1) + {x}.neg().exp())" + + @staticmethod + def neg(x): + return f"{x}.neg()" + + @staticmethod + def floordiv(a, b): + # a and b are integer type + _t = f"decltype({a})" + quot = f"{a} / {b}" + rem = f"{a} % {b}" + return f"(({a} < {_t}(0)) != ({b} < {_t}(0)) ? ({rem} != {_t}(0) ? 
{quot} - {_t}(1) : {quot}) : {quot})" + + @staticmethod + def truncdiv(a, b): + # a and b are integer type + return f"{a} / {b}" + + @staticmethod + def minimum(a, b): + return f"at::vec::minimum({a}, {b})" + + @staticmethod + def maximum(a, b): + return f"at::vec::maximum({a}, {b})" + + @staticmethod + def square(a): + return f"{a} * {a}" + + @staticmethod + def where(a, b, c): + return f"decltype({b})::blendv({c}, {b}, {a})" + + @staticmethod + def sign(x): + code = BracesBuffer() + # auto tmp5 = tmp4 < 0 ? -1 : 1; + vec_zero = f"decltype({x})(0)" + vec_one = f"decltype({x})(1)" + blendv = f"decltype({x})::blendv({vec_zero}, {vec_one}, {vec_zero} < {x})" + left = V.kernel.cse.newvar() + code.writeline(f"auto {left} = {blendv};") + + # auto tmp6 = tmp4 == 0 ? 0 : tmp5; + blendv = f"decltype({x})::blendv({vec_zero}, {vec_one}, {x} < {vec_zero})" + right = V.kernel.cse.newvar() + code.writeline(f"auto {right} = {blendv};") + result = V.kernel.cse.newvar() + code.writeline(f"auto {result} = {left} - {right};") + V.kernel.compute.splice(code) + return result + + @staticmethod + def to_dtype(x, dtype): + assert dtype in [ + torch.bool, + torch.float, + torch.bfloat16, + torch.float16, + torch.uint8, + ], f"{__name__} does not support {dtype}" + node: torch.fx.Node = V.interpreter.current_node + assert node + opt_ctx_x = get_opt_ctx(node.args[1]) + assert opt_ctx_x + if opt_ctx_x.dtype in (torch.float, torch.float32) and dtype == torch.bool: + return f"vec_convert_to_mask({x})" + if opt_ctx_x.dtype == torch.bool and dtype in (torch.float, torch.float32): + return f"mask_convert_to_float({x})" + if opt_ctx_x.dtype in (torch.float, torch.float32) and dtype in DTYPE_LOWP_FP: + return f"cvt_fp32_to_lowp_fp<{DTYPE_TO_CPP[dtype]}>({x})" + if opt_ctx_x.dtype in DTYPE_LOWP_FP and dtype in (torch.float, torch.float32): + return f"cvt_lowp_fp_to_fp32<{DTYPE_TO_CPP[opt_ctx_x.dtype]}>({x})" + if opt_ctx_x.dtype == torch.uint8 and dtype in (torch.float, torch.float32): + # Note: 
this function only convert inputs number of elements equal to at::vec::Vectorized.size() + return f"at::vec::convert_uint8_to_float({x})" + if opt_ctx_x.dtype in (torch.float, torch.float32) and dtype == torch.uint8: + # TODO(Leslie): Add fast path to at::vec::convert_float_to_uint8, + # if we already handle the saturation previously. + # * Pattern match of quantization op in the loop body. + # * Skip the explicit saturation and clamp inside at::vec::convert_float_to_uint8. + return f"at::vec::convert_float_to_uint8({x})" + # TODO(jgong5): support conversion for other types + # currently we only allow load/store torch.uint8 and handle conversion there + return f"({x})" + + @staticmethod + def log1p(x): + bug = config.cpp.inject_log1p_bug_TESTING_ONLY + if bug == "accuracy": + return f"{x} + decltype({x})(1)" + elif bug is None: + return f"{x}.log1p()" + else: + raise AssertionError( + f"unrecognized config cpp.inject_log1p_bug_TESTING_ONLY = {bug!r}" + ) + + @staticmethod + def masked(mask, body, other): + code = BracesBuffer() + var = V.kernel.cse.newvar() + with V.kernel.masked(mask) as new_mask: + code.writeline(f"auto {var} = [&]") + with V.kernel.swap_buffers(code), code.indent(): + result = body() + code.writeline(f"return {result};") + code.writeline(";") + V.kernel.compute.splice(code) + + if other == float("-inf"): + other_code = ( + "at::vec::Vectorized(-std::numeric_limits::infinity())" + ) + elif other == float("inf"): + other_code = ( + "at::vec::Vectorized(std::numeric_limits::infinity())" + ) + elif math.isnan(other): + other_code = ( + "at::vec::Vectorized(std::numeric_limits::quiet_NaN())" + ) + else: + other_code = f"at::vec::Vectorized({other!r})" + type = f"decltype({var}())" + float_mask = f"to_float_mask({new_mask})" + return f"{type}::blendv({other_code}, {var}(), {float_mask})" + + @staticmethod + def index_expr(expr, dtype): + assert dtype == torch.int64 + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + assert opt_ctx + assert 
opt_ctx.dtype == torch.int32 + assert opt_ctx.is_most_inner_loop_irrevelant + return f"at::vec::Vectorized(static_cast({cexpr(V.kernel.rename_indexing(expr))}))" + + +class CppOverrides(OpOverrides): + """Map element-wise ops to C++""" + + @staticmethod + def mul(a, b): + return f"decltype({a})({a} * {b})" + + @staticmethod + def to_dtype(x, dtype): + assert dtype in DTYPE_TO_CPP, f"{dtype} missing from {__name__}.DTYPE_TO_CPP" + return f"static_cast<{DTYPE_TO_CPP[dtype]}>({x})" + + @staticmethod + def to_dtype_bitcast(x, dtype): + assert dtype in DTYPE_TO_CPP, f"{dtype} missing from {__name__}.DTYPE_TO_CPP" + return f"reinterpret_cast<{DTYPE_TO_CPP[dtype]}&>({x})" + + @staticmethod + def abs(x): + return f"std::abs({x})" + + @staticmethod + def sin(x): + return f"std::sin({x})" + + @staticmethod + def cos(x): + return f"std::cos({x})" + + @staticmethod + def neg(x): + return f"decltype({x})(-{x})" + + @staticmethod + def exp(x): + # return f"Sleef_expf_u10({x})" + return f"std::exp({x})" + + @staticmethod + def exp2(x): + return f"std::exp2({x})" + + @staticmethod + def expm1(x): + return f"std::expm1({x})" + + @staticmethod + def erf(x): + return f"std::erf({x})" + + @staticmethod + def erfc(x): + return f"std::erfc({x})" + + @staticmethod + def erfinv(x): + return f"calc_erfinv({x})" + + @staticmethod + def sqrt(x): + return f"std::sqrt({x})" + + @staticmethod + def rsqrt(x): + return f"1 / std::sqrt({x})" + + @staticmethod + def log1p(x): + bug = config.cpp.inject_log1p_bug_TESTING_ONLY + if bug == "accuracy": + return f"{x} + decltype({x})(1)" + elif bug is None: + return f"std::log1p({x})" + else: + raise AssertionError( + f"unrecognized config cpp.inject_log1p_bug_TESTING_ONLY = {bug!r}" + ) + + @staticmethod + def tan(x): + return f"std::tan({x})" + + @staticmethod + def tanh(x): + return f"std::tanh({x})" + + @staticmethod + def signbit(x): + return f"std::signbit({x})" + + @staticmethod + def pow(a, b): + return f"std::pow({a}, {b})" + + @staticmethod + 
def log(x): + return f"std::log({x})" + + @staticmethod + def round(x): + return f"std::nearbyint({x})" + + @staticmethod + def floor(x): + return f"std::floor({x})" + + @staticmethod + def floordiv(a, b): + # a and b are integer type + quot = f"{a} / {b}" + rem = f"{a} % {b}" + return f"(({a} < 0) != ({b} < 0) ? ({rem} != 0 ? {quot} - 1 : {quot}) : {quot})" + + @staticmethod + def ceil(x): + return f"std::ceil({x})" + + @staticmethod + def trunc(x): + return f"std::trunc({x})" + + @staticmethod + def truncdiv(a, b): + # a and b are integer type + return f"{a} / {b}" + + @staticmethod + def fmod(a, b): + return f"std::fmod({a}, {b})" + + @staticmethod + def isinf(x): + return f"std::isinf({x})" + + @staticmethod + def isnan(x): + return f"std::isnan({x})" + + @staticmethod + def lgamma(x): + return f"std::lgamma({x})" + + @staticmethod + def acos(x): + return f"std::acos({x})" + + @staticmethod + def acosh(x): + return f"std::acosh({x})" + + @staticmethod + def cosh(x): + return f"std::cosh({x})" + + @staticmethod + def sinh(x): + return f"std::sinh({x})" + + @staticmethod + def asin(x): + return f"std::asin({x})" + + @staticmethod + def asinh(x): + return f"std::asinh({x})" + + @staticmethod + def atan2(x, y): + return f"std::atan2({x}, {y})" + + @staticmethod + def atan(x): + return f"std::atan({x})" + + @staticmethod + def atanh(x): + return f"std::atanh({x})" + + @staticmethod + def copysign(x, y): + return f"std::copysign({x}, {y})" + + @staticmethod + def hypot(x, y): + return f"std::hypot({x}, {y})" + + @staticmethod + def log10(x): + return f"std::log10({x})" + + @staticmethod + def nextafter(x, y): + return f"std::nextafter({x}, {y})" + + @staticmethod + def relu(x): + bug = config.cpp.inject_relu_bug_TESTING_ONLY + if bug == "compile_error": + return "compile error!" 
+ elif bug == "runtime_error": + return f"{x}; throw 1" + elif bug == "accuracy": + return f"{x} + decltype({x})(1)" + elif bug is None: + return f"{x} * ({x}>0)" + else: + raise AssertionError( + f"unrecognized config cpp.inject_relu_bug_TESTING_ONLY = {bug!r}" + ) + + @staticmethod + def minimum(a, b): + return f"min_propagate_nan({a}, {b})" + + @staticmethod + def maximum(a, b): + return f"max_propagate_nan({a}, {b})" + + @staticmethod + def where(a, b, c): + return f"{a} ? {b} : {c}" + + @staticmethod + def mod(a, b): + return f"mod({a}, {b})" + + @staticmethod + def constant(val, dtype): + if dtype in DTYPE_LOWP_FP: + # Since load promotes all half-precision inputs to float, constants + # must be promoted as well + dtype = torch.float32 + if val == float("inf"): + return f"std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::infinity()" + elif val == float("-inf"): + return f"-std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::infinity()" + elif math.isnan(val): + return f"std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::quiet_NaN()" + elif val is True or val is False: + return ops.to_dtype(str(val).lower(), dtype) + return ops.to_dtype(repr(val), dtype) + + @staticmethod + def index_expr(expr, dtype): + return ops.to_dtype(cexpr(V.kernel.rename_indexing(expr)), dtype) + + @staticmethod + def masked(mask, body, other): + code = BracesBuffer() + + # Write masked operation into a lambda + body_var = V.kernel.cse.newvar() + code.writeline(f"auto {body_var} = [&]") + with V.kernel.swap_buffers(code), code.indent(): + result = body() + code.writeline(f"return {result};") + code.writeline(";") + V.kernel.compute.splice(code) + + # Use the lambda's return type as the type of other + type = f"decltype({body_var}())" + + if other == float("-inf"): + other_code = f"-std::numeric_limits<{type}>::infinity()" + elif other == float("inf"): + other_code = f"std::numeric_limits<{type}>::infinity()" + elif isinstance(other, bool): + other_code = f"static_cast<{type}>({str(other).lower()})" + elif 
math.isnan(other): + other_code = f"std::numeric_limits<{type}>::quiet_NaN()" + else: + other_code = f"static_cast<{type}>({repr(other)})" + + return f"{mask} ? {body_var}() : {other_code}" + + @staticmethod + def logical_and(a, b): + return f"{a} && {b}" + + @staticmethod + def logical_not(a): + return f"!{a}" + + @staticmethod + def logical_or(a, b): + return f"{a} || {b}" + + @staticmethod + def logical_xor(a, b): + return f"{a} != {b}" + + @staticmethod + def bitwise_and(a, b): + return f"decltype({a})({a} & {b})" + + @staticmethod + def bitwise_not(a): + return f"decltype({a})(~{a})" + + @staticmethod + def bitwise_or(a, b): + return f"decltype({a})({a} | {b})" + + @staticmethod + def bitwise_xor(a, b): + return f"decltype({a})({a} ^ {b})" + + @staticmethod + def bitwise_left_shift(a, b): + return f"decltype({a})({a} << {b})" + + @staticmethod + def bitwise_right_shift(a, b): + return f"decltype({a})({a} >> {b})" + + @staticmethod + def rand(seed: sympy.Expr, offset: sympy.Expr): + return f"normalized_rand_cpu({seed}, {offset})" + + @staticmethod + def randn(seed: sympy.Expr, offset: sympy.Expr): + return f"randn_cpu({seed}, {offset})" + + @staticmethod + def randint64(seed: sympy.Expr, offset: sympy.Expr, low, high): + return f"randint64_cpu({seed}, {offset}, {low}, {high})" + + @staticmethod + def sigmoid(x): + return f"decltype({x})(1) / (decltype({x})(1) + std::exp(-{x}))" + + @staticmethod + def sign(x): + code = BracesBuffer() + # auto tmp5 = tmp4 < 0 ? -1 : 1; + left = V.kernel.cse.newvar() + right = V.kernel.cse.newvar() + result = V.kernel.cse.newvar() + scalar_zero = f"decltype({x})(0)" + scalar_one = f"decltype({x})(1)" + code.writeline(f"auto {left} = {x} > 0 ? {scalar_one} : {scalar_zero};") + code.writeline(f"auto {right} = {x} < 0 ? 
{scalar_one} : {scalar_zero};") + code.writeline(f"auto {result} = {left} - {right};") + V.kernel.compute.splice(code) + return result + + +class CppKernel(Kernel): + overrides = CppOverrides + sexpr = cexpr + newvar_prefix = "auto " + suffix = ";" + + def __init__(self, args, num_threads): + super().__init__(args) + self.call_ranges = None + self.ranges = None + self.itervars = None + self.reduction_depth = None + self.reduction_prefix = IndentedBuffer() + self.reduction_suffix = IndentedBuffer() + self.reduction_var_map = {} + self.reduction_cse = CSE(self.newvar_prefix, self.suffix, name_prefix="tmp_acc") + self.preloads = IndentedBuffer() + self.poststores = IndentedBuffer() + self.num_threads = num_threads # num_threads the kernel specialized for + self.reduction_omp_dec: Dict[Tuple[str, str], str] = {} + self._load_mask = None + + @contextlib.contextmanager + def masked(self, mask): + """Context manager to add an additional mask to loads and stores.""" + prior = self._load_mask + if prior: + mask = self.cse.generate(self.compute, f"{mask} & {prior}") + + self._load_mask = mask + try: + yield mask + finally: + self._load_mask = prior + + def scale_index_with_offset( + self, index: sympy.Expr, scale=1, itervar_idx=-1, offset=0 + ): + var = self.itervars[itervar_idx] + replacement = {var: var * scale + offset} + new_index = sympy_subs(index, replacement) + return new_index + + @staticmethod + def indirect_indexing(index_var, size, check=True): + return sympy_symbol(str(index_var)) + + def load(self, name: str, index: sympy.Expr): + var = self.args.input(name) + index = self.rename_indexing(index) + line = f"{var}[{cexpr_index(index)}]" + if V.graph.get_dtype(name) in [torch.float16]: + line = f"static_cast({line})" + return self.cse.generate(self.loads, line) + + def store(self, name, index, value, mode=None): + assert "buf" in name + var = self.args.output(name) + index = self.rename_indexing(index) + if mode is None: + line = f"{var}[{cexpr_index(index)}] = 
{value};" + elif mode == "atomic_add": + if not config.cpp.dynamic_threads and self.num_threads == 1: + line = f"{var}[{cexpr_index(index)}] += {value};" + else: + line = f"atomic_add(&{var}[{cexpr_index(index)}], {value});" + else: + raise NotImplementedError(f"store mode={mode}") + self.stores.writeline(DeferredLine(name, line)) + + def reduction(self, dtype, src_dtype, reduction_type, value): + argmax_or_argmin = reduction_type in {"argmax", "argmin"} + + reduction_key = src_dtype, reduction_type, value + if reduction_key in self.reduction_cse.reduction_cache: + return self.reduction_cse.reduction_cache[reduction_key] + + acc = self.reduction_cse.generate( + self.loads, f"reduction {reduction_key}", write=False + ) + self.reduction_var_map[acc] = reduction_type + if argmax_or_argmin: + self.reduction_prefix.writelines( + argmax_argmin_prefix(reduction_type, src_dtype, acc) + ) + compare_op = "<" if reduction_type == "argmax" else ">" + self.stores.writelines( + [ + f"if ({acc}.value {compare_op} {value}) {{", + f" {acc}.index = {self.itervars[-1]}; {acc}.value = {value};", + "}", + ], + ) + else: + acc_type = reduction_acc_type(reduction_type, dtype) + + if (reduction_type, acc_type) not in self.reduction_omp_dec: + if RTYPE_TO_CPP[reduction_type] not in NATIVE_OMP_RTYPES: + # Scalar reduction for other reductions are declared by default + self.reduction_prefix.splice( + f"""\ + #pragma omp declare reduction(\ + {RTYPE_TO_CPP[reduction_type]}:{acc_type}:\ + omp_out = {reduction_combine(reduction_type, "omp_out", "omp_in")}) \ + initializer(omp_priv={{{reduction_init(reduction_type, dtype)}}}) + """ + ) + self.reduction_omp_dec[reduction_type, acc_type] = RTYPE_TO_CPP[ + reduction_type + ] + + self.reduction_prefix.writeline( + f"{acc_type} {acc} = {reduction_init(reduction_type, dtype)};" + ) + self.stores.writeline( + f"{acc} = {reduction_combine(reduction_type, acc, value)};" + ) + + result = reduction_project(reduction_type, acc) + 
self.reduction_cse.reduction_cache[reduction_key] = result + return result + + def store_reduction(self, name, index, value): + index = self.rename_indexing(index) + var = self.args.output(name) + self.reduction_suffix.writeline( + DeferredLine(name, f"{var}[{cexpr_index(index)}] = {value};") + ) + + def set_ranges(self, lengths, reduction_lengths): + if self.call_ranges: + assert self.call_ranges == tuple(lengths) + tuple( + reduction_lengths + ), f"{self.call_ranges} == {tuple(lengths)} + {tuple(reduction_lengths)}" + assert self.reduction_depth == len(lengths) + else: + self.call_ranges = tuple(lengths) + tuple(reduction_lengths) + self.ranges = [self.rename_indexing(x) for x in self.call_ranges] + self.itervars = [sympy_symbol(f"i{n}") for n in range(len(self.ranges))] + self.reduction_depth = len(lengths) + return ( + self.itervars[: self.reduction_depth], + self.itervars[self.reduction_depth :], + ) + + def size_hint(self): + return V.graph.sizevars.size_hint(sympy_product(self.call_ranges)) + + def codegen_loops_impl(self, loop_nest, code, worksharing): + threads = parallel_num_threads() + par_depth = self.decide_parallel_depth( + self.call_ranges[: loop_nest.max_parallel_depth()], threads + ) + with contextlib.ExitStack() as stack: + if par_depth: + if loop_nest.is_reduction_only(): + # need to close the worksharing scope to define reduction vars outside it + worksharing.close() + else: + worksharing.parallel(threads) + loop_nest.mark_parallel(par_depth) + elif threads > 1: + if worksharing.single(): + stack.enter_context(code.indent()) + + def gen_kernel(kernel): + with contextlib.ExitStack() as stack: + assert kernel + if hasattr(kernel, "codegen_inner_loops"): + code.splice(kernel.preloads) + kernel.codegen_inner_loops(code) + stack.enter_context(code.indent()) + code.splice(kernel.loads) + code.splice(kernel.compute) + code.splice(kernel.stores) + if hasattr(kernel, "codegen_inner_loops"): + code.splice(kernel.poststores) + + def 
get_reduction_code_buffer(loops, is_suffix=True): + for loop in loops: + for kernel in loop.get_kernels(): + if is_suffix: + return kernel.reduction_suffix + else: + return kernel.reduction_prefix + return None + + def gen_loops(loops: List[LoopLevel], in_reduction=False): + with contextlib.ExitStack() as stack_outer: + if loops: + loop = loops[0] + if loop.is_reduction() and not in_reduction: + reduction_prefix = get_reduction_code_buffer( + loops, is_suffix=False + ) + if reduction_prefix: + stack_outer.enter_context(code.indent()) + code.splice(reduction_prefix) + if loop_nest.is_reduction_only() and loop.parallel: + worksharing.parallel(threads) + + for loop in loops: + gen_loop(loop, in_reduction) + + if loops: + loop = loops[0] + if loop_nest.is_reduction_only() and loop.parallel: + worksharing.close() + if loop.is_reduction() and not in_reduction: + code.splice( + get_reduction_code_buffer(loops, is_suffix=True) + ) + + def gen_loop(loop: LoopLevel, in_reduction=False): + with contextlib.ExitStack() as stack: + loop_lines = loop.lines() + if loop_lines is None: + return + code.writelines(loop_lines) + stack.enter_context(code.indent()) + # generate inner loops or loop body + if loop.inner: + gen_loops(loop.inner, loop.is_reduction()) + else: + kernels = loop.get_kernels() + assert len(kernels) == 1 + gen_kernel(kernels[0]) + + stack.enter_context(code.indent()) + if loop_nest.root: + gen_loops(loop_nest.root) + else: + gen_kernel(loop_nest.kernel) + + def codegen_loops(self, code, worksharing): + loop_nest = LoopNestWithSplit.build(self) + self.codegen_loops_impl(loop_nest, code, worksharing) + + def decide_parallel_depth(self, ranges, threads): + seq = self.size_hint() + par = 1 + depth = 0 + for expr in ranges: + hint = V.graph.sizevars.size_hint(expr) + if par >= 2 * threads or par == threads: + break + if seq // threads < config.cpp.min_chunk_size: + # not enough work + break + depth += 1 + par *= hint + seq /= hint + # if we assume thread number is 
dynamic, make sure we + # have at least one parallel scope and let OMP runtime + # to manage the serial vs. parallel. + if config.cpp.dynamic_threads and depth == 0 and len(ranges) > 0: + depth = 1 + return depth + + @contextlib.contextmanager + def write_to_suffix(self): + prior = (self.loads, self.compute, self.stores, self.cse) + self.loads = IndentedBuffer() + self.compute = IndentedBuffer() + self.stores = IndentedBuffer() + self.cse = self.cse.clone() + yield + self.reduction_suffix.splice(self.loads) + self.reduction_suffix.splice(self.compute) + self.reduction_suffix.splice(self.stores) + (self.loads, self.compute, self.stores, self.cse) = prior + + +class CppVecKernel(CppKernel): + overrides = CppVecOverrides + + def __init__( + self, + args, + num_threads, + tiling_factor=0, + tiling_idx=-1, + tiling_dtype=torch.float, + ): + super().__init__(args, num_threads) + assert codecache.pick_vec_isa() + if tiling_factor == 0: + tiling_factor = codecache.pick_vec_isa().nelements(dtype=tiling_dtype) + self.tiling_factor = tiling_factor + self.tiling_idx = tiling_idx + metrics.generated_cpp_vec_kernel_count += 1 + + def load(self, name: str, index: sympy.Expr): + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + var = self.args.input(name) + index = self.rename_indexing(index) + dtype = V.graph.get_dtype(name) + tiling_var = self.itervars[self.tiling_idx] + is_broadcast = not index.has(tiling_var) + is_mask = ( + dtype in [torch.bool, torch.uint8] and not opt_ctx.is_load_uint8_as_float + ) + load_mask = f"to_float_mask({self._load_mask})" if self._load_mask else None + non_contiguous = ( + not is_broadcast + and stride_at(tiling_var, index) != 1 + or "tmp" in f"{index}" + ) + var_expr = ( + f"{var}[{cexpr_index(index)}]" + if is_broadcast + else f"{var} + {cexpr_index(index)}" + ) + loadbuf = "tmpbuf" if non_contiguous else var_expr + if is_broadcast: + # should always be broadcast as float for vectorization since we always use float to compute + if 
is_mask: + loadbuf = f"flag_to_float_scalar({loadbuf})" + if dtype in DTYPE_LOWP_FP: + line = f"at::vec::Vectorized<{DTYPE_TO_CPP[dtype]}>({loadbuf})" + else: + line = f"at::vec::Vectorized(static_cast({loadbuf}))" + elif dtype in [torch.uint8] and opt_ctx.is_load_uint8_as_float: + line = ( + f"masked_load({loadbuf}, {load_mask})" + if load_mask + else f"at::vec::Vectorized::loadu_one_fourth({loadbuf})" + ) + elif is_mask: + line = f"flag_to_float_vec({loadbuf})" + elif dtype in DTYPE_LOWP_FP: + line = ( + f"masked_load({loadbuf}, {load_mask})" + if load_mask + else f"at::vec::Vectorized<{DTYPE_TO_CPP[dtype]}>::loadu({loadbuf}, {self.tiling_factor})" + ) + else: + line = ( + f"masked_load({loadbuf}, {load_mask})" + if load_mask + else f"at::vec::Vectorized::loadu({loadbuf})" + ) + + if non_contiguous: + # TODO: support masked_load for non_contiguous path? + tmpbuftype = "float" if is_mask else f"{DTYPE_TO_CPP[dtype]}" + tmpbufsize = f"{self.tiling_factor}" + if dtype in DTYPE_LOWP_FP: + tmpbufsize += " * 2" + tmpbufdeclare = f"__at_align__ {tmpbuftype} tmpbuf[{tmpbufsize}];" + inner = sympy_symbol(f"{tiling_var}_inner") + new_index = self.scale_index_with_offset( + index, itervar_idx=self.tiling_idx, offset=inner + ) + tmpbufdefine = ( + f"for (long {inner} = 0; {inner} < {self.tiling_factor}; {inner}++) " + ) + rhs = f"{var}[{cexpr_index(new_index)}]" + if is_mask: + rhs = f"flag_to_float_scalar({rhs})" + tmpbufdefine += f"tmpbuf[{inner}] = {rhs};" + line = f"([&]() {{ {tmpbufdeclare} {tmpbufdefine} return {line}; }})()" + + return self.cse.generate(self.loads, line) + + def store(self, name, index, value, mode=None): + assert "buf" in name + assert mode is None + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + var = self.args.output(name) + index = self.rename_indexing(index) + tiling_var = self.itervars[self.tiling_idx] + assert index.has(tiling_var) + var_expr = f"{var} + {cexpr_index(index)}" + dtype = V.graph.get_dtype(name) + non_contiguous = 
stride_at(tiling_var, index) != 1 or "tmp" in f"{index}" + if non_contiguous: + var_expr = "tmpbuf" + if V.graph.get_dtype(name) in DTYPE_LOWP_FP: + line = f"{value}.store({var_expr}, {self.tiling_factor});" + elif V.graph.get_dtype(name) in [torch.uint8]: + line = f"{value}.store({var_expr}, {self.tiling_factor});" + else: + line = f"{value}.store({var_expr});" + if non_contiguous: + inner = sympy_symbol(f"{tiling_var}_inner") + new_index = self.scale_index_with_offset( + index, itervar_idx=self.tiling_idx, offset=inner + ) + tmp_bufsize = ( + f"{self.tiling_factor}*sizeof(float)/sizeof({DTYPE_TO_CPP[dtype]})" + ) + line = ( + f"{{ __at_align__ {DTYPE_TO_CPP[dtype]} tmpbuf[{tmp_bufsize}]; {line} " + f"for (long {inner} = 0; {inner} < {self.tiling_factor}; {inner}++) " + f"{var}[{cexpr_index(new_index)}] = tmpbuf[{inner}]; }}" + ) + self.stores.writeline(DeferredLine(name, line)) + + def reduction(self, dtype, src_dtype, reduction_type, value): + assert reduction_type in { + "max", + "min", + "sum", + "prod", + "xor_sum", + "welford_reduce", + "welford_combine", + } + assert dtype == torch.float + assert src_dtype == torch.float + + vec_ns = "at::vec" + vec = f"{vec_ns}::Vectorized<{DTYPE_TO_CPP[dtype]}>" + acc_type = reduction_acc_type(reduction_type, dtype) + acc_type_vec = reduction_acc_type_vec(reduction_type, dtype) + + if (reduction_type, acc_type) not in self.reduction_omp_dec: + if RTYPE_TO_CPP[reduction_type] not in NATIVE_OMP_RTYPES: + # Scalar reduction for other reductions are declared by default + self.reduction_prefix.splice( + f"""\ +#pragma omp declare reduction(\ +{RTYPE_TO_CPP[reduction_type]}:{acc_type}:\ +omp_out = {reduction_combine(reduction_type, "omp_out", "omp_in")}) \ +initializer(omp_priv={{{reduction_init(reduction_type, dtype)}}}) + """ + ) + self.reduction_omp_dec[reduction_type, acc_type] = RTYPE_TO_CPP[ + reduction_type + ] + + if (reduction_type, acc_type_vec) not in self.reduction_omp_dec: + self.reduction_prefix.splice( + f"""\ 
+#pragma omp declare reduction(\ +{RTYPE_TO_CPP[reduction_type]}:{acc_type_vec}:\ +omp_out = {reduction_combine_vec(reduction_type, "omp_out", "omp_in")}) \ +initializer(omp_priv={{{reduction_init_vec(reduction_type, dtype)}}}) + """ + ) + self.reduction_omp_dec[reduction_type, acc_type_vec] = RTYPE_TO_CPP[ + reduction_type + ] + + reduction_key = src_dtype, reduction_type, value + if reduction_key in self.reduction_cse.reduction_cache: + return self.reduction_cse.reduction_cache[reduction_key] + + acc = self.reduction_cse.generate( + self.loads, f"reduction {reduction_key}", write=False + ) + acc_vec = f"{acc}_vec" + + self.reduction_var_map[acc_vec] = reduction_type + self.reduction_prefix.writeline( + f"{acc_type} {acc} = {reduction_init(reduction_type, dtype)};" + ) + self.reduction_prefix.writeline( + f"{acc_type_vec} {acc_vec} = {reduction_init_vec(reduction_type, dtype)};" + ) + self.stores.writeline( + f"{acc_vec} = {reduction_combine_vec(reduction_type, acc_vec, value)};" + ) + + if self.tiling_idx >= self.reduction_depth: + # Horizontal reduction + if is_welford_reduction(reduction_type): + next_value = f"welford_vec_reduce_all({acc_vec})" + else: + reduce_all_body = ( + "{ return " + + reduction_combine_vec(reduction_type, "x", "y") + + "; }" + ) + vec_reduce_all_func = f"{vec_ns}::vec_reduce_all<{DTYPE_TO_CPP[dtype]}>" + next_value = f"{vec_reduce_all_func}([]({vec}& x, {vec}& y) {reduce_all_body}, {acc_vec})" + + self.reduction_suffix.writeline( + f"{acc} = {reduction_combine(reduction_type, acc, next_value)};" + ) + tmpvar = acc + else: + tmpvar = acc_vec + + result = reduction_project(reduction_type, tmpvar) + self.reduction_cse.reduction_cache[reduction_key] = result + return result + + def store_reduction(self, name, index, value): + index = self.rename_indexing(index) + var = self.args.output(name) + out_dtype = V.graph.get_dtype(name) + # Only float reductions are vectorized currently + dtype = torch.float + if self.tiling_idx >= 
self.reduction_depth: + # Horizontal reduction + self.reduction_suffix.writeline( + DeferredLine( + name, + f"{var}[{cexpr_index(index)}] = static_cast<{DTYPE_TO_CPP[out_dtype]}>({value});", + ) + ) + else: + # Vertical reduction + store_lines = [ + DeferredLine(name, f"{value}.store({var} + {cexpr_index(index)});") + ] + if out_dtype != dtype: + if out_dtype in DTYPE_LOWP_FP and dtype == torch.float: + _lowp_fp_tmpvar_vec = f"{DTYPE_TO_CPP[out_dtype]}_{value}" + store_lines = [ + DeferredLine( + name, + f"auto {_lowp_fp_tmpvar_vec} = cvt_fp32_to_lowp_fp<{DTYPE_TO_CPP[out_dtype]}>({value});", + ), + DeferredLine( + name, + f"{_lowp_fp_tmpvar_vec}.store({var} + {cexpr_index(index)}, {self.tiling_factor});", + ), + ] + else: + raise AssertionError( + f"Unsupported reduction type {reduction_type} from {dtype} to {out_dtype}" + ) + self.reduction_suffix.writelines(store_lines) + + +class CppTile2DKernel(CppVecKernel): + """ + A vector kernel that handles the 2d tiles with the tile size defined in `tiling_factor` on + the inner-most loop level and one of the outer loop level (`outer_tiling_idx`). When the data + tile is accessed in a contiguous way from the outer loop axis, a transposition is applied on the + tile to make the access contiguous from the inner-most loop axis. Then, the same vectorization + logic from its parent `CppVecKernel` is leveraged for load/store/compute. The transposed tile load + and store are generated into kernel.preloads and kernel.poststores buffers. + + The loop structure looks like below: + for ... + for i_outer ... + for ... + for inner_most ... + // generated by CppTile2DKernel + float tmp0[16*16]; at::vec::transpose_mxn<...>(tmp0, in_ptr0 + ..., ...); // into kernel.preloads + float tmp1[16*16]; // into kernel.preloads + for i_inner ... { // the kernel inner loop + vectorized loads/compute/stores (e.g., load tmp0, store tmp1) // into kernel.loads/compute/stores + } + at::vec::transpose_mxn(out_ptr0 + ..., tmp1, ...) 
// into kernel.poststores + for inner_most ... (tail) + // generated by CppVecKernel + ... + for i_outer ... (tail) + for ... + for ... + // generated by CppKernel + ... + """ + + def __init__(self, args, num_threads, tiling_factor, tiling_indices, tiling_dtype): + super().__init__( + args, num_threads, tiling_factor, tiling_indices[1], tiling_dtype + ) + self.tiling_indices = tiling_indices + + def inner_itervar(self): + return sympy_symbol(f"{self.itervars[self.outer_idx]}_inner") + + def need_vec_transpose(self, index): + return ( + stride_at(self.itervars[self.outer_idx], index) == 1 + and index.has(self.itervars[self.tiling_idx]) + and not stride_at(self.itervars[self.tiling_idx], index).has( + self.itervars[self.tiling_idx] + ) + and not stride_at(self.itervars[self.tiling_idx], index).has( + self.itervars[self.outer_idx] + ) + ) + + def gen_transposed_tile_load_store(self, name, var, index, is_store): + # transposed tile load/store outside the kernel inner loop + dtype = V.graph.get_dtype(name) + factor = self.tiling_factor + src = f"{var} + {cexpr_index(index)}" + dst = "__place_holder__" + ld_src = f"{cexpr_index(stride_at(self.itervars[self.tiling_idx], index))}" + ld_dst = f"{factor}" + if is_store: + src, dst = dst, src + ld_src, ld_dst = ld_dst, ld_src + + need_define = True + load_or_store = f"at::vec::transpose_mxn<{DTYPE_TO_CPP[dtype]},{factor},{factor}>({src}, {ld_src}, {dst}, {ld_dst});" + if is_store: + tile_var = self.cse.newvar() + elif load_or_store not in self.cse.cache: + tile_var = self.cse.generate(self.preloads, load_or_store, write=False) + else: + need_define = False + tile_var = self.cse.cache[load_or_store] + + if need_define: + define_line = f"{DTYPE_TO_CPP[dtype]} {tile_var}[{factor}*{factor}] __attribute__ ((aligned ({factor})));" + self.preloads.writeline(define_line) + + load_or_store = load_or_store.replace("__place_holder__", str(tile_var)) + if is_store: + self.poststores.writeline(DeferredLine(name, load_or_store)) + else: + 
self.preloads.writeline(load_or_store) + + return tile_var + + def load(self, name: str, index: sympy.Expr): + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + var = self.args.input(name) + index = self.rename_indexing(index) + + inner = self.inner_itervar() + if self.need_vec_transpose(index): + tile_var = self.gen_transposed_tile_load_store( + name, var, index, is_store=False + ) + # vector load inside the kernel inner loop + loadbuf = f"{tile_var} + {cexpr_index(inner * self.tiling_factor)}" + dtype = V.graph.get_dtype(name) + if dtype in DTYPE_LOWP_FP: + line = f"at::vec::Vectorized<{DTYPE_TO_CPP[dtype]}>::loadu({loadbuf}, {self.tiling_factor})" + elif ( + V.graph.get_dtype(name) in [torch.uint8] + and opt_ctx.is_load_uint8_as_float + ): + line = f"at::vec::Vectorized::loadu_one_fourth({loadbuf})" + else: + line = f"at::vec::Vectorized::loadu({loadbuf})" + return self.cse.generate(self.loads, line) + else: + new_index = self.scale_index_with_offset( + index, + itervar_idx=self.outer_idx, + offset=inner, + ) + return super().load(name, new_index) + + def store(self, name, index, value, mode=None): + assert "buf" in name + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + var = self.args.output(name) + + inner = self.inner_itervar() + index = self.rename_indexing(index) + assert mode is None + if self.need_vec_transpose(index): + tile_var = self.gen_transposed_tile_load_store( + name, var, index, is_store=True + ) + # vector store inside the kernel inner loop + storebuf = f"{tile_var} + {cexpr_index(inner * self.tiling_factor)}" + if V.graph.get_dtype(name) in DTYPE_LOWP_FP: + line = f"{value}.store({storebuf}, {self.tiling_factor});" + elif V.graph.get_dtype(name) in [torch.uint8]: + line = f"{value}.store({storebuf}, {self.tiling_factor});" + else: + line = f"{value}.store({storebuf});" + self.stores.writeline(DeferredLine(name, line)) + else: + new_index = self.scale_index_with_offset( + index, + itervar_idx=self.outer_idx, + offset=inner, 
+ ) + super().store(name, new_index, value, mode) + + def codegen_inner_loops(self, code): + inner = self.inner_itervar() + code.writeline( + f"for (long {inner} = 0; {inner} < {self.tiling_factor}; {inner}++)" + ) + + def set_ranges(self, group, reduction_group): + vars = super().set_ranges(group, reduction_group) + # do vertical reduction as the tail loop + self.outer_idx, self.tiling_idx = ( + self.tiling_indices + if self.tiling_indices[1] < self.reduction_depth + else reversed(self.tiling_indices) + ) + return vars + + +class CppVecKernelChecker(CppVecKernel): + def __init__(self, args, num_threads, tiling_factor, tiling_idx=-1): + super().__init__(args, num_threads, tiling_factor, tiling_idx) + + # Since this kernel is only for checker but does not generate any + # code, so we need to decrease the kernel count. + metrics.generated_kernel_count -= 1 + metrics.generated_cpp_vec_kernel_count -= 1 + + # Used to record the graph wrapper code as the wrapper_code status could be + # changed during graph run. + self._orig_wrapper_code = None + + self.simd_vec = True + + self.fast_vec_list = [] + for k, v in CppVecOverrides.__dict__.items(): + if isinstance(v, staticmethod): + self.fast_vec_list.append(k) + self.exit_stack = contextlib.ExitStack() + + # Cache all the load result + self.load_supported_dtypes: list[torch.dtype] = [ + torch.float, + torch.bfloat16, + torch.float16, + torch.bool, + torch.uint8, + ] + self.store_supported_dtypes: list[torch.dtype] = [ + torch.float, + torch.bfloat16, + torch.float16, + torch.uint8, + ] + # Cache the dtypes of the store operation. 
If the store is mixing dtypes, the + # vectorization would not support it as it is hard to determine the vec dtype + self.store_dtypes: list[torch.dtype] = [] + # The dtype is used for vectorization + self.vec_dtype: torch.dtype = torch.float32 + + def disable_vec(self, msg=None): + if schedule_log.isEnabledFor(logging.DEBUG): + schedule_log.debug("Disabled vectorization: %s", msg) + self.simd_vec = False + + def could_vec(self, name: str, index: sympy.Expr): + assert self.itervars is not None + return len(self.itervars) > 0 + + def is_mask(self, name: str, users: Dict[torch.fx.Node, None]): + load_type = V.graph.get_dtype(name) + if load_type == torch.bool: + return all(user.target in ("where", "masked") for user in users.keys()) + elif load_type == torch.uint8: + """ + If the load value is torch.uint8, then we only support the loaded + value is as the mask. + """ + if not all( + user.target == "to_dtype" and user.args[-1] == torch.bool + for user in users.keys() + ): + return False + + for to_dtype_node in users.keys(): + assert to_dtype_node.target == "to_dtype" + if not all( + user.target in ("where", "masked") + for user in to_dtype_node.users.keys() + ): + return False + return True + else: + return False + + def is_load_uint8_as_float(self, name: str, users: Dict[torch.fx.Node, None]): + """ + Check: + 1. load_type is torch.uint8 + 2. has 1 user node of target to_dtype + 3. dtype of to_dtype is torch.float + """ + load_type = V.graph.get_dtype(name) + if load_type is not torch.uint8: + return False + if len(users) == 1: + user = list(users)[0] + if (user.target == "to_dtype") and (user.args[-1] == torch.float): + return True + return False + return False + + def can_store_fp32_as_uint8(self, store_var: str, value_node: torch.fx.Node): + """ + Check: + 1. store_type is torch.uint8 + 2. value_node is of target to_dtype + 3. 
dtype of to_dtype node is torch.uint8 + """ + store_type = V.graph.get_dtype(store_var) + if store_type not in [torch.uint8]: + return False + if value_node.target == "to_dtype" and value_node.args[-1] == torch.uint8: + return True + + return False + + def is_load_integer_scalar_tensor(self, name: str, index: sympy.Expr): + load_dtype = V.graph.get_dtype(name) + buffer = V.graph.get_buffer(name) + return ( + load_dtype in [torch.int32, torch.int64] + and isinstance(buffer, TensorBox) + and isinstance(buffer.data, StorageBox) + and (len(buffer.data.layout.size) == 0) + and (index == 0) + ) + + def load(self, name: str, index: sympy.Expr): + with RecordOptimizationContext(__name__) as node_ctx: + load_dtype = V.graph.get_dtype(name) + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + opt_ctx.dtype = load_dtype + opt_ctx.is_load_as_mask = self.is_mask(name, node_ctx.get_fx_node().users) + opt_ctx.is_load_uint8_as_float = self.is_load_uint8_as_float( + name, node_ctx.get_fx_node().users + ) + + var = self.cse.newvar() + + if load_dtype in [torch.bool, torch.uint8] and not ( + opt_ctx.is_load_as_mask or opt_ctx.is_load_uint8_as_float + ): + if not opt_ctx.is_load_as_mask: + self.disable_vec(f"{load_dtype} not loaded as mask") + elif not opt_ctx.is_load_uint8_as_float: + self.disable_vec(f"{load_dtype} not loaded as float") + return var + + if ( + load_dtype not in self.load_supported_dtypes + ) and not self.is_load_integer_scalar_tensor(name, index): + self.disable_vec(f"{load_dtype} not supported by load") + return var + + index = self.rename_indexing(index) + if self.simd_vec and not self.could_vec(name, index): + self.disable_vec(f"not a loop: {index}") + return var + + def store(self, name, index, value, mode=None): + with RecordOptimizationContext(__name__) as node_ctx: + store_dtype = V.graph.get_dtype(name) + + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + opt_ctx.dtype = store_dtype + + store_dtype = torch.float 
if store_dtype == torch.float32 else store_dtype + self.store_dtypes.append(store_dtype) + if store_dtype not in self.store_supported_dtypes: + self.disable_vec(f"{store_dtype} not supported by store") + return self.simd_vec + + if store_dtype in [torch.uint8]: + value_node = node_ctx.get_fx_node().all_input_nodes[-1] + if not self.can_store_fp32_as_uint8(name, value_node): + self.disable_vec("not support store float32 as uint8") + return self.simd_vec + + assert "buf" in name + index = self.rename_indexing(index) + + if mode: + self.disable_vec(f"store mode: {mode}") + return self.simd_vec + + if len(index.free_symbols) == 0: + self.disable_vec(f"constant store index: {index}") + if self.simd_vec and not self.could_vec(name, index): + self.disable_vec(f"not a loop: {index}") + return self.simd_vec + + def reduction(self, dtype, src_dtype, reduction_type, value): + if ( + dtype == torch.float + and src_dtype == torch.float + and reduction_type in VECTORIZABLE_RTYPES + ): + pass + else: + self.disable_vec( + f"reduction: dtype {dtype}, src_dtype {src_dtype}, reduction_type {reduction_type}" + ) + if is_welford_reduction(reduction_type): + return tuple([self.simd_vec] * 3) + return self.simd_vec + + def store_reduction(self, name, index, value): + return self.simd_vec + + def is_supported_cmp(self, node: torch.fx.Node): + def get_node_dtype(node): + if type(node) == torch.fx.Node: + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + return opt_ctx.dtype if opt_ctx else None + else: + return None + + def get_cmp_dtypes(node: torch.fx.Node): + return get_node_dtype(node.args[-2]), get_node_dtype(node.args[-1]) + + assert len(node.args) >= 2 + # cmp(x, y): y is a magic value like x >= 1 + if type(node.args[-1]) in [int, float]: + return True + # cmp(x, y): x is a magic value like 1 >= y + if type(node.args[-2]) in [int, float]: + return False + + left_dtype, right_dtype = get_cmp_dtypes(node) + if left_dtype is None or right_dtype is None: + # TODO(Eikan): To 
record, deduce and propagate the data type of every expression. + return True + else: + return left_dtype == right_dtype + + def __exit__(self, exc_type, exc_val, exc_tb): + assert self._orig_wrapper_code is not None + # Restore the wrapper_code + V.graph.wrapper_code = self._orig_wrapper_code + self.exit_stack.__exit__(exc_type, exc_val, exc_tb) + + def __enter__(self): + # Record the graph wrapper code. The wrapper_code status could be + # changed during graph run. Regarding this checker, we also need to + # run the graph but we don't expect to change any status that would + # impact the code generation. Hence, we record the graph wrapper code + # and replace it with a dummy wrapper_code and then restore to the + # original one as long as the checker is finished. + self._orig_wrapper_code = V.graph.wrapper_code + V.graph.wrapper_code = WrapperCodeGen() + + class VecCheckerProxy: + bin_cmp_ops = ["eq", "ne", "le", "ge", "lt", "gt"] + + @staticmethod + def _bin_cmp_op(x, y): + current_node: torch.fx.Node = V.interpreter.current_node + if not self.is_supported_cmp(current_node): + self.disable_vec(f"binary comparison op: {current_node}") + return self.simd_vec + + @staticmethod + def __getattr__(name): + def inner(*args, **kwargs): + if name in VecCheckerProxy.bin_cmp_ops: + return VecCheckerProxy._bin_cmp_op(args, kwargs) + + if name not in self.fast_vec_list: + self.disable_vec(f"op: {name}") + return self.simd_vec + + return inner + + @staticmethod + def load(name: str, index: sympy.Expr): + return self.load(name, index) + + @staticmethod + def store(name, index, value, mode=None): + return self.store(name, index, value, mode=mode) + + @staticmethod + def reduction(dtype, src_dtype, reduction_type, value): + return self.reduction(dtype, src_dtype, reduction_type, value) + + @staticmethod + def store_reduction(name, index, value): + return self.store_reduction(name, index, value) + + @staticmethod + def constant(val, dtype): + with 
RecordOptimizationContext(__name__) as node_ctx: + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + # VecKernel override dtype for constant + # Vectorization only support int32/fp32 now + # So if dtype = int64/fp64, we will cast it to int32/fp32 if possible + i32_iinfo = torch.iinfo(torch.int32) + if ( + dtype == torch.int64 + and val <= i32_iinfo.max + and val >= i32_iinfo.min + ): + opt_ctx.dtype = torch.int32 + + f32_iinfo = torch.finfo(torch.float32) + if dtype == torch.double: + if ( + (val <= f32_iinfo.max and val >= f32_iinfo.min) + or (val == torch.inf) + or (val == -torch.inf) + ): + opt_ctx.dtype = torch.float32 + + supported_dtypes = [ + torch.float32, + torch.int32, + torch.bfloat16, + torch.float16, + ] + + if opt_ctx.dtype not in supported_dtypes or ( + opt_ctx.dtype == torch.int32 + and not all( + user.target in VecCheckerProxy.bin_cmp_ops + for user in node_ctx.current_node.users + ) + ): + self.disable_vec(f"constant dtype: {opt_ctx.dtype}") + return val + + @staticmethod + def index_expr(expr, dtype): + assert len(self.ranges) == len(self.itervars) + if not len(self.ranges) or not all( + not isinstance(range, sympy.Expr) or sympy.simplify(range).is_number + for range in self.ranges + ): + # if the range value is sympy.Expr, we might could not deduce the accurate loop interval. 
+ self.disable_vec(f"index_expr: {expr}, dtype {dtype}") + return self.cse.newvar() + + def can_use_int32(): + free_symbols = list(expr.free_symbols) + sizes = { + k: v + for k, v in zip(self.itervars, self.ranges) + if k in free_symbols + } + # Trivial case: Range empty + if any(v == 0 for v in sizes.values()): + return True + + vars_ranges = {k: ValueRanges(0, v - 1) for k, v in sizes.items()} + if not vars_ranges or len(vars_ranges) != len(free_symbols): + i32_iinfo = torch.iinfo(torch.int32) + return ( + expr.is_number + and expr <= i32_iinfo.max + and expr >= i32_iinfo.min + ) + expr_ranges = bound_sympy(expr, vars_ranges) + if math.isinf(expr_ranges.lower) or math.isinf(expr_ranges.upper): + return False + # If something takes the values 0..7, we will compare in the loop + # x < 8. As such, for the loop not to overflow in the last iteration, we want + # to check that expr_ranges.upper + 1 is representable as well + return range_expressable_in_32_bits( + ValueRanges(int(expr_ranges.lower), int(expr_ranges.upper) + 1) + ) + + with RecordOptimizationContext(__name__) as node_ctx: + assert len(self.ranges) == len(self.itervars) + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + if ( + dtype == torch.int64 + and can_use_int32() + and all( + user.target in VecCheckerProxy.bin_cmp_ops + for user in node_ctx.current_node.users + ) + ): + opt_ctx.dtype = torch.int32 + else: + opt_ctx.dtype = dtype + self.disable_vec(f"index_expr: {expr}, dtype {dtype}") + + tiling_var = self.itervars[self.tiling_idx] + tiling_var_irrelevant = not expr.has(tiling_var) + if not tiling_var_irrelevant: + self.disable_vec( + f"index_expr (tiling var relevant): {expr}, dtype {dtype}" + ) + opt_ctx.is_most_inner_loop_irrevelant = tiling_var_irrelevant + tmp_var = self.cse.newvar() + return tmp_var + + @staticmethod + def indirect_indexing(index_var, size, check=True): + return sympy_symbol(str(index_var)) + + @staticmethod + def masked(mask, body, other): + body() + 
return self.cse.newvar() + + @staticmethod + def to_dtype(x, dtype): + with RecordOptimizationContext(__name__) as node_ctx: + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + opt_ctx.dtype = dtype + + cur_node = node_ctx.get_fx_node() + input_value: torch.fx.Node = cur_node.all_input_nodes[1] + if dtype == torch.float: + if input_value.target in [ + "load", + ]: + # Support masked_load for BF16/FP16. Because the legalization will + # insert to_dtype to convert the BF16/FP16 input to FP32. + dtype = ( + V.graph.get_dtype(input_value.args[1]) + if input_value.target == "load" + else input_value.args[-1] + ) + if dtype in [ + torch.float16, + torch.bfloat16, + torch.float, + torch.uint8, + ]: + # Convert from dtype to torch.float + pass + elif ( + dtype in [torch.int32, torch.int64] + and input_value.target == "load" + ): + buffer = V.graph.get_buffer(input_value.args[1]) + # Check if load of a scalar tensor of integer + if not ( + isinstance(buffer, TensorBox) + and isinstance(buffer.data, StorageBox) + and len(buffer.data.layout.size) == 0 + ): + self.disable_vec(f"to_dtype: dtype {dtype}") + else: + self.disable_vec(f"to_dtype: dtype {dtype}") + elif dtype in DTYPE_LOWP_FP: + if not all(usr.target == "store" for usr in cur_node.users): + self.disable_vec( + "to_dtype: bfloat16/float16 expecting users are all stores" + ) + return x + + store_names = [usr.args[1] for usr in cur_node.users] + if not all( + V.graph.get_dtype(name) in [dtype] for name in store_names + ): + self.disable_vec( + "to_dtype: expecting all stores into bfloat16 or float16" + ) + return x + elif dtype == torch.bool: + pass + elif dtype == torch.uint8: + # Only allow below 2 cases: + # Case 1: to_uint8 and store which corresponding to the single quant node + # at last of fusion pattern. 
+ is_to_uint8_and_store = all( + usr.target in ["store"] for usr in cur_node.users + ) + # Case 2: to_uint8 and to_float which corresponding to pair of quant/dequant node + # at middle of fusion pattern. + is_to_uint8_and_to_float = all( + ( + usr.target in ["to_dtype"] + and usr.args[2] == torch.float32 + ) + for usr in cur_node.users + ) + if not (is_to_uint8_and_store or is_to_uint8_and_to_float): + self.disable_vec(f"to_dtype: dtype {dtype}") + else: + self.disable_vec(f"to_dtype: dtype {dtype}") + return x + + self.exit_stack.enter_context(V.set_ops_handler(VecCheckerProxy())) + self.exit_stack.enter_context(V.set_kernel_handler(self)) + return self + + +class CppKernelProxy(CppKernel): + def __init__(self, kernel_group): + super().__init__(kernel_group.args, kernel_group.ws.num_threads) + self.kernel_group = kernel_group + self.loop_nest = None + self.call_ranges = None + self.picked_vec_isa: codecache.VecISA = codecache.pick_vec_isa() + + def data_type_propagation(self, nodes): + for _node in nodes: + assert isinstance(_node, SchedulerNode) + DataTypePropagation.propagate_scheduler_node(_node) + + # Check if all the nodes of a given fx graph can support BF16/FP16 + def is_lowp_fp_scheduler(self, scheduler_node: SchedulerNode): + if not isinstance(scheduler_node._body, ir.LoopBody): + return True + + _lowp_fp_type: torch.dtype = None + + # Propagate the dtype to check if all the fx node is bf16/fp16 + DataTypePropagation.propagate_scheduler_node(scheduler_node) + + sub_blocks = [scheduler_node._body.root_block] + list( + scheduler_node._body.subblocks.values() + ) + for sub_block in sub_blocks: + for _node in sub_block.graph.nodes: + # TODO(Eikan): Regarding get_index and index_expr, we should conclude the + # the data type as well. 
+ if _node.op == "placeholder" or _node.target in ( + "get_index", + "index_expr", + ): + continue + + # Fast path if all operations can support bf16/fp16 without converting to fp32 + if _node.target not in [ + "load", + "store", + "abs", + "neg", + "output", + ]: + return False + + if hasattr(_node, "meta") and _node.meta: + assert OptimizationContext.key in _node.meta + opt_ctx: OptimizationContext = _node.meta[OptimizationContext.key] + if not opt_ctx.dtype or opt_ctx.dtype not in DTYPE_LOWP_FP: + return False + if _lowp_fp_type: + assert ( + _lowp_fp_type == opt_ctx.dtype + ), "scheduler node do not support bf16/fp16 mix" + else: + _lowp_fp_type = opt_ctx.dtype + else: + return False + + scheduler_node._lowp_fp_type = _lowp_fp_type + return True + + def legalize_lowp_fp_dtype(self, nodes): + def add_to_dtype(sub_graph: torch.fx.Graph): + def is_lowp_fp_load(node: torch.fx.Node): + if node.target not in ["load"]: + return False + assert len(node.args) == 3 + load_dtype = V.graph.get_dtype(node.args[1]) + return load_dtype in DTYPE_LOWP_FP + + def is_lowp_fp_store(node: torch.fx.Node): + if node.target != "store": + return False + _, store_var, _, _, _ = node.args + store_dtype = V.graph.get_dtype(store_var) + return store_dtype in DTYPE_LOWP_FP + + sub_graph_nodes = list(sub_graph.nodes) + to_lowp_fp_legalized_nodes = [] + for _node in sub_graph_nodes: + if is_lowp_fp_load(_node): + ops = _node.args[0] + with sub_graph.inserting_after(_node): + to_type_node = sub_graph.call_method( + "to_dtype", args=(ops, _node, torch.float) + ) + to_type_node_args = to_type_node.args + _node.replace_all_uses_with(to_type_node) + to_type_node.args = to_type_node_args + metrics.cpp_to_dtype_count += 1 + elif is_lowp_fp_store(_node): + ops, name, _, value_var, _ = _node.args + dtype = V.graph.get_dtype(name) + with sub_graph.inserting_before(_node): + to_type_node = sub_graph.call_method( + "to_dtype", args=(ops, value_var, dtype) + ) + _node.replace_input_with(value_var, 
to_type_node) + metrics.cpp_to_dtype_count += 1 + elif _node.target == "reduction": + ( + ops, + dtype, + src_dtype, + reduction_type, + value, + ) = _node.args + if src_dtype in DTYPE_LOWP_FP: + # Since we always convert the load/store value to float if the tensor is bfloat16/float16. + # Therefore, the reduction should never work with bfloat16/float16 value. Hence, we update + # the bfloat16/float16 reduction by + # 1) updating the src_dtype to float + # and 2) updating the dtype to float if it is bfloat16/float16. + assert dtype in [ + torch.float, + torch.bfloat16, + torch.float16, + torch.int64, + ] + _node.args = ( + ops, + torch.float if dtype in DTYPE_LOWP_FP else dtype, + torch.float, + reduction_type, + value, + ) + elif _node.target == "to_dtype" and _node.args[-1] in DTYPE_LOWP_FP: + (ops, x, _) = _node.args + # The legalization always loads the BF16/FP16 tensor as FP32 for computation + # and converts back to BF16/FP16 after the computation. + # Hence, there should be no computation w/ BF16/FP16. + # Therefore, we update the to_dtype by replacing the bf16/fp16 dtype with fp32. + # Save the legalized to_dtype node for the elimination(eliminate_to_dtype step): + # 1) Eliminate the redundant to_dtype node if we have a pattern as follows: + # graph(): + # %lowp_fp_legalized = call_method[target=to_dtype](args = (%ops, %input, torch.float)) + # %to_dtype2 = call_method[target=to_dtype](args = (%ops, %lowp_fp_legalized, torch.bfloat16/float16)) + # Regarding the first to_dtype, it is redundant because + # the second to_type also converts to the torch.bfloat16/torch.float16. + # Hence, we remove the first to_type. + to_lowp_fp_legalized_nodes.append(_node) + _node.args = (ops, x, torch.float) + else: + pass + + def eliminate_to_dtype(sub_graph: torch.fx.Graph): + def _eliminate_duplicate_to_node(sub_graph: torch.fx.Graph): + # Eliminate the redundant to_dtype node. 
Let's consider a pattern as follows: + # graph(): + # %to_dtype1 = call_method[target=to_dtype](args = (%ops, %input, torch.float), kwargs = {}) + # %to_dtype2 = call_method[target=to_dtype](args = (%ops, %to_dtype1, torch.float), kwargs = {}) + # Regarding the first to_dtype, it is redundant because the second to_type also converts to the + # torch.float. Hence, we remove the first to_type + def _used_by_to(to_node: torch.fx.Node): + return all(usr.target == "to_dtype" for usr in to_node.users) + + all_to_nodes = [ + node for node in sub_graph.nodes if node.target == "to_dtype" + ] + all_to_nodes_and_users = [ + {node: node.users} for node in all_to_nodes if _used_by_to(node) + ] + for node_users in all_to_nodes_and_users: + for node, users in node_users.items(): + if node in sub_graph.nodes and ( + all(usr.args[-1] == node.args[-1] for usr in users) + or ( + node in to_lowp_fp_legalized_nodes + and all( + usr.args[-1] in DTYPE_LOWP_FP for usr in users + ) + ) + ): + val_node = node.all_input_nodes[-1] + node.replace_all_uses_with(val_node) + sub_graph.erase_node(node) + + # For debug mode, the graph of LoopBody will attach a new GraphModule as + # owning_module for debugging while the release mode will not. The lint will + # check whether the graph has owning_module to decide if it needs to check + # call_module. LoopBody might contain get_index as a module call. But it + # is just a function. Hence, it cannot pass the lint check for debug mode. + # We bypass the check if the owning_module is None. Eventually, we should call + # get_index via call_function but not call_module. 
+ if sub_graph.owning_module is None: + sub_graph.lint() + + _eliminate_duplicate_to_node(sub_graph) + + eliminate_to_dtype(sub_graph) + + def _legalize_lowp_fp(loop_body: ir.LoopBody): + sub_blocks = [loop_body.root_block] + list(loop_body.subblocks.values()) + for sub_block in sub_blocks: + add_to_dtype(sub_block.graph) + + if all( + isinstance(_node, SchedulerNode) and self.is_lowp_fp_scheduler(_node) + for _node in nodes + ): + # Mark the load node to load bf16/fp16 + for _node in nodes: + sub_blocks = [_node._body.root_block] + list( + _node._body.subblocks.values() + ) + for sub_block in sub_blocks: + for fx_node in sub_block.graph.nodes: + if fx_node.target in ["load", "store"]: + assert fx_node.meta + assert OptimizationContext.key in fx_node.meta + opt_ctx: OptimizationContext = fx_node.meta[ + OptimizationContext.key + ] + assert opt_ctx.dtype in DTYPE_LOWP_FP + + # Bypass the legalization as the kernel can run with bf16/fp16 directly + return + + for _node in nodes: + assert isinstance(_node, SchedulerNode) + assert isinstance(_node._body, ir.LoopBody) + node: SchedulerNode = _node + + def is_memory_copy_scheduler_node(node: SchedulerNode): + op_counts = node.read_writes.op_counts + return ( + len(op_counts) == 2 and "load" in op_counts and "store" in op_counts + ) + + should_legalize = not is_memory_copy_scheduler_node(node) + if should_legalize: + body: ir.LoopBody = node._body + _legalize_lowp_fp(body) + + def codegen_nodes(self, nodes): + # Legalize BF16 node by adding to_dtype explicitly + self.legalize_lowp_fp_dtype(nodes) + self.data_type_propagation(nodes) + + assert len(nodes) >= 1 + first_node = nodes[0] + vec_dtype = ( + first_node._lowp_fp_type + if all( + hasattr(_node, "_lowp_fp_type") + and _node._lowp_fp_type == first_node._lowp_fp_type + for _node in nodes + ) + else torch.float + ) + + kernel_group = self.kernel_group + _, (group, reduction_group) = max( + nodes, key=lambda x: int(x.is_reduction()) + ).group + + self.set_ranges(group, 
reduction_group) + + def codegen_kernel(cls, *args): + with kernel_group.new_kernel(cls, *args) as kernel: + run(kernel) + + # Ugly hack to maintain the metrics kernel count since + # we only count in CppKernelProxy, not those contained in it + metrics.generated_kernel_count -= 1 + + return kernel + + def run(kernel): + vars, reduction_vars = kernel.set_ranges(group, reduction_group) + in_suffix = False + for node in nodes: + if node.group[1] in [ + (group, reduction_group), + (group + reduction_group, ()), + ]: + assert not in_suffix + node.run(vars, reduction_vars) + else: + in_suffix = True + assert node.group[1] == ( + group, + (), + ), f"unexpected group: {node.group[1]} != {group}, {reduction_group}" + # we can fuse in some extra pointwise into the suffix + with kernel.write_to_suffix(): + node.run(vars, ()) + + scalar_kernel = codegen_kernel(CppKernel) + self.loop_nest = LoopNestWithSplit.build(scalar_kernel) + + if not self.picked_vec_isa: + return + + def select_tiling_indices(): + all_index = [] + for node in nodes: + rw = dependencies.extract_read_writes(node._body, *node._sizes) + all_index += [dep.index for dep in itertools.chain(rw.reads, rw.writes)] + contig_vars = set() + contig_vars_list = [] + non_contig_stride_const = set() + non_contig_stride_other = set() + for index in all_index: + for var in index.free_symbols: + if not re.search(r"^d\d+$", var.name): + continue + stride = stride_at(var, index) + if stride == 1: + contig_vars.add(int(var.name[1:])) + contig_vars_list.append(int(var.name[1:])) + elif all(s.name.startswith("s") for s in stride.free_symbols): + non_contig_stride_const.add(int(var.name[1:])) + else: + non_contig_stride_other.add(int(var.name[1:])) + contig_only = ( + contig_vars - non_contig_stride_const - non_contig_stride_other + ) + if len(contig_vars) == 0: + # no contiguous vars + return [len(self.itervars) - 1] + if contig_only: + return sorted(contig_only)[-1:] + contig_and_const_stride = ( + contig_vars & 
non_contig_stride_const + ) - non_contig_stride_other + contig_vars_sorted = sorted(contig_vars) + if ( + len(contig_vars_sorted) == 2 + and contig_vars_sorted[-1] in contig_and_const_stride + and contig_vars_sorted[-1] == len(self.itervars) - 1 + ): + return contig_vars_sorted + return sorted(contig_vars_sorted, key=lambda i: contig_vars_list.count(i))[ + -1: + ] + + def select_tiling(dtype: torch.dtype = torch.float): + # TODO(jgong5): support alternative tiling factors and data types + tiling_factor = self.picked_vec_isa.nelements(dtype=dtype) + tiling_indices = select_tiling_indices() + if tiling_indices: + could_vec = True + for tiling_indice in tiling_indices: + with CppVecKernelChecker( + deepcopy(self.kernel_group.args), + parallel_num_threads(), + tiling_factor, + tiling_indice, + ) as vec_checker: + run(vec_checker) + could_vec = could_vec and vec_checker.simd_vec + if not could_vec: + break + if could_vec: + if len(tiling_indices) == 1: + return [tiling_factor], tiling_indices + if len(tiling_indices) == 2: + return [tiling_factor, tiling_factor], tiling_indices + return [], [] + + # Kernels share the same global contexts like V.graph.wrapper_code, V.kernel.args. + # But the generated scalar kernel has updated these global contexts. Hence, the other kernels + # should not do this again to avoid context conflict. By now, we only control the + # config.inplace_buffers. In the future, we could maintain more contexts. 
+ with torch._inductor.config.patch(inplace_buffers=False): + tiling_factors, tiling_indices = select_tiling(vec_dtype) + assert len(tiling_factors) == len(tiling_indices) + if len(tiling_indices) == 1: + main_loop, tail_loop = self.loop_nest.split_with_tiling( + tiling_indices[0], factor=tiling_factors[0] + ) + main_loop.set_kernel( + codegen_kernel( + CppVecKernel, tiling_factors[0], tiling_indices[0], vec_dtype + ) + ) + tail_loop.set_kernel(scalar_kernel) + main_loop.simd_vec = True + tail_loop.simd_omp = True + # We chop the loop into two cubes by the nelements - main loop and tail loop. + # Regarding the main loop, it is straightforward that it could be vectorized with + # nelements. But for the tail loop, it still could be vectorized. For example, + # if the nelements is 8(256bits), then the tail loop still could be vectorized + # as 4(128bits). + tail_loop.simd_nelements = tiling_factors[0] // 2 + elif len(tiling_indices) == 2: + assert ( + tiling_indices[1] == len(self.itervars) - 1 + and tiling_factors[0] == tiling_factors[1] + ) + outer_main_loop, outer_tail_loop = self.loop_nest.split_with_tiling( + tiling_indices[0], factor=tiling_factors[0] + ) + outer_tail_loop.set_kernel(scalar_kernel) + inner_main_loop, inner_tail_loop = outer_main_loop.split_with_tiling( + tiling_indices[1] - tiling_indices[0], factor=tiling_factors[0] + ) + inner_main_loop.set_kernel( + codegen_kernel( + CppTile2DKernel, tiling_factors[0], tiling_indices, vec_dtype + ) + ) + inner_tail_loop.set_kernel( + codegen_kernel( + CppVecKernel, tiling_factors[0], tiling_indices[0], vec_dtype + ) + ) + + def codegen_loops(self, code, worksharing): + self.codegen_loops_impl(self.loop_nest, code, worksharing) + + +class CppScheduling(BaseScheduling): + def __init__(self, scheduler): + self.scheduler = scheduler + self.get_kernel_group() + + def group_fn(self, sizes): + return tuple(tuple(map(V.graph.sizevars.simplify, s)) for s in sizes) + + def get_kernel_group(self): + from .wrapper import 
CppWrapperCodeGen + + if isinstance(V.graph.wrapper_code, CppWrapperCodeGen): + self.kernel_group = CppWrapperKernelGroup() + else: + self.kernel_group = KernelGroup() + + def _can_fuse_horizontal_impl(self, node1, node2): + _, (vars1, reduce1) = node1.group + _, (vars2, reduce2) = node2.group + if vars1 == vars2 and reduce1 == reduce2: + return True + if reduce1 == () and vars1 == vars2 + reduce2: + return True + # TODO(jansel): allow fusion pointwise (vars1, ()) suffix? + return False + + def can_fuse_horizontal(self, node1, node2): + if ( + len(node1.get_nodes()) + len(node2.get_nodes()) + > config.cpp.max_horizontal_fusion_size + ): + return False + + return self._can_fuse_horizontal_impl(node1, node2) + + def can_fuse_vertical(self, node1, node2): + return self._can_fuse_horizontal_impl(node1, node2) and not node1.is_reduction() + + def codegen_nodes(self, nodes): + """ + Turn an set of pre-fused nodes into a C++ kernel. + """ + kernel_group = self.kernel_group + + cpp_kernel_proxy = CppKernelProxy(kernel_group) + cpp_kernel_proxy.codegen_nodes(nodes) + + kernel_group.finalize_kernel(cpp_kernel_proxy, nodes) + + def codegen_sync(self): + pass + + def flush(self): + self.kernel_group.codegen_define_and_call(V.graph.wrapper_code) + self.get_kernel_group() + + +class KernelGroup: + def __init__(self): + super().__init__() + self.args = KernelArgs() + self.loops_code = BracesBuffer() + self.ws = WorkSharing(self.loops_code) + self.stack = contextlib.ExitStack() + self.stack.enter_context(self.ws) + self.scheduled_nodes = [] + + def new_kernel(self, cls, *args): + return cls(self.args, parallel_num_threads(), *args) + + def finalize_kernel(self, new_kernel, nodes): + self.scheduled_nodes += nodes + code = self.loops_code + ws = self.ws + new_kernel.codegen_loops(code, ws) + + def codegen_define_and_call(self, wrapper): + self.stack.close() + if not self.scheduled_nodes: + return + + fused_name = ( + get_fused_kernel_name(self.scheduled_nodes, 
config.cpp.descriptive_names) + if config.cpp.descriptive_names + else "" + ) + kernel_name = "_".join(["cpp", fused_name, wrapper.next_kernel_suffix()]) + arg_defs, call_args, arg_types = self.args.cpp_argdefs() + arg_defs = ",\n".ljust(25).join(arg_defs) + arg_types = ",".join(arg_types) + code = BracesBuffer() + # TODO: support kernel profile on other platforms + enable_kernel_profile = ( + config.cpp.enable_kernel_profile and sys.platform == "linux" + ) + if enable_kernel_profile: + code.writelines(["#include "]) + kernel_decl_name = kernel_name if V.graph.cpp_wrapper else "kernel" + code.writeline(codecache.cpp_prefix()) + + code.writeline(f'extern "C" void {kernel_decl_name}({arg_defs})') + with code.indent(): + if enable_kernel_profile: + graph_id = V.graph.graph_id + prefix = "graph_" + str(graph_id) + "_" if graph_id is not None else "" + code.writelines( + [ + f'RECORD_FUNCTION("{prefix + kernel_name}", c10::ArrayRef({{}}));' + ] + ) + for old, new in self.args.aliases(): + code.writeline(f"auto {old} = {new};") + code.splice(self.loops_code) + + codecache_def = IndentedBuffer() + if not V.graph.cpp_wrapper: + codecache_def.writeline("async_compile.cpp('''") + codecache_def.splice(code) + if not V.graph.cpp_wrapper: + codecache_def.writeline("''')") + + codecache_str = codecache_def.getvalue() + # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does + # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. 
+ codecache_str = codecache_str.replace("#pragma CMT", "//") + wrapper.define_kernel(kernel_name, codecache_str, cuda=False) + # generate the code to call this + wrapper.generate_kernel_call(kernel_name, call_args, cuda=False) + + +class CppWrapperKernelGroup(KernelGroup): + def __init__(self): + super().__init__() + self.args = CppWrapperKernelArgs() + + +class WorkSharing: + def __init__(self, code): + self.code = code + self.in_parallel = False + self.num_threads = None + self.stack = contextlib.ExitStack() + + def parallel(self, threads): + if self.in_parallel and threads != self.num_threads: + # wrong number of threads + self.close() + if not self.in_parallel: + self.num_threads = threads + self.in_parallel = True + if config.cpp.dynamic_threads: + self.code.writeline("#pragma omp parallel") + else: + self.code.writeline(f"#pragma omp parallel num_threads({threads})") + self.stack.enter_context(self.code.indent()) + + def single(self): + if self.in_parallel: + self.code.writeline("#pragma omp single") + return self.in_parallel + + def close(self): + self.stack.close() + self.in_parallel = False + + def __enter__(self): + self.stack.__enter__() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.stack.__exit__(exc_type, exc_val, exc_tb) + + +@dataclasses.dataclass +class LoopLevel: + var: sympy.Expr = None + size: sympy.Expr = None + offset: sympy.Expr = sympy.Integer(0) + steps: sympy.Expr = sympy.Integer(1) + parallel: int = 0 + simd_omp: bool = False + simd_vec: bool = False + collapsed: bool = False + reduction_var_map: Dict[str, str] = None + parent: "LoopLevel" = None + # the next inner level of the loop, empty if it is inner-most + # contains >1 LoopLevel if the inner level of loop is split + inner: List["LoopLevel"] = dataclasses.field(default_factory=list) + # kernel assigned to this loop level, only valid when it is a leaf + kernel: CppKernel = None + + def __post_init__(self): + # Regarding the C++/OpenMP backend, 
`codecache.pick_vec_isa()` to check + # vectorization ISA is a time-consuming and one-shot operation. It leads + # to taking a longer time to import `codegen.cpp` package because the + # `LoopLevel` of the package is decorated by `@dataclasses.dataclass` while + # the decorator will invoke `codecache.pick_vec_isa()` to initialize the + # `simd_nelements` of the `LoopLevel`. It might introduce additional compilation + # overhead to the Triton backend. Therefore, we moved the `simd_nelements` to + # `__post_init__` + picked_vec_isa: codecache.VecISA = codecache.pick_vec_isa() + self.simd_nelements: int = picked_vec_isa.nelements() if picked_vec_isa else 0 + + def get_kernels(self) -> List[CppKernel]: + """Get all kernel objects under this loop level""" + if self.kernel: + return [self.kernel] + kernels = [] + for loop in self.inner: + kernels += loop.get_kernels() + return kernels + + def set_kernel(self, kernel: CppKernel): + """ + Set the kernel under this loop level. No split is allowed under + this loop level. 
+ """ + if not self.inner: + self.kernel = kernel + loop = self + if loop.is_reduction(): + loop.reduction_var_map = kernel.reduction_var_map.copy() + loop = loop.parent + while loop is not None and loop.is_reduction(): + loop.reduction_var_map.update(kernel.reduction_var_map) + loop = loop.parent + return + assert len(self.inner) == 1 + self.inner[0].set_kernel(kernel) + + def get_loops_at(self, depth) -> List["LoopLevel"]: + if depth == 0: + return [self] + else: + loops = [] + for loop in self.inner: + loops += loop.get_loops_at(depth - 1) + return loops + + def is_reduction(self): + return bool(self.reduction_var_map) + + def split_with_tiling(self, depth, factor): + def clone_inner(): + inner = [] + if self.inner: + for loop in self.inner: + inner.append(loop.clone()) + return inner + + def do_split_with_tiling(): + sympy_factor = sympy.Integer(factor) + + offset = FloorDiv(self.size, sympy_factor) * sympy_factor + main_loop = LoopLevel(self.var, offset) + main_loop.steps = sympy_factor + main_loop.parallel = self.parallel + main_loop.collapsed = False + main_loop.reduction_var_map = self.reduction_var_map + main_loop.inner = clone_inner() + if main_loop.inner: + for loop in main_loop.inner: + loop.parent = main_loop + + tail_loop = LoopLevel(self.var, self.size) + tail_loop.offset = offset + tail_loop.parallel = self.parallel + tail_loop.collapsed = False + tail_loop.reduction_var_map = self.reduction_var_map + tail_loop.inner = clone_inner() + if tail_loop.inner: + for loop in tail_loop.inner: + loop.parent = tail_loop + + return main_loop, tail_loop + + if depth == 0: + main_loop, tail_loop = do_split_with_tiling() + parent = self.parent + if parent: + parent.inner = [main_loop, tail_loop] + main_loop.parent = parent + tail_loop.parent = parent + return main_loop, tail_loop + else: + assert len(self.inner) == 1 + return self.inner[0].split_with_tiling(depth - 1, factor) + + def clone(self): + loop = copy(self) + loop.inner = [] + if self.inner: + for 
inner_loop in self.inner: + inner_loop_clone = inner_loop.clone() + inner_loop_clone.parent = loop + loop.inner.append(inner_loop_clone) + loop.kernel = deepcopy(self.kernel) + return loop + + def lines(self): + offset_expr = cexpr_index(self.offset) + size_expr = cexpr_index(self.size) + if config.cpp.no_redundant_loops and offset_expr == size_expr: + return None + if self.reduction_var_map: + reduction = " " + " ".join( + f"reduction({RTYPE_TO_CPP[rtype]}:{var})" + for var, rtype in self.reduction_var_map.items() + ) + else: + reduction = "" + simd = ( + f"simd simdlen({self.simd_nelements}) " + if self.simd_omp and self.simd_nelements > 1 + else "" + ) + if self.parallel: + # TODO(jansel): look into chunk size and other schedules + line1 = f"#pragma omp for{reduction} " + if self.parallel > 1: + line1 += f" collapse({self.parallel})" + if self.simd_omp: + line1 = line1.replace(" for ", f" for {simd}") + elif self.simd_vec: + line1 = "" + elif self.simd_omp: + line1 = f"#pragma omp {simd}{reduction}" + elif not self.reduction_var_map and codecache.is_gcc(): + line1 = "#pragma GCC ivdep" + else: + line1 = "" + offset_str = f"{INDEX_TYPE} {self.var}={offset_expr}" + size_str = f"{self.var}<{size_expr}" + steps_str = f"{self.var}+={cexpr_index(self.steps)}" + line2 = f"for({offset_str}; {size_str}; {steps_str})" + if self.collapsed or not line1: + return [line2] + return [line1, line2] + + +@dataclasses.dataclass +class LoopNestWithSplit: + """ + A loop-nest like structure but with some loop level split along + the loop range into the main tiling loop and the tail. It is built + with the `build` method as a loop nest and then split with + `split_with_tiling` at some depth. + + A typical case is for vectorization where we typically split at the inner-most + loop level. A more complicated case is 2D tiling where we split at + both inner-most and outer levels. 
+ """ + + root: List[LoopLevel] = None + kernel: CppKernel = None + + @staticmethod + def build(kernel: CppKernel): + """Build a LoopNest with the given `kernel` as the leaf""" + itervars = kernel.itervars + ranges = kernel.ranges + reduction_depth = kernel.reduction_depth + + root: List[LoopLevel] = [] + levels: List[LoopLevel] = root + loop: LoopLevel = None + for loop_idx, (var, size) in enumerate(zip(itervars, ranges)): + loop = LoopLevel(var, size, parent=loop) + if loop_idx >= reduction_depth: + loop.reduction_var_map = kernel.reduction_var_map.copy() + levels.append(loop) + levels = loop.inner + loop_nest = LoopNestWithSplit(root, len(itervars)) + if loop: + loop.kernel = kernel + else: + loop_nest.kernel = kernel + return loop_nest + + def __bool__(self): + return bool(self.root) + + def get_loops_at(self, depth) -> List[LoopLevel]: + """Get all the loop levels at the given `depth` (most outer loop has depth 0)""" + loops = [] + for loop in self.root: + loops += loop.get_loops_at(depth) + return loops + + @cache_on_self + def max_parallel_depth(self): + """ + Maximal allowed depth for parallelism: + 1) Levels without splitting and + 2) All reduction or non-reduction levels + When the loop is split at the top level, the max depth is 1. + """ + max_depth = 0 + loops = self.root + if len(loops) > 1: + return 1 + is_reduction = loops[0].is_reduction() if loops else False + while len(loops) == 1 and loops[0].is_reduction() == is_reduction: + max_depth += 1 + loops = loops[0].inner + return max_depth + + def is_reduction_only(self): + """ + Whether all the loops are for reduction. Reduction loops + are always the inner most ones. 
+ """ + return self.root and self.root[0].is_reduction() + + def mark_parallel(self, par_depth): + assert ( + par_depth <= self.max_parallel_depth() + ), "Parallel depth cannot exceed the maximal allowed parallel depth" + loops = self.root + for loop in loops: + loop.parallel = par_depth + for i in range(1, par_depth): + loops = loops[0].inner + loops[0].collapsed = True + + def split_with_tiling(self, depth, factor): + """ + Split the loop into main and tail loops at given `depth` so that the range + of the main loop has range `floor_div(range, factor) * factor` and + the tail loop handles the remainder. The main loop is tiled + according to the `factor`. + """ + loops = self.get_loops_at(depth) + assert len(loops) == 1 + split_loops = loops[0].split_with_tiling(0, factor) + if depth == 0: + self.root = split_loops + return split_loops diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h new file mode 100644 index 0000000000000000000000000000000000000000..d2abd4dc6b1f208d2662716ccca0d2289d8ff82a --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h @@ -0,0 +1,372 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) +#define INDUCTOR_USE_VECTOR_TYPES() 1 +#else +#define INDUCTOR_USE_VECTOR_TYPES() 0 +#endif + +#if INDUCTOR_USE_VECTOR_TYPES() +#include +#include +#endif + +typedef at::Half half; +typedef at::BFloat16 bfloat16; + +template +struct Welford { + T mean = T(0); + T m2 = T(0); + T weight = T(0); +}; + + +template +struct IsVecType: std::false_type {}; + +#if INDUCTOR_USE_VECTOR_TYPES() +template +struct IsVecType>: std::true_type {}; +#endif + +template +Welford welford_combine(const Welford &a, const Welford &b) { + if constexpr 
(!IsVecType::value) { + if (a.weight == 0) { + return b; + } + if (b.weight == 0) { + return a; + } + } + auto delta = b.mean - a.mean; + auto new_weight = a.weight + b.weight; + auto wb_over_w = b.weight / new_weight; + if constexpr (IsVecType::value) { + // Guard against division by zero + wb_over_w = T::blendv(wb_over_w, T(0), new_weight == T(0)); + } + auto result = Welford{ + a.mean + delta * wb_over_w, + a.m2 + b.m2 + delta * delta * a.weight * wb_over_w, + new_weight + }; + return result; +} + +template +Welford welford_combine(const Welford &acc, T data) { + // Add a single data point + auto delta = data - acc.mean; + auto new_weight = acc.weight + T(1); + auto new_mean = acc.mean + delta / new_weight; + auto new_delta = data - new_mean; + auto result = Welford{ + new_mean, + acc.m2 + delta * new_delta, + new_weight + }; + return result; +} + + +#if INDUCTOR_USE_VECTOR_TYPES() +template +inline at::vec::Vectorized vec_shuffle_down(at::vec::Vectorized x, size_t n) { + using Vec = at::vec::Vectorized; + alignas(alignof(Vec)) scalar_t array[Vec::size()]; + x.store(array); + for (size_t i = 0; i + n < Vec::size(); i += 2 * n) { + array[i] = array[i + n]; + } + return Vec::loadu(array); +} + +#ifdef CPU_CAPABILITY_AVX2 +inline at::vec::Vectorized vec_shuffle_down(at::vec::Vectorized x, size_t n) { + using vec_t = at::vec::Vectorized; +#define SHUFFLE_MASK(z, y, x, w) ((z << 6) | (y << 4) | (x << 2) | w) + switch (n) { + case 1: + return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(1, 1, 3, 3))); + case 2: + return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(2, 2, 2, 2))); + case 4: + return vec_t(_mm256_permute2f128_ps(x, x, SHUFFLE_MASK(1, 1, 1, 1))); + } + TORCH_CHECK(false, "Unhandled vec_shuffle_down value ", n); +} +#endif + +template +Welford welford_vec_reduce_all(Welford> acc) { + using Vec = at::vec::Vectorized; + for (size_t n = 1; n < Vec::size(); n *= 2) { + auto shuffled = Welford{ + vec_shuffle_down(acc.mean, n), + vec_shuffle_down(acc.m2, n), + 
vec_shuffle_down(acc.weight, n) + }; + acc = welford_combine(acc, shuffled); + } + + Welford result; + alignas(alignof(Vec)) scalar_t array[Vec::size()]; + acc.mean.store(array); + result.mean = array[0]; + + acc.m2.store(array); + result.m2 = array[0]; + + acc.weight.store(array); + result.weight = array[0]; + + return result; +} +#endif + + +template inline T mod(T a, T b) { return a % b; } +template <> inline float mod(float a, float b) { return std::fmod(a, b); } +template <> inline double mod(double a, double b) { return std::fmod(a, b); } + +template +inline scalar_t max_propagate_nan(scalar_t a, scalar_t b) { + if (at::_isnan(a)) { + return a; + } + return a > b ? a : b; +} + +template +inline scalar_t min_propagate_nan(scalar_t a, scalar_t b) { + if (at::_isnan(a)) { + return a; + } + return a < b ? a : b; +} + +constexpr float uint32_to_uniform_float(uint32_t value) { + // maximum value such that `MAX_INT * scale < 1.0` (with float rounding) + constexpr float scale = 4.6566127342e-10; + return static_cast(value & 0x7FFFFFFF) * scale; +} + +float normalized_rand_cpu(uint32_t seed, uint32_t offset) { + return uint32_to_uniform_float(at::Philox4_32(seed, 0, offset)()); +} + +float randn_cpu(uint32_t seed, uint32_t offset) { + at::Philox4_32 engine(seed, 0, offset); + return engine.randn(10); +} + +uint64_t randint64_cpu(uint32_t seed, uint32_t offset, int64_t low, int64_t high) { + auto gen = at::Philox4_32(seed, 0, offset); + uint64_t r0 = gen(); + uint64_t r1 = gen(); + uint64_t result = r0 | (r1 << 32); + return (result % static_cast(high - low)) + low; +} + +template struct AsIntegerType { typedef T type; }; +template <> struct AsIntegerType { typedef uint32_t type; }; +template <> struct AsIntegerType { typedef uint64_t type; }; +template <> struct AsIntegerType { typedef uint16_t type; }; + +template +typename std::enable_if::value, T>::type +inline fetch_value(volatile T *addr) { + return *addr; +} + +template +typename std::enable_if::value, T>::type 
+inline fetch_value(volatile T *addr) { + return T(addr->x, T::from_bits()); +} + +template +typename std::enable_if::value>::type +atomic_add(volatile T *addr, T offset) { + typedef typename AsIntegerType::type alt_type; + + static_assert(sizeof(std::atomic) == sizeof(T), + "std::atomic issue"); + + alt_type expected; + + alt_type desired; + + std::atomic *atomic_addr = (std::atomic *)addr; + do { + T val = fetch_value(addr); + reinterpret_cast(&expected)[0] = val; + reinterpret_cast(&desired)[0] = val + offset; + } while (!atomic_addr->compare_exchange_weak(expected, desired, + std::memory_order_relaxed)); +} + +// Since C++20 float is supported by fetch_add, but the performance may not +// better than compare_exchange_weak, which can be checked by microbenchmark +// inductor_cpu_atomic.py +template +typename std::enable_if::value>::type +atomic_add(volatile T *addr, T offset) { + static_assert(sizeof(std::atomic) == sizeof(T), + "std::atomic issue"); + std::atomic *atomic_addr = (std::atomic *)addr; + atomic_addr->fetch_add(offset, std::memory_order_relaxed); +} + +// This function is used to convert bool or uint8 to float mask for +// vectorization. The caller needs to make sure the src represents TRUE/FALSE +// correctly. +template +inline float flag_to_float_scalar(T src) { + float ret; + *(uint32_t*)(&ret) = src ? 
0xFFFFFFFF : 0; + return ret; +} + +#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) + +inline at::vec::Vectorized masked_load(const float* src, at::vec::Vectorized mask) { + at::vec::Vectorized zero_vec(0); +# if defined(CPU_CAPABILITY_AVX512) + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ); + return _mm512_mask_loadu_ps(zero_vec, mmask, src); +# else // AVX2 + auto all_ones = _mm256_set1_epi32(0xFFFFFFFF); + auto mmask = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones); + return _mm256_maskload_ps(src, mmask); +# endif +} + +template +typename std::enable_if::value || std::is_same::value, at::vec::Vectorized>::type +inline masked_load(const T* src, at::vec::Vectorized mask) { +# if defined(CPU_CAPABILITY_AVX512) + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ); + auto zero = _mm256_set1_epi16(0); + auto temp = _mm256_mask_loadu_epi16(zero, mmask, src); + return _mm512_inserti32x8(_mm512_castsi256_si512(temp), zero, 1); +# else // AVX2 + auto all_ones = _mm256_set1_epi32(0xFFFFFFFF); + auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones); + __at_align__ uint32_t mmask[8]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec); + __at_align__ uint16_t result[16]; + for (auto i = 0; i < 8; i++) { + result[i] = mmask[i] == 0xFFFFFFFF ? 
src[i].x: uint16_t(0); + } + return at::vec::Vectorized::loadu(result); +# endif +} + +inline at::vec::Vectorized masked_load(const uint8_t* src, at::vec::Vectorized mask) { +# if defined(CPU_CAPABILITY_AVX512) + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ); + auto zero = _mm_set1_epi8(0); + auto temp = _mm_mask_loadu_epi8(zero, mmask, src); + return _mm512_inserti64x2(_mm512_set1_epi32(0), temp, 0); +# else // AVX2 + auto all_ones = _mm256_set1_epi32(0xFFFFFFFF); + auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones); + __at_align__ uint32_t mmask[8]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec); + __at_align__ uint8_t result[32]; + for (auto i = 0; i < 8; i++) { + result[i] = mmask[i] == 0xFFFFFFFF ? src[i]: uint8_t(0); + } + return at::vec::Vectorized::loadu(result); +# endif +} + +template +inline at::vec::Vectorized flag_to_float_vec(const T* src) { + __at_align__ float dst_tmp[at::vec::Vectorized::size()]; + #pragma unroll + for (int64_t i = 0; i < at::vec::Vectorized::size(); i++) { + dst_tmp[i] = flag_to_float_scalar(src[i]); + } + return at::vec::Vectorized::loadu(dst_tmp); +} + +template +inline at::vec::Vectorized cvt_lowp_fp_to_fp32( + at::vec::Vectorized src) { + at::vec::Vectorized res_vec1(0); + at::vec::Vectorized res_vec2(0); + std::tie(res_vec1, res_vec2) = at::vec::convert_to_float(src); + return res_vec1; +} + +template +inline at::vec::Vectorized cvt_fp32_to_lowp_fp( + at::vec::Vectorized src) { + return at::vec::convert_from_float(src, src); +} + +inline at::vec::Vectorized mask_convert_to_float(at::vec::Vectorized src) { + auto zeros = at::vec::Vectorized(0); + auto ones = at::vec::Vectorized(1); + return at::vec::Vectorized::blendv(zeros, ones, src); +} + +template +inline at::vec::Vectorized vec_convert_to_mask(at::vec::Vectorized src) { + assert( + at::vec::Vectorized::size() == at::vec::Vectorized::size()); 
+ at::vec::Vectorized res_vec(0); + __at_align__ float dst_tmp[at::vec::Vectorized::size()]; + __at_align__ SRC src_tmp[at::vec::Vectorized::size()]; + src.store(src_tmp); + +#pragma unroll + for (int i = 0; i < at::vec::Vectorized::size(); i++) { + *(uint32_t*)(dst_tmp + i) = src_tmp[i] ? 0xFFFFFFFF : 0; + } + + return res_vec.loadu(dst_tmp); +} + +template +inline at::vec::Vectorized to_float_mask(at::vec::Vectorized src) { + return vec_convert_to_mask(src); +} + +template <> +inline at::vec::Vectorized to_float_mask(at::vec::Vectorized src) { +#if defined(CPU_CAPABILITY_AVX2) + return at::vec::Vectorized(_mm256_castsi256_ps(src)); +#else + return at::vec::Vectorized(_mm512_castsi512_ps(src)); +#endif +} + +template <> +inline at::vec::Vectorized to_float_mask(at::vec::Vectorized src) { + return src; +} +#endif diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py new file mode 100644 index 0000000000000000000000000000000000000000..0a981afb08a124c23c1f81d7828933da15315289 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py @@ -0,0 +1,2740 @@ +import collections +import contextlib +import dataclasses +import functools +import itertools +import logging +import math +import operator +from typing import Dict, Iterable, List, Set + +import sympy + +import torch + +import torch._logging +from torch._prims_common import is_integer_dtype +from torch.utils._sympy.functions import FloorDiv, ModularIndexing +from torch.utils._sympy.value_ranges import ValueRanges +from ..._dynamo.utils import counters +from .. 
import config, ir, scheduler +from ..codecache import code_hash, get_path +from ..dependencies import MemoryDep, StarDep +from ..ir import ReductionHint +from ..optimize_indexing import indexing_dtype_strength_reduction +from ..scheduler import BaseScheduling +from ..triton_heuristics import AutotuneHint +from ..utils import ( + DeferredLineBase, + get_fused_kernel_name, + get_kernel_metadata, + green_text, + is_welford_reduction, + next_power_of_2, + sympy_product, + sympy_subs, + sympy_symbol, + unique, + yellow_text, +) +from ..virtualized import ops, V +from ..wrapper_benchmark import get_kernel_category_by_source_code +from .common import ( + CSEVariable, + DeferredLine, + free_symbol_startswith, + IndentedBuffer, + index_prevent_reordering, + Kernel, + OpOverrides, + PythonPrinter, + SizeArg, +) +from .triton_utils import config_of, signature_of, signature_to_meta + +log = logging.getLogger(__name__) +perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints") +schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") + + +class TritonPrinter(PythonPrinter): + def _print_floor(self, expr): + assert len(expr.args) == 1 + return f"tl.math.floor({self.paren(self._print(expr.args[0]))})" + + def _helper_sqrt(self, expr): + return f"tl.math.sqrt({self.paren(self._print(expr))}.to(tl.float32))" + + def _print_Where(self, expr): + c = self.doprint(expr.args[0]) + p = self.doprint(expr.args[1]) + q = self.doprint(expr.args[2]) + return f"tl.where({c}, {p}, {q})" + + def _print_Min(self, expr): + nargs = len(expr.args) + if len(expr.args) == 1: + return self._print(expr.args[0]) + + mid = len(expr.args) // 2 + a = self._print(sympy.Min(*expr.args[:mid])) + b = self._print(sympy.Min(*expr.args[mid:])) + return f"tl.math.min({a}, {b})" + + def _print_Max(self, expr): + nargs = len(expr.args) + if len(expr.args) == 1: + return self._print(expr.args[0]) + + mid = len(expr.args) // 2 + a = self._print(sympy.Max(*expr.args[:mid])) + b = 
self._print(sympy.Max(*expr.args[mid:])) + return f"tl.math.max({a}, {b})" + + +texpr = TritonPrinter().doprint +pexpr = PythonPrinter().doprint + + +def triton_compute_type(dtype): + triton_type_name = str(dtype).split(".")[-1] + if triton_type_name == "bool": + triton_type_name = "int1" + if triton_type_name in ("float16", "bfloat16"): + # float16 math is done in float32 inside the kernel + triton_type_name = "float32" + return f"tl.{triton_type_name}" + + +def triton_acc_type(dtype): + if is_integer_dtype(dtype) and dtype.is_signed: + nbits = 64 if dtype == torch.int64 else 32 + return f"tl.int{nbits}" + return triton_compute_type(dtype) + + +def triton_constant(value): + if value == float("inf"): + return 'float("inf")' + elif value == float("-inf"): + return 'float("-inf")' + elif math.isnan(value): + return 'float("nan")' + return repr(value) + + +class TritonCSEVariable(CSEVariable): + def __init__(self, name, bounds: ValueRanges): + super().__init__(name, bounds) + # We'll use this to track which masks the variable needs when used for indirect indexing + self.mask_vars: Set[str] = set() + + def update_on_args(self, name, args, kwargs): + # When making a variable that is going to be used in indirect indexing + # if a where clause is used it should mean that the result is always a + # valid index, so you shouldn't include any of the dependent variables + # in the resulting load mask + if name == "where": + return + for arg in args: + if isinstance(arg, TritonCSEVariable): + self.mask_vars.update(arg.mask_vars) + elif isinstance(arg, sympy.Symbol) and arg.name[0] in "xyr": + # most of the time index vars don't need masks associated with them + # however, when index vars are used to compute indices for indirect reads + # those reads should subsequently be masked, + self.mask_vars.update({f"{arg.name[0]}mask"}) + + +class TritonOverrides(OpOverrides): + """Map element-wise ops to Triton""" + + @staticmethod + def to_dtype(x, dtype: torch.dtype): + if dtype == 
torch.bool: + return f"({x} != 0)" + elif dtype == torch.uint8: + # to work around llvm uint conversion semantics + # that produces 0's for negative values + return f"{x}.to(tl.int8).to(tl.uint8)" + return f"{x}.to({triton_compute_type(dtype)})" + + @staticmethod + def to_dtype_bitcast(x, dtype: torch.dtype): + return f"{x}.to({triton_compute_type(dtype)}, bitcast=True)" + + @classmethod + def constant(cls, value, dtype): + if dtype == torch.uint8: + # tl.full is broken for uint8, remove once triton is fixed. + # See openai/triton#1919 + tmp = cls.constant(value, torch.int16) + return cls.to_dtype(tmp, dtype) + + type_ = torch._prims_common.dtype_to_type(dtype) + triton_val = triton_constant(type_(value)) + triton_type = triton_compute_type(dtype) + + if triton_type == "tl.float32": + # Float constants are always f32 in triton + return triton_val + + # NOTE: We use a tensor here in order to get the expected type. + # Otherwise, e.g. float64 constants would be trunctated to float32. + # Also, we could just use shape=[1] here but starting with the correct + # ndim avoids extra `tt.expand_dim` ops appearing in the triton IR. + ndim = V.kernel.triton_tensor_ndim() + shape = [1] * ndim + return f"tl.full({shape}, {triton_val}, {triton_type})" + + @staticmethod + def abs(x): + return f"tl.abs({x})" + + @staticmethod + def libdevice_abs(x): + return f"tl.math.abs({x})" + + @staticmethod + def exp(x): + return f"tl.exp({x})" + + @staticmethod + def libdevice_exp(x): + return f"tl.math.exp({x})" + + @staticmethod + def exp2(x): + return f"tl.math.exp2({x})" + + @staticmethod + def expm1(x): + return f"tl.math.expm1({x})" + + @staticmethod + def sqrt(x): + return f"tl.sqrt({x})" + + @staticmethod + def libdevice_sqrt(x): + return f"tl.math.sqrt({x})" + + @staticmethod + def relu(x): + bug = config.triton.inject_relu_bug_TESTING_ONLY + if bug == "compile_error": + return "compile error!" 
+ elif bug == "runtime_error": + # NB: this only triggers runtime error as long as input + # is not all zero + return f'triton_helpers.device_assert_then({x} == 0, "injected assert fail", {x})' + elif bug == "accuracy": + return f"{x} + 1" + elif bug is None: + return ops.maximum("0", x) + else: + raise AssertionError( + f"unrecognized config triton.inject_relu_bug_TESTING_ONLY = {bug!r}" + ) + + @staticmethod + def minimum(a, b): + return f"triton_helpers.minimum({a}, {b})" + + @staticmethod + def maximum(a, b): + return f"triton_helpers.maximum({a}, {b})" + + @staticmethod + def where(a, b, c): + return f"tl.where({a}, {b}, {c})" + + @staticmethod + def cos(x): + return f"tl.cos({x})" + + @staticmethod + def libdevice_cos(x): + return f"tl.math.cos({x})" + + @staticmethod + def sin(x): + return f"tl.sin({x})" + + @staticmethod + def libdevice_sin(x): + return f"tl.math.sin({x})" + + @classmethod + def index_expr(cls, expr, dtype): + index_str, mask_vars, mask, expand_str = V.kernel.indexing(expr) + # This is called from CSEProxy.__getattr__, so we'll set the bounds there + var = V.kernel.cse.generate(V.kernel.compute, index_str) + + if dtype not in {torch.int32, torch.int64}: + var = V.kernel.cse.generate(V.kernel.compute, cls.to_dtype(var, dtype)) + var.mask_vars = mask_vars + return var + + @staticmethod + def masked(mask, body, other): + with V.kernel.mask_loads(mask) as new_mask: + result = body() + return ops.where(new_mask, result, triton_constant(other)) + + @staticmethod + def lgamma(x): + return f"tl.math.lgamma({x})" + + @staticmethod + def erf(x): + return f"tl.math.erf({x})" + + @staticmethod + def cosh(x): + return f"tl.math.cosh({x})" + + @staticmethod + def sinh(x): + return f"tl.math.sinh({x})" + + @staticmethod + def acos(x): + return f"tl.math.acos({x})" + + @staticmethod + def acosh(x): + return f"tl.math.acosh({x})" + + @staticmethod + def asin(x): + return f"tl.math.asin({x})" + + @staticmethod + def asinh(x): + return f"tl.math.asinh({x})" + 
+ @staticmethod + def atan2(x, y): + return f"tl.math.atan2({x}, {y})" + + @staticmethod + def atan(x): + return f"tl.math.atan({x})" + + @staticmethod + def atanh(x): + return f"tl.math.atanh({x})" + + @staticmethod + def copysign(x, y): + return f"tl.math.copysign({x}, {y})" + + @staticmethod + def erfc(x): + return f"tl.math.erfc({x})" + + @staticmethod + def erfinv(x): + return f"tl.math.erfinv({x})" + + @staticmethod + def hypot(x, y): + return f"tl.math.hypot({x}, {y})" + + @staticmethod + def log10(x): + return f"tl.math.log10({x})" + + @staticmethod + def nextafter(x, y): + return f"tl.math.nextafter({x}, {y})" + + @staticmethod + def logical_and(a, b): + return f"{a} & {b}" + + @staticmethod + def logical_not(a): + return f"{a} == 0" + + @staticmethod + def logical_or(a, b): + return f"{a} | {b}" + + @staticmethod + def logical_xor(a, b): + return f"({a} ^ {b})" + + @staticmethod + def bitwise_and(a, b): + return f"{a} & {b}" + + @staticmethod + def bitwise_not(a): + return f"~{a}" + + @staticmethod + def bitwise_or(a, b): + return f"{a} | {b}" + + @staticmethod + def bitwise_xor(a, b): + return f"{a} ^ {b}" + + @staticmethod + def bitwise_left_shift(a, b): + return f"{a} << {b}" + + @staticmethod + def bitwise_right_shift(a, b): + return f"{a} >> {b}" + + @staticmethod + def rand(seed, offset): + offset = f"({offset}).to(tl.uint32)" + return f"tl.rand({seed}, {offset})" + + @staticmethod + def randn(seed, offset): + offset = f"({offset}).to(tl.uint32)" + return f"tl.randn({seed}, {offset})" + + @staticmethod + def randint64(seed, offset, low, high): + offset = f"({offset}).to(tl.uint32)" + return f"triton_helpers.randint64({seed}, {offset}, {low}, {high})" + + @staticmethod + def load_seed(name, offset): + var = V.kernel.args.input(name) + return ( + f"tl.load({var} + {V.kernel.args.seed_offset('load_seed_offset', offset)})" + ) + + @staticmethod + def rsqrt(x): + return f"tl.math.rsqrt({x})" + + @staticmethod + def log1p(x): + return 
f"tl.math.log1p({x})" + + @staticmethod + def tan(x): + return f"tl.math.tan({x})" + + @staticmethod + def tanh(x): + return f"tl.math.tanh({x})" + + @staticmethod + def sigmoid(x): + return f"tl.sigmoid({x})" + + @staticmethod + def libdevice_sigmoid(x): + return f"1/(1 + tl.math.exp(-({x})))" + + @staticmethod + def signbit(x): + # XX: This is wrong for the value -0.0 in floating point + return f"tl.math.signbit({x}) if ({x}).dtype is tl.float32 else {x} < 0" + + @staticmethod + def fmod(a, b): + return f"tl.math.fmod({a}, {b})" + + @staticmethod + def pow(a, b): + return f"tl.math.pow({a}, {b})" + + @staticmethod + def log(x): + return f"tl.log({x})" + + @staticmethod + def libdevice_log(x): + return f"tl.math.log({x})" + + @staticmethod + def isinf(x): + return f"tl.math.isinf({x}).to(tl.int1)" + + @staticmethod + def isnan(x): + return f"tl.math.isnan({x}).to(tl.int1)" + + @staticmethod + def round(x): + return f"tl.math.nearbyint({x})" + + @staticmethod + def floor(x): + return f"tl.math.floor({x})" + + @staticmethod + def floordiv(a, b): + # See the comment in lowering.div_mode. a and b are integer type. + # Similar to div_floor_kernel_cuda in pytorch core. + # Notice that // in triton behaves as truncdiv instead of floordiv + quot = f"{a} // {b}" + rem = f"{a} % {b}" + return f"tl.where(({a} < 0) != ({b} < 0), tl.where({rem} != 0, {quot} - 1, {quot}), {quot})" + + @staticmethod + def sign(x): + def to_int(s): + return f"{s}.to(tl.int8)" + + left = to_int(ops.lt("0", x)) + right = to_int(ops.lt(x, "0")) + sub = ops.sub(left, right) + return f"{sub}.to({x}.dtype)" + + @staticmethod + def trunc(x): + return f"tl.math.trunc({x})" + + @staticmethod + def truncdiv(a, b): + # See the comment in lowering.div_mode. a and b are integer type. 
+ # Notice that // in triton behaves as truncdiv instead of floordiv + return f"{a} // {b}" + + @staticmethod + def ceil(x): + return f"tl.math.ceil({x})" + + +@dataclasses.dataclass +class IterationRanges: + """ + Each range tree represents multiple sets of iteration indexing + in a single tiled dimension in the output kernel. + + If you have two loops ranges one (4, 3, 2) and another (4, 6), + then the range tree will be: + 4 (i0) + 3 (i1) 6 (i3) + 2 (i2) + Where i0 is shared between both loops, but then the split into + different indexing vars. All loop ranges must iterate over + the same number of elements. + """ + + def __init__( + self, + name: str, + var_list: List[sympy.Symbol], + var_ranges: Dict[sympy.Symbol, sympy.Expr], + numel: sympy.Expr, + prefix: str, + *, + kernel: "Kernel", + divisor=sympy.Integer(1), + length=sympy.Integer(1), + ): + super().__init__() + self.name = name + self.var_list = var_list + self.var_ranges = var_ranges + self.numel = numel + self.prefix = prefix + self.divisor = divisor + self.length = length + self.kernel = kernel + + def is_loop(self): + return self.prefix == "r" and not self.kernel.persistent_reduction + + +class IterationRangesRoot(IterationRanges): + def __init__( + self, + name: str, + numel: sympy.Expr, + prefix: str, + index: int, + kernel: "Kernel", + pid_cache=None, + ): + if pid_cache is None: + pid_cache = {} + super().__init__( + name=name, + var_list=[], + var_ranges={}, + numel=numel, + prefix=prefix, + kernel=kernel, + ) + self.index = index + # Store all the nodes in one flat list + self.nodes: Dict[sympy.Expr, IterationRangesEntry] = {} + # This is for re-ordering program ID in triton mm template + # pid_cache["tl.program_id(0)"] = pid_m + self.pid_cache: Dict[str, str] = pid_cache + + def cache_clear(self): + for node in self.nodes.values(): + node.cache_clear() + + def lookup(self, divisor, length): + """ + Lookup a given RangeTreeEntry, creating it if needed + """ + if 
V.graph.sizevars.statically_known_equals(divisor * length, self.numel): + expr = FloorDiv(sympy_symbol(f"{self.prefix}index"), divisor) + else: + expr = ModularIndexing(sympy_symbol(f"{self.prefix}index"), divisor, length) + + if expr not in self.nodes: + node = IterationRangesEntry( + f"{self.prefix}{next(V.kernel.iter_vars_count)}", + divisor, + length, + expr, + self, + ) + V.kernel.range_tree_nodes[node.symbol()] = node + self.var_list.append(node.symbol()) + self.var_ranges[node.symbol()] = length + self.nodes[expr] = node + return self.nodes[expr] + + def construct_entries(self, lengths: List[sympy.Expr]): + divisor = sympy.Integer(1) + itervars = [] + for length in reversed(lengths): + itervars.append(self.lookup(divisor, length)) + divisor = divisor * length + return list(reversed(itervars)) + + def construct(self, lengths: List[sympy.Expr]): + return [e.symbol() for e in self.construct_entries(lengths)] + + def vars_and_sizes(self, index: sympy.Expr): + """Figure out vars from this tree used in index""" + nodes = [V.kernel.range_tree_nodes.get(s) for s in index.free_symbols] + nodes = [n for n in nodes if n and n.prefix == self.prefix] + nodes.sort(key=lambda x: V.graph.sizevars.size_hint(x.divisor)) + divisor = sympy.Integer(1) + index_vars = [] + sizes = [] + + def add(node): + nonlocal divisor + index_vars.append(node.symbol()) + sizes.append(node.length) + divisor = divisor * node.length + + for node in nodes: + if not V.graph.sizevars.statically_known_equals(node.divisor, divisor): + # fill in unused index var + add(self.lookup(divisor, FloorDiv(node.divisor, divisor))) + divisor = node.divisor + add(node) + if not V.graph.sizevars.statically_known_equals(self.numel, divisor): + # fill in unused index var + add(self.lookup(divisor, FloorDiv(self.numel, divisor))) + + return list(reversed(index_vars)), list(reversed(sizes)) + + def ranges_code(self): + size = self.kernel.indexing_size_str(self.index, self.prefix) + index_dtype = self.kernel.index_dtype 
+ convert = f".to({index_dtype})" if index_dtype != "tl.int32" else "" + return f"tl.arange(0, {self.prefix.upper()}BLOCK){size}{convert}" + + def scalar_code(self, value): + index_dtype = self.kernel.index_dtype + ndim = self.kernel.triton_tensor_ndim() + size = [1] * ndim + return f"tl.full({size}, {value}, {index_dtype})" + + def get_pid(self): + key = f"tl.program_id({self.index})" + pid = self.pid_cache.get(key, key) + if self.kernel.index_dtype != "tl.int32": + return f"{pid}.to({self.kernel.index_dtype})" + return pid + + def codegen_header(self, code, no_x_dim=False): + x = self.prefix + if self.is_loop(): + code.writeline(f"{self.name} = {x}offset + {x}base") + elif x == "r" and self.kernel.persistent_reduction: + # no need to "roffset = " + code.writeline( + f"{self.name} = {self.ranges_code()}", + ) + else: + if not no_x_dim: + line = f"{x}offset + {self.ranges_code()}" + else: + line = self.scalar_code(f"{x}offset") + code.writelines( + [ + f"{x}offset = {self.get_pid()} * {x.upper()}BLOCK", + f"{self.name} = {line}", + ] + ) + code.writeline(f"{x}mask = {self.name} < {x}numel") + + +class IterationRangesEntry(IterationRanges): + def __init__( + self, + name: str, + divisor: sympy.Expr, + length: sympy.Expr, + expr: sympy.Expr, + parent: IterationRanges, + ): + super().__init__( + name=name, + numel=parent.numel / length, + var_list=parent.var_list, + var_ranges=parent.var_ranges, + prefix=parent.prefix, + divisor=divisor, + length=length, + kernel=parent.kernel, + ) + self.parent = parent + self.codegen = functools.lru_cache(None)(self._codegen) + self.expr = expr + + def set_name(self, name): + self.codegen = lambda: name + self.codegen.cache_clear = lambda: None + self.name = name + + def cache_clear(self): + self.codegen.cache_clear() + + def writeline(self, line): + if self.is_loop(): + V.kernel.indexing_code.writeline(line) + else: + # lift non-reduction stores outside loop + V.kernel.body.writeline(line) + + def _codegen(self): + 
self.writeline(f"{self.name} = " + texpr(V.kernel.rename_indexing(self.expr))) + return self.name + + def precomputed_args(self): + # for dynamic shapes, find parts of indexing expressions that have to be precomputed + precomputed_args = [] + if isinstance(self.expr, sympy.Symbol): + return precomputed_args + assert isinstance(self.expr, (FloorDiv, ModularIndexing)), type(self.expr) + for arg in self.expr.args[1:]: + if not isinstance(arg, (sympy.Integer, sympy.Symbol)): + symbols = arg.free_symbols + if len(symbols) > 0 and all(s.name.startswith("s") for s in symbols): + precomputed_args.append(arg) + return precomputed_args + + def symbol(self): + return sympy_symbol(self.name) + + def __hash__(self): + return hash(self.name) + + def __eq__(self, other): + return self.name == other.name + + +class TritonKernel(Kernel): + overrides = TritonOverrides + sexpr = pexpr + + def __init__( + self, + *groups, + index_dtype, + mutations=None, + pid_cache=None, + reduction_hint=ReductionHint.DEFAULT, + ): + if pid_cache is None: + pid_cache = {} + super().__init__() + self.numels = [V.graph.sizevars.simplify(s) for s in groups] + self.mutations = mutations + self.range_trees = [] + self.range_tree_nodes = {} + self.iter_vars_count = itertools.count() + self.inside_reduction = self.numels[-1] != 1 + self._load_mask = None + self.body = IndentedBuffer() + self.indexing_code = IndentedBuffer() + self.suffix = IndentedBuffer() + self.outside_loop_vars = set() + self.reduction_hint = reduction_hint + self.index_dtype = index_dtype + # Upper bounds for indirect_indexing and their str representation + self.indirect_max_sizes: Dict[Tuple[str, str], [sympy.Expr, str]] = {} + self.last_usage = set() + + self.persistent_reduction = self.should_use_persistent_reduction() + self.no_x_dim = ( + self.reduction_hint == ReductionHint.INNER + and self.persistent_reduction + and len(self.numels) == 2 + and self.numels[-1] >= 256 + ) + self.initialize_range_tree(pid_cache) + + # A set of 
autotuning hints to pass as part of triton_meta + self.autotune_hints: Set[AutotuneHint] = set() + + # define this in a closure to make cache local to object + @functools.lru_cache(None) + def simplify_indexing(index: sympy.Expr): + index = V.graph.sizevars.simplify_with_ranges(index, self.var_ranges()) + for tree in self.range_trees: + index = self.combine_contiguous_dims(index, tree) + return index + + self.simplify_indexing = simplify_indexing + + def should_use_persistent_reduction(self): + """ + Heuristic to set self.persistent_reduction and add guards + if needed. + """ + if not (self.inside_reduction and config.triton.persistent_reductions): + return False + threshold = { + ReductionHint.INNER: 1024, + }.get(self.reduction_hint, 64) + last_numel = self.numels[-1] + if not isinstance(last_numel, (int, sympy.Integer)): + # Not static + return False + hint = V.graph.sizevars.size_hint(last_numel) + if hint > threshold: + return False + # will need to recompile if we cross a larger power of 2 boundary + V.graph.sizevars.guard_leq(self.numels[-1], next_power_of_2(hint)) + return True + + def set_last_usage(self, nodes): + if not self.inside_reduction or self.persistent_reduction: + return + self.last_usage = set( + itertools.chain.from_iterable( + n.last_usage for n in nodes if n is not EnableReduction + ) + ) + + def initialize_range_tree(self, pid_cache): + names = list( + reversed(["xindex", "yindex", "zindex"][: len(self.numels) - 1]) + ) + ["rindex"] + for i in range(len(self.numels)): + pid_idx = i if names[i][0] == "r" else "xyz".find(names[i][0]) + self.range_trees.append( + IterationRangesRoot( + names[i], self.numels[i], names[i][0], pid_idx, self, pid_cache + ) + ) + for tree in self.range_trees: + # reduction indexing goes inside a loop + if not tree.is_loop(): + tree.codegen_header(self.body, self.no_x_dim) + if self.inside_reduction and self.range_trees[-1].is_loop(): + # workaround for this issue: + # 
https://gist.github.com/jansel/6527126f781559095c5531f98a4235a7 + self.body.writeline(f"rbase = {self.range_trees[-1].ranges_code()}") + + def disable_reduction(self): + @contextlib.contextmanager + def ctx(): + if self.numels[-1] == 1: + assert not self.inside_reduction + yield + return + if not self.persistent_reduction: + # calling codegen_body() will flush all the pending buffers + # and write out a reduction loop + self.codegen_body() + self.inside_reduction = False + try: + yield + if not self.persistent_reduction: + # flush out any code before opening the next loop + self.codegen_body() + finally: + self.inside_reduction = True + + return ctx() + + def set_ranges(self, *lengths): + assert len(lengths) == len(self.range_trees) + return [ + ranges.construct(length) + for length, ranges in zip(lengths, self.range_trees) + ] + + @staticmethod + def _split_iteration_ranges( + groups: List[sympy.Expr], lengths: List[List[sympy.Expr]] + ): + sv = V.graph.sizevars + new_ranges = [[] for _ in groups] + remaining = [sv.simplify(g) for g in groups] + var_count = itertools.count() + + def add_range(i, expr): + expr = sv.simplify(expr) + if not sv.statically_known_multiple_of(remaining[i], expr): + raise CantSplit() + # guard on the last item out + remaining[i] = FloorDiv(remaining[i], expr) + new_ranges[i].append(expr) + return next(var_count) + + def make_combined(size, idx1, idx2): + def getter(flat_vars): + return size * flat_vars[idx1] + flat_vars[idx2] + + return getter + + return_getters_groups = [] + current_group = 0 + for length_group in lengths: + return_getters = [] + for size in length_group: + if sv.statically_known_equals(size, 1): + return_getters.append(lambda _: sympy.Integer(0)) + continue + + while ( + current_group < len(remaining) + and sv.size_hint(remaining[current_group]) == 1 + ): + # scroll to next group with remaining elements + current_group += 1 + + if sv.size_hint(size) > sv.size_hint(remaining[current_group]): + # need to break size in two 
+ if not sv.statically_known_multiple_of( + size, remaining[current_group] + ): + raise CantSplit() + size1 = remaining[current_group] + size2 = FloorDiv(size, remaining[current_group]) + return_getters.append( + make_combined( + size2, + add_range(current_group, size1), + add_range(current_group + 1, size2), + ) + ) + else: + return_getters.append( + operator.itemgetter(add_range(current_group, size)) + ) + return_getters_groups.append(return_getters) + + assert all( + V.graph.sizevars.size_hint(s) == 1 for s in remaining + ), f"failed to set ranges {remaining} {lengths}" + + return new_ranges, return_getters_groups + + @classmethod + def is_compatible(cls, groups: List[sympy.Expr], lengths: List[List[sympy.Expr]]): + try: + cls._split_iteration_ranges(groups, lengths) + return True + except CantSplit: + return False + + def split_and_set_ranges(self, lengths: List[List[sympy.Expr]]): + """ + We may want to fuse `for i0 in s0*s1` into a tiled kernel with groups (s0, s1). + + To do this we need to split up the iteration space of i0 into something like: + for i1 in s0: + for i2 in s1: + i0 = i1*s1 + i2 + .... + + This function matches and resplits lengths to the groups of + this kernel to enable tiled + non-tiled fusions. + """ + groups = [rt.numel for rt in self.range_trees] + if not self.inside_reduction: + groups[-1] = sympy.Integer(1) + + if len(lengths) == len(self.range_trees) and all( + V.graph.sizevars.simplify(sympy_product(x) - g) == 0 + for x, g in zip(lengths, groups) + ): + return self.set_ranges(*lengths) + + new_ranges, return_getters_groups = self._split_iteration_ranges( + groups, lengths + ) + itervars = list(itertools.chain(*self.set_ranges(*new_ranges))) + return [[fn(itervars) for fn in fns] for fns in return_getters_groups] + + def is_indirect_indexing(self, index: sympy.Expr): + # tmpX means indirect indexing + return free_symbol_startswith(index, "tmp") + + def is_broadcasted(self, index: sympy.Expr): + # Note. 
This may not be correct when there is indirect indexing + if self.is_indirect_indexing(index): + return False + + index_numels = [1] * len(self.numels) + for symbol in index.free_symbols: + if symbol not in self.range_tree_nodes: + # Non-iterated variables, e.g. strides + continue + entry = self.range_tree_nodes[symbol] + index_numels[entry.parent.index] *= entry.length + + # If the index variables only iterate over a subset of the kernel + # numels, then it must be broadcasted. + simplify = V.graph.sizevars.simplify + return any( + simplify(idx_range) != simplify(iter_range) + for idx_range, iter_range in zip(index_numels, self.numels) + ) + + def combine_contiguous_dims(self, index: sympy.Expr, tree: IterationRangesRoot): + """ + More aggressive simplification to merge contiguous dims + """ + if isinstance(index, (sympy.Integer, sympy.Symbol)): + return index + index_vars, sizes = tree.vars_and_sizes(index) + if len(sizes) <= 1: + return index + new_sizes, reindex, prune = V.graph.sizevars._simplify_loops( + index_vars, sizes, index_prevent_reordering([index], index_vars, sizes) + ) + if new_sizes == sizes: + return index + new_index_vars = tree.construct(new_sizes) + new_index = sympy_subs(index, dict(zip(index_vars, reindex(new_index_vars)))) + return new_index + + def index_to_str(self, index: sympy.Expr) -> str: + """ + Convert an index expr to a string that can be used in triton code. + e.g. a sympy expression "s2" may actually appear as "ks1" in the triton kernel. + + Index expressions often need to be passed in as arguments to the triton kernel. + Rename_indexing and codegen_indexing keep track of the needed indices and add + new parameters to the function signature. 
+ """ + return texpr(self.rename_indexing(self.codegen_indexing(index))) + + def indexing( + self, + index: sympy.Expr, + *, + copy_shape=None, + dense_indexing=False, + override_mask=None, + ): + """ + Compute the index and mask to pass to tl.load() or tl.store() + """ + index = self.simplify_indexing(index) + index = sympy_subs(index, V.graph.sizevars.precomputed_replacements) + # if simple replacements didn't get rid of floor/ceil, try full subs + if len(index.atoms(sympy.floor)) or len(index.atoms(sympy.ceiling)): + index = index.subs(V.graph.sizevars.precomputed_replacements) + # last resort, if no range vars are in the expr, hoist it + # TODO instead of trying to blindly find complicated exprs, we should hoist the + # inputs/outputs sizes and strides, but at the time indexing is generated + # kernel inputs and outputs are not set yet, we'd need a deeper refactor + # to do it this way + + if len(index.atoms(sympy.ceiling)): + for a in index.atoms(sympy.ceiling): + # for nested exprs, atoms yields top level first (?) 
+ # so if everything goes fine, lower level replacements will come up empty + symbols = a.free_symbols + if len(symbols) > 0 and all( + s.name.startswith("s") or s.name.startswith("ps") for s in symbols + ): + replacements = {a: V.graph.sizevars.lookup_precomputed_size(a)} + index = sympy_subs(index, replacements) + + index_vars = index.free_symbols + index = self.simplify_indexing(index) + index_str = self.index_to_str(index) + + mask_vars: Set[str] = set() + for var in index_vars: + if override_mask: + pass + elif var.name.startswith("tmp"): + # indirect indexing + cse_var = self.cse.varname_map[var.name] + mask_vars.update(cse_var.mask_vars) + elif var.name.startswith(("s", "ps")): + pass + else: + # var is one of xN, yN or rN + assert var.name[0] in "xyr", var.name + mask_vars.add(f"{var.name[0]}mask") + + need_dense = ( + config.triton.dense_indexing + or dense_indexing + or self._load_mask is not None + ) and index != 0 + + have_dense = True + have_loop_vars = False + dense_mask_vars = set() + + for tree in self.range_trees: + if tree.prefix == "r" and not self.inside_reduction: + continue + if index_vars.intersection(tree.var_list): + have_loop_vars = True + else: + have_dense = False + dense_mask_vars.add(f"{tree.prefix}mask") + + expand_str = None + + if isinstance(index, sympy.Integer): + expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() + index_str = f"tl.full({expand_str}, {index_str}, tl.int32)" + return index_str, set(), "None", expand_str + + if need_dense and not have_dense: + expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() + index_str = f"tl.broadcast_to({index_str}, {expand_str})" + mask_vars = dense_mask_vars + elif not have_loop_vars and copy_shape: + index_str = f"tl.broadcast_to({index_str}, {copy_shape}.shape)" + mask_vars = dense_mask_vars + + if override_mask: + mask_vars = {override_mask} + + if self._load_mask: + mask_vars.add(self._load_mask) + + self.filter_masks(mask_vars) + + 
mask_str = " & ".join(sorted(map(str, mask_vars))) if mask_vars else "None" + return index_str, mask_vars, mask_str, expand_str + + def filter_masks(self, mask_vars): + for tree in self.range_trees: + # Masks are superfluous if we only have one element + if V.graph.sizevars.statically_known_equals(tree.numel, 1): + mask_vars.discard(f"{tree.prefix}mask") + continue + # Masks are superfluous if numel is a multiple of BLOCK + # (We use the fact that BLOCK is required by triton to be a power of 2) + if tree.prefix.upper() not in config.triton.max_block: + continue + max_block = config.triton.max_block[tree.prefix.upper()] + # Optional optimization: if block divides numel exactly, we will + # never need to do a masked load to handle stragglers at the end. + # It's faster to avoid masking at all. But it is sound to always + # mask. + if V.graph.sizevars.statically_known_multiple_of(tree.numel, max_block): + mask_vars.discard(f"{tree.prefix}mask") + + def var_ranges(self): + return dict( + itertools.chain.from_iterable( + tree.var_ranges.items() for tree in self.range_trees + ) + ) + + def codegen_indexing(self, expr: sympy.Expr): + expr = V.graph.sizevars.simplify_with_ranges(expr, self.var_ranges()) + for sym in sorted(expr.free_symbols, key=str): + if sym in self.range_tree_nodes: + # if indexing expression is complicated, we precompute it on the host side + # and send the result as a kernel argument + replacements = {} + for ps in self.range_tree_nodes[sym].precomputed_args(): + replacements[ps] = V.graph.sizevars.lookup_precomputed_size(ps) + if len(replacements) > 0: + self.range_tree_nodes[sym].expr = sympy_subs( + self.range_tree_nodes[sym].expr, replacements + ) + self.range_tree_nodes[sym].codegen() + return expr + + @contextlib.contextmanager + def mask_loads(self, mask): + """Context manager to add an additional mask to tl.load/store""" + prior = self._load_mask + if prior: + mask = self.cse.generate(self.compute, f"{mask} & {prior}") + + self._load_mask = 
mask + try: + # TODO(jansel): do we need a reshape here? + yield mask + finally: + self._load_mask = prior + + def indirect_indexing(self, var, size, check=True): + # TODO(lezcano) This code should be lifted to codegen/common.py. + # This should be easy, as now CSE variables carry bounds info + class IndirectAssertLine(DeferredLineBase): + def __init__(self, line, var, mask, size_map): + self.var = var + self.mask = mask + self.line = line + self.size_map = size_map + + def __call__(self): + size, size_str = self.size_map[(self.var, self.mask)] + + # We assert if we've not been able to prove the bound + assert_min = (self.var.bounds.lower >= 0) != sympy.true + assert_max = (self.var.bounds.upper < size) != sympy.true + + # FooBar interview question + if not (assert_min or assert_max): + return None + elif assert_min and assert_max: + # The conditions need to be in parens because of Python's operator precedence. + # It'd be less error-prone to use and/or/not, which is suported by triton + cond = f"(0 <= {self.var}) & ({self.var} < {size_str})" + cond_print = f"0 <= {self.var} < {size_str}" + elif assert_min: + cond = f"0 <= {self.var}" + cond_print = cond + else: + assert assert_max + cond = f"{self.var} < {size_str}" + cond_print = cond + + if self.mask: + cond = f"({cond}) | ~{self.mask}" + return self.line.format(cond=cond, cond_print=cond_print) + + def _new_line(self, line): + return IndirectAssertLine(line, self.var, self.mask, self.size_map) + + if var.bounds.lower < 0: + new_bounds = ValueRanges.unknown() + if var.bounds != ValueRanges.unknown() and isinstance(size, sympy.Number): + # Take the negative part of the bound and add size to it + # Then take union of that and the positive part + # This is a tighter bound than that of a generic ops.where, as we have info on the cond + neg = var.bounds & ValueRanges(-sympy.oo, -1) + new_bounds = ValueRanges(neg.lower + size, neg.upper + size) + # We don't have a good way of representing the empty range + if 
var.bounds.upper >= 0: + pos = var.bounds & ValueRanges(0, sympy.oo) + new_bounds = new_bounds | pos + + stm = f"{var} + {self.index_to_str(size)}" + # Mixed negative and non-negative + if var.bounds.upper >= 0: + stm = f"tl.where({var} < 0, {stm}, {var})" + new_var = self.cse.generate(self.compute, stm, bounds=new_bounds) + + new_var.update_on_args("index_wrap", (var,), {}) + var = new_var + + generate_assert = ( + (check or config.debug_index_asserts) + and config.triton.assert_indirect_indexing + and torch.version.hip is None + ) + if generate_assert: + mask_vars = set(var.mask_vars) + if self._load_mask: + mask_vars.add(self._load_mask) + + mask = "" + if mask_vars: + mask = ( + f"{list(mask_vars)[0]}" + if len(mask_vars) == 1 + else f"({' & '.join(str(v) for v in mask_vars)})" + ) + + # An assertion line may have been written already, if so just + # update the max size. + map_key = (var, mask) + existing_size, _ = self.indirect_max_sizes.get(map_key, (None, None)) + if existing_size is not None: + size = sympy.Min(size, existing_size) + else: + line = 'tl.device_assert({cond}, "index out of bounds: {cond_print}")' + self.compute.writeline( + IndirectAssertLine(line, var, mask, self.indirect_max_sizes) + ) + + self.indirect_max_sizes[map_key] = (size, self.index_to_str(size)) + + return sympy_symbol(str(var)) + + def load(self, name: str, index: sympy.Expr): + var = self.args.input(name) + indirect_indexing = self.is_indirect_indexing(index) + original_index = index + index, mask_vars, mask, expand_str = self.indexing(index) + + # Keep the variable in cache if were going to reuse it. Equiv., if any of the following hold + # 1) We are doing broadcasting + # 2) It will be used later and it won't be CSE'd. 
Equiv., if all the following hold + # 2.1) We are in a reduction loop + # 2.2) Its not its last use + # 2.3) This load will not be lifted to the body + if self.is_broadcasted(original_index): + ep = ", eviction_policy='evict_last'" + elif self.inside_reduction and not self.persistent_reduction: + if name in self.args.inplace_buffers: + names = set(self.args.inplace_buffers[name].other_names) + else: + names = {name} + last_use = len(names & self.last_usage) > 0 + evict_last = not last_use and ("rmask" in mask or indirect_indexing) + ep = ", eviction_policy='evict_last'" if evict_last else "" + else: + ep = "" + # "other" below is a workaround for https://github.com/openai/triton/issues/737 + # for bool, even though it's likely subject to the same bug, setting `other` leads + # to LLVM errors so we are skipping it for now + if ("tmp" in mask or "rmask" in mask) and V.graph.get_dtype(name) != torch.bool: + other = ", other=0" + else: + other = "" + + append_broadcast = None + if V.graph.is_unspec_arg(name): + line = var + else: + if isinstance(original_index, sympy.Integer): + line = f"tl.load({var} + ({original_index}))" + append_broadcast = expand_str + else: + line = f"tl.load({var} + ({index}), {mask}{ep}{other})" + if V.graph.get_dtype(name) in (torch.float16, torch.bfloat16): + line += ".to(tl.float32)" + + if "tmp" in mask: + # Masked loads must come after the mask is computed + load_buffer = self.compute + elif ( + self.inside_reduction + and not self.persistent_reduction + and "rmask" not in mask + and not indirect_indexing + ): + # can lift a common load outside of reduction loop + # One exception is when this is an indirect_load. 
+ load_buffer = self.body + else: + load_buffer = self.loads + + result_var = self.cse.generate(load_buffer, line) + result_var.mask_vars = mask_vars + + if append_broadcast: + line = f"tl.broadcast_to({result_var}, {append_broadcast})" + result_var = self.cse.generate(load_buffer, line) + + if not self.inside_reduction or "rmask" not in mask: + self.outside_loop_vars.add(result_var) + + return result_var + + def store(self, name, index, value, mode=None): + var = self.args.output(name) + indirect_indexing = self.is_indirect_indexing(index) + original_index = index + index, mask_vars, mask, expand_str = self.indexing(index, dense_indexing=True) + + # Guard against write-after-read corruption in triton. + # See # https://github.com/openai/triton/issues/1615 + # This triton bug means that a load which is broadcasted over multiple + # warps may see the result of a store that happens later in the triton + # program. The workaround is to add a barrier before storing, which + # enforces that all warps have already read the data. + is_inplace = name in self.args.inplace_buffers + is_broadcasted = self.is_broadcasted(original_index) + if is_inplace and is_broadcasted: + self.stores.writeline(DeferredLine(name, "tl.debug_barrier()")) + + if mode is None: + line = f"tl.store({var} + ({index}), {value}, {mask})" + elif mode == "atomic_add": + line = f"tl.atomic_add({var} + ({index}), {value}, {mask})" + else: + raise NotImplementedError(f"store mode={mode}") + self.stores.writeline(DeferredLine(name, line)) + if not self.inside_reduction: + self.outside_loop_vars.add(value) + + def bucketize( + self, + values: CSEVariable, + offsets_name: str, + offsets_size: sympy.Expr, + indexing_dtype: torch.dtype, + right: bool, + ): + """ + See [Note: Inductor bucketize op] + """ + + # Triton performance for bucketize_binary_search is much better when the number + # of threads equals the number of elements. 
+ # If we're trying to use a bucketize kernel, we should make sure that an + # autotuning config with num_elements_per_warp=32 exists. + self.autotune_hints.add(AutotuneHint.ELEMENTS_PER_WARP_32) + + offsets_ptr = self.args.input(offsets_name) + block_size = self.dense_size_str() + offsets_size_str = self.index_to_str(offsets_size) + + if indexing_dtype == torch.int32: + triton_dtype = "tl.int32" + elif indexing_dtype == torch.int64: + triton_dtype = "tl.int64" + else: + raise NotImplementedError( + "Bucketize only supports indexing with int32 and int64" + ) + + result = self.cse.generate( + self.compute, + f"triton_helpers.bucketize_binary_search({values}, {offsets_ptr}, {triton_dtype}, {right}, {offsets_size_str}, {block_size})", # noqa: B950 line too long + ) + + return result + + def reduction_resize(self, value): + ndims = self.triton_tensor_ndim() + if ndims == 1: + return f"triton_helpers.promote_to_tensor({value})" + + sizes = [":"] * ndims + sizes[-1] = "None" + return f"{value}[{', '.join(sizes)}]" + + @staticmethod + def _map_tuple_or_scalar(fn, value): + if isinstance(value, tuple): + return tuple(map(fn, value)) + return fn(value) + + def reduction(self, dtype, src_dtype, reduction_type, value): + assert self.inside_reduction + masks = {f"{tree.prefix}mask" for tree in self.range_trees} + self.filter_masks(masks) + masks = sorted(masks) + if self._load_mask: + masks.append(self._load_mask) + reduction_range_prefix = self.range_trees[-1].prefix + reduction_sizes = ["None" for _ in self.range_trees] + reduction_sizes[-1] = ":" + + # Say we have + # tmp0 = ops.constant(1, torch.int64) + # tmp1 = ops.reduction(torch.int64, torch.int64, "sum", tmp0) + # tmp0 in the triton code is either a scalar, or single-element tensor + # so if we emit tl.sum directly, it will only give 1 instead of RBLOCK * 1 + # To avoid this, we broadcast to the expected shape first. 
+ dense_size_str = self.dense_size_str() + value = self._map_tuple_or_scalar( + lambda v: self.cse.generate( + self.compute, f"tl.broadcast_to({v}, {dense_size_str})" + ), + value, + ) + + def final_reduction(value): + use_helper = reduction_type in {"any", "max", "min", "prod"} + module = "triton_helpers" if use_helper else "tl" + if reduction_type in {"max", "min"}: + return self.reduction_resize( + f"{module}.{reduction_type}2({value}, {dim})" + ) + return self.reduction_resize(f"{module}.{reduction_type}({value}, {dim})") + + def final_argreduce(buffer, result_var, value, index): + buffer.splice( + f"""\ + _, {result_var}_tmp = triton_helpers.{root_op}_with_index({value}, {index}, {dim}) + {result_var} = {self.reduction_resize(f'{result_var}_tmp')} + """ + ) + + cache_key = (src_dtype, reduction_type, value) + if cache_key in self.cse.reduction_cache: + return self.cse.reduction_cache[cache_key] + + dim = len(self.range_trees) - 1 - int(bool(self.no_x_dim)) + acc_type = triton_acc_type(src_dtype) + result_var = self.cse.newvar() + result_var.mask_vars = {var for var in masks if var[0] != "r"} + cond = " & ".join(masks) + + if self.persistent_reduction: + default = ir.Reduction.default_value(reduction_type, src_dtype) + default = self._map_tuple_or_scalar(triton_constant, default) + + def _mask_value(value, default): + return self.cse.generate( + self.compute, f"tl.where({cond}, {value}, {default})" + ) + + if isinstance(value, tuple): + masked_value = [_mask_value(v, d) for v, d in zip(value, default)] + else: + masked_value = _mask_value(value, default) + + if reduction_type in {"argmax", "argmin"}: + accumulator_index = self.cse.generate( + self.compute, + f"tl.broadcast_to({reduction_range_prefix}index, {masked_value}.shape)", + ) + root_op = {"argmax": "max", "argmin": "min"}[reduction_type] + final_argreduce( + self.compute, result_var, masked_value, accumulator_index + ) + elif reduction_type == "welford_reduce": + # For persistent reductions, don't 
bother with + # welford's algorithm since it uses more registers, and + # taking two reductions doesn't increase memory usage. + sum_ = ops.reduction(dtype, dtype, "sum", value) + self.inside_reduction = False + rnumel = ops.index_expr(self.numels[-1], dtype) + mean = ops.div(sum_, rnumel) + + self.inside_reduction = True + dx = ops.sub(value, mean) + dx2 = ops.mul(dx, dx) + m2 = ops.reduction(dtype, dtype, "sum", dx2) + result_var = (mean, m2, rnumel) + elif reduction_type == "welford_combine": + mean, m2, weight = masked_value + welford = f"triton_helpers.welford({mean}, {m2}, {weight}, {dim})" + mean, m2, weight = (self.cse.newvar() for _ in range(3)) + self.compute.writeline(f"{mean}, {m2}, {weight} = {welford}") + + result_var = tuple( + self.cse.generate(self.compute, self.reduction_resize(var_name)) + for var_name in (mean, m2, weight) + ) + else: + result_var = self.cse.generate( + self.compute, final_reduction(masked_value) + ) + else: + accumulator = f"_{result_var}" + default = ir.Reduction.default_accumulator(reduction_type, src_dtype) + default = self._map_tuple_or_scalar(triton_constant, default) + if not isinstance(default, tuple): + self.body.writeline( + f"{accumulator} = tl.full({self.dense_size_str()}, {default}, {acc_type})" + ) + + if reduction_type in {"argmax", "argmin"}: + accumulator_index = f"_{result_var}_index" + long_max = torch.iinfo(torch.int64).max + self.body.writeline( + f"{accumulator_index} = tl.full({self.dense_size_str()}, {long_max}, tl.int64)" + ) + root_op = {"argmax": "max", "argmin": "min"}[reduction_type] + + self.compute.splice( + f"""\ + {accumulator}_next, {accumulator_index}_next = triton_helpers.{root_op}imum_with_index( + {accumulator}, {accumulator_index}, {value}, {reduction_range_prefix}index + ) + {accumulator} = tl.where({cond}, {accumulator}_next, {accumulator}) + {accumulator_index} = tl.where({cond}, {accumulator_index}_next, {accumulator_index}) + """ + ) + final_argreduce(self.suffix, result_var, 
accumulator, accumulator_index) + elif is_welford_reduction(reduction_type): + accumulator = f"{result_var}_mean" + accumulator_m2 = f"{result_var}_m2" + accumulator_weight = f"{result_var}_weight" + self.body.writeline( + f"{accumulator} = tl.zeros({self.dense_size_str()}, {acc_type})" + ) + self.body.writeline( + f"{accumulator_m2} = tl.zeros({self.dense_size_str()}, {acc_type})" + ) + self.body.writeline( + f"{accumulator_weight} = tl.zeros({self.dense_size_str()}, {acc_type})" + ) + + if reduction_type == "welford_combine": + mean, m2, weight = value + self.compute.splice( + f"""\ + {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_combine( + {accumulator}, {accumulator_m2}, {accumulator_weight}, + {mean}, {m2}, {weight} + ) + """ + ) + else: + assert reduction_type == "welford_reduce" + self.compute.splice( + f"""\ + {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_reduce( + {value}, {accumulator}, {accumulator_m2}, {accumulator_weight}, + ) + """ + ) + + self.compute.splice( + f"""\ + {accumulator} = tl.where({cond}, {accumulator}_next, {accumulator}) + {accumulator_m2} = tl.where({cond}, {accumulator_m2}_next, {accumulator_m2}) + {accumulator_weight} = tl.where({cond}, {accumulator_weight}_next, {accumulator_weight}) + """ + ) + + result_mean = result_var + result_m2 = self.cse.newvar() + result_weight = self.cse.newvar() + self.suffix.splice( + f"""\ + {result_mean}_tmp, {result_m2}_tmp, {result_weight}_tmp = triton_helpers.welford( + {accumulator}, {accumulator_m2}, {accumulator_weight}, {dim} + ) + {result_mean} = {self.reduction_resize(f'{result_mean}_tmp')} + {result_m2} = {self.reduction_resize(f'{result_m2}_tmp')} + {result_weight} = {self.reduction_resize(f'{result_weight}_tmp')} + """ + ) + result_var = result_mean, result_m2, result_weight + else: + combine_fn = ir.get_reduction_combine_fn(reduction_type, src_dtype) + updated = combine_fn(accumulator, value) + 
self.compute.writeline( + f"{accumulator} = tl.where({cond}, {updated}, {accumulator})" + ) + + if src_dtype == torch.bool: + # This is only really used for aten.any. It changes the + # final reduction of a non-persistent reduction from + # tmp5 = triton_helpers.max(_tmp5, 1)[:, None] + # to + # tmp5 = triton_helpers.max(_tmp5.to(tl.int8), 1)[:, None].to(tl.int1) + # which is needed because tl.reduce doesn't support tl.int1 + accumulator = f"{accumulator}.to(tl.int8)" + result_type = triton_compute_type(dtype) + self.suffix.writeline( + f"{result_var} = {final_reduction(accumulator)}.to({result_type})" + ) + else: + self.suffix.writeline( + f"{result_var} = {final_reduction(accumulator)}" + ) + + self.cse.reduction_cache[cache_key] = result_var + + if isinstance(result_var, tuple): + self.outside_loop_vars |= set(result_var) + else: + self.outside_loop_vars.add(result_var) + + return result_var + + def store_reduction(self, name, index, value): + assert self.inside_reduction + self.inside_reduction = False + index, mask_vars, mask, _ = self.indexing(index) + assert "rmask" not in index + self.inside_reduction = True + + var = self.args.output(name) + self.suffix.writeline( + DeferredLine(name, f"tl.store({var} + ({index}), {value}, {mask})") + ) + + def codegen_body(self): + """ + Concat output code from index_code, loads, compute, stores, + suffix into self.body. + + For pointwise kernels, this is called just once at the end. + + For reduction kernels, this generates a loop over the reduction + axis. 
+ """ + if not ( + self.indexing_code + or self.loads + or self.stores + or self.compute + or self.suffix + ): + return + + if self.inside_reduction and not self.persistent_reduction: + self.body.writeline("for roffset in range(0, rnumel, RBLOCK):") + with self.body.indent(): + # last range tree is always reduction + self.range_trees[-1].codegen_header(self.body) + self.body.splice(self.indexing_code) + self.body.splice(self.loads) + self.body.splice(self.compute) + self.body.splice(self.stores) + + # invalidate any caches that came from inside the reduction loop + self.cse.invalidate(self.outside_loop_vars) + self.range_trees[-1].cache_clear() + else: + self.body.splice(self.indexing_code) + self.body.splice(self.loads) + self.body.splice(self.compute) + self.body.splice(self.stores) + self.body.splice(self.suffix) + self.indexing_code.clear() + self.loads.clear() + self.compute.clear() + self.stores.clear() + self.suffix.clear() + + def codegen_kernel_benchmark(self): + result = IndentedBuffer() + argdefs, call_args, signature = self.args.python_argdefs() + + result.writelines(["", "", "def get_args():"]) + with result.indent(): + name_cnt = itertools.count() + var_names = [] + for arg_name, arg_sig in zip(call_args, signature): + var_name = f"arg_{next(name_cnt)}" + buf = V.graph.get_buffer(arg_name) + if buf: + result.writeline( + f"{var_name} = rand_strided({V.graph.sizevars.size_hints(buf.get_size())}, {V.graph.sizevars.size_hints(buf.get_stride())}, device='{buf.get_device()}', dtype={buf.get_dtype()})" # noqa: B950 line too long + ) + elif arg_name in V.graph.constants: + # note that random seed is put in V.graph.constants + const_tensor = V.graph.constants[arg_name] + result.writeline( + f"{var_name} = rand_strided({V.graph.sizevars.size_hints(const_tensor.size())}, {V.graph.sizevars.size_hints(const_tensor.stride())}, device='{const_tensor.device}', dtype={const_tensor.dtype})" # noqa: B950 line too long + ) + elif isinstance(arg_sig, SizeArg): + 
symval_hint = V.graph.sizevars.size_hint(arg_sig.expr) + + # Force the seed_offset to be 0 so calls to the same kernel + # using different seed offset will have the same benchmark harness. + # We can dedup kernel definitions in this case. + if "seed_offset" in arg_sig.name: + symval_hint = 0 + result.writeline(f"{var_name} = {symval_hint}") + else: + raise KeyError( + f"Don't find the buffer or const tensor for {arg_name}" + ) + var_names.append(var_name) + result.writeline(f"return {', '.join(var_names)},") + + result.writelines(["\n", "\n", "def call(args):"]) + grid = [] + extra_args = [] + extra_args_str = None + index = V.graph.scheduler.current_device.index + with result.indent(): + result.writeline(f"with torch.cuda._DeviceGuard({index}):") + with result.indent(): + result.writeline( + f"torch.cuda.set_device({index})" + ) # no-op to ensure context + for tree in self.range_trees: + expr = pexpr(V.graph.sizevars.size_hint(tree.numel)) + if tree.prefix != "r" or self.inside_reduction: + extra_args.append(expr) + if tree.prefix != "r": + grid.append(expr) + + stream_name = f"stream{index}" + result.writeline(f"{stream_name} = get_cuda_stream({index})") + extra_args_str = ", ".join(map(str, extra_args)) + ", " + result.writeline( + f"KERNEL_NAME.run(*args, {extra_args_str}grid=grid({', '.join(grid)}), stream={stream_name})" + ) + + # benchmark all configs + result.writelines(["\n", "\n", "def benchmark_all_configs(args):"]) + with result.indent(): + result.writeline(f"with torch.cuda._DeviceGuard({index}):") + with result.indent(): + result.writeline( + f"torch.cuda.set_device({index})" + ) # no-op to ensure context + result.writeline( + f"return KERNEL_NAME.benchmark_all_configs(*args, {extra_args_str}grid=grid({', '.join(grid)}))" + ) + + ninplace_args = len(unique(self.args.inplace_buffers.values())) + result.writelines(["\n", "\n", "if __name__ == '__main__':"]) + with result.indent(): + result.writeline("from torch._inductor.utils import get_num_bytes") + 
result.writeline("from triton.testing import do_bench") + result.writeline("") + + result.writeline("args = get_args()") + result.writeline( + "ms = do_bench(lambda: call(args), rep=40, fast_flush=True)" + ) + result.writeline( + f"num_gb = get_num_bytes(*args, num_in_out_args={ninplace_args}) / 1e9" + ) + result.writeline("gb_per_s = num_gb / (ms / 1e3)") + result.writeline( + 'print(f"{ms:.3f}ms {num_gb:.3f}GB {gb_per_s:.2f}GB/s")' + ) + + return result + + def codegen_kernel(self, name=None): + from triton import next_power_of_2 + + code = IndentedBuffer() + + size_hints = [ + next_power_of_2(V.graph.sizevars.size_hint(numel)) for numel in self.numels + ] + if self.persistent_reduction: + assert self.inside_reduction + heuristics = "persistent_reduction" + elif self.inside_reduction: + heuristics = "reduction" + else: + size_hints.pop() + heuristics = "pointwise" + + if name is None: + code.splice( + f""" + import triton + import triton.language as tl + from torch._inductor.ir import ReductionHint + from torch._inductor.ir import TileHint + from torch._inductor.triton_heuristics import AutotuneHint, {heuristics} + from torch._inductor.utils import instance_descriptor + from torch._inductor import triton_helpers + """ + ) + if config.benchmark_kernel: + code.splice( + """ + from torch._dynamo.testing import rand_strided + from torch._C import _cuda_getCurrentRawStream as get_cuda_stream + import torch + from torch._inductor.triton_heuristics import grid + """ + ) + + argdefs, _, signature = self.args.python_argdefs() + # maps actual expression to SizeArg if its in sizevars replacements + for i, arg in enumerate(signature): + if ( + isinstance(arg, SizeArg) + and arg.expr in V.graph.sizevars.inv_precomputed_replacements + ): + signature[i] = SizeArg( + arg.name, V.graph.sizevars.inv_precomputed_replacements[arg.expr] + ) + + mutated_args = set() + for mutation in self.mutations: + if mutation in self.args.input_buffers: + 
mutated_args.add(self.args.input_buffers[mutation]) + if ( + mutation in self.args.inplace_buffers + and mutation not in V.graph.removed_buffers + ): + mutated_args.add(self.args.inplace_buffers[mutation].inner_name) + if mutation in self.args.output_buffers: + mutated_args.add(self.args.output_buffers[mutation]) + mutated_args = sorted(mutated_args) + + triton_meta = { + "signature": signature_to_meta(signature, size_dtype=self.index_dtype), + "device": V.graph.scheduler.current_device.index, + "device_type": V.graph.scheduler.current_device.type, + "constants": {}, + "mutated_arg_names": mutated_args, + "autotune_hints": set(self.autotune_hints), + "kernel_name": "DESCRIPTIVE_KRNL_NAME", + } + + for tree in self.range_trees: + if tree.prefix != "r" or self.inside_reduction: + sizearg = SizeArg(f"{tree.prefix}numel", tree.numel) + signature.append(sizearg) + triton_meta["signature"][len(argdefs)] = signature_of( + sizearg, size_dtype=self.index_dtype + ) + argdefs.append(f"{tree.prefix}numel") + # constexpr version causes issues, see + # https://github.com/pytorch/torchdynamo/pull/1362 + # triton_meta["constants"][len(argdefs)] = V.graph.sizevars.size_hint( + # tree.numel + # ) + # argdefs.append(f"{tree.prefix}numel: tl.constexpr") + triton_meta["configs"] = [config_of(signature)] + + for tree in self.range_trees: + if tree.prefix == "r" and ( + not self.inside_reduction or self.persistent_reduction + ): + continue + if tree.prefix == "x" and self.no_x_dim: + continue + argdefs.append(f"{tree.prefix.upper()}BLOCK : tl.constexpr") + + if self.inside_reduction: + reduction_hint = self.reduction_hint + heuristics_line = f""" + @{heuristics}( + size_hints={size_hints!r}, + reduction_hint={reduction_hint}, + filename=__file__, + meta={triton_meta!r} + ) + @triton.jit + """ + else: + tile_hint = "" + if len(size_hints) == 2: + if len(signature) == 4: # input, output and 2 args + tile_hint = "tile_hint=TileHint.SQUARE," + else: + tile_hint = 
"tile_hint=TileHint.DEFAULT," + heuristics_line = f""" + @{heuristics}(size_hints={size_hints!r}, {tile_hint}filename=__file__, meta={triton_meta!r}) + @triton.jit + """ + code.splice(heuristics_line) + code.writeline(f"def {name or 'KERNEL_NAME'}({', '.join(argdefs)}):") + self.codegen_body() + with code.indent(): + self.codegen_static_numels(code) + for old, new in self.args.aliases(): + code.writeline(f"{old} = {new}") + code.splice(self.body) + + if config.benchmark_kernel: + code.splice(self.codegen_kernel_benchmark()) + + if name is not None: + return code.getvalue() + + return code.getvalue() + + def codegen_static_numels(self, code): + """ + We get a small speedup from hard coding numels if they are static. + + This code stomps on the passed-in values by writing an constant to the top of the kernel. + + In a kernel like: + def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): + + We would add + xnumel = 4096 + rnumel = 768 + + After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes + a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream + knows that its a static numel, as that you just plop a constant into the kernel. 
+ """ + for tree in self.range_trees: + if tree.prefix != "r" or self.inside_reduction: + simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) + if isinstance(simplified_tree_numel, (sympy.Integer, int)): + code.writeline(f"{tree.prefix}numel = {int(simplified_tree_numel)}") + + if tree.prefix == "r" and self.persistent_reduction: + simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) + if isinstance(simplified_tree_numel, (sympy.Integer, int)): + val = int(simplified_tree_numel) + else: + continue + val = next_power_of_2(val) + code.writeline(f"RBLOCK: tl.constexpr = {val}") + + if tree.prefix == "x" and self.no_x_dim: + code.writeline("XBLOCK: tl.constexpr = 1") + + def triton_tensor_ndim(self): + no_x_dim = int(bool(self.no_x_dim)) + no_r_dim = self.numels[-1] == 1 + return len(self.range_trees) - no_x_dim - no_r_dim + + def indexing_size_str(self, i=None, x=None): + # no_x_dim is sympy.logic.boolalg.BooleanTrue + no_x_dim = int(bool(self.no_x_dim)) + sizes = ["None"] * self.triton_tensor_ndim() + if i is not None: + idx = i - no_x_dim + sizes[idx] = ":" + return f"[{', '.join(sizes)}]" + + def dense_size_str(self): + sizes = [] + for tree in self.range_trees: + if self.no_x_dim and tree.prefix == "x": + continue + if tree.prefix != "r" or self.inside_reduction: + sizes.append(f"{tree.prefix.upper()}BLOCK") + elif tree.prefix == "r" and tree.numel != 1: + sizes.append("1") + + if sizes[0:3] == ["ZBLOCK", "YBLOCK", "XBLOCK"]: + sizes[0:3] = reversed(sizes[0:3]) + + if sizes[0:2] == ["YBLOCK", "XBLOCK"]: + sizes[0:2] = reversed(sizes[0:2]) + + return f"[{', '.join(sizes)}]" + + def call_kernel(self, name: str): + wrapper = V.graph.wrapper_code + _, call_args, _ = self.args.python_argdefs() + # dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar + for i in range(len(call_args)): + if V.graph.is_unspec_arg(call_args[i]): + call_args[i] = call_args[i] + ".item()" + grid = [] + # TODO(jansel): if there are constants, we shouldn't 
bother passing them as args + for tree in self.range_trees: + if isinstance(tree.numel, (sympy.Integer, sympy.Symbol)): + expr = tree.numel + else: + expr = wrapper.generate_numel_expr(name, tree) + + if tree.prefix != "r" or self.inside_reduction: + call_args.append(expr) + if tree.prefix != "r": + grid.append(expr) + + wrapper.generate_kernel_call( + name, + call_args, + grid, + V.graph.scheduler.current_device.index, + ) + + def warn_mix_layout(self, kernel_name): + """ + Print message if the kernel have mixed layout inputs. + Only care about 4D tensor for now. + """ + if ( + len(self.args.input_buffers) == 1 + and len(self.args.output_buffers) == 1 + and len(self.args.inplace_buffers) == 0 + ): + # even if input buffer and output buffer have different layout, + # this can be a layout conversion kernel. No need to warn for + # the mix layouts. + return + + argdefs, call_args, signature = self.args.python_argdefs() + uniform_stride_order = None + for arg_name in call_args: + buf = V.graph.get_buffer(arg_name) + if buf and len(buf.layout.size) == 4: + # ignore the tensor if only 1 dimention is non-zero + if len([x for x in buf.layout.size if x == 1]) == 3: + continue + stride_order = ir.get_stride_order(buf.layout.stride) + if uniform_stride_order is None: + uniform_stride_order = stride_order + elif uniform_stride_order != stride_order: + msg = yellow_text( + f"Expected stride order {uniform_stride_order}, but found stride order" + + f" {stride_order} for kernel {kernel_name}" + ) + log.warning(msg) + + stride_order_list = [ + ir.get_stride_order(V.graph.get_buffer(name).layout.stride) + if V.graph.get_buffer(name) + else None + for name in call_args + ] + size_list = [ + V.graph.get_buffer(name).layout.size + if V.graph.get_buffer(name) + else None + for name in call_args + ] + source_list = [ + "GraphInput" + if name in V.graph.graph_inputs + else "IntermediateBuffer" + if name in V.graph.name_to_buffer + else None + for name in call_args + ] + + msg = 
yellow_text( + f" param names {argdefs}\n buf names {call_args}\n strides {stride_order_list}" + + f"\n sizes {size_list}\n sources {source_list}\n" + ) + log.warning(msg) + return + msg = green_text( + f"All the inputs for the triton kernel {kernel_name} have uniform layout" + ) + log.warning(msg) + + def create_cse_var(self, *args, **kwargs): + return TritonCSEVariable(*args, **kwargs) + + +class TritonScheduling(BaseScheduling): + def __init__(self, scheduler): + self.scheduler = scheduler + + def group_fn(self, sizes): + return tuple(V.graph.sizevars.simplify(sympy_product(s)) for s in sizes) + + def can_fuse(self, node1, node2): + """ + Hook called by Scheduler to determine if the Triton backend + can fuse node1 and node2. These nodes might already be + FusedSchedulerNodes. + """ + if isinstance(node1, scheduler.ForeachKernelSchedulerNode) or isinstance( + node2, scheduler.ForeachKernelSchedulerNode + ): + return scheduler.ForeachKernelSchedulerNode.can_fuse(node1, node2) + + _, (numel1, rnumel1) = node1.group + _, (numel2, rnumel2) = node2.group + + if node1.is_reduction() and node2.is_reduction(): + return numel1 == numel2 and rnumel1 == rnumel2 + + if not node1.is_reduction() and not node2.is_reduction(): + if not (numel1 == numel2 and rnumel1 == rnumel2): + return False + + if node1.is_template(): + return True # skip checks for compatible tiling + + # check for a bad combined tiling + tiling1 = self.select_tiling(node1.get_nodes(), numel1, rnumel1) + tiling2 = self.select_tiling(node2.get_nodes(), numel1, rnumel1) + tiling3 = self.select_tiling( + node1.get_nodes() + node2.get_nodes(), numel1, rnumel1 + ) + if config.triton.tiling_prevents_pointwise_fusion: + if len(tiling1) > 2: + if len(tiling2) > 2: + return tiling1 == tiling2 == tiling3 + else: + return tiling1 == tiling3 + elif len(tiling2) > 2: + return tiling2 == tiling3 + + return True + + if not node1.is_reduction() and node2.is_reduction(): + assert rnumel1 == 1 and rnumel2 != 1 + if numel1 == 
numel2 * rnumel2: + if not all( + TritonKernel.is_compatible((numel2, rnumel2), n.get_ranges()) + for n in node1.get_nodes() + ): + return False + if ( + config.triton.tiling_prevents_reduction_fusion + and not node1.is_template() + ): + return self.select_tiling(node1.get_nodes(), numel1) in ( + (numel1, 1), + (numel2, rnumel2, 1), + ) + return True + + return numel1 == numel2 + + assert node1.is_reduction() and not node2.is_reduction() + # swap args to hit the case above + return self.can_fuse_horizontal(node2, node1) + + can_fuse_vertical = can_fuse + can_fuse_horizontal = can_fuse + + def generate_node_schedule(self, nodes, numel, rnumel): + node_schedule = [] + current_loop_writes = set() + is_current_reductions = set() + done = set() + + def fits_in_main_body(n): + _, (node_numel, node_rnumel) = n.group + return (node_numel == numel and node_rnumel == rnumel) or ( + node_numel == numel * rnumel and node_rnumel == 1 + ) + + def fits_outside_reduction(n): + _, (node_numel, node_rnumel) = n.group + return node_numel == numel and node_rnumel == 1 and rnumel != 1 + + @contextlib.contextmanager + def end_current_reduction_loop(): + if current_loop_writes: + # flush out any other runnable nodes to reduce number of loops + for other_node in nodes[index + 1 :]: + if ( + node not in done + and fits_in_main_body(other_node) + and not ( + current_loop_writes & other_node.recursive_predecessors + ) + ): + done.add(node) + current_loop_writes.add(node.get_name()) + is_current_reductions.add(node.is_reduction()) + node_schedule.append(node) + + if node_schedule and node_schedule[-1] is EnableReduction: + node_schedule.pop() + else: + node_schedule.append(DisableReduction) + yield + node_schedule.append(EnableReduction) + current_loop_writes.clear() + is_current_reductions.clear() + + for index, node in enumerate(nodes): + if node in done: + continue + done.add(node) + + def requires_closing_previous_reduction(node, node_schedule): + if rnumel == 1: + return False + if not 
current_loop_writes & node.recursive_predecessors: + return False + assert node_schedule and not isinstance( + node_schedule[-1], (EnableReduction, DisableReduction) + ) + return True in is_current_reductions + + if fits_in_main_body(node): + if requires_closing_previous_reduction(node, node_schedule): + with end_current_reduction_loop(): + pass # need to start a new reduction loop + current_loop_writes.add(node.get_name()) + is_current_reductions.add(node.is_reduction()) + node_schedule.append(node) + elif fits_outside_reduction(node): + with end_current_reduction_loop(): + node_schedule.append(node) + else: + raise NotImplementedError( + f"unexpected group: ({numel}, {rnumel}) != {node.group[1]}" + ) + + return node_schedule + + def codegen_nodes(self, nodes): + """ + Given a set of pre-fused nodes, generate a Triton kernel. + """ + _, (numel, rnumel) = max(nodes, key=lambda x: int(x.is_reduction())).group + + node_schedule = self.generate_node_schedule(nodes, numel, rnumel) + + if schedule_log.isEnabledFor(logging.DEBUG): + schedule_log.debug("Schedule:\n %s", node_schedule) + + return self.codegen_node_schedule(node_schedule, numel, rnumel) + + @staticmethod + def reduction_hint(node): + assert node.is_reduction() + if all( + dep.is_contiguous() + for dep in itertools.chain(node.read_writes.reads, node.read_writes.writes) + ): + return ReductionHint.INNER + else: + return node.node.data.reduction_hint + + @staticmethod + def can_use_32bit_indexing(numel: sympy.Expr, buffers: Iterable[ir.Buffer]) -> bool: + int_max = torch.iinfo(torch.int32).max + size_hint = V.graph.sizevars.size_hint + has_hint = V.graph.sizevars.shape_env.has_hint + + def within_32bit(e): + # Allow for unhinted e as long as we can still statically prove + # (e.g., via ValueRanges) that it is still in bounds + if V.graph.sizevars.is_expr_static_and_true(e <= int_max): + return True + # Otherwise, the hint MUST exist and be in range + return has_hint(e) and size_hint(e) <= int_max + + if not 
within_32bit(numel): + return False + + # Any use of a MultiOutputLayout will create a buffer with a + # Layout whose sizes are accounted for + buf_sizes = [ + buf.get_layout().storage_size() + for buf in buffers + if not isinstance(buf.get_layout(), ir.MultiOutputLayout) + ] + + if not all(within_32bit(size) for size in buf_sizes): + return False + + # Only install guards for 32-bit indexing as there is no correctness + # issue with using 64-bit for everything + V.graph.sizevars.guard_leq(numel, int_max) + for size in buf_sizes: + V.graph.sizevars.guard_leq(size, int_max) + return True + + @staticmethod + def select_index_dtype(node_schedule, numel, reduction_numel): + # Gather all used buffer names + buffer_names = set() + for node in node_schedule: + if not isinstance(node, scheduler.BaseSchedulerNode): + continue + + buffer_names.update(node.get_names()) + buffer_names.update(node.used_buffer_names()) + + # Get buffers objects + def _get_buffer(name: str) -> ir.Buffer: + if name in V.graph.name_to_buffer: + return V.graph.name_to_buffer[name] + elif name in V.graph.graph_inputs: + return V.graph.graph_inputs[name] + elif name in V.graph.constants: + data = V.graph.constants[name] + return ir.ConstantBuffer( + name, + ir.FixedLayout( + data.device, data.dtype, *V.graph.static_sizes_strides(data) + ), + ) + raise RuntimeError(f"Failed to find buffer matching name {name}") + + buffers = [_get_buffer(name) for name in buffer_names] + + # In theory we can separately check xnumel and rnumel are <= int_max + # but some indexers do use the full linear index so we need to be + # conservative here. 
+ total_numel = numel * reduction_numel + + if TritonScheduling.can_use_32bit_indexing(total_numel, buffers): + return "tl.int32" + return "tl.int64" + + def get_kernel_args(self, node_schedule, numel, reduction_numel): + reductions = list( + filter( + lambda n: n not in (EnableReduction, DisableReduction) + and n.is_reduction(), + node_schedule, + ) + ) + if len(reductions) > 0: + hints = [self.reduction_hint(n) for n in reductions] + if hints.count(hints[0]) == len(hints): + reduction_hint_val = hints[0] + else: + reduction_hint_val = ReductionHint.DEFAULT + else: + reduction_hint_val = ReductionHint.DEFAULT + + mutations = set() + for node in node_schedule: + if hasattr(node, "get_mutations"): + mutations.update(node.get_mutations()) + + index_dtype = self.select_index_dtype(node_schedule, numel, reduction_numel) + + return reduction_hint_val, mutations, index_dtype + + def codegen_comment(self, node_schedule): + wrapper = V.graph.wrapper_code + origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) + if origins: + wrapper.writeline(origins) + + def codegen_node_schedule(self, node_schedule, numel, reduction_numel): + tiled_groups = self.select_tiling(node_schedule, numel, reduction_numel) + reduction_hint_val, mutations, index_dtype = self.get_kernel_args( + node_schedule, numel, reduction_numel + ) + + kernel = TritonKernel( + *tiled_groups, + reduction_hint=reduction_hint_val, + mutations=mutations, + index_dtype=index_dtype, + ) + + self.codegen_node_schedule_with_kernel(node_schedule, kernel) + + src_code = kernel.codegen_kernel() + kernel_name = self.define_kernel(src_code, node_schedule) + self.codegen_comment(node_schedule) + kernel.call_kernel(kernel_name) + + if config.warn_mix_layout: + kernel.warn_mix_layout(kernel_name) + + if ( + V.graph.wrapper_code.supports_intermediate_hooks + and config.generate_intermediate_hooks + ): + # Not every node in the schedule will actually be live on output; + # we can't check dead buffers. 
+ live_outs = kernel.args.live_output_buffers() + for node in node_schedule: + if not isinstance(node, scheduler.BaseSchedulerNode): + continue + name = node.get_name() + if name not in live_outs: + continue + origin_node = node.node.get_origin_node() + if origin_node is not None: + counters["inductor"]["intermediate_hooks"] += 1 + V.graph.wrapper_code.writeline( + f"run_intermediate_hooks({origin_node.name!r}, {name})" + ) + + self.scheduler.free_buffers() + + def codegen_node_schedule_with_kernel(self, node_schedule, kernel): + def current_reduction_nodes(nodes): + return itertools.takewhile(lambda n: n is not DisableReduction, nodes) + + with kernel: + stack = contextlib.ExitStack() + kernel.set_last_usage(current_reduction_nodes(node_schedule)) + for node in node_schedule: + if node not in (EnableReduction, DisableReduction): + node.mark_run() + for i, node in enumerate(node_schedule): + if node is DisableReduction: + stack.enter_context(kernel.disable_reduction()) + elif node is EnableReduction: + stack.close() + kernel.set_last_usage(current_reduction_nodes(node_schedule[i:])) + else: + # TODO - use split ranges ? 
+ indexing_dtype_strength_reduction(node._body) + index_vars = kernel.split_and_set_ranges(node.get_ranges()) + node.codegen(index_vars) + + def define_kernel(self, src_code, node_schedule): + wrapper = V.graph.wrapper_code + if src_code in wrapper.src_to_kernel: + kernel_name = wrapper.src_to_kernel[src_code] + else: + fused_name = ( + get_fused_kernel_name(node_schedule, config.triton.descriptive_names) + if config.triton.descriptive_names + else "" + ) + kernel_category = get_kernel_category_by_source_code(src_code)[:3] + kernel_name = "_".join( + ["triton", kernel_category, fused_name, wrapper.next_kernel_suffix()] + ) + # use the original src_code as the key + wrapper.src_to_kernel[src_code] = kernel_name + subs_name = kernel_name if config.triton.unique_kernel_names else "triton_" + + # DESCRIPTIVE_KRNL_NAME is used for profiling purposes; it shows the full kernel name + # even when unique_kernel_names is turned off. Meanwhile, KERNEL_NAME is sometimes set + # to "triton_" to maximize caching opportunities (when unique_kernel_names = False). + src_code = src_code.replace("DESCRIPTIVE_KRNL_NAME", kernel_name) + src_code = src_code.replace("KERNEL_NAME", subs_name) + + # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does + # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. 
+ src_code = src_code.replace("#pragma CMT", "#") + + basename, _, kernel_path = get_path(code_hash(src_code), "py") + + compile_wrapper = IndentedBuffer() + compile_wrapper.writeline(f"async_compile.triton({subs_name!r}, '''") + compile_wrapper.splice(src_code, strip=True) + compile_wrapper.writeline("''')") + + metadata_comment = f"# kernel path: {kernel_path}" + origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) + metadata_comment += "\n" + origins + "\n" + detailed_origins + wrapper.define_kernel( + kernel_name, compile_wrapper.getvalue(), metadata_comment + ) + return kernel_name + + def codegen_template(self, template_node, epilogue_nodes): + """ + Codegen a triton template + """ + _, (numel, rnumel) = template_node.group + assert rnumel == 1 + kernel, render = template_node.node.make_kernel_render(template_node.node) + with kernel: + for node in [template_node, *epilogue_nodes]: + node.mark_run() + partial_code = render() + for node in epilogue_nodes: + node.codegen(kernel.split_and_set_ranges(node.get_ranges())) + + # finalize must be called after adding epilogue above + src_code = partial_code.finalize() + node_schedule = [template_node, *epilogue_nodes] + kernel_name = self.define_kernel(src_code, node_schedule) + self.codegen_comment(node_schedule) + kernel.call_kernel(kernel_name) + self.scheduler.free_buffers() + + def codegen_sync(self): + V.graph.wrapper_code.writeline("torch.cuda.synchronize()") + + def codegen_foreach(self, foreach_node): + from .triton_foreach import ForeachKernel + + for partitions_with_metadata in ForeachKernel.horizontal_partition( + foreach_node.get_subkernel_nodes(), self + ): + kernel = ForeachKernel() + for nodes, tiled_groups, numel, rnumel in partitions_with_metadata: + node_schedule = self.generate_node_schedule(nodes, numel, rnumel) + ( + reduction_hint_val, + mutations, + index_dtype, + ) = self.get_kernel_args(node_schedule, numel, rnumel) + self.codegen_node_schedule_with_kernel( + node_schedule, 
+ kernel.create_sub_kernel( + *tiled_groups, + reduction_hint=reduction_hint_val, + mutations=mutations, + index_dtype=index_dtype, + ), + ) + + src_code = kernel.codegen_kernel() + kernel_name = self.define_kernel(src_code, [foreach_node]) + self.codegen_comment([foreach_node]) + kernel.call_kernel(V.graph.wrapper_code, kernel_name) + + self.scheduler.free_buffers() + + @staticmethod + @functools.lru_cache(32) + def candidate_tilings(node): + ranges, reduction_ranges = node.get_ranges() + if len(ranges) <= 1: + return () + + rw = node.pointwise_read_writes() + assert len(rw.range_vars) == len(ranges) + + # isinstance(dep, MemoryDep): this filters out StarDeps. StarDeps refer to reads + # that need to access the entire tensor; they don't contribute read indexing + # information (and practically, they don't have dep.index so they can't be used + # for stride_hints below + dep_sources = [rw.reads, rw.writes] + assert all( + isinstance(dep, (MemoryDep, StarDep)) + for dep in itertools.chain(*dep_sources) + ) + deps = [ + dep + for dep in itertools.chain(*dep_sources) + if dep.name not in V.graph.removed_buffers and isinstance(dep, MemoryDep) + ] + write_names = {dep.name for dep in rw.writes} + + tilings = [] + + for dep in deps: + strides = V.graph.sizevars.stride_hints(dep.index, rw.range_vars) + assert len(strides) == len(ranges) + try: + split = strides.index(1) + 1 + if split == len(ranges): + continue + if all(s == 0 for s in strides[split:]): + # if this is a broadcasted tensor and all dimensions after split are broadcast, + # this is not a real split + continue + + except ValueError: + continue + tiled_groups = ( + V.graph.sizevars.simplify(sympy_product(ranges[:split])), + V.graph.sizevars.simplify(sympy_product(ranges[split:])), + ) + # score by number of elements + score = V.graph.sizevars.size_hint( + sympy_product( + size for size, stride in zip(ranges, strides) if stride != 0 + ) + ) + if dep.name in write_names: + # ngimel said contiguous writes is more 
important than reads + score *= 2 + if CandidateTiling.is_good_size(tiled_groups[0]): + score *= 2 + if CandidateTiling.is_good_size(tiled_groups[1]): + score *= 2 + + if ( + V.graph.sizevars.size_hint( + score - sympy_product(itertools.chain(ranges, reduction_ranges)) + ) + >= 0 + ): + tilings.append(CandidateTiling(tiled_groups, score, dep.name)) + return tilings + + @classmethod + def select_tiling(cls, node_schedule, numel, reduction_numel=sympy.Integer(1)): + """ + Heuristics to decide how to tile kernels. + Currently, we tile based on stride-1 dimensions. + + Returns: + `(tile1, tile2, reduction_numel)` s.t. `tile1 * tile2 == numel` + + """ + if reduction_numel != 1 or config.triton.max_tiles <= 1: + # TODO(jansel): should we tile reductions? + # do perf hint here if stride-1 dim is not being reduced + if perf_hint_log.level <= logging.WARNING: + for node in EnableReduction.filter(node_schedule): + if len(cls.candidate_tilings(node)) > 0: + perf_hint_log.info("reduction over non-contiguous dims") + break + return (numel, reduction_numel) + + seen_names = set() + candidate_tiles = collections.Counter() + for node in EnableReduction.filter(node_schedule): + for tiling in cls.candidate_tilings(node): + if tiling.name in seen_names: + continue + seen_names.add(tiling.name) + candidate_tiles[tiling.tiling] += tiling.score + + ranked_tilings = [tiling for tiling, score in candidate_tiles.most_common()] + + if config.triton.max_tiles >= 3: + # Consider adding a third dimension of tiling, but only + # when a1 is a multiple of b1; otherwise, you have a lot + # of stragglers which is annoying to generate code for. + # + # NB: More than three max tiles is not enabled by default. 
+ + # Add one 3D tiling choice + for i in range(1, len(ranked_tilings)): + a0, a1 = ranked_tilings[0] + b0, b1 = ranked_tilings[i] + if V.graph.sizevars.size_hint(a1 - b1) == 0: + continue + if V.graph.sizevars.size_hint(a1 - b1) < 0: + # swap so a0 is bigger + a0, a1 = ranked_tilings[i] + b0, b1 = ranked_tilings[0] + assert V.graph.sizevars.size_hint(a1 - b1) > 0 + if V.graph.sizevars.statically_known_multiple_of(a1, b1): + tiling = (a0, FloorDiv(a1, b1), b1) + ranked_tilings = [tiling] + ranked_tilings + break # only 1 choice for now + + if len(ranked_tilings) > 1: + perf_hint_log.info("possibly bad tiling: %s", ranked_tilings) + + for tiled_groups in ranked_tilings: + new_groups = (*tiled_groups, reduction_numel) + if all( + TritonKernel.is_compatible(new_groups, node.get_ranges()) + for node in node_schedule + if isinstance(node, scheduler.SchedulerNode) + ): + return new_groups + + return (numel, reduction_numel) + + def flush(self): + pass + + +@dataclasses.dataclass +class CandidateTiling: + tiling: List[sympy.Expr] + score: int # higher is better + name: str = None + + @staticmethod + def is_good_size(s): + """Somewhat arbitrary heuristic used to boost scores for some sizes""" + s = V.graph.sizevars.size_hint(s) + return s >= 32 and (s % 32 == 0) + + +class DisableReduction: + """ + Marker to invoke `kernel.disable_reduction()`. This closes a + reduction loop and allows for pointwise ops to occur on the output + of a reduction. + """ + + +class EnableReduction: + """ + Marker to end a DisableReduction block. + """ + + @staticmethod + def filter(node_schedule): + """ + Get the nodes from node_schedule skipping those in a + DisableReduction block. 
+ """ + disabled = False + for node in node_schedule: + if node in (EnableReduction, DisableReduction): + # Don't tile stuff outside the main reduction loop + disabled = node is DisableReduction + elif disabled: + pass + else: + yield node + + +class CantSplit(Exception): + pass diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py new file mode 100644 index 0000000000000000000000000000000000000000..865a01f440ceb9dbcc867f8d454b8f0eb8c1dfc2 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py @@ -0,0 +1,234 @@ +import itertools +from collections import defaultdict +from dataclasses import dataclass +from typing import List, Tuple + +from .. import metrics +from ..utils import ceildiv +from ..virtualized import V +from .common import IndentedBuffer, Kernel +from .triton import TritonKernel +from .triton_utils import config_of, signature_to_meta + + +@dataclass +class PartitionState: + partitions: List[Tuple] + cur_partition: List[Tuple] + cur_count: int + + def finalize(self): + if self.cur_partition: + self.partitions.append(self.cur_partition) + + +class ForeachKernel(Kernel): + MAX_NUM_ARGS = 250 # number where I would no longer get triton errors + + @staticmethod + def _update_partition(partition_state, node_rw_count, node_info): + if partition_state.cur_count + node_rw_count > ForeachKernel.MAX_NUM_ARGS: + partition_state.partitions.append(partition_state.cur_partition) + partition_state.cur_partition = [node_info] + partition_state.cur_count = node_rw_count + else: + partition_state.cur_count += node_rw_count + partition_state.cur_partition.append(node_info) + + @staticmethod + def horizontal_partition(subkernel_nodes, triton_scheduling): + """Generates a list of lists of node info tuples which consist of (fused_nodes, tiling, numel, rnumel) + for each subkernel node where each sublist is 
guaranteed to not exceed CUDA limits for number of args + (read/writes) and to have the same 2D or 1D blocking strategy.""" + assert len(subkernel_nodes) >= 1 + + partition_state_1d = PartitionState([], [], 0) + yelem_to_partition_state_2d = defaultdict(lambda: PartitionState([], [], 0)) + + for node in subkernel_nodes: + fused_nodes = node.get_nodes() + _, (numel, rnumel) = max( + fused_nodes, key=lambda x: int(x.is_reduction()) + ).group + tiled_groups = triton_scheduling.select_tiling(fused_nodes, numel, rnumel) + node_info = fused_nodes, tiled_groups, numel, rnumel + + read_writes = node.read_writes + read_write_count = len(read_writes.reads) + len(read_writes.writes) + + if tiled_groups[1] == 1: + ForeachKernel._update_partition( + partition_state_1d, read_write_count, node_info + ) + else: + y_elem = tiled_groups[0] + partition_state_2d = yelem_to_partition_state_2d[y_elem] + ForeachKernel._update_partition( + partition_state_2d, read_write_count, node_info + ) + + partition_state_1d.finalize() + all_partitions = partition_state_1d.partitions + for partition_state_2d in yelem_to_partition_state_2d.values(): + partition_state_2d.finalize() + all_partitions.extend(partition_state_2d.partitions) + + return all_partitions + + def __init__(self): + super().__init__() + self.blocking_2d = False + self.block_size_1d = 1024 # Try tuning this value + self.block_size_2d = 32 + self.num_warps = 8 + self.sub_kernels = [] + self.iter_vars_count = itertools.count() + self.x_block_count = 0 + self.y_block_count = 0 + + def get_block_size(self): + if self.blocking_2d: + return self.block_size_2d + else: + return self.block_size_1d + + @staticmethod + def codegen_pid_offsets(code, block_count, lower_bound, prefix): + if block_count == 0: + code.splice(f"{prefix}pid_offset = {prefix}pid") + else: + code.splice(f"{prefix}pid_offset = {prefix}pid - {lower_bound}") + + def codegen_pid_range(self, code, x_elems): + num_x_blocks = ceildiv(x_elems, self.get_block_size()) + 
upper_bound_x_pid = self.x_block_count + num_x_blocks + lower_bound_x_pid = self.x_block_count + + if self.x_block_count == 0: + cond = "if" + else: + cond = "elif" + + x_pid_bounds_check = ( + f"xpid >= {lower_bound_x_pid} and xpid < {upper_bound_x_pid}" + ) + code.splice(f"{cond} {x_pid_bounds_check}:") + + with code.indent(): + ForeachKernel.codegen_pid_offsets( + code, num_x_blocks, lower_bound_x_pid, "x" + ) + self.x_block_count += num_x_blocks + + def create_sub_kernel(self, *groups, index_dtype, mutations, reduction_hint): + sub_kernel = TritonKernel( + *groups, + index_dtype=index_dtype, + mutations=mutations, + pid_cache={ + "tl.program_id(0)": "xpid_offset", + "tl.program_id(1)": "ypid", + }, + reduction_hint=reduction_hint, + ) + if self.blocking_2d: + assert len(groups) == 3 + + self.blocking_2d |= groups[1] != 1 and len(groups) == 3 + metrics.generated_kernel_count -= 1 + sub_kernel.args = self.args + sub_kernel.iter_vars_count = self.iter_vars_count + sub_kernel.cse.iter_buffer_ids = self.cse.iter_buffer_ids + self.sub_kernels.append(sub_kernel) + return sub_kernel + + def jit_line(self): + can_use_32bit = all(k.index_dtype == "tl.int32" for k in self.sub_kernels) + index_dtype = "tl.int32" if can_use_32bit else "tl.int64" + _, _, signature = self.args.python_argdefs() + triton_meta = { + "signature": signature_to_meta(signature, size_dtype=can_use_32bit), + "device": V.graph.scheduler.current_device.index, + "device_type": V.graph.scheduler.current_device.type, + "constants": {}, + } + triton_meta["configs"] = [config_of(signature)] + return ( + f"@foreach(num_warps={self.num_warps}, meta={triton_meta!r})\n" + + "@triton.jit" + ) + + def grid(self): + return ( + self.x_block_count, + ceildiv(int(self.sub_kernels[0].numels[0]), self.block_size_2d) + if self.blocking_2d + else 1, + 1, + ) + + def codegen_kernel(self, name=None): + code = IndentedBuffer() + + code.splice( + """ + import triton + import triton.language as tl + from 
torch._inductor.triton_heuristics import foreach + from torch._inductor.utils import instance_descriptor + from torch._inductor import triton_helpers + """ + ) + argdefs, _, _ = self.args.python_argdefs() + code.writeline(self.jit_line()) + code.writeline(f"def {name or 'KERNEL_NAME'}({', '.join(argdefs)}):") + + with code.indent(): + code.splice("xpid = tl.program_id(0)") + if self.blocking_2d: + code.splice("ypid = tl.program_id(1)") + code.splice(f"XBLOCK: tl.constexpr = {self.block_size_2d}") + code.splice(f"YBLOCK: tl.constexpr = {self.block_size_2d}") + else: + code.splice(f"XBLOCK: tl.constexpr = {self.block_size_1d}") + + for sub_kernel in self.sub_kernels: + assert len(sub_kernel.numels) <= 3 + # TODO mlazos: support dynamic shapes + numel_ind = 0 if not self.blocking_2d else 1 + self.codegen_pid_range(code, int(sub_kernel.numels[numel_ind])) + with code.indent(): + if self.blocking_2d: + code.splice(f"ynumel = {sub_kernel.numels[0]}") + code.splice(f"xnumel = {sub_kernel.numels[1]}") + else: + code.splice(f"xnumel = {sub_kernel.numels[0]}") + + sub_kernel.codegen_body() + code.splice(sub_kernel.body) + + code.splice("else:") + with code.indent(): + code.splice("pass") + + return code.getvalue() + + def call_kernel(self, code, name: str): + _, call_args, _ = self.args.python_argdefs() + # dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar + for i in range(len(call_args)): + if V.graph.is_unspec_arg(call_args[i]): + call_args[i] = call_args[i] + ".item()" + if V.graph.cpp_wrapper: + V.graph.wrapper_code.generate_kernel_call( + name, call_args, device_index=V.graph.scheduler.current_device.index + ) + else: + # TODO: refactor generate_kernel_call + call_args_str = ", ".join(call_args) + stream_name = code.write_get_cuda_stream( + V.graph.scheduler.current_device.index + ) + code.writeline( + f"{name}.run({call_args_str}, grid=({self.grid()}), stream={stream_name})" + ) diff --git 
def signature_of(arg, *, size_dtype: str):
    """Return the triton type string for a kernel argument.

    Tensor args use triton's dtype mapping (unspec 0d-tensor args are passed
    as scalars, with fp16/bf16 widened to fp32); size args map from the
    requested index dtype.
    """
    from triton.runtime.jit import JITFunction

    if isinstance(arg, TensorArg):
        tye = JITFunction._type_of(arg.dtype)
        if not V.graph.is_unspec_arg(arg.buffer):
            return tye
        # had unwrapped 0d tensor as scalar
        scalar_tye = tye.lstrip("*")
        return "fp32" if scalar_tye in ("fp16", "bf16") else scalar_tye
    if isinstance(arg, SizeArg):
        if size_dtype == "tl.int32":
            return "i32"
        if size_dtype == "tl.int64":
            return "i64"
        raise NotImplementedError(f"unhandled size_dtype {size_dtype}")
    raise NotImplementedError(f"unhandled {type(arg)}: {arg}")
def config_of(args):
    """Build the triton instance_descriptor for `args`: which argument
    indices are statically known divisible by 16 (tensors via buffer
    alignment, sizes via symbolic reasoning) and which by 8.
    """
    from ..compile_fx import ALIGNMENT

    def _size_arg_multiple_of(x, multiple):
        # Shared SizeArg logic previously duplicated in both predicates.
        # TODO(voz): These are kinda redundant, if we can solve out
        # statically_known_multiple_of with _maybe_evaluate_static...
        if x.name.startswith("load_seed_offset"):
            return False
        return V.graph.sizevars.statically_known_multiple_of(x.expr, multiple)

    def is_aligned(x):
        if isinstance(x, TensorArg):
            return x.buffer not in V.graph.unaligned_buffers
        if isinstance(x, SizeArg):
            return _size_arg_multiple_of(x, ALIGNMENT)
        raise NotImplementedError(f"unhandled {type(x)}: {x}")

    def is_aligned_8(x):
        """
        Roughly follow triton code here:
        https://github.com/openai/triton/blob/5282ed890d453e10b9ee30076ef89115dd197761/python/triton/runtime/jit.py#L208-L222
        """
        if isinstance(x, TensorArg):
            return False
        if isinstance(x, SizeArg):
            return _size_arg_multiple_of(x, 8)
        raise NotImplementedError(f"unhandled {type(x)}: {x}")

    if config.triton.divisible_by_16:
        divisible_by_16 = tuple(i for i, arg in enumerate(args) if is_aligned(arg))
    else:
        divisible_by_16 = ()
    divisible_by_8 = tuple(i for i, arg in enumerate(args) if is_aligned_8(arg))
    return instance_descriptor(divisible_by_16, (), (), divisible_by_8)
def is_int(s: str):
    """Return True if *s* parses as a Python int literal, else False."""
    try:
        int(s)
        return True
    except ValueError:
        return False
def convert_return_type(python_type):
    """Map a schema return type to its C++ spelling.

    Only ``Tensor`` returns are supported for cpp_wrapper today; anything
    else trips the assert below.
    """
    # TODO: only support Tensor as func return type for now
    # TODO: support alias
    assert (
        python_type == "Tensor"
    ), f"only support tensor output for cpp_wrapper, but receive type {python_type}"
    return "at::" + python_type
class MemoryPlanningState:
    """Pool of recently freed buffers, keyed by reuse key, consulted during
    the memory-planning pass to convert allocations into reuses."""

    def __init__(self):
        super().__init__()
        self.reuse_pool: Dict[Any, List[FreeIfNotReusedLine]] = collections.defaultdict(
            list
        )

    def __contains__(self, key):
        # .get (not []) so membership tests never create empty pool entries
        # in the defaultdict.
        return bool(self.reuse_pool.get(key, None))

    def pop(self, key) -> "FreeIfNotReusedLine":
        """Take the most recently freed line for *key*; it must be unused."""
        line = self.reuse_pool[key].pop()
        assert not line.is_reused
        return line

    def push(self, key, item: "FreeIfNotReusedLine"):
        """Record a not-yet-reused free line under *key*."""
        assert not item.is_reused
        self.reuse_pool[key].append(item)
@dataclasses.dataclass
class AllocateLine(MemoryPlanningLine):
    node: ir.Buffer
    can_reuse: bool = True

    def plan(self, state: MemoryPlanningState):
        """First pass: drop allocations for removed buffers, and turn an
        allocation into a reuse of a recently freed buffer with the same
        reuse key when allowed."""
        if self.node.get_name() in V.graph.removed_buffers:
            return NullLine(self.wrapper)

        # try to reuse a recently freed buffer
        key = buffer_reuse_key(self.node)
        if config.allow_buffer_reuse and self.can_reuse and key in state:
            freed = state.pop(key)
            freed.is_reused = True
            return ReuseLine(self.wrapper, freed.node, self.node)

        return self

    def codegen(self, code: IndentedBuffer):
        """Second pass: emit the actual allocation statement."""
        assert self.node.get_name() not in V.graph.removed_buffers
        code.writeline(self.wrapper.make_buffer_allocation(self.node))
@dataclasses.dataclass
class ReuseLine(MemoryPlanningLine):
    node: ir.Buffer
    reused_as: ir.Buffer

    def plan(self, state: MemoryPlanningState):
        """If the source buffer was removed, its reuse target must have been
        removed too and the whole line disappears."""
        src_name = self.node.get_name()
        dst_name = self.reused_as.get_name()
        if src_name in V.graph.removed_buffers:
            assert dst_name in V.graph.removed_buffers
            return NullLine(self.wrapper)
        assert dst_name not in V.graph.removed_buffers
        return self

    def codegen(self, code: IndentedBuffer):
        """Emit the aliasing/reinterpret statement for the reuse."""
        assert self.node.get_name() not in V.graph.removed_buffers
        assert self.reused_as.get_name() not in V.graph.removed_buffers
        code.writeline(self.wrapper.make_buffer_reuse(self.node, self.reused_as))
+ """ + + def __init__(self): + super().__init__() + self._names_iter = count() + self.header = IndentedBuffer() + self.prefix = IndentedBuffer() + self.wrapper_call = IndentedBuffer() + self.src_to_kernel = {} + self.kenel_numel_expr = set() + self.lines = [] + self.declare = "" + self.ending = "" + self.open_bracket = "[" + self.closed_bracket = "]" + self.comment = "#" + self.namespace = "" + self.none_str = "None" + self.optional_tensor_str = "None" + self.size = "size()" + self.stride = "stride()" + self.first_device_guard = True + self.supports_intermediate_hooks = True + self.expr_printer = pexpr + + self.write_header() + self.write_prefix() + + for name, hashed in V.graph.constant_reprs.items(): + # include a hash so our code cache gives different constants different files + self.write_constant(name, hashed) + + self.allocated = set() + self.freed = set() + + # maps from reusing buffer to reused buffer + self.reuses = dict() + + self.write_get_cuda_stream = functools.lru_cache(None)( # type: ignore[assignment] + self.write_get_cuda_stream + ) + + @functools.lru_cache(None) + def add_import_once(line): + self.header.writeline(line) + + self.add_import_once = add_import_once + self._metas = {} + + def write_constant(self, name, hashed): + self.header.writeline(f"{name} = None # {hashed}") + + def write_header(self): + self.header.splice( + f""" + from ctypes import c_void_p, c_long + import torch + import math + import random + import os + import tempfile + from math import inf, nan + from torch._inductor.hooks import run_intermediate_hooks + from torch._inductor.utils import maybe_profile + + from torch import empty_strided, device + from {codecache.__name__} import AsyncCompile + from torch._inductor.select_algorithm import extern_kernels + + aten = torch.ops.aten + assert_size_stride = torch._C._dynamo.guards.assert_size_stride + reinterpret_tensor = torch.ops.inductor._reinterpret_tensor + async_compile = AsyncCompile() + + """ + ) + + @cache_on_self + 
def write_triton_header_once(self): + self.header.splice( + """ + import triton + import triton.language as tl + from torch._inductor.triton_heuristics import grid, start_graph, end_graph + from torch._C import _cuda_getCurrentRawStream as get_cuda_stream + """ + ) + + def add_meta_once(self, meta): + meta = repr(meta) + if meta not in self._metas: + var = f"meta{len(self._metas)}" + self._metas[meta] = var + self.header.writeline(f"{var} = {meta}") + return self._metas[meta] + + @cache_on_self + def get_output_refs(self): + return [x.codegen_reference() for x in V.graph.graph_outputs] + + def mark_output_type(self): + return + + def codegen_input_size_asserts(self): + for name, buf in V.graph.graph_inputs.items(): + if isinstance(buf, sympy.Expr): + continue + + # comparing strides for 0 size tensor is tricky. Ignore them for now. + if sympy_product(buf.get_size()) == 0: + continue + size = self.codegen_shape_tuple(buf.get_size()) + stride = self.codegen_shape_tuple(buf.get_stride()) + self.prefix.writeline(f"assert_size_stride({name}, {size}, {stride})") + + def write_prefix(self): + self.prefix.splice( + """ + + async_compile.wait(globals()) + del async_compile + + def call(args): + """ + ) + with self.prefix.indent(): + if config.triton.debug_sync_graph: + self.prefix.writeline("torch.cuda.synchronize()") + inp_len = len(V.graph.graph_inputs.keys()) + if inp_len != 0: + lhs = f"{', '.join(V.graph.graph_inputs.keys())}{'' if inp_len != 1 else ','}" + self.prefix.writeline(f"{lhs} = args") + self.prefix.writeline("args.clear()") + + self.codegen_inputs(self.prefix, V.graph.graph_inputs) + if config.size_asserts: + self.codegen_input_size_asserts() + + def write_get_cuda_stream(self, index): + self.write_triton_header_once() + name = f"stream{index}" + self.writeline(f"{name} = get_cuda_stream({index})") + return name + + def next_kernel_suffix(self): + return f"{next(self._names_iter)}" + + def codegen_device_guard_enter(self, device_idx): + self.writeline( + 
EnterCudaDeviceContextManagerLine(device_idx, self.first_device_guard) + ) + self.first_device_guard = False + + def codegen_device_guard_exit(self): + self.writeline(ExitCudaDeviceContextManagerLine()) + + def generate_return(self, output_refs): + if output_refs: + self.wrapper_call.writeline("return (" + ", ".join(output_refs) + ", )") + else: + self.wrapper_call.writeline("return ()") + + def generate_end(self, result): + return + + def generate_extern_kernel_alloc(self, output_name, kernel, args, origin_node): + self.writeline( + f"{self.declare}{output_name} = {kernel}({', '.join(args)}){self.ending}" + ) + if ( + self.supports_intermediate_hooks + and config.generate_intermediate_hooks + and origin_node is not None + ): + counters["inductor"]["intermediate_hooks"] += 1 + self.writeline( + f"run_intermediate_hooks({origin_node.name!r}, {output_name})" + ) + + def generate_extern_kernel_out(self, output_view, codegen_reference, args, kernel): + if output_view: + args.append(f"out={output_view.codegen_reference()}") + else: + args.append(f"out={codegen_reference}") + self.writeline(f"{kernel}({', '.join(args)})") + + def generate_scatter_fallback( + self, output, inputs, kernel, fn, src_is_tensor, reduce, kwargs + ): + line = f"{kernel}({','.join(map(str, inputs))}" + if kernel == "aten.scatter_": + if reduce: + line += f", reduce={repr(reduce)}" + else: + line += ", ".join([""] + kwargs) + line += f"){self.ending}" + self.writeline(line) + + def generate_extern_kernel_alloc_and_find_schema_if_needed( + self, + name, + kernel, + codegen_args, + cpp_op_schema, + cpp_kernel_key, + cpp_kernel_overload_name="", + ): + self.writeline(f"{name} = {kernel}({', '.join(codegen_args)})") + + @dynamo_timed + def generate(self): + result = IndentedBuffer() + result.splice(self.header) + + out_names = V.graph.get_output_names() + with contextlib.ExitStack() as stack: + stack.enter_context(self.wrapper_call.indent()) + if config.profiler_mark_wrapper_call: + 
self.generate_profiler_mark_wrapper_call(stack) + if config.profile_bandwidth: + self.write_triton_header_once() + self.wrapper_call.writeline("start_graph()") + + while ( + self.lines + and isinstance(self.lines[-1], MemoryPlanningLine) + # TODO: this seems legit, NullLine has no node + and self.lines[-1].node.name not in out_names # type: ignore[attr-defined] + ): + # these lines will be pointless + self.lines.pop() + + # codegen allocations in two passes + planning_state = MemoryPlanningState() + for i in range(len(self.lines)): + if isinstance(self.lines[i], MemoryPlanningLine): + self.lines[i] = self.lines[i].plan(planning_state) + + device_cm_stack = contextlib.ExitStack() + for line in self.lines: + if isinstance(line, MemoryPlanningLine): + line.codegen(self.wrapper_call) + elif isinstance( + line, + ( + EnterCudaDeviceContextManagerLine, + ExitCudaDeviceContextManagerLine, + ), + ): + line.codegen(self.wrapper_call, device_cm_stack) + else: + self.wrapper_call.writeline(line) + + output_refs = self.get_output_refs() + self.mark_output_type() + if config.triton.debug_sync_graph: + self.wrapper_call.writeline("torch.cuda.synchronize()") + + if config.profile_bandwidth: + self.wrapper_call.writeline("end_graph()") + + self.generate_return(output_refs) + + self.append_precomputed_sizes_to_prefix() + result.splice(self.prefix) + + with result.indent(): + result.splice(self.wrapper_call) + + self.generate_end(result) + + self.add_benchmark_harness(result) + + return result.getvaluewithlinemap() + + def codegen_inputs(self, code: IndentedBuffer, graph_inputs: Dict[str, ir.Buffer]): + """Assign all symbolic shapes to locals""" + + @functools.lru_cache(None) + def sizeof(name): + code.writeline( + f"{self.declare}{name}_size = {name}.{self.size}{self.ending}" + ) + return f"{name}_size" + + @functools.lru_cache(None) + def strideof(name): + code.writeline( + f"{self.declare}{name}_stride = {name}.{self.stride}{self.ending}" + ) + return f"{name}_stride" + + # 
Assign all symbolic shapes needed to local variables + needed = set(V.graph.sizevars.var_to_val.keys()) - set( + V.graph.sizevars.replacements.keys() + ) + + def is_expr(x): + return isinstance(x[1], sympy.Expr) + + graph_inputs_expr = list(filter(is_expr, graph_inputs.items())) + graph_inputs_tensors = list( + filter(lambda x: not is_expr(x), graph_inputs.items()) + ) + + for name, shape in graph_inputs_expr: + shape = V.graph.sizevars.simplify(shape) + if shape in needed: + needed.remove(shape) + code.writeline(f"{self.declare}{shape} = {name}{self.ending}") + + for name, value in graph_inputs_tensors: + shapes = value.get_size() + for dim, shape in enumerate(shapes): + shape = V.graph.sizevars.simplify(shape) + if shape in needed: + needed.remove(shape) + code.writeline( + f"{self.declare}{shape} = {sizeof(name)}[{dim}]{self.ending}" + ) + + for name, value in graph_inputs_tensors: + shapes = value.get_stride() + for dim, shape in enumerate(shapes): + shape = V.graph.sizevars.simplify(shape) + if shape in needed: + needed.remove(shape) + code.writeline( + f"{self.declare}{shape} = {strideof(name)}[{dim}]{self.ending}" + ) + + def append_precomputed_sizes_to_prefix(self): + with self.prefix.indent(): + for sym, expr in V.graph.sizevars.inv_precomputed_replacements.items(): + self.prefix.writeline( + f"{self.declare}{sym} = {self.expr_printer(expr)}{self.ending}" + ) + + def codegen_python_sizevar(self, x: Expr) -> str: + return pexpr(V.graph.sizevars.simplify(x)) + + def codegen_sizevar(self, x: Expr) -> str: + return self.codegen_python_sizevar(x) + + def codegen_tuple_access(self, basename: str, index: str) -> str: + return f"{basename}[{index}]" + + def codegen_python_shape_tuple(self, shape: Tuple[Expr, ...]) -> str: + parts = list(map(self.codegen_python_sizevar, shape)) + if len(parts) == 0: + return "()" + if len(parts) == 1: + return f"({parts[0]}, )" + return f"({', '.join(parts)})" + + def codegen_shape_tuple(self, shape: Tuple[Expr, ...]) -> str: + 
return self.codegen_python_shape_tuple(shape) + + def benchmark_compiled_module(self, output): + def add_fake_input(name, shape, stride, device, dtype): + output.writeline( + f"{name} = rand_strided(" + f"{self.codegen_python_shape_tuple(shape)}, " + f"{self.codegen_python_shape_tuple(stride)}, " + f"device='{device}', dtype={dtype})" + ) + + def add_expr_input(name, val): + output.writeline(f"{name} = {val}") + + output.writelines( + ["", "", "def benchmark_compiled_module(times=10, repeat=10):"] + ) + with output.indent(): + output.splice( + """ + from torch._dynamo.testing import rand_strided + from torch._inductor.utils import print_performance + """, + strip=True, + ) + + for name, value in V.graph.constants.items(): + # all the constants are global variables, that's why we need + # these 'global var_name' lines + output.writeline(f"global {name}") + add_fake_input( + name, value.size(), value.stride(), value.device, value.dtype + ) + + for name, value in V.graph.graph_inputs.items(): + if isinstance(value, sympy.Expr): # Don't need to add symbolic + add_expr_input(name, V.graph.sizevars.size_hint(value)) + else: + shape = [V.graph.sizevars.size_hint(x) for x in value.get_size()] + stride = [V.graph.sizevars.size_hint(x) for x in value.get_stride()] + add_fake_input( + name, shape, stride, value.get_device(), value.get_dtype() + ) + + call_str = f"call([{', '.join(V.graph.graph_inputs.keys())}])" + output.writeline( + f"return print_performance(lambda: {call_str}, times=times, repeat=repeat)" + ) + + def add_benchmark_harness(self, output): + """ + Append a benchmark harness to generated code for debugging + """ + if not config.benchmark_harness: + return + + self.benchmark_compiled_module(output) + + output.writelines(["", "", 'if __name__ == "__main__":']) + with output.indent(): + output.writelines( + [ + "from torch._inductor.wrapper_benchmark import compiled_module_main", + f"compiled_module_main('{get_benchmark_name()}', benchmark_compiled_module)", + ] 
+ ) + + def define_kernel( + self, name: str, kernel: str, metadata: Optional[str] = None, cuda=True + ): + metadata_comment = f"{metadata}\n" if metadata else "" + self.header.splice(f"\n\n{metadata_comment}{name} = {kernel}") + + def generate_numel_expr(self, kernel_name: str, tree): + expr = f"{kernel_name}_{tree.prefix}numel" + if expr not in self.kenel_numel_expr: + self.kenel_numel_expr.add(expr) + self.writeline( + f"{self.declare}{expr} = {self.expr_printer(tree.numel)}{self.ending}" + ) + else: + self.writeline(f"{expr} = {self.expr_printer(tree.numel)}{self.ending}") + # We can get symbolic expressions here, like s0*64 + # It is fine to have them here, but we need to handle them correctly as their own type + # This is tricky to do, so we wrap in a custom type, distinct from scalars, but also from sympy* + # scalars as well. + # This is handled in `generate_args_decl` which has a correct comment of: TODO: only works for + # constant now, need type info. I agree, this needs type info, and while this is not true type info + # it suffices as a type hint for the purposes of producing the correct code for this type. 
+ return SymbolicCallArg(expr) + + def wrap_kernel_call(self, name, call_args): + return f"{name}({', '.join(call_args)}){self.ending}" + + def generate_profiler_mark_wrapper_call(self, stack): + self.wrapper_call.writeline("from torch.profiler import record_function") + self.wrapper_call.writeline( + f"with record_function('graph_{V.graph.graph_id}_inductor_wrapper_call'):" + ) + stack.enter_context(self.wrapper_call.indent()) + + def generate_kernel_call( + self, name, call_args, grid=None, device_index=None, cuda=True + ): + if cuda: + call_args_str = ", ".join(pexpr(item) for item in call_args) + grid_str = ", ".join(pexpr(item) for item in grid) + stream_name = self.write_get_cuda_stream( + V.graph.scheduler.current_device.index + ) + self.writeline( + f"{name}.run({call_args_str}, grid=grid({grid_str}), stream={stream_name})" + ) + else: + self.writeline(self.wrap_kernel_call(name, call_args)) + + def writeline(self, line): + self.lines.append(line) + + def enter_context(self, ctx): + self.lines.append(LineContext(ctx)) + + def val_to_arg_str(self, s): + if isinstance(s, SymTypes): + return pexpr(sympy.expand(repr(s))) + elif isinstance(s, sympy.Expr): + return pexpr(s) + elif isinstance(s, (tuple, list)): + + @dataclasses.dataclass + class Shim: + ref: Any + + def __repr__(self): + return self.ref + + return repr(type(s)(Shim(self.val_to_arg_str(a)) for a in s)) + elif isinstance(s, torch._ops.OpOverload): + return _get_qualified_name(s) + else: + return repr(s) + + # The following methods are for memory management + def make_buffer_allocation(self, buffer): + device = buffer.get_device() + dtype = buffer.get_dtype() + shape = tuple(buffer.get_size()) + stride = tuple(buffer.get_stride()) + return ( + f"{buffer.get_name()} = empty_strided(" + f"{self.codegen_shape_tuple(shape)}, " + f"{self.codegen_shape_tuple(stride)}, " + f"device='{device.type}', dtype={dtype})" + ) + + def make_buffer_free(self, buffer): + return f"del {buffer.get_name()}" + + def 
make_buffer_reuse(self, old, new): + assert old.get_dtype() == new.get_dtype() + del_line = "" + if old.get_name() not in V.graph.get_output_names(): + del_line = f"; {self.make_buffer_free(old)}" + if old.get_size() == new.get_size() and old.get_stride() == new.get_stride(): + return f"{self.declare}{new.get_name()} = {old.get_name()}{del_line} {self.comment} reuse" + + return ( + f"{self.declare}{new.get_name()} = reinterpret_tensor(" + f"{old.get_name()}, " + f"{self.codegen_shape_tuple(new.get_size())}, " + f"{self.codegen_shape_tuple(new.get_stride())}){del_line} {self.comment} reuse" + ) + + def codegen_deferred_allocation(self, name, layout): + self.writeline( + DeferredLine( + name, + f"{self.declare}{name} = {layout.view.codegen_reference()}{self.ending} {self.comment} alias", + ) + ) + + def use_preallocated_ouput(self, buffer): + # outputs are passed-in in the AOT mode + return ( + V.graph.aot_mode + and buffer + and buffer.get_name() in set(V.graph.get_output_names()) + ) + + def codegen_allocation(self, buffer): + name = buffer.get_name() + + if name in V.graph.removed_buffers or name in self.allocated: + return + self.allocated.add(name) + if isinstance( + buffer, + (ir.ExternKernelAlloc, ir.MultiOutput), + ): + return + + layout = buffer.get_layout() + if isinstance(layout, ir.MutationLayout): + return + if isinstance(layout, ir.AliasedLayout): + assert isinstance( + layout.view, ir.ReinterpretView + ), f"unexpected {type(layout.view)}: {layout.view}" + if not layout.maybe_guard_aligned(): + V.graph.unaligned_buffers.add(name) + self.codegen_allocation(layout.view.data) + self.codegen_deferred_allocation(name, layout) + return + + self.writeline( + AllocateLine( + self, + buffer, + not self.use_preallocated_ouput(buffer), + ) + ) + + def codegen_free(self, buffer): + name = buffer.get_name() + + # can be freed but not reused + if isinstance(buffer, ir.InputBuffer): + self.writeline(self.make_buffer_free(buffer)) + return + + if not 
self.can_reuse(buffer): + return + self.freed.add(name) + + layout = buffer.get_layout() + if isinstance(layout, (ir.AliasedLayout, ir.MultiOutputLayout)): + self.writeline(self.make_buffer_free(buffer)) + return + + self.writeline(FreeIfNotReusedLine(self, buffer)) + + def can_reuse(self, input_buffer, output_buffer=None): + name = input_buffer.get_name() + if ( + name in V.graph.removed_buffers + or name in V.graph.graph_inputs + or name in V.graph.constants + or name in self.freed + or self.use_preallocated_ouput(output_buffer) + ): + return False + + return True + + def did_reuse(self, buffer, reused_buffer): + # Check whether a given buffer was reused by a possible reuser in the wrapper codegen + # Can be consulted from inside ir codegen, e.g. to determine whether a copy is needed + return ( + buffer.get_name() in self.reuses + and self.reuses[buffer.get_name()] == reused_buffer.get_name() + ) + + def codegen_inplace_reuse(self, input_buffer, output_buffer): + assert buffer_reuse_key(input_buffer) == buffer_reuse_key(output_buffer) + self.codegen_allocation(input_buffer) + self.freed.add(input_buffer.get_name()) + self.allocated.add(output_buffer.get_name()) + self.reuses[output_buffer.get_name()] = input_buffer.get_name() + self.writeline(ReuseLine(self, input_buffer, output_buffer)) + + +class CppWrapperCodeGen(WrapperCodeGen): + """ + Generates cpp wrapper for running on CPU and calls cpp kernels + """ + + def __init__(self): + super().__init__() + from ..ir import OptionalTensor + + self.declare = "auto " + self.ending = ";" + self.open_bracket = "{" + self.closed_bracket = "}" + self.comment = "//" + self.namespace = "at::" + self.none_str = "at::Tensor()" + self.optional_tensor_str = repr(OptionalTensor()) + self.extern_call_ops = set() + self.size = "sizes()" + self.stride = "strides()" + self.call_func_name = "inductor_entry_cpp" + self.cuda = False + self.supports_intermediate_hooks = False + self.outputs_need_copy = set() + self.resized_outputs = {} 
+ + from .cpp import cexpr + + self.expr_printer = cexpr + + def write_constant(self, name, hashed): + # include a hash so our code cache gives different constants different files + self.header.writeline(f"// {name} {hashed}") + + def write_header(self): + if V.graph.aot_mode: + with open( + os.path.join(os.path.dirname(__file__), "aot_inductor_interface.cpp") + ) as f: + self.header.splice(f.read()) + else: + self.header.splice( + """ + import torch + from torch._inductor.codecache import CppWrapperCodeCache + + cpp_wrapper_src = ( + ''' + """ + ) + + self.header.splice( + """ + #include + #define reinterpret_tensor torch::inductor::_reinterpret_tensor + """ + ) + + def mark_output_type(self): + # mark output type to unwrap tensor back to python scalar + from ..ir import ShapeAsConstantBuffer + + output_is_tensor = dict() + for idx, x in enumerate(V.graph.graph_outputs): + if isinstance(x, ShapeAsConstantBuffer): + output_is_tensor[idx] = False + else: + output_is_tensor[idx] = True + + self.output_is_tensor = output_is_tensor + + def write_prefix(self): + if V.graph.aot_mode: + self.prefix.writeline("namespace torch {") + self.prefix.writeline("namespace aot_inductor {") + + def write_wrapper_decl(self): + inputs_len = len(V.graph.graph_inputs.keys()) + if V.graph.aot_mode: + self.prefix.splice( + """ + void AOTInductorModel::run_impl( + const std::vector& args, + std::vector& outputs, + cudaStream_t stream) { + """ + ) + else: + self.prefix.splice( + f"""std::vector {self.call_func_name}(const std::vector& args) {{""" + ) + with self.prefix.indent(): + if inputs_len != 0: + for idx, input_key in enumerate(V.graph.graph_inputs.keys()): + # unwrap input tensor back to scalar + if isinstance(V.graph.graph_inputs[input_key], sympy.Expr): + from ..graph import may_get_constant_buffer_dtype + from .cpp import DTYPE_TO_CPP + + dtype = may_get_constant_buffer_dtype( + V.graph.graph_inputs[input_key] + ) + assert ( + dtype is not None + ), "Fails to get the dtype of the 
sympy.Expr" + cpp_dtype = DTYPE_TO_CPP[dtype] + self.prefix.writeline( + f"{cpp_dtype} {input_key} = args[{idx}].item<{cpp_dtype}>();" + ) + else: + self.prefix.writeline(f"at::Tensor {input_key} = args[{idx}];") + + assert all( + isinstance(v, torch.Tensor) for v in list(V.graph.constants.values()) + ), "Expect all constants to be Tensor" + for idx, constants_key in enumerate(V.graph.constants.keys()): + constants_idx = inputs_len + idx + self.prefix.writeline( + f"at::Tensor {constants_key} = args[{constants_idx}];" + ) + + self.codegen_inputs(self.prefix, V.graph.graph_inputs) + + self.wrapper_call.splice( + """ + c10::optional optional_scalar; + c10::optional optional_string; + c10::optional optional_layout; + c10::optional optional_tensor; + torch::List> optional_list; + """ + ) + + def codegen_model_constructor(self): + """ + // Generated code example + AOTInductorModel::AOTInductorModel() + : AOTInductorModelBase(4, 1) { + inputs_info_[0].name = "linear.weight"; + inputs_info_[0].shape.reserve(2); + inputs_info_[0].shape.emplace_back(10, 10, nullptr); + inputs_info_[0].shape.emplace_back(64, 64, nullptr); + ... + outputs_info_[0].name = "output0"; + outputs_info_[0].shape.reserve(2); + outputs_info_[0].shape.emplace_back(32, 32, nullptr); + outputs_info_[0].shape.emplace_back(10, 10, nullptr); + } + """ + num_inputs = len(V.graph.graph_inputs) + num_outputs = len(V.graph.graph_outputs) + self.prefix.splice( + f""" + AOTInductorModel::AOTInductorModel() + : AOTInductorModelBase({num_inputs}, {num_outputs}) {{ + """ + ) + + with self.prefix.indent(): + for idx, name in enumerate(V.graph.graph_inputs.keys()): + # TODO: handle symbolic expressions later. 
+ assert not isinstance(V.graph.graph_inputs[name], sympy.Expr) + self.prefix.writeline(f"""inputs_info_[{idx}].name = "{name}";""") + self.prefix.writeline( + f"""inputs_info_[{idx}].dtype = "{V.graph.graph_inputs[name].get_dtype()}";""" + ) + sizes = V.graph.graph_inputs[name].get_size() + self.prefix.writeline( + f"inputs_info_[{idx}].shape.reserve({len(sizes)});" + ) + for size in sizes: + # FIXME: set the lower bound and the upper bound to be "size". + # Later, we should specify the correct range for dynamic dimentions. + self.prefix.writeline( + f"inputs_info_[{idx}].shape.emplace_back({size}, {size}, nullptr);" + ) + + for idx, output in enumerate(V.graph.graph_outputs): + # TODO: handle symbolic expressions later. + assert not isinstance(output, sympy.Expr) + self.prefix.writeline(f"""outputs_info_[{idx}].name = "output{idx}";""") + self.prefix.writeline( + f"""outputs_info_[{idx}].dtype = "{output.get_dtype()}";""" + ) + sizes = output.get_size() + self.prefix.writeline( + f"outputs_info_[{idx}].shape.reserve({len(sizes)});" + ) + for size in sizes: + # FIXME: set the lower bound and the upper bound to be "size". + # Later, we should specify the correct range for dynamic dimentions. + self.prefix.writeline( + f"outputs_info_[{idx}].shape.emplace_back({size}, {size}, nullptr);" + ) + + self.prefix.writeline("}") + + def generate(self): + if V.graph.aot_mode: + self.codegen_model_constructor() + self.write_wrapper_decl() + return super().generate() + + def define_kernel( + self, name: str, kernel: str, metadata: Optional[str] = None, cuda=False + ): + self.header.splice(f"\n{kernel}\n") + + def generate_return(self, output_refs): + # Output tensors are allocated by the AOT runtime. 
+ if V.graph.aot_mode: + for idx, output in enumerate(V.graph.graph_outputs): + if hasattr(output, "get_name"): + name = output.get_name() + if name in self.outputs_need_copy: + output_as_strided = output.codegen_reference() + self.wrapper_call.writeline( + f"outputs[{idx}].copy_({output_as_strided});" + ) + resize_to = self.resized_outputs.get(name, None) + if resize_to is not None: + resize_to_args = ", ".join( + self.expr_printer(d) for d in resize_to + ) + self.wrapper_call.writeline( + f"outputs[{idx}].resize_({{{resize_to_args}}});" + ) + self.wrapper_call.writeline("\n}") + else: + self.wrapper_call.writeline(f"return {{{', '.join(output_refs)}}};\n}}") + + def generate_end(self, result): + if V.graph.aot_mode: + result.writeline("} // namespace aot_inductor") + result.writeline("} // namespace inductor") + return + + result.writeline("'''\n)") + # get the hash of the wrapper code to name the extension + wrapper_call_hash = codecache.code_hash(result.getvalue()) + result.splice( + f""" + module = CppWrapperCodeCache.load(cpp_wrapper_src, '{self.call_func_name}', '{wrapper_call_hash}', {self.cuda}) + """ + ) + + # unwrap output tensor back to python scalar + if all(x for x in self.output_is_tensor.values()): + # If no ShapeAsConstantBuffer in the output, directly return the output as tensors + return_str = "return f(args_tensor)" + else: + outputs = [ + f"outputs[{i}]" if self.output_is_tensor[i] else f"outputs[{i}].item()" + for i in range(len(V.graph.graph_outputs)) + ] + outputs_str = f"[{', '.join(outputs)}]" + return_str = f""" + outputs = f(args_tensor) + return {outputs_str} + """ + + args_str = "args_tensor = [arg if isinstance(arg, torch.Tensor) else torch.tensor(arg) for arg in args]" + if V.graph.constants: + # Append constants to the input args for cpp wrapper. + # Python wrapper directly gets the value inside the wrapper call + # as a global variable passed when calling exec(code, mod.__dict__, mod.__dict__). 
+ # For cpp wrapper, we need to pass this python value to the inductor_entry_cpp function explicitly. + assert all( + isinstance(v, torch.Tensor) for v in list(V.graph.constants.values()) + ), "Expect all constants to be Tensor" + constants_str = f"[{', '.join(V.graph.constants.keys())}]" + args_str += f""" + constants_tensor = {constants_str} + args_tensor.extend(constants_tensor) + """ + + # Wrap the func to support setting result._boxed_call = True + result.splice( + f""" + def _wrap_func(f): + def g(args): + {args_str} + {return_str} + return g + call = _wrap_func(module.{self.call_func_name}) + """ + ) + + def generate_extern_kernel_out(self, output_view, codegen_reference, args, kernel): + if output_view: + output_as_strided = f"{output_view.codegen_reference()}" + output_name = f"{output_view.get_name()}_as_strided" + self.writeline(f"auto {output_name} = {output_as_strided};") + + args.insert(0, output_name) + else: + args.insert(0, f"{codegen_reference}") + self.writeline(self.wrap_kernel_call(kernel, args)) + + def generate_scatter_fallback( + self, output, inputs, kernel, fn, src_is_tensor, reduce, kwargs + ): + # TODO: support other overload for cpp wrapper and remove the below assertions + line = f"{kernel}({output}, {','.join(map(str, inputs))}" + if fn == "aten.scatter_": + if src_is_tensor: + if reduce: + line += f", {V.graph.wrapper_code.val_to_arg_str(reduce)}" + else: + assert ( + reduce is None + ), "Expect reduce to be None for aten.scatter_ with scalar src" + else: + line += f", {','.join(kwargs)}" + line += f"){self.ending}" + self.writeline(line) + + def add_benchmark_harness(self, output): + if V.graph.aot_mode: + return + super().add_benchmark_harness(output) + + def codegen_sizevar(self, x: Expr) -> str: + return self.expr_printer(V.graph.sizevars.simplify(x)) + + def codegen_tuple_access(self, basename: str, index: str) -> str: + return f"std::get<{index}>({basename})" + + def codegen_shape_tuple(self, shape: Tuple[Expr, ...]) -> str: + 
parts = list(map(self.codegen_sizevar, shape)) + if len(parts) == 0: + return "{}" + if len(parts) == 1: + return f"{{{parts[0]}, }}" + return f"{{{', '.join(parts)}}}" + + def make_buffer_free(self, buffer): + return ( + "" + if isinstance(buffer.get_layout(), ir.MultiOutputLayout) + else f"{buffer.get_name()}.reset();" + ) + + def generate_profiler_mark_wrapper_call(self, stack): + self.wrapper_call.writeline( + 'RECORD_FUNCTION("inductor_wrapper_call", c10::ArrayRef());' + ) + + def codegen_device(self, device): + from .cpp import DEVICE_TO_ATEN + + return ( + f"c10::Device({DEVICE_TO_ATEN[device.type]}, {device.index})" + if device.index is not None + else f"{DEVICE_TO_ATEN[device.type]}" + ) + + def codegen_tensor_option(self, device, dtype): + from .cpp import DTYPE_TO_ATEN + + cpp_device = self.codegen_device(device) + return f"at::TensorOptions({cpp_device}).dtype({DTYPE_TO_ATEN[dtype]}))" + + def make_buffer_allocation(self, buffer): + name = buffer.get_name() + # outputs are passed-in in the AOT mode + if self.use_preallocated_ouput(buffer): + output_idx = None + output_buffer = None + for idx, output in enumerate(V.graph.graph_outputs): + if hasattr(output, "get_name") and name == output.get_name(): + output_idx = idx + output_buffer = output + break + + assert ( + output_idx is not None and output_buffer is not None + ), "Unknown output index" + if V.graph.sizevars.statically_known_leq( + buffer.get_numel(), output_buffer.get_numel() + ): + buf_str = f"auto {name} = outputs[{output_idx}];" + # avoid resize_output warning: + # "An output with one or more elements was resized since it had..." 
+ if buffer.get_size() != output_buffer.get_size(): + resize_to_args = ", ".join( + self.expr_printer(d) for d in buffer.get_size() + ) + buf_str += f" {name}.resize_({{{resize_to_args}}});" + assert name not in self.resized_outputs + self.resized_outputs[name] = list(output_buffer.get_size()) + return buf_str + else: + self.outputs_need_copy.add(name) + + # TODO: map layout here. + device = buffer.get_device() + dtype = buffer.get_dtype() + shape = tuple(buffer.get_size()) + stride = tuple(buffer.get_stride()) + return ( + f"{self.declare}{name} = {self.namespace}empty_strided(" + f"{self.codegen_shape_tuple(shape)}, " + f"{self.codegen_shape_tuple(stride)}, " + f"{self.codegen_tensor_option(device, dtype)};" + ) + + def generate_extern_kernel_alloc_and_find_schema_if_needed( + self, + name, + kernel, + codegen_args, + cpp_op_schema, + cpp_kernel_key, + cpp_kernel_overload_name="", + ): + if cpp_kernel_key not in self.extern_call_ops: + self.writeline( + f"static auto op_{cpp_kernel_key} = c10::Dispatcher::singleton()" + ) + self.writeline( + f'\t.findSchemaOrThrow("{kernel}", "{cpp_kernel_overload_name}")' + ) + self.writeline(f"\t.typed<{cpp_op_schema}>();") + self.extern_call_ops.add(cpp_kernel_key) + + self.writeline( + f"auto {name} = op_{cpp_kernel_key}.call({', '.join(codegen_args)});" + ) + + def val_to_arg_str(self, val): + from .cpp import DTYPE_TO_ATEN + + if val is None: + # When None is passed as an argument, it represents an optional that does not contain a value. 
+ return self.optional_tensor_str + elif isinstance(val, bool): + return "true" if val else "false" + elif isinstance(val, str): + return f'"{val}"' + elif isinstance(val, torch.device): + return self.codegen_device(val) + elif isinstance(val, torch.dtype): + return DTYPE_TO_ATEN[val] + elif isinstance(val, float) and val in [float("inf"), float("-inf")]: + if val == float("inf"): + return "std::numeric_limits::infinity()" + else: + return "-std::numeric_limits::infinity()" + elif isinstance(val, (list, tuple)): + return f"{{{', '.join(list(map(self.val_to_arg_str, val)))}}}" + else: + return repr(val) + + +class CudaWrapperCodeGen(CppWrapperCodeGen): + """ + Generates cpp wrapper for running on GPU and calls CUDA kernels + """ + + def __init__(self): + super().__init__() + self.kernel_callsite_id = count() + self.arg_var_id = count() + self.cuda = True + + def write_header(self): + super().write_header() + self.header.splice( + """ + #include + #include + #include + #include + + #define AT_CUDA_DRIVER_CHECK_OVERRIDE(EXPR) \\ + do { \\ + CUresult __err = EXPR; \\ + if (__err != CUDA_SUCCESS) { \\ + AT_ERROR("CUDA driver error: ", static_cast(__err)); \\ + } \\ + } while (0) + + static inline CUfunction loadKernel( + const std::string &filePath, + const std::string &funcName, + int sharedMemBytes) { + CUmodule mod; + CUfunction func; + AT_CUDA_DRIVER_CHECK_OVERRIDE(cuModuleLoad(&mod, filePath.c_str())); + AT_CUDA_DRIVER_CHECK_OVERRIDE(cuModuleGetFunction(&func, mod, funcName.c_str())); + if (sharedMemBytes > 0) { + AT_CUDA_DRIVER_CHECK_OVERRIDE(cuFuncSetAttribute( + func, + CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, + sharedMemBytes + )); + } + return func; + } + + static inline void launchKernel( + CUfunction func, + int gridX, + int gridY, + int gridZ, + int numWarps, + int sharedMemBytes, + void* args[], + cudaStream_t stream) { + AT_CUDA_DRIVER_CHECK_OVERRIDE(cuLaunchKernel( + func, gridX, gridY, gridZ, 32*numWarps, 1, 1, sharedMemBytes, stream, args, 
nullptr)); + } + """ + ) + + def write_get_cuda_stream(self, index): + name = f"stream{index}" + self.writeline( + f"cudaStream_t {name} = at::cuda::getCurrentCUDAStream({index});" + ) + return name + + def define_kernel( + self, name: str, kernel: str, metadata: Optional[str] = None, cuda=True + ): + if not cuda: + return super().define_kernel(name, kernel, metadata, cuda) + + def generate(self): + self.prefix.writeline("\n") + for kernel in self.src_to_kernel.values(): + self.prefix.writeline(f"static CUfunction {kernel} = nullptr;") + self.prefix.writeline("\n") + return super().generate() + + def generate_load_kernel(self, name, params): + mangled_name = params.get("mangled_name", None) + assert mangled_name is not None, "missing mangled_name" + cubin_path = params.get("cubin_path", None) + assert os.path.exists( + cubin_path + ), "cubin file should already exist at this moment" + + shared_mem = params.get("shared_mem", 0) + self.writeline(f"if ({name} == nullptr) {{") + self.writeline( + f""" {name} = loadKernel("{cubin_path}", "{mangled_name}", {shared_mem});""" + ) + self.writeline("}") + + def generate_args_decl(self, call_args): + # TODO: only works for constant now, need type info + new_args = [] + for arg in call_args: + var_name = f"var_{next(self.arg_var_id)}" + if isinstance( + arg, + ( + sympy.Integer, + sympy.Symbol, + SymbolicCallArg, + ), + ): + self.writeline(f"auto {var_name} = {arg};") + elif is_int(arg): + self.writeline(f"int {var_name} = {arg};") + elif is_float(arg): + self.writeline(f"float {var_name} = {arg};") + else: + self.writeline( + f"CUdeviceptr {var_name} = reinterpret_cast({arg}.data_ptr());" + ) + new_args.append(f"&{var_name}") + + return ", ".join(new_args) + + def generate_kernel_call( + self, name, call_args, grid=None, device_index=None, cuda=True + ): + if not cuda: + return super().generate_kernel_call( + name, call_args, grid, device_index, cuda + ) + + params = CudaKernelParamCache.get(name) + assert ( + params is not 
None + ), f"cuda kernel parameters for {name} should already exist at this moment" + + self.generate_load_kernel(name, params) + + call_args = self.generate_args_decl(call_args) + kernel_args_var = f"kernel_args_var_{next(self.kernel_callsite_id)}" + self.writeline(f"void* {kernel_args_var}[] = {{{call_args}}};") + stream = ( + "stream" if V.graph.aot_mode else self.write_get_cuda_stream(device_index) + ) + self.writeline( + "launchKernel({}, {}, {}, {}, {}, {}, {}, {});".format( + name, + params["grid_x"], + params["grid_y"], + params["grid_z"], + params["num_warps"], + params["shared_mem"], + kernel_args_var, + stream, + ) + ) diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/coordinate_descent_tuner.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/coordinate_descent_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..b652db863e31dd61845a0d043ed64126a4e1ec0f --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/coordinate_descent_tuner.py @@ -0,0 +1,307 @@ +import copy +import itertools +import logging +from typing import Callable, Optional + +from .utils import has_triton, red_text, triton_config_to_hashable + +if has_triton(): + import triton +else: + triton = None + +from . import config as inductor_config + +log = logging.getLogger(__name__) + + +def get_field(config, name): + if name == "num_warps": + return config.num_warps + elif name == "num_stages": + return config.num_stages + else: + return config.kwargs.get(name, None) + + +def set_field(config, name, value): + if name == "num_warps": + config.num_warps = value + elif name == "num_stages": + config.num_stages = value + else: + config.kwargs[name] = value + + +class CoordescTuner: + """ + The coordinate descent tuner. Tune one field/coordinate at a time. + + TODO will it be necessary to tune multiple fields simultanuously. + + + TODO: what if both increasing and descreasing a field can improve perf. 
+ i.e., there are multiple local optima.. + """ + + def __init__(self, is_mm=False, name="unknown", size_hints=None): + self.is_mm = is_mm # we will tune num_stages for mm + self.cached_benchmark_results = {} + self.name = name + self.size_hints = size_hints + + def get_xmax(self): + xmax = inductor_config.triton.max_block["X"] + if self.size_hints and len(self.size_hints) > 0: + xmax = min(xmax, self.size_hints[0]) + return xmax + + def get_ymax(self): + ymax = inductor_config.triton.max_block["Y"] + if self.size_hints and len(self.size_hints) > 1: + ymax = min(ymax, self.size_hints[1]) + return ymax + + def get_zmax(self): + zmax = inductor_config.triton.max_block["Z"] + if self.size_hints and len(self.size_hints) > 2: + zmax = min(zmax, self.size_hints[2]) + return zmax + + def get_rmax(self): + if self.size_hints and len(self.size_hints) > 0: + return self.size_hints[-1] # the last one is for reduction + else: + # large enough. We should not pick this large RBLOCK anyway + return 2**30 + + def cache_benchmark_result(self, config, timing): + self.cached_benchmark_results[triton_config_to_hashable(config)] = timing + + def lookup_in_cache(self, config): + return self.cached_benchmark_results.get(triton_config_to_hashable(config)) + + def call_func(self, func, config): + found = self.lookup_in_cache(config) + if found is not None: + log.debug(" CACHED") + return found + timing = func(config) + self.cache_benchmark_result(config, timing) + return timing + + @property + def tunable_fields(self): + out = [ + "XBLOCK", + "YBLOCK", + "ZBLOCK", + # NOTE: we should not tune RBLOCK for persistent reduction. + # We rely on the fact that persistent reduction's triton.Config + # does not have the RBLOCK field to guarantee that. 
+ "RBLOCK", + # the following 3 are for mm + "BLOCK_M", + "BLOCK_N", + "BLOCK_K", + "num_warps", + ] + if self.is_mm: + out.append("num_stages") + + return out + + def value_too_large(self, name, val): + if name == "XBLOCK": + return val > self.get_xmax() + if name == "YBLOCK": + return val > self.get_ymax() + if name == "ZBLOCK": + return val > self.get_zmax() + if name == "RBLOCK": + return val > self.get_rmax() + + return False + + def get_neighbour_values(self, name, orig_val, radius=1, include_self=False): + """ + Get neighbour values in 'radius' steps. The original value is not + returned as it's own neighbour. + """ + assert radius >= 1 + + def update(cur_val, inc=True): + if name == "num_stages": + if inc: + return cur_val + 1 + else: + return cur_val - 1 + else: + if inc: + return cur_val * 2 + else: + return cur_val // 2 + + out = [] + # increment loop + cur_val = orig_val + for _ in range(radius): + cur_val = update(cur_val, True) + if self.value_too_large(name, cur_val): + break + out.append(cur_val) + + # decrement loop + cur_val = orig_val + for _ in range(radius): + cur_val = update(cur_val, False) + if cur_val <= 0: + break + out.append(cur_val) + + if include_self: + out.append(orig_val) + return out + + @staticmethod + def has_improvement(baseline, test): + threshold = 0.001 # 0.1% + return test is not None and test < baseline * (1 - threshold) + + def check_all_tuning_directions( + self, + func: Callable[["triton.Config"], float], + best_config, + best_timing, + ): + """ + Check all directions. We only do this once the regular coordinate + descent tuning find no better choices any more. + We only have a few tunable fields, so this should be fine. 
+ """ + candidate_values_list = [] + effective_fields = [] + for field in self.tunable_fields: + old_value = get_field(best_config, field) + if old_value is None: + continue + candidate_values = self.get_neighbour_values( + field, + old_value, + radius=inductor_config.coordinate_descent_search_radius, + include_self=True, + ) + candidate_values_list.append(candidate_values) + effective_fields.append(field) + + choices = itertools.product(*candidate_values_list) + improved = False + for choice in choices: + assert len(choice) == len(effective_fields) + candidate_config = copy.deepcopy(best_config) + for new_val, field in zip(choice, effective_fields): + set_field(candidate_config, field, new_val) + cmp_res, candidate_timing = self.compare_config( + func, candidate_config, best_config, best_timing + ) + if cmp_res: + improved = True + best_config = candidate_config + best_timing = candidate_timing + + return improved, best_config, best_timing + + def compare_config(self, func, candidate_config, best_config, best_timing): + """ + Check if candidate_config is better than best_config. + + Return a touple of (compare_result, candidate_timing). + compare_result is true iff condidate_config is better. 
+ """ + log.debug("Try config %s", candidate_config) + try: + candidate_timing = self.call_func(func, candidate_config) + except Exception as e: + log.debug("Got exception %s", e) + return False, float("inf") + + if self.has_improvement(best_timing, candidate_timing): + log.debug( + "Tune from %s %f -> %s %f", + best_config, + best_timing, + candidate_config, + candidate_timing, + ) + + return True, candidate_timing + return False, candidate_timing + + def autotune( + self, + func: Callable[["triton.Config"], float], + baseline_config: "triton.Config", + baseline_timing: Optional[float] = None, + ) -> "triton.Config": + if baseline_timing is None: + baseline_timing = self.call_func(func, baseline_config) + + log.debug("= Do coordinate descent tuning for %s =", self.name) + log.debug( + "Baseline Config %s, baseline timing %f", baseline_config, baseline_timing + ) + improved = True + best_config = baseline_config + best_timing = baseline_timing + tunable_fields = self.tunable_fields + + while improved: + improved = False + + for name in tunable_fields: + cur_val = get_field(best_config, name) + # some kernel don't have RBLOCK/YBLOCK/ZBLOCK. So cur_val may be None + if cur_val is None: + continue + + # It's possible that candidate_values is empty. + # E.g., if XBLOCK is 1 initially and size_hint for x is also 1. + # We would not try either larger or smaller XBLOCK in this case. 
+ candidate_values = self.get_neighbour_values(name, cur_val) + + for next_val in candidate_values: + candidate_config = copy.deepcopy(best_config) + set_field(candidate_config, name, next_val) + + cmp_res, candidate_timing = self.compare_config( + func, candidate_config, best_config, best_timing + ) + if cmp_res: + improved = True + best_config, best_timing = candidate_config, candidate_timing + + if not improved and inductor_config.coordinate_descent_check_all_directions: + old_best_timing = best_timing + improved, best_config, best_timing = self.check_all_tuning_directions( + func, best_config, best_timing + ) + + if improved: + msg = red_text( + "Coordinate descend tuning found improvement of %.3fx by looking in all directions." + ) + log.debug( + msg, + old_best_timing / best_timing, + ) + + log.debug( + "Improve from %s %f -> %s %f, %.3fx", + baseline_config, + baseline_timing, + best_config, + best_timing, + baseline_timing / best_timing, + ) + + return best_config diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/cuda_properties.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/cuda_properties.py new file mode 100644 index 0000000000000000000000000000000000000000..59bcc4e91287767b0202b6de2609c0b520a6c8f2 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/cuda_properties.py @@ -0,0 +1,58 @@ +import functools +from typing import Dict, Optional, Tuple, Union + +import torch +from torch.cuda import _CudaDeviceProperties + +# API to query cuda properties that will work in a triton compile process +# that cannot use the GPU APIs (due to processing fork() and initialization +# time issues). Properties are recorded in the main process before +# we fork the workers. 
+ +_compile_worker_current_device: Optional[int] = None + + +@functools.lru_cache(None) +def _properties() -> Dict[int, _CudaDeviceProperties]: + if not torch.cuda.is_available(): + return {} + try: + return { + i: torch.cuda.get_device_properties(i) + for i in range(torch.cuda.device_count()) + } + except RuntimeError: + return {} + + +def set_compiler_worker_current_device(device: int) -> None: + global _compile_worker_current_device + _compile_worker_current_device = device + + +def current_device() -> int: + if _compile_worker_current_device is not None: + return _compile_worker_current_device + return torch.cuda.current_device() + + +def _device(device: Optional[Union[torch.device, int]]) -> int: + if device is not None: + if isinstance(device, torch.device): + assert device.type == "cuda" + device = device.index + return device + return current_device() + + +def get_device_properties( + device: Optional[Union[torch.device, int]] = None +) -> _CudaDeviceProperties: + return _properties()[_device(device)] + + +def get_device_capability( + device: Optional[Union[torch.device, int]] = None +) -> Tuple[int, int]: + p = get_device_properties(device) + return p.major, p.minor diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__init__.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..866d67248657003c52d95187cab2992c96eca499 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/binary_folding.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/binary_folding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afcd1e2a33218ac3ab1a35b90d539f6b71c0a271 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/binary_folding.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/freezing_patterns.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/freezing_patterns.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4793f26b1c6bcce988ce888678d48ad92d70b648 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/freezing_patterns.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/fuse_attention.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/fuse_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28273997b3fea3dbfa04058a37bf36204e8743f7 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/fuse_attention.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/group_batch_fusion.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/group_batch_fusion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6d786da260ffd497bfa558b0c8349d8470b5f93 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/group_batch_fusion.cpython-310.pyc differ diff --git 
a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/joint_graph.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/joint_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5980269cadb6b8d0bb9e3a5dfd779f872553e7a Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/joint_graph.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/mkldnn_fusion.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/mkldnn_fusion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f8c701be3f064dc3a4573cf2795a3ca49597e62 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/mkldnn_fusion.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pad_mm.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pad_mm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18d4c907d971771839e64cc9280aaa5722352f25 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pad_mm.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/post_grad.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/post_grad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f471f5543ffd87c39dd33fe6cbbd25369dae909 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/post_grad.cpython-310.pyc differ diff --git 
a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pre_grad.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pre_grad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8db2eaa2da3682091717e455e85a907a9c2c7120 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pre_grad.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/quantization.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/quantization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d11b50e51346bc2dfdfcfc7de415f29f1976f08 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/quantization.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/replace_random.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/replace_random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e316fa7ecf086e72292820962824cca2ff4ba694 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/replace_random.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/split_cat.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/split_cat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9a73b9f26406ebd81c6b7860fb0fb3b366a7dc2 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/split_cat.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/binary_folding.py 
b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/binary_folding.py new file mode 100644 index 0000000000000000000000000000000000000000..e8811f2b746164ff838265c8cb4c177bf2c425bf --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/binary_folding.py @@ -0,0 +1,274 @@ +import functools +import itertools + +import torch +from ..._dynamo.utils import counters + +from ..pattern_matcher import Arg, CallFunction, KeywordArg +from .freezing_patterns import register_binary_folding_pattern + +aten = torch.ops.aten +prims = torch.ops.prims + + +def mark_mixed_dtype_conv(conv): + conv_dtype = conv.meta["val"].dtype + if conv_dtype not in (torch.float16, torch.bfloat16): + return + + if not len(conv.users) == 1: + return + + conv_user = next(iter(conv.users.keys())) + if not isinstance(conv_user.meta["val"], torch.Tensor): + return + + if not conv_user.meta["val"].dtype == torch.float32: + return + + while conv_user.target in _binary_ops: + if not len(conv_user.users) == 1: + return + + conv_user = next(iter(conv_user.users.keys())) + + if not ( + conv_user.target == prims.convert_element_type.default + and conv_user.args[1] == conv_dtype + ): + return + + conv.meta["_allow_conv_mixed_dtype_folding"] = conv_dtype + + +def mark_mixed_dtype_allowed_convs(gm): + """ + Mark convolutions which we will binary fold even with mixed precision constants. We constant fold in the higher precision + for better accuracy and then recover the original precision after. + """ + for node in gm.graph.nodes: + if node.target is aten.convolution.default: + mark_mixed_dtype_conv(node) + + +def recover_original_precision_folded_convs(gm): + """ + After binary folding conv weights and biases to a higher dtype, recover the original precision they were in. 
+ """ + graph = gm.graph + convs = [node for node in graph.nodes if node.target is aten.convolution.default] + for node in convs: + orig_dtype = node.meta.get("_allow_conv_mixed_dtype_folding", None) + if orig_dtype is None: + continue + + with graph.inserting_before(node): + for idx in [1, 2]: + old_input = node.args[idx] + if old_input is None: + continue + + new_input = graph.create_node( + "call_function", + prims.convert_element_type.default, + (old_input, orig_dtype), + ) + node.replace_input_with(old_input, new_input) + + +_binary_ops = [aten.add.Tensor, aten.sub.Tensor, aten.mul.Tensor, aten.div.Tensor] + + +@functools.lru_cache(None) +def binary_folding_init(): + _conv_args = [Arg() for _ in range(9)] + _computation_ops = [aten.convolution.default] + _computation_calls = [CallFunction(aten.convolution.default, *_conv_args, _users=1)] + + """ + In order to fuse add/sub/mul/div with conv, the dimensions of its + constant tensor must satisfy the following: + - with resizing, broadcast to w/ weight/bias tensor shape + - broadcast to the conv output shape + It needs to have a shape that can resize to weight/bias + tensor shape because we need to run the op with the conv + weights/bias without changing their sizes. + It needs to broadcast to the conv output shape so that we do + accidentally change the shape of op output by pre-fusing it + compared to eager. + The only dimension value shared by weight/bias/conv output + is they all contain a dim with value = channels-out. 
In the + conv output tensor, this is in the second dimension, + so the pointwise op tensor may have a second dimension of + value == channels-out, but all the other dimensions have to be 1 + """ + + def _op_not_broadcasting_with_conv(weight_tensor, other_tensor): + # According to opDoesNotBroadCastWithConv of frozen_conv_folding.cpp + weight_shape = weight_tensor.shape + other_shape = other_tensor.shape + if len(weight_shape) < len(other_shape): + return False + if len(weight_shape) == len(other_shape) + 1: + # weight shape is [o, i, *], other_shape is [o, 1...]. + for i in reversed(range(len(other_shape))): + if i == 0 and weight_shape[0] == other_shape[i]: + continue + if other_shape[i] != 1: + return False + else: + # weight shape is [o, i, *], other_shape is [1, i, *] + for i in reversed(range(len(other_shape))): + if i == 1 and weight_shape[0] == other_shape[i]: + continue + if other_shape[i] != 1: + return False + return True + + def _check_conv_and_broadcast_op(conv_node, other): + # According to checkConvAndBroadcastingOpPreConditions of frozen_conv_folding.cpp. 
+ # conv.weight + if conv_node.args[1].op != "get_attr": + return False + # conv.bias + if conv_node.args[1] is not None and conv_node.args[1].op != "get_attr": + return False + if ( + not isinstance(other, int) + and not isinstance(other, float) + and other.op != "get_attr" + ): + return False + + weight_meta_value = conv_node.args[1].meta.get("val") + if weight_meta_value is None: + return False + # Avoid fusing op that causes type promotion + # restricting to float avoids int/float difficulties with scalar overload + if not weight_meta_value.is_floating_point(): + return False + if isinstance(other, torch.fx.Node) and other.op == "get_attr": + other_meta_value = other.meta.get("val") + if not other_meta_value.is_floating_point(): + return False + if ( + torch.promote_types(other_meta_value.dtype, weight_meta_value.dtype) + != weight_meta_value.dtype + ): + if not conv_node.meta.get("_allow_conv_mixed_dtype_folding", False): + return False + + if ( + other_meta_value.dtype != torch.float + and weight_meta_value.dtype not in (torch.float16, torch.bfloat16) + ): + return False + + if not _op_not_broadcasting_with_conv(weight_meta_value, other_meta_value): + return False + else: + # TODO: support scalar case + return False + + return True + + def _is_foldable_pattern(match): + binary_node = match.output_node() + computation_node = binary_node.args[0] + other = binary_node.args[1] + if binary_node.args[0].target not in _computation_ops: + computation_node = binary_node.args[1] + other = binary_node.args[0] + if binary_node.args[0].target == aten.convolution.default: + return _check_conv_and_broadcast_op(computation_node, other) + + return False + + def resize_scalar_or_tensor_to_shape(graph, other, shape): + # TODO: support scalar case + if other.meta.get("val").numel() == 1: + # expand errors if the shape input has less # dims than the tensor input + res = graph.create_node( + "call_function", + aten.reshape.default, + (other, (1,)), + ) + res = graph.create_node( + 
"call_function", + aten.expand.default, + (res, shape), + ) + else: + res = graph.create_node( + "call_function", + aten.reshape.default, + (other, shape), + ) + return res + + def _create_new_conv_node(graph, conv_node, binary_node, other): + assert conv_node.target == aten.convolution.default + conv_args = list(conv_node.args) + weight_meta_value = conv_node.args[1].meta.get("val") + bias = conv_args[2] + if binary_node.target in [aten.add.Tensor, aten.sub.Tensor]: + other_reshape = resize_scalar_or_tensor_to_shape( + graph, other, (weight_meta_value.size(0),) + ) + new_bias = graph.create_node( + "call_function", + binary_node.target, + (0 if bias is None else bias, other_reshape), + ) + conv_args[2] = new_bias + else: + assert binary_node.target in [aten.mul.Tensor, aten.div.Tensor] + weight_broadcast_shape = [1 for _ in range(len(weight_meta_value.shape))] + weight_broadcast_shape[0] = weight_meta_value.size(0) + other_reshape1 = resize_scalar_or_tensor_to_shape( + graph, other, tuple(weight_broadcast_shape) + ) + new_weight = graph.create_node( + "call_function", binary_node.target, (conv_args[1], other_reshape1) + ) + new_weight.meta.update(conv_args[1].meta) + conv_args[1] = new_weight + if bias is not None: + other_reshape = resize_scalar_or_tensor_to_shape( + graph, other, (weight_meta_value.size(0),) + ) + new_bias = graph.create_node( + "call_function", binary_node.target, (bias, other_reshape) + ) + new_bias.meta.update(bias.meta) + conv_args[2] = new_bias + return graph.create_node("call_function", conv_node.target, tuple(conv_args)) + + for _computation_call, binary_op in itertools.product( + _computation_calls, _binary_ops + ): + + @register_binary_folding_pattern( + CallFunction(binary_op, _computation_call, KeywordArg("other")), + extra_check=_is_foldable_pattern, + ) + def folded_op(match, *args, **kwargs): + counters["inductor"]["binary_folding"] += 1 + other = kwargs.get("other") + binary_node = match.output_node() + computation_node = ( + 
binary_node.args[0] + if binary_node.args[0].target in _computation_ops + else binary_node.args[1] + ) + graph = match.graph + with graph.inserting_before(binary_node): + # TODO: support linear? + assert computation_node.target == aten.convolution.default + new_computation_node = _create_new_conv_node( + graph, computation_node, binary_node, other + ) + binary_node.replace_all_uses_with(new_computation_node) + new_computation_node.meta.update(computation_node.meta) + graph.erase_node(binary_node) + graph.erase_node(computation_node) diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/freezing_patterns.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/freezing_patterns.py new file mode 100644 index 0000000000000000000000000000000000000000..2fd573257cbc4847ae19c10b446c3514613b31d2 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/freezing_patterns.py @@ -0,0 +1,192 @@ +import functools + +import torch +from torch._inductor.compile_fx import fake_tensor_prop +from ..._dynamo.utils import counters + +from .. import config +from ..pattern_matcher import ( + _return_true, + CallFunction, + Ignored, + inference_graph, + init_once_fakemode, + KeywordArg, + Match, + PatternMatcherPass, + register_graph_pattern, + register_replacement, + stable_topological_sort, +) + +aten = torch.ops.aten + +# First pass_patterns[0] are applied, then [1], then [2] +pass_patterns = [ + PatternMatcherPass(), + PatternMatcherPass(), + PatternMatcherPass(), +] + +binary_folding_pass = PatternMatcherPass() + + +def freezing_passes(gm: torch.fx.GraphModule, aot_example_inputs): + """ + Passes that are applied to the graph to freeze pass. + """ + + from ..freezing import constant_fold + + lazy_init() + # We need a few rounds of binary folding to get rid of all the + # unnecessary nodes, but may need a good method to chose the rounds number. + # works like: conv+binary+binary. 
+ binary_folding = counters["inductor"]["binary_folding"] + fake_tensor_prop(gm, aot_example_inputs, True) + + torch._inductor.fx_passes.binary_folding.mark_mixed_dtype_allowed_convs(gm) + for _ in range(4): + constant_fold(gm) + # Make sure meta['val'] is properly set for all nodes + fake_tensor_prop(gm, aot_example_inputs, True) + binary_folding_pass.apply(gm.graph) + # If we don't have binary folding, we don't need to run the pass again. + # TODO: remove the need to run fake_tensor_prop on the whole model. + if counters["inductor"]["binary_folding"] == binary_folding: + break + binary_folding = counters["inductor"]["binary_folding"] + + torch._inductor.fx_passes.binary_folding.recover_original_precision_folded_convs(gm) + + constant_fold(gm) + fake_tensor_prop(gm, aot_example_inputs, True) + + for pattern in pass_patterns: + pattern.apply(gm.graph) + + # The CPU weight packing always assume the conv's weight is channels last, + # So make sure the layout_optimization is on when doing it. 
+ if ( + torch._C._has_mkldnn + and config.cpp.weight_prepack + and config.layout_optimization + ): + from .mkldnn_fusion import _eliminate_duplicate_packed_nodes + + _eliminate_duplicate_packed_nodes(gm) + + stable_topological_sort(gm.graph) + gm.recompile() + gm.graph.lint() + + +@init_once_fakemode +def lazy_init(): + if torch._C._has_mkldnn and config.cpp.weight_prepack: + from .mkldnn_fusion import _mkldnn_weight_pack_init + + _mkldnn_weight_pack_init() + + from .binary_folding import binary_folding_init + + addmm_patterns_init() + binary_folding_init() + + +def register_freezing_graph_pattern(pattern, extra_check=_return_true, pass_number=0): + return register_graph_pattern( + pattern, + extra_check=extra_check, + pass_dict=pass_patterns[pass_number], + ) + + +def register_binary_folding_pattern(pattern, extra_check=_return_true): + return register_graph_pattern( + pattern, + extra_check=extra_check, + pass_dict=binary_folding_pass, + ) + + +@functools.lru_cache(None) +def addmm_patterns_init(): + if torch.cuda.is_available(): + # workaround https://github.com/pytorch/pytorch/issues/97894 + device = "cuda" + else: + device = "cpu" + val = functools.partial(torch.empty, (10, 10), device=device, requires_grad=False) + + def check_concat_weights(match): + weights = [ + match.kwargs["w1"], + match.kwargs["w2"], + match.kwargs["w3"], + ] + return all( + w.op == "get_attr" and w.meta["val"].shape == weights[0].meta["val"].shape + for w in weights + ) + + def matmul_fuse_pattern(inp, w1, w2, w3): + return (inp @ w1, inp @ w2, inp @ w3) + + def matmul_replacement(inp, w1, w2, w3): + cat_t = torch.cat((w1, w2, w3), dim=1) + mm = inp @ cat_t + return mm.chunk(3, dim=1) + + register_replacement( + matmul_fuse_pattern, + matmul_replacement, + [val(), val(), val(), val()], + inference_graph, + pass_patterns[0], + extra_check=check_concat_weights, + exclusive_arg_names=("w1", "w2", "w3"), + ) + + def addmm_fuse_pattern_second(inp, w1, w2, w3, b1, b2, b3): + return ( + 
aten.addmm(b1, inp, w1), + aten.addmm(b2, inp, w2), + aten.addmm(b3, inp, w3), + ) + + def addmm_fuse_replacement_second(inp, w1, w2, w3, b1, b2, b3): + cat_w = torch.cat((w1, w2, w3), dim=1) + cat_b = torch.cat((b1, b2, b3)) + return aten.addmm(cat_b, inp, cat_w).chunk(3, dim=1) + + register_replacement( + addmm_fuse_pattern_second, + addmm_fuse_replacement_second, + [val() for _ in range(7)], + inference_graph, + pass_patterns[0], + extra_check=check_concat_weights, + exclusive_arg_names=("w1", "w2", "w3", "b1", "b2", "b3"), + ) + + +def same_dtype(match): + return match.output_node().args[0].meta["val"].dtype == match.kwargs["dtype"] + + +@register_graph_pattern( + CallFunction( + torch.ops.prims.convert_element_type.default, + Ignored(), + KeywordArg("dtype"), + ), + pass_dict=pass_patterns[0], + extra_check=same_dtype, +) +def unnecessary_dtype_convert(match: Match, **kwargs): + """Remove unnecessary dtype conversion op, probably left as a result of Conv-Bn folding""" + graph = match.graph + node = match.output_node() + node.replace_all_uses_with(node.args[0]) + graph.erase_node(node) diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/fuse_attention.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/fuse_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..333eafa1d5a2694ea3820b3b4dc20a21d2b9701c --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/fuse_attention.py @@ -0,0 +1,568 @@ +import functools +import logging +import math + +import torch +from ..._dynamo.utils import counters +from ..pattern_matcher import ( + filter_nodes, + inference_graph, + register_replacement, + training_graph, +) + +log = logging.getLogger(__name__) +aten = torch.ops.aten + + +def _sfdp_pattern_1(query, key, value, inv_scale): + return ( + torch.matmul(query, key.transpose(-2, -1)) + .div(inv_scale) + .softmax(dim=-1) + .matmul(value) + ) + + +def 
_sfdp_replacement_1(query, key, value, inv_scale): + counters["inductor"]["fuse_attention"] += 1 + return aten.scaled_dot_product_attention( + query.contiguous(), + key.contiguous(), + value.contiguous(), + attn_mask=None, + dropout_p=0.0, + is_causal=False, + scale=1.0 / inv_scale, + ) + + +def _sfdp_pattern_2(query, key, value, scale_factor): + return ( + torch.matmul(query, key.transpose(-2, -1)) + .mul(scale_factor) + .softmax(dim=-1) + .matmul(value) + ) + + +def _sfdp_replacement_2(query, key, value, scale_factor): + counters["inductor"]["fuse_attention"] += 1 + return aten.scaled_dot_product_attention( + query.contiguous(), + key.contiguous(), + value.contiguous(), + attn_mask=None, + dropout_p=0.0, + is_causal=False, + scale=scale_factor, + ) + + +def _sfdp_pattern_3(query, key, value, inv_scale_factor, dropout_p): + return torch.nn.functional.dropout( + torch.matmul(query, key.transpose(-2, -1)) + .div(inv_scale_factor) + .softmax(dim=-1), + p=dropout_p, + ).matmul(value) + + +def _sfdp_replacement_3(query, key, value, inv_scale_factor, dropout_p): + counters["inductor"]["fuse_attention"] += 1 + return aten.scaled_dot_product_attention( + query.contiguous(), + key.contiguous(), + value.contiguous(), + attn_mask=None, + dropout_p=dropout_p, + is_causal=False, + scale=1.0 / inv_scale_factor, + ) + + +def _sfdp_pattern_4(query, key, value, scale_factor, dropout_p): + return torch.nn.functional.dropout( + torch.matmul(query, key.transpose(-2, -1)).mul(scale_factor).softmax(dim=-1), + p=dropout_p, + ).matmul(value) + + +def _sfdp_replacement_4(query, key, value, scale_factor, dropout_p): + counters["inductor"]["fuse_attention"] += 1 + return aten.scaled_dot_product_attention( + query.contiguous(), + key.contiguous(), + value.contiguous(), + attn_mask=None, + dropout_p=dropout_p, + is_causal=False, + scale=scale_factor, + ) + + +def _sfdp_pattern_5(query, key, value, attn_mask): + attn_weight = torch.softmax( + (query @ key.transpose(-2, -1) / 
math.sqrt(query.size(-1))) + attn_mask, dim=-1 + ) + # attn_weight = torch.dropout(attn_weight, dropout_p) + return attn_weight @ value + + +def _sfdp_replacement_5(query, key, value, attn_mask): + counters["inductor"]["fuse_attention"] += 1 + return aten.scaled_dot_product_attention( + query.contiguous(), + key.contiguous(), + value.contiguous(), + attn_mask=attn_mask.to(dtype=query.dtype), + dropout_p=0.0, + is_causal=False, + ) + + +def _sfdp_pattern_6(query, key, value, attn_mask, dropout_p): + attn_weight = torch.softmax( + (query @ key.transpose(-2, -1) / math.sqrt(query.size(-1))) + attn_mask, dim=-1 + ) + attn_weight = torch.dropout(attn_weight, dropout_p, True) + return attn_weight @ value + + +def _sfdp_replacement_6(query, key, value, attn_mask, dropout_p): + counters["inductor"]["fuse_attention"] += 1 + return aten.scaled_dot_product_attention( + query.contiguous(), + key.contiguous(), + value.contiguous(), + attn_mask=attn_mask.to(dtype=query.dtype), + dropout_p=dropout_p, + is_causal=False, + ) + + +def _sfdp_pattern_7(query, key, value, dropout_p): + # in real workloads inputs to matmul are permuted + # causing matmul to expand to a series of expand and clone calls + # we want the same to happen during pattern tracing + q = query.permute(0, 2, 1, 3) + k = key.permute(0, 2, 1, 3) + v = value.permute(0, 2, 1, 3) + div = q @ k.transpose(-2, -1) / math.sqrt(q.size(-1)) + div = div.to(torch.float32) + attn_weight = torch.softmax(div, dim=-1) + attn_weight = torch.dropout(attn_weight, dropout_p, True) + attn_weight = attn_weight.to(torch.float16) + return attn_weight @ v + + +def _sfdp_replacement_7(query, key, value, dropout_p): + # sdpa prefers inputs in permuted format + # it makes a copy to put them in this format + # if they aren't already + # to make replacement efficient ensure that inputs to sdpa + # are in required order + counters["inductor"]["fuse_attention"] += 1 + q = query.permute(0, 2, 1, 3) + k = key.permute(0, 2, 1, 3) + v = 
value.permute(0, 2, 1, 3) + return aten.scaled_dot_product_attention( + q, + k, + v, + attn_mask=None, # attn_mask, + dropout_p=dropout_p, + is_causal=False, + ) + + +def _sfdp_pattern_8(query, key, value): + # no dropout version of pattern 7 + q = query.permute(0, 2, 1, 3) + k = key.permute(0, 2, 1, 3) + v = value.permute(0, 2, 1, 3) + div = q @ k.transpose(-2, -1) / math.sqrt(q.size(-1)) + div = div.to(torch.float32) + attn_weight = torch.softmax(div, dim=-1) + attn_weight = attn_weight.to(torch.float16) + return attn_weight @ v + + +def _sfdp_replacement_8(query, key, value): + counters["inductor"]["fuse_attention"] += 1 + q = query.permute(0, 2, 1, 3) + k = key.permute(0, 2, 1, 3) + v = value.permute(0, 2, 1, 3) + return aten.scaled_dot_product_attention( + q, + k, + v, + attn_mask=None, # attn_mask, + dropout_p=0.0, + is_causal=False, + ) + + +def _sfdp_pattern_9(query, key, value, dropout_p): + q = query.permute(0, 2, 1, 3) + k = key.permute(0, 2, 1, 3) + v = value.permute(0, 2, 1, 3) + q = q / math.sqrt(q.size(-1)) + div = q @ k.transpose(-2, -1) + div = div.to(torch.float32) + attn_weight = torch.softmax(div, dim=-1) + attn_weight = torch.dropout(attn_weight, dropout_p, True) + attn_weight = attn_weight.to(torch.float16) + return attn_weight @ v + + +def _sfdp_replacement_9(query, key, value, dropout_p): + counters["inductor"]["fuse_attention"] += 1 + q = query.permute(0, 2, 1, 3) + k = key.permute(0, 2, 1, 3) + v = value.permute(0, 2, 1, 3) + return aten.scaled_dot_product_attention( + q, + k, + v, + attn_mask=None, # attn_mask, + dropout_p=dropout_p, + is_causal=False, + ) + + +def _sfdp_pattern_10(query, key, value): + # no dropout version of 9 + q = query.permute(0, 2, 1, 3) + k = key.permute(0, 2, 1, 3) + v = value.permute(0, 2, 1, 3) + q = q / math.sqrt(q.size(-1)) + div = q @ k.transpose(-2, -1) + div = div.to(torch.float32) + attn_weight = torch.softmax(div, dim=-1) + attn_weight = attn_weight.to(torch.float16) + return attn_weight @ v + + +def 
_sfdp_replacement_10(query, key, value): + counters["inductor"]["fuse_attention"] += 1 + q = query.permute(0, 2, 1, 3) + k = key.permute(0, 2, 1, 3) + v = value.permute(0, 2, 1, 3) + return aten.scaled_dot_product_attention( + q, + k, + v, + attn_mask=None, # attn_mask, + dropout_p=0.0, + is_causal=False, + ) + + +def _sfdp_pattern_11(query, key, value, inv_scale): + # Mainly for huggingface models + q = query.permute(0, 2, 1, 3) + k = key.permute(0, 2, 1, 3) + v = value.permute(0, 2, 1, 3) + return torch.matmul(q, k.transpose(-2, -1)).div(inv_scale).softmax(dim=-1).matmul(v) + + +def _sfdp_replacement_11(query, key, value, inv_scale): + counters["inductor"]["fuse_attention"] += 1 + return aten.scaled_dot_product_attention( + query.transpose(1, 2), + key.transpose(1, 2), + value.transpose(1, 2), + attn_mask=None, + dropout_p=0.0, + is_causal=False, + scale=1.0 / inv_scale, + ) + + +def _sfdp_pattern_12(query, key, value, inv_scale_factor, dropout_p): + q = query.permute(0, 2, 1, 3) + k = key.permute(0, 2, 1, 3) + v = value.permute(0, 2, 1, 3) + return torch.nn.functional.dropout( + torch.matmul(q, k.transpose(-2, -1)).div(inv_scale_factor).softmax(dim=-1), + p=dropout_p, + ).matmul(v) + + +def _sfdp_replacement_12(query, key, value, inv_scale_factor, dropout_p): + counters["inductor"]["fuse_attention"] += 1 + return aten.scaled_dot_product_attention( + query.transpose(1, 2), + key.transpose(1, 2), + value.transpose(1, 2), + attn_mask=None, + dropout_p=dropout_p, + is_causal=False, + scale=1.0 / inv_scale_factor, + ) + + +def _sfdp_pattern_13(query, key, value, inv_scale): + # dropout would create a clone() if eval() or p = 0 + return ( + torch.matmul(query, key.transpose(-2, -1)) + .div(inv_scale) + .softmax(dim=-1) + .clone() + .matmul(value) + ) + + +def _sfdp_replacement_13(query, key, value, inv_scale): + counters["inductor"]["fuse_attention"] += 1 + return aten.scaled_dot_product_attention( + query.contiguous(), + key.contiguous(), + value.contiguous(), + 
attn_mask=None, + dropout_p=0.0, + is_causal=False, + scale=1.0 / inv_scale, + ) + + +def _sfdp_pattern_14(query, key, value, scale_factor): + # dropout would create a clone() if eval() or p = 0 + return ( + torch.matmul(query, key.transpose(-2, -1)) + .mul(scale_factor) + .softmax(dim=-1) + .clone() + .matmul(value) + ) + + +def _sfdp_replacement_14(query, key, value, scale_factor): + counters["inductor"]["fuse_attention"] += 1 + return aten.scaled_dot_product_attention( + query.contiguous(), + key.contiguous(), + value.contiguous(), + attn_mask=None, + dropout_p=0.0, + is_causal=False, + scale=scale_factor, + ) + + +def _sfdp_pattern_15(query, key, value, inv_scale): + # dropout would create a clone() if eval() or p = 0 + q = query.permute(0, 2, 1, 3) + k = key.permute(0, 2, 1, 3) + v = value.permute(0, 2, 1, 3) + return ( + torch.matmul(q, k.transpose(-2, -1)) + .div(inv_scale) + .softmax(dim=-1) + .clone() + .matmul(v) + ) + + +def _sfdp_replacement_15(query, key, value, inv_scale): + counters["inductor"]["fuse_attention"] += 1 + return aten.scaled_dot_product_attention( + query.transpose(1, 2), + key.transpose(1, 2), + value.transpose(1, 2), + attn_mask=None, + dropout_p=0.0, + is_causal=False, + scale=1.0 / inv_scale, + ) + + +def _sfdp_params_check(match): + assert all(k in match.kwargs for k in ("query", "key", "value")) + query = match.kwargs["query"].meta["val"] + key = match.kwargs["key"].meta["val"] + value = match.kwargs["value"].meta["val"] + if not (query.dtype == key.dtype == value.dtype) or not ( + query.device == key.device == value.device + ): + return False + add_mask_node = filter_nodes(match.nodes, aten.add.Tensor) + # Has attn_mask add. + if len(add_mask_node) > 0: + attn_mask_node = add_mask_node[0].args[1] + # attn_mask_node may be a float/int number. 
+ if not hasattr(attn_mask_node, "meta"): + return False + attn_mask = attn_mask_node.meta["val"] + # Make sure attn_mask.dtype == query.dtype or attn_mask.dtype == torch.bool + if ( + not isinstance(attn_mask, torch.Tensor) + or not (attn_mask.dtype == query.dtype or attn_mask.dtype == torch.bool) + or query.device != attn_mask.device + ): + return False + return True + + +def _sfdp_scale_factor_check(scale_factor_op): + def fn(match): + scale_factor_node = filter_nodes(match.nodes, scale_factor_op)[0] + # Note: args[1] of the scale_factor_node is always the scale_factor for the current patterns. + scale_factor = scale_factor_node.args[1] + # make sure the scale_factor a float/int. SymInt? + if not isinstance(scale_factor, (float, int)): + return False + return _sfdp_params_check(match) + + return fn + + +@functools.lru_cache(None) +def _sfdp_init(): + from .joint_graph import patterns + + if torch.cuda.is_available(): + # workaround https://github.com/pytorch/pytorch/issues/97894 + device = "cuda" + else: + device = "cpu" + + # sizes/values don't actually matter for initial trace + # once we get a possible match we re-trace with the actual values and verify the match still holds + g = functools.partial(torch.empty, (2, 4, 8, 16), device=device, requires_grad=True) + gp = functools.partial( + torch.empty, (2, 8, 4, 16), device=device, requires_grad=True, dtype=torch.half + ) + b = functools.partial(torch.empty, (1, 1, 8, 8), device=device) + c = functools.partial(torch.tensor, 2.0, device=device) + # workaround https://github.com/pytorch/pytorch/issues/97894 + # 0.113377 is a "magic" value that lets us recover the lost input arg relationship + d = {"dropout_p": 0.113377} + + for pattern, replacement, args, workaround, extra_check in [ + ( + _sfdp_pattern_1, + _sfdp_replacement_1, + [g(), g(), g(), c()], + {}, + _sfdp_scale_factor_check(aten.div.Tensor), + ), + ( + _sfdp_pattern_2, + _sfdp_replacement_2, + [g(), g(), g(), c()], + {}, + 
_sfdp_scale_factor_check(aten.mul.Tensor), + ), + ( + _sfdp_pattern_3, + _sfdp_replacement_3, + [g(), g(), g(), c()], + d, + _sfdp_scale_factor_check(aten.div.Tensor), + ), + ( + _sfdp_pattern_4, + _sfdp_replacement_4, + [g(), g(), g(), c()], + d, + _sfdp_scale_factor_check(aten.mul.Tensor), + ), + ( + _sfdp_pattern_5, + _sfdp_replacement_5, + [g(), g(), g(), b()], + {}, + _sfdp_params_check, + ), + ( + _sfdp_pattern_6, + _sfdp_replacement_6, + [g(), g(), g(), b()], + d, + _sfdp_params_check, + ), + ( + _sfdp_pattern_7, + _sfdp_replacement_7, + [gp(), gp(), gp()], + d, + _sfdp_params_check, + ), + ( + _sfdp_pattern_8, + _sfdp_replacement_8, + [gp(), gp(), gp()], + {}, + _sfdp_params_check, + ), + ( + _sfdp_pattern_9, + _sfdp_replacement_9, + [gp(), gp(), gp()], + d, + _sfdp_params_check, + ), + ( + _sfdp_pattern_10, + _sfdp_replacement_10, + [gp(), gp(), gp()], + {}, + _sfdp_params_check, + ), + ( + _sfdp_pattern_11, + _sfdp_replacement_11, + [g(), g(), g(), c()], + {}, + _sfdp_scale_factor_check(aten.div.Tensor), + ), + ( + _sfdp_pattern_12, + _sfdp_replacement_12, + [g(), g(), g(), c()], + d, + _sfdp_scale_factor_check(aten.div.Tensor), + ), + ( + _sfdp_pattern_13, + _sfdp_replacement_13, + [g(), g(), g(), c()], + {}, + _sfdp_scale_factor_check(aten.div.Tensor), + ), + ( + _sfdp_pattern_14, + _sfdp_replacement_14, + [g(), g(), g(), c()], + {}, + _sfdp_scale_factor_check(aten.mul.Tensor), + ), + ( + _sfdp_pattern_15, + _sfdp_replacement_15, + [g(), g(), g(), c()], + {}, + _sfdp_scale_factor_check(aten.div.Tensor), + ), + ]: + args = [*args, *workaround.values()] + register_replacement( + pattern, + replacement, + args, + training_graph, + patterns, + extra_check=extra_check, + scalar_workaround=workaround, + ) + register_replacement( + pattern, + replacement, + args, + inference_graph, + patterns, + extra_check=extra_check, + scalar_workaround=workaround, + ) diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/group_batch_fusion.py 
b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/group_batch_fusion.py new file mode 100644 index 0000000000000000000000000000000000000000..42a359b20020dc37317216f3307c3fcbc472037f --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/group_batch_fusion.py @@ -0,0 +1,575 @@ +import collections +import logging +import operator + +import torch +from torch._dynamo.utils import counters + +from .. import config +from ..pattern_matcher import ( + CallFunctionVarArgs, + get_arg_value, + stable_topological_sort, +) + +try: + # importing this will register fbgemm lowerings for inductor + import deeplearning.fbgemm.fbgemm_gpu.fb.inductor_lowerings # noqa: F401 + + has_fbgemm = True +except Exception: + has_fbgemm = False + pass + +aten = torch.ops.aten + +log = logging.getLogger(__name__) + +MIN_FUSE_SET_SIZE = 5 +MAX_FUSE_SET_SIZE = 300 +MAX_FUSE_SEARCH_DEPTH = 5 +# The maximum tensor size that can go into the fusion group +MAX_FUSE_TENSOR_SIZE_GROUP_LINEAR = 4096 + + +class GroupBatchFusionBase: + def match(self, node): + raise NotImplementedError("match called on base") + + def fuse(self, graph, subset): + raise NotImplementedError("fuse called on base") + + +class GroupFusion(GroupBatchFusionBase): + """ + Fuse ops in a group way, e.g, fuse mm/addmm of arbitrary input shapes with fbgemm.gmm. + """ + + pass + + +class BatchFusion(GroupBatchFusionBase): + """ + Fuse ops in a batch way, e.g, fuse mm/addmm of same input shapes with bmm. 
+ """ + + pass + + +class GroupLinearFusion(GroupFusion): + def _addmm_node_can_be_fused(self, node): + input_shape = node.args[1].meta["tensor_meta"].shape + weight_shape = node.args[2].meta["tensor_meta"].shape + return ( + node.kwargs.get("beta", 1.0) == 1.0 + and node.kwargs.get("alpha", 1.0) == 1.0 + and len(input_shape) == 2 + and len(weight_shape) == 2 + and all(x % 2 == 0 for x in input_shape + weight_shape) + and shape <= MAX_FUSE_TENSOR_SIZE_GROUP_LINEAR + for shape in input_shape + weight_shape + ) + + def _mm_node_can_be_fused(self, node): + input_shape = node.args[0].meta["tensor_meta"].shape + weight_shape = node.args[1].meta["tensor_meta"].shape + return ( + len(input_shape) == 2 + and len(weight_shape) == 2 + and all(x % 2 == 0 for x in input_shape + weight_shape) + and shape <= MAX_FUSE_TENSOR_SIZE_GROUP_LINEAR + for shape in input_shape + weight_shape + ) + + def match(self, node): + if CallFunctionVarArgs(aten.mm.default).match( + node + ) and self._mm_node_can_be_fused(node): + group_key = ("group_linear", True) + elif CallFunctionVarArgs(aten.addmm.default).match( + node + ) and self._addmm_node_can_be_fused(node): + bias = node.args[0] + group_key = ("group_linear", bias is None) + else: + group_key = None + return group_key + + def fuse(self, graph, subset): + group_inputs = [] + group_weights = [] + group_biases = [] + group_nodes = [] + for node in subset: + if CallFunctionVarArgs(aten.addmm.default).match(node): + bias, input, weight = node.args + else: + assert CallFunctionVarArgs(aten.mm.default).match(node) + input, weight = node.args + bias = None + + group_nodes.append(node) + group_inputs.append(input) + group_weights.append(weight) + group_biases.append(bias) + + if all(bias is None for bias in group_biases): + group_biases = None + + with graph.inserting_before(subset[0]): + fused_mm = graph.call_function( + torch.ops.fbgemm.gmm, + args=(group_inputs, group_weights, group_biases), + ) + + for i, original_mm in 
enumerate(group_nodes): + with graph.inserting_after(fused_mm): + new_mm = graph.call_function(operator.getitem, args=(fused_mm, i)) + original_mm.replace_all_uses_with(new_mm) + new_mm.meta.update(original_mm.meta) + graph.erase_node(original_mm) + + +class BatchLinearLHSFusion(BatchFusion): + """ + Batch linear left-hand side fusion. This pass tries to fuse the following patterns: + + torch.nn.functional.linear(x, w1), linear(x, w2),... * linear(x, wn) + -> torch.mm(x, torch.cat([w1, w2,... * wn]).transpose(0, 1)) + + We have a separate pass to eliminate contiguous transpose in a generic way. + """ + + def match(self, node): + if CallFunctionVarArgs(torch.nn.functional.linear).match( + node + ) and is_linear_node_can_be_fused(node): + input = get_arg_value(node, 0, "input") + bias = get_arg_value(node, 2, "bias") + group_key = ("batch_linear_lhs", bias is None, input) + else: + group_key = None + return group_key + + def fuse(self, graph, subset): + batch_nodes = [] + batch_input = None + batch_weights = [] + batch_biases = [] + split_sections = [] + for node in subset: + input = get_arg_value(node, 0, "input") + weight = get_arg_value(node, 1, "weight") + bias = get_arg_value(node, 2, "bias") + batch_nodes.append(node) + if batch_input is None: + batch_input = input + else: + assert batch_input is input + batch_weights.append(weight) + if bias: + batch_biases.append(bias) + split_sections.append(weight.meta["example_value"].shape[0]) + + with graph.inserting_before(subset[0]): + cat_weights = graph.call_function(torch.cat, args=((batch_weights, 0))) + transposed_weights = graph.call_function( + torch.transpose, args=(cat_weights, 0, 1) + ) + if len(batch_biases) > 0: + cat_biases = graph.call_function(torch.cat, args=((batch_biases, 0))) + fused_lhs = graph.call_function( + torch.addmm, + args=(cat_biases, batch_input, transposed_weights), + ) + else: + fused_lhs = graph.call_function( + torch.mm, + args=(batch_input, transposed_weights), + ) + fused_lhs_list = 
graph.call_function( + torch.split, args=((fused_lhs, split_sections, 1)) + ) + + for i, node in enumerate(batch_nodes): + with graph.inserting_after(fused_lhs_list): + new_node = graph.call_function( + operator.getitem, args=(fused_lhs_list, i) + ) + node.replace_all_uses_with(new_node) + new_node.meta.update(node.meta) + graph.erase_node(node) + + +def is_node_meta_valid(node): + if node is None: + return True + if "example_value" not in node.meta: + return False + return True + + +def is_linear_node_can_be_fused(node): + input = get_arg_value(node, 0, "input") + weight = get_arg_value(node, 1, "weight") + return ( + is_node_meta_valid(node) + and len(input.meta["example_value"].shape) == 2 + and len(weight.meta["example_value"].shape) == 2 + ) + + +class BatchLinearFusion(BatchFusion): + """ + Batch linear fusion in pre grad pass. + Fuse linear with same size with torch.baddmm + """ + + def _getitem_args(self, getitem_node: torch.fx.Node): + if getitem_node.target != operator.__getitem__ or ( + getitem_node.op != "call_function" + ): + return None + return getitem_node.args[0] + + def match(self, node): + if CallFunctionVarArgs(torch.nn.functional.linear).match( + node + ) and is_linear_node_can_be_fused(node): + input = get_arg_value(node, 0, "input") + weight = get_arg_value(node, 1, "weight") + bias = get_arg_value(node, 2, "bias") + group_key = ( + "batch_linear_pre_grad", + self._getitem_args(input), + str(input.meta["example_value"].shape), + str(weight.meta["example_value"].shape), + bias is None, + ) + else: + group_key = None + return group_key + + def fuse(self, graph, subset): + batch_nodes = [] + batch_inputs = [] + batch_weights = [] + batch_biases = [] + for node in subset: + batch_nodes.append(node) + batch_inputs.append(get_arg_value(node, 0, "input")) + batch_weights.append(get_arg_value(node, 1, "weight")) + batch_biases.append(get_arg_value(node, 2, "bias")) + + with graph.inserting_before(subset[0]): + stack_inputs = 
graph.call_function(torch.stack, args=(batch_inputs, 0)) + stack_weights = graph.call_function(torch.stack, args=(batch_weights, 0)) + transpose_weight = graph.call_function( + torch.transpose, args=(stack_weights, 1, 2) + ) + if all(bias is None for bias in batch_biases): + bmm = graph.call_function( + torch.bmm, + args=(stack_inputs, transpose_weight), + ) + else: + stack_biases = graph.call_function(torch.stack, args=(batch_biases, 0)) + unsqueeze_biases = graph.call_function( + torch.unsqueeze, args=(stack_biases, 1) + ) + bmm = graph.call_function( + torch.baddbmm, + args=(unsqueeze_biases, stack_inputs, transpose_weight), + ) + + bmm = graph.call_function(torch.unbind, args=(bmm,), kwargs={"dim": 0}) + for i, linear in enumerate(batch_nodes): + with graph.inserting_after(bmm): + getitem = graph.call_function(operator.getitem, args=(bmm, i)) + linear.replace_all_uses_with(getitem) + getitem.meta.update(linear.meta) + graph.erase_node(linear) + + +class BatchTanhFusion(BatchFusion): + """ + Batch tanh fusion in pre grad pass. + We only fuse the tahn if the input is after same split node. 
+ """ + + def _getitem_args(self, getitem_node: torch.fx.Node): + if getitem_node.target != operator.__getitem__ or ( + getitem_node.op != "call_function" + ): + return None + return getitem_node.args[0] + + def match(self, node): + input = get_arg_value(node, 0, "input") + if ( + CallFunctionVarArgs(torch.tanh).match(node) + and is_node_meta_valid(node) + and self._getitem_args(input) is not None + ): + group_key = ( + "batch_tanh", + self._getitem_args(input), + str(input.meta["example_value"].shape), + ) + else: + group_key = None + return group_key + + def fuse(self, graph, subset): + batch_nodes = [] + batch_inputs = [] + + for node in subset: + batch_nodes.append(node) + batch_inputs.append(get_arg_value(node, 0, "input")) + + with graph.inserting_before(subset[0]): + stack_inputs = graph.call_function(torch.stack, args=(batch_inputs, 0)) + + batch_tanh = graph.call_function( + torch.tanh, + args=(stack_inputs,), + ) + unbind_tanh = graph.call_function( + torch.unbind, args=(batch_tanh,), kwargs={"dim": 0} + ) + for i, node in enumerate(batch_nodes): + with graph.inserting_after(unbind_tanh): + getitem = graph.call_function( + operator.getitem, args=(unbind_tanh, i) + ) + node.replace_all_uses_with(getitem) + getitem.meta.update(node.meta) + graph.erase_node(node) + + +class BatchLayernormFusion(BatchFusion): + """ + Batch layer norm fusion in pre grad pass + """ + + def match(self, node): + if CallFunctionVarArgs(torch.nn.functional.layer_norm).match(node): + input = get_arg_value(node, 0, "input") + weight = get_arg_value(node, 2, "weight") + bias = get_arg_value(node, 3, "bias") + group_key = ( + ( + "batch_layernorm", + str(input.meta["example_value"].shape), + str(weight.meta["example_value"].shape) + if weight is not None + else "", + str(bias.meta["example_value"].shape) if bias is not None else "", + str(get_arg_value(node, 1, "normalized_shape")), + str(get_arg_value(node, 4, "eps")), + ) + if "example_value" in input.meta + and 
is_node_meta_valid(weight) + and is_node_meta_valid(bias) + else None + ) + else: + group_key = None + return group_key + + def fuse(self, graph, subset): + group_inputs = [] + group_shapes = [] + group_weights = [] + group_biases = [] + group_epss = [] + group_nodes = [] + for node in subset: + group_nodes.append(node) + group_inputs.append(get_arg_value(node, 0, "input")) + group_shapes.append(get_arg_value(node, 1, "normalized_shape")) + group_weights.append(get_arg_value(node, 2, "weight")) + group_biases.append(get_arg_value(node, 3, "bias")) + eps = get_arg_value(node, 4, "eps") + if eps is None: + eps = 1e-5 + group_epss.append(eps) + stack_dim = -1 - len(group_shapes[-1]) + + if all(bias is None for bias in group_biases): + group_biases = None + if all(weight is None for weight in group_weights): + group_weights = None + assert all( + eps == group_epss[0] for eps in group_epss + ), "all epsilon values must be equal" + + with graph.inserting_before(subset[0]): + stack_input = graph.call_function( + torch.stack, args=(group_inputs, stack_dim) + ) + if group_weights is not None: + stack_weight = graph.call_function(torch.stack, args=(group_weights,)) + else: + stack_weight = None + if group_biases is not None: + stack_bias = graph.call_function(torch.stack, args=(group_biases,)) + else: + stack_bias = None + + batch_layer_norm = graph.call_function( + torch.nn.functional.layer_norm, + args=(stack_input, group_shapes[-1]), + kwargs={"eps": group_epss[-1]}, + ) + + if group_weights is not None and group_biases is not None: + batch_layer_norm = graph.call_function( + torch.addcmul, args=(stack_bias, stack_weight, batch_layer_norm) + ) + elif group_weights is not None and group_biases is None: + batch_layer_norm = graph.call_function( + torch.mul, args=(stack_weight, batch_layer_norm) + ) + elif group_weights is None and group_biases is not None: + batch_layer_norm = graph.call_function( + torch.add, args=(stack_bias, batch_layer_norm) + ) + + 
batch_layer_norm_unbind = graph.call_function( + torch.unbind, + args=(batch_layer_norm,), + kwargs={"dim": stack_dim}, + ) + + for i, node in enumerate(group_nodes): + with graph.inserting_after(batch_layer_norm_unbind): + new_node = graph.call_function( + operator.getitem, args=(batch_layer_norm_unbind, i) + ) + node.replace_all_uses_with(new_node) + new_node.meta.update(node.meta) + graph.erase_node(node) + + +def find_independent_subset_greedy(node_list): + """ + Return a list of subset from node_list, all nodes in each subset are independent with each other and can be fused together. + The type of subset is list, so we can preserve node's order and benefit from split-cat elimination in later pass. + """ + visited_node_set = set() + dep_set = set() + + def find_dependent_nodes(src_node, cur_node): + for input_node in cur_node.all_input_nodes: + if input_node in node_list: + dep_set.add(input_node) + + if input_node not in visited_node_set: + visited_node_set.add(input_node) + find_dependent_nodes(src_node, input_node) + + while len(node_list) > 0: + subset = [] + subset_deps = set() + + for node in node_list: + if len(subset) >= MAX_FUSE_SET_SIZE: + break + + visited_node_set.clear() + dep_set.clear() + + find_dependent_nodes(node, node) + if not dep_set.intersection(subset) and node not in subset_deps: + subset.append(node) + subset_deps.update(dep_set) + + if len(subset) >= MIN_FUSE_SET_SIZE: + yield subset + + next_round_node_list = [node for node in node_list if node not in subset] + node_list = next_round_node_list + + +def get_fusion_candidates(rule, root_node, fused_set): + """ + Search fusion candidates for a specific rule using BFS starting from the root node. + We only search the subgraph within MAX_FUSE_SEARCH_DEPTH. 
+ """ + q = collections.deque() + + candidate_dict = collections.defaultdict(list) + visited_set = set() + + for next_node in root_node.all_input_nodes: + q.append((1, next_node)) + visited_set.add(next_node) + + while len(q) > 0: + depth, node = q.popleft() + + if node in fused_set: + continue + + key = rule.match(node) + if key is not None: + candidate_nodes = candidate_dict[key] + if node not in candidate_nodes: + candidate_nodes.append(node) + else: + if depth < MAX_FUSE_SEARCH_DEPTH: + for next_node in node.all_input_nodes: + if next_node not in visited_set: + visited_set.add(next_node) + q.append((depth + 1, next_node)) + + return candidate_dict + + +def apply_group_batch_fusion(graph, rule): + stable_topological_sort(graph) + fused_set = set() + + for node in reversed(graph.nodes): + candidates = get_fusion_candidates(rule, node, fused_set) + + for key, candidate_nodes in candidates.items(): + if len(candidate_nodes) < MIN_FUSE_SET_SIZE: + continue + + for subset in find_independent_subset_greedy(candidate_nodes): + rule.fuse(graph, subset) + fused_set.update(subset) + if isinstance(rule, GroupFusion): + counters["inductor"]["group_fusion"] += 1 + else: + counters["inductor"]["batch_fusion"] += 1 + + log.info( + f"{rule.__class__.__name__}: key = {key}; subset size = {len(subset)}" # noqa: G004 + ) + + +def group_batch_fusion_post_grad_passes(graph: torch.fx.Graph): + fusions = [] + + if config.group_fusion and has_fbgemm: + fusions += [GroupLinearFusion()] + + for rule in fusions: + apply_group_batch_fusion(graph, rule) + + +def group_batch_fusion_pre_grad_passes(graph: torch.fx.Graph): + fusions = [] + if config.batch_fusion: + fusions += [ + BatchLinearFusion(), + BatchLinearLHSFusion(), + BatchLayernormFusion(), + BatchTanhFusion(), + ] + for rule in fusions: + apply_group_batch_fusion(graph, rule) diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/joint_graph.py 
b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/joint_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..fa298bb3d27a05a6c1a8f7178cb78c336531bdfe --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/joint_graph.py @@ -0,0 +1,256 @@ +import logging +from collections import Counter +from typing import Set + +import torch +import torch._guards +from .. import config +from ..pattern_matcher import ( + CallFunction, + init_once_fakemode, + KeywordArg, + Match, + PatternMatcherPass, + register_graph_pattern, + stable_topological_sort, +) +from .replace_random import replace_random_passes + +log = logging.getLogger(__name__) +patterns = PatternMatcherPass() + + +@init_once_fakemode +def lazy_init(): + from .fuse_attention import _sfdp_init + from .pad_mm import _pad_mm_init + + _pad_mm_init() + _sfdp_init() + + +@torch.utils._python_dispatch._disable_current_modes() +def remove_no_ops( + gm: torch.fx.GraphModule, zeros: Set[torch.fx.Node], ones: Set[torch.fx.Node] +): + "Removes no-ops: (+ 0, - 0, * 1, / 1)" + aten = torch.ops.aten + graph = gm.graph + + def fake_tensors_eq(t1, t2, fields=("shape", "dtype", "device")): + for field in fields: + if getattr(t1, field) != getattr(t2, field): + return False + return True + + def replace_no_op(node, replace_input_index): + replacement = node.args[replace_input_index] + + # https://github.com/pytorch/pytorch/issues/86128 causes + # non-Tensor inputs even for ops with only Tensor inputs. 
+ # TODO - decompose/type promote to avoid this + if not all(isinstance(arg, torch.fx.Node) for arg in node.args): + return + + if not fake_tensors_eq(node.meta["val"], replacement.meta["val"]): + if fake_tensors_eq( + node.meta["val"], + replacement.meta["val"], + ("shape", "device"), + ): + with graph.inserting_after(node): + replacement = graph.call_function( + torch.ops.prims.convert_element_type.default, + args=(replacement, node.meta["val"].dtype), + ) + else: + return + + node.replace_all_uses_with(replacement) + replacement.meta.update(node.meta) + graph.erase_node(node) + + for node in graph.nodes: + if node.op != "call_function": + continue + + # TODO handle Tensor-Scalar adds, it's a different schema + if node.target == aten.add.Tensor and len(node.args) == 2: + if ( + not any(e in zeros for e in node.args) + or node.kwargs.get("alpha", 1) != 1 + ): + continue + + replace_index = 1 if node.args[0] in zeros else 0 + replace_no_op(node, replace_index) + + elif node.target == aten.sub.Tensor and len(node.args) == 2: + if node.args[1] not in zeros or node.kwargs.get("alpha", 1) != 1: + continue + + replace_no_op(node, 0) + + elif node.target == aten.mul.Tensor and len(node.args) == 2: + if not any(e in ones for e in node.args): + continue + + replace_input_index = 1 if node.args[0] in ones else 0 + replace_no_op(node, replace_input_index) + + elif ( + node.target == aten.div.Tensor + and len(node.args) == 2 + and node.args[1] in ones + ): + replace_no_op(node, 0) + + +@torch.utils._python_dispatch._disable_current_modes() +def constant_fold_uniform_value(gm): + "Runs constant folding and replaces constants which can be constructed with a single `full` call. Calls into remove_no_ops." 
+ aten = torch.ops.aten + from torch._inductor.freezing import ConstantFolder + + def is_uniform_valued_tensor(t): + return t.numel() != 0 and (t == t.flatten()[0]).all() + + cf = ConstantFolder(gm, insertable_tensor_check=is_uniform_valued_tensor) + cf.run() + + node_replacements = cf.node_replacements + graph = gm.graph + + zeros = set() + ones = set() + + # Got failures in `test_is_set_to_cuda` if we change aliasing on constants, + # so just constant-ify if a Tensor is unaliased + constant_data_ptrs = Counter() + + for constant in node_replacements.values(): + if ( + constant.numel() != 0 + and torch._C._has_storage(constant) + and constant.layout == torch.strided + ): + constant_data_ptrs[constant.untyped_storage().data_ptr()] += 1 + + for node, constant in node_replacements.items(): + # Constant folding can leak memory, especially with repeated compilation, so we are only going to + # remove constants which can be replaced with a constructor. + + # TODO - we could also Tensors which get replaced with arange here + if not is_uniform_valued_tensor(constant): + continue + + # we dont have a functional way right now of instantiating a non-contiguous tensor with full/zeros/ones right now + # hasn't shown up to be important yet + if ( + not constant.is_contiguous(memory_format=torch.contiguous_format) + or not constant.layout == torch.strided + ): + continue + + if ( + torch._C._has_storage(constant) + and constant_data_ptrs[constant.untyped_storage().data_ptr()] != 1 + ): + continue + + value = constant.flatten()[0].item() + + with graph.inserting_after(node): + # the conversion from tensor and back to value can be lossy, just use the original full ctor value + if ( + node.op == "call_function" + and node.target == aten.full.default + and len(node.args) == 2 + ): + value = node.args[1] + + # zeros, and ones just get traced into full, so we insert those + new_node = graph.call_function( + aten.full.default, + args=(list(constant.shape), value), + kwargs={ + "dtype": 
constant.dtype, + "layout": torch.strided, + "device": constant.device, + "pin_memory": False, + }, + ) + + new_node.meta.update(node.meta) + node.replace_all_uses_with(new_node) + graph.erase_node(node) + + if value == 0: + zeros.add(new_node) + elif value == 1: + ones.add(new_node) + + remove_no_ops(gm, zeros, ones) + + +def joint_graph_passes(graph: torch.fx.GraphModule): + """ + Run FX transformations on the joint forwards+backwards graph. + """ + lazy_init() + count = 0 + + if config.joint_graph_constant_folding: + constant_fold_uniform_value(graph) + + if config.pattern_matcher: + count += patterns.apply(graph.graph) + + if not config.fallback_random: + count += replace_random_passes(graph) + + if count: + stable_topological_sort(graph.graph) + graph.graph.lint() + graph.recompile() + return graph + + +@register_graph_pattern( + CallFunction( + torch.ops.prims.convert_element_type.default, + CallFunction( + torch.ops.prims.convert_element_type.default, + KeywordArg("arg"), + KeywordArg("dtype1"), + ), + KeywordArg("dtype2"), + ), + pass_dict=patterns, +) +def pointless_convert(match: Match, arg, dtype1, dtype2): + """Remove chain of dtype conversions often created by AMP""" + graph = match.graph + node = match.output_node() + allowed = {torch.float16, torch.bfloat16, torch.float32, torch.float64} + if dtype1 in allowed and dtype2 in allowed: + repl = graph.call_function( + torch.ops.prims.convert_element_type.default, (arg, dtype2) + ) + repl.meta.update(node.meta) + node.replace_all_uses_with(repl) + match.erase_nodes(graph) + + +@register_graph_pattern( + CallFunction(torch.ops.aten.view.default, KeywordArg("arg"), KeywordArg("size")), + pass_dict=patterns, +) +def pointless_view(match: Match, arg, size): + """Remove no-op view""" + graph = match.graph + node = match.output_node() + arg_size = list(node.args[0].meta["val"].shape) + if size == arg_size: + node.replace_all_uses_with(node.args[0]) + match.erase_nodes(graph) diff --git 
a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/mkldnn_fusion.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/mkldnn_fusion.py new file mode 100644 index 0000000000000000000000000000000000000000..1f33a56943ee3bbfa9d8b03ff3a637d00cccab27 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/mkldnn_fusion.py @@ -0,0 +1,1080 @@ +import functools +import operator +from functools import reduce + +import torch + +from torch.fx.experimental.symbolic_shapes import free_symbols + +from .. import ir + +from ..lowering import lowerings as L +from ..pattern_matcher import ( + Arg, + CallFunction, + filter_nodes, + get_arg_value, + KeywordArg, + MULTIPLE, +) +from ..virtualized import ops +from .freezing_patterns import register_freezing_graph_pattern +from .post_grad import register_lowering_pattern +from .quantization import ( + _register_quantization_lowerings, + _register_quantization_weight_pack_pass, +) + +if torch._C._has_mkldnn: + aten = torch.ops.aten + mkldnn = torch.ops.mkldnn + prims = torch.ops.prims + + _conv_args = [Arg() for _ in range(10)] + _linear_args = [Arg() for _ in range(6)] + _conv_transpose_args = [Arg() for _ in range(11)] + + def _conv_call(users=1): + return CallFunction( + mkldnn._convolution_pointwise.default, *_conv_args, _users=users + ) + + def _linear_call(users=1): + return CallFunction( + mkldnn._linear_pointwise.default, *_linear_args, _users=users + ) + + def _conv_transpose_call(users=1): + return CallFunction( + mkldnn._convolution_transpose_pointwise.default, + *_conv_transpose_args, + _users=users, + ) + + def _to_float(input_call, users=1): + return CallFunction( + prims.convert_element_type.default, + input_call, + KeywordArg("to_float"), + _users=users, + ) + + def _to_bf16(input_call): + return CallFunction( + prims.convert_element_type.default, + input_call, + KeywordArg("to_bf16"), + _users=1, + ) + + def _unary_fusion_pattern(unary_fusion, call_fn, users, 
is_bf16): + # only insert to_dtype if is_bf16 is True + computation_call = ( + _to_float(call_fn(), users=users) if is_bf16 else call_fn(users=users) + ) + out = unary_fusion(computation_call) + return _to_bf16(out) if is_bf16 else out + + def _gelu_fusion_1(computation_call): + return CallFunction( + aten.mul, + CallFunction(aten.mul, computation_call, 0.5), + CallFunction( + aten.add, + CallFunction( + aten.erf, + CallFunction(aten.mul, computation_call, 0.7071067811865476), + ), + 1, + ), + ) + + def _gelu_fusion_2(computation_call): + return CallFunction( + aten.mul, + CallFunction(aten.mul, computation_call, 0.5), + CallFunction( + aten.add, + CallFunction( + aten.tanh, + CallFunction( + aten.mul, + CallFunction( + aten.add, + computation_call, + CallFunction( + aten.mul, + CallFunction( + aten.mul, + CallFunction( + aten.mul, computation_call, computation_call + ), + computation_call, + ), + 0.044715, + ), + ), + 0.7978845608028654, + ), + ), + 1, + ), + ) + + def _hardswish_fusion(computation_call): + return CallFunction( + aten.div, + CallFunction( + aten.mul, + computation_call, + CallFunction( + aten.clamp_max, + CallFunction( + aten.clamp_min, CallFunction(aten.add, computation_call, 3), 0 + ), + 6, + ), + ), + 6, + ) + + def _silu_fusion(computation_call): + return CallFunction( + aten.mul, computation_call, CallFunction(aten.sigmoid, computation_call) + ) + + def _hardsigmoid_fusion(computation_call): + return CallFunction( + aten.div, + CallFunction( + aten.clamp_max, + CallFunction( + aten.clamp_min, CallFunction(aten.add, computation_call, 3), 0 + ), + 6, + ), + 6, + ) + + def _leaky_relu_fusion(computation_call): + return CallFunction( + aten.where, + CallFunction(aten.gt, computation_call, 0), + computation_call, + CallFunction(aten.mul, computation_call, KeywordArg("negative_slope")), + ) + + def _hardtanh_fusion(computation_call): + return CallFunction( + aten.clamp_max, + CallFunction(aten.clamp_min, computation_call, KeywordArg("min_value")), 
+ KeywordArg("max_value"), + ) + + def _combined_fusion(computation_call, elementwise_op): + return CallFunction(elementwise_op, computation_call) + + # binary_op(other, computation_op) + def _binary_fusion_v1(computation_call, binary_fn): + return CallFunction(binary_fn, KeywordArg("other"), computation_call) + + # binary_op(computation_op, other) + def _binary_fusion_v2(computation_call, binary_fn): + return CallFunction(binary_fn, computation_call, KeywordArg("other")) + + def _is_single_computation_op(computation_op): + def fn(match): + computation_nodes = filter_nodes(match.nodes, computation_op) + if len(computation_nodes) < 1: + return False + if any(n.args[-3] != "none" for n in computation_nodes): + return False + return True + + return fn + + def _is_valid_computation_unary_fusion(computation_op, is_bf16=False): + def fn(match): + matched = _is_single_computation_op(computation_op)(match) + computation_node = filter_nodes(match.nodes, computation_op)[0] + if is_bf16: + conversion_dtype_nodes = filter_nodes( + match.nodes, prims.convert_element_type.default + ) + if len(conversion_dtype_nodes) != 2: + return False + # fusion pattern is always in the form of computation_op + to_float32 + unary_op + to_bfloat16 + if computation_node == conversion_dtype_nodes[0].args[0]: + to_float = conversion_dtype_nodes[0].args[1] + to_bf16 = conversion_dtype_nodes[1].args[1] + else: + to_float = conversion_dtype_nodes[1].args[1] + to_bf16 = conversion_dtype_nodes[0].args[1] + matched = ( + matched and to_float == torch.float and to_bf16 == torch.bfloat16 + ) + return matched + + return fn + + def _register_unary_fusion_lowering( + pattern, unary_attr, computation_op, is_bf16=False + ): + @register_lowering_pattern( + pattern, + extra_check=_is_valid_computation_unary_fusion(computation_op, is_bf16), + ) + def fn(match, *args, **kwargs): + computation_args = list(args)[:-3] + [ + unary_attr.op_name, + unary_attr.scalars_attr, + unary_attr.algorithm_attr, + ] + return 
L[computation_op](*computation_args) + + return fn + + def _register_leaky_relu_fusion_lowering(pattern, computation_op, is_bf16=False): + @register_lowering_pattern( + pattern, extra_check=_is_single_computation_op(computation_op) + ) + def fn(match, *args, **kwargs): + negative_slope = kwargs.get("negative_slope") + if isinstance(negative_slope, ir.TensorBox): + matched = False + else: # inp is a Number + matched = True + if is_bf16: + dtype1 = kwargs.get("to_float") + dtype2 = kwargs.get("to_bf16") + matched = matched and dtype1 == torch.float and dtype2 == torch.bfloat16 + computation_args = list(args) + if matched: + computation_args = computation_args[:-3] + [ + "leaky_relu", + [negative_slope], + "", + ] + return L[computation_op](*computation_args) + else: + # computation_args += ["none", [], ""] + out = L[computation_op](*computation_args) + if is_bf16: + out = L[prims.convert_element_type.default](out, dtype=torch.float) + out = L[aten.where]( + L[aten.gt](out, 0), + out, + L[aten.mul](out, negative_slope), + ) + if is_bf16: + out = L[prims.convert_element_type.default]( + out, dtype=torch.bfloat16 + ) + return out + + return fn + + def _register_hardtanh_fusion_lowering(pattern, computation_op, is_bf16=False): + @register_lowering_pattern( + pattern, extra_check=_is_single_computation_op(computation_op) + ) + def fn(match, *args, **kwargs): + min_value = kwargs.get("min_value") + max_value = kwargs.get("max_value") + if isinstance(min_value, ir.TensorBox) or isinstance( + max_value, ir.TensorBox + ): + matched = False + else: # inp is a Number + matched = min_value <= max_value + if is_bf16: + dtype1 = kwargs.get("to_float") + dtype2 = kwargs.get("to_bf16") + matched = matched and dtype1 == torch.float and dtype2 == torch.bfloat16 + computation_args = list(args) + if matched: + computation_args = computation_args[:-3] + [ + "hardtanh", + [min_value, max_value], + "", + ] + return L[computation_op](*computation_args) + else: + out = 
L[computation_op](*computation_args) + if is_bf16: + out = L[prims.convert_element_type.default](out, dtype=torch.float) + out = L[aten.clamp_max](L[aten.clamp_min](out, min_value), max_value) + if is_bf16: + out = L[prims.convert_element_type.default]( + out, dtype=torch.bfloat16 + ) + return out + + return fn + + _binary_attr = { + aten.add: "add", + ops.add: "add", + aten.sub: "sub", + ops.sub: "sub", + } + + def _is_valid_binary(match, fn): + binary_nodes = filter_nodes(match.nodes, fn) + if len(binary_nodes) < 1: + return False + if any( + not ( + hasattr(n.args[0], "meta") + and isinstance(n.args[0].meta.get("val", None), torch.Tensor) + ) + or not ( + hasattr(n.args[1], "meta") + and isinstance(n.args[1].meta.get("val", None), torch.Tensor) + ) + for n in binary_nodes + ): + return False + # check alpha is one. + if any( + get_arg_value(n, 2, kwarg_name="alpha") != 1.0 + and get_arg_value(n, 2, kwarg_name="alpha") is not None + for n in binary_nodes + ): + return False + if any( + n.args[0].meta["val"].size() != n.args[1].meta["val"].size() + or n.args[0].meta["val"].device != n.args[1].meta["val"].device + or n.args[0].meta["val"].dtype != n.args[1].meta["val"].dtype + for n in binary_nodes + ): + return False + # check args[0] and args[1] is not same + if any(n.args[0] == n.args[1] for n in binary_nodes): + return False + return True + + def _is_valid_computation_binary(computation_op, binary_op, other_index=None): + def fn(match): + if not _is_single_computation_op(computation_op)(match): + return False + if not _is_valid_binary(match, binary_op): + return False + return True + + return fn + + def _is_valid_computation_binary_inplace(computation_op, binary_op, other_index): + def fn(match): + if not _is_valid_computation_binary(computation_op, binary_op)(match): + return False + binary_nodes = filter_nodes(match.nodes, binary_op) + if any(len(n.args[other_index].users) > 1 for n in binary_nodes): + return False + if any( + n.args[other_index].op in 
["placeholder", "output"] + for n in binary_nodes + ): + return False + return True + + return fn + + def _register_binary_unary_fusion_lowering( + pattern, + computation_op, + binary_op, + fusion_op, + unary_attr=None, + ): + @register_lowering_pattern( + pattern, extra_check=_is_valid_computation_binary(computation_op, binary_op) + ) + def fn(match, *args, **kwargs): + other = kwargs.get("other") + assert isinstance(other, ir.TensorBox) + binary_attr = _binary_attr[binary_op] + args_list = list(args) + computation_args = [args_list[0], other] + args_list[1:-3] + [binary_attr] + if len(args_list) > 6: + if unary_attr is not None: + computation_args += [ + 1.0, + unary_attr.op_name, + unary_attr.scalars_attr, + unary_attr.algorithm_attr, + ] + else: + computation_args += [1.0, None, [], None] + return L[fusion_op](*computation_args) + + return fn + + def _register_binary_unary_maybe_inplace_fusion_lowering( + pattern, + computation_op, + binary_op, + inplace_fusion_op, + outplace_fusion_op, + unary_attr=None, + other_index=None, + ): + @register_lowering_pattern( + pattern, + extra_check=_is_valid_computation_binary_inplace( + computation_op, binary_op, other_index + ), + ) + def fn(match, *args, **kwargs): + other = kwargs.get("other") + assert isinstance(other, ir.TensorBox) + binary_attr = _binary_attr[binary_op] + args_list = list(args) + computation_args = [args_list[0], other] + args_list[1:-3] + [binary_attr] + if len(args_list) > 6: + if unary_attr is not None: + computation_args += [ + 1.0, + unary_attr.op_name, + unary_attr.scalars_attr, + unary_attr.algorithm_attr, + ] + else: + computation_args += [1.0, None, [], None] + # Make sure the other is not an alias or mutation(fx side doesn't has such info). 
+ other.realize() + can_be_inplace = not ( + isinstance(other.data, ir.ReinterpretView) + or isinstance(other.get_layout(), (ir.MutationLayout, ir.AliasedLayout)) + ) + if not can_be_inplace: + return L[outplace_fusion_op](*computation_args) + return L[inplace_fusion_op](*computation_args) + + return fn + + computation_ops = [ + mkldnn._convolution_pointwise.default, + mkldnn._linear_pointwise.default, + mkldnn._convolution_transpose_pointwise.default, + ] + + class UnaryAttr: + def __init__(self, op_name: str, scalars_attr=None, algorithm_attr=None): + self.op_name = op_name + self.scalars_attr = scalars_attr if scalars_attr else [] + self.algorithm_attr = algorithm_attr if algorithm_attr else "" + + def _register_unary_fusion(): + computation_call_fns = [_conv_call, _linear_call, _conv_transpose_call] + + def _unary_fusion_patterns(is_bf16): + replacement_unary_fusion_patterns = { + UnaryAttr("gelu", algorithm_attr="tanh"): [ + _unary_fusion_pattern(_gelu_fusion_2, call_fn, 4, is_bf16) + for call_fn in computation_call_fns + ], + UnaryAttr("gelu", algorithm_attr="none"): [ + _unary_fusion_pattern(_gelu_fusion_1, call_fn, 2, is_bf16) + for call_fn in computation_call_fns + ], + UnaryAttr("hardswish"): [ + _unary_fusion_pattern(_hardswish_fusion, call_fn, 2, is_bf16) + for call_fn in computation_call_fns + ], + UnaryAttr("hardsigmoid"): [ + _unary_fusion_pattern(_hardsigmoid_fusion, call_fn, 1, is_bf16) + for call_fn in computation_call_fns + ], + UnaryAttr("swish"): [ + _unary_fusion_pattern(_silu_fusion, call_fn, 2, is_bf16) + for call_fn in computation_call_fns + ], + } + if not is_bf16: + call_user1 = [call_fn(users=1) for call_fn in computation_call_fns] + replacement_unary_fusion_patterns.update( + { + UnaryAttr("relu"): [ + _combined_fusion(u, aten.relu) for u in call_user1 + ], + UnaryAttr("sigmoid"): [ + _combined_fusion(u, aten.sigmoid) for u in call_user1 + ], + UnaryAttr("tanh"): [ + _combined_fusion(u, aten.tanh) for u in call_user1 + ], + } + ) + + 
return replacement_unary_fusion_patterns + + for is_bf16 in [True, False]: + replace_patterns = _unary_fusion_patterns(is_bf16) + for unary_attr, patterns in replace_patterns.items(): + _register_unary_fusion_lowering( + patterns[0], unary_attr, computation_ops[0], is_bf16 + ) + _register_unary_fusion_lowering( + patterns[1], unary_attr, computation_ops[1], is_bf16 + ) + _register_unary_fusion_lowering( + patterns[2], unary_attr, computation_ops[2], is_bf16 + ) + _leaky_relu_patterns = [ + _unary_fusion_pattern(_leaky_relu_fusion, call_fn, 3, is_bf16) + for call_fn in computation_call_fns + ] + for pattern, computation_op in zip(_leaky_relu_patterns, computation_ops): + _register_leaky_relu_fusion_lowering(pattern, computation_op, is_bf16) + hardtanh_patterns = [ + _unary_fusion_pattern(_hardtanh_fusion, call_fn, 1, is_bf16) + for call_fn in computation_call_fns + ] + for pattern, computation_op in zip(hardtanh_patterns, computation_ops): + _register_hardtanh_fusion_lowering(pattern, computation_op, is_bf16) + + def _register_inplace_fusion(): + binary_ops = [aten.add, ops.add] + inplace_fusion_op = mkldnn._convolution_pointwise_.binary + outplace_fusion_op = mkldnn._convolution_pointwise.binary + conv_call = _conv_call(users=1) + conv_op = computation_ops[0] + for binary_op in binary_ops: + binary_v1 = _binary_fusion_v1(conv_call, binary_op) + binary_unary_v1 = _combined_fusion(binary_v1, aten.relu) + _register_binary_unary_maybe_inplace_fusion_lowering( + binary_unary_v1, + conv_op, + binary_op, + inplace_fusion_op, + outplace_fusion_op, + other_index=0, + unary_attr=UnaryAttr("relu"), + ) + _register_binary_unary_maybe_inplace_fusion_lowering( + binary_v1, + conv_op, + binary_op, + inplace_fusion_op, + outplace_fusion_op, + other_index=0, + ) + binary_v2 = _binary_fusion_v2(conv_call, binary_op) + binary_unary_v2 = _combined_fusion(binary_v2, aten.relu) + _register_binary_unary_maybe_inplace_fusion_lowering( + binary_unary_v2, + conv_op, + binary_op, + 
inplace_fusion_op, + outplace_fusion_op, + other_index=1, + unary_attr=UnaryAttr("relu"), + ) + _register_binary_unary_maybe_inplace_fusion_lowering( + binary_v2, + conv_op, + binary_op, + inplace_fusion_op, + outplace_fusion_op, + other_index=1, + ) + + def _register_binary_fusion(): + binary_ops = [aten.add, ops.add, aten.sub, ops.sub] + fusion_ops = [ + mkldnn._convolution_pointwise.binary, + mkldnn._linear_pointwise.binary, + ] + _computation_user_1 = [_conv_call(users=1), _linear_call(users=1)] + for computation_call, computation_op, fusion_op in zip( + _computation_user_1, computation_ops[:-1], fusion_ops + ): + for binary_op in binary_ops: + pattern = _binary_fusion_v2(computation_call, binary_op) + _register_binary_unary_fusion_lowering( + pattern, computation_op, binary_op, fusion_op + ) + + for binary_op in [aten.add, ops.add]: + pattern = _binary_fusion_v1(computation_call, binary_op) + _register_binary_unary_fusion_lowering( + pattern, computation_op, binary_op, fusion_op + ) + + def _register_binary_unary_fusion(): + binary_ops = [aten.add, ops.add, aten.sub, ops.sub] + fusion_ops = [mkldnn._convolution_pointwise.binary] + _computation_user_1 = [_conv_call(users=1)] + for computation_call, computation_op, fusion_op in zip( + _computation_user_1, computation_ops[:-1], fusion_ops + ): + for binary_op in binary_ops: + pattern_v1 = _combined_fusion( + _binary_fusion_v2(computation_call, binary_op), aten.relu + ) + _register_binary_unary_fusion_lowering( + pattern_v1, + computation_op, + binary_op, + fusion_op, + unary_attr=UnaryAttr("relu"), + ) + for binary_op in [aten.add, ops.add]: + pattern_v2 = _combined_fusion( + _binary_fusion_v1(computation_call, binary_op), aten.relu + ) + _register_binary_unary_fusion_lowering( + pattern_v2, + computation_op, + binary_op, + fusion_op, + unary_attr=UnaryAttr("relu"), + ) + + def _recover_linear(): + # convert reshape+linear+reshape to a single linear for applying fusion path. 
+ @register_freezing_graph_pattern( + CallFunction( + aten.reshape.default, + CallFunction( + mkldnn._linear_pointwise.default, + CallFunction( + aten.reshape.default, + Arg(), + KeywordArg("reshape_1"), + _users=MULTIPLE, + ), + Arg(), + Arg(), + Arg(), + Arg(), + Arg(), + ), + KeywordArg("reshape_2"), + ), + pass_number=1, + ) + def reshape_linear_reshape_pattern(match, *args, **kwargs): + reshape_1 = kwargs.get("reshape_1") + reshape_2 = kwargs.get("reshape_2") + assert len(reshape_1) == 2 + dynamic_shapes = not all( + isinstance(x, int) for x in ([reshape_1[0]] + reshape_2[:-1]) + ) + + graph = match.graph + reshape_2_node = match.output_node() + linear_input_node = reshape_2_node.args[0].args[0].args[0] + # check linear's input's shape[:-1] == reshape_2[:-1] + # and check product(reshape_2[:-1]) == reshape_1[0] + if dynamic_shapes: + # TODO: Haozhe investigate how add guard here + return + else: + can_remove_reshape = linear_input_node.meta.get("val").shape[ + :-1 + ] == torch.Size(reshape_2[:-1]) + can_remove_reshape = can_remove_reshape and ( + reduce(lambda x, y: x * y, reshape_2[:-1]) == reshape_1[0] + ) + + if can_remove_reshape: + repl = graph.call_function(mkldnn._linear_pointwise.default, args) + repl.meta.update(reshape_2_node.meta) + reshape_2_node.replace_all_uses_with(repl) + old_linear_node = reshape_2_node.args[0] + reshape_1_node = old_linear_node.args[0] + graph.erase_node(reshape_2_node) + graph.erase_node(old_linear_node) + if len(reshape_1_node.users) == 0: + graph.erase_node(reshape_1_node) + + def is_linear_add_bias(match): + add_node = match.output_node() + linear_node = add_node.args[0] + weight_meta = linear_node.args[1].meta.get("val") + bias_meta = add_node.args[1].meta.get("val") + if weight_meta is None or bias_meta is None: + return False + return ( + linear_node.args[2] is None + and bias_meta.dim() == 1 + and bias_meta.size(0) == weight_meta.size(0) + ) + + # convert linear+bias to a single linear for applying fusion path. 
+ @register_freezing_graph_pattern( + CallFunction( + aten.add.Tensor, + CallFunction(mkldnn._linear_pointwise.default, *_linear_args), + Arg(), + ), + pass_number=1, + extra_check=is_linear_add_bias, + ) + def linear_bias_pattern(match, *args): + graph = match.graph + add_node = match.output_node() + linear_node = add_node.args[0] + new_args = list(linear_node.args) + new_args[2] = add_node.args[1] + repl = graph.call_function( + mkldnn._linear_pointwise.default, tuple(new_args) + ) + repl.meta.update(add_node.meta) + add_node.replace_all_uses_with(repl) + match.erase_nodes(graph) + + def _is_packable_mkldnn_rnn_layer(match): + lstm_node = match.output_node() + POS_WEIGHTS = [1, 2] + POS_INPUTS = [0, 5, 6] + POS_ARGS = POS_WEIGHTS + POS_INPUTS + # Weights should be Constant + if any( + lstm_node.args[POS_WEIGHT].op != "get_attr" for POS_WEIGHT in POS_WEIGHTS + ): + return False + + # Meta info for weights and inputs should be available + if any(lstm_node.args[POS_ARG].meta.get("val") is None for POS_ARG in POS_ARGS): + return False + + # Check device + if any( + lstm_node.args[POS_ARG].meta.get("val").device.type != "cpu" + for POS_ARG in POS_ARGS + ): + return False + + # Check dtype + if any( + lstm_node.args[POS_ARG].meta.get("val").dtype == torch.bfloat16 + and not mkldnn._is_mkldnn_bf16_supported() + for POS_ARG in POS_ARGS + ): + return False + + return True + + def _is_packable_convolution(match): + """ + Check if the node is supported for MKLDNN convolution. 
+ """ + conv_node = match.output_node() + input_meta_value = conv_node.args[0].meta.get("val") + weight_meta_value = conv_node.args[1].meta.get("val") + if input_meta_value is None or weight_meta_value is None: + return False + input_size = input_meta_value.shape + if conv_node.args[1].op != "get_attr": + return False + for meta_value in [input_meta_value, weight_meta_value]: + if ( + meta_value is None + or meta_value.device.type != "cpu" + or meta_value.dim() != 4 + ): + return False + if ( + input_meta_value.dtype == torch.bfloat16 + or weight_meta_value.dtype == torch.bfloat16 + ): + if not mkldnn._is_mkldnn_bf16_supported(): + return False + is_transposed = conv_node.args[-3] + if is_transposed: + # TODO: Support dynamic shape case for MKLDNN conv transpose. + if free_symbols(input_size): + return False + groups = conv_node.args[-1] + in_channels = weight_meta_value.size(0) + # doesn't support group_depthwise_conv_transpose. + if groups > 1 and groups == in_channels: + return False + # Port from: aten/src/ATen/native/Convolution.cpp:is_output_padding_big + output_paddings = conv_node.args[-2] + strides = conv_node.args[3] + if any( + output_padding >= stride + for output_padding, stride in zip(output_paddings, strides) + ): + return False + return True + + def _is_packable_linear(match): + """ + Check if the node is supported for MKLDNN linear. + """ + linear_node = match.output_node() + # weight_idx is 1 for aten.mm and is 2 for aten.addmm + weight_idx = 2 if linear_node.target == aten.addmm.default else 1 + if linear_node.args[weight_idx].op != "get_attr": + return False + input_meta_value = linear_node.args[weight_idx - 1].meta.get("val") + weight_meta_value = linear_node.args[weight_idx].meta.get("val") + if input_meta_value is None or weight_meta_value is None: + return False + batch_size = input_meta_value.shape[0] + is_bf16_weight = weight_meta_value.dtype == torch.bfloat16 + # for fp32, mkl should be enabled and batch_size should not be a free symbol. 
+ if not is_bf16_weight and (free_symbols(batch_size) or (not torch._C.has_mkl)): + return False + for meta_value in [input_meta_value, weight_meta_value]: + if ( + meta_value is None + or meta_value.device.type != "cpu" + or meta_value.dim() != 2 + ): + return False + if weight_idx == 2: + bias_meta_value = linear_node.args[0].meta.get("val") + if ( + bias_meta_value is None + or meta_value.device.type != "cpu" + or bias_meta_value.dim() != 1 + or bias_meta_value.size(0) != weight_meta_value.size(1) + ): + return False + + if ( + input_meta_value.dtype == torch.bfloat16 + or weight_meta_value.dtype == torch.bfloat16 + ): + if not mkldnn._is_mkldnn_bf16_supported(): + return False + return True + + _aten_conv_args = ( + Arg(), + Arg(), + Arg(), + Arg(), + Arg(), + Arg(), + KeywordArg("is_transposed"), + Arg(), + Arg(), + ) + + _aten_mkldnn_rnn_layer_args = ( + Arg(), # input + Arg(), # weight0 + Arg(), # weight1 + Arg(), # weight2 + Arg(), # weight3 + Arg(), # hx_ + Arg(), # cx_ + KeywordArg("reverse"), # reverse + Arg(), # batch_sizes + Arg(), # mode + Arg(), # hidden_size + Arg(), # num_layers + Arg(), # has_biases + Arg(), # bidirectional + Arg(), # batch_first + Arg(), # train + ) + + def _register_weight_pack_pass(): + @register_freezing_graph_pattern( + CallFunction(aten.convolution.default, *_aten_conv_args), + extra_check=_is_packable_convolution, + ) + def convolution(match, *args, **kwargs): + is_transposed = kwargs.get("is_transposed") + assert isinstance(is_transposed, bool) + graph = match.graph + conv_node = match.output_node() + input_size = conv_node.args[0].meta.get("val").shape + with graph.inserting_before(conv_node): + constant_args = [args[4], args[3], args[5], args[-1]] + packed_weight_op = mkldnn._reorder_convolution_weight + packed_conv_op = mkldnn._convolution_pointwise.default + if is_transposed: + constant_args.insert(1, args[-2]) # output_padding + packed_weight_op = mkldnn._reorder_convolution_transpose_weight + packed_conv_op = 
mkldnn._convolution_transpose_pointwise.default + if not free_symbols(input_size): + packed_weight_inputs = ( + (args[1],) + tuple(constant_args) + (input_size,) + ) + packed_weight_node = graph.create_node( + "call_function", packed_weight_op, args=packed_weight_inputs + ) + else: + assert not is_transposed + # For dynamic shape case, we need to pack weight in runtime. + packed_weight_node = args[1] + packed_conv_inputs = ( + (args[0], packed_weight_node, args[2]) + + tuple(constant_args) + + ("none", [], "") + ) + packed_conv_node = graph.create_node( + "call_function", packed_conv_op, tuple(packed_conv_inputs) + ) + conv_node.replace_all_uses_with(packed_conv_node) + packed_conv_node.meta.update(conv_node.meta) + graph.erase_node(conv_node) + + @register_freezing_graph_pattern( + CallFunction(aten.mkldnn_rnn_layer.default, *_aten_mkldnn_rnn_layer_args), + extra_check=_is_packable_mkldnn_rnn_layer, + ) + def mkldnn_rnn_layer(match, *args, **kwargs): + def get_item(graph, node, index): + return graph.call_function(operator.getitem, (node, index)) + + graph = match.graph + lstm_node = match.output_node() + input = args[0] + weight0, weight1 = args[1:3] + reverse = kwargs.get("reverse") + packed_lstm_op = aten.mkldnn_rnn_layer.default + hidden_size = args[9] + has_biases = args[11] + batch_first = args[13] + with graph.inserting_before(lstm_node): + packed_weight_op = mkldnn._reorder_mkldnn_rnn_layer_weight.default + packed_weight_inputs = ( + weight0, + weight1, + hidden_size, + reverse, + has_biases, + batch_first, + ) + packed_weight_node = graph.create_node( + "call_function", packed_weight_op, packed_weight_inputs, {}, "name" + ) + packed_weight_items = [ + get_item(graph, packed_weight_node, i) for i in range(2) + ] + pack_lstm_inputs = ( + args[0], + *packed_weight_items, + args[3], + args[4], + args[5], + args[6], + reverse, + *args[7:], + ) + + packed_lstm_node = graph.create_node( + "call_function", packed_lstm_op, args=pack_lstm_inputs + ) + 
lstm_node.replace_all_uses_with(packed_lstm_node) + packed_lstm_node.meta.update(lstm_node.meta) + graph.erase_node(lstm_node) + + @register_freezing_graph_pattern( + CallFunction(aten.addmm.default, Arg(), Arg(), Arg()), + extra_check=_is_packable_linear, + ) + @register_freezing_graph_pattern( + CallFunction(aten.mm.default, Arg(), Arg()), + extra_check=_is_packable_linear, + ) + def linear(match, *args, **kwargs): + graph = match.graph + linear_node = match.output_node() + input = args[0] if linear_node.target == aten.mm.default else args[1] + bias = None if linear_node.target == aten.mm.default else args[0] + weight = args[1] if linear_node.target == aten.mm.default else args[2] + with graph.inserting_before(linear_node): + transpose_weight_node = graph.create_node( + "call_function", aten.permute.default, (weight, (1, 0)) + ) + weight_dtype = weight.meta.get("val").dtype + is_bf16_weight = weight_dtype == torch.bfloat16 + batch_size = input.meta.get("val").shape[0] + if free_symbols(batch_size): + assert ( + is_bf16_weight + ), f"only bf16 weight prepacking supports dynamic shape inputs but got {weight_dtype}" + # For bfloat16 dynamic shape path, using input size hint to pack weight for a better performance. 
+ packed_weight_inputs = ( + transpose_weight_node, + batch_size.node.shape_env.size_hint(batch_size.node.expr) + if free_symbols(batch_size) + else batch_size, + ) + packed_weight_inputs = (transpose_weight_node, batch_size) + packed_weight_op = ( + mkldnn._reorder_linear_weight + if is_bf16_weight + else torch.ops.mkl._mkl_reorder_linear_weight + ) + packed_weight_node = graph.create_node( + "call_function", packed_weight_op, args=packed_weight_inputs + ) + + packed_linear_inputs = (input, packed_weight_node) + if is_bf16_weight: + packed_linear_inputs += (bias, "none", [], "") + packed_linear_op = mkldnn._linear_pointwise.default + else: + packed_linear_inputs += (transpose_weight_node, bias, batch_size) + packed_linear_op = torch.ops.mkl._mkl_linear + packed_linear_node = graph.create_node( + "call_function", packed_linear_op, packed_linear_inputs + ) + linear_node.replace_all_uses_with(packed_linear_node) + packed_linear_node.meta.update(linear_node.meta) + graph.erase_node(linear_node) + + def _eliminate_duplicate_packed_nodes(gm): + """ + Combine packed weight nodes with the same inputs to reduce memory usage. + for example: + class Model(nn.Module): + def __init__(self): + super().__init__() + self.linear = nn.Linear(32, 32, bias=True) + + def forward(self, x): + return self.linear(self.linear(x)) + + the above's packed weight nodes are duplicate if two linear calls have same input size. 
+ """ + if not (torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available()): + return gm + + packed_weight_ops = [ + torch._C._nn.mkldnn_reorder_conv2d_weight, + mkldnn._reorder_convolution_transpose_weight, + mkldnn._reorder_linear_weight, + mkldnn._reorder_mkldnn_rnn_layer_weight, + ] + if torch._C.has_mkl: + packed_weight_ops.append(torch.ops.mkl._mkl_reorder_linear_weight) + + for node in gm.graph.nodes: + if node.target in packed_weight_ops and len(node.args[0].users) > 1: + for user_node in list(node.args[0].users.keys()): + if ( + user_node.target == node.target + and user_node != node + and user_node.args == node.args + ): + user_node.replace_all_uses_with(node) + gm.graph.erase_node(user_node) + + @functools.lru_cache(None) + def _mkldnn_fusion_init(): + if torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available(): + _register_unary_fusion() + _register_inplace_fusion() + _register_binary_unary_fusion() + _register_binary_fusion() + _register_quantization_lowerings() + + @functools.lru_cache(None) + def _mkldnn_weight_pack_init(): + if torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available(): + _register_weight_pack_pass() + _recover_linear() + _register_quantization_weight_pack_pass() diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/pad_mm.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/pad_mm.py new file mode 100644 index 0000000000000000000000000000000000000000..e270fd652c4d7e2b92644360c04296f49bcb4331 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/pad_mm.py @@ -0,0 +1,445 @@ +import functools +from itertools import chain +from typing import Optional + +import torch +from torch import Tensor +from torch._inductor import utils +from torch.utils._mode_utils import no_dispatch + +from ..pattern_matcher import inference_graph, register_replacement, training_graph + +aten = torch.ops.aten + + +def fetch_fake_tensors(match, 
kwarg_names): + kwargs = match.kwargs + return [kwargs[name].meta["val"] for name in kwarg_names] + + +def unwrap_fake_args(*arg_names): + def decorator(func): + def wrapper(match): + fake_tensors = fetch_fake_tensors(match, arg_names) + return func(*fake_tensors) + + return wrapper + + return decorator + + +def get_alignment_size(x): + if x.dtype == torch.float16 or x.dtype == torch.half or x.dtype == torch.bfloat16: + return 8 + elif x.dtype == torch.float32 or x.dtype == torch.float: + return 4 + else: + return 0 + + +def check_device(a: Tensor, b: Tensor): + return a.is_cuda and b.is_cuda + + +def check_dtype(a: Tensor, b: Tensor): + return a.is_floating_point() and b.is_floating_point() + + +def is_symbolic(a: Optional[Tensor]): + return a is not None and any( + isinstance(x, torch.SymInt) for x in chain(a.size(), a.stride()) + ) + + +def any_is_symbolic(*args): + return any(is_symbolic(a) for a in args) + + +def should_pad_common(mat1, mat2, input=None): + return ( + torch._inductor.config.shape_padding + and check_device(mat1, mat2) + and check_dtype(mat1, mat2) + and not any_is_symbolic(mat1, mat2, input) + ) + + +def get_padded_length(x, alignment_size): + if alignment_size == 0 or x % alignment_size == 0: + return 0 + return int((x // alignment_size + 1) * alignment_size) - x + + +def pad_dim(x, padded_length, dim): + if padded_length == 0: + return x + pad = x.new_zeros(*x.shape[:dim], padded_length, *x.shape[dim + 1 :]) + return torch.cat([x, pad], dim=dim) + + +def addmm_pattern(input, mat1, mat2, beta, alpha): + return aten.addmm(input, mat1, mat2, beta=beta, alpha=alpha) + + +def should_pad_addmm(match): + mat1, mat2, input = fetch_fake_tensors(match, ("mat1", "mat2", "input")) + return should_pad_common(mat1, mat2, input) and should_pad_bench( + mat1, mat2, torch.ops.aten.addmm, input=input + ) + + +def addmm_replace(input, mat1, mat2, beta=1.0, alpha=1.0): + m_padded_length = get_padded_length(mat1.shape[0], get_alignment_size(mat1)) + 
k_padded_length = get_padded_length(mat1.shape[1], get_alignment_size(mat1)) + n_padded_length = get_padded_length(mat2.shape[1], get_alignment_size(mat2)) + + if m_padded_length != 0 or k_padded_length != 0 or n_padded_length != 0: + return pad_addmm( + input, + mat1, + mat2, + m_padded_length, + k_padded_length, + n_padded_length, + beta, + alpha, + ) + + return aten.addmm(input, mat1, mat2, beta=beta, alpha=alpha) + + +def pad_addmm( + input, + mat1, + mat2, + m_padded_length, + k_padded_length, + n_padded_length, + beta=1.0, + alpha=1.0, +): + # addmm decomp with padding will go through pad_addmm multiple times if multiple dimensions are needed to be padded + if k_padded_length != 0: + mat1 = pad_dim(mat1, k_padded_length, 1) + mat2 = pad_dim(mat2, k_padded_length, 0) + elif n_padded_length != 0: + mat2 = pad_dim(mat2, n_padded_length, 1) + elif m_padded_length != 0: + mat1 = pad_dim(mat1, m_padded_length, 0) + + if input is not None and k_padded_length == 0: + if n_padded_length != 0: + if input.dim() == 2: + input = pad_dim(input, n_padded_length, 1) + elif input.dim() == 1: + input = pad_dim(input, n_padded_length, 0) + elif m_padded_length != 0 and input.dim() == 2: + input = pad_dim(input, m_padded_length, 0) + + if k_padded_length != 0: + return addmm_replace(input, mat1, mat2, beta=beta, alpha=alpha) + elif n_padded_length != 0: + return addmm_replace(input, mat1, mat2, beta=beta, alpha=alpha)[ + :, :-n_padded_length + ] + else: + return addmm_replace(input, mat1, mat2, beta=beta, alpha=alpha)[ + :-m_padded_length, : + ] + + +def is_mm_compute_bound(M, K, N, dtype): + denominator = M * K + N * K + M * N + if denominator == 0: + return False + arithmetic_intensity = (M * N * K) / denominator + + # Fails with AMD + try: + machine_balance = ( + 1000 * utils.get_device_tflops(dtype) + ) / utils.get_gpu_dram_gbps() + except Exception: + return True + + # dram_gbps might be underestimating bandwidth because of cache. 
+ # if we estimate machine balance too low we might miss some speedups, + # if we extimate too high there will be unnecessary compilation time increase. + # TODO - finetune coefficient here. As a reference point, Triton mm model assumes + # 80% of reads are in cache and cache is 4x faster than dram_gbps + machine_balance = machine_balance * 0.5 + + return arithmetic_intensity > machine_balance + + +@functools.lru_cache(None) +def get_pad_cache(): + return torch._inductor.codecache.LocalCache() + + +def get_cached_should_pad(key): + return get_pad_cache().lookup(key) + + +def set_cached_should_pad(key, value): + return get_pad_cache().set_value(key, value=value) + + +def should_pad_bench_key(mat1, mat2, op, input=None): + def tensor_key(t): + return (t.shape, t.stride(), t.dtype) + + tf32_key = ( + None if mat1.dtype != torch.float32 else torch.backends.cuda.matmul.allow_tf32 + ) + key = ( + tensor_key(mat1), + tensor_key(mat2), + op, + input if input is None else tensor_key(input), + tf32_key, + ) + + return str(key) + + +def should_pad_bench(mat1, mat2, op, input=None): + if not utils.has_triton(): + return False + + do_bench = functools.partial( + utils.do_bench, + warmup=5, + ) + + with no_dispatch(): + if op is torch.ops.aten.mm or op is torch.ops.aten.addmm: + m = mat1.shape[0] + k = mat1.shape[1] + n = mat2.shape[1] + + m_padded_length = get_padded_length(m, get_alignment_size(mat1)) + k_padded_length = get_padded_length(k, get_alignment_size(mat1)) + n_padded_length = get_padded_length(n, get_alignment_size(mat2)) + elif op is torch.ops.aten.bmm: + m = mat1.shape[1] + k = mat2.shape[2] + n = mat2.shape[2] + + m_padded_length = get_padded_length(m, get_alignment_size(mat1)) + k_padded_length = get_padded_length(k, get_alignment_size(mat1)) + n_padded_length = get_padded_length(n, get_alignment_size(mat2)) + else: + return False + + if m_padded_length == k_padded_length == n_padded_length == 0: + return False + + if not is_mm_compute_bound(m, k, n, 
mat1.dtype): + return False + + # We don't want to look up the cache for cases that are trivially false + # since it does file io + key = should_pad_bench_key(mat1, mat2, op, input) + + cached_pad = get_cached_should_pad(key) + if cached_pad is not None: + return cached_pad + + mat1 = torch.randn_like(mat1) + mat2 = torch.randn_like(mat2) + if op is torch.ops.aten.bmm or op is torch.ops.aten.mm: + ori_time = do_bench( + lambda: op(mat1, mat2), + ) + else: + if input is not None: + input = torch.randn_like(input) + ori_time = do_bench( + lambda: op(input, mat1, mat2), + ) + + mat1_pad = torch.randn_like(mat1) + mat2_pad = torch.randn_like(mat2) + + if op is torch.ops.aten.addmm: + input_pad = None + if input is not None and input.is_cuda: + input_pad = torch.randn_like(input) + pad_time = do_bench( + lambda: pad_addmm( + input_pad, + mat1_pad, + mat2_pad, + m_padded_length, + k_padded_length, + n_padded_length, + ), + ) + elif op is torch.ops.aten.mm: + pad_time = do_bench( + lambda: pad_mm( + mat1_pad, + mat2_pad, + m_padded_length, + k_padded_length, + n_padded_length, + ), + ) + else: + pad_time = do_bench( + lambda: pad_bmm( + mat1_pad, + mat2_pad, + m_padded_length, + k_padded_length, + n_padded_length, + ), + ) + + # Shape padding introduces additional memory ops. 
Based on microbenchmarks, 1.1x represents a reasonable + # tradeoff between performance improvement from shape padding and overhead from additional memory ops + # TODO: Build a learned model which would be better than this heuristic + should_pad = ori_time > pad_time * 1.1 + set_cached_should_pad(key, should_pad) + + return should_pad + + +def mm_pattern(mat1, mat2): + return aten.mm(mat1, mat2) + + +def should_pad_mm(match): + mat1, mat2 = fetch_fake_tensors(match, ("mat1", "mat2")) + return should_pad_common(mat1, mat2) and should_pad_bench( + mat1, mat2, torch.ops.aten.mm + ) + + +def mm_replace(mat1, mat2): + m_padded_length = get_padded_length(mat1.shape[0], get_alignment_size(mat1)) + k_padded_length = get_padded_length(mat1.shape[1], get_alignment_size(mat1)) + n_padded_length = get_padded_length(mat2.shape[1], get_alignment_size(mat2)) + + return pad_mm(mat1, mat2, m_padded_length, k_padded_length, n_padded_length) + + +def pad_mm(mat1, mat2, m_padded_length, k_padded_length, n_padded_length): + # mm_replace will go through pad_mm multiple times if multiple dimensions are needed to be padded + if k_padded_length != 0: + mat1 = pad_dim(mat1, k_padded_length, 1) + mat2 = pad_dim(mat2, k_padded_length, 0) + return torch.ops.aten.mm(mat1, mat2) + elif n_padded_length != 0: + mat2 = pad_dim(mat2, n_padded_length, 1) + return torch.ops.aten.mm(mat1, mat2)[:, :-n_padded_length] + else: + mat1 = pad_dim(mat1, m_padded_length, 0) + return torch.ops.aten.mm(mat1, mat2)[:-m_padded_length, :] + + +def bmm_pattern(mat1, mat2): + return aten.bmm(mat1, mat2) + + +def should_pad_bmm(match): + mat1, mat2 = fetch_fake_tensors(match, ("mat1", "mat2")) + return should_pad_common(mat1, mat2) and should_pad_bench( + mat1, mat2, torch.ops.aten.bmm + ) + + +def bmm_replace(mat1, mat2): + m_padded_length = get_padded_length(mat1.shape[1], get_alignment_size(mat1)) + k_padded_length = get_padded_length(mat1.shape[2], get_alignment_size(mat1)) + n_padded_length = 
get_padded_length(mat2.shape[2], get_alignment_size(mat2)) + + if m_padded_length != 0 or k_padded_length != 0 or n_padded_length != 0: + return pad_bmm(mat1, mat2, m_padded_length, k_padded_length, n_padded_length) + + return aten.bmm(mat1, mat2) + + +def pad_bmm(mat1, mat2, m_padded_length, k_padded_length, n_padded_length): + # bmm_replace will go through pad_bmm multiple times if multiple dimensions are needed to be padded + if k_padded_length != 0: + mat1 = pad_dim(mat1, k_padded_length, 2) + mat2 = pad_dim(mat2, k_padded_length, 1) + + return aten.bmm(mat1, mat2) + elif n_padded_length != 0: + mat2 = pad_dim(mat2, n_padded_length, 2) + return aten.bmm(mat1, mat2)[:, :, :-n_padded_length].contiguous() + else: + mat1 = pad_dim(mat1, m_padded_length, 1) + return aten.bmm(mat1, mat2)[:, :-m_padded_length, :].contiguous() + + +@functools.lru_cache(None) +def _pad_mm_init(): + from .joint_graph import patterns + + if torch.cuda.is_available(): + # workaround https://github.com/pytorch/pytorch/issues/97894 + device = "cuda" + else: + device = "cpu" + + # sizes/values dont actually matter for initial trace + # once we get a possible match we re-trace with the actual values and verify the match still holds + + dim2a = functools.partial(torch.empty, (4, 4), device=device, requires_grad=True) + dim2b = functools.partial(torch.empty, (4, 4), device=device, requires_grad=True) + + dim3a = functools.partial(torch.empty, (4, 4, 4), device=device, requires_grad=True) + dim3b = functools.partial(torch.empty, (4, 4, 4), device=device, requires_grad=True) + + dim1a = functools.partial(torch.empty, (4), device=device, requires_grad=True) + + # workaround https://github.com/pytorch/pytorch/issues/97894 + # 0.113377 is a "magic" value that lets us recover the lost input arg relationship + rep = {"beta": 0.213377, "alpha": 0.113377} + + for pattern, replacement, args, workaround, extra_check in [ + ( + mm_pattern, + mm_replace, + [dim2a(), dim2b()], + {}, + should_pad_mm, + ), + ( 
+ bmm_pattern, + bmm_replace, + [dim3a(), dim3b()], + {}, + should_pad_bmm, + ), + ( + addmm_pattern, + addmm_replace, + [dim1a(), dim2a(), dim2b()], + rep, + should_pad_addmm, + ), + ]: + args = [*args, *workaround.values()] + register_replacement( + pattern, + replacement, + args, + training_graph, + patterns, + extra_check=extra_check, + scalar_workaround=workaround, + ) + register_replacement( + pattern, + replacement, + args, + inference_graph, + patterns, + extra_check=extra_check, + scalar_workaround=workaround, + ) diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/post_grad.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/post_grad.py new file mode 100644 index 0000000000000000000000000000000000000000..925702f0762250530bf57f26b38a02d2e6075a06 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/post_grad.py @@ -0,0 +1,602 @@ +import functools +import itertools +import logging +import operator +from typing import List, Optional, Union + +from sympy import Expr + +import torch +import torch._inductor as inductor + +from .. 
import config, ir, pattern_matcher + +from ..lowering import lowerings as L +from ..pattern_matcher import ( + _return_true, + Arg, + CallFunction, + filter_nodes, + get_arg_value, + Ignored, + init_once_fakemode, + KeywordArg, + ListOf, + Match, + MULTIPLE, + PatternMatcherPass, + register_graph_pattern, + remove_extra_clones, + stable_topological_sort, +) +from ..virtualized import V +from .group_batch_fusion import group_batch_fusion_post_grad_passes + + +log = logging.getLogger(__name__) +aten = torch.ops.aten +prims = torch.ops.prims + +# First pass_patterns[0] are applied, then [1], then [2] +pass_patterns = [ + PatternMatcherPass(), + PatternMatcherPass(), + PatternMatcherPass(), +] +# patterns applied only in inference +inference_patterns = PatternMatcherPass() + + +def post_grad_passes(gm: torch.fx.GraphModule, is_inference: bool): + """ + Passes that run on after grad. This is called once on the forwards + graph and once on the backwards graph. + + The IR here has been normalized and functionalized. 
+    """
+    if config.dce:
+        # has some issues with mutation in inference mode
+        gm.graph.eliminate_dead_code()
+
+    if is_inference and config.reordering:
+        reorder_for_locality(gm.graph)
+
+    if config.pattern_matcher:
+        lazy_init()
+
+        group_batch_fusion_post_grad_passes(gm.graph)
+        remove_extra_clones(gm.graph)
+
+        for patterns in pass_patterns:
+            patterns.apply(gm.graph)
+        if is_inference:
+            inference_patterns.apply(gm.graph)
+
+    stable_topological_sort(gm.graph)
+    gm.recompile()
+    gm.graph.lint()
+
+
+@init_once_fakemode
+def lazy_init():
+    if torch._C._has_mkldnn:
+        from .mkldnn_fusion import _mkldnn_fusion_init
+
+        _mkldnn_fusion_init()
+
+
+def reorder_for_locality(graph: torch.fx.Graph):
+    def visit(other_node):
+        if (
+            other_node.op == "call_function"
+            and other_node.target != operator.getitem
+            and all((n in seen_nodes) for n in other_node.users)
+        ):
+            # move node's producers right before it
+            node.prepend(other_node)
+
+    seen_nodes = set()
+
+    # only reorder nodes before the first copy_ in the graph.
+    # copy_ will appear at the end of functionalized graphs when there is mutation on inputs,
+    # and this reordering doesn't work well with mutation
+    first_copy = next(
+        (
+            node
+            for node in graph.nodes
+            if node.op == "call_function"
+            and node.target == torch.ops.aten.copy_.default
+        ),
+        None,
+    )
+    past_mutating_epilogue = True if first_copy is None else False
+
+    for node in reversed(graph.nodes):
+        seen_nodes.add(node)
+        if not past_mutating_epilogue:
+            past_mutating_epilogue = node is first_copy
+            continue
+
+        torch.fx.map_arg((node.args, node.kwargs), visit)
+
+
+def register_lowering_pattern(pattern, extra_check=_return_true, pass_number=1):
+    """
+    Register an aten-to-inductor-IR replacement pattern, applied in pass ``pass_patterns[pass_number]``
+    """
+    return pattern_matcher.register_lowering_pattern(
+        pattern, extra_check, pass_dict=pass_patterns[pass_number]
+    )
+
+
+################################################################################
+# Actual patterns below this point.
+# Priority of patterns is: +# - later output nodes first +# - order patterns are defined in +################################################################################ + + +@register_lowering_pattern( + CallFunction( + aten.add, + CallFunction(aten.mm, Arg(), Arg()), + CallFunction(aten.mm, Arg(), Arg()), + ) +) +def mm_plus_mm(match: Match, mat1, mat2, mat3, mat4): + return inductor.kernel.mm_plus_mm.tuned_mm_plus_mm(mat1, mat2, mat3, mat4) # type: ignore[attr-defined] + + +def cuda_and_enabled_mixed_mm(match): + return (config.use_mixed_mm or config.force_mixed_mm) and getattr( + match.kwargs["mat1"].meta.get("val"), "is_cuda", False + ) + + +def cuda_and_enabled_mixed_mm_and_not_int8(match): + return ( + cuda_and_enabled_mixed_mm(match) + and getattr(match.kwargs["mat1"].meta.get("val"), "is_cuda", False) + and getattr(match.kwargs["mat2"].meta.get("val"), "dtype", torch.int8) + != torch.int8 + ) # bitshift numerics in triton and pytorch don't match for torch.int8 + + +""" + this is intended to be used to unpack a [K,N] int4 tensor from a [K/2, N] uint4x2 tensor + (where the int4 and uint4x2 are represented with int8 and uint8 respectively) + where every other row of the int4 is packed with the row above it as: + uint4x2[k,n] = (8+int4[2*k,n])+(8+int4[2*k+1,n])<<4 + + unpack formulas: + int4[2*k,n]=(uint4x2[k,n] & 0xF) - 8 + int4[2*k+1,n]=(uint4x2[k,n] >> 4) - 8 + + thus matching on unpack formula: + torch.mm(mat1, torch.cat((mat2 & 0xF, mat2>>4),1).reshape(mat2_mm_shape).to(mat2_dtype).sub(8)) + + note: although the unpack formula in pytorch and the triton kernel is designed for a uint8 mat2, the behavior + of the kernel matches the pytorch formula for all dtypes except torch.int8 + where the bitwise numerics in triton do not match those in pytorch. 
+""" + + +@register_lowering_pattern( + CallFunction( + aten.mm.default, + KeywordArg("mat1"), + CallFunction( + aten.sub.Tensor, + CallFunction( + prims.convert_element_type.default, + CallFunction( + aten.reshape.default, + CallFunction( + aten.cat.default, + ListOf( + CallFunction( + aten.bitwise_and.Scalar, + KeywordArg("mat2"), + 0xF, + ), + CallFunction( + aten.__rshift__.Scalar, + KeywordArg("mat2"), + 4, + ), + ), + 1, + ), + KeywordArg("mat2_mm_shape"), + ), + KeywordArg("mat2_dtype"), + ), + 8, + ), + ), + extra_check=cuda_and_enabled_mixed_mm_and_not_int8, +) +def uint4x2_mixed_mm(match: Match, mat1, mat2, mat2_mm_shape, mat2_dtype): + return inductor.kernel.unpack_mixed_mm.tuned_uint4x2_mixed_mm( # type: ignore[attr-defined] + mat1, mat2, mat2_mm_shape, mat2_dtype + ) + + +""" + torch.mm(mat1, mat2.to(mat2_dtype)) +""" + + +@register_lowering_pattern( + CallFunction( + aten.mm, + KeywordArg("mat1"), + CallFunction( + prims.convert_element_type.default, + KeywordArg("mat2"), + KeywordArg("mat2_dtype"), + ), + ), + extra_check=cuda_and_enabled_mixed_mm, +) +def mixed_mm(match: Match, mat1, mat2, mat2_dtype): + return inductor.kernel.mm.tuned_mixed_mm(mat1, mat2, mat2_dtype) # type: ignore[attr-defined] + + +@register_graph_pattern( + CallFunction( + aten.cumsum.default, + CallFunction( + torch.ops.aten.full.default, + [Arg(), Arg()], + 1, + dtype=KeywordArg("dtype"), + layout=Ignored(), + device=KeywordArg("device"), + pin_memory=False, + _users=MULTIPLE, + ), + 1, + _users=MULTIPLE, + ), + pass_dict=pass_patterns[1], +) +def pointless_cumsum_replacement(match: Match, size0, size1, device, dtype): + """Based on a pattern in OPTForCausalLM""" + + def repl(size0, size1): + return torch.arange(1, size1 + 1, device=device, dtype=dtype).expand( + size0, size1 + ) + + # only replace the output node, not all nodes + match.nodes = [match.output_node()] + with V.fake_mode: + match.replace_by_example(repl, [size0, size1]) + + +def shape_of_mm(a, b): + m, _ = 
a.get_size() + _, n = b.get_size() + return [m, n] + + +@register_lowering_pattern( + CallFunction(aten.cat, ListOf(CallFunction(aten.mm, Arg(), Arg())), Arg()), +) +def cat_mm(match, inputs, dim): + return cat_tuned_op(match, inputs, dim, op=L[aten.mm], shape_of=shape_of_mm) + + +@register_lowering_pattern( + CallFunction( + aten.cat, ListOf(CallFunction(aten.addmm, Arg(), Arg(), Arg())), Arg() + ), +) +def cat_addmm(match, inputs, dim): + def shape_of(bias, a, b): + m, _ = a.get_size() + _, n = b.get_size() + return [m, n] + + return cat_tuned_op(match, inputs, dim, op=L[aten.addmm], shape_of=shape_of) + + +def cat_tuned_op(match, inputs, dim, *, op, shape_of): + """ + Memory planning to remove cat. We can't use the stock memory + planner since autotuning matmuls needs to know the output layout. + """ + if len(inputs) == 1: + return op(*inputs[0]) + + # TODO(jansel): rewrite this as a bmm? + if dim < 0: + dim += len(shape_of(*inputs[0])) + assert dim in (0, 1) + notdim = 1 - dim + + new_size: Optional[Union[List[Expr], List[int]]] = None + offsets_start = [] + offsets_end = [] + + # compute output sizes + for i in range(len(inputs)): + shape = shape_of(*inputs[i]) + if new_size is None: + new_size = shape + else: + new_size[notdim] = V.graph.sizevars.guard_equals( + shape[notdim], new_size[notdim] + ) + new_size[dim] += shape[dim] + offsets_start.append(new_size[dim] - shape[dim]) + offsets_end.append(new_size[dim]) + + assert new_size is not None + dtype = functools.reduce( + torch.promote_types, [x.get_dtype() for x in itertools.chain(*inputs)] + ) + device = inputs[0][0].get_device() + kernel = ir.ConcatKernel( + name=None, + layout=ir.FixedLayout(device, dtype, new_size), + inputs=[], + ) + kernel_tensor = ir.TensorBox.create(kernel) + + for i in range(len(inputs)): + dst = ir.SliceView.create(kernel_tensor, dim, offsets_start[i], offsets_end[i]) + src = op(*inputs[i], layout=dst.get_layout()).data.data + assert isinstance(src, (ir.ExternKernelOut, 
ir.TemplateBuffer)) + src.layout = ir.AliasedLayout(dst) + kernel.inputs.append(src) + + kernel.name = V.graph.register_buffer(kernel) + kernel.inputs = ir.ConcatKernel.unwrap_storage(kernel.inputs) + return kernel_tensor + + +_cat_1 = CallFunction(aten.cat, Arg(), 1, _users=2) + + +@register_lowering_pattern( + CallFunction( + aten.cat, + [ + _cat_1, + CallFunction( + aten.slice, + CallFunction(aten.slice, _cat_1, 0, 0, 9223372036854775807), + 1, + 0, + KeywordArg("size"), + ), + ], + 1, + ) +) +def cat_slice_cat(match, cat_input, size, dim=1): + """ + This is an example of a more complex pattern where cat_1 is used + multiple times inside the pattern. We fold 2 calls to cat into one. + + Matches: + cat_1: f32[1024, 4077] = torch.ops.aten.cat.default([add_26, primals_217], 1) + slice_1: f32[1024, 4077] = torch.ops.aten.slice.Tensor(cat_1, 0, 0, 9223372036854775807) + slice_2: f32[1024, 19] = torch.ops.aten.slice.Tensor(slice_1, 1, 0, 19) + cat_2: f32[1024, 4096] = torch.ops.aten.cat.default([cat_1, slice_2], 1) + + + Rewrite to: + slice_2 = torch.ops.aten.slice.Tensor(add_26, 1, 0, 19) + cat_2 = torch.ops.aten.cat.default([add_26, primals_217, slice2], 1) + """ + first, *rest = cat_input + # Optimization is optional, because we can just not fold the cat + # size should be within first.get_size()[dim] such that the optimization is valid. + # For negative `end`, we currently fallback to not optimizing. 
+    if size >= 0 and V.graph.sizevars.statically_known_leq(size, first.get_size()[dim]):
+        # fold 2 cats into 1 cat
+        return L[aten.cat](
+            [
+                first,
+                *rest,
+                L[aten.slice](first, dim, 0, size),
+            ],
+            dim,
+        )
+    else:
+        # don't expect to hit this case, just fall back
+        tmp = L[aten.cat](cat_input, dim)
+        return L[aten.cat](
+            [
+                tmp,
+                L[aten.slice](tmp, dim, 0, size),
+            ],
+            dim,
+        )
+
+
+@register_lowering_pattern(
+    CallFunction(
+        aten.add,
+        CallFunction(aten.mm, Arg(), Arg()),
+        KeywordArg("inp"),
+    ),
+    pass_number=2,
+)
+@register_lowering_pattern(
+    CallFunction(
+        aten.add,
+        KeywordArg("inp"),
+        CallFunction(aten.mm, Arg(), Arg()),
+    ),
+    pass_number=2,
+)
+def addmm(match, mat1, mat2, inp):
+    if isinstance(inp, ir.TensorBox):
+        inp_shape = inp.get_size()
+        matched = len(inp_shape) <= 2
+        mm_shape = shape_of_mm(mat1, mat2)
+        for i, m in zip(inp_shape, mm_shape):
+            matched &= i == 1 or i == m
+    else:  # inp is a Number
+        matched = False
+    if matched:
+        return L[aten.addmm](inp, mat1, mat2)
+    else:
+        return L[aten.add](inp, L[aten.mm](mat1, mat2))
+
+
+def is_valid_splitwithsizes_cat(match):
+    split_nodes = filter_nodes(match.nodes, aten.split_with_sizes)
+    cat_nodes = filter_nodes(match.nodes, aten.cat)
+    get_item_nodes = filter_nodes(match.nodes, operator.getitem)
+    if len(split_nodes) != 1 or len(cat_nodes) != 1:
+        return False
+    split_node, cat_node = split_nodes[0], cat_nodes[0]
+    # The dim of split and cat should match for passthrough
+    if get_arg_value(split_node, 2, "dim") != get_arg_value(cat_node, 1, "dim"):
+        return False
+    get_item_args = {
+        get_arg_value(get_item_node, 1) for get_item_node in get_item_nodes
+    }
+    assert None not in get_item_args
+    split_sizes = get_arg_value(split_node, 1, "split_sizes")
+    # All parts of split should be included in the cat
+    if get_item_args != set(range(len(split_sizes))):
+        return False
+    # The order of get_item_args should match the order in which cat_node uses them.
+ # For example, if the split_node like split_with_sizes(input, [2, 2, 3], 1), + # the cat node should be like cat([get_item(0), get_item(1), get_item(2)], 1). + cat_items_args_order = [ + get_arg_value(item_node, 1) for item_node in get_arg_value(cat_node, 0) + ] + if cat_items_args_order != list(range(len(split_sizes))): + return False + + return True + + +@register_lowering_pattern( + CallFunction( + aten.cat, + ListOf( + CallFunction( + operator.getitem, + CallFunction( + aten.split_with_sizes, + KeywordArg("input_"), + Ignored(), + Ignored(), + _users=MULTIPLE, + ), + Ignored(), + ), + ), + Ignored(), + ), + pass_number=2, + extra_check=is_valid_splitwithsizes_cat, +) +def splitwithsizes_cat_replace(match, input_): + return input_ + + +def is_valid_cat_splitwithsizes(match): + cat_nodes = filter_nodes(match.nodes, aten.cat) + split_nodes = filter_nodes(match.nodes, aten.split_with_sizes) + if len(split_nodes) != 1 or len(cat_nodes) != 1: + return False + split_node, cat_node = split_nodes[0], cat_nodes[0] + + # the cat node has other users: can't eliminate + if len(cat_node.users) > 1: + return False + + # the dim of the cat and split should match + dim = get_arg_value(split_node, 2, "dim") + if dim != get_arg_value(cat_node, 1, "dim"): + return False + + cat_inputs = list(get_arg_value(cat_node, 0)) + split_sizes = get_arg_value(split_node, 1, "split_sizes") + # the number of input tensors in cat and the + # length of the split sizes should match + if len(cat_inputs) != len(split_sizes): + return False + + for cat_input, split_size in zip(cat_inputs, split_sizes): + # each cat input tensor's size along dim + # should match the corresponding split size + if "val" not in cat_input.meta: + return False + cat_input_size = cat_input.meta["val"].size(dim) + if cat_input_size != split_size: + return False + + return True + + +@register_lowering_pattern( + CallFunction( + aten.split_with_sizes, + CallFunction( + aten.cat, + KeywordArg("input_"), + Ignored(), + 
_users=MULTIPLE, + ), + Ignored(), + Ignored(), + ), + pass_number=2, + extra_check=is_valid_cat_splitwithsizes, +) +def cat_splitwithsizes_replace(match, input_): + return input_ + + +def view_to_reshape(gm): + """ + Replace view ops in the GraphModule to reshape ops. + """ + for nd in gm.graph.nodes: + if nd.target == torch.ops.aten.view.default: + nd.target = torch.ops.aten.reshape.default + + +def is_pointwise_use(use): + if not use.op == "call_function": + return False + + if not ( + isinstance(use.target, torch._ops.OpOverload) or use.target is operator.getitem + ): + return False + + if use.target is operator.getitem or use.target.is_view: + return all(is_pointwise_use(u) for u in use.users) + + return torch.Tag.pointwise in use.target.tags + + +@register_graph_pattern( + CallFunction(aten.addmm, Arg(), Arg(), Arg()), + pass_dict=pass_patterns[2], +) +def unfuse_bias_add_to_pointwise(match: Match, inp, mat1, mat2): + if not inp.meta["val"].is_cuda: + return + + output = match.output_node() + if not all(is_pointwise_use(use) for use in output.users): + return + + def repl(inp, x1, x2): + return x1 @ x2 + inp + + with V.fake_mode: + match.replace_by_example(repl, [inp, mat1, mat2]) diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/pre_grad.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/pre_grad.py new file mode 100644 index 0000000000000000000000000000000000000000..21e287ec06b6bcb1c6040a588ba02a5f1f1c7200 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/pre_grad.py @@ -0,0 +1,460 @@ +import copy +import logging +from typing import List, Optional + +import torch +import torch.nn as nn +from torch._dynamo.utils import detect_fake_mode +from torch.fx.experimental.optimization import ( + matches_module_pattern, + replace_node_module, +) +from torch.fx.passes.shape_prop import ShapeProp +from torch.nn import functional as F +from torch.nn.utils.fusion import fuse_conv_bn_eval, 
fuse_conv_bn_weights + +from .. import config + +from ..fx_utils import matches_module_function_pattern +from ..pattern_matcher import ( + init_once_fakemode, + PatternMatcherPass, + stable_topological_sort, +) +from ..utils import is_cpu_device +from .group_batch_fusion import group_batch_fusion_pre_grad_passes + +log = logging.getLogger(__name__) + +normalization_pass = PatternMatcherPass(prevent_match_across_mutations=True) +merge_splits_pass = PatternMatcherPass(prevent_match_across_mutations=True) +split_cat_pass = PatternMatcherPass(prevent_match_across_mutations=True) +unbind_stack_pass = PatternMatcherPass(prevent_match_across_mutations=True) + +pattern_matcher_passes: List[PatternMatcherPass] = [ + normalization_pass, + merge_splits_pass, + split_cat_pass, + unbind_stack_pass, +] + + +@init_once_fakemode +def lazy_init(): + from . import split_cat # noqa: F401 + + if config.is_fbcode(): + from .fb import split_cat as split_cat_fb # noqa: F401 + + +def pre_grad_passes(gm, example_inputs): + """ + Apply passes on the input FX graph using Torch IR. + + WARNING: + The IR before grad is not functional or normalized, so it is harder + to write passes on this IR. Passes must be safe with respect to + aliasing and mutation and need to handle all possible arg schemas. + + Consider adding a new pass to post_grad.py or joint_graph.py which + are after functionalization and normalization. 
+ """ + + if config.pattern_matcher: + lazy_init() + gm = fuse_fx(gm, example_inputs) + group_batch_fusion_pre_grad_passes(gm.graph) + for pattern_matcher_pass in pattern_matcher_passes: + pattern_matcher_pass.apply(gm.graph) + + stable_topological_sort(gm.graph) + gm.graph.lint() + gm.recompile() + + return gm + + +def fuse_fx(gm: torch.fx.GraphModule, example_inputs): + is_cpu = is_cpu_device(example_inputs) + + fake_mode = detect_fake_mode(example_inputs) + + gm = sink_cat_after_pointwise(gm) + if config.permute_fusion and not is_cpu: + # For linear permute fusion, we need to check input info to identify + # and perform proper permutation/transpose + ShapeProp(gm, fake_mode=fake_mode).propagate(*example_inputs) + gm = linear_permute_fusion(gm) + gm = permute_linear_fusion(gm) + gm = permute_matmul_fusion(gm) + + # make sure the autograd is disabled. + if torch.is_grad_enabled(): + return gm + if not is_cpu: + return gm + gm = remove_identity(gm) + gm = fuse_conv_bn(gm) + return gm + + +def fetch_attr(target: str, mod): + target_atoms = target.split(".") + attr_itr = mod + for i, atom in enumerate(target_atoms): + if not hasattr(attr_itr, atom): + raise RuntimeError( + f"Node referenced nonexistant target {'.'.join(target_atoms[:i])}" + ) + attr_itr = getattr(attr_itr, atom) + return attr_itr + + +def remove_identity(gm: torch.fx.GraphModule): + """ + Removes all identity layers from the module. + """ + + class IdentityRemover(torch.fx.Transformer): + def call_module(self, target, args, kwargs): + if isinstance(self.submodules[target], nn.Identity): + assert len(args) == 1 + return args[0] + else: + return super().call_module(target, args, kwargs) + + return IdentityRemover(gm).transform() + + +def fuse_conv_bn(gm: torch.fx.GraphModule, inplace=False): + """ + Fuses Convolution/BN layers for inference purposes. 
+ """ + modules_patterns = [ + (torch.nn.Conv1d, torch.nn.BatchNorm1d), + (torch.nn.Conv2d, torch.nn.BatchNorm2d), + (torch.nn.Conv3d, torch.nn.BatchNorm3d), + ] + module_function_patterns = [ + (torch.nn.Conv1d, F.batch_norm), + (torch.nn.Conv2d, F.batch_norm), + (torch.nn.Conv3d, F.batch_norm), + ] + modules = dict(gm.named_modules()) + for pattern in modules_patterns: + for node in gm.graph.nodes: + if matches_module_pattern(pattern, node, modules): + if len(node.args[0].users) > 1: # Output of conv is used by other nodes + continue + conv = modules[node.args[0].target] + bn = modules[node.target] + eval_mode = all(not n.training for n in [conv, bn]) + if not eval_mode: + continue + if not bn.track_running_stats: + continue + fused_conv = fuse_conv_bn_eval(conv, bn) + replace_node_module(node.args[0], modules, fused_conv) + node.replace_all_uses_with(node.args[0]) + gm.graph.erase_node(node) + gm.graph.lint() + for pattern in module_function_patterns: + for node in gm.graph.nodes: + if matches_module_function_pattern(pattern, node, modules): + # TODO: support kwargs. 
+ if len(node.args) != 8: + continue + conv = modules[node.args[0].target] + bn_training = node.args[5] + bn_eps = node.args[7] + if conv.training or bn_training: + continue + if type(bn_eps) is not float: + continue + bn_args_is_constant = all( + n.op == "get_attr" and len(n.users) == 1 for n in node.args[1:5] + ) + if not bn_args_is_constant: + continue + bn_running_mean = fetch_attr(node.args[1].target, gm) + bn_running_var = fetch_attr(node.args[2].target, gm) + bn_weight = fetch_attr(node.args[3].target, gm) + bn_bias = fetch_attr(node.args[4].target, gm) + if bn_running_mean is None or bn_running_var is None: + continue + fused_conv = copy.deepcopy(conv) + fused_conv.weight, fused_conv.bias = fuse_conv_bn_weights( + fused_conv.weight, + fused_conv.bias, + bn_running_mean, + bn_running_var, + bn_eps, + bn_weight, + bn_bias, + ) + replace_node_module(node.args[0], modules, fused_conv) + node.replace_all_uses_with(node.args[0]) + gm.graph.erase_node(node) + gm.graph.lint() + gm.recompile() + + return gm + + +class NormalizedLinearNode: + def __init__(self, node: torch.fx.Node) -> None: + assert node.op == "call_function" + assert node.target in [torch.nn.functional.linear] + self.node: torch.fx.Node = node + + def get_input(self) -> torch.fx.Node: + if len(self.node.args) > 0: + return self.node.args[0] + else: + return self.node.kwargs["input"] + + def get_weight(self) -> torch.fx.Node: + if len(self.node.args) > 1: + return self.node.args[1] + else: + return self.node.kwargs["weight"] + + def get_bias(self) -> torch.fx.Node: + if len(self.node.args) > 2: + return self.node.args[2] + else: + return self.node.kwargs["bias"] if "bias" in self.node.kwargs else None + + +class NormalizedMatmulNode: + def __init__(self, node: torch.fx.Node) -> None: + assert node.op == "call_function" + assert node.target in [torch.bmm, torch.matmul] + self.node: torch.fx.Node = node + + def get_input(self) -> torch.fx.Node: + if len(self.node.args) > 0: + return self.node.args[0] + 
else: + return self.node.kwargs["input"] + + def get_other(self) -> torch.fx.Node: + if len(self.node.args) > 1: + return self.node.args[1] + else: + return self.node.kwargs["other"] + + +def check_permute(node: torch.fx.Node): + ranks = len(node.meta["tensor_meta"].shape) + if len(node.args) > 3: + permutation = [node.args[i] % ranks for i in range(1, ranks + 1)] + elif ( + "permutation" in node.kwargs + and node.kwargs["permutation"] is not None + and len(node.kwargs["permutation"]) > 2 + ): + permutation = [i % ranks for i in node.kwargs["permutation"]] + else: + return False + allowed_permutation = list(range(ranks)) + allowed_permutation[-1] = ranks - 2 + allowed_permutation[-2] = ranks - 1 + return permutation == allowed_permutation + + +def sink_cat_after_pointwise(module: torch.fx.GraphModule) -> torch.fx.GraphModule: + def one_user(node): + users = list(node.users) + return users[0] if len(users) == 1 else None + + def is_view(node): + view = {"view"} + return node.op == "call_method" and node.target in view + + def is_pointwise_unary(node): + pointwise = {torch.relu, torch.tanh, "relu", "tanh"} + return node.op in {"call_function", "call_method"} and node.target in pointwise + + g = module.graph + for node in g.nodes: + if node.op != "call_function" or node.target != torch.cat: + continue + + cat_or_view = node + while True: + user = one_user(cat_or_view) + if not user or not is_view(user): + break + cat_or_view = user + + if user and is_pointwise_unary(user): + with g.inserting_before(node): + + def cat_args(tensors, dim=0): + return tensors, dim + + tensors, dim = cat_args(*node.args, **node.kwargs) + new_tensors = [ + g.create_node(user.op, user.target, args=(arg,), kwargs=user.kwargs) + for arg in tensors + ] + new_cat = g.create_node( + "call_function", torch.cat, args=(new_tensors, dim) + ) + user.replace_all_uses_with(cat_or_view) + node.replace_all_uses_with(new_cat) + g.erase_node(user) + g.erase_node(node) + g.lint() + module.recompile() + 
return module + + +def linear_permute_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule: + for node in module.graph.nodes: + if ( + node.op == "call_method" + and node.target == "permute" + and check_permute(node) + ): + if len(node.args) > 0: + input_node = node.args[0] + else: + input_node = node.kwargs["input"] + if ( + input_node.op == "call_function" + and input_node.target == torch.nn.functional.linear + ): + normalized = NormalizedLinearNode(input_node) + input = normalized.get_input() + weight = normalized.get_weight() + bias = normalized.get_bias() + with module.graph.inserting_before(node): + fused_node = module.graph.call_function( + linear_transpose, args=(input, weight, bias) + ) + node.replace_all_uses_with(fused_node) + module.graph.erase_node(node) + if len(input_node.users) == 0: + module.graph.erase_node(input_node) + + module.graph.lint() + module.recompile() + return module + + +# Y1 = X * W^T + bias +# Y2 = Y1.permute(0, 2, 1) +# ----> +# Y2 = (W * X^T + bias.unsqueeze(-1))^T +def linear_transpose( + input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] +) -> torch.Tensor: + if bias is None: + return torch.matmul(weight, input.transpose(-1, -2)) + return torch.matmul(weight, input.transpose(-1, -2)) + bias.unsqueeze(-1) + + +def permute_linear_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule: + for node in module.graph.nodes: + if node.op == "call_function" and node.target == torch.nn.functional.linear: + if len(node.args) > 0: + input_node = node.args[0] + else: + input_node = node.kwargs["input"] + if ( + input_node.op == "call_method" + and input_node.target == "permute" + and check_permute(input_node) + ): + normalized = NormalizedLinearNode(node) + if len(input_node.args) > 0: + input = input_node.args[0] + else: + input = input_node.kwargs["input"] + weight = normalized.get_weight() + bias = normalized.get_bias() + with module.graph.inserting_before(node): + fused_node = module.graph.call_function( 
+ transpose_linear, args=(input, weight, bias) + ) + node.replace_all_uses_with(fused_node) + module.graph.erase_node(node) + if len(input_node.users) == 0: + module.graph.erase_node(input_node) + + module.graph.lint() + module.recompile() + return module + + +def permute_matmul_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule: + for node in module.graph.nodes: + if node.op == "call_function" and ( + node.target == torch.bmm or node.target == torch.matmul + ): + normalized = NormalizedMatmulNode(node) + input_A_node = normalized.get_input() + input_B_node = normalized.get_other() + input_A = input_A_node + input_B = input_B_node + Atrans = Btrans = False + if ( + input_A_node.op == "call_method" + and input_A_node.target == "permute" + and check_permute(input_A_node) + ): + Atrans = True + if len(input_A_node.args) > 0: + input_A = input_A_node.args[0] + else: + input_A = input_A_node.kwargs["input"] + + if ( + input_B_node.op == "call_method" + and input_B_node.target == "permute" + and check_permute(input_B_node) + ): + Btrans = True + if len(input_B_node.args) > 0: + input_B = input_B_node.args[0] + else: + input_B = input_B_node.kwargs["input"] + + if Atrans or Btrans: + with module.graph.inserting_before(node): + fused_node = module.graph.call_function( + transpose_matmul, + args=(input_A, input_B, Atrans, Btrans), + ) + node.replace_all_uses_with(fused_node) + module.graph.erase_node(node) + if Atrans and len(input_A_node.users) == 0: + module.graph.erase_node(input_A_node) + if Btrans and len(input_B_node.users) == 0: + module.graph.erase_node(input_B_node) + + module.graph.lint() + module.recompile() + return module + + +# X1 = X.permute(0, 2, 1) +# Y1 = X1 * W1^T + bias1 +# ----> +# Y2 = X1.transpose(-1, -2) * W1^T + bias1 +def transpose_linear( + input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] +) -> torch.Tensor: + if bias is None: + return torch.matmul(input.transpose(-1, -2), weight.t()) + return 
torch.matmul(input.transpose(-1, -2), weight.t()) + bias + + +def transpose_matmul(A: torch.Tensor, B: torch.Tensor, Atrans: bool, Btrans: bool): + if Atrans: + A = A.transpose(-1, -2) + if Btrans: + B = B.transpose(-1, -2) + return torch.matmul(A, B) diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/quantization.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/quantization.py new file mode 100644 index 0000000000000000000000000000000000000000..c730302515effb21f4bf15af29b2e34c18c50398 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/quantization.py @@ -0,0 +1,1020 @@ +import copy +import functools +import math +import operator + +import torch +from ..lowering import lowerings as L, require_channels_last +from ..pattern_matcher import Arg, CallFunction, filter_nodes, KeywordArg, ListOf, Match +from ..utils import pad_listlike +from .freezing_patterns import register_freezing_graph_pattern +from .post_grad import register_lowering_pattern + +aten = torch.ops.aten +prims = torch.ops.prims +quantized_decomposed = torch.ops.quantized_decomposed +quantized = torch.ops.quantized + +""" +dequantize activation: + x = x.to(fp32) + x = x - zero_point + x = x * scale +""" +dequantize_per_tensor_activation_pattern = CallFunction( + aten.mul.Tensor, + CallFunction( + aten.sub.Tensor, + CallFunction( + prims.convert_element_type.default, + KeywordArg("x"), + KeywordArg("x_dq_dtype"), + ), + KeywordArg("x_zp"), + ), + KeywordArg("x_scale"), +) + +dequantize_per_channel_weight_pattern = CallFunction( + quantized_decomposed.dequantize_per_channel.default, + KeywordArg("q_weight"), + KeywordArg("w_scale"), + KeywordArg("w_zp"), + KeywordArg("w_axis"), + KeywordArg("w_quant_min"), + KeywordArg("w_quant_max"), + KeywordArg("w_dtype"), +) + +dequantize_per_channel_clone_weight_pattern = CallFunction( + aten.clone.default, + dequantize_per_channel_weight_pattern, + 
memory_format=KeywordArg("memory_format"), +) + +dequantize_qconv_pt2e_pattern = CallFunction( + torch.ops.onednn.qconv2d_pointwise.default, + KeywordArg("x"), + KeywordArg("x_scale"), # x_scale + KeywordArg("x_zp"), # x_zp + KeywordArg("packed_weight"), # packed_weight + KeywordArg("w_scale"), # w_scale + KeywordArg("w_zp"), # w_zp + KeywordArg("b"), # bias + KeywordArg("stride"), + KeywordArg("padding"), + KeywordArg("dilation"), + KeywordArg("groups"), + KeywordArg("inv_output_scale"), # inv_output_scale = 1.0 + KeywordArg("output_zero_point"), # output_zero_point = 0 + KeywordArg("fp32_output"), # fp32_output = True + KeywordArg("attr"), # attr = "none" + Arg(), # scalars + Arg(), # algorithm +) + +qlinear_pt2e_pattern = CallFunction( + torch.ops.onednn.qlinear_pointwise.default, + KeywordArg("x"), + KeywordArg("x_scale"), + KeywordArg("x_zp"), + KeywordArg("packed_weight"), + KeywordArg("w_scale"), + KeywordArg("w_zp"), + KeywordArg("b"), + KeywordArg("output_scale"), + KeywordArg("output_zero_point"), + KeywordArg("fp32_output"), + KeywordArg("postop_name"), + KeywordArg("postop_args"), + KeywordArg("postop_algorithm"), +) + +dequantize_accum_pattern = CallFunction( + aten.mul.Tensor, + CallFunction( + aten.sub.Tensor, + CallFunction( + prims.convert_element_type.default, + KeywordArg("accum"), + KeywordArg("accum_dq_dtype"), + ), + KeywordArg("accum_zp"), + ), + KeywordArg("accum_scale"), +) + + +def generate_pattern_with_binary(binary_post_op, computation_call, extra_input_pattern): + return CallFunction( + binary_post_op, + computation_call, + extra_input_pattern, + ) + + +def generate_pattern_with_unary(computation_call, unary_post_op): + if unary_post_op is not None: + return CallFunction( + unary_post_op, + computation_call, + ) + return computation_call + + +def generate_pattern_with_output_quant(computation_call): + """ + quantize output: + output = round(output * o_inv_scale) + output = output + zero_point + output = clamp_min(output, 0) + output = 
clamp_max(output, 127) + output = output.to(uint8) + """ + quantized_op_output_pattern_pt2e = CallFunction( + prims.convert_element_type.default, + CallFunction( + aten.clamp_max.default, + CallFunction( + aten.clamp_min.default, + CallFunction( + aten.add.Tensor, + CallFunction( + aten.round.default, + CallFunction( + aten.mul.Tensor, + computation_call, + KeywordArg("o_inv_scale"), + ), + ), + KeywordArg("o_zp"), + ), + KeywordArg("o_qmin"), + ), + KeywordArg("o_qmax"), + ), + KeywordArg("o_dtype"), + ) + return quantized_op_output_pattern_pt2e + + +def _register_quantized_conv_lowering( + pattern, + pass_number, + computation_op, + fp32_output, + unary_attr, +): + @register_lowering_pattern(pattern, pass_number=pass_number) + def qconv(match: Match, *args, **kwargs): + # Activation QParams + x, x_scale, x_zp = ( + kwargs["x"], + kwargs["x_scale"], + kwargs["x_zp"], + ) + # Weight QParams + packed_weight, w_scale, w_zp = ( + kwargs["packed_weight"], + kwargs["w_scale"], + kwargs["w_zp"], + ) + # Conv Params + b, stride, padding, dilation, groups = ( + kwargs["b"], + kwargs["stride"], + kwargs["padding"], + kwargs["dilation"], + kwargs["groups"], + ) + # Output QParams + o_inv_scale, o_zero_point = ( + kwargs["o_inv_scale"], + kwargs["o_zp"], + ) + assert ( + kwargs["fp32_output"] is True + ) # Expected int8-in fp32-out qconv in weight prepack phase + assert ( + kwargs["attr"] == "none" + ) # Expected no post op fused in weight prepack phase + computation_args = ( + x, + x_scale, + x_zp, + packed_weight, + w_scale, + w_zp, + b, + stride, + padding, + dilation, + groups, + o_inv_scale, + o_zero_point, + fp32_output, + unary_attr.op_name, + unary_attr.scalars_attr, + unary_attr.algorithm_attr, + ) + return L[computation_op](*computation_args) + + return qconv + + +def _register_quantized_linear_lowering( + pattern, + pass_number, + computation_op, + fp32_output, + unary_attr, +): + @register_lowering_pattern(pattern, pass_number=pass_number) + def qlinear(match: 
Match, *args, **kwargs): + # Activation QParams + x, x_scale, x_zp = ( + kwargs["x"], + kwargs["x_scale"], + kwargs["x_zp"], + ) + # Weight QParams + packed_weight, w_scale, w_zp = ( + kwargs["packed_weight"], + kwargs["w_scale"], + kwargs["w_zp"], + ) + + # bias + b = kwargs["b"] if "b" in kwargs else None + + # Output QParams + o_inv_scale, o_zero_point = ( + kwargs["o_inv_scale"], + kwargs["o_zp"], + ) + assert ( + kwargs["fp32_output"] is True + ) # Expected int8-in fp32-out qlinear in weight prepack phase + assert ( + kwargs["postop_name"] == "none" + ) # Expected no post op fused in weight prepack phase + + computation_args = ( + x, + x_scale, + x_zp, + packed_weight, + w_scale, + w_zp, + b, + o_inv_scale, + o_zero_point, + fp32_output, + unary_attr.op_name, + unary_attr.scalars_attr, + unary_attr.algorithm_attr, + ) + return L[computation_op](*computation_args) + + return qlinear + + +def _register_quantized_conv_binary_lowering( + pattern, + pass_number, + computation_op, + fp32_output, + binary_unary_attr, +): + @register_lowering_pattern(pattern, pass_number=pass_number) + def qconv_binary(match: Match, *args, **kwargs): + x, x_scale, x_zp = kwargs["x"], kwargs["x_scale"], kwargs["x_zp"] + accum, accum_scale, accum_zp = ( + kwargs["accum"], + kwargs["accum_scale"], + kwargs["accum_zp"], + ) + packed_weight, w_scale, w_zp = ( + kwargs["packed_weight"], + kwargs["w_scale"], + kwargs["w_zp"], + ) + b, stride, padding, dilation, groups = ( + kwargs["b"], + kwargs["stride"], + kwargs["padding"], + kwargs["dilation"], + kwargs["groups"], + ) + o_inv_scale, o_zero_point = ( + kwargs["o_inv_scale"], + kwargs["o_zp"], + ) + + computation_args = ( + x, + x_scale, + x_zp, + accum, + accum_scale, + accum_zp, + packed_weight, + w_scale, + w_zp, + b, + stride, + padding, + dilation, + groups, + o_inv_scale, + o_zero_point, + fp32_output, + binary_unary_attr.binary_op_name, + binary_unary_attr.alpha, + binary_unary_attr.unary_op_name, + binary_unary_attr.scalars_attr, + 
binary_unary_attr.algorithm_attr, + ) + return L[computation_op](*computation_args) + + return qconv_binary + + +def _register_quantization_unary_fusion(): + class UnaryAttr: + def __init__(self, op_name: str, scalars_attr=None, algorithm_attr=None): + self.op_name = op_name + self.scalars_attr = scalars_attr if scalars_attr else [] + self.algorithm_attr = algorithm_attr if algorithm_attr else "" + + conv_unary_replace_patterns = { + UnaryAttr("none", [], ""): generate_pattern_with_output_quant( + dequantize_qconv_pt2e_pattern + ), + UnaryAttr("relu", [], ""): generate_pattern_with_output_quant( + generate_pattern_with_unary( + dequantize_qconv_pt2e_pattern, aten.relu.default + ) + ), + } + + for unary_attr, patterns in conv_unary_replace_patterns.items(): + # Register qconv2d pattern for ExternKernel Lowering + _register_quantized_conv_lowering( + patterns, + 1 if unary_attr.op_name != "none" else 2, # pass_number + torch.ops.onednn.qconv2d_pointwise, # computation_op + False, # fp32_output + unary_attr, # unary_attr + ) + + linear_unary_replace_patterns = { + UnaryAttr("none", [], ""): generate_pattern_with_output_quant( + qlinear_pt2e_pattern + ), + UnaryAttr("relu", [], ""): generate_pattern_with_output_quant( + generate_pattern_with_unary(qlinear_pt2e_pattern, aten.relu.default) + ), + } + + for unary_attr, patterns in linear_unary_replace_patterns.items(): + _register_quantized_linear_lowering( + patterns, + 1 if unary_attr.op_name != "none" else 2, # pass_number + torch.ops.onednn.qlinear_pointwise, # computation_op + False, # fp32_output + unary_attr, # unary_attr + ) + + +def _register_quantization_binary_fusion(): + class BinaryUnaryAttr: + def __init__( + self, + binary_op_name: str, + alpha=None, + unary_op_name: str = "none", + scalars_attr=None, + algorithm_attr=None, + ): + self.binary_op_name = binary_op_name + self.alpha = alpha if alpha else 1.0 + self.unary_op_name = unary_op_name + self.scalars_attr = scalars_attr if scalars_attr else [] + 
self.algorithm_attr = algorithm_attr if algorithm_attr else "" + + binary_replace_patterns = { + BinaryUnaryAttr("add", 1.0, "none", [], ""): generate_pattern_with_output_quant( + generate_pattern_with_binary( + aten.add.Tensor, + dequantize_qconv_pt2e_pattern, + dequantize_accum_pattern, + ) + ), + BinaryUnaryAttr("add", 1.0, "relu", [], ""): generate_pattern_with_output_quant( + generate_pattern_with_unary( + generate_pattern_with_binary( + aten.add.Tensor, + dequantize_qconv_pt2e_pattern, + dequantize_accum_pattern, + ), + aten.relu.default, + ) + ), + } + + for binary_unary_attr, patterns in binary_replace_patterns.items(): + # Register qconv2d_binary_unary pattern for ExternKernel Lowering + _register_quantized_conv_binary_lowering( + patterns, + 0 if binary_unary_attr.unary_op_name != "none" else 1, # pass_number + torch.ops.onednn.qconv2d_pointwise.binary, # computation_op + False, # fp32_output + binary_unary_attr, # binary_unary_attr + ) + + +def _is_valid_quantized_maxpool2d_optimization_pattern(): + def fn(match): + # Only match the pattern which max_pool2d_with_indices returns value + # instead of indices. 
+ get_item_node = filter_nodes(match.nodes, operator.getitem)[0] + return get_item_node.args[1] == 0 + + return fn + + +def _register_quantized_maxpool2d_lowering( + pattern, + computation_op, +): + @register_lowering_pattern( + pattern, + extra_check=_is_valid_quantized_maxpool2d_optimization_pattern(), + ) + def qmaxpool2d(match: Match, *args, **kwargs): + x = kwargs["x"] + kernel_size = kwargs["kernel_size"] + stride = kwargs["stride"] if ("stride" in kwargs) else None + padding = kwargs["padding"] if ("padding" in kwargs) else 0 + dilation = kwargs["dilation"] if ("dilation" in kwargs) else 1 + ceil_mode = kwargs["ceil_mode"] if ("ceil_mode" in kwargs) else False + + if padding == 0: + padding = [0, 0] + if dilation == 1: + dilation = [1, 1] + if not stride: + stride = kernel_size + kernel_size = pad_listlike(kernel_size, 2) + stride = pad_listlike(stride, 2) + padding = pad_listlike(padding, 2) + dilation = pad_listlike(dilation, 2) + + assert len(kernel_size) == 2 + assert len(stride) == 2 + assert len(padding) == 2 + assert len(dilation) == 2 + + computation_args = ( + x, + kernel_size, + stride, + padding, + dilation, + ceil_mode, + ) + computation_args, _ = require_channels_last(computation_op, *computation_args) + return L[computation_op](*computation_args) + + return qmaxpool2d + + +def _register_quantization_maxpool2d(): + # Currently, the default parameters are not in FX Graph generated by Dynamo export. + # So, if user defines nn.MaxPool2d with different assignment of default parameter, + # it will generate graph with different number of input nodes and hence + # different pattern to be matched. 
+ # Refer to the issue: https://github.com/pytorch/pytorch/issues/105901 + max_pool2d_args_list = [ + [ + KeywordArg("stride"), + ], + [ + KeywordArg("stride"), + KeywordArg("padding"), + ], + [ + KeywordArg("stride"), + KeywordArg("padding"), + KeywordArg("dilation"), + ], + [ + KeywordArg("stride"), + KeywordArg("padding"), + KeywordArg("dilation"), + KeywordArg("ceil_mode"), + ], + ] + + for max_pool2d_args in max_pool2d_args_list: + dequantize_maxpool2d_pattern = CallFunction( + aten.max_pool2d_with_indices.default, + dequantize_per_tensor_activation_pattern, + KeywordArg("kernel_size"), + *max_pool2d_args, + ) + dequantize_maxpool2d_get_item_pattern = CallFunction( + operator.getitem, + dequantize_maxpool2d_pattern, + Arg(), + ) + _register_quantized_maxpool2d_lowering( + generate_pattern_with_output_quant(dequantize_maxpool2d_get_item_pattern), + quantized.max_pool2d, + ) + + +def _is_valid_quantized_cat_optimization_pattern(): + def fn(match): + # Ensure all the inputs and output has same scale and zero point + # Step 1: Check inputs/output zero point + sub_nodes = filter_nodes(match.nodes, aten.sub.Tensor) + zero_points = [node.args[1] for node in sub_nodes] + add_nodes = filter_nodes(match.nodes, aten.add.Tensor) + assert len(add_nodes) == 1, "expect only 1 add node at output quant pattern" + zero_points.append(add_nodes[0].args[1]) + if not all(zero_point == zero_points[0] for zero_point in zero_points): + return False + + # Step 2: Check inputs/output scale + mul_nodes = filter_nodes(match.nodes, aten.mul.Tensor) + # We need to find mul node at output since the scale value is reciprocal to input scale. + # Mul node at output should connect to cat node directly. 
+ scales = [ + ( + mul_node.args[1] + if mul_node.args[0].target is aten.cat.default + else 1.0 / mul_node.args[1] + ) + for mul_node in mul_nodes + ] + if not all(math.isclose(scale, scales[0], rel_tol=1e-5) for scale in scales): + return False + + return True + + return fn + + +def _register_quantized_cat_lowering( + pattern, + computation_op, +): + @register_lowering_pattern( + pattern, + extra_check=_is_valid_quantized_cat_optimization_pattern(), + ) + def qcat(match: Match, inputs, dim, **kwargs): + # inputs is with format: [[x1, x1_dq_dtype, x1_zp, x1_scale], ...] + uint8_inputs = [input[0] for input in inputs] + return L[computation_op](uint8_inputs, dim) + + return qcat + + +_raw_dequantize_per_tensor_activation_pattern = CallFunction( + aten.mul.Tensor, + CallFunction( + aten.sub.Tensor, + CallFunction( + prims.convert_element_type.default, + Arg(), + Arg(), + ), + Arg(), + ), + Arg(), +) + + +def _register_quantization_cat(): + dequantize_cat_pattern = CallFunction( + aten.cat.default, + ListOf(_raw_dequantize_per_tensor_activation_pattern), + KeywordArg("dim"), + ) + _register_quantized_cat_lowering( + generate_pattern_with_output_quant(dequantize_cat_pattern), + aten.cat, + ) + + +def _register_quantization_lowerings(): + _register_quantization_unary_fusion() + _register_quantization_binary_fusion() + _register_quantization_maxpool2d() + _register_quantization_cat() + + +def _is_valid_dequant_promotion_pattern(match): + mul_node = match.output_node() + sub_node = mul_node.args[0] + to_fp32_node = sub_node.args[0] + if ( + mul_node.target is aten.mul.Tensor + and sub_node.target is aten.sub.Tensor + and to_fp32_node.target is prims.convert_element_type.default + and len(list(mul_node.users)) > 1 + ): + # dequant pattern has more than 1 users to be promoted + return True + return False + + +def _register_dequant_promotion_pass(pattern, pass_number): + @register_freezing_graph_pattern( + pattern, + extra_check=_is_valid_dequant_promotion_pattern, + 
pass_number=pass_number, + ) + def dequant_promotion(match: Match, *args, **kwargs): + # If dequant pattern used by multiply nodes, + # we will do dequant promotion. So each user node has a seperate dequant pattern connected. + def clone_to_new_node(graph, source_node, user_node): + assert ( + source_node.op == "call_function" + ), "clone_to_new_node only support node.op call_function" + with graph.inserting_before(user_node): + new_node = graph.call_function( + source_node.target, + args=source_node.args, + kwargs=source_node.kwargs, + ) + new_node.meta = copy.copy(source_node.meta) + user_node.replace_input_with(source_node, new_node) + return new_node + + mul_node = match.output_node() + sub_node = mul_node.args[0] + to_fp32_node = sub_node.args[0] + assert mul_node.target is aten.mul.Tensor + assert sub_node.target is aten.sub.Tensor + assert to_fp32_node.target is prims.convert_element_type.default + + graph = match.graph + user_node_list = list(mul_node.users) + for user_node in user_node_list: + # Step1: Duplicate the mul node + new_mul_node = clone_to_new_node(graph, mul_node, user_node) + # Step2: Duplicate the sub node + new_sub_node = clone_to_new_node(graph, sub_node, new_mul_node) + # Step3: Duplicate the to_fp32 node + _ = clone_to_new_node(graph, to_fp32_node, new_sub_node) + + +def _is_valid_dequant_conv2d_pattern(match): + # Here we do some further check to ensure: + # 1. It's a conv2d node with dim of 4, since we only support lowering of conv2d now. + # 2. The dequant pattern has only 1 user of conv2d node. + # If these conditions don't meet, we will not + # insert weight prepack node into the matched pattern. 
+ conv_node = match.output_node() + assert conv_node.target is aten.convolution.default + input_meta_value = conv_node.args[0].meta.get("val") + weight_meta_value = conv_node.args[1].meta.get("val") + for meta_value in [input_meta_value, weight_meta_value]: + if ( + meta_value is None + or meta_value.device.type != "cpu" + or meta_value.dim() != 4 + ): + # Only support conv2d now + return False + + mul_node = conv_node.args[0] + sub_node = mul_node.args[0] + to_fp32_node = sub_node.args[0] + + assert to_fp32_node.target is prims.convert_element_type.default + assert sub_node.target is aten.sub.Tensor + assert mul_node.target is aten.mul.Tensor + if ( + len(list(to_fp32_node.users)) != 1 + or len(list(sub_node.users)) != 1 + or len(list(mul_node.users)) != 1 + ): + # Ensure the dequant pattern only has 1 user + # since we will delete the dequant pattern here + return False + return True + + +def _register_qconv_weight_prepack_pass(pattern, pass_number): + @register_freezing_graph_pattern( + pattern, + extra_check=_is_valid_dequant_conv2d_pattern, + pass_number=pass_number, + ) + def qconv_weight_prepack(match: Match, *args, **kwargs): + """ + Match the pattern: + int8 activation + | + dequant_per_tensor + | + Conv2d <- optional(aten.clone.default) <- dequant_per_channel <- int8_weight + + Insert weight prepack node and change the pattern to: + int8 activation + | + onednn.qconv2d_pointwise <- onednn.qconv_prepack <- int8_weight + """ + conv_node = match.output_node() + assert conv_node.target is aten.convolution.default + mul_node = conv_node.args[0] + sub_node = mul_node.args[0] + to_fp32_node = sub_node.args[0] + has_clone_to_channel_last_node_in_pattern = ( + conv_node.args[1].target is aten.clone.default + ) + clone_node = ( + conv_node.args[1] if has_clone_to_channel_last_node_in_pattern else None + ) + dequant_per_channel = ( + clone_node.args[0] + if has_clone_to_channel_last_node_in_pattern + else conv_node.args[1] + ) + assert ( + dequant_per_channel.target 
+ is quantized_decomposed.dequantize_per_channel.default + ) + + # Activation QParams + qx, x_zp, x_scale = ( + kwargs["x"], + kwargs["x_zp"], + kwargs["x_scale"], + ) + + # Weight QParams + qw, w_scale, w_zp = ( + kwargs["q_weight"], + kwargs["w_scale"], + kwargs["w_zp"], + ) + + # Conv Params + bias, stride, padding, dilation, groups = ( + kwargs["b"], + kwargs["stride"], + kwargs["padding"], + kwargs["dilation"], + kwargs["groups"], + ) + + x_shape = qx.meta.get("tensor_meta").shape + graph = match.graph + with graph.inserting_before(conv_node): + # Insert weight prepack node and the QConv node + packed_weight_inputs = ( + qw, + w_scale, + x_scale, + x_zp, + stride, + padding, + dilation, + groups, + x_shape, + ) + packed_weight_op = torch.ops.onednn.qconv_prepack + prepack_weight_node = graph.call_function( + packed_weight_op, args=packed_weight_inputs + ) + + new_args = ( + qx, + x_scale, + x_zp, + prepack_weight_node, + w_scale, + w_zp, + bias, + stride, + padding, + dilation, + groups, + 1.0, # inv_output_scale + 0, # output_zero_point + True, # fp32_output + "none", # attr + [], # scalars + "", # algorithm + ) + new_conv_node = graph.call_function( + torch.ops.onednn.qconv2d_pointwise.default, args=new_args + ) + conv_node.replace_all_uses_with(new_conv_node) + new_conv_node.meta.update(conv_node.meta) + + # Erase the original conv node + graph.erase_node(conv_node) + # Erase the dequant pattern + graph.erase_node(mul_node) + graph.erase_node(sub_node) + graph.erase_node(to_fp32_node) + # Erase the dequant per channel pattern + if clone_node is not None: + graph.erase_node(clone_node) + graph.erase_node(dequant_per_channel) + + +def _generate_dequant_convolution_node_pattern(_dequant_per_channel_pattern): + dequant_convolution_node_pattern = CallFunction( + aten.convolution.default, + dequantize_per_tensor_activation_pattern, + _dequant_per_channel_pattern, + KeywordArg("b"), + KeywordArg("stride"), + KeywordArg("padding"), + KeywordArg("dilation"), + 
KeywordArg("is_transposed"), + KeywordArg("out_padding"), + KeywordArg("groups"), + ) + return dequant_convolution_node_pattern + + +def _generate_qconv_weight_prepack_patterns(): + return ( + _generate_dequant_convolution_node_pattern( + dequantize_per_channel_weight_pattern + ), + # There is another pattern due to the pass of convert_conv_weights_to_channels_last + # https://github.com/pytorch/pytorch/blob/07107919297db3f8ab37f11c12666b6d6d5f692e/torch/_inductor/freezing.py#L338-L362. + # Depend on some heuristics, it may or may not insert to(channel_last) node + # between convolution and dequant_per_channel node + _generate_dequant_convolution_node_pattern( + dequantize_per_channel_clone_weight_pattern + ), + ) + + +def _is_valid_dequant_linear_pattern(match): + # Check dequant pattern has only 1 user. + linear_node = match.output_node() + assert linear_node.target in (aten.addmm.default, aten.mm.default) + input_index = 0 if linear_node.target is aten.mm.default else 1 + mul_node = linear_node.args[input_index] + sub_node = mul_node.args[0] + to_fp32_node = sub_node.args[0] + + assert to_fp32_node.target is prims.convert_element_type.default + assert sub_node.target is aten.sub.Tensor + assert mul_node.target is aten.mul.Tensor + if ( + len(list(to_fp32_node.users)) != 1 + or len(list(sub_node.users)) != 1 + or len(list(mul_node.users)) != 1 + ): + # Ensure the dequant pattern only has 1 user + # since we will delete the dequant pattern here + return False + return True + + +def _register_qlinear_weight_prepack_pass(pattern, pass_number): + @register_freezing_graph_pattern( + pattern, + extra_check=_is_valid_dequant_linear_pattern, + pass_number=pass_number, + ) + def qlinear_weight_prepack(match: Match, *args, **kwargs): + """ + Match the pattern: + int8 activation + | + dequant_per_tensor + | + mm/addmm <- t <- dequant_per_channel <- int8_weight + + Insert weight prepack node and change the pattern to: + int8 activation + | + onednn.qlinear_pointwise <- 
onednn.qlinear_prepack <- int8_weight + """ + linear_node = match.output_node() + assert linear_node.target in (aten.addmm.default, aten.mm.default) + input_index = 0 if linear_node.target is aten.mm.default else 1 + weight_index = input_index + 1 + mul_node = linear_node.args[input_index] + sub_node = mul_node.args[0] + to_fp32_node = sub_node.args[0] + t_node = linear_node.args[weight_index] + dequant_per_channel = t_node.args[0] + assert ( + dequant_per_channel.target + is quantized_decomposed.dequantize_per_channel.default + ) + + # Activation QParams + qx, x_zp, x_scale = ( + kwargs["x"], + kwargs["x_zp"], + kwargs["x_scale"], + ) + + # Weight QParams + qw, w_scale, w_zp = ( + kwargs["q_weight"], + kwargs["w_scale"], + kwargs["w_zp"], + ) + + # Params + bias = kwargs["b"] if "b" in kwargs else None + + x_shape = qx.meta.get("tensor_meta").shape + graph = match.graph + with graph.inserting_before(linear_node): + # Insert weight prepack node and the qlinear node + packed_weight_inputs = ( + qw, + x_shape, + ) + packed_weight_op = torch.ops.onednn.qlinear_prepack + prepack_weight_node = graph.call_function( + packed_weight_op, args=packed_weight_inputs + ) + + new_args = ( + qx, + x_scale, + x_zp, + prepack_weight_node, + w_scale, + w_zp, + bias, + 1.0, # output_scale + 0, # output_zero_point + True, # fp32_output + "none", # post op name + [], # post op args + "", # post op algorithm + ) + new_linear_node = graph.call_function( + torch.ops.onednn.qlinear_pointwise.default, args=new_args + ) + linear_node.replace_all_uses_with(new_linear_node) + new_linear_node.meta.update(linear_node.meta) + + # Erase the original linear node + graph.erase_node(linear_node) + # Erase the dequant pattern + graph.erase_node(mul_node) + graph.erase_node(sub_node) + graph.erase_node(to_fp32_node) + # Erase the dequant per channel pattern + graph.erase_node(t_node) + graph.erase_node(dequant_per_channel) + + +def _generate_dequant_linear_node_pattern(_dequant_per_channel_pattern): + 
t_pattern = CallFunction( + aten.permute.default, + _dequant_per_channel_pattern, + KeywordArg("permute_axes"), + ) + dequant_linear_bias_pattern = CallFunction( + aten.addmm.default, + KeywordArg("b"), + dequantize_per_tensor_activation_pattern, + t_pattern, + ) + dequant_linear_no_bias_pattern = CallFunction( + aten.mm.default, + dequantize_per_tensor_activation_pattern, + t_pattern, + ) + return dequant_linear_bias_pattern, dequant_linear_no_bias_pattern + + +def _generate_qlinear_weight_prepack_patterns(): + return _generate_dequant_linear_node_pattern(dequantize_per_channel_weight_pattern) + + +@functools.lru_cache(None) +def _register_quantization_weight_pack_pass(): + _register_dequant_promotion_pass( + dequantize_per_tensor_activation_pattern, pass_number=0 + ) # pass_number=0 to run before weight prepack + weight_prepack_patterns = _generate_qconv_weight_prepack_patterns() + for weight_prepack_pattern in weight_prepack_patterns: + # Register to pass_number 1, so we can do dequant promotion in pass_number 0. + _register_qconv_weight_prepack_pass(weight_prepack_pattern, pass_number=1) + weight_prepack_patterns = _generate_qlinear_weight_prepack_patterns() + for weight_prepack_pattern in weight_prepack_patterns: + # Register to pass_number 1, so we can do dequant promotion in pass_number 0. + _register_qlinear_weight_prepack_pass(weight_prepack_pattern, pass_number=1) diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/replace_random.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/replace_random.py new file mode 100644 index 0000000000000000000000000000000000000000..dcb944f34844eca4810bcb4c97607553351ebc29 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/replace_random.py @@ -0,0 +1,125 @@ +import collections +import logging + +import torch + +from torch.fx.passes.shape_prop import _extract_tensor_metadata +from .. 
import config, inductor_prims +from ..pattern_matcher import ( + CallFunctionVarArgs, + Match, + PatternMatcherPass, + register_graph_pattern, +) +from ..virtualized import V + +log = logging.getLogger(__name__) +patterns = PatternMatcherPass() +aten = torch.ops.aten + + +def replace_random_passes(gm: torch.fx.GraphModule): + """Modify the given FX graph to use backend-native random ops""" + if config.fallback_random: + return 0 + + count = patterns.apply(gm) + count += fuse_seed_creation_pass(gm.graph) + + return count + + +def fuse_seed_creation_pass(graph: torch.fx.Graph): + """ + Horizontally fuse all the seed generation on each device + + a = inductor_seed(dev) + b = inductor_seed(dev) + + Becomes: + seeds = inductor_seeds(2, dev) + a = inductor_lookup_seed(seeds, 0) + b = inductor_lookup_seed(seeds, 1) + + We do this because seed creation is entirely launch overhead bound. + """ + device_seeds = collections.defaultdict(list) + for node in graph.nodes: + if CallFunctionVarArgs(inductor_prims.seed).match(node): + device_seeds[node.args[0]].append(node) + + if not device_seeds: + return 0 + + for device, seeds in device_seeds.items(): + with graph.inserting_before(seeds[0]): + combined = graph.call_function(inductor_prims.seeds, (len(seeds), device)) + with V.fake_mode: + combined.meta["val"] = torch.empty( + [len(seeds)], device=device, dtype=torch.int64 + ) + combined.meta["tensor_meta"] = _extract_tensor_metadata( + combined.meta["val"] + ) + + for idx, seed in enumerate(seeds): + with graph.inserting_before(seed): + new_seed = graph.call_function( + inductor_prims.lookup_seed, (combined, idx) + ) + seed.replace_all_uses_with(new_seed) + new_seed.meta.update(seed.meta) + graph.erase_node(seed) + + return len(device_seeds) + + +def default_kwargs(device): + return {} + + +def get_device(device): + if device is not None: + return device + return torch.empty([]).device # default device + + +@register_graph_pattern(CallFunctionVarArgs(aten.rand.default), 
pass_dict=patterns) +@register_graph_pattern(CallFunctionVarArgs(aten.randn.default), pass_dict=patterns) +def replace_random( + match: Match, size, *, dtype=None, device=None, layout=None, pin_memory=None +): + def replacement(size): + result = inductor_prims.random( + size, inductor_prims.seed(device), mode, **default_kwargs(device) + ) + if dtype is not None: + result = result.to(dtype) + return result + + mode = { + aten.rand.default: "rand", + aten.randn.default: "randn", + }[match.output_node().target] + device = get_device(device) + match.replace_by_example(replacement, [size]) + + +@register_graph_pattern(CallFunctionVarArgs(aten.randint.low), pass_dict=patterns) +def replace_randint( + match: Match, + low, + high, + size, + *, + dtype=torch.int64, + device=None, + layout=None, + pin_memory=None, +): + def replacement(size): + result = inductor_prims.randint(low, high, size, inductor_prims.seed(device)) + return result.to(dtype) + + device = get_device(device) + match.replace_by_example(replacement, [size]) diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/split_cat.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/split_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..dab9f0827b3c82dd82c2b7d9c553b1d9097faff8 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/split_cat.py @@ -0,0 +1,982 @@ +import itertools +import logging +import operator +from typing import Callable, List, Sequence, Tuple, Union + +import torch +from torch._dynamo.utils import counters + +from ..pattern_matcher import ( + Arg, + CallFunction, + CallFunctionVarArgs, + CallMethodVarArgs, + config_flag, + FailedMatch, + get_arg_value, + Ignored, + KeywordArg, + ListOf, + Match, + MatchContext, + MULTIPLE, + PatternExpr, + register_graph_pattern, + RepeatedExpr, +) +from .pre_grad import ( + merge_splits_pass, + normalization_pass, + split_cat_pass, + unbind_stack_pass, +) + +log = 
logging.getLogger(__name__) + + +def _get_split_args_default(split_node): + input_kwarg = "tensor" + split_size_kwarg = "split_size_or_sections" + dim_kwarg = "dim" + default_dim_value = 0 + if split_node.op == "call_method": + split_size_kwarg = "split_size" + return ( + get_arg_value(split_node, 0, input_kwarg), + get_arg_value(split_node, 1, split_size_kwarg), + get_arg_value(split_node, 2, dim_kwarg) or default_dim_value, + ) + + +def normalize_split_base(match: Match, _get_split_args: Callable): + """ + Normalize split with split_size into split_with_sizes, so that we only deal with one type of split in + subsequent optimizations + """ + split_node = match.nodes[0] + graph = match.graph + split_input, split_size, split_dim = _get_split_args(split_node) + if split_input is None or split_dim is None or split_size is None: + log.info("couldn't find split args") + return + if "example_value" not in split_node.meta: + log.warning("example value absent for node: %s", split_node) + return + assert isinstance(split_node.meta["example_value"], (list, tuple)) + split_sections = [t.size()[split_dim] for t in split_node.meta["example_value"]] + + if any(isinstance(section, torch.SymInt) for section in split_sections): + # TODO dynamic_shapes with assume_static_by_default=False fails while AOT Autograd tracing. 
+ return + if split_dim < 0: # Normalize split dim + split_dim += split_input.meta["example_value"].dim() + with graph.inserting_after(split_node): + new_split_node = graph.call_function( + torch.split, + args=(split_input, split_sections), + kwargs={"dim": split_dim}, + ) + split_node.replace_all_uses_with(new_split_node) + new_split_node.meta.update(split_node.meta) + graph.erase_node(split_node) + counters["inductor"]["split_cat_norm"] += 1 + + +@register_graph_pattern( + CallFunctionVarArgs(torch.split, users=MULTIPLE), + pass_dict=normalization_pass, + extra_check=config_flag("split_cat_fx_passes"), +) +@register_graph_pattern( + CallMethodVarArgs("split", users=MULTIPLE), + pass_dict=normalization_pass, + extra_check=config_flag("split_cat_fx_passes"), +) +def normalize_split_default(match: Match, *args, **kwargs): + return normalize_split_base(match, _get_split_args_default) + + +@register_graph_pattern( + CallFunctionVarArgs(torch.cat, users=MULTIPLE), + pass_dict=normalization_pass, + extra_check=config_flag("split_cat_fx_passes"), +) +def normalize_cat_default(match: Match, *args, **kwargs): + cat_node = match.nodes[0] + graph = match.graph + tensors = get_arg_value(cat_node, 0, "tensors") + cat_dim = get_arg_value(cat_node, 1, "dim") + if cat_dim is None: + cat_axis = cat_node.kwargs.get("axis") + if cat_axis is not None: + cat_dim = cat_axis + else: + cat_dim = 0 + if tensors is None or cat_dim is None: + log.info("couldn't find cat args") + return + assert isinstance(tensors, (list, tuple)) + for tensor in itertools.chain([cat_node], tensors): + if "example_value" not in tensor.meta: + log.warning("example value absent for node: %s", tensor) + return + + ndim = cat_node.meta["example_value"].dim() + + def is_empty_tensor(x): + # special case where torch.cat supports cat'ing with an empty tensor + x_shape = x.meta["example_value"].shape + return len(x_shape) == 1 and x_shape[0] == 0 + + assert all( + ndim == x.meta["example_value"].dim() or 
is_empty_tensor(x) for x in tensors + ) + + if cat_dim < 0: # Normalize cat dim + cat_dim += ndim + + with graph.inserting_after(cat_node): + new_cat_node = graph.call_function( + torch.cat, + args=(tensors,), + kwargs={"dim": cat_dim}, + ) + cat_node.replace_all_uses_with(new_cat_node) + new_cat_node.meta.update(cat_node.meta) + graph.erase_node(cat_node) + counters["inductor"]["split_cat_norm"] += 1 + + +def find_next_users(split_node): + next_users = [] + for getitem_node in split_node.users.keys(): + for getitem_user in getitem_node.users.keys(): + if getitem_user not in next_users: + next_users.append(getitem_user) + return next_users + + +@register_graph_pattern( + CallMethodVarArgs("squeeze", users=MULTIPLE), + pass_dict=normalization_pass, + extra_check=config_flag("split_cat_fx_passes"), +) +def normalize_squeeze_default(match: Match, *args, **kwargs): + squeeze_node = match.nodes[0] + squeeze_input = get_arg_value(squeeze_node, 0) + + if "dim" in squeeze_node.kwargs: + assert len(squeeze_node.args) == 1 + dim = squeeze_node.kwargs["dim"] + elif len(squeeze_node.args) == 1: + # squeeze(Tensor) + dim = None + elif len(squeeze_node.args) == 2: + # squeeze(Tensor self, int dim) + # squeeze(Tensor self, int[] dim) + dim = squeeze_node.args[1] + else: + # squeeze(Tensor self, int[] dim) (called with varargs) + dim = squeeze_node.args[1:] + + if isinstance(dim, Sequence) and len(dim) == 1: + dim = dim[0] + + with match.graph.inserting_after(squeeze_node): + if dim is None: + new_squeeze_node = match.graph.call_function( + torch.squeeze, args=(squeeze_input,) + ) + else: + new_squeeze_node = match.graph.call_function( + torch.squeeze, args=(squeeze_input, dim) + ) + squeeze_node.replace_all_uses_with(new_squeeze_node) + match.graph.erase_node(squeeze_node) + + +class TorchSplit(CallFunction): + """ + Matches a call to torch.split if it is in a normalized form. Ensures that all users of + splits are unique getitems. 
+ """ + + def __init__(self, arg, sizes): + # using KeywordArg("dim") for `dim` checks they all match + super().__init__( + torch.split, arg, sizes, _users=MULTIPLE, dim=KeywordArg("dim") + ) + + def _match(self, node: torch.fx.Node, ctx: MatchContext): + m = super()._match(node, ctx) + if not m: + return m + split_sections = node.args[1] + if not isinstance(split_sections, (list, tuple)): + return FailedMatch("split not normalized") + # check users are all unique getitems + seen_idxs = set() + for user in node.users: + if not CallFunction(operator.getitem, Arg(), Arg()).match(user): + # This should ideally never happen. Split user should always be a getitem + return FailedMatch(f"user of split not a getitem: {user}") + if not isinstance(user.args[1], int): + return FailedMatch("only integer getitems are handled") + if user.args[1] in seen_idxs: + return FailedMatch(f"duplicate getitem {user.args[1]}") + if user.args[-1] < 0: + # This shouldn't ideally happen as dynamo normalizes indexes to positive + return FailedMatch("negative index") + seen_idxs.add(user.args[1]) + return m + + +@register_graph_pattern( + TorchSplit( + CallFunction( + operator.getitem, + TorchSplit( + KeywordArg("first_split_input"), + KeywordArg("first_split_sections"), + ), + Ignored(), + ), + KeywordArg("next_split_sections"), + ), + pass_dict=merge_splits_pass, + extra_check=config_flag("split_cat_fx_passes"), +) +def merge_splits( + match: Match, + first_split_input: torch.fx.Node, + first_split_sections: List[int], + next_split_sections: List[int], + # Note: dim is implicitly passed by TorchSplit, as it internally uses a pattern with dim + dim: int, +): + node = match.output_node() + graph = match.graph + first_split = node.args[0].args[0] + next_split_index = node.args[0].args[1] + + new_split_sections = list(first_split_sections) + new_split_sections[next_split_index : next_split_index + 1] = next_split_sections + + first_split_dim = first_split.kwargs["dim"] + + to_remove = [] + + with 
graph.inserting_before(first_split): + # Add the new split node + new_split = graph.call_function( + torch.split, + args=(first_split_input, new_split_sections), + kwargs={"dim": first_split_dim}, + ) + first_split_num_to_user = { + user.args[1]: user for user in first_split.users.keys() + } + + new_split_num = 0 + for split_num in range(len(first_split_sections)): + if split_num not in first_split_num_to_user: + new_split_num += 1 + continue + old_getitem = first_split_num_to_user[split_num] + if split_num != next_split_index: + old_getitem.update_arg(0, new_split) + old_getitem.update_arg(1, new_split_num) + new_split_num += 1 + else: + next_split_num_to_user = { + user.args[1]: user for user in node.users.keys() + } + for next_split_num in range(len(next_split_sections)): + with graph.inserting_after(new_split): + new_getitem = graph.call_function( + operator.getitem, args=(new_split, new_split_num) + ) + new_split_num += 1 + next_getitem = next_split_num_to_user[next_split_num] + new_getitem.meta.update(next_getitem.meta) + next_getitem.replace_all_uses_with(new_getitem) + to_remove.append(next_getitem) + to_remove.append(node) + to_remove.append(old_getitem) + + to_remove.append(first_split) + for node in to_remove: + graph.erase_node(node) + + counters["inductor"]["consecutive_split_merged"] += 1 + + +class SplitCatSimplifier: + """ + Helper class to simplify split-cat pattern. In simple cases, both split and cat node can be removed in a "split->cat" + pattern. However, there are various cases where they can't and we need to simplify split/ add transforms before cat. + Some such cases are: + 1. Final node has additional args (not coming from the initial split) + 2. Shuffling of args between split/cat + 3. Some final nodes are non-(cat/stack) + 4. Split-dim != cat-dim (but equal split) + + Note that any combination of the above cases can happen. + + To deal with 1, 2, & 3 - we iterate over all users of split. And figure out common "ranges" that can be merged. 
+ Then, we simplify the split accordingly. In the best case, split can be entirely removed. + + To deal with 4, we add some transformations (unflatten + movedim) (See `get_transform_params`). + + Finally, depending on final node being cat or stack, unsqueeze/flatten needs to be added. + + """ + + def simplify( + self, + graph: torch.fx.Graph, + split_node: torch.fx.Node, + split_sections: List[int], + ): + # Find the next users (i.e. users after the getitem) + next_users = find_next_users(split_node) + # Gather inputs of the next users. When inputs come from `split_node`, they are instead represented by + # a tuple indicating the split ranges. See `get_user_input_list` for more details + user_inputs_list = self.get_user_input_list(split_node, next_users) + # Simplify the split_sections based on user_inputs_list. In simpler cases, len(simplified_split_ranges) == 1 and + # we can simply replace the split node. Otherwise, we simplify it. + simplified_split_ranges = self.get_simplified_split_ranges( + split_sections, next_users, user_inputs_list + ) + if not simplified_split_ranges: # Simplification not possible + return + transform_params_list = self.get_transform_params( + split_node, next_users, user_inputs_list + ) + if not transform_params_list: + return + + # Start actual replacement + user_inputs_list_new = self.replace_split( + graph, split_node, split_sections, user_inputs_list, simplified_split_ranges + ) + self.replace_cat( + graph, split_node, next_users, user_inputs_list_new, transform_params_list + ) + self.erase_old_nodes(graph, split_node, next_users) + + def get_user_input_list( + self, split_node, next_users + ) -> List[List[Union[torch.fx.Node, Tuple[int, int]]]]: + """ + Returns list of inputs to the following user nodes, in order. The outer list represents the user node. The inner + list represents the inputs to that particular node. 
This list can either contain + - a tuple representing the ranges of get_items that should go into the cat (closed interval) + - torch.fx.Node representing "other" inputs (which are not coming from our split) + """ + user_inputs_list: List[List[Union[torch.fx.Node, Tuple[int, int]]]] = [] + for user in next_users: + if user.target in {torch.cat, torch.stack}: + user_inputs_list.append(self.get_merged_user_inputs(split_node, user)) + else: + user_inputs_list.append(self.get_non_cat_node_input(split_node, user)) + return user_inputs_list + + def get_merged_user_inputs( + self, split_node: torch.fx.Node, cat_node: torch.fx.Node + ) -> List[Union[torch.fx.Node, Tuple[int, int]]]: + user_inputs = get_arg_value(cat_node, 0, "tensors") + simplified_user_inputs = [] + split_users = set(split_node.users.keys()) + for user_input in user_inputs: + if user_input not in split_users: + simplified_user_inputs.append(user_input) + else: + # Add which "getitem" cat depends on + simplified_user_inputs.append(user_input.args[1]) + return self.merge_consecutive_inputs(simplified_user_inputs) + + def get_non_cat_node_input( + self, split_node: torch.fx.Node, node: torch.fx.Node + ) -> List[Tuple[int, int]]: + """ + Get input for a non cat node in the same format as `get_merged_user_inputs` + """ + node_input = [] + split_users = set(split_node.users.keys()) + for node_arg in node.all_input_nodes: + if node_arg in split_users: + getitem_num = get_arg_value(node_arg, 1) + node_input.append((getitem_num, getitem_num)) + return node_input + + def merge_consecutive_inputs( + self, inputs: List[Union[torch.fx.Node, int]] + ) -> List[Union[torch.fx.Node, Tuple[int, int]]]: + """ + Merge consecutive inputs going into a user node. + + For e.g. 
+ [arg0, 0, 1, 2, arg1] -> [arg0, (0, 2), arg1] + """ + merged_ranges = [] + cur_range = None + for input_ in inputs: + if isinstance(input_, int): + if not cur_range: + cur_range = [input_, input_] + elif input_ == cur_range[1] + 1: + cur_range[1] += 1 + else: + merged_ranges.append(tuple(cur_range)) + cur_range = [input_, input_] + else: + if cur_range: + merged_ranges.append(tuple(cur_range)) + cur_range = None + merged_ranges.append(input_) + if cur_range: + merged_ranges.append(tuple(cur_range)) + return merged_ranges + + def get_simplified_split_ranges( + self, + split_sections, + next_users, + user_inputs_list: List[List[Union[torch.fx.Node, Tuple[int, int]]]], + ) -> List[Tuple[int, int]]: + ranges = set() + for user_node, user_inputs in zip(next_users, user_inputs_list): + ranges |= { + user_input + for user_input in user_inputs + if isinstance(user_input, tuple) + } + cumulative_sizes = [0] + torch.cumsum(torch.tensor(split_sections), 0).tolist() + split_ranges = sorted( + [(cumulative_sizes[r[0]], cumulative_sizes[r[1] + 1]) for r in ranges] + ) + + if not self.has_non_overlapping_ranges( + split_ranges, + ): # This need not be a strict condition + # However, we keep it now for simplicity. 
+ return + split_ranges = self.fill_gaps(split_ranges, 0, cumulative_sizes[-1]) + if len(split_sections) == len(split_ranges): # Simplification not possible + return + counters["inductor"]["scmerge_split_sections_removed"] = len( + split_sections + ) - len(split_ranges) + return split_ranges + + def has_non_overlapping_ranges(self, ranges: List[Tuple[int, int]]): + for range_, next_range in zip(ranges, ranges[1:]): + if range_[1] > next_range[0]: + return False + return True + + def fill_gaps(self, ranges, min_, max_): + cur = min_ + filled_ranges = [] + for a, b in ranges: + if cur < a: + filled_ranges.append((cur, a)) + filled_ranges.append((a, b)) + cur = b + if filled_ranges[-1][1] < max_: + filled_ranges.append((filled_ranges[-1][1], max_)) + return filled_ranges + + def get_transform_params( + self, + split_node: torch.fx.Node, + next_users: List[torch.fx.Node], + user_inputs_list: List[List[Union[torch.fx.Node, Tuple[int, int]]]], + ) -> List[List[Tuple]]: + """ + Figure out what transforms are needed for each input to each cat node. 
+ + We replace a split node with an unflatten followed by a movedim + """ + split_dim = split_node.kwargs["dim"] + split_sections = split_node.args[1] + transform_params_list = [] + for user_node, user_inputs in zip(next_users, user_inputs_list): + if user_node.target not in {torch.cat, torch.stack}: + transform_params_list.append(None) + continue + + cat_dim = get_arg_value(user_node, 1, "dim") + transform_params = [] + for user_input in user_inputs: + if split_dim == cat_dim and user_node.target == torch.cat: + # No transform needed + transform_params.append((None, None, None, None)) + elif isinstance(user_input, tuple): # Split being simplified + # Verify equal split + subset_split_sections = split_sections[ + user_input[0] : user_input[1] + 1 + ] + # All sections should be equal + if len(set(subset_split_sections)) != 1: + return + + num_splits = len(subset_split_sections) + unflatten_params = (split_dim, (num_splits, -1)) + movedim_params = ( + (split_dim, cat_dim) if split_dim != cat_dim else None + ) + transform_params.append( + (unflatten_params, movedim_params, None, None) + ) + elif ( + user_node.target == torch.stack or split_dim != cat_dim + ): # We need to unsqueeze inputs not coming through split + transform_params.append((None, None, (cat_dim,), None)) + else: # Non-split inputs + transform_params.append((None, None, None, None)) + transform_params_list.append(transform_params) + return transform_params_list + + def replace_split( + self, + graph: torch.fx.Graph, + split_node: torch.fx.Node, + split_sections: List[int], + user_inputs_list: List[List[Union[torch.fx.Node, Tuple[int, int]]]], + split_ranges: List[Tuple[int, int]], + ) -> List[List[torch.fx.Node]]: + """ + Replace the split node. It can either remove the split node if len(split_ranges) == 1, or simplify it + into a split with lesser sections if len(split_ranges) > 1. + + Returns the new `user_inputs_list`, with tuples replaced with new getitems from the newer split node. 
+ """ + split_input = split_node.args[0] + split_dim = split_node.kwargs["dim"] + if len(split_ranges) == 1: # We can completely eliminate the split node + split_items = [split_input] + else: + with graph.inserting_after(split_node): + new_split = graph.call_function( + torch.split, + args=( + split_input, + [r[1] - r[0] for r in split_ranges], + split_dim, + ), + ) + new_split.meta.update(split_node.meta) + counters["inductor"]["scmerge_split_added"] += 1 + with graph.inserting_after(new_split): + split_items = [ + graph.call_function(operator.getitem, args=(new_split, i)) + for i in range(len(split_ranges)) + ] + # Now assign the right getitem to the right input + cumulative_sizes = [0] + torch.cumsum(torch.tensor(split_sections), 0).tolist() + new_user_inputs_list = [] + for user_inputs in user_inputs_list: + new_user_inputs = [] + for user_input in user_inputs: + if isinstance(user_input, tuple): + # Find the correct new getitem (present in split_items) + new_user_inputs.append( + split_items[ + split_ranges.index( + ( + cumulative_sizes[user_input[0]], + cumulative_sizes[user_input[1] + 1], + ) + ) + ] + ) + else: + new_user_inputs.append(user_input) + new_user_inputs_list.append(new_user_inputs) + return new_user_inputs_list + + def replace_cat( + self, + graph, + split_node, + next_users, + user_inputs_list_new, + transform_params_list, + ): + split_dim = split_node.kwargs["dim"] + + split_users = split_node.users.keys() + new_cats = [] + for user_node, user_inputs_new, transform_params in zip( + next_users, user_inputs_list_new, transform_params_list + ): + if user_node.target not in {torch.cat, torch.stack}: + # Change the args and kwargs of non-cat/stack nodes. 
Replace old getitems (belonging to + # the original split node) with the newer getitems + next_cat_input = 0 + for input_node in user_node.all_input_nodes: + if input_node in split_users: + user_node.replace_input_with( + input_node, user_inputs_new[next_cat_input] + ) + next_cat_input += 1 + continue + + # Handle cat/stack user nodes + cat_dim = get_arg_value(user_node, 1, "dim") + user_inputs_new_transformed = [] + # For `unsqueeze` transform, we will combine consecutive inputs with the same unsqueeze params, and stack them + to_stack = [] + stack_dim = None + with graph.inserting_before(user_node): + for user_input_new, transform_param in zip( + user_inputs_new, transform_params + ): + # Apply transforms + ( + unflatten_params, + movedim_params, + unsqueeze_params, + flatten_params, + ) = transform_param + if unsqueeze_params and ( + stack_dim is None or stack_dim == unsqueeze_params[0] + ): + to_stack.append(user_input_new) + stack_dim = unsqueeze_params[0] + continue + elif to_stack: + stacked_input = graph.call_function( + torch.stack, args=(to_stack, stack_dim) + ) + to_stack = [] + stack_dim = None + user_inputs_new_transformed.append(stacked_input) + if unsqueeze_params: + to_stack.append(user_input_new) + stack_dim = unsqueeze_params[0] + continue + + if unflatten_params: + user_input_new = graph.call_function( + torch.unflatten, args=(user_input_new, *unflatten_params) + ) + if movedim_params: + user_input_new = graph.call_function( + torch.movedim, args=(user_input_new, *movedim_params) + ) + if flatten_params: + user_input_new = graph.call_function( + torch.flatten, args=(user_input_new, *flatten_params) + ) + user_inputs_new_transformed.append(user_input_new) + if to_stack: + stacked_input = graph.call_function( + torch.stack, args=(to_stack, stack_dim) + ) + user_inputs_new_transformed.append(stacked_input) + + with graph.inserting_after(user_node): + if len(user_inputs_new_transformed) > 1: + new_cat_node = graph.call_function( + torch.cat, 
args=(user_inputs_new_transformed, cat_dim) + ) + new_cat_node.meta.update(user_node.meta) + counters["inductor"]["scmerge_cat_added"] += 1 + else: + new_cat_node = user_inputs_new_transformed[-1] + + if ( + user_node.target == torch.cat + and split_dim != cat_dim + and split_node.target == torch.split + ): + with graph.inserting_after(new_cat_node): + new_cat_node = graph.call_function( + torch.flatten, args=(new_cat_node, cat_dim, cat_dim + 1) + ) + user_node.replace_all_uses_with(new_cat_node) + new_cats.append(new_cat_node) + + def erase_old_nodes(self, graph, split_node, next_users): + to_remove = [split_node] + counters["inductor"]["scmerge_split_removed"] += 1 + for getitem_node in split_node.users.keys(): + to_remove.append(getitem_node) + for next_user in next_users: + if next_user.target not in {torch.cat, torch.stack}: + continue + counters["inductor"]["scmerge_cat_removed"] += 1 + to_remove.append(next_user) + for node in reversed(to_remove): + graph.erase_node(node) + + +class UnbindCatRemover(SplitCatSimplifier): + """ + Helper class to merge Unbind->Cat/Stack. Many of the cases are similar to SplitCatSimplifier. + + Unbind can't be simplified like splits. So, we can only remove the unbind node. Other than this, + other cases like multiple users, additional args, dim mismatch are similar to `SplitCatSimplifier`, + hence we extend that class. 
+ """ + + def remove_unbind( + self, + graph: torch.fx.Graph, + unbind_node: torch.fx.Node, + ): + num_unbind = ( + max(getitem_node.args[1] for getitem_node in unbind_node.users.keys()) + 1 + ) + split_sections = [1 for _ in range(num_unbind)] + + super().simplify(graph, unbind_node, split_sections) + + def get_simplified_split_ranges( + self, + split_sections, + next_users, + user_inputs_list: List[List[Union[torch.fx.Node, Tuple[int, int]]]], + ) -> List[Tuple[int, int]]: + simplified_split_ranges = super().get_simplified_split_ranges( + split_sections, next_users, user_inputs_list + ) + if not simplified_split_ranges or len(simplified_split_ranges) != 1: + return None + return simplified_split_ranges + + def get_transform_params( + self, + unbind_node: torch.fx.Node, + next_users: List[torch.fx.Node], + user_inputs_list: List[List[Union[torch.fx.Node, Tuple[int, int]]]], + ) -> List[List[Tuple]]: + """ + Figure out what transforms are needed for each input to each cat node. + + Here is the rough transforms we apply: + + x -> unbind -> stack => x -> movedim + + x -> unbind -> cat => x -> movedim -> flatten + + When cat/stack nodes have additional args: + + addn ---| addn -> unsqueeze ---| + x -> unbind -> stack => x -> movedim -> cat + + addn ---| addn ---| + x -> unbind -> cat => x -> movedim -> flatten -> cat + + (Note application of these depends on the dims as well) + + + """ + split_dim = unbind_node.kwargs["dim"] + transform_params_list = [] + for user_node, user_inputs in zip(next_users, user_inputs_list): + cat_dim = get_arg_value(user_node, 1, "dim") + transform_params = [] + for user_input in user_inputs: + if isinstance(user_input, tuple): + # User input is coming from unbind + movedim_params = ( + (split_dim, cat_dim) if split_dim != cat_dim else None + ) + flatten_params = None + if user_node.target == torch.cat: + flatten_params = (cat_dim, cat_dim + 1) + transform_params.append( + (None, movedim_params, None, flatten_params) + ) + elif ( + 
class GetItem(CallFunction):
    """Pattern for ``operator.getitem`` with a custom anchor-node search.

    Behaves like ``CallFunction(operator.getitem, ...)`` but overrides
    ``find_anchor_nodes`` so repeated matches are not short-circuited through
    ``ctx.pattern_to_node`` (see comment below).
    """

    def __init__(self, arg, index, _users=1):
        super().__init__(operator.getitem, arg, index, _users=_users)

    def find_anchor_nodes(self, ctx: MatchContext, searched):
        # We generally match GetItem with arg being an Arg(). So, we never return the anchor
        # nodes as the stored node in ctx.pattern_to_node is returned. Here we override find_anchor_nodes
        # to not use ctx.pattern_to_node
        for pattern in self.flat_args_kwargs[0]:
            if isinstance(pattern, PatternExpr):
                for other_node in pattern.find_anchor_nodes(ctx, searched):
                    # Only fx graph nodes can have users to walk.
                    if not isinstance(other_node, torch.fx.Node):
                        continue
                    # Yield each user of the anchor that itself matches this
                    # getitem pattern, at most once per node.
                    for node in other_node.users:
                        if node not in searched:
                            if self._match_fns(node):
                                yield node
                                searched.add(node)
next_users = find_next_users(split) + if not all(node.target == torch.squeeze for node in next_users): + return + with graph.inserting_before(match.output_node()): + unbind = graph.call_function( + torch.unbind, args=(split_input,), kwargs={"dim": dim} + ) + for item_index, getitem_node in sorted( + [ + (getitem_node.args[1], getitem_node) + for getitem_node in split.users.keys() + ] + ): + squeeze = next(iter(getitem_node.users.keys())) + new_get_item = graph.call_function( + operator.getitem, args=(unbind, item_index) + ) + squeeze.replace_all_uses_with(new_get_item) + new_get_item.meta.update(squeeze.meta) + graph.erase_node(squeeze) + graph.erase_node(getitem_node) + graph.erase_node(split) + counters["inductor"]["split_squeeze_replaced"] += 1 + + +getitem_unbind = ListOf( + GetItem( + CallFunction( + torch.unbind, + KeywordArg("unbind_input"), + dim=KeywordArg("dim"), + _users=MULTIPLE, + ), + Ignored(), + _users=MULTIPLE, + ), + partial=True, +) + + +@register_graph_pattern( + CallFunction([torch.stack, torch.cat], getitem_unbind, Ignored(), _users=MULTIPLE), + pass_dict=unbind_stack_pass, + extra_check=config_flag("split_cat_fx_passes"), +) +@register_graph_pattern( + CallFunction( + [torch.stack, torch.cat], getitem_unbind, dim=Ignored(), _users=MULTIPLE + ), + pass_dict=unbind_stack_pass, + extra_check=config_flag("split_cat_fx_passes"), +) +@register_graph_pattern( + CallFunction( + [torch.stack, torch.cat], tensors=getitem_unbind, dim=Ignored(), _users=MULTIPLE + ), + pass_dict=unbind_stack_pass, + extra_check=config_flag("split_cat_fx_passes"), +) +def merge_unbind_stack(match: Match, unbind_input: torch.fx.Node, dim: int): + unbind_node = next(node for node in match.nodes if node.target == torch.unbind) + UnbindCatRemover().remove_unbind(match.graph, unbind_node) + + +getitem_split = ListOf( + CallFunction( + operator.getitem, + TorchSplit( + Ignored(), + KeywordArg("split_sections"), + ), + Ignored(), + _users=MULTIPLE, + ), + partial=True, +) + + 
@register_graph_pattern(
    CallFunction(
        [torch.stack, torch.cat],
        tensors=getitem_split,
        dim=Ignored(),
        _users=MULTIPLE,
    ),
    pass_dict=split_cat_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
@register_graph_pattern(
    CallFunction(
        [torch.stack, torch.cat],
        getitem_split,
        dim=Ignored(),
        _users=MULTIPLE,
    ),
    pass_dict=split_cat_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
@register_graph_pattern(
    CallFunction(
        [torch.stack, torch.cat],
        getitem_split,
        Ignored(),
        _users=MULTIPLE,
    ),
    pass_dict=split_cat_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
def simplify_split_cat(match: Match, split_sections: List[int], dim: int):
    """Entry point for simplifying a normalized torch.split whose getitems
    feed cat/stack nodes; delegates to SplitCatSimplifier.

    Registered for the tensors-kwarg, dim-kwarg, and positional-dim spellings
    of cat/stack.
    """
    if not isinstance(split_sections, (list, tuple)):  # Unnormalized split
        return
    split_node = next(node for node in match.nodes if node.target == torch.split)
    SplitCatSimplifier().simplify(match.graph, split_node, split_sections)
@dataclass
class TypedExpr:
    """A SymPy expression with associated type"""

    # The symbolic value being propagated.
    expr: sympy.Expr
    # The torch dtype the expression is treated as having; SymPyOps uses it
    # for type promotion (torch.promote_types) on binary ops.
    dtype: torch.dtype
@dataclass
class IndexPropVar:
    """Value wrapper flowing through IndexPropagation.

    ``value`` is either an opaque IR value from the wrapped handler, or a
    TypedExpr when ``is_symbolic`` is True.
    """

    value: Any  # Either an IR value, or TypedExpr if is_symbolic is true
    is_symbolic: bool = False

    @staticmethod
    def new_symbolic(expr: TypedExpr) -> "IndexPropVar":
        # Convenience constructor for the symbolic (TypedExpr) case.
        return IndexPropVar(expr, is_symbolic=True)

    def __post_init__(self):
        # Invariant: a symbolic var must carry a TypedExpr payload.
        assert not self.is_symbolic or isinstance(
            self.value, TypedExpr
        ), "Symbolic IndexPropVar must contain a TypedExpr"
computation. + + This aims to maximize the compile time simplification possible, and convert + indirect indexing from arange into normal static indexing. + + """ + + def __init__(self, inner: Any): + self._inner = inner + + def materialize_expr(self, expr: sympy.Expr, dtype: torch.dtype) -> Any: + # Construct a new constant/index_expr from the SymPy expression + if isinstance(expr, sympy.Integer): + return self._inner.constant(int(expr), dtype) + elif not expr.free_symbols: + return self._inner.constant(float(expr), dtype) + return self._inner.index_expr(expr, dtype) + + def unwrap(self, a: Union[Any, IndexPropVar]) -> Any: + if isinstance(a, (list, tuple)): + return tuple(self.unwrap(v) for v in a) + + if not isinstance(a, IndexPropVar): + return a + + # Prefer the sympy representation if possible + if a.is_symbolic: + return self.materialize_expr(a.value.expr, a.value.dtype) + + return a.value + + def wrap(self, a: Any) -> Union[IndexPropVar, Sequence[IndexPropVar]]: + if isinstance(a, (list, tuple)): + return tuple(self.wrap(v) for v in a) + return IndexPropVar(a) + + def fallback( + self, name: str, args: Tuple, kwargs: Dict[str, Any] + ) -> Union[IndexPropVar, Tuple[IndexPropVar, ...]]: + # Fallback to the wrapped handler + new_args = [self.unwrap(a) for a in args] + new_kwargs = {k: self.unwrap(v) for k, v in kwargs.items()} + return self.wrap(getattr(self._inner, name)(*new_args, **new_kwargs)) + + def propagate_sympy( + self, name: str, args: Tuple, kwargs: Dict[str, Any] + ) -> IndexPropVar: + # Build a new SymPy expression from this ops call + def unwrap(a: Union[Any, IndexPropVar]) -> Any: + if not isinstance(a, IndexPropVar): + return a + return a.value + + new_args = [unwrap(a) for a in args] + new_kwargs = {k: unwrap(v) for k, v in kwargs.items()} + new_expr = getattr(SymPyOps, name)(*new_args, **new_kwargs) + is_valid_expr = new_expr is not NotImplemented and ( + # Inductor doesn't expect floating point in sympy expressions, but + # allow floating 
    def __getattr__(self, name: str) -> Callable[..., Union[Any, IndexPropVar]]:
        """Intercept every ``ops.<name>(...)`` call.

        Propagates through SymPyOps when a symbolic rule exists and all
        IndexPropVar operands are symbolic; otherwise falls back to the
        wrapped handler.
        """

        def inner(*args: Any, **kwargs: Any) -> Union[Any, IndexPropVar]:
            # No symbolic rule for this op: delegate unchanged.
            if not hasattr(SymPyOps, name):
                return self.fallback(name, args, kwargs)

            # Any non-symbolic IndexPropVar operand forces a fallback, since
            # a pure sympy expression cannot be built from it.
            var_arguments = [
                a
                for a in itertools.chain(args, kwargs.values())
                if isinstance(a, IndexPropVar)
            ]
            if not all(v.is_symbolic for v in var_arguments):
                return self.fallback(name, args, kwargs)

            return self.propagate_sympy(name, args, kwargs)

        return inner
+ index = index.value.expr + return index + Where(index >= 0, 0, size) + return self.fallback("indirect_indexing", (index, size, check), {}).value diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/utils.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..417defc24c2394228fc5a05b8a10b9259724a8c4 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/utils.py @@ -0,0 +1,1045 @@ +import collections +import contextlib +import functools +import inspect +import itertools +import logging +import math +import operator +import os +import shutil +import sys +import tempfile +import textwrap +import time +import unittest +from io import StringIO +from typing import ( + Any, + Callable, + Dict, + Iterable, + List, + NamedTuple, + Optional, + Set, + TypeVar, + Union, + ValuesView, +) +from unittest import mock + +import sympy + +import torch +from torch.fx.immutable_collections import immutable_dict, immutable_list +from torch.utils._sympy.functions import CleanDiv, FloorDiv, ModularIndexing + +from . import config +from .cuda_properties import current_device, get_device_capability + +log = logging.getLogger(__name__) + +_T = TypeVar("_T") +VarRanges = Dict[sympy.Expr, sympy.Expr] + + +def do_bench(*args, **kwargs): + @functools.lru_cache(None) + def load_triton(): + try: + # NB: Lazily load triton, as importing triton is slow + # see https://github.com/openai/triton/issues/1599 + from triton.testing import do_bench as triton_do_bench + except ImportError: + raise NotImplementedError("requires Triton") + + # triton PR https://github.com/openai/triton/pull/1513 change the + # quantile fields name from 'percentiles' to 'quantiles' + # and change the default value from (0.5, 0.2, 0.8) to None. + # This may break inductor since a caller expects a tuple may get a item. + # + # Add a wrapper to maintain the same behavior for inductor. 
+ # Maybe we should have own implementation of this function? + return triton_do_bench, ( + "quantiles" + if inspect.signature(triton_do_bench).parameters.get("quantiles") + is not None + else "percentiles" + ) + + triton_do_bench, quantile_field_name = load_triton() + + if quantile_field_name not in kwargs: + kwargs[quantile_field_name] = (0.5, 0.2, 0.8) + return triton_do_bench(*args, **kwargs)[0] + + +@functools.lru_cache(None) +def has_triton() -> bool: + if not torch.cuda.is_available(): + return False + try: + import triton + + return triton is not None and get_device_capability() >= (7, 0) + except ImportError: + return False + + +@functools.lru_cache(None) +def has_torchvision_roi_align() -> bool: + try: + from torchvision.ops import roi_align # noqa: F401 + + return roi_align is not None and hasattr( + getattr(torch.ops, "torchvision", None), "roi_align" + ) + except ImportError: + return False + + +def conditional_product(*args): + return functools.reduce(operator.mul, [x for x in args if x]) + + +def decode_device(device: Union[Optional[torch.device], str]) -> torch.device: + if device is None: + return torch.tensor(0.0).device # default device + if isinstance(device, str): + device = torch.device(device) + if device.type == "cuda" and device.index is None: + return torch.device("cuda", index=current_device()) + return device + + +def sympy_product(it): + return functools.reduce(operator.mul, it, sympy.Integer(1)) + + +def sympy_dot(seq1, seq2): + assert len(seq1) == len(seq2) + return sympy.expand(sum(a * b for a, b in zip(seq1, seq2))) + + +def unique(it: Iterable[_T]) -> ValuesView[_T]: + return {id(x): x for x in it}.values() + + +def ceildiv(numer: int, denom: int) -> int: + # TODO: There is a bug in a call to this function, to repro: + # python benchmarks/dynamo/huggingface.py --inductor -d cuda --accuracy + # --amp --only YituTechConvBert --dynamic-shapes + assert isinstance(numer, int) and isinstance( + denom, int + ), f"{numer}: {type(numer)}, 
{denom}: {type(denom)}" + return -(numer // -denom) + + +def next_power_of_2(n: int) -> int: + """Return the smallest power of 2 greater than or equal to n""" + assert n <= 2**32, "32-bit only" + n -= 1 + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n += 1 + return n + + +def convert_shape_to_inductor(lst: List[Union[int, torch.SymInt]]) -> List[sympy.Expr]: + """ + Gets the shape and stride of a tensor. For non-symbolic tensors, this is + trivial. But for symbolic tensors, we need to map from SymIntNode into + sympy.Expr. + """ + return [ + i.node.expr if isinstance(i, torch.SymInt) else sympy.Integer(i) for i in lst + ] + + +def convert_shape_to_symint( + lst: List[Union[int, sympy.Expr]] +) -> List[Union[int, torch.SymInt]]: + """ + Takes a list of shapes from Inductor and converts them into symints (or just + ints if all shapes are static). + """ + from .virtualized import V + + return [ + i + if isinstance(i, int) + else int(i) + if isinstance(i, sympy.Integer) + else V.graph.sizevars.shape_env.create_symintnode(i, hint=None) + for i in lst + ] + + +def gen_gm_and_inputs(target, args, kwargs): + g = torch.fx.Graph() + g_args = [] + a_args = [] + for n, arg in enumerate(args): + if isinstance(arg, torch.Tensor): + g_args.append(g.placeholder(f"arg{n}")) + a_args.append(arg) + else: + g_args.append(arg) + assert all(not isinstance(x, torch.Tensor) for x in kwargs.values()) + node = g.call_function(target, tuple(g_args), kwargs) + if ( + len(target._schema.returns) == 1 + and str(target._schema.returns[0].type) == "Tensor" + ): + node = (node,) + g.output(node) + + gm = torch.fx.GraphModule({}, g) + return gm, a_args + + +def synchronize(): + if torch.cuda.is_available(): + torch.cuda.synchronize() + + +def timed(model: Callable[..., Any], example_inputs, times: int = 1) -> float: + synchronize() + torch.manual_seed(1337) + t0 = time.perf_counter() + for _ in range(times): + result = model(*example_inputs) + synchronize() + t1 = 
time.perf_counter() + # GC the result after timing + assert result is not None + return t1 - t0 + + +def print_performance(fn, args=(), times=10, repeat=10, baseline=1.0): + timings = torch.tensor([timed(fn, args, times) for _ in range(repeat)]) + took = torch.median(timings) + print(f"{took/baseline:.6f}") + return took + + +immutable_dict.__hash__ = lambda self: hash(tuple(self.items())) +immutable_list.__hash__ = lambda self: hash(tuple(self)) + + +def precompute_method(obj: Any, method: str): + """Replace obj.method() with a new method that returns a precomputed constant.""" + result = getattr(obj, method)() + setattr(obj, method, lambda: result) + + +def precompute_methods(obj: Any, methods: List[str]): + """Replace methods with new methods that returns a precomputed constants.""" + for method in methods: + precompute_method(obj, method) + + +def cmp(a, b) -> int: + return int(a > b) - int(a < b) + + +def pad_listlike(x, size): + if len(x) == 1: + return type(x)([x[0]]) * size + else: + return x + + +def cache_on_self(fn): + key = f"__{fn.__name__}_cache" + + @functools.wraps(fn) + def wrapper(self): + if not hasattr(self, key): + setattr(self, key, fn(self)) + return getattr(self, key) + + return wrapper + + +def aggregate_origins(node_schedule): + from . import ir + + if isinstance(node_schedule, list): + return functools.reduce( + operator.or_, + [ + node.node.origins + for node in node_schedule + if hasattr(node, "node") and node.node + ], + set(), + ) + elif isinstance(node_schedule, ir.ExternKernel): + return node_schedule.origins + else: + return set() + + +def get_fused_kernel_name(node_schedule, descriptive_names): + all_origins = aggregate_origins(node_schedule) + if descriptive_names == "original_aten": + # Bases the kernel name off of the top-level aten operator (i.e. 
pre-decompositions) + sources = [ + origin.meta["original_aten"]._overloadpacket.__name__ + for origin in all_origins + if origin.op == "call_function" and "original_aten" in origin.meta + ] + sources = sorted(set(sources)) + elif descriptive_names == "torch": + # Bases the kernel name off of the top-level "torch" operator (i.e. post-dynamo graph) + sources = [] + for origin in all_origins: + if origin.op == "call_function" and "source_fn" in origin.meta: + if isinstance(origin.meta["source_fn"][1], str): + sources.append(origin.meta["source_fn"][1]) + else: + sources.append(origin.meta["source_fn"][1].__name__) + sources = sorted(set(sources)) + elif descriptive_names == "inductor_node": + sources = [ + origin.name for origin in all_origins if origin.op == "call_function" + ] + else: + raise NotImplementedError + sources = sources + return "_".join(["fused"] + sources) + + +def get_kernel_metadata(node_schedule, wrapper): + all_origins = aggregate_origins(node_schedule) + inductor_nodes = [origin for origin in all_origins if origin.op == "call_function"] + + from_node_dict = collections.defaultdict(list) + original_aten_dict = collections.defaultdict(list) + for node in inductor_nodes: + if "original_aten" in node.meta: + key = str(node.meta["original_aten"]._overloadpacket) + original_aten_dict[key].append(node.name) + if "from_node" in node.meta: + key = node.meta["from_node"][0][0] + from_node_dict[key].append(node.name) + metadata = ( + f"{wrapper.comment} Source Nodes: [{', '.join(sorted(from_node_dict.keys()))}], " + f"Original ATen: [{', '.join(sorted(original_aten_dict.keys()))}]" + ) + # trace back to original node here + detailed_metadata = [] + for original_node, nodes in sorted(from_node_dict.items()): + detailed_metadata.append( + f"{wrapper.comment} {original_node} => {', '.join(sorted(nodes))}" + ) + return metadata, "\n".join(detailed_metadata) + + +def dominated_nodes( + initial_queue: Iterable[torch.fx.Node], skip_filter=None +) -> 
Set[torch.fx.Node]: + """Returns the set of nodes whose values depend on those within initial_queue""" + initial_queue = list(initial_queue) + dominated_set = set(initial_queue) + + while initial_queue: + node = initial_queue.pop() + for user in node.users: + if skip_filter and skip_filter(user): + continue + if user not in dominated_set: + dominated_set.add(user) + initial_queue.append(user) + + return dominated_set + + +def gather_origins(args, kwargs): + import itertools + + from . import ir + + def is_unrealized_node(n): + if isinstance(n, ir.TensorBox): + return is_unrealized_node(n.data) + if isinstance(n, ir.StorageBox): + return is_unrealized_node(n.data) + return isinstance(n, ir.IRNode) and isinstance(n, ir.Pointwise) + + kwarg_origins = [val.origins for val in kwargs.values() if is_unrealized_node(val)] + arg_origins = [arg.origins for arg in args if is_unrealized_node(arg)] + return set(itertools.chain(*arg_origins, *kwarg_origins)) + + +def sympy_str(expr: sympy.Expr) -> str: + """ + Normal sympy str is very slow, this is a lot faster. The result are + somewhat worse, as it doesn't do as much simplification. So don't + use this for final codegen. + """ + if isinstance(expr, sympy.Symbol): + return expr.name + if isinstance(expr, sympy.Add): + return " + ".join(map(sympy_str, expr.args)) + if isinstance(expr, sympy.Mul): + return " * ".join(map(sympy_str, expr.args)) + + if isinstance(expr, (ModularIndexing, CleanDiv, FloorDiv)): + return f"{expr.func.__name__}({', '.join(map(sympy_str, expr.args))})" + return str(expr) + + +def sympy_symbol(name: str) -> sympy.Symbol: + # This should never be used for creating shape/stride symbols, as those + # should all be allocated before Inductor. + assert name[0] != "s" + # NOTE: shape symbols are positive (> 0), but index variables are only + # non-negative (>= 0). 
+ return sympy.Symbol(name, integer=True, nonnegative=True) + + +def sympy_subs(expr: sympy.Expr, replacements: Dict[Any, Any]) -> sympy.Expr: + """ + xreplace is faster than subs, but is way more picky + """ + + def promote_strings(key): + if isinstance(key, str): + return sympy_symbol(key) + return key + + return expr.xreplace( + {promote_strings(k): promote_strings(v) for k, v in replacements.items()} + ) + + +def free_symbol_startswith(index: sympy.Expr, prefix: str): + return any(v.name.startswith(prefix) for v in index.free_symbols) + + +def free_symbol_has(index: sympy.Expr, pattern: str): + return any(pattern in v.name for v in index.free_symbols) + + +def has_incompatible_cudagraph_ops(gm): + forbidden_set = { + "aten._fused_moving_avg_obs_fq_helper.default", + "aten._fused_moving_avg_obs_fq_helper_functional.default", + "aten.multinomial.default", + "fbgemm.dense_to_jagged.default", + "fbgemm.jagged_to_padded_dense.default", + "run_and_save_rng_state", + "run_with_rng_state", + } + if torch.are_deterministic_algorithms_enabled(): + forbidden_set.update( + { + "aten._unsafe_index_put.default", + "aten.index_put.default", + "aten.index_put_.default", + "aten.scatter.src", + "aten.scatter.reduce", + "aten.scatter.value_reduce", + "aten.scatter_add_", + "aten.scatter_add.default", + "aten.scatter_reduce.two", + "aten.scatter_reduce_.two", + "aten.scatter_reduce.two_out", + } + ) + for node in gm.graph.nodes: + if str(node.target) in forbidden_set: + return True + return False + + +instance_descriptor = collections.namedtuple( + "instance_descriptor", + ["divisible_by_16", "equal_to_1", "ids_of_folded_args", "divisible_by_8"], + defaults=[tuple(), tuple(), tuple(), tuple()], +) + + +@contextlib.contextmanager +def fresh_inductor_cache(cache_entries=None): + """ + Contextmanager that provides a clean tmp cachedir for inductor. + + Optionally, pass a dict as 'cache_entries' to get a list of filenames and sizes + generated with this cache instance. 
+ """ + with tempfile.TemporaryDirectory() as inductor_cache_dir: + with mock.patch.dict( + os.environ, {"TORCHINDUCTOR_CACHE_DIR": inductor_cache_dir} + ): + triton_cache_dir = os.path.join(inductor_cache_dir, "triton") + with mock.patch.dict(os.environ, {"TRITON_CACHE_DIR": triton_cache_dir}): + yield + if isinstance(cache_entries, dict): + assert len(cache_entries) == 0, "expected empty cache_entries dict" + if os.path.exists(triton_cache_dir): + files = os.listdir(triton_cache_dir) + cache_entries.update( + { + f: os.path.getsize(os.path.join(triton_cache_dir, f)) + for f in files + if ".lock" not in f + } + ) + + +def argsort(seq) -> List[int]: + # preserve original order for equal strides + getter = seq.__getitem__ + a_r = range(len(seq)) + return list(reversed(sorted(a_r, key=getter, reverse=True))) # noqa: C413 + + +@functools.lru_cache(8) +def get_dtype_size(dtype): + return torch.empty((), dtype=dtype).element_size() + + +class LineContext(NamedTuple): + context: Any + + +class IndentedBuffer: + tabwidth = 4 + + def __init__(self, initial_indent=0): + self._lines = [] + self._indent = initial_indent + + def getvaluewithlinemap(self): + buf = StringIO() + p = 1 + linemap = [] + for line in self._lines: + if isinstance(line, DeferredLineBase): + line = line() + if line is None: + continue + elif isinstance(line, LineContext): + linemap.append((p, line.context)) + continue + assert isinstance(line, str) + buf.write(line) + buf.write("\n") + p += 1 + line.count("\n") + return buf.getvalue(), linemap + + def getvalue(self): + v, _ = self.getvaluewithlinemap() + return v + + def getrawvalue(self): + buf = StringIO() + for line in self._lines: + if isinstance(line, DeferredLineBase): + line = line() + if line is None: + continue + elif isinstance(line, LineContext): + continue + assert isinstance(line, str) + # backslash implies line continuation + if line.endswith("\\"): + buf.write(line[:-1]) + else: + buf.write(line) + buf.write("\n") + return buf.getvalue() 
+ + def clear(self): + self._lines.clear() + + def __bool__(self): + return bool(self._lines) + + def prefix(self): + return " " * (self._indent * self.tabwidth) + + def writeline(self, line): + if isinstance(line, LineContext): + self._lines.append(line) + elif isinstance(line, DeferredLineBase): + self._lines.append(line.with_prefix(self.prefix())) + elif line.strip(): + self._lines.append(f"{self.prefix()}{line}") + else: + self._lines.append("") + + def writelines(self, lines): + for line in lines: + self.writeline(line) + + def indent(self, offset=1): + @contextlib.contextmanager + def ctx(): + self._indent += offset + try: + yield + finally: + self._indent -= offset + + return ctx() + + def splice(self, other_code, strip=False): + if isinstance(other_code, IndentedBuffer): + dedent = float("inf") + for line in other_code._lines: + if not isinstance(line, LineContext) and line: + dedent = min(dedent, len(line) - len(line.lstrip())) + if math.isinf(dedent): + dedent = 0 + for line in other_code._lines: + if isinstance(line, LineContext): + self._lines.append(line) + else: + IndentedBuffer.writeline(self, line[int(dedent) :]) + else: + other_code = textwrap.dedent(other_code) + if strip: + other_code = other_code.lstrip() + if not other_code: + return + other_code = other_code.rstrip() + for line in other_code.split("\n"): + self.writeline(line) + + +class DeferredLineBase: + """A line that can be 'unwritten' at a later time""" + + def __init__(self, line): + if not line.strip(): + line = "" + self.line = line + + def __call__(self) -> Optional[str]: + """Returns either self.line or None to indicate the line has been 'unwritten'""" + raise NotImplementedError() + + def _new_line(self, line: str) -> "DeferredLineBase": + """Returns a new deferred line with the same condition""" + raise NotImplementedError() + + def with_prefix(self, prefix): + return self._new_line(f"{prefix}{self.line}") + + def lstrip(self): + return self._new_line(self.line.lstrip()) + + def 
__getitem__(self, index): + return self._new_line(self.line[index]) + + def __bool__(self): + return bool(self.line) + + def __len__(self): + return len(self.line) + + +@functools.lru_cache(None) +def is_big_gpu(index): + sms = torch.cuda.get_device_properties(index).multi_processor_count + if sms < 80: # V100 + log.warning("not enough SMs to use max_autotune_gemm mode") + return False + return True + + +def use_triton_template(layout, *, enable_int32=False): + layout_dtypes = [torch.float16, torch.bfloat16, torch.float32] + if enable_int32: + layout_dtypes = [torch.float16, torch.bfloat16, torch.float32, torch.int32] + return ( + ( + config.max_autotune + or config.max_autotune_gemm + or config.search_autotune_cache + ) + and "TRITON" in config.max_autotune_gemm_backends.upper().split(",") + and layout.device.type == "cuda" + and layout.dtype in layout_dtypes + and is_big_gpu(layout.device.index or 0) + ) + + +def use_aten_gemm_kernels(): + return "ATEN" in config.max_autotune_gemm_backends.upper().split(",") + + +class DebugDirManager: + counter = itertools.count(0) + + def __init__(self): + self.id = next(DebugDirManager.counter) + self.prev_debug_name = None + + def __enter__(self): + self.prev_debug_name = torch._dynamo.config.debug_dir_root + self.new_name = f"{self.prev_debug_name}_tmp_{self.id}" + torch._dynamo.config.debug_dir_root = self.new_name + + def __exit__(self, *args): + shutil.rmtree(self.new_name) + torch._dynamo.config.debug_dir_root = self.prev_debug_name + + +def run_and_get_code(fn, *args, **kwargs): + from .graph import GraphLowering + + compile_to_module = GraphLowering.compile_to_module + source_codes = [] + + def patched_compile_to_module(self): + mod = compile_to_module(self) + with open(mod.__file__) as f: + source_codes.append(f.read()) + return mod + + with mock.patch.object( + GraphLowering, "compile_to_module", patched_compile_to_module + ): + torch._dynamo.reset() + result = fn(*args, **kwargs) + return result, source_codes + + 
+def run_and_get_triton_code(fn, *args, **kwargs): + _, source_codes = run_and_get_code(fn, *args, **kwargs) + # Can have two outputs if backwards was eagerly compiled + assert ( + 1 <= len(source_codes) <= 2 + ), f"expected one or two code outputs got {len(source_codes)}" + return source_codes[0] + + +@contextlib.contextmanager +def override_lowering(aten_op, override_fn): + """ + Override the lowering of aten_op with overide_fn. + The first argument of override_fn is the original lowering fn. + """ + from torch._inductor import lowering + + orig_fn = lowering.lowerings[aten_op] + try: + lowering.lowerings[aten_op] = functools.partial(override_fn, orig_fn) + yield + finally: + lowering.lowerings[aten_op] = orig_fn + + +def add_scheduler_init_hook(pre_fn, post_fn=None): + """ + Add hook functions to be called at the beginning and end of Scheduler.__init__. + Used for unit tests. + """ + from torch._inductor.scheduler import Scheduler + + orig_fn = Scheduler.__init__ + + def wrapper(scheduler, nodes): + pre_fn(scheduler, nodes) + out = orig_fn(scheduler, nodes) + if post_fn: + post_fn(scheduler, nodes) + return out + + return unittest.mock.patch.object(Scheduler, "__init__", wrapper) + + +def developer_warning(msg): + """ + Warnings that will be actionable for PyTorch developers, but not + end users. Allows us to easily disable them in stable releases but + keep them on for nightly builds. + """ + if config.developer_warnings: + log.warning(msg) + else: + log.info(msg) + + +def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int: + """ + Return the total number of bytes the arguments of tensor type takes. + + For in/out args, tensor sizes are counted twice: once for reading and + once for writing. + + The first num_in_out_args arguments are in out tensors. 
+ """ + return sum( + arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args)) + for i, arg in enumerate(args) + if isinstance(arg, torch.Tensor) + ) + + +def create_bandwidth_info_str(ms, num_gb, gb_per_s, prefix="", suffix=""): + info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}" + try: + import colorama + + if ms > 0.012 and gb_per_s < 650: + info_str = colorama.Fore.RED + info_str + colorama.Fore.RESET + except ImportError: + log.warning("Colorama is not installed. Install it if you want colored output") + + return info_str + + +def get_benchmark_name(): + """ + An experimental API used only when config.benchmark_kernel is true. + + The benchmark name is only available at codegen time. So we can not + directly call it in benchmark_all_kernels which is run after codegen. + + The function assumes the argument after --only is the benchmark name. + It works for torchbench.py/hugginface.py/timm_models.py. But for ad-hoc + scripts, this function may return None. + + There are 2 flavors of --only argument we need handle: + 1. --only model_name + 2. 
--only=model_name + """ + try: + idx = sys.argv.index("--only") + if ( + idx + 1 < len(sys.argv) + and len(sys.argv[idx + 1]) > 0 + and sys.argv[idx + 1][0] != "-" + ): + return sys.argv[idx + 1] + except ValueError: + pass + + for arg in sys.argv: + if arg.startswith("--only="): + return arg[len("--only=") :] + + +def is_ones(items): + return all(x == 1 for x in items) + + +def is_zeros(items): + return all(x == 0 for x in items) + + +def is_cpu_device(inputs): + return all( + item.device == torch.device("cpu") + for item in inputs + if isinstance(item, torch.Tensor) + ) + + +def get_sympy_Expr_dtype(val: sympy.Expr) -> torch.dtype: + assert isinstance( + val, sympy.Expr + ), "only support sympy.Expr as input to get_sympy_Expr_dtype" + if val.is_integer: + return torch.int64 + else: + return torch.float64 + + +@contextlib.contextmanager +def maybe_profile(should_profile, *args, **kwargs): + if should_profile: + with torch.profiler.profile(*args, **kwargs) as p: + yield p + else: + yield + + +def triton_config_to_hashable(cfg): + """ + Convert triton config to a tuple that can uniquely identify it. We can use + the return value as a dictionary key. + """ + items = sorted(cfg.kwargs.items()) + items.append(("num_warps", cfg.num_warps)) + items.append(("num_stages", cfg.num_stages)) + return tuple(items) + + +HAS_COLORAMA = True +try: + import colorama +except ImportError: + HAS_COLORAMA = False + + +def _color_text(msg, color): + if not HAS_COLORAMA: + return msg + + return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET + + +def green_text(msg): + return _color_text(msg, "green") + + +def yellow_text(msg): + return _color_text(msg, "yellow") + + +def red_text(msg): + return _color_text(msg, "red") + + +def blue_text(msg): + return _color_text(msg, "blue") + + +@functools.lru_cache(None) +def python_type_to_schema_type(): + from . 
import ir + + PYTHON_TYPE_TO_SCHEMA_TYPE = { + torch.dtype: "int", + torch.device: "Device", + bool: "bool", + float: "float", + ir.TensorBox: "Tensor", + } + return PYTHON_TYPE_TO_SCHEMA_TYPE + + +def may_get_optional_schema_type(schema_type, is_optional_arg): + return f"Optional[{schema_type}]" if is_optional_arg else schema_type + + +def type_match(arg, arg_type, is_optional_arg): + if isinstance(arg, immutable_list): + if all( + isinstance(x, int) or (isinstance(x, sympy.Symbol) and x.is_integer) + for x in arg + ): + may_optional_schema_type = may_get_optional_schema_type( + "List[int]", is_optional_arg + ) + return may_optional_schema_type == str(arg_type) + else: + # TODO: add support here + return False + + if arg.__class__ in python_type_to_schema_type(): + schema_type = python_type_to_schema_type()[arg.__class__] + may_optional_schema_type = may_get_optional_schema_type( + schema_type, is_optional_arg + ) + return may_optional_schema_type == str(arg_type) + + # TODO: add support here + return False + + +# torch/csrc/utils/python_arg_parser.cpp:FunctionSignature::parse +def schema_match(schema, args, kwargs): + min_args = 0 + max_pos_args = 0 + for argument in schema.arguments: + if not argument.has_default_value(): + min_args += 1 + if not argument.kwarg_only: + max_pos_args += 1 + + nargs = len(args) + remaining_kwargs = len(kwargs) + arg_pos = 0 + + def args_error_message(nargs, max_pos_args, min_args): + if min_args != max_pos_args: + return f"takes from {min_args} to {max_pos_args} positional arguments but {nargs} were given" + else: + return f"takes {max_pos_args} positional arguments but {nargs} were given" + + def is_optional(arg): + return "Optional" in str(arg.type) + + def allow_none(arg): + return is_optional(arg) or arg.has_default_value() + + assert len(args) <= max_pos_args, args_error_message( + len(args), max_pos_args, min_args + ) + + for argument in schema.arguments: + obj = None + is_kwd = False + if arg_pos < nargs: + if 
argument.kwarg_only: + return False + obj = args[arg_pos] + elif kwargs: + if argument.name in kwargs: + obj = kwargs[argument.name] + is_kwd = True + + if obj is None and not allow_none(argument): + return False + + if obj is not None: + expected_type = argument.type + if not type_match(obj, expected_type, is_optional(argument)): + return False + + if not is_kwd: + arg_pos += 1 + elif (obj is None and is_optional(argument)) or obj is not None: + remaining_kwargs -= 1 + + if remaining_kwargs > 0: + return False + + return True + + +def try_find_schema(schemas, args, kwargs): + for schema in schemas: + if schema_match(schema, args, kwargs): + return schema + + return None + + +def get_device_tflops(dtype): + from triton.testing import get_max_simd_tflops, get_max_tensorcore_tflops + + assert dtype in (torch.float16, torch.bfloat16, torch.float32) + if dtype in (torch.float16, torch.bfloat16): + return get_max_tensorcore_tflops(dtype) + + if torch.backends.cuda.matmul.allow_tf32: + return get_max_tensorcore_tflops(torch.float32) + else: + return get_max_simd_tflops(torch.float32) + + +def get_gpu_dram_gbps(): + from triton.testing import get_dram_gbps + + return get_dram_gbps() + + +def is_welford_reduction(reduction_type): + return reduction_type.startswith("welford") + + +def reduction_num_outputs(reduction_type): + return 3 if is_welford_reduction(reduction_type) else 1 diff --git a/llava_next/lib/python3.10/site-packages/torch/_inductor/wrapper_benchmark.py b/llava_next/lib/python3.10/site-packages/torch/_inductor/wrapper_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..1b7c22c83cb994b9abe93bc77377ef61f2cfeace --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/_inductor/wrapper_benchmark.py @@ -0,0 +1,293 @@ +import dataclasses +import tempfile +from collections import defaultdict + +import torch +from torch.autograd import DeviceType +from .utils import create_bandwidth_info_str, do_bench, get_num_bytes + 
+_kernel_category_choices = [ + "pointwise", + "reduction", + "persistent_reduction", +] + + +def get_kernel_category_by_source_code(src_code): + """ + Similar to get_kernel_category but use the source code. Call this API + if we have not compile the src_code to module yet. + """ + choices = [ch for ch in _kernel_category_choices if f"@{ch}" in src_code] + if len(choices) == 1: + return choices[0] + else: + return "unknown" + + +def get_kernel_category(kernel_mod): + """ + Given the module defining a triton kernel, return the category of the kernel. + Cateogry can be one of: + - pointwise + - reduction + - persistent_reduction + + Currently we simply decide the category depending on what decorator is imported + by the kernel. + """ + choices = [ch for ch in _kernel_category_choices if ch in kernel_mod.__dict__] + if len(choices) == 1: + return choices[0] + else: + return "unknown" + + +def benchmark_all_kernels(benchmark_name, benchmark_all_configs): + """ + An experimental API used only when config.benchmark_kernel is true. + + Run the kernel benchmarks for all the kernels cached in PyCodeCache. + Used in the compiled modules. + + Put this method here rather than codegen it for convenience since its implementation + does not change based on different graph modules being compiled. 
+ """ + from torch._inductor.codecache import PyCodeCache + + def get_triton_kernel(mod): + from torch._inductor.triton_heuristics import CachingAutotuner + + cand_list = [ + v + for k, v in mod.__dict__.items() + if k.startswith("triton_") and isinstance(v, CachingAutotuner) + ] + assert len(cand_list) == 1 + return cand_list[0] + + nfound = 0 + for kernel_key, kernel_mod in PyCodeCache.cache.items(): + if not hasattr(kernel_mod, "get_args") or not hasattr(kernel_mod, "call"): + continue + + triton_kernel = get_triton_kernel(kernel_mod) + kernel_category = get_kernel_category(kernel_mod) + args = kernel_mod.get_args() + num_in_out_ptrs = len( + [ + arg_name + for arg_name in triton_kernel.fn.arg_names + if arg_name.startswith("in_out_ptr") + ] + ) + num_gb = get_num_bytes(*args, num_in_out_args=num_in_out_ptrs) / 1e9 + + def get_info_str(ms, n_regs, n_spills, shared, prefix=""): + if not any(x is None for x in [n_regs, n_spills, shared]): + kernel_detail_str = ( + f" {n_regs:3} regs {n_spills:3} spills {shared:8} shared mem" + ) + else: + kernel_detail_str = "" + + gb_per_s = num_gb / (ms / 1e3) + return create_bandwidth_info_str( + ms, num_gb, gb_per_s, prefix=prefix, suffix=kernel_detail_str + ) + + kernel_desc = ( + f"{benchmark_name:20} {kernel_category[:3].upper()} {kernel_key[:10]}" + ) + if benchmark_all_configs: + assert hasattr(kernel_mod, "benchmark_all_configs") + bench_result = kernel_mod.benchmark_all_configs(args) + print(kernel_desc) + for launcher, ms in bench_result.items(): + print( + f" {get_info_str(ms, launcher.n_regs, launcher.n_spills, launcher.shared)} @ {launcher.config}" + ) + else: + ms = do_bench(lambda: kernel_mod.call(args), rep=40, fast_flush=True) + assert ( + len(triton_kernel.launchers) == 1 + ), "Autotuner should have selected the best config" + launcher = triton_kernel.launchers[0] + print( + get_info_str( + ms, + launcher.n_regs, + launcher.n_spills, + launcher.shared, + prefix=f"{kernel_desc} ", + ) + ) + + nfound += 1 + if 
nfound == 0: + print( + "No kernel with benchmark functionality found. Make sure you run inductor with config.benchmark_kernel being True" + ) + + +@dataclasses.dataclass +class ProfileEvent: + category: str + key: str + self_cuda_time_ms: float + # the benchmark is run multiple times and we average the count across all the + # runs. It should be an integer but define a float just in case. + count: float + + +def parse_profile_event_list(benchmark_name, event_list, wall_time_ms, nruns): + def get_self_cuda_time(ev): + """ + ev.self_cuda_time_total is in microsecond. Convert to millisecond. + """ + return ev.self_cuda_time_total / 1000 / nruns + + all_events = defaultdict(list) + + def add_event(ev, category): + profile_ev = ProfileEvent( + category=category, + key=ev.key, + self_cuda_time_ms=get_self_cuda_time(ev), + count=ev.count / nruns, # average across all runs + ) + all_events[category].append(profile_ev) + + for ev in event_list: + assert not ev.is_legacy, "Don't support the legacy profiler" + if ev.device_type == DeviceType.CPU: + # ignore the event on CPU side + continue + + category = "unknown" + if ev.key.startswith("triton_"): + if ev.key.startswith("triton_poi"): + category = "triton_pointwise" + elif ev.key.startswith("triton_red"): + category = "triton_reduction" + elif ev.key.startswith("triton_per"): + category = "triton_persistent_reduction" + else: + category = "triton_unknown" + + add_event(ev, category) + + def report_category(category, profile_events): + from tabulate import tabulate + + profile_events.sort(key=lambda ev: ev.self_cuda_time_ms, reverse=True) + + rows = [] + total_time = 0.0 + print(f"\n == {category} category kernels == ") + for ev in profile_events: + total_time += ev.self_cuda_time_ms + percent = f"{ev.self_cuda_time_ms / wall_time_ms * 100:.2f}%" + rows.append([ev.key[:120], ev.self_cuda_time_ms, ev.count, percent]) + rows.append( + ["Total", total_time, "", f"{total_time / wall_time_ms * 100:.2f}%"] + ) + print( + tabulate( 
+ rows, headers=["Kernel", "Self CUDA TIME (ms)", "Count", "Percent"] + ) + ) + return total_time + + def report(): + category_list = [ + "triton_pointwise", + "triton_reduction", + "triton_persistent_reduction", + "triton_unknown", + "unknown", + ] + assert set(all_events.keys()).issubset( + set(category_list) + ), f"{list(all_events.keys())}" + + per_category_wall_time = {} + total_cuda_ms = 0.0 + for category in category_list: + if category in all_events: + _time = report_category(category, all_events[category]) + per_category_wall_time[category] = _time + total_cuda_ms += _time + + gpu_busy_percent = f"{total_cuda_ms / wall_time_ms * 100:.2f}%" + print(f"\nPercent of time when GPU is busy: {gpu_busy_percent}") + print(f"Total wall time {wall_time_ms:.3f} ms") + + # output such a line so we can gather such line from all compiled modules from all + # benchmarks and tabulate it! + # Columns: benchmark_name, pointwise_percent, reduction_percent, persistent_reduction_percent, + # unknown_category_percent, GPU_busy_percent, wall_time_ms + tabulate_line = f"Output for tabulate: {benchmark_name}" + for category in category_list: + percent = ( + f"{per_category_wall_time.get(category, 0.0) / wall_time_ms * 100:.2f}%" + ) + tabulate_line += f", {percent}" + tabulate_line += f", {gpu_busy_percent}, {wall_time_ms:.3f}ms" + + print(tabulate_line) + + report() + + +def compiled_module_main(benchmark_name, benchmark_compiled_module_fn): + """ + This is the function called in __main__ block of a compiled module. 
+ """ + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--benchmark-kernels", + "-k", + action="store_true", + help="Whether to benchmark each individual kernels", + ) + parser.add_argument( + "--benchmark-all-configs", + "-c", + action="store_true", + help="Whether to benchmark each individual config for a kernel", + ) + parser.add_argument( + "--profile", + "-p", + action="store_true", + help="Whether to profile the compiled module", + ) + args = parser.parse_args() + + if args.benchmark_kernels: + benchmark_all_kernels(benchmark_name, args.benchmark_all_configs) + else: + times = 10 + repeat = 10 + wall_time_ms = ( + benchmark_compiled_module_fn(times=times, repeat=repeat) / times * 1000 + ) + + if not args.profile: + return + + with torch.profiler.profile(record_shapes=True) as p: + benchmark_compiled_module_fn(times=times, repeat=repeat) + + path = f"{tempfile.gettempdir()}/compiled_module_profile.json" + p.export_chrome_trace(path) + print(f"Profiling result for a compiled module of benchmark {benchmark_name}:") + print(f"Chrome trace for the profile is written to {path}") + event_list = p.key_averages(group_by_input_shape=True) + print(event_list.table(sort_by="self_cuda_time_total", row_limit=10)) + parse_profile_event_list( + benchmark_name, event_list, wall_time_ms, times * repeat + ) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/__init__.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2777d8f618ffbd6bb478157bd61f4a7d655db251 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/__init__.py @@ -0,0 +1,5 @@ +"""Sharpening, edge finding, rank filters, thresholding, etc.""" + +import lazy_loader as _lazy + +__getattr__, __dir__, __all__ = _lazy.attach_stub(__name__, __file__) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/__init__.pyi 
b/vlmpy310/lib/python3.10/site-packages/skimage/filters/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5c39465fa80f70b0c76a935cfd03678c9beeb64d --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/__init__.pyi @@ -0,0 +1,109 @@ +# Explicitly setting `__all__` is necessary for type inference engines +# to know which symbols are exported. See +# https://peps.python.org/pep-0484/#stub-files + +__all__ = [ + "LPIFilter2D", + "apply_hysteresis_threshold", + "butterworth", + "correlate_sparse", + "difference_of_gaussians", + "farid", + "farid_h", + "farid_v", + "filter_inverse", + "filter_forward", + "frangi", + "gabor", + "gabor_kernel", + "gaussian", + "hessian", + "laplace", + "median", + "meijering", + "prewitt", + "prewitt_h", + "prewitt_v", + "rank", + "rank_order", + "roberts", + "roberts_neg_diag", + "roberts_pos_diag", + "sato", + "scharr", + "scharr_h", + "scharr_v", + "sobel", + "sobel_h", + "sobel_v", + "threshold_isodata", + "threshold_li", + "threshold_local", + "threshold_mean", + "threshold_minimum", + "threshold_multiotsu", + "threshold_niblack", + "threshold_otsu", + "threshold_sauvola", + "threshold_triangle", + "threshold_yen", + "try_all_threshold", + "unsharp_mask", + "wiener", + "window", +] + +from . 
import rank +from ._fft_based import butterworth +from ._gabor import gabor, gabor_kernel +from ._gaussian import difference_of_gaussians, gaussian +from ._median import median +from ._rank_order import rank_order +from ._sparse import correlate_sparse +from ._unsharp_mask import unsharp_mask +from ._window import window +from .edges import ( + farid, + farid_h, + farid_v, + laplace, + prewitt, + prewitt_h, + prewitt_v, + roberts, + roberts_neg_diag, + roberts_pos_diag, + scharr, + scharr_h, + scharr_v, + sobel, + sobel_h, + sobel_v, +) +from .lpi_filter import ( + LPIFilter2D, + filter_inverse, + filter_forward, + wiener, +) +from .ridges import ( + frangi, + hessian, + meijering, + sato, +) +from .thresholding import ( + apply_hysteresis_threshold, + threshold_isodata, + threshold_li, + threshold_local, + threshold_mean, + threshold_minimum, + threshold_multiotsu, + threshold_niblack, + threshold_otsu, + threshold_sauvola, + threshold_triangle, + threshold_yen, + try_all_threshold, +) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/_fft_based.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/_fft_based.py new file mode 100644 index 0000000000000000000000000000000000000000..12af9a44283f6121cb8c3967e005e8a64689d221 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/_fft_based.py @@ -0,0 +1,189 @@ +import functools + +import numpy as np +import scipy.fft as fft + +from .._shared.utils import _supported_float_type + + +def _get_nd_butterworth_filter( + shape, factor, order, high_pass, real, dtype=np.float64, squared_butterworth=True +): + """Create a N-dimensional Butterworth mask for an FFT + + Parameters + ---------- + shape : tuple of int + Shape of the n-dimensional FFT and mask. + factor : float + Fraction of mask dimensions where the cutoff should be. + order : float + Controls the slope in the cutoff region. 
+ high_pass : bool + Whether the filter is high pass (low frequencies attenuated) or + low pass (high frequencies are attenuated). + real : bool + Whether the FFT is of a real (True) or complex (False) image + squared_butterworth : bool, optional + When True, the square of the Butterworth filter is used. + + Returns + ------- + wfilt : ndarray + The FFT mask. + + """ + ranges = [] + for i, d in enumerate(shape): + # start and stop ensures center of mask aligns with center of FFT + axis = np.arange(-(d - 1) // 2, (d - 1) // 2 + 1) / (d * factor) + ranges.append(fft.ifftshift(axis**2)) + # for real image FFT, halve the last axis + if real: + limit = d // 2 + 1 + ranges[-1] = ranges[-1][:limit] + # q2 = squared Euclidean distance grid + q2 = functools.reduce(np.add, np.meshgrid(*ranges, indexing="ij", sparse=True)) + q2 = q2.astype(dtype) + q2 = np.power(q2, order) + wfilt = 1 / (1 + q2) + if high_pass: + wfilt *= q2 + if not squared_butterworth: + np.sqrt(wfilt, out=wfilt) + return wfilt + + +def butterworth( + image, + cutoff_frequency_ratio=0.005, + high_pass=True, + order=2.0, + channel_axis=None, + *, + squared_butterworth=True, + npad=0, +): + """Apply a Butterworth filter to enhance high or low frequency features. + + This filter is defined in the Fourier domain. + + Parameters + ---------- + image : (M[, N[, ..., P]][, C]) ndarray + Input image. + cutoff_frequency_ratio : float, optional + Determines the position of the cut-off relative to the shape of the + FFT. Receives a value between [0, 0.5]. + high_pass : bool, optional + Whether to perform a high pass filter. If False, a low pass filter is + performed. + order : float, optional + Order of the filter which affects the slope near the cut-off. Higher + order means steeper slope in frequency space. + channel_axis : int, optional + If there is a channel dimension, provide the index here. If None + (default) then all axes are assumed to be spatial dimensions. 
+ squared_butterworth : bool, optional + When True, the square of a Butterworth filter is used. See notes below + for more details. + npad : int, optional + Pad each edge of the image by `npad` pixels using `numpy.pad`'s + ``mode='edge'`` extension. + + Returns + ------- + result : ndarray + The Butterworth-filtered image. + + Notes + ----- + A band-pass filter can be achieved by combining a high-pass and low-pass + filter. The user can increase `npad` if boundary artifacts are apparent. + + The "Butterworth filter" used in image processing textbooks (e.g. [1]_, + [2]_) is often the square of the traditional Butterworth filters as + described by [3]_, [4]_. The squared version will be used here if + `squared_butterworth` is set to ``True``. The lowpass, squared Butterworth + filter is given by the following expression for the lowpass case: + + .. math:: + H_{low}(f) = \\frac{1}{1 + \\left(\\frac{f}{c f_s}\\right)^{2n}} + + with the highpass case given by + + .. math:: + H_{hi}(f) = 1 - H_{low}(f) + + where :math:`f=\\sqrt{\\sum_{d=0}^{\\mathrm{ndim}} f_{d}^{2}}` is the + absolute value of the spatial frequency, :math:`f_s` is the sampling + frequency, :math:`c` the ``cutoff_frequency_ratio``, and :math:`n` is the + filter `order` [1]_. When ``squared_butterworth=False``, the square root of + the above expressions are used instead. + + Note that ``cutoff_frequency_ratio`` is defined in terms of the sampling + frequency, :math:`f_s`. The FFT spectrum covers the Nyquist range + (:math:`[-f_s/2, f_s/2]`) so ``cutoff_frequency_ratio`` should have a value + between 0 and 0.5. The frequency response (gain) at the cutoff is 0.5 when + ``squared_butterworth`` is true and :math:`1/\\sqrt{2}` when it is false. 
+ + Examples + -------- + Apply a high-pass and low-pass Butterworth filter to a grayscale and + color image respectively: + + >>> from skimage.data import camera, astronaut + >>> from skimage.filters import butterworth + >>> high_pass = butterworth(camera(), 0.07, True, 8) + >>> low_pass = butterworth(astronaut(), 0.01, False, 4, channel_axis=-1) + + References + ---------- + .. [1] Russ, John C., et al. The Image Processing Handbook, 3rd. Ed. + 1999, CRC Press, LLC. + .. [2] Birchfield, Stan. Image Processing and Analysis. 2018. Cengage + Learning. + .. [3] Butterworth, Stephen. "On the theory of filter amplifiers." + Wireless Engineer 7.6 (1930): 536-541. + .. [4] https://en.wikipedia.org/wiki/Butterworth_filter + + """ + if npad < 0: + raise ValueError("npad must be >= 0") + elif npad > 0: + center_slice = tuple(slice(npad, s + npad) for s in image.shape) + image = np.pad(image, npad, mode='edge') + fft_shape = ( + image.shape if channel_axis is None else np.delete(image.shape, channel_axis) + ) + is_real = np.isrealobj(image) + float_dtype = _supported_float_type(image.dtype, allow_complex=True) + if cutoff_frequency_ratio < 0 or cutoff_frequency_ratio > 0.5: + raise ValueError("cutoff_frequency_ratio should be in the range [0, 0.5]") + wfilt = _get_nd_butterworth_filter( + fft_shape, + cutoff_frequency_ratio, + order, + high_pass, + is_real, + float_dtype, + squared_butterworth, + ) + axes = np.arange(image.ndim) + if channel_axis is not None: + axes = np.delete(axes, channel_axis) + abs_channel = channel_axis % image.ndim + post = image.ndim - abs_channel - 1 + sl = (slice(None),) * abs_channel + (np.newaxis,) + (slice(None),) * post + wfilt = wfilt[sl] + if is_real: + butterfilt = fft.irfftn( + wfilt * fft.rfftn(image, axes=axes), s=fft_shape, axes=axes + ) + else: + butterfilt = fft.ifftn( + wfilt * fft.fftn(image, axes=axes), s=fft_shape, axes=axes + ) + if npad > 0: + butterfilt = butterfilt[center_slice] + return butterfilt diff --git 
a/vlmpy310/lib/python3.10/site-packages/skimage/filters/_gabor.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/_gabor.py new file mode 100644 index 0000000000000000000000000000000000000000..f6e035b7fc2c8fd29f7a2d33efb92d92274b5cd7 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/_gabor.py @@ -0,0 +1,220 @@ +import math + +import numpy as np +from scipy import ndimage as ndi + +from .._shared.utils import _supported_float_type, check_nD + +__all__ = ['gabor_kernel', 'gabor'] + + +def _sigma_prefactor(bandwidth): + b = bandwidth + # See http://www.cs.rug.nl/~imaging/simplecell.html + return 1.0 / np.pi * math.sqrt(math.log(2) / 2.0) * (2.0**b + 1) / (2.0**b - 1) + + +def gabor_kernel( + frequency, + theta=0, + bandwidth=1, + sigma_x=None, + sigma_y=None, + n_stds=3, + offset=0, + dtype=np.complex128, +): + """Return complex 2D Gabor filter kernel. + + Gabor kernel is a Gaussian kernel modulated by a complex harmonic function. + Harmonic function consists of an imaginary sine function and a real + cosine function. Spatial frequency is inversely proportional to the + wavelength of the harmonic and to the standard deviation of a Gaussian + kernel. The bandwidth is also inversely proportional to the standard + deviation. + + Parameters + ---------- + frequency : float + Spatial frequency of the harmonic function. Specified in pixels. + theta : float, optional + Orientation in radians. If 0, the harmonic is in the x-direction. + bandwidth : float, optional + The bandwidth captured by the filter. For fixed bandwidth, ``sigma_x`` + and ``sigma_y`` will decrease with increasing frequency. This value is + ignored if ``sigma_x`` and ``sigma_y`` are set by the user. + sigma_x, sigma_y : float, optional + Standard deviation in x- and y-directions. These directions apply to + the kernel *before* rotation. If `theta = pi/2`, then the kernel is + rotated 90 degrees so that ``sigma_x`` controls the *vertical* + direction. 
    >>> fig, ax = plt.subplots()  # doctest: +SKIP
sigma_y=None, + n_stds=3, + offset=0, + mode='reflect', + cval=0, +): + """Return real and imaginary responses to Gabor filter. + + The real and imaginary parts of the Gabor filter kernel are applied to the + image and the response is returned as a pair of arrays. + + Gabor filter is a linear filter with a Gaussian kernel which is modulated + by a sinusoidal plane wave. Frequency and orientation representations of + the Gabor filter are similar to those of the human visual system. + Gabor filter banks are commonly used in computer vision and image + processing. They are especially suitable for edge detection and texture + classification. + + Parameters + ---------- + image : 2-D array + Input image. + frequency : float + Spatial frequency of the harmonic function. Specified in pixels. + theta : float, optional + Orientation in radians. If 0, the harmonic is in the x-direction. + bandwidth : float, optional + The bandwidth captured by the filter. For fixed bandwidth, ``sigma_x`` + and ``sigma_y`` will decrease with increasing frequency. This value is + ignored if ``sigma_x`` and ``sigma_y`` are set by the user. + sigma_x, sigma_y : float, optional + Standard deviation in x- and y-directions. These directions apply to + the kernel *before* rotation. If `theta = pi/2`, then the kernel is + rotated 90 degrees so that ``sigma_x`` controls the *vertical* + direction. + n_stds : scalar, optional + The linear size of the kernel is n_stds (3 by default) standard + deviations. + offset : float, optional + Phase offset of harmonic function in radians. + mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional + Mode used to convolve image with a kernel, passed to `ndi.convolve` + cval : scalar, optional + Value to fill past edges of input if ``mode`` of convolution is + 'constant'. The parameter is passed to `ndi.convolve`. + + Returns + ------- + real, imag : arrays + Filtered images using the real and imaginary parts of the Gabor filter + kernel. 
    >>> fig, ax = plt.subplots()  # doctest: +SKIP
'difference_of_gaussians'] + + +def difference_of_gaussians( + image, + low_sigma, + high_sigma=None, + *, + mode='nearest', + cval=0, + channel_axis=None, + truncate=4.0, +): + """Find features between ``low_sigma`` and ``high_sigma`` in size. + + This function uses the Difference of Gaussians method for applying + band-pass filters to multi-dimensional arrays. The input array is + blurred with two Gaussian kernels of differing sigmas to produce two + intermediate, filtered images. The more-blurred image is then subtracted + from the less-blurred image. The final output image will therefore have + had high-frequency components attenuated by the smaller-sigma Gaussian, and + low frequency components will have been removed due to their presence in + the more-blurred intermediate. + + Parameters + ---------- + image : ndarray + Input array to filter. + low_sigma : scalar or sequence of scalars + Standard deviation(s) for the Gaussian kernel with the smaller sigmas + across all axes. The standard deviations are given for each axis as a + sequence, or as a single number, in which case the single number is + used as the standard deviation value for all axes. + high_sigma : scalar or sequence of scalars, optional (default is None) + Standard deviation(s) for the Gaussian kernel with the larger sigmas + across all axes. The standard deviations are given for each axis as a + sequence, or as a single number, in which case the single number is + used as the standard deviation value for all axes. If None is given + (default), sigmas for all axes are calculated as 1.6 * low_sigma. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The ``mode`` parameter determines how the array borders are + handled, where ``cval`` is the value when mode is equal to + 'constant'. Default is 'nearest'. + cval : scalar, optional + Value to fill past edges of input if ``mode`` is 'constant'. 
    When ``high_sigma`` is None, the values for ``high_sigma`` will be
    calculated as 1.6x the corresponding values in ``low_sigma``. This ratio
        raise ValueError(
            'high_sigma must be equal to or larger than '
            'low_sigma for all axes'
        )
    footprint : ndarray, optional
        If ``behavior=='rank'``, ``footprint`` is a 2-D array of 1's and 0's.
        If ``behavior=='ndimage'``, ``footprint`` is a N-D array of 1's and 0's
        with the same number of dimensions as ``image``.
        If None, ``footprint`` will be a N-D array with 3 elements for each
        dimension (e.g., vector, square, cube, etc.)
    out : ndarray, (same dtype as image), optional
        If None, a new array is allocated.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode parameter determines how the array borders are handled, where
        ``cval`` is the value when mode is equal to 'constant'.
        Default is 'nearest'.

        .. versionadded:: 0.15
           ``mode`` is used when ``behavior='ndimage'``.
    cval : scalar, optional
        Value to fill past edges of input if mode is 'constant'. Default is 0.0

        .. versionadded:: 0.15
           ``cval`` was added in 0.15 and is used when ``behavior='ndimage'``.
+ + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk + >>> from skimage.filters import median + >>> img = data.camera() + >>> med = median(img, disk(5)) + + """ + if behavior == 'rank': + if mode != 'nearest' or not np.isclose(cval, 0.0): + warn( + "Change 'behavior' to 'ndimage' if you want to use the " + "parameters 'mode' or 'cval'. They will be discarded " + "otherwise.", + stacklevel=2, + ) + return generic.median(image, footprint=footprint, out=out) + if footprint is None: + footprint = ndi.generate_binary_structure(image.ndim, image.ndim) + return ndi.median_filter( + image, footprint=footprint, output=out, mode=mode, cval=cval + ) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/_rank_order.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/_rank_order.py new file mode 100644 index 0000000000000000000000000000000000000000..9c4ba888a2454a1dbf854cf07a8c0b9e08c04f70 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/_rank_order.py @@ -0,0 +1,57 @@ +""" +_rank_order.py - convert an image of any type to an image of ints whose +pixels have an identical rank order compared to the original image +""" + +import numpy as np + + +def rank_order(image): + """Return an image of the same shape where each pixel is the + index of the pixel value in the ascending order of the unique + values of ``image``, aka the rank-order value. + + Parameters + ---------- + image : ndarray + + Returns + ------- + labels : ndarray of unsigned integers, of shape image.shape + New array where each pixel has the rank-order value of the + corresponding pixel in ``image``. Pixel values are between 0 and + n - 1, where n is the number of distinct unique values in + ``image``. The dtype of this array will be determined by + ``np.min_scalar_type(image.size)``. + original_values : 1-D ndarray + Unique original values of ``image``. This will have the same dtype as + ``image``. 
+ + Examples + -------- + >>> a = np.array([[1, 4, 5], [4, 4, 1], [5, 1, 1]]) + >>> a + array([[1, 4, 5], + [4, 4, 1], + [5, 1, 1]]) + >>> rank_order(a) + (array([[0, 1, 2], + [1, 1, 0], + [2, 0, 0]], dtype=uint8), array([1, 4, 5])) + >>> b = np.array([-1., 2.5, 3.1, 2.5]) + >>> rank_order(b) + (array([0, 1, 2, 1], dtype=uint8), array([-1. , 2.5, 3.1])) + """ + flat_image = image.reshape(-1) + unsigned_dtype = np.min_scalar_type(flat_image.size) + sort_order = flat_image.argsort().astype(unsigned_dtype, copy=False) + flat_image = flat_image[sort_order] + sort_rank = np.zeros_like(sort_order) + is_different = flat_image[:-1] != flat_image[1:] + np.cumsum(is_different, out=sort_rank[1:], dtype=sort_rank.dtype) + original_values = np.zeros((int(sort_rank[-1]) + 1,), image.dtype) + original_values[0] = flat_image[0] + original_values[1:] = flat_image[1:][is_different] + int_image = np.zeros_like(sort_order) + int_image[sort_order] = sort_rank + return (int_image.reshape(image.shape), original_values) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/_sparse.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/_sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..d7831a3fea8e0cce437f34fb36c6d14393be7b42 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/_sparse.py @@ -0,0 +1,139 @@ +import numpy as np + +from .._shared.utils import _supported_float_type, _to_np_mode + + +def _validate_window_size(axis_sizes): + """Ensure all sizes in ``axis_sizes`` are odd. + + Parameters + ---------- + axis_sizes : iterable of int + + Raises + ------ + ValueError + If any given axis size is even. + """ + for axis_size in axis_sizes: + if axis_size % 2 == 0: + msg = ( + f'Window size for `threshold_sauvola` or ' + f'`threshold_niblack` must not be even on any dimension. 
    """Compute valid cross-correlation of `image` and `kernel`.
+ + See ``scipy.ndimage.correlate`` for a description of cross-correlation. + + Parameters + ---------- + image : ndarray, dtype float, shape (M, N[, ...], P) + The input array. If mode is 'valid', this array should already be + padded, as a margin of the same shape as kernel will be stripped + off. + kernel : ndarray, dtype float, shape (Q, R[, ...], S) + The kernel to be correlated. Must have the same number of + dimensions as `padded_array`. For high performance, it should + be sparse (few nonzero entries). + mode : string, optional + See `scipy.ndimage.correlate` for valid modes. + Additionally, mode 'valid' is accepted, in which case no padding is + applied and the result is the result for the smaller image for which + the kernel is entirely inside the original data. + + Returns + ------- + result : array of float, shape (M, N[, ...], P) + The result of cross-correlating `image` with `kernel`. If mode + 'valid' is used, the resulting shape is (M-Q+1, N-R+1[, ...], P-S+1). + """ + kernel = np.asarray(kernel) + + float_dtype = _supported_float_type(image.dtype) + image = image.astype(float_dtype, copy=False) + + if mode == 'valid': + padded_image = image + else: + np_mode = _to_np_mode(mode) + _validate_window_size(kernel.shape) + padded_image = np.pad( + image, + [(w // 2, w // 2) for w in kernel.shape], + mode=np_mode, + ) + + # extract the kernel's non-zero indices and corresponding values + indices = np.nonzero(kernel) + values = list(kernel[indices].astype(float_dtype, copy=False)) + indices = list(zip(*indices)) + + # _correlate_sparse requires an index at (0,) * kernel.ndim to be present + corner_index = (0,) * kernel.ndim + if corner_index not in indices: + indices = [corner_index] + indices + values = [0.0] + values + + return _correlate_sparse(padded_image, kernel.shape, indices, values) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/_unsharp_mask.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/_unsharp_mask.py new file 
def _unsharp_mask_single_channel(image, radius, amount, vrange):
    """Single channel implementation of the unsharp masking filter.

    Parameters
    ----------
    image : ndarray
        Single-channel float image.
    radius : scalar or sequence of scalars
        Gaussian blur radius (sigma) per dimension.
    amount : scalar
        Scale factor applied to the detail (image - blurred) layer.
    vrange : sequence of two floats or None
        If not None, the result is clipped in place to
        [vrange[0], vrange[1]].
    """
    blurred = gaussian(image, sigma=radius, mode='reflect')

    result = image + (image - blurred) * amount
    if vrange is not None:
        return np.clip(result, vrange[0], vrange[1], out=result)
    return result


def unsharp_mask(
    image, radius=1.0, amount=1.0, preserve_range=False, *, channel_axis=None
):
    """Unsharp masking filter.

    The sharp details are identified as the difference between the original
    image and its blurred version. These details are then scaled, and added
    back to the original image.

    Parameters
    ----------
    image : (M[, ...][, C]) ndarray
        Input image.
    radius : scalar or sequence of scalars, optional
        If a scalar is given, then its value is used for all dimensions.
        If sequence is given, then there must be exactly one radius
        for each dimension except the last dimension for multichannel images.
        Note that 0 radius means no blurring, and negative values are
        not allowed.
    amount : scalar, optional
        The details will be amplified with this factor. The factor could be 0
        or negative. Typically, it is a small positive number, e.g. 1.0.
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of ``img_as_float``.
        Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel)
        image. Otherwise, this parameter indicates which axis of the array
        corresponds to channels.

        .. versionadded:: 0.19
           ``channel_axis`` was added in 0.19.

    Returns
    -------
    output : (M[, ...][, C]) ndarray of float
        Image with unsharp mask applied.

    Notes
    -----
    Unsharp masking is an image sharpening technique. It is a linear image
    operation, and numerically stable, unlike deconvolution which is an
    ill-posed problem. Because of this stability, it is often
    preferred over deconvolution.

    The main idea is as follows: sharp details are identified as the
    difference between the original image and its blurred version.
    These details are added back to the original image after a scaling step:

        enhanced image = original + amount * (original - blurred)

    When applying this filter to several color layers independently,
    color bleeding may occur. More visually pleasing result can be
    achieved by processing only the brightness/lightness/intensity
    channel in a suitable color space such as HSV, HSL, YUV, or YCbCr.

    Unsharp masking is described in most introductory digital image
    processing books. This implementation is based on [1]_.

    Examples
    --------
    >>> array = np.ones(shape=(5,5), dtype=np.uint8)*100
    >>> array[2,2] = 120
    >>> np.around(unsharp_mask(array, radius=0.5, amount=2),2)
    array([[0.39, 0.39, 0.39, 0.39, 0.39],
           [0.39, 0.39, 0.38, 0.39, 0.39],
           [0.39, 0.38, 0.53, 0.38, 0.39],
           [0.39, 0.39, 0.38, 0.39, 0.39],
           [0.39, 0.39, 0.39, 0.39, 0.39]])

    >>> array = np.ones(shape=(5,5), dtype=np.int8)*100
    >>> array[2,2] = 127
    >>> np.around(unsharp_mask(array, radius=0.5, amount=2, preserve_range=True), 2)
    array([[100.  , 100.  ,  99.99, 100.  , 100.  ],
           [100.  ,  99.39,  95.48,  99.39, 100.  ],
           [ 99.99,  95.48, 147.59,  95.48,  99.99],
           [100.  ,  99.39,  95.48,  99.39, 100.  ],
           [100.  , 100.  ,  99.99, 100.  , 100.  ]])

    References
    ----------
    .. [1] Maria Petrou, Costas Petrou
           "Image Processing: The Fundamentals", (2010), ed ii., page 357,
           ISBN 13: 9781119994398 :DOI:`10.1002/9781119994398`
    .. [2] Wikipedia. Unsharp masking
           https://en.wikipedia.org/wiki/Unsharp_masking
    """
    vrange = None  # Range for valid values; used for clipping.
    float_dtype = utils._supported_float_type(image.dtype)
    if preserve_range:
        # Keep the caller's value range untouched: no clipping. The
        # preserve_range doctest above (147.59 > 1) depends on this.
        fimg = image.astype(float_dtype, copy=False)
    else:
        # img_as_float rescales to [0, 1] (or [-1, 1] for signed input),
        # so the sharpened result must be clipped back to that range.
        fimg = img_as_float(image).astype(float_dtype, copy=False)
        if np.any(fimg < 0):
            vrange = [-1.0, 1.0]
        else:
            vrange = [0.0, 1.0]

    if channel_axis is not None:
        result = np.empty_like(fimg, dtype=float_dtype)
        # Filter each channel independently; see Notes on color bleeding.
        for channel in range(image.shape[channel_axis]):
            sl = utils.slice_at_axis(channel, channel_axis)
            result[sl] = _unsharp_mask_single_channel(
                fimg[sl], radius, amount, vrange
            )
        return result
    else:
        return _unsharp_mask_single_channel(fimg, radius, amount, vrange)
def window(window_type, shape, warp_kwargs=None):
    """Return an n-dimensional window of a given size and dimensionality.

    Parameters
    ----------
    window_type : string, float, or tuple
        The type of window to be created. Any window type supported by
        ``scipy.signal.get_window`` is allowed here. Window types that
        need a parameter must be given as a tuple (e.g. ``("tukey",
        0.8)``); a bare float is interpreted as the beta parameter of
        the Kaiser window.
    shape : tuple of int or int
        The shape of the window along each axis. If an integer is
        provided, a 1D window is generated.
    warp_kwargs : dict
        Keyword arguments passed to `skimage.transform.warp` (e.g.,
        ``warp_kwargs={'order': 3}`` to change the interpolation method).

    Returns
    -------
    nd_window : ndarray
        A window of the specified ``shape``. ``dtype`` is ``np.float64``.

    Notes
    -----
    The n-dimensional window is built by rotating a 1D profile (from
    ``scipy.signal.get_window``) about the center of the output array:
    the Euclidean distance of each position from the center is used to
    sample the 1D profile, with interpolation via
    `skimage.transform.warp`. Positions whose distance falls outside the
    1D profile are filled with zeros.

    This function generates a double precision array of the specified
    ``shape`` and can thus consume a large amount of memory for large
    shapes.

    See
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.get_window.html
    for the full list of supported window types.

    Examples
    --------
    Return a Hann window with shape (512, 512):

    >>> from skimage.filters import window
    >>> w = window('hann', (512, 512))

    Return a Kaiser window with beta parameter of 16 and shape (256, 256, 35):

    >>> w = window(16, (256, 256, 35))

    Return a Tukey window with an alpha parameter of 0.8 and shape (100, 300):

    >>> w = window(('tukey', 0.8), (100, 300))

    References
    ----------
    .. [1] Two-dimensional window design, Wikipedia,
           https://en.wikipedia.org/wiki/Two_dimensional_window_design
    """
    if np.isscalar(shape):
        shape = (safe_as_int(shape),)
    else:
        shape = tuple(safe_as_int(shape))
    if any(axis_len < 0 for axis_len in shape):
        raise ValueError("invalid shape")

    ndim = len(shape)
    if ndim <= 0:
        raise ValueError("Number of dimensions must be greater than zero")

    # 1D profile sampled at the resolution of the longest axis; shorter
    # axes sample it with a proportionally larger step below.
    max_size = max(shape)
    profile = get_window(window_type, max_size, fftbins=False)
    profile = np.reshape(profile, (-1,) + (1,) * (ndim - 1))

    # Distance of every output position from the window center, in the
    # `ndimage.map_coordinates` convention used by `warp`.
    axes = [np.arange(n, dtype=np.float32) * (max_size / n) for n in shape]
    center = (max_size / 2) - 0.5
    dist = 0
    for grid in np.meshgrid(*axes, sparse=True, indexing='ij'):
        grid -= center
        dist = dist + grid * grid
    dist = np.sqrt(dist)

    # Sample the 1D profile at `dist` from the center along axis 0; all
    # remaining coordinate planes stay zero (the profile is singleton
    # along those axes).
    coords = np.zeros((ndim,) + dist.shape, dtype=np.float32)
    coords[0] = dist + center

    if warp_kwargs is None:
        warp_kwargs = {}

    return warp(profile, coords, mode='constant', cval=0.0, **warp_kwargs)
Additional decimal places were computed +# using the code found at https://www.cs.dartmouth.edu/farid/ +farid_smooth = np.array( + [ + [ + 0.0376593171958126, + 0.249153396177344, + 0.426374573253687, + 0.249153396177344, + 0.0376593171958126, + ] + ] +) +farid_edge = np.array( + [[0.109603762960254, 0.276690988455557, 0, -0.276690988455557, -0.109603762960254]] +) +HFARID_WEIGHTS = farid_edge.T * farid_smooth +VFARID_WEIGHTS = np.copy(HFARID_WEIGHTS.T) + + +def _mask_filter_result(result, mask): + """Return result after masking. + + Input masks are eroded so that mask areas in the original image don't + affect values in the result. + """ + if mask is not None: + erosion_footprint = ndi.generate_binary_structure(mask.ndim, mask.ndim) + mask = binary_erosion(mask, erosion_footprint, border_value=0) + result *= mask + return result + + +def _kernel_shape(ndim, dim): + """Return list of `ndim` 1s except at position `dim`, where value is -1. + + Parameters + ---------- + ndim : int + The number of dimensions of the kernel shape. + dim : int + The axis of the kernel to expand to shape -1. + + Returns + ------- + shape : list of int + The requested shape. + + Examples + -------- + >>> _kernel_shape(2, 0) + [-1, 1] + >>> _kernel_shape(3, 1) + [1, -1, 1] + >>> _kernel_shape(4, -1) + [1, 1, 1, -1] + """ + shape = [ + 1, + ] * ndim + shape[dim] = -1 + return shape + + +def _reshape_nd(arr, ndim, dim): + """Reshape a 1D array to have n dimensions, all singletons but one. + + Parameters + ---------- + arr : array, shape (N,) + Input array + ndim : int + Number of desired dimensions of reshaped array. + dim : int + Which dimension/axis will not be singleton-sized. + + Returns + ------- + arr_reshaped : array, shape ([1, ...], N, [1,...]) + View of `arr` reshaped to the desired shape. 
+ + Examples + -------- + >>> rng = np.random.default_rng() + >>> arr = rng.random(7) + >>> _reshape_nd(arr, 2, 0).shape + (7, 1) + >>> _reshape_nd(arr, 3, 1).shape + (1, 7, 1) + >>> _reshape_nd(arr, 4, -1).shape + (1, 1, 1, 7) + """ + kernel_shape = _kernel_shape(ndim, dim) + return np.reshape(arr, kernel_shape) + + +def _generic_edge_filter( + image, + *, + smooth_weights, + edge_weights=[1, 0, -1], + axis=None, + mode='reflect', + cval=0.0, + mask=None, +): + """Apply a generic, n-dimensional edge filter. + + The filter is computed by applying the edge weights along one dimension + and the smoothing weights along all other dimensions. If no axis is given, + or a tuple of axes is given the filter is computed along all axes in turn, + and the magnitude is computed as the square root of the average square + magnitude of all the axes. + + Parameters + ---------- + image : array + The input image. + smooth_weights : array of float + The smoothing weights for the filter. These are applied to dimensions + orthogonal to the edge axis. + edge_weights : 1D array of float, optional + The weights to compute the edge along the chosen axes. + axis : int or sequence of int, optional + Compute the edge filter along this axis. If not provided, the edge + magnitude is computed. This is defined as:: + + edge_mag = np.sqrt(sum([_generic_edge_filter(image, ..., axis=i)**2 + for i in range(image.ndim)]) / image.ndim) + + The magnitude is also computed if axis is a sequence. + mode : str or sequence of str, optional + The boundary mode for the convolution. See `scipy.ndimage.convolve` + for a description of the modes. This can be either a single boundary + mode or one boundary mode per axis. + cval : float, optional + When `mode` is ``'constant'``, this is the constant used in values + outside the boundary of the image data. 
+ """ + ndim = image.ndim + if axis is None: + axes = list(range(ndim)) + elif np.isscalar(axis): + axes = [axis] + else: + axes = axis + return_magnitude = len(axes) > 1 + + if image.dtype.kind == 'f': + float_dtype = _supported_float_type(image.dtype) + image = image.astype(float_dtype, copy=False) + else: + image = img_as_float(image) + output = np.zeros(image.shape, dtype=image.dtype) + + for edge_dim in axes: + kernel = _reshape_nd(edge_weights, ndim, edge_dim) + smooth_axes = list(set(range(ndim)) - {edge_dim}) + for smooth_dim in smooth_axes: + kernel = kernel * _reshape_nd(smooth_weights, ndim, smooth_dim) + ax_output = ndi.convolve(image, kernel, mode=mode) + if return_magnitude: + ax_output *= ax_output + output += ax_output + + if return_magnitude: + output = np.sqrt(output) / np.sqrt(ndim, dtype=output.dtype) + return output + + +def sobel(image, mask=None, *, axis=None, mode='reflect', cval=0.0): + """Find edges in an image using the Sobel filter. + + Parameters + ---------- + image : array + The input image. + mask : array of bool, optional + Clip the output image to this mask. (Values where mask=0 will be set + to 0.) + axis : int or sequence of int, optional + Compute the edge filter along this axis. If not provided, the edge + magnitude is computed. This is defined as:: + + sobel_mag = np.sqrt(sum([sobel(image, axis=i)**2 + for i in range(image.ndim)]) / image.ndim) + + The magnitude is also computed if axis is a sequence. + mode : str or sequence of str, optional + The boundary mode for the convolution. See `scipy.ndimage.convolve` + for a description of the modes. This can be either a single boundary + mode or one boundary mode per axis. + cval : float, optional + When `mode` is ``'constant'``, this is the constant used in values + outside the boundary of the image data. + + Returns + ------- + output : array of float + The Sobel edge map. + + See also + -------- + sobel_h, sobel_v : horizontal and vertical edge detection. 
+ scharr, prewitt, farid, skimage.feature.canny + + References + ---------- + .. [1] D. Kroon, 2009, Short Paper University Twente, Numerical + Optimization of Kernel Based Image Derivatives. + + .. [2] https://en.wikipedia.org/wiki/Sobel_operator + + Examples + -------- + >>> from skimage import data + >>> from skimage import filters + >>> camera = data.camera() + >>> edges = filters.sobel(camera) + """ + output = _generic_edge_filter( + image, smooth_weights=SOBEL_SMOOTH, axis=axis, mode=mode, cval=cval + ) + output = _mask_filter_result(output, mask) + return output + + +def sobel_h(image, mask=None): + """Find the horizontal edges of an image using the Sobel transform. + + Parameters + ---------- + image : 2-D array + Image to process. + mask : 2-D array, optional + An optional mask to limit the application to a certain area. + Note that pixels surrounding masked regions are also masked to + prevent masked regions from affecting the result. + + Returns + ------- + output : 2-D array + The Sobel edge map. + + Notes + ----- + We use the following kernel:: + + 1 2 1 + 0 0 0 + -1 -2 -1 + + """ + check_nD(image, 2) + return sobel(image, mask=mask, axis=0) + + +def sobel_v(image, mask=None): + """Find the vertical edges of an image using the Sobel transform. + + Parameters + ---------- + image : 2-D array + Image to process. + mask : 2-D array, optional + An optional mask to limit the application to a certain area. + Note that pixels surrounding masked regions are also masked to + prevent masked regions from affecting the result. + + Returns + ------- + output : 2-D array + The Sobel edge map. + + Notes + ----- + We use the following kernel:: + + 1 0 -1 + 2 0 -2 + 1 0 -1 + + """ + check_nD(image, 2) + return sobel(image, mask=mask, axis=1) + + +def scharr(image, mask=None, *, axis=None, mode='reflect', cval=0.0): + """Find the edge magnitude using the Scharr transform. + + Parameters + ---------- + image : array + The input image. 
def scharr(image, mask=None, *, axis=None, mode='reflect', cval=0.0):
    """Find the edge magnitude using the Scharr transform.

    Parameters
    ----------
    image : array
        The input image.
    mask : array of bool, optional
        Clip the output image to this mask. (Values where mask=0 will be set
        to 0.)
    axis : int or sequence of int, optional
        Compute the edge filter along this axis. If not provided, the edge
        magnitude is computed, defined as::

            sch_mag = np.sqrt(sum([scharr(image, axis=i)**2
                                   for i in range(image.ndim)]) / image.ndim)

        The magnitude is also computed if axis is a sequence.
    mode : str or sequence of str, optional
        The boundary mode for the convolution. See `scipy.ndimage.convolve`
        for a description of the modes. This can be either a single boundary
        mode or one boundary mode per axis.
    cval : float, optional
        When `mode` is ``'constant'``, this is the constant used in values
        outside the boundary of the image data.

    Returns
    -------
    output : array of float
        The Scharr edge map.

    See also
    --------
    scharr_h, scharr_v : horizontal and vertical edge detection.
    sobel, prewitt, farid, skimage.feature.canny

    Notes
    -----
    The Scharr operator has a better rotation invariance than
    other edge filters such as the Sobel or the Prewitt operators.

    References
    ----------
    .. [1] D. Kroon, 2009, Short Paper University Twente, Numerical
           Optimization of Kernel Based Image Derivatives.

    .. [2] https://en.wikipedia.org/wiki/Sobel_operator#Alternative_operators

    Examples
    --------
    >>> from skimage import data
    >>> from skimage import filters
    >>> camera = data.camera()
    >>> edges = filters.scharr(camera)
    """
    edge_map = _generic_edge_filter(
        image, smooth_weights=SCHARR_SMOOTH, axis=axis, mode=mode, cval=cval
    )
    return _mask_filter_result(edge_map, mask)


def scharr_h(image, mask=None):
    """Find the horizontal edges of an image using the Scharr transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Scharr edge map.

    Notes
    -----
    We use the following kernel::

      3   10   3
      0    0   0
     -3  -10  -3

    References
    ----------
    .. [1] D. Kroon, 2009, Short Paper University Twente, Numerical
           Optimization of Kernel Based Image Derivatives.

    """
    check_nD(image, 2)
    return scharr(image, mask=mask, axis=0)


def scharr_v(image, mask=None):
    """Find the vertical edges of an image using the Scharr transform.

    Parameters
    ----------
    image : 2-D array
        Image to process
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Scharr edge map.

    Notes
    -----
    We use the following kernel::

      3   0   -3
     10   0  -10
      3   0   -3

    References
    ----------
    .. [1] D. Kroon, 2009, Short Paper University Twente, Numerical
           Optimization of Kernel Based Image Derivatives.
    """
    check_nD(image, 2)
    return scharr(image, mask=mask, axis=1)
def prewitt(image, mask=None, *, axis=None, mode='reflect', cval=0.0):
    """Find the edge magnitude using the Prewitt transform.

    Parameters
    ----------
    image : array
        The input image.
    mask : array of bool, optional
        Clip the output image to this mask. (Values where mask=0 will be set
        to 0.)
    axis : int or sequence of int, optional
        Compute the edge filter along this axis. If not provided, the edge
        magnitude is computed, defined as::

            prw_mag = np.sqrt(sum([prewitt(image, axis=i)**2
                                   for i in range(image.ndim)]) / image.ndim)

        The magnitude is also computed if axis is a sequence.
    mode : str or sequence of str, optional
        The boundary mode for the convolution. See `scipy.ndimage.convolve`
        for a description of the modes. This can be either a single boundary
        mode or one boundary mode per axis.
    cval : float, optional
        When `mode` is ``'constant'``, this is the constant used in values
        outside the boundary of the image data.

    Returns
    -------
    output : array of float
        The Prewitt edge map.

    See also
    --------
    prewitt_h, prewitt_v : horizontal and vertical edge detection.
    sobel, scharr, farid, skimage.feature.canny

    Notes
    -----
    The edge magnitude depends slightly on edge directions, since the
    approximation of the gradient operator by the Prewitt operator is not
    completely rotation invariant. For a better rotation invariance, the
    Scharr operator should be used. The Sobel operator has a better rotation
    invariance than the Prewitt operator, but a worse rotation invariance
    than the Scharr operator.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage import filters
    >>> camera = data.camera()
    >>> edges = filters.prewitt(camera)
    """
    edge_map = _generic_edge_filter(
        image, smooth_weights=PREWITT_SMOOTH, axis=axis, mode=mode, cval=cval
    )
    return _mask_filter_result(edge_map, mask)


def prewitt_h(image, mask=None):
    """Find the horizontal edges of an image using the Prewitt transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Prewitt edge map.

    Notes
    -----
    We use the following kernel::

      1/3   1/3   1/3
       0     0     0
     -1/3  -1/3  -1/3

    """
    check_nD(image, 2)
    return prewitt(image, mask=mask, axis=0)


def prewitt_v(image, mask=None):
    """Find the vertical edges of an image using the Prewitt transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Prewitt edge map.

    Notes
    -----
    We use the following kernel::

      1/3   0  -1/3
      1/3   0  -1/3
      1/3   0  -1/3

    """
    check_nD(image, 2)
    return prewitt(image, mask=mask, axis=1)


def roberts(image, mask=None):
    """Find the edge magnitude using Roberts' cross operator.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Roberts' Cross edge map.

    See also
    --------
    roberts_pos_diag, roberts_neg_diag : diagonal edge detection.
    sobel, scharr, prewitt, skimage.feature.canny

    Examples
    --------
    >>> from skimage import data
    >>> camera = data.camera()
    >>> from skimage import filters
    >>> edges = filters.roberts(camera)

    """
    check_nD(image, 2)
    pos = roberts_pos_diag(image, mask)
    neg = roberts_neg_diag(image, mask)
    # Magnitude of the two diagonal responses, normalized by sqrt(2).
    return np.sqrt(pos * pos + neg * neg) / np.sqrt(2)
def roberts_pos_diag(image, mask=None):
    """Find the cross edges of an image using Roberts' cross operator.

    The kernel is applied to the input image to produce separate
    measurements of the gradient component in one orientation.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Robert's edge map.

    Notes
    -----
    We use the following kernel::

      1   0
      0  -1

    """
    check_nD(image, 2)
    if image.dtype.kind == 'f':
        image = image.astype(_supported_float_type(image.dtype), copy=False)
    else:
        image = img_as_float(image)
    return _mask_filter_result(convolve(image, ROBERTS_PD_WEIGHTS), mask)


def roberts_neg_diag(image, mask=None):
    """Find the cross edges of an image using the Roberts' Cross operator.

    The kernel is applied to the input image to produce separate
    measurements of the gradient component in one orientation.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Robert's edge map.

    Notes
    -----
    We use the following kernel::

      0   1
     -1   0

    """
    check_nD(image, 2)
    if image.dtype.kind == 'f':
        image = image.astype(_supported_float_type(image.dtype), copy=False)
    else:
        image = img_as_float(image)
    return _mask_filter_result(convolve(image, ROBERTS_ND_WEIGHTS), mask)


def laplace(image, ksize=3, mask=None):
    """Find the edges of an image using the Laplace operator.

    Parameters
    ----------
    image : ndarray
        Image to process.
    ksize : int, optional
        Define the size of the discrete Laplacian operator such that it
        will have a size of (ksize,) * image.ndim.
    mask : ndarray, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : ndarray
        The Laplace edge map.

    Notes
    -----
    The Laplacian operator is generated using the function
    skimage.restoration.uft.laplacian().

    """
    if image.dtype.kind == 'f':
        image = image.astype(_supported_float_type(image.dtype), copy=False)
    else:
        image = img_as_float(image)
    # Build the discrete Laplacian operator; only the real-space kernel
    # (second return value) is used here.
    _, laplace_op = laplacian(image.ndim, (ksize,) * image.ndim)
    return _mask_filter_result(convolve(image, laplace_op), mask)
 P., "Differentiation of discrete
+           multidimensional signals", IEEE Transactions on Image Processing
+           13(4): 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
+    .. [2] Wikipedia, "Farid and Simoncelli Derivatives." Available at:
+           <https://en.wikipedia.org/wiki/Image_derivative#Farid_and_Simoncelli_derivatives>
+
+    Examples
+    --------
+    >>> from skimage import data
+    >>> camera = data.camera()
+    >>> from skimage import filters
+    >>> edges = filters.farid(camera)
+    """
+    output = _generic_edge_filter(
+        image,
+        smooth_weights=farid_smooth,
+        edge_weights=farid_edge,
+        axis=axis,
+        mode=mode,
+        cval=cval,
+    )
+    output = _mask_filter_result(output, mask)
+    return output
+
+
+def farid_h(image, *, mask=None):
+    """Find the horizontal edges of an image using the Farid transform.
+
+    Parameters
+    ----------
+    image : 2-D array
+        Image to process.
+    mask : 2-D array, optional
+        An optional mask to limit the application to a certain area.
+        Note that pixels surrounding masked regions are also masked to
+        prevent masked regions from affecting the result.
+
+    Returns
+    -------
+    output : 2-D array
+        The Farid edge map.
+
+    Notes
+    -----
+    The kernel was constructed using the 5-tap weights from [1].
+
+    References
+    ----------
+    .. [1] Farid, H. and Simoncelli, E. P., "Differentiation of discrete
+           multidimensional signals", IEEE Transactions on Image Processing
+           13(4): 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
+    .. [2] Farid, H. and Simoncelli, E. P. "Optimally rotation-equivariant
+           directional derivative kernels", In: 7th International Conference on
+           Computer Analysis of Images and Patterns, Kiel, Germany. Sep, 1997.
+    """
+    check_nD(image, 2)
+    if image.dtype.kind == 'f':
+        float_dtype = _supported_float_type(image.dtype)
+        image = image.astype(float_dtype, copy=False)
+    else:
+        image = img_as_float(image)
+    result = convolve(image, HFARID_WEIGHTS)
+    return _mask_filter_result(result, mask)
+
+
+def farid_v(image, *, mask=None):
+    """Find the vertical edges of an image using the Farid transform.
+ + Parameters + ---------- + image : 2-D array + Image to process. + mask : 2-D array, optional + An optional mask to limit the application to a certain area. + Note that pixels surrounding masked regions are also masked to + prevent masked regions from affecting the result. + + Returns + ------- + output : 2-D array + The Farid edge map. + + Notes + ----- + The kernel was constructed using the 5-tap weights from [1]. + + References + ---------- + .. [1] Farid, H. and Simoncelli, E. P., "Differentiation of discrete + multidimensional signals", IEEE Transactions on Image Processing + 13(4): 496-508, 2004. :DOI:`10.1109/TIP.2004.823819` + """ + check_nD(image, 2) + if image.dtype.kind == 'f': + float_dtype = _supported_float_type(image.dtype) + image = image.astype(float_dtype, copy=False) + else: + image = img_as_float(image) + result = convolve(image, VFARID_WEIGHTS) + return _mask_filter_result(result, mask) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/lpi_filter.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/lpi_filter.py new file mode 100644 index 0000000000000000000000000000000000000000..6741eef815c8fdb438f846f8cbd9672026a60e9f --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/lpi_filter.py @@ -0,0 +1,261 @@ +""" +:author: Stefan van der Walt, 2008 +:license: modified BSD +""" + +import numpy as np +import scipy.fft as fft + +from .._shared.utils import _supported_float_type, check_nD + + +def _min_limit(x, val=np.finfo(float).eps): + mask = np.abs(x) < val + x[mask] = np.sign(x[mask]) * val + + +def _center(x, oshape): + """Return an array of shape ``oshape`` from the center of array ``x``.""" + start = (np.array(x.shape) - np.array(oshape)) // 2 + out = x[tuple(slice(s, s + n) for s, n in zip(start, oshape))] + return out + + +def _pad(data, shape): + """Pad the data to the given shape with zeros. 
+ + Parameters + ---------- + data : 2-d ndarray + Input data + shape : (2,) tuple + + """ + out = np.zeros(shape, dtype=data.dtype) + out[tuple(slice(0, n) for n in data.shape)] = data + return out + + +class LPIFilter2D: + """Linear Position-Invariant Filter (2-dimensional)""" + + def __init__(self, impulse_response, **filter_params): + """ + Parameters + ---------- + impulse_response : callable `f(r, c, **filter_params)` + Function that yields the impulse response. ``r`` and ``c`` are + 1-dimensional vectors that represent row and column positions, in + other words coordinates are (r[0],c[0]),(r[0],c[1]) etc. + `**filter_params` are passed through. + + In other words, ``impulse_response`` would be called like this: + + >>> def impulse_response(r, c, **filter_params): + ... pass + >>> + >>> r = [0,0,0,1,1,1,2,2,2] + >>> c = [0,1,2,0,1,2,0,1,2] + >>> filter_params = {'kw1': 1, 'kw2': 2, 'kw3': 3} + >>> impulse_response(r, c, **filter_params) + + + Examples + -------- + Gaussian filter without normalization of coefficients: + + >>> def filt_func(r, c, sigma=1): + ... 
return np.exp(-(r**2 + c**2)/(2 * sigma**2)) + >>> filter = LPIFilter2D(filt_func) + + """ + if not callable(impulse_response): + raise ValueError("Impulse response must be a callable.") + + self.impulse_response = impulse_response + self.filter_params = filter_params + self._cache = None + + def _prepare(self, data): + """Calculate filter and data FFT in preparation for filtering.""" + dshape = np.array(data.shape) + even_offset = (dshape % 2 == 0).astype(int) + dshape += even_offset # all filter dimensions must be uneven + oshape = np.array(data.shape) * 2 - 1 + + float_dtype = _supported_float_type(data.dtype) + data = data.astype(float_dtype, copy=False) + + if self._cache is None or np.any(self._cache.shape != oshape): + coords = np.mgrid[ + [ + slice(0 + offset, float(n + offset)) + for (n, offset) in zip(dshape, even_offset) + ] + ] + # this steps over two sets of coordinates, + # not over the coordinates individually + for k, coord in enumerate(coords): + coord -= (dshape[k] - 1) / 2.0 + coords = coords.reshape(2, -1).T # coordinate pairs (r,c) + coords = coords.astype(float_dtype, copy=False) + + f = self.impulse_response( + coords[:, 0], coords[:, 1], **self.filter_params + ).reshape(dshape) + + f = _pad(f, oshape) + F = fft.fftn(f) + self._cache = F + else: + F = self._cache + + data = _pad(data, oshape) + G = fft.fftn(data) + + return F, G + + def __call__(self, data): + """Apply the filter to the given data. + + Parameters + ---------- + data : (M, N) ndarray + + """ + check_nD(data, 2, 'data') + F, G = self._prepare(data) + out = fft.ifftn(F * G) + out = np.abs(_center(out, data.shape)) + return out + + +def filter_forward( + data, impulse_response=None, filter_params=None, predefined_filter=None +): + """Apply the given filter to data. + + Parameters + ---------- + data : (M, N) ndarray + Input data. + impulse_response : callable `f(r, c, **filter_params)` + Impulse response of the filter. See LPIFilter2D.__init__. 
+    filter_params : dict, optional
+        Additional keyword parameters to the impulse_response function.
+
+    Other Parameters
+    ----------------
+    predefined_filter : LPIFilter2D
+        If you need to apply the same filter multiple times over different
+        images, construct the LPIFilter2D and specify it here.
+
+    Examples
+    --------
+
+    Gaussian filter without normalization:
+
+    >>> def filt_func(r, c, sigma=1):
+    ...     return np.exp(-(r**2 + c**2)/(2 * sigma**2))
+    >>>
+    >>> from skimage import data
+    >>> filtered = filter_forward(data.coins(), filt_func)
+
+    """
+    if filter_params is None:
+        filter_params = {}
+    check_nD(data, 2, 'data')
+    if predefined_filter is None:
+        predefined_filter = LPIFilter2D(impulse_response, **filter_params)
+    return predefined_filter(data)
+
+
+def filter_inverse(
+    data, impulse_response=None, filter_params=None, max_gain=2, predefined_filter=None
+):
+    """Apply the filter in reverse to the given data.
+
+    Parameters
+    ----------
+    data : (M, N) ndarray
+        Input data.
+    impulse_response : callable `f(r, c, **filter_params)`
+        Impulse response of the filter. See :class:`~.LPIFilter2D`. This is a required
+        argument unless a `predefined_filter` is provided.
+    filter_params : dict, optional
+        Additional keyword parameters to the impulse_response function.
+    max_gain : float, optional
+        Limit the filter gain. Often, the filter contains zeros, which would
+        cause the inverse filter to have infinite gain. High gain causes
+        amplification of artefacts, so a conservative limit is recommended.
+
+    Other Parameters
+    ----------------
+    predefined_filter : LPIFilter2D, optional
+        If you need to apply the same filter multiple times over different
+        images, construct the LPIFilter2D and specify it here.
+ + """ + if filter_params is None: + filter_params = {} + + check_nD(data, 2, 'data') + if predefined_filter is None: + filt = LPIFilter2D(impulse_response, **filter_params) + else: + filt = predefined_filter + + F, G = filt._prepare(data) + _min_limit(F, val=np.finfo(F.real.dtype).eps) + + F = 1 / F + mask = np.abs(F) > max_gain + F[mask] = np.sign(F[mask]) * max_gain + + return _center(np.abs(fft.ifftshift(fft.ifftn(G * F))), data.shape) + + +def wiener( + data, impulse_response=None, filter_params=None, K=0.25, predefined_filter=None +): + """Minimum Mean Square Error (Wiener) inverse filter. + + Parameters + ---------- + data : (M, N) ndarray + Input data. + K : float or (M, N) ndarray + Ratio between power spectrum of noise and undegraded + image. + impulse_response : callable `f(r, c, **filter_params)` + Impulse response of the filter. See LPIFilter2D.__init__. + filter_params : dict, optional + Additional keyword parameters to the impulse_response function. + + Other Parameters + ---------------- + predefined_filter : LPIFilter2D + If you need to apply the same filter multiple times over different + images, construct the LPIFilter2D and specify it here. 
+ + """ + if filter_params is None: + filter_params = {} + + check_nD(data, 2, 'data') + + if not isinstance(K, float): + check_nD(K, 2, 'K') + + if predefined_filter is None: + filt = LPIFilter2D(impulse_response, **filter_params) + else: + filt = predefined_filter + + F, G = filt._prepare(data) + _min_limit(F, val=np.finfo(F.real.dtype).eps) + + H_mag_sqr = np.abs(F) ** 2 + F = 1 / F * H_mag_sqr / (H_mag_sqr + K) + + return _center(np.abs(fft.ifftshift(fft.ifftn(G * F))), data.shape) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/__init__.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb06260488fbf9a151e7d4a7b4dbaa7edb2342db --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/__init__.py @@ -0,0 +1,89 @@ +from .generic import ( + autolevel, + equalize, + gradient, + majority, + maximum, + mean, + geometric_mean, + subtract_mean, + median, + minimum, + modal, + enhance_contrast, + pop, + threshold, + noise_filter, + entropy, + otsu, + sum, + windowed_histogram, +) +from ._percentile import ( + autolevel_percentile, + gradient_percentile, + mean_percentile, + subtract_mean_percentile, + enhance_contrast_percentile, + percentile, + pop_percentile, + sum_percentile, + threshold_percentile, +) +from .bilateral import mean_bilateral, pop_bilateral, sum_bilateral + + +__all__ = [ + 'autolevel', + 'autolevel_percentile', + 'gradient', + 'equalize', + 'gradient_percentile', + 'majority', + 'maximum', + 'mean', + 'geometric_mean', + 'mean_percentile', + 'mean_bilateral', + 'subtract_mean', + 'subtract_mean_percentile', + 'median', + 'minimum', + 'modal', + 'enhance_contrast', + 'enhance_contrast_percentile', + 'pop', + 'pop_percentile', + 'pop_bilateral', + 'sum', + 'sum_bilateral', + 'sum_percentile', + 'threshold', + 'threshold_percentile', + 'noise_filter', + 'entropy', + 'otsu', + 'percentile', + 'windowed_histogram', +] 
+ +__3Dfilters = [ + 'autolevel', + 'equalize', + 'gradient', + 'majority', + 'maximum', + 'mean', + 'geometric_mean', + 'subtract_mean', + 'median', + 'minimum', + 'modal', + 'enhance_contrast', + 'pop', + 'sum', + 'threshold', + 'noise_filter', + 'entropy', + 'otsu', +] diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/_percentile.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/_percentile.py new file mode 100644 index 0000000000000000000000000000000000000000..3f233dd8f7a8aedb0a6c58607c9fb7e77cda46f4 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/_percentile.py @@ -0,0 +1,485 @@ +"""Inferior and superior ranks, provided by the user, are passed to the kernel +function to provide a softer version of the rank filters. E.g. +``autolevel_percentile`` will stretch image levels between percentile [p0, p1] +instead of using [min, max]. It means that isolated bright or dark pixels will +not produce halos. + +The local histogram is computed using a sliding window similar to the method +described in [1]_. + +Input image can be 8-bit or 16-bit, for 16-bit input images, the number of +histogram bins is determined from the maximum value present in the image. + +Result image is 8-/16-bit or double with respect to the input image and the +rank filter operation. + +References +---------- + +.. [1] Huang, T. ,Yang, G. ; Tang, G.. "A fast two-dimensional + median filtering algorithm", IEEE Transactions on Acoustics, Speech and + Signal Processing, Feb 1979. Volume: 27 , Issue: 1, Page(s): 13 - 18. + +""" + +from ..._shared.utils import check_nD +from . 
import percentile_cy +from .generic import _preprocess_input + +__all__ = [ + 'autolevel_percentile', + 'gradient_percentile', + 'mean_percentile', + 'subtract_mean_percentile', + 'enhance_contrast_percentile', + 'percentile', + 'pop_percentile', + 'threshold_percentile', +] + + +def _apply(func, image, footprint, out, mask, shift_x, shift_y, p0, p1, out_dtype=None): + check_nD(image, 2) + image, footprint, out, mask, n_bins = _preprocess_input( + image, + footprint, + out, + mask, + out_dtype, + shift_x=shift_x, + shift_y=shift_y, + ) + + func( + image, + footprint, + shift_x=shift_x, + shift_y=shift_y, + mask=mask, + out=out, + n_bins=n_bins, + p0=p0, + p1=p1, + ) + + return out.reshape(out.shape[:2]) + + +def autolevel_percentile( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, p0=0, p1=1 +): + """Return grayscale local autolevel of an image. + + This filter locally stretches the histogram of grayvalues to cover the + entire range of values from "white" to "black". + + Only grayvalues between percentiles [p0, p1] are considered in the filter. + + Parameters + ---------- + image : 2-D array (uint8, uint16) + Input image. + footprint : 2-D array + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (same dtype as input) + If None, a new array is allocated. + mask : ndarray + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + p0, p1 : float in [0, ..., 1] + Define the [p0, p1] percentile interval to be considered for computing + the value. + + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. 
+ + """ + + return _apply( + percentile_cy._autolevel, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + p0=p0, + p1=p1, + ) + + +def gradient_percentile( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, p0=0, p1=1 +): + """Return local gradient of an image (i.e. local maximum - local minimum). + + Only grayvalues between percentiles [p0, p1] are considered in the filter. + + Parameters + ---------- + image : 2-D array (uint8, uint16) + Input image. + footprint : 2-D array + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (same dtype as input) + If None, a new array is allocated. + mask : ndarray + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + p0, p1 : float in [0, ..., 1] + Define the [p0, p1] percentile interval to be considered for computing + the value. + + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. + + """ + + return _apply( + percentile_cy._gradient, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + p0=p0, + p1=p1, + ) + + +def mean_percentile( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, p0=0, p1=1 +): + """Return local mean of an image. + + Only grayvalues between percentiles [p0, p1] are considered in the filter. + + Parameters + ---------- + image : 2-D array (uint8, uint16) + Input image. + footprint : 2-D array + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (same dtype as input) + If None, a new array is allocated. + mask : ndarray + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). 
+ shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + p0, p1 : float in [0, ..., 1] + Define the [p0, p1] percentile interval to be considered for computing + the value. + + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. + + """ + + return _apply( + percentile_cy._mean, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + p0=p0, + p1=p1, + ) + + +def subtract_mean_percentile( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, p0=0, p1=1 +): + """Return image subtracted from its local mean. + + Only grayvalues between percentiles [p0, p1] are considered in the filter. + + Parameters + ---------- + image : 2-D array (uint8, uint16) + Input image. + footprint : 2-D array + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (same dtype as input) + If None, a new array is allocated. + mask : ndarray + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + p0, p1 : float in [0, ..., 1] + Define the [p0, p1] percentile interval to be considered for computing + the value. + + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. + + """ + + return _apply( + percentile_cy._subtract_mean, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + p0=p0, + p1=p1, + ) + + +def enhance_contrast_percentile( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, p0=0, p1=1 +): + """Enhance contrast of an image. + + This replaces each pixel by the local maximum if the pixel grayvalue is + closer to the local maximum than the local minimum. 
Otherwise it is + replaced by the local minimum. + + Only grayvalues between percentiles [p0, p1] are considered in the filter. + + Parameters + ---------- + image : 2-D array (uint8, uint16) + Input image. + footprint : 2-D array + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (same dtype as input) + If None, a new array is allocated. + mask : ndarray + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + p0, p1 : float in [0, ..., 1] + Define the [p0, p1] percentile interval to be considered for computing + the value. + + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. + + """ + + return _apply( + percentile_cy._enhance_contrast, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + p0=p0, + p1=p1, + ) + + +def percentile(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, p0=0): + """Return local percentile of an image. + + Returns the value of the p0 lower percentile of the local grayvalue + distribution. + + Only grayvalues between percentiles [p0, p1] are considered in the filter. + + Parameters + ---------- + image : 2-D array (uint8, uint16) + Input image. + footprint : 2-D array + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (same dtype as input) + If None, a new array is allocated. + mask : ndarray + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + p0 : float in [0, ..., 1] + Set the percentile value. 
+ + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. + + """ + + return _apply( + percentile_cy._percentile, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + p0=p0, + p1=0.0, + ) + + +def pop_percentile( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, p0=0, p1=1 +): + """Return the local number (population) of pixels. + + The number of pixels is defined as the number of pixels which are included + in the footprint and the mask. + + Only grayvalues between percentiles [p0, p1] are considered in the filter. + + Parameters + ---------- + image : 2-D array (uint8, uint16) + Input image. + footprint : 2-D array + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (same dtype as input) + If None, a new array is allocated. + mask : ndarray + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + p0, p1 : float in [0, ..., 1] + Define the [p0, p1] percentile interval to be considered for computing + the value. + + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. + + """ + + return _apply( + percentile_cy._pop, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + p0=p0, + p1=p1, + ) + + +def sum_percentile( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, p0=0, p1=1 +): + """Return the local sum of pixels. + + Only grayvalues between percentiles [p0, p1] are considered in the filter. + + Note that the sum may overflow depending on the data type of the input + array. + + Parameters + ---------- + image : 2-D array (uint8, uint16) + Input image. + footprint : 2-D array + The neighborhood expressed as a 2-D array of 1's and 0's. 
+ out : 2-D array (same dtype as input) + If None, a new array is allocated. + mask : ndarray + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + p0, p1 : float in [0, ..., 1] + Define the [p0, p1] percentile interval to be considered for computing + the value. + + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. + + """ + + return _apply( + percentile_cy._sum, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + p0=p0, + p1=p1, + ) + + +def threshold_percentile( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, p0=0 +): + """Local threshold of an image. + + The resulting binary mask is True if the grayvalue of the center pixel is + greater than the local mean. + + Only grayvalues between percentiles [p0, p1] are considered in the filter. + + Parameters + ---------- + image : 2-D array (uint8, uint16) + Input image. + footprint : 2-D array + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (same dtype as input) + If None, a new array is allocated. + mask : ndarray + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + p0 : float in [0, ..., 1] + Set the percentile value. + + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. 
+ + """ + + return _apply( + percentile_cy._threshold, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + p0=p0, + p1=0, + ) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/bilateral.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/bilateral.py new file mode 100644 index 0000000000000000000000000000000000000000..637c491a128ee1ca830516f1721e92406eac79ae --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/bilateral.py @@ -0,0 +1,262 @@ +"""Approximate bilateral rank filter for local (custom kernel) mean. + +The local histogram is computed using a sliding window similar to the method +described in [1]_. + +The pixel neighborhood is defined by: + +* the given footprint (structuring element) +* an interval [g-s0, g+s1] in graylevel around g the processed pixel graylevel + +The kernel is flat (i.e. each pixel belonging to the neighborhood contributes +equally). + +Result image is 8-/16-bit or double with respect to the input image and the +rank filter operation. + +References +---------- + +.. [1] Huang, T. ,Yang, G. ; Tang, G.. "A fast two-dimensional + median filtering algorithm", IEEE Transactions on Acoustics, Speech and + Signal Processing, Feb 1979. Volume: 27 , Issue: 1, Page(s): 13 - 18. + +""" + +from ..._shared.utils import check_nD +from . 
import bilateral_cy +from .generic import _preprocess_input + +__all__ = ['mean_bilateral', 'pop_bilateral', 'sum_bilateral'] + + +def _apply(func, image, footprint, out, mask, shift_x, shift_y, s0, s1, out_dtype=None): + check_nD(image, 2) + image, footprint, out, mask, n_bins = _preprocess_input( + image, + footprint, + out, + mask, + out_dtype, + shift_x=shift_x, + shift_y=shift_y, + ) + + func( + image, + footprint, + shift_x=shift_x, + shift_y=shift_y, + mask=mask, + out=out, + n_bins=n_bins, + s0=s0, + s1=s1, + ) + + return out.reshape(out.shape[:2]) + + +def mean_bilateral( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, s0=10, s1=10 +): + """Apply a flat kernel bilateral filter. + + This is an edge-preserving and noise reducing denoising filter. It averages + pixels based on their spatial closeness and radiometric similarity. + + Spatial closeness is measured by considering only the local pixel + neighborhood given by a footprint (structuring element). + + Radiometric similarity is defined by the graylevel interval [g-s0, g+s1] + where g is the current pixel graylevel. + + Only pixels belonging to the footprint and having a graylevel inside this + interval are averaged. + + Parameters + ---------- + image : 2-D array (uint8, uint16) + Input image. + footprint : 2-D array + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (same dtype as input) + If None, a new array is allocated. + mask : ndarray + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + s0, s1 : int + Define the [s0, s1] interval around the grayvalue of the center pixel + to be considered for computing the value. + + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. 
+ + See also + -------- + skimage.restoration.denoise_bilateral + + Examples + -------- + >>> import numpy as np + >>> from skimage import data + >>> from skimage.morphology import disk + >>> from skimage.filters.rank import mean_bilateral + >>> img = data.camera().astype(np.uint16) + >>> bilat_img = mean_bilateral(img, disk(20), s0=10,s1=10) + + """ + + return _apply( + bilateral_cy._mean, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + s0=s0, + s1=s1, + ) + + +def pop_bilateral( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, s0=10, s1=10 +): + """Return the local number (population) of pixels. + + + The number of pixels is defined as the number of pixels which are included + in the footprint and the mask. Additionally pixels must have a graylevel + inside the interval [g-s0, g+s1] where g is the grayvalue of the center + pixel. + + Parameters + ---------- + image : 2-D array (uint8, uint16) + Input image. + footprint : 2-D array + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (same dtype as input) + If None, a new array is allocated. + mask : ndarray + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + s0, s1 : int + Define the [s0, s1] interval around the grayvalue of the center pixel + to be considered for computing the value. + + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. + + Examples + -------- + >>> import numpy as np + >>> from skimage.morphology import footprint_rectangle + >>> import skimage.filters.rank as rank + >>> img = 255 * np.array([[0, 0, 0, 0, 0], + ... [0, 1, 1, 1, 0], + ... [0, 1, 1, 1, 0], + ... [0, 1, 1, 1, 0], + ... 
[0, 0, 0, 0, 0]], dtype=np.uint16) + >>> rank.pop_bilateral(img, footprint_rectangle((3, 3)), s0=10, s1=10) + array([[3, 4, 3, 4, 3], + [4, 4, 6, 4, 4], + [3, 6, 9, 6, 3], + [4, 4, 6, 4, 4], + [3, 4, 3, 4, 3]], dtype=uint16) + + """ + + return _apply( + bilateral_cy._pop, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + s0=s0, + s1=s1, + ) + + +def sum_bilateral( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, s0=10, s1=10 +): + """Apply a flat kernel bilateral filter. + + This is an edge-preserving and noise reducing denoising filter. It averages + pixels based on their spatial closeness and radiometric similarity. + + Spatial closeness is measured by considering only the local pixel + neighborhood given by a footprint (structuring element). + + Radiometric similarity is defined by the graylevel interval [g-s0, g+s1] + where g is the current pixel graylevel. + + Only pixels belonging to the footprint AND having a graylevel inside this + interval are summed. + + Note that the sum may overflow depending on the data type of the input + array. + + Parameters + ---------- + image : 2-D array (uint8, uint16) + Input image. + footprint : 2-D array + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (same dtype as input) + If None, a new array is allocated. + mask : ndarray + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + s0, s1 : int + Define the [s0, s1] interval around the grayvalue of the center pixel + to be considered for computing the value. + + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. 
+ + See also + -------- + skimage.restoration.denoise_bilateral + + Examples + -------- + >>> import numpy as np + >>> from skimage import data + >>> from skimage.morphology import disk + >>> from skimage.filters.rank import sum_bilateral + >>> img = data.camera().astype(np.uint16) + >>> bilat_img = sum_bilateral(img, disk(10), s0=10, s1=10) + + """ + + return _apply( + bilateral_cy._sum, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + s0=s0, + s1=s1, + ) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/generic.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..be1c10d5a266c8011017a6828d8d2d08a0f6a41e --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/generic.py @@ -0,0 +1,1752 @@ +""" + +General Description +------------------- + +These filters compute the local histogram at each pixel, using a sliding window +similar to the method described in [1]_. A histogram is built using a moving +window in order to limit redundant computation. The moving window follows a +snake-like path: + +...------------------------↘ +↙--------------------------↙ +↘--------------------------... + +The local histogram is updated at each pixel as the footprint window +moves by, i.e. only those pixels entering and leaving the footprint +update the local histogram. The histogram size is 8-bit (256 bins) for 8-bit +images and 2- to 16-bit for 16-bit images depending on the maximum value of the +image. + +The filter is applied up to the image border, the neighborhood used is +adjusted accordingly. The user may provide a mask image (same size as input +image) where non zero values are the part of the image participating in the +histogram computation. By default the entire image is filtered. + +This implementation outperforms :func:`skimage.morphology.dilation` +for large footprints. 
+ +Input images will be cast in unsigned 8-bit integer or unsigned 16-bit integer +if necessary. The number of histogram bins is then determined from the maximum +value present in the image. Eventually, the output image is cast in the input +dtype, or the `output_dtype` if set. + +To do +----- + +* add simple examples, adapt documentation on existing examples +* add/check existing doc +* adapting tests for each type of filter + + +References +---------- + +.. [1] Huang, T. ,Yang, G. ; Tang, G.. "A fast two-dimensional + median filtering algorithm", IEEE Transactions on Acoustics, Speech and + Signal Processing, Feb 1979. Volume: 27 , Issue: 1, Page(s): 13 - 18. + +""" + +import numpy as np +from scipy import ndimage as ndi + +from ..._shared.utils import check_nD, warn +from ...morphology.footprints import _footprint_is_sequence +from ...util import img_as_ubyte +from . import generic_cy + + +__all__ = [ + 'autolevel', + 'equalize', + 'gradient', + 'maximum', + 'mean', + 'geometric_mean', + 'subtract_mean', + 'median', + 'minimum', + 'modal', + 'enhance_contrast', + 'pop', + 'threshold', + 'noise_filter', + 'entropy', + 'otsu', +] + + +def _preprocess_input( + image, + footprint=None, + out=None, + mask=None, + out_dtype=None, + pixel_size=1, + shift_x=None, + shift_y=None, +): + """Preprocess and verify input for filters.rank methods. + + Parameters + ---------- + image : 2-D array (integer or float) + Input image. + footprint : 2-D array (integer or float), optional + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (integer or float), optional + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + out_dtype : data-type, optional + Desired output data-type. Default is None, which means we cast output + in input dtype. + pixel_size : int, optional + Dimension of each pixel. 
Default value is 1. + shift_x, shift_y : int, optional + Offset added to the footprint center point. Shift is bounded to the + footprint size (center must be inside of the given footprint). + + Returns + ------- + image : 2-D array (np.uint8 or np.uint16) + footprint : 2-D array (np.uint8) + The neighborhood expressed as a binary 2-D array. + out : 3-D array (same dtype out_dtype or as input) + Output array. The two first dimensions are the spatial ones, the third + one is the pixel vector (length 1 by default). + mask : 2-D array (np.uint8) + Mask array that defines (>0) area of the image included in the local + neighborhood. + n_bins : int + Number of histogram bins. + + """ + check_nD(image, 2) + input_dtype = image.dtype + if input_dtype in (bool, bool) or out_dtype in (bool, bool): + raise ValueError('dtype cannot be bool.') + if input_dtype not in (np.uint8, np.uint16): + message = ( + f'Possible precision loss converting image of type ' + f'{input_dtype} to uint8 as required by rank filters. ' + f'Convert manually using skimage.util.img_as_ubyte to ' + f'silence this warning.' 
+ ) + warn(message, stacklevel=5) + image = img_as_ubyte(image) + + if _footprint_is_sequence(footprint): + raise ValueError( + "footprint sequences are not currently supported by rank filters" + ) + + footprint = np.ascontiguousarray(img_as_ubyte(footprint > 0)) + if footprint.ndim != image.ndim: + raise ValueError('Image dimensions and neighborhood dimensions' 'do not match') + + image = np.ascontiguousarray(image) + + if mask is not None: + mask = img_as_ubyte(mask) + mask = np.ascontiguousarray(mask) + + if image is out: + raise NotImplementedError("Cannot perform rank operation in place.") + + if out is None: + if out_dtype is None: + out_dtype = image.dtype + out = np.empty(image.shape + (pixel_size,), dtype=out_dtype) + else: + if len(out.shape) == 2: + out = out.reshape(out.shape + (pixel_size,)) + + if image.dtype in (np.uint8, np.int8): + n_bins = 256 + else: + # Convert to a Python int to avoid the potential overflow when we add + # 1 to the maximum of the image. + n_bins = int(max(3, image.max())) + 1 + + if n_bins > 2**10: + warn( + f'Bad rank filter performance is expected due to a ' + f'large number of bins ({n_bins}), equivalent to an approximate ' + f'bitdepth of {np.log2(n_bins):.1f}.', + stacklevel=2, + ) + + for name, value in zip(("shift_x", "shift_y"), (shift_x, shift_y)): + if np.dtype(type(value)) == bool: + warn( + f"Paramter `{name}` is boolean and will be interpreted as int. " + "This is not officially supported, use int instead.", + category=UserWarning, + stacklevel=4, + ) + + return image, footprint, out, mask, n_bins + + +def _handle_input_3D( + image, + footprint=None, + out=None, + mask=None, + out_dtype=None, + pixel_size=1, + shift_x=None, + shift_y=None, + shift_z=None, +): + """Preprocess and verify input for filters.rank methods. + + Parameters + ---------- + image : 3-D array (integer or float) + Input image. + footprint : 3-D array (integer or float), optional + The neighborhood expressed as a 3-D array of 1's and 0's. 
+ out : 3-D array (integer or float), optional + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + out_dtype : data-type, optional + Desired output data-type. Default is None, which means we cast output + in input dtype. + pixel_size : int, optional + Dimension of each pixel. Default value is 1. + shift_x, shift_y, shift_z : int, optional + Offset added to the footprint center point. Shift is bounded to the + footprint size (center must be inside of the given footprint). + + Returns + ------- + image : 3-D array (np.uint8 or np.uint16) + footprint : 3-D array (np.uint8) + The neighborhood expressed as a binary 3-D array. + out : 3-D array (same dtype out_dtype or as input) + Output array. The two first dimensions are the spatial ones, the third + one is the pixel vector (length 1 by default). + mask : 3-D array (np.uint8) + Mask array that defines (>0) area of the image included in the local + neighborhood. + n_bins : int + Number of histogram bins. + + """ + check_nD(image, 3) + if image.dtype not in (np.uint8, np.uint16): + message = ( + f'Possible precision loss converting image of type ' + f'{image.dtype} to uint8 as required by rank filters. ' + f'Convert manually using skimage.util.img_as_ubyte to ' + f'silence this warning.' 
+ ) + warn(message, stacklevel=2) + image = img_as_ubyte(image) + + footprint = np.ascontiguousarray(img_as_ubyte(footprint > 0)) + if footprint.ndim != image.ndim: + raise ValueError('Image dimensions and neighborhood dimensions' 'do not match') + image = np.ascontiguousarray(image) + + if mask is None: + mask = np.ones(image.shape, dtype=np.uint8) + else: + mask = img_as_ubyte(mask) + mask = np.ascontiguousarray(mask) + + if image is out: + raise NotImplementedError("Cannot perform rank operation in place.") + + if out is None: + if out_dtype is None: + out_dtype = image.dtype + out = np.empty(image.shape + (pixel_size,), dtype=out_dtype) + else: + out = out.reshape(out.shape + (pixel_size,)) + + is_8bit = image.dtype in (np.uint8, np.int8) + + if is_8bit: + n_bins = 256 + else: + # Convert to a Python int to avoid the potential overflow when we add + # 1 to the maximum of the image. + n_bins = int(max(3, image.max())) + 1 + + if n_bins > 2**10: + warn( + f'Bad rank filter performance is expected due to a ' + f'large number of bins ({n_bins}), equivalent to an approximate ' + f'bitdepth of {np.log2(n_bins):.1f}.', + stacklevel=2, + ) + + for name, value in zip( + ("shift_x", "shift_y", "shift_z"), (shift_x, shift_y, shift_z) + ): + if np.dtype(type(value)) == bool: + warn( + f"Parameter `{name}` is boolean and will be interpreted as int. " + "This is not officially supported, use int instead.", + category=UserWarning, + stacklevel=4, + ) + + return image, footprint, out, mask, n_bins + + +def _apply_scalar_per_pixel( + func, image, footprint, out, mask, shift_x, shift_y, out_dtype=None +): + """Process the specific cython function to the image. + + Parameters + ---------- + func : function + Cython function to apply. + image : 2-D array (integer or float) + Input image. + footprint : 2-D array (integer or float) + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (integer or float) + If None, a new array is allocated. 
+ mask : ndarray (integer or float) + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + out_dtype : data-type, optional + Desired output data-type. Default is None, which means we cast output + in input dtype. + + """ + # preprocess and verify the input + image, footprint, out, mask, n_bins = _preprocess_input( + image, footprint, out, mask, out_dtype, shift_x=shift_x, shift_y=shift_y + ) + + # apply cython function + func( + image, + footprint, + shift_x=shift_x, + shift_y=shift_y, + mask=mask, + out=out, + n_bins=n_bins, + ) + + return np.squeeze(out, axis=-1) + + +def _apply_scalar_per_pixel_3D( + func, image, footprint, out, mask, shift_x, shift_y, shift_z, out_dtype=None +): + image, footprint, out, mask, n_bins = _handle_input_3D( + image, + footprint, + out, + mask, + out_dtype, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + + func( + image, + footprint, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + mask=mask, + out=out, + n_bins=n_bins, + ) + + return out.reshape(out.shape[:3]) + + +def _apply_vector_per_pixel( + func, image, footprint, out, mask, shift_x, shift_y, out_dtype=None, pixel_size=1 +): + """ + + Parameters + ---------- + func : function + Cython function to apply. + image : 2-D array (integer or float) + Input image. + footprint : 2-D array (integer or float) + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (integer or float) + If None, a new array is allocated. + mask : ndarray (integer or float) + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int + Offset added to the footprint center point. 
Shift is bounded to the + footprint sizes (center must be inside the given footprint). + out_dtype : data-type, optional + Desired output data-type. Default is None, which means we cast output + in input dtype. + pixel_size : int, optional + Dimension of each pixel. + + Returns + ------- + out : 3-D array with float dtype of dimensions (H,W,N), where (H,W) are + the dimensions of the input image and N is n_bins or + ``image.max() + 1`` if no value is provided as a parameter. + Effectively, each pixel is a N-D feature vector that is the histogram. + The sum of the elements in the feature vector will be 1, unless no + pixels in the window were covered by both footprint and mask, in which + case all elements will be 0. + + """ + # preprocess and verify the input + image, footprint, out, mask, n_bins = _preprocess_input( + image, + footprint, + out, + mask, + out_dtype, + pixel_size, + shift_x=shift_x, + shift_y=shift_y, + ) + + # apply cython function + func( + image, + footprint, + shift_x=shift_x, + shift_y=shift_y, + mask=mask, + out=out, + n_bins=n_bins, + ) + + return out + + +def autolevel(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0): + """Auto-level image using local histogram. + + This filter locally stretches the histogram of gray values to cover the + entire range of values from "white" to "black". + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). 
+ + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image. + + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk, ball + >>> from skimage.filters.rank import autolevel + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> auto = autolevel(img, disk(5)) + >>> auto_vol = autolevel(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._autolevel, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._autolevel_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def equalize(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0): + """Equalize image using local histogram. + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image. 
+ + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk, ball + >>> from skimage.filters.rank import equalize + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> equ = equalize(img, disk(5)) + >>> equ_vol = equalize(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._equalize, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._equalize_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def gradient(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0): + """Return local gradient of an image (i.e. local maximum - local minimum). + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image. 
+ + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk, ball + >>> from skimage.filters.rank import gradient + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> out = gradient(img, disk(5)) + >>> out_vol = gradient(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._gradient, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._gradient_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def maximum(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0): + """Return local maximum of an image. + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image. + + See also + -------- + skimage.morphology.dilation + + Notes + ----- + The lower algorithm complexity makes `skimage.filters.rank.maximum` + more efficient for larger images and footprints. 
+ + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk, ball + >>> from skimage.filters.rank import maximum + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> out = maximum(img, disk(5)) + >>> out_vol = maximum(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._maximum, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._maximum_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def mean(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0): + """Return local mean of an image. + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image. 
+ + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk, ball + >>> from skimage.filters.rank import mean + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> avg = mean(img, disk(5)) + >>> avg_vol = mean(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._mean, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._mean_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def geometric_mean( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0 +): + """Return local geometric mean of an image. + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image. 
+ + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk, ball + >>> from skimage.filters.rank import mean + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> avg = geometric_mean(img, disk(5)) + >>> avg_vol = geometric_mean(volume, ball(5)) + + References + ---------- + .. [1] Gonzalez, R. C. and Woods, R. E. "Digital Image Processing + (3rd Edition)." Prentice-Hall Inc, 2006. + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._geometric_mean, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._geometric_mean_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def subtract_mean( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0 +): + """Return image subtracted from its local mean. + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image. + + Notes + ----- + Subtracting the mean value may introduce underflow. 
To compensate + this potential underflow, the obtained difference is downscaled by + a factor of 2 and shifted by `n_bins / 2 - 1`, the median value of + the local histogram (`n_bins = max(3, image.max()) +1` for 16-bits + images and 256 otherwise). + + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk, ball + >>> from skimage.filters.rank import subtract_mean + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> out = subtract_mean(img, disk(5)) + >>> out_vol = subtract_mean(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._subtract_mean, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._subtract_mean_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def median( + image, + footprint=None, + out=None, + mask=None, + shift_x=0, + shift_y=0, + shift_z=0, +): + """Return local median of an image. + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. If None, a + full square of size 3 is used. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). 
+ + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image. + + See also + -------- + skimage.filters.median : Implementation of a median filtering which handles + images with floating precision. + + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk, ball + >>> from skimage.filters.rank import median + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> med = median(img, disk(5)) + >>> med_vol = median(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if footprint is None: + footprint = ndi.generate_binary_structure(image.ndim, image.ndim) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._median, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._median_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def minimum(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0): + """Return local minimum of an image. + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). 
+ + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image. + + See also + -------- + skimage.morphology.erosion + + Notes + ----- + The lower algorithm complexity makes `skimage.filters.rank.minimum` more + efficient for larger images and footprints. + + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk, ball + >>> from skimage.filters.rank import minimum + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> out = minimum(img, disk(5)) + >>> out_vol = minimum(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._minimum, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._minimum_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def modal(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0): + """Return local mode of an image. + + The mode is the value that appears most often in the local histogram. + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). 
+ + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image. + + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk, ball + >>> from skimage.filters.rank import modal + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> out = modal(img, disk(5)) + >>> out_vol = modal(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._modal, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._modal_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def enhance_contrast( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0 +): + """Enhance contrast of an image. + + This replaces each pixel by the local maximum if the pixel gray value is + closer to the local maximum than the local minimum. Otherwise it is + replaced by the local minimum. + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). 
+ + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image + + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk, ball + >>> from skimage.filters.rank import enhance_contrast + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> out = enhance_contrast(img, disk(5)) + >>> out_vol = enhance_contrast(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._enhance_contrast, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._enhance_contrast_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def pop(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0): + """Return the local number (population) of pixels. + + The number of pixels is defined as the number of pixels which are included + in the footprint and the mask. + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image. 
+
+    Examples
+    --------
+    >>> from skimage.morphology import footprint_rectangle
+    >>> import skimage.filters.rank as rank
+    >>> img = 255 * np.array([[0, 0, 0, 0, 0],
+    ...                       [0, 1, 1, 1, 0],
+    ...                       [0, 1, 1, 1, 0],
+    ...                       [0, 1, 1, 1, 0],
+    ...                       [0, 0, 0, 0, 0]], dtype=np.uint8)
+    >>> rank.pop(img, footprint_rectangle((3, 3)))
+    array([[4, 6, 6, 6, 4],
+           [6, 9, 9, 9, 6],
+           [6, 9, 9, 9, 6],
+           [6, 9, 9, 9, 6],
+           [4, 6, 6, 6, 4]], dtype=uint8)
+
+    """
+
+    np_image = np.asanyarray(image)
+    if np_image.ndim == 2:
+        return _apply_scalar_per_pixel(
+            generic_cy._pop,
+            image,
+            footprint,
+            out=out,
+            mask=mask,
+            shift_x=shift_x,
+            shift_y=shift_y,
+        )
+    elif np_image.ndim == 3:
+        return _apply_scalar_per_pixel_3D(
+            generic_cy._pop_3D,
+            image,
+            footprint,
+            out=out,
+            mask=mask,
+            shift_x=shift_x,
+            shift_y=shift_y,
+            shift_z=shift_z,
+        )
+    raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.')
+
+
+def sum(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
+    """Return the local sum of pixels.
+
+    Note that the sum may overflow depending on the data type of the input
+    array.
+
+    Parameters
+    ----------
+    image : ([P,] M, N) ndarray (uint8, uint16)
+        Input image.
+    footprint : ndarray
+        The neighborhood expressed as an ndarray of 1's and 0's.
+    out : ([P,] M, N) array (same dtype as input)
+        If None, a new array is allocated.
+    mask : ndarray (integer or float), optional
+        Mask array that defines (>0) area of the image included in the local
+        neighborhood. If None, the complete image is used (default).
+    shift_x, shift_y, shift_z : int
+        Offset added to the footprint center point. Shift is bounded to the
+        footprint sizes (center must be inside the given footprint).
+
+    Returns
+    -------
+    out : ([P,] M, N) ndarray (same dtype as input image)
+        Output image.
+
+    Examples
+    --------
+    >>> from skimage.morphology import footprint_rectangle
+    >>> import skimage.filters.rank as rank
+    >>> img = np.array([[0, 0, 0, 0, 0],
+    ...                 [0, 1, 1, 1, 0],
+    ...                 [0, 1, 1, 1, 0],
+    ...                 [0, 1, 1, 1, 0],
+    ...                 [0, 0, 0, 0, 0]], dtype=np.uint8)
+    >>> rank.sum(img, footprint_rectangle((3, 3)))
+    array([[1, 2, 3, 2, 1],
+           [2, 4, 6, 4, 2],
+           [3, 6, 9, 6, 3],
+           [2, 4, 6, 4, 2],
+           [1, 2, 3, 2, 1]], dtype=uint8)
+
+    """
+
+    np_image = np.asanyarray(image)
+    if np_image.ndim == 2:
+        return _apply_scalar_per_pixel(
+            generic_cy._sum,
+            image,
+            footprint,
+            out=out,
+            mask=mask,
+            shift_x=shift_x,
+            shift_y=shift_y,
+        )
+    elif np_image.ndim == 3:
+        return _apply_scalar_per_pixel_3D(
+            generic_cy._sum_3D,
+            image,
+            footprint,
+            out=out,
+            mask=mask,
+            shift_x=shift_x,
+            shift_y=shift_y,
+            shift_z=shift_z,
+        )
+    raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.')
+
+
+def threshold(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0):
+    """Local threshold of an image.
+
+    The resulting binary mask is True if the gray value of the center pixel is
+    greater than the local mean.
+
+    Parameters
+    ----------
+    image : ([P,] M, N) ndarray (uint8, uint16)
+        Input image.
+    footprint : ndarray
+        The neighborhood expressed as an ndarray of 1's and 0's.
+    out : ([P,] M, N) array (same dtype as input)
+        If None, a new array is allocated.
+    mask : ndarray (integer or float), optional
+        Mask array that defines (>0) area of the image included in the local
+        neighborhood. If None, the complete image is used (default).
+    shift_x, shift_y, shift_z : int
+        Offset added to the footprint center point. Shift is bounded to the
+        footprint sizes (center must be inside the given footprint).
+
+    Returns
+    -------
+    out : ([P,] M, N) ndarray (same dtype as input image)
+        Output image.
+
+    Examples
+    --------
+    >>> from skimage.morphology import footprint_rectangle
+    >>> from skimage.filters.rank import threshold
+    >>> img = 255 * np.array([[0, 0, 0, 0, 0],
+    ...                       [0, 1, 1, 1, 0],
+    ...                       [0, 1, 1, 1, 0],
+    ...                       [0, 1, 1, 1, 0],
+    ...                       [0, 0, 0, 0, 0]], dtype=np.uint8)
+    >>> threshold(img, footprint_rectangle((3, 3)))
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 0, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]], dtype=uint8)
+
+    """
+
+    np_image = np.asanyarray(image)
+    if np_image.ndim == 2:
+        return _apply_scalar_per_pixel(
+            generic_cy._threshold,
+            image,
+            footprint,
+            out=out,
+            mask=mask,
+            shift_x=shift_x,
+            shift_y=shift_y,
+        )
+    elif np_image.ndim == 3:
+        return _apply_scalar_per_pixel_3D(
+            generic_cy._threshold_3D,
+            image,
+            footprint,
+            out=out,
+            mask=mask,
+            shift_x=shift_x,
+            shift_y=shift_y,
+            shift_z=shift_z,
+        )
+    raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.')
+
+
+def noise_filter(
+    image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0
+):
+    """Noise feature.
+
+    Parameters
+    ----------
+    image : ([P,] M, N) ndarray (uint8, uint16)
+        Input image.
+    footprint : ndarray
+        The neighborhood expressed as an ndarray of 1's and 0's.
+    out : ([P,] M, N) array (same dtype as input)
+        If None, a new array is allocated.
+    mask : ndarray (integer or float), optional
+        Mask array that defines (>0) area of the image included in the local
+        neighborhood. If None, the complete image is used (default).
+    shift_x, shift_y, shift_z : int
+        Offset added to the footprint center point. Shift is bounded to the
+        footprint sizes (center must be inside the given footprint).
+
+    References
+    ----------
+    .. [1] N. Hashimoto et al. Referenceless image quality evaluation
+           for whole slide imaging. J Pathol Inform 2012;3:9.
+
+    Returns
+    -------
+    out : ([P,] M, N) ndarray (same dtype as input image)
+        Output image.
+ + Examples + -------- + >>> from skimage import data + >>> from skimage.morphology import disk, ball + >>> from skimage.filters.rank import noise_filter + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> out = noise_filter(img, disk(5)) + >>> out_vol = noise_filter(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if _footprint_is_sequence(footprint): + raise ValueError( + "footprint sequences are not currently supported by rank filters" + ) + if np_image.ndim == 2: + # ensure that the central pixel in the footprint is empty + centre_r = int(footprint.shape[0] / 2) + shift_y + centre_c = int(footprint.shape[1] / 2) + shift_x + # make a local copy + footprint_cpy = footprint.copy() + footprint_cpy[centre_r, centre_c] = 0 + + return _apply_scalar_per_pixel( + generic_cy._noise_filter, + image, + footprint_cpy, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + # ensure that the central pixel in the footprint is empty + centre_r = int(footprint.shape[0] / 2) + shift_y + centre_c = int(footprint.shape[1] / 2) + shift_x + centre_z = int(footprint.shape[2] / 2) + shift_z + # make a local copy + footprint_cpy = footprint.copy() + footprint_cpy[centre_r, centre_c, centre_z] = 0 + + return _apply_scalar_per_pixel_3D( + generic_cy._noise_filter_3D, + image, + footprint_cpy, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def entropy(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0): + """Local entropy. + + The entropy is computed using base 2 logarithm i.e. the filter returns the + minimum number of bits needed to encode the local gray level + distribution. + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. 
+ footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + + Returns + ------- + out : ([P,] M, N) ndarray (float) + Output image. + + References + ---------- + .. [1] `https://en.wikipedia.org/wiki/Entropy_(information_theory) `_ + + Examples + -------- + >>> from skimage import data + >>> from skimage.filters.rank import entropy + >>> from skimage.morphology import disk, ball + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> ent = entropy(img, disk(5)) + >>> ent_vol = entropy(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._entropy, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + out_dtype=np.float64, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._entropy_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + out_dtype=np.float64, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def otsu(image, footprint, out=None, mask=None, shift_x=0, shift_y=0, shift_z=0): + """Local Otsu's threshold value for each pixel. + + Parameters + ---------- + image : ([P,] M, N) ndarray (uint8, uint16) + Input image. + footprint : ndarray + The neighborhood expressed as an ndarray of 1's and 0's. + out : ([P,] M, N) array (same dtype as input) + If None, a new array is allocated. 
+ mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y, shift_z : int + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + + Returns + ------- + out : ([P,] M, N) ndarray (same dtype as input image) + Output image. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Otsu's_method + + Examples + -------- + >>> from skimage import data + >>> from skimage.filters.rank import otsu + >>> from skimage.morphology import disk, ball + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> local_otsu = otsu(img, disk(5)) + >>> thresh_image = img >= local_otsu + >>> local_otsu_vol = otsu(volume, ball(5)) + >>> thresh_image_vol = volume >= local_otsu_vol + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._otsu, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._otsu_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') + + +def windowed_histogram( + image, footprint, out=None, mask=None, shift_x=0, shift_y=0, n_bins=None +): + """Normalized sliding window histogram + + Parameters + ---------- + image : 2-D array (integer or float) + Input image. + footprint : 2-D array (integer or float) + The neighborhood expressed as a 2-D array of 1's and 0's. + out : 2-D array (integer or float), optional + If None, a new array is allocated. 
+ mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int, optional + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + n_bins : int or None + The number of histogram bins. Will default to ``image.max() + 1`` + if None is passed. + + Returns + ------- + out : 3-D array (float) + Array of dimensions (H,W,N), where (H,W) are the dimensions of the + input image and N is n_bins or ``image.max() + 1`` if no value is + provided as a parameter. Effectively, each pixel is a N-D feature + vector that is the histogram. The sum of the elements in the feature + vector will be 1, unless no pixels in the window were covered by both + footprint and mask, in which case all elements will be 0. + + Examples + -------- + >>> from skimage import data + >>> from skimage.filters.rank import windowed_histogram + >>> from skimage.morphology import disk, ball + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> hist_img = windowed_histogram(img, disk(5)) + + """ + + if n_bins is None: + n_bins = int(image.max()) + 1 + + return _apply_vector_per_pixel( + generic_cy._windowed_hist, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + out_dtype=np.float64, + pixel_size=n_bins, + ) + + +def majority( + image, + footprint, + *, + out=None, + mask=None, + shift_x=0, + shift_y=0, + shift_z=0, +): + """Assign to each pixel the most common value within its neighborhood. + + Parameters + ---------- + image : ndarray + Image array (uint8, uint16 array). + footprint : 2-D array (integer or float) + The neighborhood expressed as a 2-D array of 1's and 0's. 
+ out : ndarray (integer or float), optional + If None, a new array will be allocated. + mask : ndarray (integer or float), optional + Mask array that defines (>0) area of the image included in the local + neighborhood. If None, the complete image is used (default). + shift_x, shift_y : int, optional + Offset added to the footprint center point. Shift is bounded to the + footprint sizes (center must be inside the given footprint). + + Returns + ------- + out : 2-D array (same dtype as input image) + Output image. + + Examples + -------- + >>> from skimage import data + >>> from skimage.filters.rank import majority + >>> from skimage.morphology import disk, ball + >>> import numpy as np + >>> img = data.camera() + >>> rng = np.random.default_rng() + >>> volume = rng.integers(0, 255, size=(10,10,10), dtype=np.uint8) + >>> maj_img = majority(img, disk(5)) + >>> maj_img_vol = majority(volume, ball(5)) + + """ + + np_image = np.asanyarray(image) + if np_image.ndim == 2: + return _apply_scalar_per_pixel( + generic_cy._majority, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + ) + elif np_image.ndim == 3: + return _apply_scalar_per_pixel_3D( + generic_cy._majority_3D, + image, + footprint, + out=out, + mask=mask, + shift_x=shift_x, + shift_y=shift_y, + shift_z=shift_z, + ) + raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.') diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/tests/__init__.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/tests/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a267a7a97f50a788bff4f3eb1251287a86004ffc Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/tests/test_rank.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/tests/test_rank.py new file mode 100644 index 0000000000000000000000000000000000000000..2f82c8b12b15001be48e081086b0cf80c6766805 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/tests/test_rank.py @@ -0,0 +1,1133 @@ +import inspect + +import numpy as np +import pytest + +from skimage import data, morphology, util +from skimage._shared._warnings import expected_warnings +from skimage._shared.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_equal, + fetch, + run_in_parallel, +) +from skimage.filters import rank +from skimage.filters.rank import __all__ as all_rank_filters +from skimage.filters.rank import __3Dfilters as _3d_rank_filters +from skimage.filters.rank import subtract_mean +from skimage.morphology import ball, disk, gray +from skimage.util import img_as_float, img_as_ubyte + + +def test_otsu_edge_case(): + # This is an edge case that causes OTSU to appear to misbehave + # Pixel [1, 1] may take a value of of 41 or 81. Both should be considered + # valid. The value will change depending on the particular implementation + # of OTSU. 
+ # To better understand, see + # https://mybinder.org/v2/gist/hmaarrfk/4afae1cfded1d78e44c9e4f58285d552/master + + footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8) + + img = np.array([[0, 41, 0], [30, 81, 106], [0, 147, 0]], dtype=np.uint8) + + result = rank.otsu(img, footprint) + assert result[1, 1] in [41, 81] + + img = np.array([[0, 214, 0], [229, 104, 141], [0, 172, 0]], dtype=np.uint8) + result = rank.otsu(img, footprint) + assert result[1, 1] in [141, 172] + + +@pytest.mark.parametrize("dtype", [np.uint8, np.uint16]) +def test_subtract_mean_underflow_correction(dtype): + # Input: [10, 10, 10] + footprint = np.ones((1, 3)) + arr = np.array([[10, 10, 10]], dtype=dtype) + result = subtract_mean(arr, footprint) + + if dtype == np.uint8: + expected_val = 127 + else: + expected_val = (arr.max() + 1) // 2 - 1 + + assert np.all(result == expected_val) + + +# Note: Explicitly read all values into a dict. Otherwise, stochastic test +# failures related to I/O can occur during parallel test cases. +ref_data = dict(np.load(fetch("data/rank_filter_tests.npz"))) +ref_data_3d = dict(np.load(fetch('data/rank_filters_tests_3d.npz'))) + + +@pytest.mark.parametrize( + 'func', + [ + rank.autolevel, + rank.equalize, + rank.gradient, + rank.maximum, + rank.mean, + rank.geometric_mean, + rank.subtract_mean, + rank.median, + rank.minimum, + rank.modal, + rank.enhance_contrast, + rank.pop, + rank.sum, + rank.threshold, + rank.noise_filter, + rank.entropy, + rank.otsu, + rank.majority, + ], +) +def test_1d_input_raises_error(func): + image = np.arange(10) + footprint = disk(3) + with pytest.raises(ValueError, match='`image` must have 2 or 3 dimensions, got 1'): + func(image, footprint) + + +class TestRank: + def setup_method(self): + np.random.seed(0) + # This image is used along with @run_in_parallel + # to ensure that the same seed is used for each thread. 
+ self.image = np.random.rand(25, 25) + np.random.seed(0) + self.volume = np.random.rand(10, 10, 10) + # Set again the seed for the other tests. + np.random.seed(0) + self.footprint = morphology.disk(1) + self.footprint_3d = morphology.ball(1) + self.refs = ref_data + self.refs_3d = ref_data_3d + + @pytest.mark.parametrize('outdt', [None, np.float32, np.float64]) + @pytest.mark.parametrize('filter', all_rank_filters) + def test_rank_filter(self, filter, outdt): + @run_in_parallel(warnings_matching=['Possible precision loss']) + def check(): + expected = self.refs[filter] + if outdt is not None: + out = np.zeros_like(expected, dtype=outdt) + else: + out = None + result = getattr(rank, filter)(self.image, self.footprint, out=out) + if filter == "entropy": + # There may be some arch dependent rounding errors + # See the discussions in + # https://github.com/scikit-image/scikit-image/issues/3091 + # https://github.com/scikit-image/scikit-image/issues/2528 + if outdt is not None: + # Adjust expected precision + expected = expected.astype(outdt) + assert_allclose(expected, result, atol=0, rtol=1e-15) + elif filter == "otsu": + # OTSU May also have some optimization dependent failures + # See the discussions in + # https://github.com/scikit-image/scikit-image/issues/3091 + # Pixel 3, 5 was found to be problematic. It can take either + # a value of 41 or 81 depending on the specific optimizations + # used. + assert result[3, 5] in [41, 81] + result[3, 5] = 81 + # Pixel [19, 18] is also found to be problematic for the same + # reason. + assert result[19, 18] in [141, 172] + result[19, 18] = 172 + assert_array_almost_equal(expected, result) + else: + if outdt is not None: + # Avoid rounding issues comparing to expected result. + # Take modulus first to avoid undefined behavior for + # float->uint8 conversions. 
+ result = np.mod(result, 256.0).astype(expected.dtype) + assert_array_almost_equal(expected, result) + + check() + + @pytest.mark.parametrize('filter', all_rank_filters) + def test_rank_filter_footprint_sequence_unsupported(self, filter): + footprint_sequence = morphology.diamond(3, decomposition="sequence") + with pytest.raises(ValueError): + getattr(rank, filter)(self.image.astype(np.uint8), footprint_sequence) + + @pytest.mark.parametrize('outdt', [None, np.float32, np.float64]) + @pytest.mark.parametrize('filter', _3d_rank_filters) + def test_rank_filters_3D(self, filter, outdt): + @run_in_parallel(warnings_matching=['Possible precision loss']) + def check(): + expected = self.refs_3d[filter] + if outdt is not None: + out = np.zeros_like(expected, dtype=outdt) + else: + out = None + result = getattr(rank, filter)(self.volume, self.footprint_3d, out=out) + if outdt is not None: + # Avoid rounding issues comparing to expected result + if filter == 'sum': + # sum test data seems to be 8-bit disguised as 16-bit + datadt = np.uint8 + else: + datadt = expected.dtype + # Take modulus first to avoid undefined behavior for + # float->uint8 conversions. 
+ result = np.mod(result, 256.0).astype(datadt) + assert_array_almost_equal(expected, result) + + check() + + def test_random_sizes(self): + # make sure the size is not a problem + + elem = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.uint8) + for m, n in np.random.randint(1, 101, size=(10, 2)): + mask = np.ones((m, n), dtype=np.uint8) + + image8 = np.ones((m, n), dtype=np.uint8) + out8 = np.empty_like(image8) + rank.mean( + image=image8, footprint=elem, mask=mask, out=out8, shift_x=0, shift_y=0 + ) + assert_equal(image8.shape, out8.shape) + rank.mean( + image=image8, + footprint=elem, + mask=mask, + out=out8, + shift_x=+1, + shift_y=+1, + ) + assert_equal(image8.shape, out8.shape) + + rank.geometric_mean( + image=image8, footprint=elem, mask=mask, out=out8, shift_x=0, shift_y=0 + ) + assert_equal(image8.shape, out8.shape) + rank.geometric_mean( + image=image8, + footprint=elem, + mask=mask, + out=out8, + shift_x=+1, + shift_y=+1, + ) + assert_equal(image8.shape, out8.shape) + + image16 = np.ones((m, n), dtype=np.uint16) + out16 = np.empty_like(image8, dtype=np.uint16) + rank.mean( + image=image16, + footprint=elem, + mask=mask, + out=out16, + shift_x=0, + shift_y=0, + ) + assert_equal(image16.shape, out16.shape) + rank.mean( + image=image16, + footprint=elem, + mask=mask, + out=out16, + shift_x=+1, + shift_y=+1, + ) + assert_equal(image16.shape, out16.shape) + + rank.geometric_mean( + image=image16, + footprint=elem, + mask=mask, + out=out16, + shift_x=0, + shift_y=0, + ) + assert_equal(image16.shape, out16.shape) + rank.geometric_mean( + image=image16, + footprint=elem, + mask=mask, + out=out16, + shift_x=+1, + shift_y=+1, + ) + assert_equal(image16.shape, out16.shape) + + rank.mean_percentile( + image=image16, + mask=mask, + out=out16, + footprint=elem, + shift_x=0, + shift_y=0, + p0=0.1, + p1=0.9, + ) + assert_equal(image16.shape, out16.shape) + rank.mean_percentile( + image=image16, + mask=mask, + out=out16, + footprint=elem, + shift_x=+1, + 
shift_y=+1, + p0=0.1, + p1=0.9, + ) + assert_equal(image16.shape, out16.shape) + + def test_compare_with_gray_dilation(self): + # compare the result of maximum filter with dilate + + image = (np.random.rand(100, 100) * 256).astype(np.uint8) + out = np.empty_like(image) + mask = np.ones(image.shape, dtype=np.uint8) + + for r in range(3, 20, 2): + elem = np.ones((r, r), dtype=np.uint8) + rank.maximum(image=image, footprint=elem, out=out, mask=mask) + cm = gray.dilation(image, elem) + assert_equal(out, cm) + + def test_compare_with_gray_erosion(self): + # compare the result of maximum filter with erode + + image = (np.random.rand(100, 100) * 256).astype(np.uint8) + out = np.empty_like(image) + mask = np.ones(image.shape, dtype=np.uint8) + + for r in range(3, 20, 2): + elem = np.ones((r, r), dtype=np.uint8) + rank.minimum(image=image, footprint=elem, out=out, mask=mask) + cm = gray.erosion(image, elem) + assert_equal(out, cm) + + def test_bitdepth(self): + # test the different bit depth for rank16 + + elem = np.ones((3, 3), dtype=np.uint8) + out = np.empty((100, 100), dtype=np.uint16) + mask = np.ones((100, 100), dtype=np.uint8) + + for i in range(8, 13): + max_val = 2**i - 1 + image = np.full((100, 100), max_val, dtype=np.uint16) + if i > 10: + expected = ["Bad rank filter performance"] + else: + expected = [] + with expected_warnings(expected): + rank.mean_percentile( + image=image, + footprint=elem, + mask=mask, + out=out, + shift_x=0, + shift_y=0, + p0=0.1, + p1=0.9, + ) + + def test_population(self): + # check the number of valid pixels in the neighborhood + + image = np.zeros((5, 5), dtype=np.uint8) + elem = np.ones((3, 3), dtype=np.uint8) + out = np.empty_like(image) + mask = np.ones(image.shape, dtype=np.uint8) + + rank.pop(image=image, footprint=elem, out=out, mask=mask) + r = np.array( + [ + [4, 6, 6, 6, 4], + [6, 9, 9, 9, 6], + [6, 9, 9, 9, 6], + [6, 9, 9, 9, 6], + [4, 6, 6, 6, 4], + ] + ) + assert_equal(r, out) + + def test_structuring_element8(self): + # 
check the output for a custom footprint + + r = np.array( + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 255, 0, 0, 0], + [0, 0, 255, 255, 255, 0], + [0, 0, 0, 255, 255, 0], + [0, 0, 0, 0, 0, 0], + ] + ) + + # 8-bit + image = np.zeros((6, 6), dtype=np.uint8) + image[2, 2] = 255 + elem = np.asarray([[1, 1, 0], [1, 1, 1], [0, 0, 1]], dtype=np.uint8) + out = np.empty_like(image) + mask = np.ones(image.shape, dtype=np.uint8) + + rank.maximum( + image=image, footprint=elem, out=out, mask=mask, shift_x=1, shift_y=1 + ) + assert_equal(r, out) + + # 16-bit + image = np.zeros((6, 6), dtype=np.uint16) + image[2, 2] = 255 + out = np.empty_like(image) + + rank.maximum( + image=image, footprint=elem, out=out, mask=mask, shift_x=1, shift_y=1 + ) + assert_equal(r, out) + + def test_pass_on_bitdepth(self): + # should pass because data bitdepth is not too high for the function + + image = np.full((100, 100), 2**11, dtype=np.uint16) + elem = np.ones((3, 3), dtype=np.uint8) + out = np.empty_like(image) + mask = np.ones(image.shape, dtype=np.uint8) + with expected_warnings(["Bad rank filter performance"]): + rank.maximum(image=image, footprint=elem, out=out, mask=mask) + + def test_inplace_output(self): + # rank filters are not supposed to filter inplace + + footprint = disk(20) + image = (np.random.rand(500, 500) * 256).astype(np.uint8) + out = image + with pytest.raises(NotImplementedError): + rank.mean(image, footprint, out=out) + + def test_compare_autolevels(self): + # compare autolevel and percentile autolevel with p0=0.0 and p1=1.0 + # should returns the same arrays + + image = util.img_as_ubyte(data.camera()) + + footprint = disk(20) + loc_autolevel = rank.autolevel(image, footprint=footprint) + loc_perc_autolevel = rank.autolevel_percentile( + image, footprint=footprint, p0=0.0, p1=1.0 + ) + + assert_equal(loc_autolevel, loc_perc_autolevel) + + def test_compare_autolevels_16bit(self): + # compare autolevel(16-bit) and percentile autolevel(16-bit) with + # p0=0.0 and 
p1=1.0 should returns the same arrays + + image = data.camera().astype(np.uint16) * 4 + + footprint = disk(20) + loc_autolevel = rank.autolevel(image, footprint=footprint) + loc_perc_autolevel = rank.autolevel_percentile( + image, footprint=footprint, p0=0.0, p1=1.0 + ) + + assert_equal(loc_autolevel, loc_perc_autolevel) + + def test_compare_ubyte_vs_float(self): + # Create signed int8 image that and convert it to uint8 + image_uint = img_as_ubyte(data.camera()[:50, :50]) + image_float = img_as_float(image_uint) + + methods = [ + 'autolevel', + 'equalize', + 'gradient', + 'threshold', + 'subtract_mean', + 'enhance_contrast', + 'pop', + ] + + for method in methods: + func = getattr(rank, method) + out_u = func(image_uint, disk(3)) + with expected_warnings(["Possible precision loss"]): + out_f = func(image_float, disk(3)) + assert_equal(out_u, out_f) + + def test_compare_ubyte_vs_float_3d(self): + # Create signed int8 volume that and convert it to uint8 + np.random.seed(0) + volume_uint = np.random.randint(0, high=256, size=(10, 20, 30), dtype=np.uint8) + volume_float = img_as_float(volume_uint) + + methods_3d = [ + 'equalize', + 'otsu', + 'autolevel', + 'gradient', + 'majority', + 'maximum', + 'mean', + 'geometric_mean', + 'subtract_mean', + 'median', + 'minimum', + 'modal', + 'enhance_contrast', + 'pop', + 'sum', + 'threshold', + 'noise_filter', + 'entropy', + ] + + for method in methods_3d: + func = getattr(rank, method) + out_u = func(volume_uint, ball(3)) + with expected_warnings(["Possible precision loss"]): + out_f = func(volume_float, ball(3)) + assert_equal(out_u, out_f) + + def test_compare_8bit_unsigned_vs_signed(self): + # filters applied on 8-bit image or 16-bit image (having only real 8-bit + # of dynamic) should be identical + + # Create signed int8 image that and convert it to uint8 + image = img_as_ubyte(data.camera())[::2, ::2] + image[image > 127] = 0 + image_s = image.astype(np.int8) + image_u = img_as_ubyte(image_s) + assert_equal(image_u, 
img_as_ubyte(image_s)) + + methods = [ + 'autolevel', + 'equalize', + 'gradient', + 'maximum', + 'mean', + 'geometric_mean', + 'subtract_mean', + 'median', + 'minimum', + 'modal', + 'enhance_contrast', + 'pop', + 'threshold', + ] + + for method in methods: + func = getattr(rank, method) + out_u = func(image_u, disk(3)) + with expected_warnings(["Possible precision loss"]): + out_s = func(image_s, disk(3)) + assert_equal(out_u, out_s) + + def test_compare_8bit_unsigned_vs_signed_3d(self): + # filters applied on 8-bit volume or 16-bit volume (having only real 8-bit + # of dynamic) should be identical + + # Create signed int8 volume that and convert it to uint8 + np.random.seed(0) + volume_s = np.random.randint(0, high=127, size=(10, 20, 30), dtype=np.int8) + volume_u = img_as_ubyte(volume_s) + assert_equal(volume_u, img_as_ubyte(volume_s)) + + methods_3d = [ + 'equalize', + 'otsu', + 'autolevel', + 'gradient', + 'majority', + 'maximum', + 'mean', + 'geometric_mean', + 'subtract_mean', + 'median', + 'minimum', + 'modal', + 'enhance_contrast', + 'pop', + 'sum', + 'threshold', + 'noise_filter', + 'entropy', + ] + + for method in methods_3d: + func = getattr(rank, method) + out_u = func(volume_u, ball(3)) + with expected_warnings(["Possible precision loss"]): + out_s = func(volume_s, ball(3)) + assert_equal(out_u, out_s) + + @pytest.mark.parametrize( + 'method', + [ + 'autolevel', + 'equalize', + 'gradient', + 'maximum', + 'mean', + 'subtract_mean', + 'median', + 'minimum', + 'modal', + 'enhance_contrast', + 'pop', + 'threshold', + ], + ) + def test_compare_8bit_vs_16bit(self, method): + # filters applied on 8-bit image or 16-bit image (having only real 8-bit + # of dynamic) should be identical + image8 = util.img_as_ubyte(data.camera())[::2, ::2] + image16 = image8.astype(np.uint16) + assert_equal(image8, image16) + + np.random.seed(0) + volume8 = np.random.randint(128, high=256, size=(10, 10, 10), dtype=np.uint8) + volume16 = volume8.astype(np.uint16) + + methods_3d = 
[ + 'equalize', + 'otsu', + 'autolevel', + 'gradient', + 'majority', + 'maximum', + 'mean', + 'geometric_mean', + 'subtract_mean', + 'median', + 'minimum', + 'modal', + 'enhance_contrast', + 'pop', + 'sum', + 'threshold', + 'noise_filter', + 'entropy', + ] + + func = getattr(rank, method) + f8 = func(image8, disk(3)) + f16 = func(image16, disk(3)) + assert_equal(f8, f16) + + if method in methods_3d: + f8 = func(volume8, ball(3)) + f16 = func(volume16, ball(3)) + + assert_equal(f8, f16) + + def test_trivial_footprint8(self): + # check that min, max and mean returns identity if footprint + # contains only central pixel + + image = np.zeros((5, 5), dtype=np.uint8) + out = np.zeros_like(image) + mask = np.ones_like(image, dtype=np.uint8) + image[2, 2] = 255 + image[2, 3] = 128 + image[1, 2] = 16 + + elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8) + rank.mean(image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0) + assert_equal(image, out) + rank.geometric_mean( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + rank.minimum( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + rank.maximum( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + + def test_trivial_footprint16(self): + # check that min, max and mean returns identity if footprint + # contains only central pixel + + image = np.zeros((5, 5), dtype=np.uint16) + out = np.zeros_like(image) + mask = np.ones_like(image, dtype=np.uint8) + image[2, 2] = 255 + image[2, 3] = 128 + image[1, 2] = 16 + + elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8) + rank.mean(image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0) + assert_equal(image, out) + rank.geometric_mean( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + rank.minimum( + image=image, 
footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + rank.maximum( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + + def test_smallest_footprint8(self): + # check that min, max and mean returns identity if footprint + # contains only central pixel + + image = np.zeros((5, 5), dtype=np.uint8) + out = np.zeros_like(image) + mask = np.ones_like(image, dtype=np.uint8) + image[2, 2] = 255 + image[2, 3] = 128 + image[1, 2] = 16 + + elem = np.array([[1]], dtype=np.uint8) + rank.mean(image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0) + assert_equal(image, out) + rank.minimum( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + rank.maximum( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + + def test_smallest_footprint16(self): + # check that min, max and mean returns identity if footprint + # contains only central pixel + + image = np.zeros((5, 5), dtype=np.uint16) + out = np.zeros_like(image) + mask = np.ones_like(image, dtype=np.uint8) + image[2, 2] = 255 + image[2, 3] = 128 + image[1, 2] = 16 + + elem = np.array([[1]], dtype=np.uint8) + rank.mean(image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0) + assert_equal(image, out) + rank.geometric_mean( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + rank.minimum( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + rank.maximum( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + + def test_empty_footprint(self): + # check that min, max and mean returns zeros if footprint is empty + + image = np.zeros((5, 5), dtype=np.uint16) + out = np.zeros_like(image) + mask = np.ones_like(image, dtype=np.uint8) + res = np.zeros_like(image) + 
image[2, 2] = 255 + image[2, 3] = 128 + image[1, 2] = 16 + + elem = np.array([[0, 0, 0], [0, 0, 0]], dtype=np.uint8) + + rank.mean(image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0) + assert_equal(res, out) + rank.geometric_mean( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(res, out) + rank.minimum( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(res, out) + rank.maximum( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(res, out) + + def test_otsu(self): + # test the local Otsu segmentation on a synthetic image + # (left to right ramp * sinus) + + test = np.tile( + [ + 128, + 145, + 103, + 127, + 165, + 83, + 127, + 185, + 63, + 127, + 205, + 43, + 127, + 225, + 23, + 127, + ], + (16, 1), + ) + test = test.astype(np.uint8) + res = np.tile([1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1], (16, 1)) + footprint = np.ones((6, 6), dtype=np.uint8) + th = 1 * (test >= rank.otsu(test, footprint)) + assert_equal(th, res) + + def test_entropy(self): + # verify that entropy is coherent with bitdepth of the input data + + footprint = np.ones((16, 16), dtype=np.uint8) + # 1 bit per pixel + data = np.tile(np.asarray([0, 1]), (100, 100)).astype(np.uint8) + assert np.max(rank.entropy(data, footprint)) == 1 + + # 2 bit per pixel + data = np.tile(np.asarray([[0, 1], [2, 3]]), (10, 10)).astype(np.uint8) + assert np.max(rank.entropy(data, footprint)) == 2 + + # 3 bit per pixel + data = np.tile(np.asarray([[0, 1, 2, 3], [4, 5, 6, 7]]), (10, 10)).astype( + np.uint8 + ) + assert np.max(rank.entropy(data, footprint)) == 3 + + # 4 bit per pixel + data = np.tile(np.reshape(np.arange(16), (4, 4)), (10, 10)).astype(np.uint8) + assert np.max(rank.entropy(data, footprint)) == 4 + + # 6 bit per pixel + data = np.tile(np.reshape(np.arange(64), (8, 8)), (10, 10)).astype(np.uint8) + assert np.max(rank.entropy(data, footprint)) == 6 + + # 8-bit per 
pixel + data = np.tile(np.reshape(np.arange(256), (16, 16)), (10, 10)).astype(np.uint8) + assert np.max(rank.entropy(data, footprint)) == 8 + + # 12 bit per pixel + footprint = np.ones((64, 64), dtype=np.uint8) + data = np.zeros((65, 65), dtype=np.uint16) + data[:64, :64] = np.reshape(np.arange(4096), (64, 64)) + with expected_warnings(['Bad rank filter performance']): + assert np.max(rank.entropy(data, footprint)) == 12 + + # make sure output is of dtype double + with expected_warnings(['Bad rank filter performance']): + out = rank.entropy(data, np.ones((16, 16), dtype=np.uint8)) + assert out.dtype == np.float64 + + def test_footprint_dtypes(self): + image = np.zeros((5, 5), dtype=np.uint8) + out = np.zeros_like(image) + mask = np.ones_like(image, dtype=np.uint8) + image[2, 2] = 255 + image[2, 3] = 128 + image[1, 2] = 16 + + for dtype in ( + bool, + np.uint8, + np.uint16, + np.int32, + np.int64, + np.float32, + np.float64, + ): + elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=dtype) + rank.mean( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + rank.geometric_mean( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + rank.mean_percentile( + image=image, footprint=elem, out=out, mask=mask, shift_x=0, shift_y=0 + ) + assert_equal(image, out) + + def test_16bit(self): + image = np.zeros((21, 21), dtype=np.uint16) + footprint = np.ones((3, 3), dtype=np.uint8) + + for bitdepth in range(17): + value = 2**bitdepth - 1 + image[10, 10] = value + if bitdepth >= 11: + expected = ['Bad rank filter performance'] + else: + expected = [] + with expected_warnings(expected): + assert rank.minimum(image, footprint)[10, 10] == 0 + assert rank.maximum(image, footprint)[10, 10] == value + mean_val = rank.mean(image, footprint)[10, 10] + assert mean_val == int(value / footprint.size) + + def test_bilateral(self): + image = np.zeros((21, 21), dtype=np.uint16) + footprint 
= np.ones((3, 3), dtype=np.uint8) + + image[10, 10] = 1000 + image[10, 11] = 1010 + image[10, 9] = 900 + + kwargs = dict(s0=1, s1=1) + assert rank.mean_bilateral(image, footprint, **kwargs)[10, 10] == 1000 + assert rank.pop_bilateral(image, footprint, **kwargs)[10, 10] == 1 + kwargs = dict(s0=11, s1=11) + assert rank.mean_bilateral(image, footprint, **kwargs)[10, 10] == 1005 + assert rank.pop_bilateral(image, footprint, **kwargs)[10, 10] == 2 + + def test_percentile_min(self): + # check that percentile p0 = 0 is identical to local min + img = data.camera() + img16 = img.astype(np.uint16) + footprint = disk(15) + # check for 8bit + img_p0 = rank.percentile(img, footprint=footprint, p0=0) + img_min = rank.minimum(img, footprint=footprint) + assert_equal(img_p0, img_min) + # check for 16bit + img_p0 = rank.percentile(img16, footprint=footprint, p0=0) + img_min = rank.minimum(img16, footprint=footprint) + assert_equal(img_p0, img_min) + + def test_percentile_max(self): + # check that percentile p0 = 1 is identical to local max + img = data.camera() + img16 = img.astype(np.uint16) + footprint = disk(15) + # check for 8bit + img_p0 = rank.percentile(img, footprint=footprint, p0=1.0) + img_max = rank.maximum(img, footprint=footprint) + assert_equal(img_p0, img_max) + # check for 16bit + img_p0 = rank.percentile(img16, footprint=footprint, p0=1.0) + img_max = rank.maximum(img16, footprint=footprint) + assert_equal(img_p0, img_max) + + def test_percentile_median(self): + # check that percentile p0 = 0.5 is identical to local median + img = data.camera() + img16 = img.astype(np.uint16) + footprint = disk(15) + # check for 8bit + img_p0 = rank.percentile(img, footprint=footprint, p0=0.5) + img_max = rank.median(img, footprint=footprint) + assert_equal(img_p0, img_max) + # check for 16bit + img_p0 = rank.percentile(img16, footprint=footprint, p0=0.5) + img_max = rank.median(img16, footprint=footprint) + assert_equal(img_p0, img_max) + + def test_sum(self): + # check the number 
of valid pixels in the neighborhood + + image8 = np.array( + [ + [0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0], + ], + dtype=np.uint8, + ) + image16 = 400 * np.array( + [ + [0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0], + ], + dtype=np.uint16, + ) + elem = np.ones((3, 3), dtype=np.uint8) + out8 = np.empty_like(image8) + out16 = np.empty_like(image16) + mask = np.ones(image8.shape, dtype=np.uint8) + + r = np.array( + [ + [1, 2, 3, 2, 1], + [2, 4, 6, 4, 2], + [3, 6, 9, 6, 3], + [2, 4, 6, 4, 2], + [1, 2, 3, 2, 1], + ], + dtype=np.uint8, + ) + rank.sum(image=image8, footprint=elem, out=out8, mask=mask) + assert_equal(r, out8) + rank.sum_percentile( + image=image8, footprint=elem, out=out8, mask=mask, p0=0.0, p1=1.0 + ) + assert_equal(r, out8) + rank.sum_bilateral( + image=image8, footprint=elem, out=out8, mask=mask, s0=255, s1=255 + ) + assert_equal(r, out8) + + r = 400 * np.array( + [ + [1, 2, 3, 2, 1], + [2, 4, 6, 4, 2], + [3, 6, 9, 6, 3], + [2, 4, 6, 4, 2], + [1, 2, 3, 2, 1], + ], + dtype=np.uint16, + ) + rank.sum(image=image16, footprint=elem, out=out16, mask=mask) + assert_equal(r, out16) + rank.sum_percentile( + image=image16, footprint=elem, out=out16, mask=mask, p0=0.0, p1=1.0 + ) + assert_equal(r, out16) + rank.sum_bilateral( + image=image16, footprint=elem, out=out16, mask=mask, s0=1000, s1=1000 + ) + assert_equal(r, out16) + + def test_windowed_histogram(self): + # check the number of valid pixels in the neighborhood + + image8 = np.array( + [ + [0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0], + ], + dtype=np.uint8, + ) + elem = np.ones((3, 3), dtype=np.uint8) + outf = np.empty(image8.shape + (2,), dtype=float) + mask = np.ones(image8.shape, dtype=np.uint8) + + # Population so we can normalize the expected output while maintaining + # code readability + pop = np.array( + [ + [4, 6, 6, 6, 4], + [6, 9, 9, 9, 6], + [6, 9, 9, 9, 
6], + [6, 9, 9, 9, 6], + [4, 6, 6, 6, 4], + ], + dtype=float, + ) + + r0 = ( + np.array( + [ + [3, 4, 3, 4, 3], + [4, 5, 3, 5, 4], + [3, 3, 0, 3, 3], + [4, 5, 3, 5, 4], + [3, 4, 3, 4, 3], + ], + dtype=float, + ) + / pop + ) + r1 = ( + np.array( + [ + [1, 2, 3, 2, 1], + [2, 4, 6, 4, 2], + [3, 6, 9, 6, 3], + [2, 4, 6, 4, 2], + [1, 2, 3, 2, 1], + ], + dtype=float, + ) + / pop + ) + rank.windowed_histogram(image=image8, footprint=elem, out=outf, mask=mask) + assert_equal(r0, outf[:, :, 0]) + assert_equal(r1, outf[:, :, 1]) + + # Test n_bins parameter + larger_output = rank.windowed_histogram( + image=image8, footprint=elem, mask=mask, n_bins=5 + ) + assert larger_output.shape[2] == 5 + + def test_median_default_value(self): + a = np.zeros((3, 3), dtype=np.uint8) + a[1] = 1 + full_footprint = np.ones((3, 3), dtype=np.uint8) + assert_equal(rank.median(a), rank.median(a, full_footprint)) + assert rank.median(a)[1, 1] == 0 + assert rank.median(a, disk(1))[1, 1] == 1 + + def test_majority(self): + img = data.camera() + elem = np.ones((3, 3), dtype=np.uint8) + expected = rank.windowed_histogram(img, elem).argmax(-1).astype(np.uint8) + assert_equal(expected, rank.majority(img, elem)) + + def test_output_same_dtype(self): + image = (np.random.rand(100, 100) * 256).astype(np.uint8) + out = np.empty_like(image) + mask = np.ones(image.shape, dtype=np.uint8) + elem = np.ones((3, 3), dtype=np.uint8) + rank.maximum(image=image, footprint=elem, out=out, mask=mask) + assert_equal(image.dtype, out.dtype) + + def test_input_boolean_dtype(self): + image = (np.random.rand(100, 100) * 256).astype(bool) + elem = np.ones((3, 3), dtype=bool) + with pytest.raises(ValueError): + rank.maximum(image=image, footprint=elem) + + @pytest.mark.parametrize("filter", all_rank_filters) + @pytest.mark.parametrize("shift_name", ["shift_x", "shift_y"]) + @pytest.mark.parametrize("shift_value", [False, True]) + def test_rank_filters_boolean_shift(self, filter, shift_name, shift_value): + """Test warning if 
    @pytest.mark.parametrize("filter", _3d_rank_filters)
    @pytest.mark.parametrize("shift_name", ["shift_x", "shift_y", "shift_z"])
    @pytest.mark.parametrize("shift_value", [False, True])
    def test_rank_filters_3D_boolean_shift(self, filter, shift_name, shift_value):
        """Test warning if shift is provided as a boolean.

        Booleans passed for a shift_* argument are accepted but coerced;
        the filter must emit exactly one warning, attributed (via a
        correct ``stacklevel``) to the caller's own call line.
        """
        filter_func = getattr(rank, filter)
        image = img_as_ubyte(self.volume)
        # Build kwargs so exactly one shift_* argument is a bool.
        kwargs = {"footprint": self.footprint_3d, shift_name: shift_value}

        with pytest.warns() as record:
            filter_func(image, **kwargs)
        # The warning must point at the filter_func(...) call one line
        # above this statement, hence f_lineno - 1. Do not reflow these
        # lines: the assertion is coupled to the exact line offsets.
        expected_lineno = inspect.currentframe().f_lineno - 1
        assert len(record) == 1
        assert "will be interpreted as int" in record[0].message.args[0]
        assert record[0].filename == __file__
        assert record[0].lineno == expected_lineno
+""" + +from warnings import warn + +import numpy as np +from scipy import linalg + +from .._shared.utils import _supported_float_type, check_nD +from ..feature.corner import hessian_matrix, hessian_matrix_eigvals + + +def meijering( + image, sigmas=range(1, 10, 2), alpha=None, black_ridges=True, mode='reflect', cval=0 +): + """ + Filter an image with the Meijering neuriteness filter. + + This filter can be used to detect continuous ridges, e.g. neurites, + wrinkles, rivers. It can be used to calculate the fraction of the + whole image containing such objects. + + Calculates the eigenvalues of the Hessian to compute the similarity of + an image region to neurites, according to the method described in [1]_. + + Parameters + ---------- + image : (M, N[, ...]) ndarray + Array with input image data. + sigmas : iterable of floats, optional + Sigmas used as scales of filter + alpha : float, optional + Shaping filter constant, that selects maximally flat elongated + features. The default, None, selects the optimal value -1/(ndim+1). + black_ridges : boolean, optional + When True (the default), the filter detects black ridges; when + False, it detects white ridges. + mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional + How to handle values outside the image borders. + cval : float, optional + Used in conjunction with mode 'constant', the value outside + the image boundaries. + + Returns + ------- + out : (M, N[, ...]) ndarray + Filtered image (maximum of pixels across all scales). + + See also + -------- + sato + frangi + hessian + + References + ---------- + .. [1] Meijering, E., Jacob, M., Sarria, J. C., Steiner, P., Hirling, H., + Unser, M. (2004). Design and validation of a tool for neurite tracing + and analysis in fluorescence microscopy images. Cytometry Part A, + 58(2), 167-176. + :DOI:`10.1002/cyto.a.20022` + """ + + image = image.astype(_supported_float_type(image.dtype), copy=False) + if not black_ridges: # Normalize to black ridges. 
+ image = -image + + if alpha is None: + alpha = 1 / (image.ndim + 1) + mtx = linalg.circulant([1, *[alpha] * (image.ndim - 1)]).astype(image.dtype) + + # Generate empty array for storing maximum value + # from different (sigma) scales + filtered_max = np.zeros_like(image) + for sigma in sigmas: # Filter for all sigmas. + eigvals = hessian_matrix_eigvals( + hessian_matrix( + image, sigma, mode=mode, cval=cval, use_gaussian_derivatives=True + ) + ) + # Compute normalized eigenvalues l_i = e_i + sum_{j!=i} alpha * e_j. + vals = np.tensordot(mtx, eigvals, 1) + # Get largest normalized eigenvalue (by magnitude) at each pixel. + vals = np.take_along_axis(vals, abs(vals).argmax(0)[None], 0).squeeze(0) + # Remove negative values. + vals = np.maximum(vals, 0) + # Normalize to max = 1 (unless everything is already zero). + max_val = vals.max() + if max_val > 0: + vals /= max_val + filtered_max = np.maximum(filtered_max, vals) + + return filtered_max # Return pixel-wise max over all sigmas. + + +def sato(image, sigmas=range(1, 10, 2), black_ridges=True, mode='reflect', cval=0): + """ + Filter an image with the Sato tubeness filter. + + This filter can be used to detect continuous ridges, e.g. tubes, + wrinkles, rivers. It can be used to calculate the fraction of the + whole image containing such objects. + + Defined only for 2-D and 3-D images. Calculates the eigenvalues of the + Hessian to compute the similarity of an image region to tubes, according to + the method described in [1]_. + + Parameters + ---------- + image : (M, N[, P]) ndarray + Array with input image data. + sigmas : iterable of floats, optional + Sigmas used as scales of filter. + black_ridges : boolean, optional + When True (the default), the filter detects black ridges; when + False, it detects white ridges. + mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional + How to handle values outside the image borders. 
def frangi(
    image,
    sigmas=range(1, 10, 2),
    scale_range=None,
    scale_step=None,
    alpha=0.5,
    beta=0.5,
    gamma=None,
    black_ridges=True,
    mode='reflect',
    cval=0,
):
    """
    Filter an image with the Frangi vesselness filter.

    This filter can be used to detect continuous ridges, e.g. vessels,
    wrinkles, rivers. It can be used to calculate the fraction of the
    whole image containing such objects.

    Defined only for 2-D and 3-D images. Calculates the eigenvalues of the
    Hessian to compute the similarity of an image region to vessels, according
    to the method described in [1]_.

    Parameters
    ----------
    image : (M, N[, P]) ndarray
        Array with input image data.
    sigmas : iterable of floats, optional
        Sigmas used as scales of filter, i.e.,
        np.arange(scale_range[0], scale_range[1], scale_step)
    scale_range : 2-tuple of floats, optional
        The range of sigmas used.
    scale_step : float, optional
        Step size between sigmas.
    alpha : float, optional
        Frangi correction constant that adjusts the filter's
        sensitivity to deviation from a plate-like structure.
    beta : float, optional
        Frangi correction constant that adjusts the filter's
        sensitivity to deviation from a blob-like structure.
    gamma : float, optional
        Frangi correction constant that adjusts the filter's
        sensitivity to areas of high variance/texture/structure.
        The default, None, uses half of the maximum Hessian norm.
    black_ridges : boolean, optional
        When True (the default), the filter detects black ridges; when
        False, it detects white ridges.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    out : (M, N[, P]) ndarray
        Filtered image (maximum of pixels across all scales).

    Notes
    -----
    Earlier versions of this filter were implemented by Marc Schrijver,
    (November 2001), D. J. Kroon, University of Twente (May 2009) [2]_, and
    D. G. Ellis (January 2017) [3]_.

    See also
    --------
    meijering
    sato
    hessian

    References
    ----------
    .. [1] Frangi, A. F., Niessen, W. J., Vincken, K. L., & Viergever, M. A.
        (1998,). Multiscale vessel enhancement filtering. In International
        Conference on Medical Image Computing and Computer-Assisted
        Intervention (pp. 130-137). Springer Berlin Heidelberg.
        :DOI:`10.1007/BFb0056195`
    .. [2] Kroon, D. J.: Hessian based Frangi vesselness filter.
    .. [3] Ellis, D. G.: https://github.com/ellisdg/frangi3d/tree/master/frangi
    """
    if scale_range is not None and scale_step is not None:
        # FIX: the message previously named `scale_range` twice; the second
        # deprecated parameter is `scale_step`.
        warn(
            'Use keyword parameter `sigmas` instead of `scale_range` and '
            '`scale_step` which will be removed in version 0.17.',
            stacklevel=2,
        )
        sigmas = np.arange(scale_range[0], scale_range[1], scale_step)

    check_nD(image, [2, 3])  # Check image dimensions.
    image = image.astype(_supported_float_type(image.dtype), copy=False)
    if not black_ridges:  # Normalize to black ridges.
        image = -image

    # Generate empty array for storing maximum value
    # from different (sigma) scales
    filtered_max = np.zeros_like(image)
    for sigma in sigmas:  # Filter for all sigmas.
        eigvals = hessian_matrix_eigvals(
            hessian_matrix(
                image, sigma, mode=mode, cval=cval, use_gaussian_derivatives=True
            )
        )
        # Sort eigenvalues by magnitude.
        eigvals = np.take_along_axis(eigvals, abs(eigvals).argsort(0), 0)
        lambda1 = eigvals[0]
        if image.ndim == 2:
            (lambda2,) = np.maximum(eigvals[1:], 1e-10)
            r_a = np.inf  # implied by eq. (15).
            r_b = abs(lambda1) / lambda2  # eq. (15).
        else:  # ndim == 3
            lambda2, lambda3 = np.maximum(eigvals[1:], 1e-10)
            r_a = lambda2 / lambda3  # eq. (11).
            r_b = abs(lambda1) / np.sqrt(lambda2 * lambda3)  # eq. (10).
        s = np.sqrt((eigvals**2).sum(0))  # eq. (12).
        if gamma is None:
            # gamma is fixed from the first sigma's response and reused for
            # all remaining scales.
            gamma = s.max() / 2
            if gamma == 0:
                gamma = 1  # If s == 0 everywhere, gamma doesn't matter.
        # Filtered image, eq. (13) and (15). Our implementation relies on the
        # blobness exponential factor underflowing to zero whenever the second
        # or third eigenvalues are negative (we clip them to 1e-10, to make r_b
        # very large).
        vals = 1.0 - np.exp(
            -(r_a**2) / (2 * alpha**2), dtype=image.dtype
        )  # plate sensitivity
        vals *= np.exp(-(r_b**2) / (2 * beta**2), dtype=image.dtype)  # blobness
        vals *= 1.0 - np.exp(
            -(s**2) / (2 * gamma**2), dtype=image.dtype
        )  # structuredness
        filtered_max = np.maximum(filtered_max, vals)
    return filtered_max  # Return pixel-wise max over all sigmas.
+ + Returns + ------- + out : (M, N[, P]) ndarray + Filtered image (maximum of pixels across all scales). + + Notes + ----- + Written by Marc Schrijver (November 2001) + Re-Written by D. J. Kroon University of Twente (May 2009) [2]_ + + See also + -------- + meijering + sato + frangi + + References + ---------- + .. [1] Ng, C. C., Yap, M. H., Costen, N., & Li, B. (2014,). Automatic + wrinkle detection using hybrid Hessian filter. In Asian Conference on + Computer Vision (pp. 609-622). Springer International Publishing. + :DOI:`10.1007/978-3-319-16811-1_40` + .. [2] Kroon, D. J.: Hessian based Frangi vesselness filter. + """ + filtered = frangi( + image, + sigmas=sigmas, + scale_range=scale_range, + scale_step=scale_step, + alpha=alpha, + beta=beta, + gamma=gamma, + black_ridges=black_ridges, + mode=mode, + cval=cval, + ) + + filtered[filtered <= 0] = 1 + return filtered diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/filters/thresholding.py b/vlmpy310/lib/python3.10/site-packages/skimage/filters/thresholding.py new file mode 100644 index 0000000000000000000000000000000000000000..a9914af851c07f16d2387ad398d738f0a6a5291f --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/filters/thresholding.py @@ -0,0 +1,1339 @@ +import inspect +import itertools +import math +from collections import OrderedDict +from collections.abc import Iterable + +import numpy as np +from scipy import ndimage as ndi + +from .._shared.filters import gaussian +from .._shared.utils import _supported_float_type, warn +from .._shared.version_requirements import require +from ..exposure import histogram +from ..filters._multiotsu import ( + _get_multiotsu_thresh_indices, + _get_multiotsu_thresh_indices_lut, +) +from ..transform import integral_image +from ..util import dtype_limits +from ._sparse import _correlate_sparse, _validate_window_size + +__all__ = [ + 'try_all_threshold', + 'threshold_otsu', + 'threshold_yen', + 'threshold_isodata', + 'threshold_li', + 
def _try_all(image, methods=None, figsize=None, num_cols=2, verbose=True):
    """Returns a figure comparing the outputs of different methods.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    methods : dict, optional
        Names and associated functions.
        Functions must take and return an image.
    figsize : tuple, optional
        Figure size (in inches).
    num_cols : int, optional
        Number of columns.
    verbose : bool, optional
        Print function name for each method.

    Returns
    -------
    fig, ax : tuple
        Matplotlib figure and axes.
    """
    from matplotlib import pyplot as plt

    # Compute the histogram once and share it with every method that can
    # accept it — avoids re-scanning the image per method.
    nbins = 256  # Default in threshold functions
    hist = histogram(image.reshape(-1), nbins, source_range='image')

    methods = methods or {}

    # One extra panel for the original image.
    num_rows = math.ceil((len(methods) + 1.0) / num_cols)
    fig, ax = plt.subplots(
        num_rows, num_cols, figsize=figsize, sharex=True, sharey=True
    )
    ax = ax.reshape(-1)

    ax[0].imshow(image, cmap=plt.cm.gray)
    ax[0].set_title('Original')

    for idx, (name, func) in enumerate(methods.items(), start=1):
        # Pass the precomputed histogram only to methods that support it.
        accepts_hist = 'hist' in inspect.signature(func).parameters
        _kwargs = dict(hist=hist) if accepts_hist else {}

        ax[idx].set_title(name)
        try:
            ax[idx].imshow(func(image, **_kwargs), cmap=plt.cm.gray)
        except Exception as e:
            # A failing method gets its exception name rendered in its panel
            # instead of aborting the whole comparison figure.
            ax[idx].text(
                0.5,
                0.5,
                f"{type(e).__name__}",
                ha="center",
                va="center",
                transform=ax[idx].transAxes,
            )
        if verbose:
            print(func.__orifunc__)

    for a in ax:
        a.axis('off')

    fig.tight_layout()
    return fig, ax
+ + Parameters + ---------- + image : (M, N) ndarray + Input image. + figsize : tuple, optional + Figure size (in inches). + verbose : bool, optional + Print function name for each method. + + Returns + ------- + fig, ax : tuple + Matplotlib figure and axes. + + Notes + ----- + The following algorithms are used: + + * isodata + * li + * mean + * minimum + * otsu + * triangle + * yen + + Examples + -------- + .. testsetup:: + >>> import pytest; _ = pytest.importorskip('matplotlib') + + >>> from skimage.data import text + >>> fig, ax = try_all_threshold(text(), figsize=(10, 6), verbose=False) + """ + + def thresh(func): + """ + A wrapper function to return a thresholded image. + """ + + def wrapper(im): + return im > func(im) + + try: + wrapper.__orifunc__ = func.__orifunc__ + except AttributeError: + wrapper.__orifunc__ = func.__module__ + '.' + func.__name__ + return wrapper + + # Global algorithms. + methods = OrderedDict( + { + 'Isodata': thresh(threshold_isodata), + 'Li': thresh(threshold_li), + 'Mean': thresh(threshold_mean), + 'Minimum': thresh(threshold_minimum), + 'Otsu': thresh(threshold_otsu), + 'Triangle': thresh(threshold_triangle), + 'Yen': thresh(threshold_yen), + } + ) + + return _try_all(image, figsize=figsize, methods=methods, verbose=verbose) + + +def threshold_local( + image, block_size=3, method='gaussian', offset=0, mode='reflect', param=None, cval=0 +): + """Compute a threshold mask image based on local pixel neighborhood. + + Also known as adaptive or dynamic thresholding. The threshold value is + the weighted mean for the local neighborhood of a pixel subtracted by a + constant. Alternatively the threshold can be determined dynamically by a + given function, using the 'generic' method. + + Parameters + ---------- + image : (M, N[, ...]) ndarray + Grayscale input image. + block_size : int or sequence of int + Odd size of pixel neighborhood which is used to calculate the + threshold value (e.g. 3, 5, 7, ..., 21, ...). 
+ method : {'generic', 'gaussian', 'mean', 'median'}, optional + Method used to determine adaptive threshold for local neighborhood in + weighted mean image. + + * 'generic': use custom function (see ``param`` parameter) + * 'gaussian': apply gaussian filter (see ``param`` parameter for custom\ + sigma value) + * 'mean': apply arithmetic mean filter + * 'median': apply median rank filter + + By default, the 'gaussian' method is used. + offset : float, optional + Constant subtracted from weighted mean of neighborhood to calculate + the local threshold value. Default offset is 0. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The mode parameter determines how the array borders are handled, where + cval is the value when mode is equal to 'constant'. + Default is 'reflect'. + param : {int, function}, optional + Either specify sigma for 'gaussian' method or function object for + 'generic' method. This functions takes the flat array of local + neighborhood as a single argument and returns the calculated + threshold for the centre pixel. + cval : float, optional + Value to fill past edges of input if mode is 'constant'. + + Returns + ------- + threshold : (M, N[, ...]) ndarray + Threshold image. All pixels in the input image higher than the + corresponding pixel in the threshold image are considered foreground. + + References + ---------- + .. [1] Gonzalez, R. C. and Wood, R. E. "Digital Image Processing + (2nd Edition)." Prentice-Hall Inc., 2002: 600--612. + ISBN: 0-201-18075-8 + + Examples + -------- + >>> from skimage.data import camera + >>> image = camera()[:50, :50] + >>> binary_image1 = image > threshold_local(image, 15, 'mean') + >>> func = lambda arr: arr.mean() + >>> binary_image2 = image > threshold_local(image, 15, 'generic', + ... 
param=func) + + """ + + if np.isscalar(block_size): + block_size = (block_size,) * image.ndim + elif len(block_size) != image.ndim: + raise ValueError("len(block_size) must equal image.ndim.") + block_size = tuple(block_size) + if any(b % 2 == 0 for b in block_size): + raise ValueError( + f'block_size must be odd! Given block_size ' + f'{block_size} contains even values.' + ) + float_dtype = _supported_float_type(image.dtype) + image = image.astype(float_dtype, copy=False) + thresh_image = np.zeros(image.shape, dtype=float_dtype) + if method == 'generic': + ndi.generic_filter( + image, param, block_size, output=thresh_image, mode=mode, cval=cval + ) + elif method == 'gaussian': + if param is None: + # automatically determine sigma which covers > 99% of distribution + sigma = tuple([(b - 1) / 6.0 for b in block_size]) + else: + sigma = param + gaussian(image, sigma=sigma, out=thresh_image, mode=mode, cval=cval) + elif method == 'mean': + ndi.uniform_filter(image, block_size, output=thresh_image, mode=mode, cval=cval) + elif method == 'median': + ndi.median_filter(image, block_size, output=thresh_image, mode=mode, cval=cval) + else: + raise ValueError( + "Invalid method specified. Please use `generic`, " + "`gaussian`, `mean`, or `median`." + ) + + return thresh_image - offset + + +def _validate_image_histogram(image, hist, nbins=None, normalize=False): + """Ensure that either image or hist were given, return valid histogram. + + If hist is given, image is ignored. + + Parameters + ---------- + image : array or None + Grayscale image. + hist : array, 2-tuple of array, or None + Histogram, either a 1D counts array, or an array of counts together + with an array of bin centers. + nbins : int, optional + The number of bins with which to compute the histogram, if `hist` is + None. + normalize : bool + If hist is not given, it will be computed by this function. This + parameter determines whether the computed histogram is normalized + (i.e. entries sum up to 1) or not. 
+ + Returns + ------- + counts : 1D array of float + Each element is the number of pixels falling in each intensity bin. + bin_centers : 1D array + Each element is the value corresponding to the center of each intensity + bin. + + Raises + ------ + ValueError : if image and hist are both None + """ + if image is None and hist is None: + raise Exception("Either image or hist must be provided.") + + if hist is not None: + if isinstance(hist, (tuple, list)): + counts, bin_centers = hist + else: + counts = hist + bin_centers = np.arange(counts.size) + + if counts[0] == 0 or counts[-1] == 0: + # Trim histogram from both ends by removing starting and + # ending zeroes as in histogram(..., source_range="image") + cond = counts > 0 + start = np.argmax(cond) + end = cond.size - np.argmax(cond[::-1]) + counts, bin_centers = counts[start:end], bin_centers[start:end] + else: + counts, bin_centers = histogram( + image.reshape(-1), nbins, source_range='image', normalize=normalize + ) + return counts.astype('float32', copy=False), bin_centers + + +def threshold_otsu(image=None, nbins=256, *, hist=None): + """Return threshold value based on Otsu's method. + + Either image or hist must be provided. If hist is provided, the actual + histogram of the image is ignored. + + Parameters + ---------- + image : (M, N[, ...]) ndarray, optional + Grayscale input image. + nbins : int, optional + Number of bins used to calculate histogram. This value is ignored for + integer arrays. + hist : array, or 2-tuple of arrays, optional + Histogram from which to determine the threshold, and optionally a + corresponding array of bin center intensities. If no hist provided, + this function will compute it from the image. + + + Returns + ------- + threshold : float + Upper threshold value. All pixels with an intensity higher than + this value are assumed to be foreground. + + References + ---------- + .. 
[1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method + + Examples + -------- + >>> from skimage.data import camera + >>> image = camera() + >>> thresh = threshold_otsu(image) + >>> binary = image <= thresh + + Notes + ----- + The input image must be grayscale. + """ + if image is not None and image.ndim > 2 and image.shape[-1] in (3, 4): + warn( + f'threshold_otsu is expected to work correctly only for ' + f'grayscale images; image shape {image.shape} looks like ' + f'that of an RGB image.' + ) + + # Check if the image has more than one intensity value; if not, return that + # value + if image is not None: + first_pixel = image.reshape(-1)[0] + if np.all(image == first_pixel): + return first_pixel + + counts, bin_centers = _validate_image_histogram(image, hist, nbins) + + # class probabilities for all possible thresholds + weight1 = np.cumsum(counts) + weight2 = np.cumsum(counts[::-1])[::-1] + # class means for all possible thresholds + mean1 = np.cumsum(counts * bin_centers) / weight1 + mean2 = (np.cumsum((counts * bin_centers)[::-1]) / weight2[::-1])[::-1] + + # Clip ends to align class 1 and class 2 variables: + # The last value of ``weight1``/``mean1`` should pair with zero values in + # ``weight2``/``mean2``, which do not exist. + variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2 + + idx = np.argmax(variance12) + threshold = bin_centers[idx] + + return threshold + + +def threshold_yen(image=None, nbins=256, *, hist=None): + """Return threshold value based on Yen's method. + Either image or hist must be provided. In case hist is given, the actual + histogram of the image is ignored. + + Parameters + ---------- + image : (M, N[, ...]) ndarray + Grayscale input image. + nbins : int, optional + Number of bins used to calculate histogram. This value is ignored for + integer arrays. 
+ hist : array, or 2-tuple of arrays, optional + Histogram from which to determine the threshold, and optionally a + corresponding array of bin center intensities. + An alternative use of this function is to pass it only hist. + + Returns + ------- + threshold : float + Upper threshold value. All pixels with an intensity higher than + this value are assumed to be foreground. + + References + ---------- + .. [1] Yen J.C., Chang F.J., and Chang S. (1995) "A New Criterion + for Automatic Multilevel Thresholding" IEEE Trans. on Image + Processing, 4(3): 370-378. :DOI:`10.1109/83.366472` + .. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding + Techniques and Quantitative Performance Evaluation" Journal of + Electronic Imaging, 13(1): 146-165, :DOI:`10.1117/1.1631315` + http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf + .. [3] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold + + Examples + -------- + >>> from skimage.data import camera + >>> image = camera() + >>> thresh = threshold_yen(image) + >>> binary = image <= thresh + """ + counts, bin_centers = _validate_image_histogram(image, hist, nbins) + + # On blank images (e.g. filled with 0) with int dtype, `histogram()` + # returns ``bin_centers`` containing only one value. Speed up with it. + if bin_centers.size == 1: + return bin_centers[0] + + # Calculate probability mass function + pmf = counts.astype('float32', copy=False) / counts.sum() + P1 = np.cumsum(pmf) # Cumulative normalized histogram + P1_sq = np.cumsum(pmf**2) + # Get cumsum calculated from end of squared array: + P2_sq = np.cumsum(pmf[::-1] ** 2)[::-1] + # P2_sq indexes is shifted +1. I assume, with P1[:-1] it's help avoid + # '-inf' in crit. ImageJ Yen implementation replaces those values by zero. 
def threshold_isodata(image=None, nbins=256, return_all=False, *, hist=None):
    """Return threshold value(s) based on ISODATA method.

    Histogram-based threshold, known as Ridler-Calvard method or inter-means.
    Threshold values returned satisfy the following equality::

        threshold = (image[image <= threshold].mean() +
                     image[image > threshold].mean()) / 2.0

    That is, returned thresholds are intensities that separate the image into
    two groups of pixels, where the threshold intensity is midway between the
    mean intensities of these groups.

    For integer images, the above equality holds to within one; for floating-
    point images, the equality holds to within the histogram bin-width.

    Either image or hist must be provided. In case hist is given, the actual
    histogram of the image is ignored.

    Parameters
    ----------
    image : (M, N[, ...]) ndarray
        Grayscale input image.
    nbins : int, optional
        Number of histogram bins. Ignored for integer arrays.
    return_all : bool, optional
        If False (default), return only the lowest threshold that satisfies
        the above equality. If True, return all valid thresholds.
    hist : array, or 2-tuple of arrays, optional
        Histogram to determine the threshold from and a corresponding array
        of bin center intensities. Alternatively, only the histogram can be
        passed.

    Returns
    -------
    threshold : float or int or array
        Threshold value(s).

    References
    ----------
    .. [1] Ridler, TW & Calvard, S (1978), "Picture thresholding using an
           iterative selection method"
           IEEE Transactions on Systems, Man and Cybernetics 8: 630-632,
           :DOI:`10.1109/TSMC.1978.4310039`
    .. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
           :DOI:`10.1117/1.1631315`
    .. [3] ImageJ AutoThresholder code,
           http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import coins
    >>> image = coins()
    >>> thresh = threshold_isodata(image)
    >>> binary = image > thresh
    """
    counts, bin_centers = _validate_image_histogram(image, hist, nbins)

    # Single populated bin: the image has one unique value.
    if len(bin_centers) == 1:
        return bin_centers if return_all else bin_centers[0]

    counts = counts.astype('float32', copy=False)

    # Number of pixels at-or-below each bin, and strictly above each bin.
    n_low = np.cumsum(counts)
    n_high = n_low[-1] - n_low

    # Total intensity accumulated up to each bin.
    cum_intensity = np.cumsum(counts * bin_centers)

    # Mean intensity of the low/high groups at each candidate cut.
    # The histogram returned above has no empty bins at either end, so n_low
    # is strictly positive and n_high is positive everywhere except the very
    # last bin (zero by construction). No valid threshold can sit in the top
    # bin, so the last entry is simply skipped in all of the following.
    mean_low = cum_intensity[:-1] / n_low[:-1]
    mean_high = (cum_intensity[-1] - cum_intensity[:-1]) / n_high[:-1]

    # ISODATA fixed points: bin centers that equal (mean_low + mean_high) / 2
    # to within one bin width. Only cuts lying below the midpoint mean are
    # kept, consistent with the threshold belonging to the lower group.
    midpoint = (mean_low + mean_high) / 2.0
    bin_width = bin_centers[1] - bin_centers[0]
    gap = midpoint - bin_centers[:-1]
    thresholds = bin_centers[:-1][(gap >= 0) & (gap < bin_width)]

    return thresholds if return_all else thresholds[0]
So we are looking for places where + # (l + m) / 2 equals the intensity value for which those l and m figures + # were calculated -- which is, of course, the histogram bin centers. + # We only require this equality to be within the precision of the bin + # width, of course. + all_mean = (lower + higher) / 2.0 + bin_width = bin_centers[1] - bin_centers[0] + + # Look only at thresholds that are below the actual all_mean value, + # for consistency with the threshold being included in the lower pixel + # group. Otherwise, can get thresholds that are not actually fixed-points + # of the isodata algorithm. For float images, this matters less, since + # there really can't be any guarantees anymore anyway. + distances = all_mean - bin_centers[:-1] + thresholds = bin_centers[:-1][(distances >= 0) & (distances < bin_width)] + + if return_all: + return thresholds + else: + return thresholds[0] + + +# Computing a histogram using np.histogram on a uint8 image with bins=256 +# doesn't work and results in aliasing problems. We use a fully specified set +# of bins to ensure that each uint8 value false into its own bin. +_DEFAULT_ENTROPY_BINS = tuple(np.arange(-0.5, 255.51, 1)) + + +def _cross_entropy(image, threshold, bins=_DEFAULT_ENTROPY_BINS): + """Compute cross-entropy between distributions above and below a threshold. + + Parameters + ---------- + image : array + The input array of values. + threshold : float + The value dividing the foreground and background in ``image``. + bins : int or array of float, optional + The number of bins or the bin edges. (Any valid value to the ``bins`` + argument of ``np.histogram`` will work here.) For an exact calculation, + each unique value should have its own bin. The default value for bins + ensures exact handling of uint8 images: ``bins=256`` results in + aliasing problems due to bin width not being equal to 1. + + Returns + ------- + nu : float + The cross-entropy target value as defined in [1]_. 
+ + Notes + ----- + See Li and Lee, 1993 [1]_; this is the objective function ``threshold_li`` + minimizes. This function can be improved but this implementation most + closely matches equation 8 in [1]_ and equations 1-3 in [2]_. + + References + ---------- + .. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding" + Pattern Recognition, 26(4): 617-625 + :DOI:`10.1016/0031-3203(93)90115-D` + .. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum + Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776 + :DOI:`10.1016/S0167-8655(98)00057-9` + """ + histogram, bin_edges = np.histogram(image, bins=bins, density=True) + bin_centers = np.convolve(bin_edges, [0.5, 0.5], mode='valid') + t = np.flatnonzero(bin_centers > threshold)[0] + m0a = np.sum(histogram[:t]) # 0th moment, background + m0b = np.sum(histogram[t:]) + m1a = np.sum(histogram[:t] * bin_centers[:t]) # 1st moment, background + m1b = np.sum(histogram[t:] * bin_centers[t:]) + mua = m1a / m0a # mean value, background + mub = m1b / m0b + nu = -m1a * np.log(mua) - m1b * np.log(mub) + return nu + + +def threshold_li(image, *, tolerance=None, initial_guess=None, iter_callback=None): + """Compute threshold value by Li's iterative Minimum Cross Entropy method. + + Parameters + ---------- + image : (M, N[, ...]) ndarray + Grayscale input image. + tolerance : float, optional + Finish the computation when the change in the threshold in an iteration + is less than this value. By default, this is half the smallest + difference between intensity values in ``image``. + initial_guess : float or Callable[[array[float]], float], optional + Li's iterative method uses gradient descent to find the optimal + threshold. If the image intensity histogram contains more than two + modes (peaks), the gradient descent could get stuck in a local optimum. + An initial guess for the iteration can help the algorithm find the + globally-optimal threshold. 
def threshold_li(image, *, tolerance=None, initial_guess=None, iter_callback=None):
    """Compute threshold value by Li's iterative Minimum Cross Entropy method.

    Parameters
    ----------
    image : (M, N[, ...]) ndarray
        Grayscale input image.
    tolerance : float, optional
        Finish the computation when the change in the threshold in an iteration
        is less than this value. By default, this is half the smallest
        difference between intensity values in ``image``.
    initial_guess : float or Callable[[array[float]], float], optional
        Li's iterative method uses gradient descent to find the optimal
        threshold. If the image intensity histogram contains more than two
        modes (peaks), the gradient descent could get stuck in a local optimum.
        An initial guess for the iteration can help the algorithm find the
        globally-optimal threshold. A float value defines a specific start
        point, while a callable should take in an array of image intensities
        and return a float value. Example valid callables include
        ``numpy.mean`` (default), ``lambda arr: numpy.quantile(arr, 0.95)``,
        or even :func:`skimage.filters.threshold_otsu`.
    iter_callback : Callable[[float], Any], optional
        A function that will be called on the threshold at every iteration of
        the algorithm.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels with an intensity higher than
        this value are assumed to be foreground.

    References
    ----------
    .. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding"
           Pattern Recognition, 26(4): 617-625
           :DOI:`10.1016/0031-3203(93)90115-D`
    .. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum
           Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776
           :DOI:`10.1016/S0167-8655(98)00057-9`
    .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165
           :DOI:`10.1117/1.1631315`
    .. [4] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_li(image)
    >>> binary = image > thresh
    """
    # Remove nan. Boolean indexing yields a copy, so the caller's array is
    # not affected by the in-place shift further below.
    image = image[~np.isnan(image)]
    if image.size == 0:
        return np.nan

    # Make sure image has more than one value; otherwise, return that value
    # This works even for np.inf
    if np.all(image == image.flat[0]):
        return image.flat[0]

    # At this point, the image only contains np.inf, -np.inf, or valid numbers
    image = image[np.isfinite(image)]
    # if there are no finite values in the image, return 0. This is because
    # at this point we *know* that there are *both* inf and -inf values,
    # because inf == inf evaluates to True. We might as well separate them.
    if image.size == 0:
        return 0.0

    # Li's algorithm requires positive image (because of log(mean))
    image_min = np.min(image)
    image -= image_min
    if image.dtype.kind in 'iu':
        tolerance = tolerance or 0.5
    else:
        # Default tolerance: half the smallest gap between distinct values.
        tolerance = tolerance or np.min(np.diff(np.unique(image))) / 2

    # Initial estimate for iteration. See "initial_guess" in the parameter list
    if initial_guess is None:
        t_next = np.mean(image)
    elif callable(initial_guess):
        t_next = initial_guess(image)
    elif np.isscalar(initial_guess):  # convert to new, positive image range
        t_next = initial_guess - float(image_min)
        image_max = np.max(image) + image_min
        if not 0 < t_next < np.max(image):
            msg = (
                f'The initial guess for threshold_li must be within the '
                f'range of the image. Got {initial_guess} for image min '
                f'{image_min} and max {image_max}.'
            )
            raise ValueError(msg)
        t_next = image.dtype.type(t_next)
    else:
        raise TypeError(
            'Incorrect type for `initial_guess`; should be '
            'a floating point value, or a function mapping an '
            'array to a floating point value.'
        )

    # initial value for t_curr must be different from t_next by at
    # least the tolerance. Since the image is positive, we ensure this
    # by setting to a large-enough negative number
    t_curr = -2 * tolerance

    # Callback on initial iterations
    if iter_callback is not None:
        iter_callback(t_next + image_min)

    # Stop the iterations when the difference between the
    # new and old threshold values is less than the tolerance
    # or if the background mode has only one value left,
    # since log(0) is not defined.

    if image.dtype.kind in 'iu':
        # Integer images: iterate over the (much smaller) histogram rather
        # than the raw pixels.
        hist, bin_centers = histogram(image.reshape(-1), source_range='image')
        hist = hist.astype('float32', copy=False)
        while abs(t_next - t_curr) > tolerance:
            t_curr = t_next
            foreground = bin_centers > t_curr
            background = ~foreground

            mean_fore = np.average(bin_centers[foreground], weights=hist[foreground])
            mean_back = np.average(bin_centers[background], weights=hist[background])

            # log(0) below would be undefined; stop at a degenerate background.
            if mean_back == 0:
                break

            t_next = (mean_back - mean_fore) / (np.log(mean_back) - np.log(mean_fore))

            if iter_callback is not None:
                iter_callback(t_next + image_min)

    else:
        while abs(t_next - t_curr) > tolerance:
            t_curr = t_next
            foreground = image > t_curr
            mean_fore = np.mean(image[foreground])
            mean_back = np.mean(image[~foreground])

            # log(0) below would be undefined; stop at a degenerate background.
            if mean_back == 0.0:
                break

            t_next = (mean_back - mean_fore) / (np.log(mean_back) - np.log(mean_fore))

            if iter_callback is not None:
                iter_callback(t_next + image_min)

    # Shift back into the original intensity range.
    threshold = t_next + image_min
    return threshold
+ + Returns + ------- + threshold : float + Upper threshold value. All pixels with an intensity higher than + this value are assumed to be foreground. + + Raises + ------ + RuntimeError + If unable to find two local maxima in the histogram or if the + smoothing takes more than 1e4 iterations. + + References + ---------- + .. [1] C. A. Glasbey, "An analysis of histogram-based thresholding + algorithms," CVGIP: Graphical Models and Image Processing, + vol. 55, pp. 532-537, 1993. + .. [2] Prewitt, JMS & Mendelsohn, ML (1966), "The analysis of cell + images", Annals of the New York Academy of Sciences 128: 1035-1053 + :DOI:`10.1111/j.1749-6632.1965.tb11715.x` + + Examples + -------- + >>> from skimage.data import camera + >>> image = camera() + >>> thresh = threshold_minimum(image) + >>> binary = image > thresh + """ + + def find_local_maxima_idx(hist): + # We can't use scipy.signal.argrelmax + # as it fails on plateaus + maximum_idxs = list() + direction = 1 + + for i in range(hist.shape[0] - 1): + if direction > 0: + if hist[i + 1] < hist[i]: + direction = -1 + maximum_idxs.append(i) + else: + if hist[i + 1] > hist[i]: + direction = 1 + + return maximum_idxs + + counts, bin_centers = _validate_image_histogram(image, hist, nbins) + + smooth_hist = counts.astype('float32', copy=False) + + for counter in range(max_num_iter): + smooth_hist = ndi.uniform_filter1d(smooth_hist, 3) + maximum_idxs = find_local_maxima_idx(smooth_hist) + if len(maximum_idxs) < 3: + break + + if len(maximum_idxs) != 2: + raise RuntimeError('Unable to find two maxima in histogram') + elif counter == max_num_iter - 1: + raise RuntimeError('Maximum iteration reached for histogram' 'smoothing') + + # Find the lowest point between the maxima + threshold_idx = np.argmin(smooth_hist[maximum_idxs[0] : maximum_idxs[1] + 1]) + + return bin_centers[maximum_idxs[0] + threshold_idx] + + +def threshold_mean(image): + """Return threshold value based on the mean of grayscale values. 
+ + Parameters + ---------- + image : (M, N[, ...]) ndarray + Grayscale input image. + + Returns + ------- + threshold : float + Upper threshold value. All pixels with an intensity higher than + this value are assumed to be foreground. + + References + ---------- + .. [1] C. A. Glasbey, "An analysis of histogram-based thresholding + algorithms," CVGIP: Graphical Models and Image Processing, + vol. 55, pp. 532-537, 1993. + :DOI:`10.1006/cgip.1993.1040` + + Examples + -------- + >>> from skimage.data import camera + >>> image = camera() + >>> thresh = threshold_mean(image) + >>> binary = image > thresh + """ + return np.mean(image) + + +def threshold_triangle(image, nbins=256): + """Return threshold value based on the triangle algorithm. + + Parameters + ---------- + image : (M, N[, ...]) ndarray + Grayscale input image. + nbins : int, optional + Number of bins used to calculate histogram. This value is ignored for + integer arrays. + + Returns + ------- + threshold : float + Upper threshold value. All pixels with an intensity higher than + this value are assumed to be foreground. + + References + ---------- + .. [1] Zack, G. W., Rogers, W. E. and Latt, S. A., 1977, + Automatic Measurement of Sister Chromatid Exchange Frequency, + Journal of Histochemistry and Cytochemistry 25 (7), pp. 741-753 + :DOI:`10.1177/25.7.70454` + .. [2] ImageJ AutoThresholder code, + http://fiji.sc/wiki/index.php/Auto_Threshold + + Examples + -------- + >>> from skimage.data import camera + >>> image = camera() + >>> thresh = threshold_triangle(image) + >>> binary = image > thresh + """ + # nbins is ignored for integer arrays + # so, we recalculate the effective nbins. + hist, bin_centers = histogram(image.reshape(-1), nbins, source_range='image') + nbins = len(hist) + + # Find peak, lowest and highest gray levels. 
+ arg_peak_height = np.argmax(hist) + peak_height = hist[arg_peak_height] + arg_low_level, arg_high_level = np.flatnonzero(hist)[[0, -1]] + + if arg_low_level == arg_high_level: + # Image has constant intensity. + return image.ravel()[0] + + # Flip is True if left tail is shorter. + flip = arg_peak_height - arg_low_level < arg_high_level - arg_peak_height + if flip: + hist = hist[::-1] + arg_low_level = nbins - arg_high_level - 1 + arg_peak_height = nbins - arg_peak_height - 1 + + # If flip == True, arg_high_level becomes incorrect + # but we don't need it anymore. + del arg_high_level + + # Set up the coordinate system. + width = arg_peak_height - arg_low_level + x1 = np.arange(width) + y1 = hist[x1 + arg_low_level] + + # Normalize. + norm = np.sqrt(peak_height**2 + width**2) + peak_height /= norm + width /= norm + + # Maximize the length. + # The ImageJ implementation includes an additional constant when calculating + # the length, but here we omit it as it does not affect the location of the + # minimum. + length = peak_height * x1 - width * y1 + arg_level = np.argmax(length) + arg_low_level + + if flip: + arg_level = nbins - arg_level - 1 + + return bin_centers[arg_level] + + +def _mean_std(image, w): + """Return local mean and standard deviation of each pixel using a + neighborhood defined by a rectangular window size ``w``. + The algorithm uses integral images to speedup computation. This is + used by :func:`threshold_niblack` and :func:`threshold_sauvola`. + + Parameters + ---------- + image : (M, N[, ...]) ndarray + Grayscale input image. + w : int, or iterable of int + Window size specified as a single odd integer (3, 5, 7, …), + or an iterable of length ``image.ndim`` containing only odd + integers (e.g. ``(1, 5, 5)``). + + Returns + ------- + m : ndarray of float, same shape as ``image`` + Local mean of the image. + s : ndarray of float, same shape as ``image`` + Local standard deviation of the image. + + References + ---------- + .. [1] F. Shafait, D. 
Keysers, and T. M. Breuel, "Efficient + implementation of local adaptive thresholding techniques + using integral images." in Document Recognition and + Retrieval XV, (San Jose, USA), Jan. 2008. + :DOI:`10.1117/12.767755` + """ + + if not isinstance(w, Iterable): + w = (w,) * image.ndim + _validate_window_size(w) + + float_dtype = _supported_float_type(image.dtype) + pad_width = tuple((k // 2 + 1, k // 2) for k in w) + padded = np.pad(image.astype(float_dtype, copy=False), pad_width, mode='reflect') + + # Note: keep float64 integral images for accuracy. Outputs of + # _correlate_sparse can later be safely cast to float_dtype + integral = integral_image(padded, dtype=np.float64) + padded *= padded + integral_sq = integral_image(padded, dtype=np.float64) + + # Create lists of non-zero kernel indices and values + kernel_indices = list(itertools.product(*tuple([(0, _w) for _w in w]))) + kernel_values = [ + (-1) ** (image.ndim % 2 != np.sum(indices) % 2) for indices in kernel_indices + ] + + total_window_size = math.prod(w) + kernel_shape = tuple(_w + 1 for _w in w) + m = _correlate_sparse(integral, kernel_shape, kernel_indices, kernel_values) + m = m.astype(float_dtype, copy=False) + m /= total_window_size + g2 = _correlate_sparse(integral_sq, kernel_shape, kernel_indices, kernel_values) + g2 = g2.astype(float_dtype, copy=False) + g2 /= total_window_size + # Note: we use np.clip because g2 is not guaranteed to be greater than + # m*m when floating point error is considered + s = np.sqrt(np.clip(g2 - m * m, 0, None)) + return m, s + + +def threshold_niblack(image, window_size=15, k=0.2): + """Applies Niblack local threshold to an array. + + A threshold T is calculated for every pixel in the image using the + following formula:: + + T = m(x,y) - k * s(x,y) + + where m(x,y) and s(x,y) are the mean and standard deviation of + pixel (x,y) neighborhood defined by a rectangular window with size w + times w centered around the pixel. 
k is a configurable parameter + that weights the effect of standard deviation. + + Parameters + ---------- + image : (M, N[, ...]) ndarray + Grayscale input image. + window_size : int, or iterable of int, optional + Window size specified as a single odd integer (3, 5, 7, …), + or an iterable of length ``image.ndim`` containing only odd + integers (e.g. ``(1, 5, 5)``). + k : float, optional + Value of parameter k in threshold formula. + + Returns + ------- + threshold : (M, N[, ...]) ndarray + Threshold mask. All pixels with an intensity higher than + this value are assumed to be foreground. + + Notes + ----- + This algorithm is originally designed for text recognition. + + The Bradley threshold is a particular case of the Niblack + one, being equivalent to + + >>> from skimage import data + >>> image = data.page() + >>> q = 1 + >>> threshold_image = threshold_niblack(image, k=0) * q + + for some value ``q``. By default, Bradley and Roth use ``q=1``. + + + References + ---------- + .. [1] W. Niblack, An introduction to Digital Image Processing, + Prentice-Hall, 1986. + .. [2] D. Bradley and G. Roth, "Adaptive thresholding using Integral + Image", Journal of Graphics Tools 12(2), pp. 13-21, 2007. + :DOI:`10.1080/2151237X.2007.10129236` + + Examples + -------- + >>> from skimage import data + >>> image = data.page() + >>> threshold_image = threshold_niblack(image, window_size=7, k=0.1) + """ + m, s = _mean_std(image, window_size) + return m - k * s + + +def threshold_sauvola(image, window_size=15, k=0.2, r=None): + """Applies Sauvola local threshold to an array. Sauvola is a + modification of Niblack technique. + + In the original method a threshold T is calculated for every pixel + in the image using the following formula:: + + T = m(x,y) * (1 + k * ((s(x,y) / R) - 1)) + + where m(x,y) and s(x,y) are the mean and standard deviation of + pixel (x,y) neighborhood defined by a rectangular window with size w + times w centered around the pixel. 
k is a configurable parameter + that weights the effect of standard deviation. + R is the maximum standard deviation of a grayscale image. + + Parameters + ---------- + image : (M, N[, ...]) ndarray + Grayscale input image. + window_size : int, or iterable of int, optional + Window size specified as a single odd integer (3, 5, 7, …), + or an iterable of length ``image.ndim`` containing only odd + integers (e.g. ``(1, 5, 5)``). + k : float, optional + Value of the positive parameter k. + r : float, optional + Value of R, the dynamic range of standard deviation. + If None, set to the half of the image dtype range. + + Returns + ------- + threshold : (M, N[, ...]) ndarray + Threshold mask. All pixels with an intensity higher than + this value are assumed to be foreground. + + Notes + ----- + This algorithm is originally designed for text recognition. + + References + ---------- + .. [1] J. Sauvola and M. Pietikainen, "Adaptive document image + binarization," Pattern Recognition 33(2), + pp. 225-236, 2000. + :DOI:`10.1016/S0031-3203(99)00055-2` + + Examples + -------- + >>> from skimage import data + >>> image = data.page() + >>> t_sauvola = threshold_sauvola(image, window_size=15, k=0.2) + >>> binary_image = image > t_sauvola + """ + if r is None: + imin, imax = dtype_limits(image, clip_negative=False) + r = 0.5 * (imax - imin) + m, s = _mean_std(image, window_size) + return m * (1 + k * ((s / r) - 1)) + + +def apply_hysteresis_threshold(image, low, high): + """Apply hysteresis thresholding to ``image``. + + This algorithm finds regions where ``image`` is greater than ``high`` + OR ``image`` is greater than ``low`` *and* that region is connected to + a region greater than ``high``. + + Parameters + ---------- + image : (M[, ...]) ndarray + Grayscale input image. + low : float, or array of same shape as ``image`` + Lower threshold. + high : float, or array of same shape as ``image`` + Higher threshold. 
+ + Returns + ------- + thresholded : (M[, ...]) array of bool + Array in which ``True`` indicates the locations where ``image`` + was above the hysteresis threshold. + + Examples + -------- + >>> image = np.array([1, 2, 3, 2, 1, 2, 1, 3, 2]) + >>> apply_hysteresis_threshold(image, 1.5, 2.5).astype(int) + array([0, 1, 1, 1, 0, 0, 0, 1, 1]) + + References + ---------- + .. [1] J. Canny. A computational approach to edge detection. + IEEE Transactions on Pattern Analysis and Machine Intelligence. + 1986; vol. 8, pp.679-698. + :DOI:`10.1109/TPAMI.1986.4767851` + """ + low = np.clip(low, a_min=None, a_max=high) # ensure low always below high + mask_low = image > low + mask_high = image > high + # Connected components of mask_low + labels_low, num_labels = ndi.label(mask_low) + # Check which connected components contain pixels from mask_high + sums = ndi.sum(mask_high, labels_low, np.arange(num_labels + 1)) + connected_to_high = sums > 0 + thresholded = connected_to_high[labels_low] + return thresholded + + +def threshold_multiotsu(image=None, classes=3, nbins=256, *, hist=None): + r"""Generate `classes`-1 threshold values to divide gray levels in `image`, + following Otsu's method for multiple classes. + + The threshold values are chosen to maximize the total sum of pairwise + variances between the thresholded graylevel classes. See Notes and [1]_ + for more details. + + Either image or hist must be provided. If hist is provided, the actual + histogram of the image is ignored. + + Parameters + ---------- + image : (M, N[, ...]) ndarray, optional + Grayscale input image. + classes : int, optional + Number of classes to be thresholded, i.e. the number of resulting + regions. + nbins : int, optional + Number of bins used to calculate the histogram. This value is ignored + for integer arrays. + hist : array, or 2-tuple of arrays, optional + Histogram from which to determine the threshold, and optionally a + corresponding array of bin center intensities. 
If no hist provided, + this function will compute it from the image (see notes). + + Returns + ------- + thresh : array + Array containing the threshold values for the desired classes. + + Raises + ------ + ValueError + If ``image`` contains less grayscale value then the desired + number of classes. + + Notes + ----- + This implementation relies on a Cython function whose complexity + is :math:`O\left(\frac{Ch^{C-1}}{(C-1)!}\right)`, where :math:`h` + is the number of histogram bins and :math:`C` is the number of + classes desired. + + If no hist is given, this function will make use of + `skimage.exposure.histogram`, which behaves differently than + `np.histogram`. While both allowed, use the former for consistent + behaviour. + + The input image must be grayscale. + + References + ---------- + .. [1] Liao, P-S., Chen, T-S. and Chung, P-C., "A fast algorithm for + multilevel thresholding", Journal of Information Science and + Engineering 17 (5): 713-727, 2001. Available at: + + :DOI:`10.6688/JISE.2001.17.5.1` + .. [2] Tosa, Y., "Multi-Otsu Threshold", a java plugin for ImageJ. + Available at: + + + Examples + -------- + >>> from skimage.color import label2rgb + >>> from skimage import data + >>> image = data.camera() + >>> thresholds = threshold_multiotsu(image) + >>> regions = np.digitize(image, bins=thresholds) + >>> regions_colorized = label2rgb(regions) + """ + if image is not None and image.ndim > 2 and image.shape[-1] in (3, 4): + warn( + f'threshold_multiotsu is expected to work correctly only for ' + f'grayscale images; image shape {image.shape} looks like ' + f'that of an RGB image.' + ) + + # calculating the histogram and the probability of each gray level. + prob, bin_centers = _validate_image_histogram(image, hist, nbins, normalize=True) + prob = prob.astype('float32', copy=False) + + nvalues = np.count_nonzero(prob) + if nvalues < classes: + msg = ( + f'After discretization into bins, the input image has ' + f'only {nvalues} different values. 
It cannot be thresholded ' + f'in {classes} classes. If there are more unique values ' + f'before discretization, try increasing the number of bins ' + f'(`nbins`).' + ) + raise ValueError(msg) + elif nvalues == classes: + thresh_idx = np.flatnonzero(prob)[:-1] + else: + # Get threshold indices + try: + thresh_idx = _get_multiotsu_thresh_indices_lut(prob, classes - 1) + except MemoryError: + # Don't use LUT if the number of bins is too large (if the + # image is uint16 for example): in this case, the + # allocated memory is too large. + thresh_idx = _get_multiotsu_thresh_indices(prob, classes - 1) + + thresh = bin_centers[thresh_idx] + + return thresh diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/future/__init__.pyi b/vlmpy310/lib/python3.10/site-packages/skimage/future/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..001d5a3929099d48949fdbfff765093431520ba7 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/future/__init__.pyi @@ -0,0 +1,14 @@ +# Explicitly setting `__all__` is necessary for type inference engines +# to know which symbols are exported. 
See +# https://peps.python.org/pep-0484/#stub-files + +__all__ = [ + "manual_lasso_segmentation", + "manual_polygon_segmentation", + "fit_segmenter", + "predict_segmenter", + "TrainableSegmenter", +] + +from .manual_segmentation import manual_lasso_segmentation, manual_polygon_segmentation +from .trainable_segmentation import TrainableSegmenter, fit_segmenter, predict_segmenter diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/future/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/future/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef412d2e7c87a9e942195c6721b42bb0cc3a973a Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/future/__pycache__/__init__.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/future/__pycache__/manual_segmentation.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/future/__pycache__/manual_segmentation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03ae3adb96d716ca7f451ed4211fad5485eb1078 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/future/__pycache__/manual_segmentation.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/future/__pycache__/trainable_segmentation.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/future/__pycache__/trainable_segmentation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b562a152ae44325fa45ecfc21176967f7f924556 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/future/__pycache__/trainable_segmentation.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/future/tests/__init__.py b/vlmpy310/lib/python3.10/site-packages/skimage/future/tests/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/future/tests/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/future/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bad637e35c75d277e6fe88bf48754a366798a618 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/future/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/future/tests/__pycache__/test_trainable_segmentation.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/future/tests/__pycache__/test_trainable_segmentation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fa401150048d622820874d2cb8b354b312cd51c Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/future/tests/__pycache__/test_trainable_segmentation.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/future/tests/test_trainable_segmentation.py b/vlmpy310/lib/python3.10/site-packages/skimage/future/tests/test_trainable_segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..2c33f9f68692ca447b42c9d12beb2295359aa3e7 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/future/tests/test_trainable_segmentation.py @@ -0,0 +1,144 @@ +from functools import partial + +import numpy as np +import pytest +from scipy import spatial + +from skimage.future import fit_segmenter, predict_segmenter, TrainableSegmenter +from skimage.feature import multiscale_basic_features + + +class DummyNNClassifier: + def fit(self, X, labels): + self.X = X + self.labels = labels + self.tree = spatial.cKDTree(self.X) + + def predict(self, X): + # mimic check in scikit-learn for number of features + if X.shape[1] != self.X.shape[1]: + raise ValueError( + f"Expected 
{self.X.shape[1]} features but got {X.shape[1]}." + ) + nearest_neighbors = self.tree.query(X)[1] + return self.labels[nearest_neighbors] + + +def test_trainable_segmentation_singlechannel(): + img = np.zeros((20, 20)) + img[:10] = 1 + img += 0.05 * np.random.randn(*img.shape) + labels = np.zeros_like(img, dtype=np.uint8) + labels[:2] = 1 + labels[-2:] = 2 + clf = DummyNNClassifier() + features_func = partial( + multiscale_basic_features, + edges=False, + texture=False, + sigma_min=0.5, + sigma_max=2, + ) + features = features_func(img) + clf = fit_segmenter(labels, features, clf) + out = predict_segmenter(features, clf) + assert np.all(out[:10] == 1) + assert np.all(out[10:] == 2) + + +def test_trainable_segmentation_multichannel(): + img = np.zeros((20, 20, 3)) + img[:10] = 1 + img += 0.05 * np.random.randn(*img.shape) + labels = np.zeros_like(img[..., 0], dtype=np.uint8) + labels[:2] = 1 + labels[-2:] = 2 + clf = DummyNNClassifier() + features = multiscale_basic_features( + img, + edges=False, + texture=False, + sigma_min=0.5, + sigma_max=2, + channel_axis=-1, + ) + clf = fit_segmenter(labels, features, clf) + out = predict_segmenter(features, clf) + assert np.all(out[:10] == 1) + assert np.all(out[10:] == 2) + + +def test_trainable_segmentation_predict(): + img = np.zeros((20, 20)) + img[:10] = 1 + img += 0.05 * np.random.randn(*img.shape) + labels = np.zeros_like(img, dtype=np.uint8) + labels[:2] = 1 + labels[-2:] = 2 + clf = DummyNNClassifier() + features_func = partial( + multiscale_basic_features, + edges=False, + texture=False, + sigma_min=0.5, + sigma_max=2, + ) + features = features_func(img) + clf = fit_segmenter(labels, features, clf) + + test_features = np.random.random((5, 20, 20)) + with pytest.raises(ValueError) as err: + _ = predict_segmenter(test_features, clf) + assert 'type of features' in str(err.value) + + +def test_trainable_segmentation_oo(): + """Test the object-oriented interface using the TrainableSegmenter class.""" + + img = 
np.zeros((20, 20)) + img[:10] = 1 + img += 0.05 * np.random.randn(*img.shape) + labels = np.zeros_like(img, dtype=np.uint8) + labels[:2] = 1 + labels[-2:] = 2 + clf = DummyNNClassifier() + features_func = partial( + multiscale_basic_features, + edges=False, + texture=False, + sigma_min=0.5, + sigma_max=2, + ) + segmenter = TrainableSegmenter(clf=clf, features_func=features_func) + segmenter.fit(img, labels) + + # model has been fitted + np.testing.assert_array_almost_equal(clf.labels, labels[labels > 0]) + + out = segmenter.predict(img) + assert np.all(out[:10] == 1) + assert np.all(out[10:] == 2) + + # test multichannel model + img_with_channels = np.stack((img, img.T), axis=-1) + features_func = partial( + multiscale_basic_features, + channel_axis=-1, + ) + segmenter = TrainableSegmenter(clf=clf, features_func=features_func) + segmenter.fit(img_with_channels, labels) + + # model has been fitted + np.testing.assert_array_almost_equal(clf.labels, labels[labels > 0]) + + out = segmenter.predict(img_with_channels) + assert np.all(out[:10] == 1) + assert np.all(out[10:] == 2) + + # test wrong number of dimensions + with pytest.raises(ValueError): + segmenter.predict(np.expand_dims(img_with_channels, axis=-1)) + + # test wrong number of channels + with pytest.raises(ValueError): + segmenter.predict(np.concatenate([img_with_channels] * 2, axis=-1))