diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..512af1eba3b56adc84b0b81e4a4aa416a4fe9927 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78f030954afc76deaf39e82c065b20e0a856531b Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/comm_analysis.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/comm_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4be0011a737eaaaaaed7f0bace883c9112428cdf Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/comm_analysis.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/comms.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/comms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..227ac6b27ff25995d661a29d5e8686dca75e48fd Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/comms.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ea17ed39e5d9f478e6c72a38ca01e1f8b675684 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84169760120608defa0c8d1dd461c72729da55c1 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/constant_folding.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/constant_folding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1aaec2467c09bda6b86bba5a210038817d3ad43 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/constant_folding.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/coordinate_descent_tuner.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/coordinate_descent_tuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2a6877dd19ca9c91bfea4e8bb7caf996271beb8 Binary files 
/dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/coordinate_descent_tuner.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fdafbf2431b9719502f140c9ddc110c3418ef82 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/debug.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/debug.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eeaee9db9d670b5d24aef2a5ec711d49527b3a8d Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/debug.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/decomposition.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/decomposition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d106062e2fef07077ec1991d5dda39b28b389fe Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/decomposition.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a41b5ead18bed01d41b0b4bd0a1a0f1f770a13f7 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/freezing.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/freezing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76b1fe8cb9e95f73a523f7a7592342c4b03a6f58 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/freezing.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f16db4dbb651d24bd43b4210b1d4d9959c794ca5 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79131ad71922d7771b08d24e4384a247bc0302a8 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..bb8460429ff0369f4f8cb5c846026a261a4aa143 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/metrics.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7034bd536a536f7e820d65ba03fd3f4ab28b8122 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/metrics.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/pattern_matcher.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/pattern_matcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b82a5f47cbba9ca0774c08460fafbf3c99afcae Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/pattern_matcher.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/sizevars.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/sizevars.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d19cdd5f2496cf5d4e663c2c70d014c98b4a6814 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/sizevars.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_operators.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_operators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dccaade515ff5ec97869e093d1bca38827a077fa Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_operators.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_helpers.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b65a0548f6c1a576a8ad075828c8d332a7da2306 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_helpers.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_heuristics.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_heuristics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..322ec6b337ba8a5aeadf8fba1925fb8a0350899f Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_heuristics.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/virtualized.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/virtualized.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cf0b9a3ad8ea12d14f050aa922a2451091fd85f Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/virtualized.cpython-310.pyc differ diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a983bf8ca56ae4ad6a888b614170e7daf967ddd Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/common.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/common.py new file mode 100644 index 0000000000000000000000000000000000000000..8eba8feaee149925d662728e993f8a076cafff97 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/common.py @@ -0,0 +1,1295 @@ +import contextlib +import dataclasses +import functools +import itertools +import logging +import operator +import re +from collections import namedtuple +from itertools import chain +from typing import ( + Any, + Callable, + ClassVar, + Dict, + List, + NamedTuple, + Optional, + Set, + Tuple, + Union, +) + +import sympy +from sympy.printing.printer import Printer + +import torch +import torch.fx +from torch.utils._sympy.value_ranges import ValueRanges + +from .. import config, metrics +from ..utils import ( + DeferredLineBase, + do_bench, + free_symbol_startswith, + IndentedBuffer, + sympy_dot, + sympy_subs, + sympy_symbol, + unique, +) +from ..virtualized import ops, OpsValue, V + +schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") + + +def data_type_logger(msg): + if schedule_log.isEnabledFor(logging.DEBUG): + schedule_log.debug("Data type propagation: %s", msg) + + +TensorArg = namedtuple("TensorArg", ["name", "buffer", "dtype", "check_alignment"]) +SizeArg = namedtuple("SizeArg", ["name", "expr"]) + +DeviceCodegen = namedtuple("DeviceCodegen", ["scheduling", "wrapper_codegen"]) +device_codegens: Dict[str, DeviceCodegen] = {} + + +# The code generated by Inductor consists of two main parts: kernel code and wrapper code. +# For any new backend looking to integrate with Inductor, customization of these two main +# parts are necessary to generate its specific code. +# +# Kernel code generation is determined by different Scheduling. Consequently, a new +# backend needs to provide a custom Scheduling for its unique kernel code generation. Currently, +# CppScheduling and TritonScheduling serve the C++/OpenMP and Triton backends, respectively. +# +# For the Wrapper, Inductor provides a WrapperCodeGen class to generate the Python wrapper code +# that bridges kernels. This allows out-of-tree backends to inherit from WrapperCodeGen, +# and override specific member functions to create backend-specific Python wrapper code. +# +# Other classes, such as CppKernel and TritonKernel, used for code generation, typically form part +# of the logic for either Scheduling or WrapperCodeGen. So the Scheduling and WrapperCodeGen interfaces +# provide flexibility to the backend. A backend can choose to implement these classes from scratch, +# or reuse them by extending and overriding as necessary. And Inductor provides the registration API, +# register_backend_for_device, to equip a new backend at runtime. +# +# Intel has developed a new backend on top of Triton to support Intel GPUs, leveraging these interfaces. 
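+# As a rough sketch of the registration flow (the device string and the MyScheduling /
+# MyWrapperCodeGen classes below are hypothetical placeholders, not part of this file):
+#
+#     from torch._inductor.codegen.common import (
+#         get_scheduling_for_device,
+#         register_backend_for_device,
+#     )
+#     register_backend_for_device("my_device", MyScheduling, MyWrapperCodeGen)
+#     assert get_scheduling_for_device("my_device") is MyScheduling
+#
+# Intel's backend registers its own Scheduling and WrapperCodeGen through this same API.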
+# This backend can be used as a reference: +# https://github.com/intel/intel-extension-for-pytorch/blob/5dcc9d57e5422cf295e1a1ee97896d6b6a554a85/intel_extension_for_pytorch/_inductor/__init__.py#L9 +def register_backend_for_device( + device: str, device_scheduling: type, device_wrapper_codegen: type +): + device_codegens[device] = DeviceCodegen(device_scheduling, device_wrapper_codegen) + + +def get_scheduling_for_device(device: str): + return device_codegens[device].scheduling if device in device_codegens else None + + +def get_wrapper_codegen_for_device(device: str): + return ( + device_codegens[device].wrapper_codegen if device in device_codegens else None + ) + + +def index_prevent_reordering(index: List[sympy.Expr], index_vars, sizes): + from ..ir import FlexibleLayout + + # added contiguous index prevents reordering + return [*index, sympy_dot(index_vars, FlexibleLayout.contiguous_strides(sizes))] + + +@functools.lru_cache(None) +def boolean_ops(): + return ( + "is_inf", + "is_nan", + "bitwise_xor", + "logical_not", + "signbit", + "le", + "lt", + "ge", + "gt", + "eq", + "ne", + ) + + +DTYPE_TO_COMPUTATION_DTYPE = { + torch.bfloat16: torch.float, + torch.float16: torch.float, + **{ + dtype: dtype + for dtype in [ + torch.bool, + torch.float32, + torch.float64, + torch.int8, + torch.int16, + torch.int32, + torch.int64, + torch.uint8, + ] + }, +} + + +class DataTypePropagation: + def __init__(self, body) -> None: + self.body = body + self.graphs: Dict[Union[Callable[..., Any], str], Any] = { + "root": body.root_block.graph + } + for k, v in body.subblocks.items(): + self.graphs[k] = v.graph + + def deduce_node_dtype_by_inputs(self, node: torch.fx.Node): + inputs = node.all_input_nodes + input_nodes = [ + n for n in inputs if isinstance(n, torch.fx.Node) and n.op != "placeholder" + ] + if len(input_nodes) == 0: + return None + + all_input_nodes_propogated = all( + OptimizationContext.key in n.meta + and n.meta[OptimizationContext.key].dtype is not None + for n in input_nodes + ) + if not all_input_nodes_propogated: + return None + + return functools.reduce( + torch.promote_types, + [n.meta[OptimizationContext.key].dtype for n in input_nodes], + ) + + def deduce_node_dtype_by_subgraph(self, node: torch.fx.Node): + sub_graph = self.graphs[node.target] + dtype = self.propagate_graph(sub_graph) + assert dtype + return dtype + + def deduce_node_dtype(self, node: torch.fx.Node): + if node.target in boolean_ops(): + return torch.bool + + if node.op == "placeholder": + return None + + if node.target == "output": + # we can infer output node if it only have 1 arg + if len(node.args) != 1: + return None + + if node.target in ( + "to_dtype", + "index_expr", + ): + return node.args[-1] + + if node.target in ( + "rand", + "randn", + ): + return torch.float + + if node.target in ( + "get_index", + "index_expr", + ): + return torch.int64 + + if node.target in ( + "load", + "store", + "store_reduction", + ): + buf_name = node.args[1] + return V.graph.get_dtype(buf_name) + + if node.target == operator.getitem: + return self.deduce_node_dtype(node.args[0]) + + assert isinstance(node.target, str) + + if node.target == "reduction": + return node.args[1] + + if node.target == "constant": + return DTYPE_TO_COMPUTATION_DTYPE[node.args[-1]] + + if node.target.startswith("masked_subblock"): + return self.deduce_node_dtype_by_subgraph(node) + + return self.deduce_node_dtype_by_inputs(node) + + def propagate_graph(self, graph: torch.fx.Graph): + assert graph.nodes + graph_dtype = None + # For masked_subblock, we 
use output's dtype to represent + # the dtype of this subgraph. For other cases, graph_dtype + # might be None + for node in graph.nodes: + if OptimizationContext.key in node.meta: + opt_ctx = node.meta[OptimizationContext.key] + else: + opt_ctx = OptimizationContext() + + opt_ctx.dtype = self.deduce_node_dtype(node) + node.meta[OptimizationContext.key] = opt_ctx + if node.target == "output": + graph_dtype = opt_ctx.dtype + return graph_dtype + + def propagate(self): + self.propagate_graph(self.graphs["root"]) + + @classmethod + def propagate_loopbody(cls, body): + return cls(body).propagate() + + @classmethod + def propagate_scheduler_node(cls, node): + from ..ir import LoopBody + from ..scheduler import SchedulerNode + + assert isinstance(node, SchedulerNode) + assert isinstance(node._body, LoopBody) + DataTypePropagation.propagate_loopbody(node._body) + + +class ExprPrinter(Printer): + @staticmethod + def paren(string): + def all_in_parens(string): + if string[0] != "(" or len(string) < 2: + return False + count = 1 + for i, char in enumerate(string[1:]): + if char == "(": + count += 1 + elif char == ")": + count -= 1 + if count == 0 and i != len(string) - 2: + return False + assert count == 0 + return True + + if ( + isinstance(string, CSEVariable) + or re.match(r"^[a-z0-9_.]+$", string, re.I) + or re.match(r"^\([^)]*\)$", string, re.I) + or string == "" + ): + return string + # don't put extra parens for strings that are already wrapped in parens + if all_in_parens(string): + return string + return f"({string})" + + def _print_Infinity(self, expr): + return "math.inf" + + def _print_NegativeInfinity(self, expr): + return "-math.inf" + + def _print_Relational(self, expr): + return f" {expr.rel_op} ".join(map(self.paren, map(self._print, expr.args))) + + def _print_Mul(self, expr): + return "*".join(map(self.paren, map(self._print, expr.args))) + + def _print_Add(self, expr): + return " + ".join(map(self.paren, map(self._print, expr.args))) + + def _print_Mod(self, expr): + return " % ".join(map(self.paren, map(self._print, expr.args))) + + def _print_FloorDiv(self, expr): + raise NotImplementedError(f"_print_FloorDiv not implemented for {type(self)}") + + def _print_CleanDiv(self, expr): + return self._print_FloorDiv(expr) + + def _print_GreaterThan(self, expr): + # GreaterThan: >= + # StrictlyGreaterThan: > + # Go figure... + return " >= ".join(map(self.paren, map(self._print, expr.args))) + + def _print_align(self, expr): + assert len(expr.args) == 1 + return f"align({self._print(expr.args[0])})" + + +class PythonPrinter(ExprPrinter): + def _print_ModularIndexing(self, expr): + x, div, mod = expr.args + x = self.paren(self.doprint(x)) + div = self.paren(self.doprint(div)) + mod = self.paren(self.doprint(mod)) + if div != "1": + x = f"({x} // {div})" + return f"{x} % {mod}" + + def _print_FloorDiv(self, expr): + x, div = expr.args + x = self.paren(self.doprint(x)) + div = self.paren(self.doprint(div)) + return f"({x} // {div})" + + def _helper_sqrt(self, expr): + return f"math.sqrt({self._print(expr)})" + + def _print_Pow(self, expr): + # Pow() confuses triton + base, exp = expr.args + # NB: Remember this is sizevar computation! You don't typically + # expect to have to do floating point computation including exponents + # in sizevar compute. Instead of adding support for floating + # point pow, you should make upstream retranslate the Sympy expression + # into Tensor expressions earlier and do that instead. 
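+        # For illustration (with a hypothetical sizevar s0): under this handling s0**2
+        # prints as "s0*s0", s0**0.5 as "math.sqrt(s0)" and s0**-0.5 as "1/math.sqrt(s0)";
+        # only integer exponents and +/-0.5 are handled here.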
+ if exp == 0.5: + return self._helper_sqrt(base) + elif exp == -0.5: + return "1/" + self._helper_sqrt(base) + base = self._print(base) + assert exp == int(exp), exp + exp = int(exp) + if exp > 0: + return "*".join([self.paren(base)] * exp) + elif exp < 0: + return "1/" + self.paren("*".join([self.paren(base)] * abs(exp))) + else: # exp == 0 + return "1" + + def _print_floor(self, expr): + assert len(expr.args) == 1 + return f"math.floor({self._print(expr.args[0])})" + + def _print_ceiling(self, expr): + assert len(expr.args) == 1 + return f"math.ceil({self._print(expr.args[0])})" + + def _print_Abs(self, expr): + assert len(expr.args) == 1 + return f"abs({self._print(expr.args[0])})" + + def _print_Max(self, expr): + assert len(expr.args) >= 2 + return f"max({', '.join(map(self._print, expr.args))})" + + def _print_Min(self, expr): + assert len(expr.args) >= 2 + return f"min({', '.join(map(self._print, expr.args))})" + + +class OpOverrides: + def __init__(self, parent): + super().__init__() + self._parent = parent + + def __getattr__(self, item): + return getattr(self._parent, item) + + @staticmethod + def identity(value): + # used to trigger cse + return value + + @staticmethod + def constant(value, dtype): + return repr(value) + + @staticmethod + def reciprocal(x): + return ops.truediv("1", x) + + @staticmethod + def square(x): + return ops.mul(x, x) + + @staticmethod + def bitwise_not(x): + return f"~{ExprPrinter.paren(x)}" + + @staticmethod + def logical_not(a): + return f"{ExprPrinter.paren(a)} == 0" + + @staticmethod + def bitwise_and(x, y): + return f"{ExprPrinter.paren(x)} & {ExprPrinter.paren(y)}" + + @staticmethod + def bitwise_or(x, y): + return f"{ExprPrinter.paren(x)} | {ExprPrinter.paren(y)}" + + @staticmethod + def bitwise_xor(x, y): + return f"{ExprPrinter.paren(x)} ^ {ExprPrinter.paren(y)}" + + @staticmethod + def bitwise_left_shift(x, y): + return f"{ExprPrinter.paren(x)} << {ExprPrinter.paren(y)}" + + # TODO(fdrocha): this is currently not being used anywhere, + # pending on moving triton pin past 972b761 + @staticmethod + def bitwise_right_shift(x, y): + return f"{ExprPrinter.paren(x)} >> {ExprPrinter.paren(y)}" + + @staticmethod + def remainder(a, b): + r = ops.mod(a, b) + return ops.where(f"(({r} != 0) & (({r} < 0) != ({b} < 0)))", ops.add(r, b), r) + + @staticmethod + def load_seed(name, offset): + return ops.load(name, sympy.Integer(offset)) + + +class DeferredLine(DeferredLineBase): + """A line that can be 'unwritten' by adding name to V.graph.removed_buffers""" + + def __init__(self, name, line): + super().__init__(line) + self.name = name + + def __call__(self): + if all( + self.name not in x + for x in ( + V.graph.removed_buffers, + V.kernel.removed_buffers, + V.graph.inplaced_to_remove, + V.kernel.inplaced_to_remove, + ) + ): + return self.line + return None + + def _new_line(self, line): + return DeferredLine(self.name, line) + + +class BracesBuffer(IndentedBuffer): + def indent(self, offset=1): + @contextlib.contextmanager + def ctx(): + for _ in range(offset): + self.writeline("{") + self._indent += 1 + for _ in range(-offset): + self._indent -= 1 + self.writeline("}") + yield + for _ in range(-offset): + self.writeline("{") + self._indent += 1 + for _ in range(offset): + self._indent -= 1 + self.writeline("}") + + return ctx() + + +class InplacedBuffer(NamedTuple): + inner_name: str + other_names: List[str] + + +class KernelArgs: + @staticmethod + def _lookup(prefix, odict, name): + assert isinstance(name, (str, sympy.Symbol)) + if name not in odict: + 
odict[name] = f"{prefix}{len(odict)}" + return odict[name] + + def __init__(self, sizevars=None): + self.input_buffers = dict() + self.output_buffers = dict() + self.inplace_buffers = dict() + self.sizevars = sizevars or dict() + + def __repr__(self): + return "KernelArgs({})".format( + ", ".join( + map( + repr, + [ + self.input_buffers, + self.output_buffers, + self.inplace_buffers, + self.sizevars, + ], + ) + ) + ) + + def _buffer_is_marked_removed(self, name): + return isinstance(name, str) and name.startswith("REMOVED") + + def input(self, name): + if V.graph.scheduler: + name = V.graph.scheduler.mutation_real_name.get(name, name) + assert name not in V.graph.removed_buffers, name + if name in self.output_buffers: + return self.output_buffers[name] + if name in self.inplace_buffers: + return self.inplace_buffers[name].inner_name + if name.startswith("seed"): + return self._lookup("seed", self.input_buffers, name) + return self._lookup("in_ptr", self.input_buffers, name) + + def output(self, name): + if V.graph.scheduler: + name = V.graph.scheduler.mutation_real_name.get(name, name) + assert name not in V.graph.removed_buffers, name + if name in self.inplace_buffers: + return self.inplace_buffers[name].inner_name + return self._lookup("out_ptr", self.output_buffers, name) + + def make_inplace(self, input_name, output_name): + assert output_name not in self.inplace_buffers + if input_name in self.inplace_buffers: + buf = self.inplace_buffers[input_name] + buf.other_names.append(output_name) + self.inplace_buffers[output_name] = buf + else: + buf = InplacedBuffer( + f"in_out_ptr{len(unique(self.inplace_buffers.values()))}", + [input_name, output_name], + ) + self.inplace_buffers[input_name] = buf + self.inplace_buffers[output_name] = buf + + def seed_offset(self, name, value): + if value in self.sizevars: + return self.sizevars[value] + if name in self.sizevars.values(): + name = ( + f"{name}{sum(1 for v in self.sizevars.values() if v.startswith(name))}" + ) + self.sizevars[value] = name + return name + + def size(self, name): + if str(name) == "seed": + self.sizevars["seed"] = "seed" + return "seed" + return self._lookup("ks", self.sizevars, name) + + def call_names(self): + return chain( + self.input_buffers.keys(), self.output_buffers.keys(), self.sizevars.keys() + ) + + def wrap_ptr_arg(self, buf, dtype): + return f"c_void_p({buf}.data_ptr())" + + def wrap_size_arg(self, size): + return f"c_long({size})" + + def cpp_argdefs(self): + from .cpp import DTYPE_TO_CPP, INDEX_TYPE + + call_args = [] + arg_defs = [] + arg_types = [] + for inplaced in unique(self.inplace_buffers.values()): + if self._buffer_is_marked_removed(inplaced): + continue + outer = inplaced.other_names[-1] + inner = inplaced.inner_name + dtype = V.graph.get_dtype(outer) + cpp_dtype = DTYPE_TO_CPP[dtype] + arg_defs.append(f"{cpp_dtype}* {inner}") + call_args.append(self.wrap_ptr_arg(outer, dtype)) + arg_types.append(f"{cpp_dtype}*") + for outer, inner in self.input_buffers.items(): + if outer in self.inplace_buffers: + continue + dtype = V.graph.get_dtype(outer) + cpp_dtype = DTYPE_TO_CPP[dtype] + arg_defs.append(f"const {cpp_dtype}* {inner}") + call_args.append(self.wrap_ptr_arg(outer, dtype)) + arg_types.append(f"const {cpp_dtype}*") + for outer, inner in self.output_buffers.items(): + if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner): + continue + dtype = V.graph.get_dtype(outer) + cpp_dtype = DTYPE_TO_CPP[dtype] + arg_defs.append(f"{cpp_dtype}* {inner}") + 
call_args.append(self.wrap_ptr_arg(outer, dtype)) + arg_types.append(f"{cpp_dtype}*") + for outer, inner in self.sizevars.items(): + arg_defs.append(f"const {INDEX_TYPE} {inner}") + call_args.append(self.wrap_size_arg(outer)) + arg_types.append(f"const {INDEX_TYPE}") + return arg_defs, call_args, arg_types + + def python_argdefs(self): + arg_defs = [] + call_args = [] + precompile_args: List[Union[TensorArg, SizeArg]] = [] + for inplaced in unique(self.inplace_buffers.values()): + if self._buffer_is_marked_removed(inplaced): + continue + arg_defs.append(inplaced.inner_name) + call_args.append(inplaced.other_names[-1]) + precompile_args.append( + TensorArg( + inplaced.inner_name, + inplaced.other_names[-1], + V.graph.get_dtype(inplaced.other_names[-1]), + True, + ) + ) + for outer, inner in chain( + self.input_buffers.items(), self.output_buffers.items() + ): + if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner): + continue + arg_defs.append(inner) + call_args.append(outer) + precompile_args.append( + TensorArg(inner, outer, V.graph.get_dtype(outer), True) + ) + for outer, inner in self.sizevars.items(): + arg_defs.append(inner) + call_args.append(outer) + precompile_args.append(SizeArg(inner, outer)) + + return arg_defs, call_args, precompile_args + + def aliases(self): + for inplaced in unique(self.inplace_buffers.values()): + if self._buffer_is_marked_removed(inplaced): + continue + for other in inplaced.other_names: + if ( + other in V.graph.inplaced_to_remove + or other in V.kernel.inplaced_to_remove + ): + continue + if other in self.input_buffers: + yield self.input_buffers[other], inplaced.inner_name + if other in self.output_buffers: + yield self.output_buffers[other], inplaced.inner_name + + def is_removed(self, name): + def _is_removed(name, buffers): + return name not in buffers or self._buffer_is_marked_removed(buffers[name]) + + return _is_removed(name, self.output_buffers) and _is_removed( + name, self.inplace_buffers + ) + + # Includes inplace buffers, excludes removed buffers. Essentially, + # after you do a call into this kernel, which buffers actually contain + # updated data? Modeled off of python_argdefs. + def live_output_buffers(self): + live_outs = set() + for inplaced in unique(self.inplace_buffers.values()): + if self._buffer_is_marked_removed(inplaced): + continue + live_outs.add(inplaced.other_names[-1]) + for outer, inner in self.output_buffers.items(): + if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner): + continue + live_outs.add(outer) + return live_outs + + +class CSEVariable: + """A CSEVariable is just a name for an expression but it is useful to be able to annotate them on a backend dependent basis. + To do so, the backends can simply overload `Kernel.create_cse_var` + The "CSEVariable.update_on_args" method gives you a hook for annotations + See example of TritonCSEVariable in triton.py + """ + + def __init__(self, name, bounds: ValueRanges): + assert isinstance(bounds, ValueRanges) + self.name = name + self.bounds = bounds + + def __str__(self): + return self.name + + def __hash__(self) -> int: + return hash(self.name) + + def __eq__(self, other) -> bool: + return type(other) == type(self) and other.name == self.name + + def update_on_args(self, name, args, kwargs): + pass + + +class CppWrapperKernelArgs(KernelArgs): + def wrap_ptr_arg(self, buf, dtype): + from .cpp import DTYPE_TO_CPP + + if config.aot_inductor.abi_compatible: + # In the abi_compatible model, we just return the buf here. 
+ # We will form correct call args later in wrapper.generate_kernel_all. + return buf + else: + return f"({DTYPE_TO_CPP[dtype]}*)({buf}.data_ptr())" + + def wrap_size_arg(self, size): + return f"{size}" + + +class CSE: + """Common subexpression elimination""" + + def __init__( + self, + prefix="", + suffix="", + name_prefix="tmp", + iter_buffers=None, + store_cache=None, + reduction_cache=None, + varname_map=None, + ): + self.prefix = prefix + self.suffix = suffix + self.cache = {} + self.name_prefix = name_prefix + self.store_cache = store_cache or {} + self.reduction_cache = reduction_cache or {} + self.iter_buffer_ids = iter_buffers or itertools.count() + self.invalidated_stores = set() + self.varname_map = varname_map or {} + + def invalidate(self, keep_vars: Set[str]): + for name, tmp in list(self.store_cache.items()): + if tmp not in keep_vars: + del self.store_cache[name] + self.invalidated_stores.add(name) + self.cache = {k: v for k, v in self.cache.items() if v in keep_vars} + + def clone(self): + # Note(fdrocha): reduction_cache is not being cloned, not sure if this is intentional + return CSE( + prefix=self.prefix, + suffix=self.suffix, + name_prefix=self.name_prefix, + iter_buffers=self.iter_buffer_ids, + store_cache=self.store_cache, + varname_map=self.varname_map, + ) + + def generate( + self, + buffer: IndentedBuffer, + expr: Union[str, CSEVariable, OpsValue], + *, + bounds: ValueRanges = ValueRanges.unknown(), + write=True, + assignment=True, + ) -> CSEVariable: + if isinstance(expr, OpsValue): + expr = expr.value + + assert isinstance(expr, (str, CSEVariable)), type(expr) + assert write or assignment + if isinstance(expr, CSEVariable): + # If the expressions were always created with all the information, we could + # assert expr.bounds == bounds, but sometimes the expression is created + # with the loose ValueRanges.unknown(), so we need to tighten the bounds + expr.bounds = expr.bounds.tighten(bounds) + return expr + cache_key = expr + var = self.cache.get(cache_key, None) + if not var: + var = self.newvar(bounds) if assignment else None + self.cache[cache_key] = var + if write: + if V.kernel.current_node: + V.kernel.current_node.codegen_originating_info( + buffer, only_once=True + ) + if assignment: + line = f"{self.prefix}{var} = {expr}{self.suffix}" + else: + line = f"{expr}{self.suffix}" + buffer.writeline(line) + else: + var.bounds = var.bounds.tighten(bounds) + + return var + + def newvar(self, bounds: ValueRanges = ValueRanges.unknown()) -> CSEVariable: + var_name = f"{self.name_prefix}{next(self.iter_buffer_ids)}" + var = V.kernel.create_cse_var(var_name, bounds) + self.varname_map[var_name] = var + return var + + +class IndirectAssertLine(DeferredLineBase): + def __init__(self, line, assert_fn, var, mask, size_map): + self.var = var + self.mask = mask + self.line = line + self.assert_fn = assert_fn + self.size_map = size_map + + def __call__(self): + size, size_str = self.size_map[(self.var, self.mask)] + + # We assert if we've not been able to prove the bound + assert_min = (self.var.bounds.lower >= 0) != sympy.true + assert_max = (self.var.bounds.upper < size) != sympy.true + + # FooBar interview question + if not (assert_min or assert_max): + return None + elif assert_min and assert_max: + # The conditions need to be in parens because of Python's operator precedence. 
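+            # For example (hypothetical names): an index variable tmp3 with upper bound
+            # ks0 produces the check "(0 <= tmp3) & (tmp3 < ks0)".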
+ # It'd be less error-prone to use and/or/not, which is suported by triton + cond = f"(0 <= {self.var}) & ({self.var} < {size_str})" + cond_print = f"0 <= {self.var} < {size_str}" + elif assert_min: + cond = f"0 <= {self.var}" + cond_print = cond + else: + assert assert_max + cond = f"{self.var} < {size_str}" + cond_print = cond + + if self.mask: + cond = f"({cond}) | ~{self.mask}" + return self.line.format( + assert_fn=self.assert_fn, cond=cond, cond_print=cond_print + ) + + def _new_line(self, line): + return IndirectAssertLine( + line, self.assert_fn, self.var, self.mask, self.size_map + ) + + +class CodeGen: + def __init__(self): + super().__init__() + self.exit_stack = contextlib.ExitStack() + + def __enter__(self): + self.exit_stack.__enter__() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.exit_stack.__exit__(exc_type, exc_val, exc_tb) + + +class Kernel(CodeGen): + newvar_prefix = "" + suffix = "" + overrides = None + load_format = None + store_format = None + + def __init__(self, args=None, increase_kernel_count=True): + super().__init__() + if increase_kernel_count: + metrics.generated_kernel_count += 1 + self.args = args or KernelArgs() + self.loads = IndentedBuffer() + self.compute = IndentedBuffer() + self.stores = IndentedBuffer() + self.cse: CSE = CSE(self.newvar_prefix, self.suffix) + self.must_keep_buffers = set() + self.store_buffer_names = set() + self._load_mask = None + # set in set_current_node + self.current_node = None + self.node_to_bounds: Optional[Dict[torch.fx.Node, ValueRanges]] = None + # Upper bounds for indirect_indexing and their str representation + self.indirect_max_sizes: Dict[Tuple[str, str], Tuple[sympy.Expr, str]] = {} + + self.removed_buffers = set() + self.inplaced_to_remove = set() + + # key: the buffer to write + # value: the buffer to read and whose memory can be reused for + # the buffer specified by key + self.inplace_update_buffers = dict() + # Set minimum number of elements processed per thread. 
+ self.min_elem_per_thread = 1 + + @contextlib.contextmanager + def set_current_node(self, node): + prior = self.current_node + self.current_node = node + self.node_to_bounds = node._body.bounds().get_bounds() + try: + yield + finally: + self.current_node = prior + + @contextlib.contextmanager + def swap_buffers(self, lb, cb=None, sb=None): + if cb is None: + cb = lb + loads = self.loads + compute = self.compute + stores = self.stores + cse = self.cse + self.loads = lb + self.compute = cb + self.stores = sb + self.cse = cse.clone() + try: + yield + finally: + self.loads = loads + self.compute = compute + self.stores = stores + self.cse = cse + + def load(self, name: str, index: sympy.Expr): + raise NotImplementedError() + + def indirect_load(self, name: str, index: sympy.Expr): + """A load the depends on an index we have read""" + prior = self.loads + try: + # put the load in the compute section as it might have deps + self.loads = self.compute + return self.load(name, index) + finally: + self.loads = prior + + def store_reduction(self, name, index, value): + raise NotImplementedError() + + def store(self, name, index, value, mode=None): + raise NotImplementedError() + + def reduction(self, dtype, src_dtype, reduction_type, value): + raise NotImplementedError() + + def bucketize( + self, + values, + offsets_name: str, + offsets_size: sympy.Expr, + indexing_dtype: torch.dtype, + right: bool, + ): + """ + See [Note: Inductor bucketize op] + """ + raise NotImplementedError() + + @property + def assert_function(self) -> str: + raise NotImplementedError() + + def index_to_str(self, index: sympy.Expr) -> str: + raise NotImplementedError() + + def __enter__(self): + class CSEProxy: + self.name = "CSEProxy" + + @staticmethod + def __getattr__(name: str) -> Callable[..., CSEVariable]: # type: ignore[misc] + def inner(*args, **kwargs): + # TritonTemplateKernel has no current_node + buf_bounds = ValueRanges.unknown() + if hasattr(V.interpreter, "current_node"): + fx_node = V.interpreter.current_node + assert isinstance(self.node_to_bounds, dict) + buf_bounds = self.node_to_bounds.get( + fx_node, ValueRanges.unknown() + ) + + csevar = self.cse.generate( + self.compute, + getattr(parent_handler, name)(*args, **kwargs), # type: ignore[has-type] + bounds=buf_bounds, + ) + csevar.update_on_args(name, args, kwargs) + return csevar + + return inner + + @staticmethod + def indirect_indexing(var, size, check=True): + # Skip CSE since this doesn't return an expression + + if var.bounds.lower < 0: + new_bounds = ValueRanges.unknown() + if var.bounds != ValueRanges.unknown() and isinstance( + size, sympy.Number + ): + # Take the negative part of the bound and add size to it + # Then take union of that and the positive part + # This is a tighter bound than that of a generic ops.where, as we have info on the cond + neg = var.bounds & ValueRanges(-sympy.oo, -1) + new_bounds = ValueRanges(neg.lower + size, neg.upper + size) + # We don't have a good way of representing the empty range + if var.bounds.upper >= 0: + pos = var.bounds & ValueRanges(0, sympy.oo) + new_bounds = new_bounds | pos + + stm = ops.add(var, self.rename_indexing(size)) + # Mixed negative and non-negative + if var.bounds.upper >= 0: + lt = ops.lt(var, "0") + stm = ops.where(lt, stm, var) + new_var = self.cse.generate(self.compute, stm, bounds=new_bounds) + + new_var.update_on_args("index_wrap", (var,), {}) + var = new_var + + if self.generate_assert(check): + mask = self.load_mask(var) + + # An assertion line may have been written already, if so 
just + # update the max size. + map_key = (var, mask) + existing_size, _ = self.indirect_max_sizes.get( + map_key, (None, None) + ) + if existing_size is not None: + size = sympy.Min(size, existing_size) + else: + line = ( + '{assert_fn}({cond}, "index out of bounds: {cond_print}")' + ) + self.compute.writeline( + IndirectAssertLine( + line, + self.assert_function, + var, + mask, + self.indirect_max_sizes, + ) + ) + + self.indirect_max_sizes[map_key] = (size, self.index_to_str(size)) + return sympy_symbol(str(var)) + + @staticmethod + def load(name: str, index: sympy.Expr): + if name in self.cse.invalidated_stores: + # A load from an invalidated store requires us to + # keep the actual buffer around + V.kernel.must_keep_buffers.add(name) + if free_symbol_startswith(index, "tmp"): + return self.indirect_load(name, index) + store_cache = self.cse.store_cache + if name in store_cache: + return store_cache[name] + return self.load(name, index) + + @staticmethod + def store(name, index, value, mode=None): + self.store_buffer_names.add(name) + if mode is None: + self.cse.store_cache[name] = value + if self.current_node: + for other_name in self.current_node.get_mutations(): + self.cse.store_cache[other_name] = value + if name not in V.graph.removed_buffers: + return self.store(name, index, value, mode=mode) + + @staticmethod + def store_reduction(name, index, value): + self.store_buffer_names.add(name) + self.cse.store_cache[name] = value + if self.current_node: + for other_name in self.current_node.get_mutations(): + self.cse.store_cache[other_name] = value + + if name not in V.graph.removed_buffers: + return self.store_reduction(name, index, value) + + @staticmethod + def reduction(dtype, src_dtype, reduction_type, value): + return self.reduction(dtype, src_dtype, reduction_type, value) + + @staticmethod + def bucketize( + values, + offsets_name: str, + offsets_size: sympy.Expr, + indexing_dtype: torch.dtype, + right: bool, + ): + """ + [Note: Inductor bucketize op] + + Given values (tensor) and offsets_name (reference to the name of a 1D + tensor), calculate the bucket that each value belongs to. + + e.g. for values [-1, 0, 1, 2, 3, 4, 5, 9], offsets [0, 4, 4, 8], right=True + return = [ 0, 1, 1, 1, 1, 3, 3, 4]. + + When right == False, bucket i refers to range (offsets[i], offsets[i+1]]. + When right == True, bucket i refers to range [offsets[i], offsets[i+1]). + + Offsets must be non-decreasing or the result is undefined. + """ + return self.bucketize( + values, offsets_name, offsets_size, indexing_dtype, right + ) + + super().__enter__() + assert self.overrides + parent_handler = self.overrides(V.get_ops_handler()) + self.exit_stack.enter_context(V.set_ops_handler(CSEProxy())) + self.exit_stack.enter_context(V.set_kernel_handler(self)) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """ + Note that V.graph.scheduler can be None when codegening triton template + kernels. 
+ """ + if V.graph.scheduler: + V.graph.scheduler.remove_kernel_local_buffers() + super().__exit__(exc_type, exc_val, exc_tb) + + def generate_assert(self, check): + return (check or config.debug_index_asserts) and config.assert_indirect_indexing + + def load_mask(self, var): + # only the triton kernel requires mask + return "" + + def rename_indexing(self, index) -> sympy.Expr: + # adds the necessary kernel args for index expressions + # and renames variables in index expressions to kernel arg names + if isinstance(index, (list, tuple)): + return [self.rename_indexing(x) for x in index] + index = V.graph.sizevars.simplify(index) + sorted_symbols = sorted(index.free_symbols, key=lambda s: s.name) + replacements = { + x: self.args.size(x) + for x in sorted_symbols + if x.name.startswith("s") + or x.name.startswith("ps") + or (x.name.startswith("i") and not x.name.startswith("idx")) + } + return sympy_subs(index, replacements) + + def create_cse_var(self, *args, **kwargs): + return CSEVariable(*args, **kwargs) + + +@dataclasses.dataclass +class OptimizationContext: + key: ClassVar[str] = "opt_ctx" + + # Load value as mask + is_load_as_mask: bool = False + + dtype: Optional[torch.dtype] = None + ops_name: str = "" + is_most_inner_loop_irrevelant: bool = False + + # Load uint8 value as float32 + is_load_uint8_as_float: bool = False + + +@functools.lru_cache(None) +def jinja2_env(): + try: + import jinja2 + + return jinja2.Environment( + undefined=jinja2.StrictUndefined, + ) + except ImportError: + return None + + +class ChoiceCaller: + """ + Represents a possible choice used in autotune_process.py. + During autotuning, self.benchmark() is first called to get benchmark result, + and if this choice is selected, self.output_node() is called to get the output_node. + + Children classes: TritonTemplateCaller, CUDATemplateCaller. + """ + + def __init__(self, name, input_nodes, layout): + super().__init__() + self.name = name + self.layout = layout + self.input_nodes = input_nodes + + def benchmark(self, *args, out) -> float: + algo = self.to_callable() + return do_bench(lambda: algo(*args, out=out)) + + def call_name(self) -> str: + raise NotImplementedError() + + def to_callable(self): + raise NotImplementedError() + + def hash_key(self) -> str: + raise NotImplementedError() + + def output_node(self) -> "TensorBox": # type: ignore[name-defined] + raise NotImplementedError() + + +class KernelTemplate: + """ + Base class for defining kernel templates. + + Children classes: TritonTemplate, CUDATemplate + """ + + @staticmethod + def _template_from_string(source): + env = jinja2_env() + if env is not None: + return env.from_string(source) + return None + + @staticmethod + def _fake_get_dtype(fake_out): + _get_dtype_real = V.graph.get_dtype + + def get_dtype(name): + if name == fake_out.get_name(): + return fake_out.get_dtype() + return _get_dtype_real(name) + + return get_dtype + + def __init__(self, name: str): + self.name = name + + def maybe_append_choice(self, choices, **kwargs): + """ + Maybe generates a new ChoiceCaller and appends it into existing choices. + + choices: A list of ChoiceCallers. + kwargs: Additional kwargs to be passed to self.generate() to generate a new ChoiceCaller. + """ + + try: + choices.append(self.generate(**kwargs)) + except NotImplementedError: + pass + + def generate(self, **kwargs) -> ChoiceCaller: + """ + Generates a ChoiceCaller instance from the given arguments. 
+ """ + + raise NotImplementedError() diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py new file mode 100644 index 0000000000000000000000000000000000000000..b94ede02c26589c369799e70a9a109997e41857a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py @@ -0,0 +1,3401 @@ +import contextlib +import dataclasses +import functools +import itertools +import logging +import math +import re +import sys +from copy import copy, deepcopy +from typing import Dict, List, Optional, Set, Tuple, Union + +import sympy + +import torch +import torch.fx +from torch._inductor import dependencies +from torch._inductor.ir import StorageBox, TensorBox +from torch._prims_common import is_float_dtype +from torch.utils._sympy.functions import FloorDiv +from torch.utils._sympy.value_ranges import bound_sympy, ValueRanges + +from .. import codecache, config, ir, metrics +from ..codegen.wrapper import WrapperCodeGen +from ..optimize_indexing import range_expressable_in_32_bits +from ..scheduler import BaseScheduling, SchedulerNode +from ..utils import ( + cache_on_self, + get_fused_kernel_name, + is_welford_reduction, + sympy_product, + sympy_subs, + sympy_symbol, +) + +from ..virtualized import ops, V +from .common import ( + BracesBuffer, + CppWrapperKernelArgs, + CSE, + CSEVariable, + DataTypePropagation, + DeferredLine, + DTYPE_TO_COMPUTATION_DTYPE, + ExprPrinter, + IndentedBuffer, + Kernel, + KernelArgs, + OpOverrides, + OptimizationContext, +) + +schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") + +DTYPE_TO_CPP = { + torch.float32: "float", + torch.float64: "double", + torch.float16: "half", + torch.int64: "long", + torch.int32: "int", + torch.int16: "short", + torch.int8: "signed char", + torch.uint8: "unsigned char", + torch.bool: "bool", + torch.bfloat16: "bfloat16", + torch.complex64: "complex64", +} + +DTYPE_TO_ATEN = { + torch.float32: "at::kFloat", + torch.float64: "at::kDouble", + torch.float16: "at::kHalf", + torch.int64: "at::kLong", + torch.int32: "at::kInt", + torch.int16: "at::kShort", + torch.int8: "at::kChar", + torch.uint8: "at::kByte", + torch.bool: "at::kBool", + torch.bfloat16: "at::kBFloat16", + torch.complex64: "at::kComplexFloat", + torch.float8_e4m3fn: "at::kFloat8_e4m3fn", + torch.float8_e5m2: "at::kFloat8_e5m2", +} + +DEVICE_TO_ATEN = { + "cpu": "at::kCPU", + "cuda": "at::kCUDA", +} + +INDEX_TYPE = "long" + +NATIVE_OMP_RTYPES = {"+", "*", "^", "||", "min", "max"} +RTYPE_TO_CPP = { + "sum": "+", + "prod": "*", + "xor_sum": "^", + "min": "min", + "max": "max", + "argmin": "argmin", + "argmax": "argmax", + "any": "||", + "welford_reduce": "welford", + "welford_combine": "welford", +} +VECTORIZABLE_RTYPES = { + "max", + "min", + "sum", + "prod", + "xor_sum", + "welford_reduce", + "welford_combine", +} + +PYTHON_TO_CPP = { + "Tensor": "at::Tensor", + "int": "long", + "float": "double", + "bool": "bool", + "str": "std::string", + "ScalarType": "c10::ScalarType", + "MemoryFormat": "at::MemoryFormat", + "Layout": "at::Layout", + "Device": "at::Device", + "number": "at::Scalar", +} + +CONTAINER_PYTHON_TO_CPP = { + "List": "std::vector", + "Optional": "c10::optional", +} + +DTYPE_LOWP_FP = [ + torch.bfloat16, + torch.float16, +] + + +def value_to_cpp(value, cpp_type): + if value == float("-inf"): + return f"-std::numeric_limits<{cpp_type}>::infinity()" + elif value == float("inf"): + return 
f"std::numeric_limits<{cpp_type}>::infinity()" + elif isinstance(value, bool): + return f"static_cast<{cpp_type}>({str(value).lower()})" + elif math.isnan(value): + return f"std::numeric_limits<{cpp_type}>::quiet_NaN()" + else: + return f"static_cast<{cpp_type}>({repr(value)})" + + +def reduction_init(reduction_type, dtype): + if dtype in DTYPE_LOWP_FP: + # Since load promotes all half-precision inputs to float, the initial + # constant for reduction must be promoted as well + dtype = torch.float32 + if reduction_type in ("xor_sum", "sum", "any"): + return 0 + if reduction_type == "prod": + return 1 + if reduction_type in {"max", "argmax"}: + return ( + f"-std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::infinity()" + if is_float_dtype(dtype) + else f"std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::min()" + ) + if reduction_type in {"min", "argmin"}: + return ( + f"std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::infinity()" + if is_float_dtype(dtype) + else f"std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::max()" + ) + if is_welford_reduction(reduction_type): + return f"Welford<{DTYPE_TO_CPP[dtype]}>()" + raise AssertionError(reduction_type) + + +def reduction_init_vec(reduction_type, dtype): + scalar_type = DTYPE_TO_CPP[DTYPE_TO_COMPUTATION_DTYPE[dtype]] + vec_type = f"at::vec::Vectorized<{scalar_type}>" + + if is_welford_reduction(reduction_type): + return f"Welford<{vec_type}>()" + + scalar_init = reduction_init(reduction_type, dtype) + return f"{vec_type}({scalar_init})" + + +def reduction_acc_type(reduction_type, dtype): + assert reduction_type not in {"argmin", "argmax"} + scalar_type = DTYPE_TO_CPP[DTYPE_TO_COMPUTATION_DTYPE[dtype]] + if is_welford_reduction(reduction_type): + return f"Welford<{scalar_type}>" + + return scalar_type + + +def reduction_acc_type_vec(reduction_type, dtype): + assert reduction_type not in {"argmin", "argmax"} + scalar_type = DTYPE_TO_CPP[DTYPE_TO_COMPUTATION_DTYPE[dtype]] + vec_type = f"at::vec::Vectorized<{scalar_type}>" + if is_welford_reduction(reduction_type): + return f"Welford<{vec_type}>" + + return vec_type + + +def reduction_combine(reduction_type, var, next_value): + if reduction_type == "sum": + return f"{var} + {next_value}" + if reduction_type == "prod": + return f"{var} * {next_value}" + if reduction_type == "xor_sum": + return f"{var} ^ {next_value}" + if reduction_type == "any": + return f"{var} || {next_value}" + if reduction_type in ("min", "max"): + return f"{reduction_type}_propagate_nan({var}, {next_value})" + if reduction_type == "welford_reduce": + return f"welford_combine({var}, {next_value})" + if reduction_type == "welford_combine": + if isinstance(next_value, tuple): + mean, m2, weight = next_value + else: + mean, m2, weight = reduction_project(reduction_type, next_value) + return f"welford_combine({var}, {{{mean}, {m2}, {weight}}})" + raise AssertionError(reduction_type) + + +def reduction_combine_vec(reduction_type, var, next_value): + if reduction_type == "max": + return f"at::vec::maximum({var}, {next_value})" + elif reduction_type == "min": + return f"at::vec::minimum({var}, {next_value})" + elif reduction_type == "sum": + return f"{var} + {next_value}" + elif reduction_type == "prod": + return f"{var} * {next_value}" + elif reduction_type == "xor_sum": + return f"{var} ^ {next_value}" + elif reduction_type == "welford_reduce": + return f"welford_combine({var}, {next_value})" + elif reduction_type == "welford_combine": + if isinstance(next_value, tuple): + # When reading a value from Inductor IR we have a tuple of variable names + mean, 
m2, weight = next_value + else: + # When combining intermediate accumulators we have a Welford struct + mean, m2, weight = reduction_project(reduction_type, next_value) + return f"welford_combine({var}, {{{mean}, {m2}, {weight}}})" + else: + raise NotImplementedError() + + +def reduction_project(reduction_type, acc): + if is_welford_reduction(reduction_type): + return f"{acc}.mean", f"{acc}.m2", f"{acc}.weight" + elif reduction_type in {"argmin", "argmax"}: + return f"{acc}.index" + return acc + + +index_value_name_counter = 1 + + +def argmax_argmin_prefix(reduction_type, src_dtype, tmpvar): + global index_value_name_counter + struct_name = f"IndexValue_{index_value_name_counter}" + index_value_name_counter += 1 + + # A small annoyance, due to it being a little cumbersome to just throw {} into strings + prefix = [ + f"struct {struct_name} {{size_t index; {DTYPE_TO_CPP[src_dtype]} value;}};", + f"{struct_name} {tmpvar}{{0, {reduction_init(reduction_type, src_dtype)}}};", + ] + if reduction_type == "argmax": + prefix.extend( + [ + "#if !defined(__clang_major__) || __clang_major__ > 9", + f"#pragma omp declare reduction(argmax : {struct_name} :\\", + " omp_out.value = omp_in.value < omp_out.value ? omp_out.value : omp_in.value,\\", + " omp_out.index = omp_in.value < omp_out.value ? omp_out.index : omp_in.index)\\", + f"\tinitializer(omp_priv = {{0, {reduction_init(reduction_type, src_dtype)}}})", + "#endif", + ] + ) + elif reduction_type == "argmin": + prefix.extend( + [ + "#if !defined(__clang_major__) || __clang_major__ > 9", + f"#pragma omp declare reduction(argmin : {struct_name} :\\", + " omp_out.value = omp_in.value > omp_out.value ? omp_out.value : omp_in.value,\\", + " omp_out.index = omp_in.value > omp_out.value ? omp_out.index : omp_in.index)\\", + f"\tinitializer(omp_priv = {{0, {reduction_init(reduction_type, src_dtype)}}})", + "#endif", + ] + ) + return prefix + + +def parallel_num_threads(): + threads = config.cpp.threads + if threads < 1: + threads = torch.get_num_threads() + return threads + + +@functools.lru_cache +def stride_at(var: sympy.Symbol, index: sympy.Expr): + replacement = {var: var + 1} + new_index = sympy_subs(index, replacement) + return sympy.simplify(new_index - index) + + +class CppPrinter(ExprPrinter): + def _print_Integer(self, expr): + return f"{int(expr)}L" + + def _print_Where(self, expr): + c = self.paren(self.doprint(expr.args[0])) + p = self.paren(self.doprint(expr.args[1])) + q = self.paren(self.doprint(expr.args[2])) + return f"{c} ? 
{p} : {q}" + + def _print_ModularIndexing(self, expr): + x, div, mod = expr.args + x = self.paren(self.doprint(x)) + if div != 1: + div = self.paren(self.doprint(div)) + if expr.is_integer: + x = f"c10::div_floor_integer({x}, {div})" + else: + x = f"c10::div_floor_floating(static_cast({x}), static_cast({div}))" + mod = self.paren(self.doprint(mod)) + return f"static_cast<{INDEX_TYPE}>({x}) % static_cast<{INDEX_TYPE}>({mod})" + + def _print_FloorDiv(self, expr): + x, div = expr.args + x = self.paren(self.doprint(x)) + div = self.paren(self.doprint(div)) + if expr.is_integer: + return f"c10::div_floor_integer({x}, {div})" + return f"c10::div_floor_floating(static_cast({x}), static_cast({div}))" + + def _print_floor(self, expr): + assert len(expr.args) == 1 + r = f"std::floor({self._print(expr.args[0])})" + return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r + + def _print_Pow(self, expr): + # Uses float constants to perform FP div + base, exp = expr.args + base = self._print(base) + + if exp == 0.5 or exp == -0.5: + return f"std::sqrt({base})" if exp == 0.5 else f"1.0/std::sqrt({base})" + assert exp.is_integer + exp = int(exp) + if exp > 0: + r = "*".join([self.paren(base)] * exp) + elif exp < 0: + r = "1.0/" + self.paren("*".join([self.paren(base)] * abs(exp))) + else: # exp == 0 + r = "1.0" + + return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r + + def _print_Rational(self, expr): + # Uses float constants to perform FP div + if expr.q == 1: + r = f"{expr.p}" + else: + r = f"{expr.p}.0/{expr.q}.0" + return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r + + def _print_ceiling(self, expr): + assert len(expr.args) == 1 + r = f"std::ceil({self._print(expr.args[0])})" + return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r + + def _print_Min(self, expr): + args = [self._print(a) for a in expr.args] + if len(args) == 2: + return f"std::min({args[0]}, {args[1]})" + else: + # Initializer list overload + il = "{" + ", ".join(args) + "}" + return f"std::min({il})" + + def _print_Max(self, expr): + args = [self._print(a) for a in expr.args] + if len(args) == 2: + return f"std::max({args[0]}, {args[1]})" + else: + # Initializer list overload + il = "{" + ", ".join(args) + "}" + return f"std::max({il})" + + def _print_Abs(self, expr): + assert len(expr.args) == 1 + return f"std::abs({self._print(expr.args[0])})" + + +# A function to print, useful for printing sympy symbols. 
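+# For example (hypothetical symbols), CppPrinter renders FloorDiv(s0, 8) as
+# "c10::div_floor_integer(s0, 8L)" for integer operands, and ModularIndexing(i0, 1, s1) as
+# "static_cast<long>(i0) % static_cast<long>(s1)".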
+cexpr = CppPrinter().doprint + + +def cexpr_index(index): + return f"static_cast<{INDEX_TYPE}>({cexpr(index)})" + + +class RecordOptimizationContext: + def __init__(self, func_name: str = ""): + self.func_name = func_name + self.current_node: Optional[torch.fx.Node] = None + self.opt_ctx: Optional[OptimizationContext] = None + + def __enter__(self): + assert V.interpreter + assert V.interpreter.current_node + + self.current_node = V.interpreter.current_node + assert self.current_node is not None + if OptimizationContext.key in self.current_node.meta: + self.opt_ctx = self.current_node.meta[OptimizationContext.key] + else: + self.opt_ctx = OptimizationContext() + assert self.opt_ctx is not None + self.opt_ctx.ops_name = self.func_name + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + assert self.current_node + assert self.opt_ctx + self.current_node.meta[OptimizationContext.key] = self.opt_ctx + + def get_opt_ctx(self): + return self.opt_ctx + + def get_fx_node(self): + assert self.current_node + return self.current_node + + +def get_opt_ctx(node: torch.fx.Node) -> OptimizationContext: + return node.meta.get(OptimizationContext.key, None) + + +def get_current_node_opt_ctx() -> OptimizationContext: + assert V.interpreter.current_node + return get_opt_ctx(V.interpreter.current_node) + + +class CppCSEVariable(CSEVariable): + def __init__(self, name, bounds: ValueRanges): + super().__init__(name, bounds) + self.is_vec = False + self.dtype: Optional[torch.dtype] = None + self.dependent_itervars: Set[sympy.Symbol] = set() + + def update_on_args(self, name, args, kwargs): + if name == "load": + # args[1] is index + self._set_dependent_itervars(args[1]) + else: + # propagate relevant itervars and is_vec from args + self.dependent_itervars.update( + *[ + arg.dependent_itervars + for arg in args + if isinstance(arg, CppCSEVariable) + ] + ) + if name == "index_expr": + self._set_dependent_itervars(args[0]) + if any(arg.is_vec for arg in args if isinstance(arg, CppCSEVariable)): + self.is_vec = True + if ( + hasattr(V.interpreter, "current_node") + and get_current_node_opt_ctx() is not None + ): + self.dtype = get_current_node_opt_ctx().dtype + + def _set_dependent_itervars(self, index: sympy.Expr): + """ + Set the relevant itervars for this variable based on the `index` expression. + This includes the itervars directly used in the `index` as well as relevant itervars + of other cse variables used in the `index`. 
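+        For example (illustrative): for an index such as `x0 + tmp3`, where `x0` is a
+        kernel itervar and `tmp3` is a cse variable, `x0` is added directly while the
+        dependent itervars already recorded on `tmp3` are merged in.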
+ """ + for s in index.free_symbols: + if s in V.kernel.itervars: + self.dependent_itervars.add(s) + elif s.name in V.kernel.cse.varname_map: + self.dependent_itervars.update( + V.kernel.cse.varname_map[s.name].dependent_itervars + ) + + def depends_on(self, itervar: sympy.Symbol): + return itervar in self.dependent_itervars + + +class CppOverrides(OpOverrides): + """Map element-wise ops to C++""" + + @staticmethod + def add(a, b): + return f"decltype({a})({a} + {b})" + + @staticmethod + def sub(a, b): + return f"decltype({a})({a} - {b})" + + @staticmethod + def mul(a, b): + return f"decltype({a})({a} * {b})" + + @staticmethod + def to_dtype(x, dtype, src_dtype=None): + assert dtype in DTYPE_TO_CPP, f"{dtype} missing from {__name__}.DTYPE_TO_CPP" + return f"c10::convert<{DTYPE_TO_CPP[dtype]}>({x})" + + @staticmethod + def to_dtype_bitcast(x, dtype): + assert dtype in DTYPE_TO_CPP, f"{dtype} missing from {__name__}.DTYPE_TO_CPP" + return f"c10::bit_cast<{DTYPE_TO_CPP[dtype]}>({x})" + + @staticmethod + def abs(x): + return f"std::abs({x})" + + @staticmethod + def sin(x): + return f"std::sin({x})" + + @staticmethod + def cos(x): + return f"std::cos({x})" + + @staticmethod + def neg(x): + return f"decltype({x})(-{x})" + + @staticmethod + def exp(x): + # return f"Sleef_expf_u10({x})" + return f"std::exp({x})" + + @staticmethod + def exp2(x): + return f"std::exp2({x})" + + @staticmethod + def expm1(x): + return f"std::expm1({x})" + + @staticmethod + def erf(x): + return f"std::erf({x})" + + @staticmethod + def erfc(x): + return f"std::erfc({x})" + + @staticmethod + def erfinv(x): + return f"calc_erfinv({x})" + + @staticmethod + def sqrt(x): + return f"std::sqrt({x})" + + @staticmethod + def rsqrt(x): + return f"1 / std::sqrt({x})" + + @staticmethod + def log1p(x): + bug = config.cpp.inject_log1p_bug_TESTING_ONLY + if bug == "accuracy": + return f"{x} + decltype({x})(1)" + elif bug is None: + return f"std::log1p({x})" + else: + raise AssertionError( + f"unrecognized config cpp.inject_log1p_bug_TESTING_ONLY = {bug!r}" + ) + + @staticmethod + def tan(x): + return f"std::tan({x})" + + @staticmethod + def tanh(x): + return f"std::tanh({x})" + + @staticmethod + def signbit(x): + return f"std::signbit({x})" + + @staticmethod + def pow(a, b): + return f"std::pow({a}, {b})" + + @staticmethod + def log(x): + return f"std::log({x})" + + @staticmethod + def round(x): + return f"std::nearbyint({x})" + + @staticmethod + def floor(x): + return f"std::floor({x})" + + @staticmethod + def floordiv(a, b): + # a and b are integer type + quot = f"{a} / {b}" + rem = f"{a} % {b}" + return f"(({a} < 0) != ({b} < 0) ? ({rem} != 0 ? 
{quot} - 1 : {quot}) : {quot})" + + @staticmethod + def ceil(x): + return f"std::ceil({x})" + + @staticmethod + def trunc(x): + return f"std::trunc({x})" + + @staticmethod + def truncdiv(a, b): + # a and b are integer type + return f"{a} / {b}" + + @staticmethod + def fmod(a, b): + return f"std::fmod({a}, {b})" + + @staticmethod + def isinf(x): + return f"std::isinf({x})" + + @staticmethod + def isnan(x): + return f"std::isnan({x})" + + @staticmethod + def lgamma(x): + return f"std::lgamma({x})" + + @staticmethod + def acos(x): + return f"std::acos({x})" + + @staticmethod + def acosh(x): + return f"std::acosh({x})" + + @staticmethod + def cosh(x): + return f"std::cosh({x})" + + @staticmethod + def sinh(x): + return f"std::sinh({x})" + + @staticmethod + def asin(x): + return f"std::asin({x})" + + @staticmethod + def asinh(x): + return f"std::asinh({x})" + + @staticmethod + def atan2(x, y): + return f"std::atan2({x}, {y})" + + @staticmethod + def atan(x): + return f"std::atan({x})" + + @staticmethod + def atanh(x): + return f"std::atanh({x})" + + @staticmethod + def copysign(x, y): + return f"std::copysign({x}, {y})" + + @staticmethod + def hypot(x, y): + return f"std::hypot({x}, {y})" + + @staticmethod + def log10(x): + return f"std::log10({x})" + + @staticmethod + def nextafter(x, y): + return f"std::nextafter({x}, {y})" + + @staticmethod + def relu(x): + bug = config.cpp.inject_relu_bug_TESTING_ONLY + if bug == "compile_error": + return "compile error!" + elif bug == "runtime_error": + return f"{x}; throw 1" + elif bug == "accuracy": + return f"{x} + decltype({x})(1)" + elif bug is None: + return f"{x} * ({x}>0)" + else: + raise AssertionError( + f"unrecognized config cpp.inject_relu_bug_TESTING_ONLY = {bug!r}" + ) + + @staticmethod + def minimum(a, b): + return f"min_propagate_nan({a}, {b})" + + @staticmethod + def maximum(a, b): + return f"max_propagate_nan({a}, {b})" + + @staticmethod + def where(a, b, c): + return f"{a} ? {b} : {c}" + + @staticmethod + def mod(a, b): + return f"mod({a}, {b})" + + @staticmethod + def constant(val, dtype): + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + assert opt_ctx and opt_ctx.dtype is not None + dtype = opt_ctx.dtype + if dtype in DTYPE_LOWP_FP: + # Since load promotes all half-precision inputs to float, constants + # must be promoted as well + dtype = torch.float32 + return value_to_cpp(val, DTYPE_TO_CPP[dtype]) + + @staticmethod + def index_expr(expr, dtype): + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + assert opt_ctx and opt_ctx.dtype is not None + dtype = opt_ctx.dtype + return ops.to_dtype(cexpr(V.kernel.rename_indexing(expr)), dtype) + + @staticmethod + def masked(mask, body, other): + code = BracesBuffer() + + # Write masked operation into a lambda + body_var = V.kernel.cse.newvar() + code.writeline(f"auto {body_var} = [&]") + with V.kernel.swap_buffers(code), code.indent(): + result = body() + code.writeline(f"return {result};") + code.writeline(";") + V.kernel.compute.splice(code) + + # Use the lambda's return type as the type of other + other_code = value_to_cpp(other, f"decltype({body_var}())") + return f"{mask} ? 
{body_var}() : {other_code}" + + @staticmethod + def logical_and(a, b): + return f"{a} && {b}" + + @staticmethod + def logical_not(a): + return f"!{a}" + + @staticmethod + def logical_or(a, b): + return f"{a} || {b}" + + @staticmethod + def logical_xor(a, b): + return f"{a} != {b}" + + @staticmethod + def bitwise_and(a, b): + return f"decltype({a})({a} & {b})" + + @staticmethod + def bitwise_not(a): + return f"decltype({a})(~{a})" + + @staticmethod + def bitwise_or(a, b): + return f"decltype({a})({a} | {b})" + + @staticmethod + def bitwise_xor(a, b): + return f"decltype({a})({a} ^ {b})" + + @staticmethod + def bitwise_left_shift(a, b): + return f"decltype({a})({a} << {b})" + + @staticmethod + def bitwise_right_shift(a, b): + return f"decltype({a})({a} >> {b})" + + @staticmethod + def rand(seed: sympy.Expr, offset: sympy.Expr): + return f"normalized_rand_cpu({seed}, {offset})" + + @staticmethod + def randn(seed: sympy.Expr, offset: sympy.Expr): + return f"randn_cpu({seed}, {offset})" + + @staticmethod + def randint64(seed: sympy.Expr, offset: sympy.Expr, low, high): + return f"randint64_cpu({seed}, {offset}, {low}, {high})" + + @staticmethod + def sigmoid(x): + return f"decltype({x})(1) / (decltype({x})(1) + std::exp(-{x}))" + + @staticmethod + def sign(x): + code = BracesBuffer() + # auto tmp5 = tmp4 < 0 ? -1 : 1; + left = V.kernel.cse.newvar() + right = V.kernel.cse.newvar() + result = V.kernel.cse.newvar() + scalar_zero = f"decltype({x})(0)" + scalar_one = f"decltype({x})(1)" + code.writeline(f"auto {left} = {x} > 0 ? {scalar_one} : {scalar_zero};") + code.writeline(f"auto {right} = {x} < 0 ? {scalar_one} : {scalar_zero};") + code.writeline(f"auto {result} = {left} - {right};") + V.kernel.compute.splice(code) + return result + + +class CppVecOverrides(CppOverrides): + """Map element-wise ops to aten vectorization C++""" + + def __new__(cls, *args, **kargs): + self = super().__new__(cls) + + def wrap(func): + # `CppVecKernel` generates both scalar ops and vector ops according to + # whether the inputs are scalars or vectors while all ops in `CppVecOverrides` + # (except for "masked") assume the inputs are vectors. We wrap the ops in + # `CppVecOverrides` to broadcast scalar inputs to vectors if needed or fallback to + # `CppOverrides` when all inputs are scalars. + # + # Inputs to ops.masked are handled separately in its own function due to + # the need of recurive handling of masked body. 
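+            # Illustrative behaviour (sketch, variable names are examples only): given a
+            # vectorized cse variable tmp0 (is_vec=True) and a scalar tmp1 (is_vec=False),
+            # ops.add(tmp0, tmp1) first broadcasts tmp1 via V.kernel.broadcast(tmp1) and
+            # then emits the vector add; if every argument is scalar, the call falls back
+            # to the scalar CppOverrides.add instead.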
+ def wrapper(*args, **kwargs): + has_scalar = any( + not arg.is_vec for arg in args if isinstance(arg, CppCSEVariable) + ) + has_vector = any( + arg.is_vec for arg in args if isinstance(arg, CppCSEVariable) + ) + new_args = list(args) + if has_scalar and has_vector: + # broadcast scalar args to vector if needed + new_args = [] + for arg in args: + if isinstance(arg, CppCSEVariable) and not arg.is_vec: + assert isinstance(V.kernel, CppVecKernel) + new_arg = V.kernel.broadcast(arg) + new_args.append(new_arg) + else: + new_args.append(arg) + if has_vector: + return func(*new_args, **kwargs) + else: + # fallback to scalar ops + scalar_ops = super(CppVecOverrides, self) + scalar_func = getattr( + scalar_ops, func.__name__, scalar_ops.__getattr__(func.__name__) # type: ignore[attr-defined] + ) + assert scalar_func is not None + return scalar_func(*args, **kwargs) + + return wrapper + + for name, method in vars(cls).items(): + if getattr(method, "__class__", None) == staticmethod and name != "masked": + setattr(self, name, wrap(method.__func__)) + return self + + @staticmethod + def add(a, b): + return f"{a} + {b}" + + @staticmethod + def sub(a, b): + return f"{a} - {b}" + + @staticmethod + def mul(a, b): + return f"{a} * {b}" + + @staticmethod + def truediv(a, b): + return f"{a} / {b}" + + @staticmethod + def abs(x): + return f"{x}.abs()" + + @staticmethod + def sin(x): + return f"{x}.sin()" + + @staticmethod + def cos(x): + return f"{x}.cos()" + + @staticmethod + def exp(x): + return f"{x}.exp()" + + @staticmethod + def exp2(x): + return f"{x}.exp2()" + + @staticmethod + def expm1(x): + # decompose for a better performance + vec_one = f"decltype({x})(1)" + return f"{x}.exp() - {vec_one}" + + @staticmethod + def erf(x): + return f"{x}.erf()" + + @staticmethod + def erfc(x): + return f"{x}.erfc()" + + @staticmethod + def erfinv(x): + return f"{x}.erfinv()" + + @staticmethod + def sqrt(x): + return f"{x}.sqrt()" + + @staticmethod + def eq(x, y): + return f"to_float_mask({x} == {y})" + + @staticmethod + def ne(x, y): + return f"to_float_mask({x} != {y})" + + @staticmethod + def lt(x, y): + return f"to_float_mask({x} < {y})" + + @staticmethod + def gt(x, y): + return f"to_float_mask({x} > {y})" + + @staticmethod + def le(x, y): + return f"to_float_mask({x} <= {y})" + + @staticmethod + def ge(x, y): + return f"to_float_mask({x} >= {y})" + + @staticmethod + def and_(x, y): + return f"{x} & {y}" + + @staticmethod + def rsqrt(x): + return f"{x}.rsqrt()" + + @staticmethod + def pow(a, b): + return f"{a}.pow({b})" + + @staticmethod + def log(x): + return f"{x}.log()" + + @staticmethod + def round(x): + return f"{x}.round()" + + @staticmethod + def floor(x): + return f"{x}.floor()" + + @staticmethod + def ceil(x): + return f"{x}.ceil()" + + @staticmethod + def trunc(x): + return f"{x}.trunc()" + + @staticmethod + def fmod(a, b): + return f"{a}.fmod({b})" + + @staticmethod + def lgamma(x): + return f"{x}.lgamma()" + + @staticmethod + def logical_and(a, b): + return f"({a} != 0) & ({b} != 0)" + + @staticmethod + def logical_not(a): + return f"{a} == 0" + + @staticmethod + def logical_or(a, b): + return f"({a} != 0) | ({b} != 0)" + + @staticmethod + def logical_xor(a, b): + return f"({a} != 0) ^ ({b} != 0)" + + @staticmethod + def tan(a): + return f"{a}.tan()" + + @staticmethod + def tanh(a): + vec_one = f"decltype({a})(1)" + vec_two = f"decltype({a})(2)" + vec_minus_two = f"decltype({a})(-2)" + return f"{vec_two} / ({vec_one} + ({vec_minus_two} * {a}).exp()) - {vec_one}" + + @staticmethod + def 
reciprocal(a): + return f"{a}.reciprocal()" + + @staticmethod + def atan(x): + return f"{x}.atan()" + + @staticmethod + def acos(x): + return f"{x}.acos()" + + @staticmethod + def asin(x): + return f"{x}.asin()" + + @staticmethod + def cosh(x): + return f"{x}.cosh()" + + @staticmethod + def sinh(x): + return f"{x}.sinh()" + + @staticmethod + def log10(x): + return f"{x}.log10()" + + @staticmethod + def nextafter(x): + return f"{x}.nextafter()" + + @staticmethod + def copysign(a, b): + return f"{a}.copysign({b})" + + @staticmethod + def atan2(a, b): + return f"{a}.atan2({b})" + + @staticmethod + def hypot(a, b): + return f"{a}.hypot({b})" + + @staticmethod + def atanh(x): + # For real x, atanh(x) = 1/2 * log((1+x)/(1-x)) + vec_one = f"decltype({x})(1)" + vec_one_half = f"decltype({x})(0.5)" + return f"{vec_one_half} * (({vec_one} + {x})/({vec_one} - {x})).log()" + + @staticmethod + def asinh(x): + # For real x, asinh(x) = log(x + sqrt(1 + x**2)) + vec_one = f"decltype({x})(1)" + return f"({x} + ({vec_one} + {x}*{x}).sqrt()).log()" + + @staticmethod + def acosh(x): + # For real x, acosh(x) = log(x + sqrt(x**2 -1)) + vec_one = f"decltype({x})(1)" + return f"({x} + ({x}*{x} - {vec_one}).sqrt()).log()" + + @staticmethod + def relu(x): + bug = config.cpp.inject_relu_bug_TESTING_ONLY + if bug == "compile_error": + return "compile error!" + elif bug == "runtime_error": + return f"{x}; throw 1" + elif bug == "accuracy": + return f"{x} + decltype({x})(1)" + elif bug is None: + return f"at::vec::clamp_min({x}, decltype({x})(0))" + else: + raise AssertionError( + f"unrecognized config cpp.inject_relu_bug_TESTING_ONLY = {bug!r}" + ) + + # TODO: this seems to be dead + @staticmethod + def sigmoid(x): + return f"decltype({x})(1)/(decltype({x})(1) + {x}.neg().exp())" + + @staticmethod + def neg(x): + return f"{x}.neg()" + + @staticmethod + def floordiv(a, b): + # a and b are integer type + _t = f"decltype({a})" + quot = f"{a} / {b}" + rem = f"{a} % {b}" + return f"(({a} < {_t}(0)) != ({b} < {_t}(0)) ? ({rem} != {_t}(0) ? {quot} - {_t}(1) : {quot}) : {quot})" + + @staticmethod + def truncdiv(a, b): + # a and b are integer type + return f"{a} / {b}" + + @staticmethod + def minimum(a, b): + return f"at::vec::minimum({a}, {b})" + + @staticmethod + def maximum(a, b): + return f"at::vec::maximum({a}, {b})" + + @staticmethod + def square(a): + return f"{a} * {a}" + + @staticmethod + def where(a, b, c): + return f"decltype({b})::blendv({c}, {b}, {a})" + + @staticmethod + def sign(x): + code = BracesBuffer() + # auto tmp5 = tmp4 < 0 ? -1 : 1; + vec_zero = f"decltype({x})(0)" + vec_one = f"decltype({x})(1)" + blendv = f"decltype({x})::blendv({vec_zero}, {vec_one}, {vec_zero} < {x})" + left = V.kernel.cse.newvar() + code.writeline(f"auto {left} = {blendv};") + + # auto tmp6 = tmp4 == 0 ? 
0 : tmp5; + blendv = f"decltype({x})::blendv({vec_zero}, {vec_one}, {x} < {vec_zero})" + right = V.kernel.cse.newvar() + code.writeline(f"auto {right} = {blendv};") + result = V.kernel.cse.newvar() + code.writeline(f"auto {result} = {left} - {right};") + V.kernel.compute.splice(code) + return result + + @staticmethod + def to_dtype(x, dtype, src_dtype=None): + assert dtype in [ + torch.bool, + torch.float, + torch.bfloat16, + torch.float16, + torch.uint8, + ], f"{__name__} does not support {dtype}" + node: torch.fx.Node = V.interpreter.current_node + assert node and isinstance(node, torch.fx.Node) + opt_ctx_x = get_opt_ctx(node.args[1]) + assert opt_ctx_x + if opt_ctx_x.dtype in (torch.float, torch.float32) and dtype == torch.bool: + return f"vec_convert_to_mask({x})" + if opt_ctx_x.dtype == torch.bool and dtype in (torch.float, torch.float32): + return f"mask_convert_to_float({x})" + if opt_ctx_x.dtype in (torch.float, torch.float32) and dtype in DTYPE_LOWP_FP: + return f"cvt_fp32_to_lowp_fp<{DTYPE_TO_CPP[dtype]}>({x})" + if opt_ctx_x.dtype in DTYPE_LOWP_FP and dtype in (torch.float, torch.float32): + return f"cvt_lowp_fp_to_fp32<{DTYPE_TO_CPP[opt_ctx_x.dtype]}>({x})" + if opt_ctx_x.dtype == torch.uint8 and dtype in (torch.float, torch.float32): + # Note: this function only convert inputs number of elements equal to at::vec::Vectorized.size() + return f"at::vec::convert_uint8_to_float({x})" + if opt_ctx_x.dtype in (torch.float, torch.float32) and dtype == torch.uint8: + # TODO(Leslie): Add fast path to at::vec::convert_float_to_uint8, + # if we already handle the saturation previously. + # * Pattern match of quantization op in the loop body. + # * Skip the explicit saturation and clamp inside at::vec::convert_float_to_uint8. + return f"at::vec::convert_float_to_uint8({x})" + # TODO(jgong5): support conversion for other types + # currently we only allow load/store torch.uint8 and handle conversion there + return f"({x})" + + @staticmethod + def log1p(x): + bug = config.cpp.inject_log1p_bug_TESTING_ONLY + if bug == "accuracy": + return f"{x} + decltype({x})(1)" + elif bug is None: + return f"{x}.log1p()" + else: + raise AssertionError( + f"unrecognized config cpp.inject_log1p_bug_TESTING_ONLY = {bug!r}" + ) + + @staticmethod + def masked(mask, body, other): + code = BracesBuffer() + var = V.kernel.cse.newvar() + with V.kernel.masked(mask) as new_mask: + code.writeline(f"auto {var} = [&]") + with V.kernel.swap_buffers(code), code.indent(): + result = body() + code.writeline(f"return {result};") + code.writeline(";") + V.kernel.compute.splice(code) + + other_code = value_to_cpp(other, "float") + other_code_vec = f"at::vec::Vectorized({other_code})" + + if result.is_vec: + type = f"decltype({var}())" + float_mask = f"to_float_mask({new_mask})" + csevar = V.kernel.cse.generate( + V.kernel.compute, + f"{type}::blendv({other_code_vec}, {var}(), {float_mask})", + ) + else: + csevar = V.kernel.cse.generate( + V.kernel.compute, f"{mask} ? {var}() : {other_code}" + ) + # `result` is explicitly added to the args for correct propagation + # of relevant itervars and vectorization status. 
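+        # Illustrative generated C++ for the vector case (names are examples only):
+        #   auto tmp4 = [&] { ... return <masked body>; };
+        #   auto tmp5 = decltype(tmp4())::blendv(<other as vector>, tmp4(), to_float_mask(<mask>));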
+ csevar.update_on_args("masked", (mask, body, other, result), {}) + return csevar + + +class CppKernel(Kernel): + overrides = CppOverrides # type: ignore[assignment] + sexpr = cexpr + newvar_prefix = "auto " + suffix = ";" + + def __init__(self, args, num_threads): + super().__init__(args) + self.call_ranges: Optional[Tuple[sympy.Expr, ...]] = None + self.ranges: List[sympy.Expr] = [] + self.itervars: List[sympy.Symbol] = [] + self.reduction_depth = None + self.reduction_prefix = IndentedBuffer() + self.reduction_suffix = IndentedBuffer() + self.reduction_var_map = {} + self.reduction_cse = CSE(self.newvar_prefix, self.suffix, name_prefix="tmp_acc") + self.preloads = IndentedBuffer() + self.poststores = IndentedBuffer() + self.num_threads = num_threads # num_threads the kernel specialized for + self.reduction_omp_dec: Dict[Tuple[str, str], str] = {} + + @contextlib.contextmanager + def masked(self, mask): + """Context manager to add an additional mask to loads and stores.""" + prior = self._load_mask + if prior: + mask = self.cse.generate(self.compute, f"{mask} & {prior}") + + self._load_mask = mask + try: + yield mask + finally: + self._load_mask = prior + + def scale_index_with_offset( + self, index: sympy.Expr, scale=1, itervar_idx=-1, offset=0 + ): + var = self.itervars[itervar_idx] + replacement = {var: var * scale + offset} + new_index = sympy_subs(index, replacement) + return new_index + + def index_to_str(self, index: sympy.Expr) -> str: + """ + Convert an index expr to a string that can be used in cpp code. + e.g. a sympy expression "s2" may actually appear as "ks1" in the cpp kernel. + """ + return cexpr(self.rename_indexing(index)) + + def load(self, name: str, index: sympy.Expr): + var = self.args.input(name) + index = self.rename_indexing(index) + line = f"{var}[{cexpr_index(index)}]" + if V.graph.get_dtype(name) in [torch.float16]: + line = f"static_cast({line})" + csevar = self.cse.generate(self.loads, line) + csevar.update_on_args("load", (name, index), {}) + return csevar + + def store(self, name, index, value, mode=None): + assert "buf" in name + var = self.args.output(name) + index = self.rename_indexing(index) + if mode is None: + line = f"{var}[{cexpr_index(index)}] = {value};" + elif mode == "atomic_add": + if not config.cpp.dynamic_threads and self.num_threads == 1: + line = f"{var}[{cexpr_index(index)}] += {value};" + else: + line = f"atomic_add(&{var}[{cexpr_index(index)}], {value});" + else: + raise NotImplementedError(f"store mode={mode}") + self.stores.writeline(DeferredLine(name, line)) + + def reduction(self, dtype, src_dtype, reduction_type, value): + argmax_or_argmin = reduction_type in {"argmax", "argmin"} + + reduction_key = src_dtype, reduction_type, value + if reduction_key in self.reduction_cse.reduction_cache: + return self.reduction_cse.reduction_cache[reduction_key] + + acc = self.reduction_cse.generate( + self.loads, f"reduction {reduction_key}", write=False + ) + self.reduction_var_map[acc] = reduction_type + if argmax_or_argmin: + self.reduction_prefix.writelines( + argmax_argmin_prefix(reduction_type, src_dtype, acc) + ) + compare_op = "<" if reduction_type == "argmax" else ">" + assert self.reduction_depth is not None + index = self.itervars[self.reduction_depth] + for i in range(self.reduction_depth + 1, len(self.itervars)): + index = index * self.ranges[i] + self.itervars[i] + self.stores.writelines( + [ + f"if ({acc}.value {compare_op} {value}) {{", + f" {acc}.index = {cexpr_index(index)}; {acc}.value = {value};", + "}", + ], + ) + else: + 
acc_type = reduction_acc_type(reduction_type, dtype) + + if (reduction_type, acc_type) not in self.reduction_omp_dec: + if RTYPE_TO_CPP[reduction_type] not in NATIVE_OMP_RTYPES: + # Scalar reduction for other reductions are declared by default + self.reduction_prefix.splice( + f"""\ + #pragma omp declare reduction(\ + {RTYPE_TO_CPP[reduction_type]}:{acc_type}:\ + omp_out = {reduction_combine(reduction_type, "omp_out", "omp_in")}) \ + initializer(omp_priv={{{reduction_init(reduction_type, dtype)}}}) + """ + ) + self.reduction_omp_dec[reduction_type, acc_type] = RTYPE_TO_CPP[ + reduction_type + ] + + self.reduction_prefix.writeline( + f"{acc_type} {acc} = {reduction_init(reduction_type, dtype)};" + ) + self.stores.writeline( + f"{acc} = {reduction_combine(reduction_type, acc, value)};" + ) + + result = reduction_project(reduction_type, acc) + self.reduction_cse.reduction_cache[reduction_key] = result + return result + + def store_reduction(self, name, index, value): + index = self.rename_indexing(index) + var = self.args.output(name) + self.reduction_suffix.writeline( + DeferredLine(name, f"{var}[{cexpr_index(index)}] = {value};") + ) + + def set_ranges(self, lengths, reduction_lengths): + if self.call_ranges: + assert self.call_ranges == tuple(lengths) + tuple( + reduction_lengths + ), f"{self.call_ranges} == {tuple(lengths)} + {tuple(reduction_lengths)}" + assert self.reduction_depth == len(lengths) + else: + self.call_ranges = tuple(lengths) + tuple(reduction_lengths) + self.ranges = [self.rename_indexing(x) for x in self.call_ranges] + self.itervars = [sympy_symbol(f"x{n}") for n in range(len(self.ranges))] + self.reduction_depth = len(lengths) + return ( + self.itervars[: self.reduction_depth], + self.itervars[self.reduction_depth :], + ) + + def size_hint(self): + return V.graph.sizevars.size_hint( + sympy_product(self.call_ranges), fallback=8192 + ) + + def codegen_loops_impl(self, loop_nest, code, worksharing): + threads = parallel_num_threads() + assert self.call_ranges is not None + par_depth = self.decide_parallel_depth( + self.call_ranges[: loop_nest.max_parallel_depth()], threads + ) + with contextlib.ExitStack() as stack: + if par_depth: + if loop_nest.is_reduction_only(): + # need to close the worksharing scope to define reduction vars outside it + worksharing.close() + else: + worksharing.parallel(threads) + loop_nest.mark_parallel(par_depth) + elif threads > 1: + if worksharing.single(): + stack.enter_context(code.indent()) + + def gen_kernel(kernel): + with contextlib.ExitStack() as stack: + assert kernel + if hasattr(kernel, "codegen_inner_loops"): + code.splice(kernel.preloads) + kernel.codegen_inner_loops(code) + stack.enter_context(code.indent()) + code.splice(kernel.loads) + code.splice(kernel.compute) + code.splice(kernel.stores) + if hasattr(kernel, "codegen_inner_loops"): + code.splice(kernel.poststores) + + def get_reduction_code_buffer(loops, is_suffix=True): + for loop in loops: + for kernel in loop.get_kernels(): + if is_suffix: + return kernel.reduction_suffix + else: + return kernel.reduction_prefix + return None + + def gen_loops(loops: List[LoopLevel], in_reduction=False): + with contextlib.ExitStack() as stack_outer: + if loops: + loop = loops[0] + if loop.is_reduction() and not in_reduction: + reduction_prefix = get_reduction_code_buffer( + loops, is_suffix=False + ) + if reduction_prefix: + stack_outer.enter_context(code.indent()) + code.splice(reduction_prefix) + if loop_nest.is_reduction_only() and loop.parallel: + worksharing.parallel(threads) + + for 
loop in loops: + gen_loop(loop, in_reduction) + + if loops: + loop = loops[0] + if loop_nest.is_reduction_only() and loop.parallel: + worksharing.close() + if loop.is_reduction() and not in_reduction: + code.splice( + get_reduction_code_buffer(loops, is_suffix=True) + ) + + def gen_loop(loop: LoopLevel, in_reduction=False): + with contextlib.ExitStack() as stack: + loop_lines = loop.lines() + if loop_lines is None: + return + code.writelines(loop_lines) + stack.enter_context(code.indent()) + # generate inner loops or loop body + if loop.inner: + gen_loops(loop.inner, loop.is_reduction()) + else: + kernels = loop.get_kernels() + assert len(kernels) == 1 + gen_kernel(kernels[0]) + + stack.enter_context(code.indent()) + if loop_nest.root: + gen_loops(loop_nest.root) + else: + gen_kernel(loop_nest.kernel) + + def codegen_loops(self, code, worksharing): + loop_nest = LoopNestWithSplit.build(self) + self.codegen_loops_impl(loop_nest, code, worksharing) + + @property + def assert_function(self) -> str: + return "TORCH_CHECK" + + def decide_parallel_depth(self, ranges, threads): + seq = self.size_hint() + par = 1 + depth = 0 + for expr in ranges: + hint = V.graph.sizevars.size_hint(expr, fallback=8192) + if par >= 2 * threads or par == threads: + break + if seq // threads < config.cpp.min_chunk_size: + # not enough work + break + depth += 1 + par *= hint + seq /= hint + # if we assume thread number is dynamic, make sure we + # have at least one parallel scope and let OMP runtime + # to manage the serial vs. parallel. + if config.cpp.dynamic_threads and depth == 0 and len(ranges) > 0: + depth = 1 + return depth + + @contextlib.contextmanager + def write_to_suffix(self): + prior = (self.loads, self.compute, self.stores, self.cse) + self.loads = IndentedBuffer() + self.compute = IndentedBuffer() + self.stores = IndentedBuffer() + self.cse = self.cse.clone() + yield + self.reduction_suffix.splice(self.loads) + self.reduction_suffix.splice(self.compute) + self.reduction_suffix.splice(self.stores) + (self.loads, self.compute, self.stores, self.cse) = prior + + def create_cse_var(self, *args, **kwargs): + return CppCSEVariable(*args, **kwargs) + + +class CppVecKernel(CppKernel): + overrides = CppVecOverrides # type: ignore[assignment] + + def __init__( + self, + args, + num_threads, + tiling_factor=0, + tiling_idx=-1, + tiling_dtype=torch.float, + ): + super().__init__(args, num_threads) + assert codecache.pick_vec_isa() + if tiling_factor == 0: + tiling_factor = codecache.pick_vec_isa().nelements(dtype=tiling_dtype) + self.tiling_factor = tiling_factor + self.tiling_idx = tiling_idx + metrics.generated_cpp_vec_kernel_count += 1 + + def load(self, name: str, index: sympy.Expr): + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + var = self.args.input(name) + index = self.rename_indexing(index) + dtype = V.graph.get_dtype(name) + tiling_var = self.itervars[self.tiling_idx] + is_broadcast = not index.has(tiling_var) + is_mask = ( + dtype in [torch.bool, torch.uint8] and not opt_ctx.is_load_uint8_as_float + ) + load_mask = f"to_float_mask({self._load_mask})" if self._load_mask else None + non_contiguous = ( + not is_broadcast + and stride_at(tiling_var, index) != 1 + or any( + self.cse.varname_map[s.name].depends_on(tiling_var) + for s in index.free_symbols + if s.name.startswith("tmp") + ) + ) + var_expr = ( + f"{var}[{cexpr_index(index)}]" + if is_broadcast + else f"{var} + {cexpr_index(index)}" + ) + loadbuf = "tmpbuf" if non_contiguous else var_expr + if is_broadcast: + csevar = 
super().load(name, index) + csevar.dtype = dtype + return csevar + elif dtype in [torch.uint8] and opt_ctx.is_load_uint8_as_float: + line = ( + f"masked_load({loadbuf}, {load_mask})" + if load_mask + else f"at::vec::Vectorized::loadu_one_fourth({loadbuf})" + ) + elif is_mask: + line = f"flag_to_float_vec({loadbuf})" + elif dtype in DTYPE_LOWP_FP: + line = ( + f"masked_load({loadbuf}, {load_mask})" + if load_mask + else f"at::vec::Vectorized<{DTYPE_TO_CPP[dtype]}>::loadu({loadbuf}, {self.tiling_factor})" + ) + else: + line = ( + f"masked_load({loadbuf}, {load_mask})" + if load_mask + else f"at::vec::Vectorized::loadu({loadbuf})" + ) + + if non_contiguous: + # TODO: support masked_load for non_contiguous path? + tmpbuftype = "float" if is_mask else f"{DTYPE_TO_CPP[dtype]}" + tmpbufsize = f"{self.tiling_factor}" + if dtype in DTYPE_LOWP_FP: + tmpbufsize += " * 2" + tmpbufdeclare = f"__at_align__ {tmpbuftype} tmpbuf[{tmpbufsize}];" + inner = sympy_symbol(f"{tiling_var}_inner") + new_index = self.scale_index_with_offset( + index, itervar_idx=self.tiling_idx, offset=inner + ) + tmpbufdefine = ( + f"for (long {inner} = 0; {inner} < {self.tiling_factor}; {inner}++) " + ) + rhs = f"{var}[{cexpr_index(new_index)}]" + if is_mask: + rhs = f"flag_to_float_scalar({rhs})" + tmpbufdefine += f"tmpbuf[{inner}] = {rhs};" + line = f"([&]() {{ {tmpbufdeclare} {tmpbufdefine} return {line}; }})()" + + csevar = self.cse.generate(self.loads, line) + csevar.update_on_args("load", (name, index), {}) + assert isinstance(csevar, CppCSEVariable) + csevar.is_vec = True + return csevar + + def get_vec_store_line(self, value, var, index, dtype): + """ + Get a store line str that stores `value` into `var` at `index` of `dtype`. + :param value: Vectorized type templaterized on `dtype`. + :param var: buffer to store into. + :index: index into the `var`. 
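+        For example (illustrative): a contiguous float store may generate
+        `tmp0.store(out_ptr0 + static_cast<long>(x1));` while other dtypes also pass the
+        tiling factor, e.g. `tmp0.store(out_ptr0 + static_cast<long>(x1), 16);`.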
+ """ + # when value's type is str (e.g., welford reduction), caller should make sure + # it is a vector + assert isinstance(value, str) or ( + isinstance(value, CppCSEVariable) and value.is_vec + ), value + tiling_var = self.itervars[self.tiling_idx] + assert index.has(tiling_var) + var_expr = f"{var} + {cexpr_index(index)}" + non_contiguous = stride_at(tiling_var, index) != 1 or "tmp" in f"{index}" + if non_contiguous: + var_expr = "tmpbuf" + if dtype == torch.float: + line = f"{value}.store({var_expr});" + else: + line = f"{value}.store({var_expr}, {self.tiling_factor});" + if non_contiguous: + inner = sympy_symbol(f"{tiling_var}_inner") + new_index = self.scale_index_with_offset( + index, itervar_idx=self.tiling_idx, offset=inner + ) + tmp_bufsize = ( + f"{self.tiling_factor}*sizeof(float)/sizeof({DTYPE_TO_CPP[dtype]})" + ) + line = ( + f"{{ __at_align__ {DTYPE_TO_CPP[dtype]} tmpbuf[{tmp_bufsize}]; {line} " + f"for (long {inner} = 0; {inner} < {self.tiling_factor}; {inner}++) " + f"{var}[{cexpr_index(new_index)}] = tmpbuf[{inner}]; }}" + ) + return line + + def store(self, name, index, value, mode=None): + assert "buf" in name + assert mode is None + assert isinstance(value, CppCSEVariable), value + if not value.is_vec: + # this happens when we store a scalar into a vectorized buffer like "fill" + value = self.broadcast(value) + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + var = self.args.output(name) + index = self.rename_indexing(index) + self.stores.writeline( + DeferredLine( + name, + self.get_vec_store_line(value, var, index, V.graph.get_dtype(name)), + ) + ) + + def reduction(self, dtype, src_dtype, reduction_type, value): + assert reduction_type in { + "max", + "min", + "sum", + "prod", + "xor_sum", + "welford_reduce", + "welford_combine", + } + assert dtype == torch.float + assert src_dtype == torch.float + assert isinstance(value, CppCSEVariable) and value.is_vec, value + + vec_ns = "at::vec" + vec = f"{vec_ns}::Vectorized<{DTYPE_TO_CPP[dtype]}>" + acc_type = reduction_acc_type(reduction_type, dtype) + acc_type_vec = reduction_acc_type_vec(reduction_type, dtype) + + if (reduction_type, acc_type) not in self.reduction_omp_dec: + if RTYPE_TO_CPP[reduction_type] not in NATIVE_OMP_RTYPES: + # Scalar reduction for other reductions are declared by default + self.reduction_prefix.splice( + f"""\ +#pragma omp declare reduction(\ +{RTYPE_TO_CPP[reduction_type]}:{acc_type}:\ +omp_out = {reduction_combine(reduction_type, "omp_out", "omp_in")}) \ +initializer(omp_priv={{{reduction_init(reduction_type, dtype)}}}) + """ + ) + self.reduction_omp_dec[reduction_type, acc_type] = RTYPE_TO_CPP[ + reduction_type + ] + + if (reduction_type, acc_type_vec) not in self.reduction_omp_dec: + self.reduction_prefix.splice( + f"""\ +#pragma omp declare reduction(\ +{RTYPE_TO_CPP[reduction_type]}:{acc_type_vec}:\ +omp_out = {reduction_combine_vec(reduction_type, "omp_out", "omp_in")}) \ +initializer(omp_priv={{{reduction_init_vec(reduction_type, dtype)}}}) + """ + ) + self.reduction_omp_dec[reduction_type, acc_type_vec] = RTYPE_TO_CPP[ + reduction_type + ] + + reduction_key = src_dtype, reduction_type, value + if reduction_key in self.reduction_cse.reduction_cache: + return self.reduction_cse.reduction_cache[reduction_key] + + acc = self.reduction_cse.generate( + self.loads, f"reduction {reduction_key}", write=False + ) + acc_vec = f"{acc}_vec" + + self.reduction_var_map[acc_vec] = reduction_type + self.reduction_prefix.writeline( + f"{acc_type} {acc} = {reduction_init(reduction_type, 
dtype)};" + ) + self.reduction_prefix.writeline( + f"{acc_type_vec} {acc_vec} = {reduction_init_vec(reduction_type, dtype)};" + ) + self.stores.writeline( + f"{acc_vec} = {reduction_combine_vec(reduction_type, acc_vec, value)};" + ) + + tmpvar: Union[str, CSEVariable] + if self.tiling_idx >= self.reduction_depth: + # Horizontal reduction + if is_welford_reduction(reduction_type): + next_value = f"welford_vec_reduce_all({acc_vec})" + else: + reduce_all_body = ( + "{ return " + + reduction_combine_vec(reduction_type, "x", "y") + + "; }" + ) + vec_reduce_all_func = f"{vec_ns}::vec_reduce_all<{DTYPE_TO_CPP[dtype]}>" + next_value = f"{vec_reduce_all_func}([]({vec}& x, {vec}& y) {reduce_all_body}, {acc_vec})" + + self.reduction_suffix.writeline( + f"{acc} = {reduction_combine(reduction_type, acc, next_value)};" + ) + tmpvar = acc + else: + tmpvar = acc_vec + + result = reduction_project(reduction_type, tmpvar) + self.reduction_cse.reduction_cache[reduction_key] = result + return result + + def store_reduction(self, name, index, value): + index = self.rename_indexing(index) + var = self.args.output(name) + out_dtype = V.graph.get_dtype(name) + # Only float reductions are vectorized currently + dtype = torch.float + if self.tiling_idx >= self.reduction_depth: + # Horizontal reduction + self.reduction_suffix.writeline( + DeferredLine( + name, + f"{var}[{cexpr_index(index)}] = static_cast<{DTYPE_TO_CPP[out_dtype]}>({value});", + ) + ) + else: + # Vertical reduction + store_lines = [] + if out_dtype != dtype: + if out_dtype in DTYPE_LOWP_FP and dtype == torch.float: + _lowp_fp_tmpvar_vec = f"{DTYPE_TO_CPP[out_dtype]}_{value}" + store_lines = [ + DeferredLine( + name, + f"auto {_lowp_fp_tmpvar_vec} = cvt_fp32_to_lowp_fp<{DTYPE_TO_CPP[out_dtype]}>({value});", + ) + ] + value = _lowp_fp_tmpvar_vec + else: + raise AssertionError( + f"Unsupported reduction type from {dtype} to {out_dtype}" + ) + store_lines += [ + DeferredLine( + name, + self.get_vec_store_line(value, var, index, out_dtype), + ) + ] + self.reduction_suffix.writelines(store_lines) + + def broadcast(self, scalar_var: CppCSEVariable): + assert ( + not scalar_var.is_vec + and self.itervars[self.tiling_idx] not in scalar_var.dependent_itervars + ) + if scalar_var.dtype == torch.bool: + vec_var = self.cse.generate( + self.compute, f"to_float_mask({scalar_var.name})" + ) + else: + assert scalar_var.dtype is not None + vec_var = self.cse.generate( + self.compute, + f"at::vec::Vectorized<{DTYPE_TO_CPP[scalar_var.dtype]}>({scalar_var.name})", + ) + assert isinstance(vec_var, CppCSEVariable) + vec_var.dtype = scalar_var.dtype + vec_var.dependent_itervars = scalar_var.dependent_itervars + vec_var.is_vec = True + return vec_var + + +class CppTile2DKernel(CppVecKernel): + """ + A vector kernel that handles the 2d tiles with the tile size defined in `tiling_factor` on + the inner-most loop level and one of the outer loop level (`outer_tiling_idx`). When the data + tile is accessed in a contiguous way from the outer loop axis, a transposition is applied on the + tile to make the access contiguous from the inner-most loop axis. Then, the same vectorization + logic from its parent `CppVecKernel` is leveraged for load/store/compute. The transposed tile load + and store are generated into kernel.preloads and kernel.poststores buffers. + + The loop structure looks like below: + for ... + for i_outer ... + for ... + for inner_most ... 
+ // generated by CppTile2DKernel + float tmp0[16*16]; at::vec::transpose_mxn<...>(tmp0, in_ptr0 + ..., ...); // into kernel.preloads + float tmp1[16*16]; // into kernel.preloads + for i_inner ... { // the kernel inner loop + vectorized loads/compute/stores (e.g., load tmp0, store tmp1) // into kernel.loads/compute/stores + } + at::vec::transpose_mxn(out_ptr0 + ..., tmp1, ...) // into kernel.poststores + for inner_most ... (tail) + // generated by CppVecKernel + ... + for i_outer ... (tail) + for ... + for ... + // generated by CppKernel + ... + """ + + def __init__(self, args, num_threads, tiling_factor, tiling_indices, tiling_dtype): + super().__init__( + args, num_threads, tiling_factor, tiling_indices[1], tiling_dtype + ) + self.tiling_indices = tiling_indices + + def inner_itervar(self): + return sympy_symbol(f"{self.itervars[self.outer_idx]}_inner") + + def need_vec_transpose(self, index): + return ( + stride_at(self.itervars[self.outer_idx], index) == 1 + and index.has(self.itervars[self.tiling_idx]) + and not stride_at(self.itervars[self.tiling_idx], index).has( + self.itervars[self.tiling_idx] + ) + and not stride_at(self.itervars[self.tiling_idx], index).has( + self.itervars[self.outer_idx] + ) + ) + + def gen_transposed_tile_load_store(self, name, var, index, is_store): + # transposed tile load/store outside the kernel inner loop + dtype = V.graph.get_dtype(name) + factor = self.tiling_factor + src = f"{var} + {cexpr_index(index)}" + dst = "__place_holder__" + ld_src = f"{cexpr_index(stride_at(self.itervars[self.tiling_idx], index))}" + ld_dst = f"{factor}" + if is_store: + src, dst = dst, src + ld_src, ld_dst = ld_dst, ld_src + + need_define = True + load_or_store = f"at::vec::transpose_mxn<{DTYPE_TO_CPP[dtype]},{factor},{factor}>({src}, {ld_src}, {dst}, {ld_dst});" + if is_store: + tile_var = self.cse.newvar() + elif load_or_store not in self.cse.cache: + tile_var = self.cse.generate(self.preloads, load_or_store, write=False) + else: + need_define = False + tile_var = self.cse.cache[load_or_store] + + if need_define: + define_line = f"{DTYPE_TO_CPP[dtype]} {tile_var}[{factor}*{factor}] __attribute__ ((aligned ({factor})));" + self.preloads.writeline(define_line) + + load_or_store = load_or_store.replace("__place_holder__", str(tile_var)) + if is_store: + self.poststores.writeline(DeferredLine(name, load_or_store)) + else: + self.preloads.writeline(load_or_store) + + return tile_var + + def load(self, name: str, index: sympy.Expr): + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + var = self.args.input(name) + index = self.rename_indexing(index) + + inner = self.inner_itervar() + if self.need_vec_transpose(index): + tile_var = self.gen_transposed_tile_load_store( + name, var, index, is_store=False + ) + # vector load inside the kernel inner loop + loadbuf = f"{tile_var} + {cexpr_index(inner * self.tiling_factor)}" + dtype = V.graph.get_dtype(name) + if dtype in DTYPE_LOWP_FP: + line = f"at::vec::Vectorized<{DTYPE_TO_CPP[dtype]}>::loadu({loadbuf}, {self.tiling_factor})" + elif ( + V.graph.get_dtype(name) in [torch.uint8] + and opt_ctx.is_load_uint8_as_float + ): + line = f"at::vec::Vectorized::loadu_one_fourth({loadbuf})" + else: + line = f"at::vec::Vectorized::loadu({loadbuf})" + csevar = self.cse.generate(self.loads, line) + csevar.update_on_args("load", (name, index), {}) + assert isinstance(csevar, CppCSEVariable) + csevar.is_vec = True + return csevar + else: + new_index = self.scale_index_with_offset( + index, + itervar_idx=self.outer_idx, + offset=inner, + ) + 
return super().load(name, new_index) + + def store(self, name, index, value, mode=None): + assert "buf" in name + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + var = self.args.output(name) + + inner = self.inner_itervar() + index = self.rename_indexing(index) + assert mode is None + if self.need_vec_transpose(index): + tile_var = self.gen_transposed_tile_load_store( + name, var, index, is_store=True + ) + # vector store inside the kernel inner loop + storebuf = f"{tile_var} + {cexpr_index(inner * self.tiling_factor)}" + if V.graph.get_dtype(name) in DTYPE_LOWP_FP: + line = f"{value}.store({storebuf}, {self.tiling_factor});" + elif V.graph.get_dtype(name) in [torch.uint8]: + line = f"{value}.store({storebuf}, {self.tiling_factor});" + else: + line = f"{value}.store({storebuf});" + self.stores.writeline(DeferredLine(name, line)) + else: + new_index = self.scale_index_with_offset( + index, + itervar_idx=self.outer_idx, + offset=inner, + ) + super().store(name, new_index, value, mode) + + def codegen_inner_loops(self, code): + inner = self.inner_itervar() + code.writeline( + f"for (long {inner} = 0; {inner} < {self.tiling_factor}; {inner}++)" + ) + + def set_ranges(self, group, reduction_group): + vars = super().set_ranges(group, reduction_group) + # do vertical reduction as the tail loop + self.outer_idx, self.tiling_idx = ( + self.tiling_indices + if self.tiling_indices[1] < self.reduction_depth + else reversed(self.tiling_indices) + ) + return vars + + +class CppVecKernelChecker(CppVecKernel): + def __init__(self, args, num_threads, tiling_factor, tiling_idx=-1): + super().__init__(args, num_threads, tiling_factor, tiling_idx) + + # Since this kernel is only for checker but does not generate any + # code, so we need to decrease the kernel count. + metrics.generated_kernel_count -= 1 + metrics.generated_cpp_vec_kernel_count -= 1 + + # Used to record the graph wrapper code as the wrapper_code status could be + # changed during graph run. + self._orig_wrapper_code = None + + self.simd_vec = True + + self.fast_vec_list = [] + for k, v in CppVecOverrides.__dict__.items(): + if isinstance(v, staticmethod): + self.fast_vec_list.append(k) + self.exit_stack = contextlib.ExitStack() + + # Cache all the load result + self.load_supported_dtypes: List[torch.dtype] = [ + torch.float, + torch.bfloat16, + torch.float16, + torch.bool, + torch.uint8, + ] + self.store_supported_dtypes: List[torch.dtype] = [ + torch.float, + torch.bfloat16, + torch.float16, + torch.uint8, + ] + # Cache the dtypes of the store operation. If the store is mixing dtypes, the + # vectorization would not support it as it is hard to determine the vec dtype + self.store_dtypes: List[torch.dtype] = [] + # The dtype is used for vectorization + self.vec_dtype: torch.dtype = torch.float32 + + def disable_vec(self, msg=None): + if schedule_log.isEnabledFor(logging.DEBUG): + schedule_log.debug("Disabled vectorization: %s", msg) + self.simd_vec = False + + def is_mask(self, name: str, users: Dict[torch.fx.Node, None]): + load_type = V.graph.get_dtype(name) + if load_type == torch.bool: + return all(user.target in ("where", "masked") for user in users.keys()) + elif load_type == torch.uint8: + """ + If the load value is torch.uint8, then we only support the loaded + value is as the mask. 
+ """ + if not all( + user.target == "to_dtype" and user.args[-1] == torch.bool + for user in users.keys() + ): + return False + + for to_dtype_node in users.keys(): + assert to_dtype_node.target == "to_dtype" + if not all( + user.target in ("where", "masked") + for user in to_dtype_node.users.keys() + ): + return False + return True + else: + return False + + def is_load_uint8_as_float(self, name: str, users: Dict[torch.fx.Node, None]): + """ + Check: + 1. load_type is torch.uint8 + 2. has 1 user node of target to_dtype + 3. dtype of to_dtype is torch.float + """ + load_type = V.graph.get_dtype(name) + if load_type is not torch.uint8: + return False + if len(users) == 1: + user = next(iter(users)) + if (user.target == "to_dtype") and (user.args[-1] == torch.float): + return True + return False + return False + + def can_store_fp32_as_uint8(self, store_var: str, value_node: torch.fx.Node): + """ + Check: + 1. store_type is torch.uint8 + 2. value_node is of target to_dtype + 3. dtype of to_dtype node is torch.uint8 + """ + store_type = V.graph.get_dtype(store_var) + if store_type not in [torch.uint8]: + return False + if value_node.target == "to_dtype" and value_node.args[-1] == torch.uint8: + return True + + return False + + def is_load_integer_scalar_tensor(self, name: str, index: sympy.Expr): + load_dtype = V.graph.get_dtype(name) + buffer = V.graph.get_buffer(name) + return ( + load_dtype in [torch.int32, torch.int64] + and isinstance(buffer, TensorBox) + and isinstance(buffer.data, StorageBox) + and (len(buffer.data.layout.size) == 0) + and (index == 0) + ) + + def load(self, name: str, index: sympy.Expr): + with RecordOptimizationContext(__name__) as node_ctx: + load_dtype = V.graph.get_dtype(name) + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + opt_ctx.dtype = load_dtype + opt_ctx.is_load_as_mask = self.is_mask(name, node_ctx.get_fx_node().users) + opt_ctx.is_load_uint8_as_float = self.is_load_uint8_as_float( + name, node_ctx.get_fx_node().users + ) + + var = self.cse.newvar() + + if len(self.itervars) == 0: + self.disable_vec("not a loop") + return var + + if load_dtype in [torch.bool, torch.uint8] and not ( + opt_ctx.is_load_as_mask or opt_ctx.is_load_uint8_as_float + ): + if not opt_ctx.is_load_as_mask: + self.disable_vec(f"{load_dtype} not loaded as mask") + elif not opt_ctx.is_load_uint8_as_float: + self.disable_vec(f"{load_dtype} not loaded as float") + return var + + if ( + (load_dtype not in self.load_supported_dtypes) + and not self.is_load_integer_scalar_tensor(name, index) + and index.has(self.itervars[self.tiling_idx]) + ): + self.disable_vec(f"{load_dtype} not supported by load") + return var + + return var + + def store(self, name, index, value, mode=None): + with RecordOptimizationContext(__name__) as node_ctx: + if len(self.itervars) == 0: + self.disable_vec("not a loop") + return self.simd_vec + + store_dtype = V.graph.get_dtype(name) + + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + opt_ctx.dtype = store_dtype + + store_dtype = torch.float if store_dtype == torch.float32 else store_dtype + self.store_dtypes.append(store_dtype) + if store_dtype not in self.store_supported_dtypes: + self.disable_vec(f"{store_dtype} not supported by store") + return self.simd_vec + + if store_dtype in [torch.uint8]: + value_node = node_ctx.get_fx_node().all_input_nodes[-1] + if not self.can_store_fp32_as_uint8(name, value_node): + self.disable_vec("not support store float32 as uint8") + return self.simd_vec + + assert "buf" in name 
+ index = self.rename_indexing(index) + + if mode: + self.disable_vec(f"store mode: {mode}") + return self.simd_vec + + if index.is_number: + self.disable_vec(f"constant store index: {index}") + return self.simd_vec + + def reduction(self, dtype, src_dtype, reduction_type, value): + if ( + dtype == torch.float + and src_dtype == torch.float + and reduction_type in VECTORIZABLE_RTYPES + ): + pass + else: + self.disable_vec( + f"reduction: dtype {dtype}, src_dtype {src_dtype}, reduction_type {reduction_type}" + ) + if is_welford_reduction(reduction_type): + return tuple([self.simd_vec] * 3) + return self.simd_vec + + def store_reduction(self, name, index, value): + return self.simd_vec + + def is_supported_cmp(self, node: torch.fx.Node): + def get_node_dtype(node): + if type(node) == torch.fx.Node: + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + return opt_ctx.dtype if opt_ctx else None + else: + return None + + def get_cmp_dtypes(node: torch.fx.Node): + return get_node_dtype(node.args[-2]), get_node_dtype(node.args[-1]) + + assert len(node.args) >= 2 + # cmp(x, y): y is a magic value like x >= 1 + if type(node.args[-1]) in [int, float]: + return True + # cmp(x, y): x is a magic value like 1 >= y + if type(node.args[-2]) in [int, float]: + return False + + left_dtype, right_dtype = get_cmp_dtypes(node) + if left_dtype is None or right_dtype is None: + # TODO(Eikan): To record, deduce and propagate the data type of every expression. + return True + else: + return left_dtype == right_dtype + + def __exit__(self, exc_type, exc_val, exc_tb): + assert self._orig_wrapper_code is not None + # Restore the wrapper_code + V.graph.wrapper_code = self._orig_wrapper_code + self.exit_stack.__exit__(exc_type, exc_val, exc_tb) + + def __enter__(self): + # Record the graph wrapper code. The wrapper_code status could be + # changed during graph run. Regarding this checker, we also need to + # run the graph but we don't expect to change any status that would + # impact the code generation. Hence, we record the graph wrapper code + # and replace it with a dummy wrapper_code and then restore to the + # original one as long as the checker is finished. 
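+        # Illustrative usage (sketch, argument names are examples only): the caller runs
+        # the loop body once under this checker and then inspects the result, e.g.
+        #   with CppVecKernelChecker(deepcopy(kernel_group.args), threads, factor) as checker:
+        #       run(loop_body)
+        #   can_vectorize = checker.simd_vec
+        # Any unsupported op, load, store or reduction flips simd_vec via disable_vec().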
+ self._orig_wrapper_code = V.graph.wrapper_code + V.graph.wrapper_code = WrapperCodeGen() + + class VecCheckerProxy: + bin_cmp_ops = ["eq", "ne", "le", "ge", "lt", "gt"] + + @staticmethod + def _bin_cmp_op(x, y): + current_node: torch.fx.Node = V.interpreter.current_node + if not self.is_supported_cmp(current_node): + self.disable_vec(f"binary comparison op: {current_node}") + return self.simd_vec + + @staticmethod + def __getattr__(name): # type: ignore[misc] + def inner(*args, **kwargs): + if name in VecCheckerProxy.bin_cmp_ops: + return VecCheckerProxy._bin_cmp_op(args, kwargs) + + if name not in self.fast_vec_list: + self.disable_vec(f"op: {name}") + return self.simd_vec + + return inner + + @staticmethod + def load(name: str, index: sympy.Expr): + return self.load(name, index) + + @staticmethod + def store(name, index, value, mode=None): + return self.store(name, index, value, mode=mode) + + @staticmethod + def reduction(dtype, src_dtype, reduction_type, value): + return self.reduction(dtype, src_dtype, reduction_type, value) + + @staticmethod + def store_reduction(name, index, value): + return self.store_reduction(name, index, value) + + @staticmethod + def constant(val, dtype): + with RecordOptimizationContext(__name__) as node_ctx: + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + # VecKernel override dtype for constant + # Vectorization only support int32/fp32 now + # So if dtype = int64/fp64, we will cast it to int32/fp32 if possible + i32_iinfo = torch.iinfo(torch.int32) + if ( + dtype == torch.int64 + and val <= i32_iinfo.max + and val >= i32_iinfo.min + ): + opt_ctx.dtype = torch.int32 + + f32_iinfo = torch.finfo(torch.float32) + if dtype == torch.double: + if ( + (val <= f32_iinfo.max and val >= f32_iinfo.min) + or (val == torch.inf) + or (val == -torch.inf) + ): + opt_ctx.dtype = torch.float32 + + supported_dtypes = [ + torch.float32, + torch.int32, + torch.bfloat16, + torch.float16, + ] + + if opt_ctx.dtype not in supported_dtypes or ( + opt_ctx.dtype == torch.int32 + and not all( + user.target in VecCheckerProxy.bin_cmp_ops + for user in node_ctx.current_node.users + ) + ): + self.disable_vec(f"constant dtype: {opt_ctx.dtype}") + return val + + @staticmethod + def index_expr(expr, dtype): + assert len(self.ranges) == len(self.itervars) + if not len(self.ranges) or not all( + not isinstance(range, sympy.Expr) or sympy.simplify(range).is_number + for range in self.ranges + ): + # if the range value is sympy.Expr, we might could not deduce the accurate loop interval. + self.disable_vec(f"index_expr: {expr}, dtype {dtype}") + return self.cse.newvar() + + def can_use_int32(): + free_symbols = list(expr.free_symbols) + sizes = { + k: v + for k, v in zip(self.itervars, self.ranges) + if k in free_symbols + } + # Trivial case: Range empty + if any(v == 0 for v in sizes.values()): + return True + + vars_ranges = {k: ValueRanges(0, v - 1) for k, v in sizes.items()} + if not vars_ranges or len(vars_ranges) != len(free_symbols): + i32_iinfo = torch.iinfo(torch.int32) + return ( + expr.is_number + and expr <= i32_iinfo.max + and expr >= i32_iinfo.min + ) + expr_ranges = bound_sympy(expr, vars_ranges) + if math.isinf(expr_ranges.lower) or math.isinf(expr_ranges.upper): # type: ignore[arg-type] + return False + # If something takes the values 0..7, we will compare in the loop + # x < 8. 
As such, for the loop not to overflow in the last iteration, we want + # to check that expr_ranges.upper + 1 is representable as well + return range_expressable_in_32_bits( + ValueRanges( + int(expr_ranges.lower), int(expr_ranges.upper) + 1 # type: ignore[arg-type] + ) + ) + + with RecordOptimizationContext(__name__) as node_ctx: + assert len(self.ranges) == len(self.itervars) + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + if ( + dtype == torch.int64 + and can_use_int32() + and all( + user.target in VecCheckerProxy.bin_cmp_ops + for user in node_ctx.current_node.users + ) + ): + opt_ctx.dtype = torch.int32 + else: + opt_ctx.dtype = dtype + self.disable_vec(f"index_expr: {expr}, dtype {dtype}") + + tiling_var = self.itervars[self.tiling_idx] + tiling_var_irrelevant = not expr.has(tiling_var) + if not tiling_var_irrelevant: + self.disable_vec( + f"index_expr (tiling var relevant): {expr}, dtype {dtype}" + ) + opt_ctx.is_most_inner_loop_irrevelant = tiling_var_irrelevant + tmp_var = self.cse.newvar() + return tmp_var + + @staticmethod + def indirect_indexing(index_var, size, check=True): + return sympy_symbol(str(index_var)) + + @staticmethod + def masked(mask, body, other): + body() + return self.cse.newvar() + + @staticmethod + def to_dtype(x, dtype, src_dtype=None): + with RecordOptimizationContext(__name__) as node_ctx: + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + opt_ctx.dtype = dtype + + cur_node = node_ctx.get_fx_node() + input_value: torch.fx.Node = cur_node.all_input_nodes[1] + if dtype == torch.float: + if input_value.target in [ + "load", + ]: + # Support masked_load for BF16/FP16. Because the legalization will + # insert to_dtype to convert the BF16/FP16 input to FP32. + dtype = ( + V.graph.get_dtype(input_value.args[1]) + if input_value.target == "load" + else input_value.args[-1] + ) + if dtype in [ + torch.float16, + torch.bfloat16, + torch.float, + torch.uint8, + ]: + # Convert from dtype to torch.float + pass + elif ( + dtype in [torch.int32, torch.int64] + and input_value.target == "load" + ): + buffer = V.graph.get_buffer(input_value.args[1]) + # Check if load of a scalar tensor of integer + if not ( + isinstance(buffer, TensorBox) + and isinstance(buffer.data, StorageBox) + and len(buffer.data.layout.size) == 0 + ): + self.disable_vec(f"to_dtype: dtype {dtype}") + else: + self.disable_vec(f"to_dtype: dtype {dtype}") + elif dtype in DTYPE_LOWP_FP: + if not all(usr.target == "store" for usr in cur_node.users): + self.disable_vec( + "to_dtype: bfloat16/float16 expecting users are all stores" + ) + return x + + store_names = [usr.args[1] for usr in cur_node.users] + if not all( + V.graph.get_dtype(name) in [dtype] for name in store_names + ): + self.disable_vec( + "to_dtype: expecting all stores into bfloat16 or float16" + ) + return x + elif dtype == torch.bool: + pass + elif dtype == torch.uint8: + # Only allow below 2 cases: + # Case 1: to_uint8 and store which corresponding to the single quant node + # at last of fusion pattern. + is_to_uint8_and_store = all( + usr.target in ["store"] for usr in cur_node.users + ) + # Case 2: to_uint8 and to_float which corresponding to pair of quant/dequant node + # at middle of fusion pattern. 
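+                        # Illustrative fx pattern for case 2 (node names are examples only):
+                        #   %q  = call_method[target=to_dtype](args = (%ops, %x, torch.uint8))
+                        #   %dq = call_method[target=to_dtype](args = (%ops, %q, torch.float32))
+                        # i.e. every user of the uint8 to_dtype converts straight back to float32.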
+ is_to_uint8_and_to_float = all( + ( + usr.target in ["to_dtype"] + and usr.args[2] == torch.float32 + ) + for usr in cur_node.users + ) + if not (is_to_uint8_and_store or is_to_uint8_and_to_float): + self.disable_vec(f"to_dtype: dtype {dtype}") + else: + self.disable_vec(f"to_dtype: dtype {dtype}") + return x + + self.exit_stack.enter_context(V.set_ops_handler(VecCheckerProxy())) + self.exit_stack.enter_context(V.set_kernel_handler(self)) + return self + + +class CppKernelProxy(CppKernel): + def __init__(self, kernel_group): + super().__init__(kernel_group.args, kernel_group.ws.num_threads) + self.kernel_group = kernel_group + self.loop_nest = None + self.call_ranges = None + self.picked_vec_isa: codecache.VecISA = codecache.pick_vec_isa() + + def data_type_propagation(self, nodes): + for _node in nodes: + assert isinstance(_node, SchedulerNode) + DataTypePropagation.propagate_scheduler_node(_node) + + # Check if all the nodes of a given fx graph can support BF16/FP16 + def is_lowp_fp_scheduler(self, scheduler_node: SchedulerNode): + if not isinstance(scheduler_node._body, ir.LoopBody): + return True + + _lowp_fp_type: Optional[torch.dtype] = None + + # Propagate the dtype to check if all the fx node is bf16/fp16 + DataTypePropagation.propagate_scheduler_node(scheduler_node) + + sub_blocks = [scheduler_node._body.root_block] + list( + scheduler_node._body.subblocks.values() + ) + for sub_block in sub_blocks: + for _node in sub_block.graph.nodes: + # TODO(Eikan): Regarding get_index and index_expr, we should conclude the + # the data type as well. + if _node.op == "placeholder" or _node.target in ( + "get_index", + "index_expr", + ): + continue + + # Fast path if all operations can support bf16/fp16 without converting to fp32 + if _node.target not in [ + "load", + "store", + "abs", + "neg", + "output", + ]: + return False + + if hasattr(_node, "meta") and _node.meta: + assert OptimizationContext.key in _node.meta + opt_ctx: OptimizationContext = _node.meta[OptimizationContext.key] + if not opt_ctx.dtype or opt_ctx.dtype not in DTYPE_LOWP_FP: + return False + if _lowp_fp_type: + assert ( + _lowp_fp_type == opt_ctx.dtype + ), "scheduler node do not support bf16/fp16 mix" + else: + _lowp_fp_type = opt_ctx.dtype + else: + return False + + scheduler_node._lowp_fp_type = _lowp_fp_type # type: ignore[attr-defined] + return True + + def legalize_lowp_fp_dtype(self, nodes): + def add_to_dtype(sub_graph: torch.fx.Graph): + def is_lowp_fp_load(node: torch.fx.Node): + if node.target not in ["load"]: + return False + assert len(node.args) == 3 + load_dtype = V.graph.get_dtype(node.args[1]) + return load_dtype in DTYPE_LOWP_FP + + def is_lowp_fp_store(node: torch.fx.Node): + if node.target != "store": + return False + _, store_var, _, _, _ = node.args + store_dtype = V.graph.get_dtype(store_var) + return store_dtype in DTYPE_LOWP_FP + + sub_graph_nodes = list(sub_graph.nodes) + to_lowp_fp_legalized_nodes = [] + for _node in sub_graph_nodes: + if is_lowp_fp_load(_node): + # No need to promote to float if all users are direct stores + if all(user.target == "store" for user in _node.users): + continue + ops = _node.args[0] + with sub_graph.inserting_after(_node): + to_type_node = sub_graph.call_method( + "to_dtype", args=(ops, _node, torch.float) + ) + to_type_node_args = to_type_node.args + _node.replace_all_uses_with(to_type_node) + to_type_node.args = to_type_node_args + metrics.cpp_to_dtype_count += 1 + elif is_lowp_fp_store(_node): + ops, name, _, value_var, _ = _node.args + # No need to promote to 
float if it is a user of a load which are all directly stored + if value_var.target == "load" and all( + user.target == "store" for user in value_var.users + ): + continue + dtype = V.graph.get_dtype(name) + with sub_graph.inserting_before(_node): + to_type_node = sub_graph.call_method( + "to_dtype", args=(ops, value_var, dtype) + ) + _node.replace_input_with(value_var, to_type_node) + metrics.cpp_to_dtype_count += 1 + elif _node.target == "reduction": + ( + ops, + dtype, + src_dtype, + reduction_type, + value, + ) = _node.args + if src_dtype in DTYPE_LOWP_FP: + # Since we always convert the load/store value to float if the tensor is bfloat16/float16. + # Therefore, the reduction should never work with bfloat16/float16 value. Hence, we update + # the bfloat16/float16 reduction by + # 1) updating the src_dtype to float + # and 2) updating the dtype to float if it is bfloat16/float16. + assert dtype in [ + torch.float, + torch.bfloat16, + torch.float16, + torch.int64, + ] + _node.args = ( + ops, + torch.float if dtype in DTYPE_LOWP_FP else dtype, + torch.float, + reduction_type, + value, + ) + elif _node.target == "to_dtype" and _node.args[-1] in DTYPE_LOWP_FP: + (ops, x, _) = _node.args + # The legalization always loads the BF16/FP16 tensor as FP32 for computation + # and converts back to BF16/FP16 after the computation. + # Hence, there should be no computation w/ BF16/FP16. + # Therefore, we update the to_dtype by replacing the bf16/fp16 dtype with fp32. + # Save the legalized to_dtype node for the elimination(eliminate_to_dtype step): + # 1) Eliminate the redundant to_dtype node if we have a pattern as follows: + # graph(): + # %lowp_fp_legalized = call_method[target=to_dtype](args = (%ops, %input, torch.float)) + # %to_dtype2 = call_method[target=to_dtype](args = (%ops, %lowp_fp_legalized, torch.bfloat16/float16)) + # Regarding the first to_dtype, it is redundant because + # the second to_type also converts to the torch.bfloat16/torch.float16. + # Hence, we remove the first to_type. + to_lowp_fp_legalized_nodes.append(_node) + _node.args = (ops, x, torch.float) + else: + pass + + def eliminate_to_dtype(sub_graph: torch.fx.Graph): + def _eliminate_duplicate_to_node(sub_graph: torch.fx.Graph): + # Eliminate the redundant to_dtype node. Let's consider a pattern as follows: + # graph(): + # %to_dtype1 = call_method[target=to_dtype](args = (%ops, %input, torch.float), kwargs = {}) + # %to_dtype2 = call_method[target=to_dtype](args = (%ops, %to_dtype1, torch.float), kwargs = {}) + # Regarding the first to_dtype, it is redundant because the second to_type also converts to the + # torch.float. Hence, we remove the first to_type + def _used_by_to(to_node: torch.fx.Node): + return all(usr.target == "to_dtype" for usr in to_node.users) + + all_to_nodes = [ + node for node in sub_graph.nodes if node.target == "to_dtype" + ] + all_to_nodes_and_users = [ + {node: node.users} for node in all_to_nodes if _used_by_to(node) + ] + for node_users in all_to_nodes_and_users: + for node, users in node_users.items(): + if node in sub_graph.nodes and ( + all(usr.args[-1] == node.args[-1] for usr in users) + or ( + node in to_lowp_fp_legalized_nodes + and all( + usr.args[-1] in DTYPE_LOWP_FP for usr in users + ) + ) + ): + val_node = node.all_input_nodes[-1] + node.replace_all_uses_with(val_node) + sub_graph.erase_node(node) + + # For debug mode, the graph of LoopBody will attach a new GraphModule as + # owning_module for debugging while the release mode will not. 
The lint will + # check whether the graph has owning_module to decide if it needs to check + # call_module. LoopBody might contain get_index as a module call. But it + # is just a function. Hence, it cannot pass the lint check for debug mode. + # We bypass the check if the owning_module is None. Eventually, we should call + # get_index via call_function but not call_module. + if sub_graph.owning_module is None: + sub_graph.lint() + + _eliminate_duplicate_to_node(sub_graph) + + eliminate_to_dtype(sub_graph) + + def _legalize_lowp_fp(loop_body: ir.LoopBody): + sub_blocks = [loop_body.root_block] + list(loop_body.subblocks.values()) + for sub_block in sub_blocks: + add_to_dtype(sub_block.graph) + + if all( + isinstance(_node, SchedulerNode) and self.is_lowp_fp_scheduler(_node) + for _node in nodes + ): + # Mark the load node to load bf16/fp16 + for _node in nodes: + sub_blocks = [_node._body.root_block] + list( + _node._body.subblocks.values() + ) + for sub_block in sub_blocks: + for fx_node in sub_block.graph.nodes: + if fx_node.target in ["load", "store"]: + assert fx_node.meta + assert OptimizationContext.key in fx_node.meta + opt_ctx: OptimizationContext = fx_node.meta[ + OptimizationContext.key + ] + assert opt_ctx.dtype in DTYPE_LOWP_FP + + # Bypass the legalization as the kernel can run with bf16/fp16 directly + return + + for _node in nodes: + assert isinstance(_node, SchedulerNode) + assert isinstance(_node._body, ir.LoopBody) + node: SchedulerNode = _node + + def is_memory_copy_scheduler_node(node: SchedulerNode): + op_counts = node.read_writes.op_counts + return ( + len(op_counts) == 2 and "load" in op_counts and "store" in op_counts + ) + + should_legalize = not is_memory_copy_scheduler_node(node) + if should_legalize: + body: ir.LoopBody = node._body + _legalize_lowp_fp(body) + + def codegen_nodes(self, nodes): + # Legalize BF16 node by adding to_dtype explicitly + self.legalize_lowp_fp_dtype(nodes) + self.data_type_propagation(nodes) + + assert len(nodes) >= 1 + first_node = nodes[0] + vec_dtype = ( + first_node._lowp_fp_type + if all( + hasattr(_node, "_lowp_fp_type") + and _node._lowp_fp_type == first_node._lowp_fp_type + for _node in nodes + ) + else torch.float + ) + + kernel_group = self.kernel_group + _, (group, reduction_group) = max( + nodes, key=lambda x: int(x.is_reduction()) + ).group + + self.set_ranges(group, reduction_group) + + def codegen_kernel(cls, *args): + with kernel_group.new_kernel(cls, *args) as kernel: + run(kernel) + + # Ugly hack to maintain the metrics kernel count since + # we only count in CppKernelProxy, not those contained in it + metrics.generated_kernel_count -= 1 + + return kernel + + def run(kernel): + vars, reduction_vars = kernel.set_ranges(group, reduction_group) + in_suffix = False + for node in nodes: + if node.group[1] in [ + (group, reduction_group), + (group + reduction_group, ()), + ]: + assert not in_suffix + node.run(vars, reduction_vars) + else: + in_suffix = True + assert node.group[1] == ( + group, + (), + ), f"unexpected group: {node.group[1]} != {group}, {reduction_group}" + # we can fuse in some extra pointwise into the suffix + with kernel.write_to_suffix(): + node.run(vars, ()) + + scalar_kernel = codegen_kernel(CppKernel) + V.graph.removed_buffers |= scalar_kernel.removed_buffers + V.graph.inplaced_to_remove |= scalar_kernel.inplaced_to_remove + self.loop_nest = LoopNestWithSplit.build(scalar_kernel) + + if not self.picked_vec_isa: + return + + def select_tiling_indices(): + all_index = [] + for node in nodes: + rw = 
dependencies.extract_read_writes(node._body, *node._sizes) + all_index += [dep.index for dep in itertools.chain(rw.reads, rw.writes)] + contig_vars = set() + contig_vars_list = [] + non_contig_stride_const = set() + non_contig_stride_other = set() + for index in all_index: + for var in index.free_symbols: + if not re.search(r"^d\d+$", var.name): + continue + stride = stride_at(var, index) + if stride == 1: + contig_vars.add(int(var.name[1:])) + contig_vars_list.append(int(var.name[1:])) + elif all(s.name.startswith("s") for s in stride.free_symbols): + non_contig_stride_const.add(int(var.name[1:])) + else: + non_contig_stride_other.add(int(var.name[1:])) + contig_only = ( + contig_vars - non_contig_stride_const - non_contig_stride_other + ) + if len(contig_vars) == 0: + # no contiguous vars + return [len(self.itervars) - 1] + if contig_only: + return sorted(contig_only)[-1:] + contig_and_const_stride = ( + contig_vars & non_contig_stride_const + ) - non_contig_stride_other + contig_vars_sorted = sorted(contig_vars) + if ( + len(contig_vars_sorted) == 2 + and contig_vars_sorted[-1] in contig_and_const_stride + and contig_vars_sorted[-1] == len(self.itervars) - 1 + ): + return contig_vars_sorted + return sorted(contig_vars_sorted, key=contig_vars_list.count)[-1:] + + def select_tiling(dtype: torch.dtype = torch.float): + # TODO(jgong5): support alternative tiling factors and data types + tiling_factor = self.picked_vec_isa.nelements(dtype=dtype) + tiling_indices = select_tiling_indices() + if tiling_indices: + could_vec = True + for tiling_indice in tiling_indices: + with CppVecKernelChecker( + deepcopy(self.kernel_group.args), + parallel_num_threads(), + tiling_factor, + tiling_indice, + ) as vec_checker: + run(vec_checker) + could_vec = could_vec and vec_checker.simd_vec + if not could_vec: + break + if could_vec: + if len(tiling_indices) == 1: + return [tiling_factor], tiling_indices + if len(tiling_indices) == 2: + return [tiling_factor, tiling_factor], tiling_indices + return [], [] + + # Kernels share the same global contexts like V.graph.wrapper_code, V.kernel.args. + # But the generated scalar kernel has updated these global contexts. Hence, the other kernels + # should not do this again to avoid context conflict. By now, we only control the + # config.inplace_buffers. In the future, we could maintain more contexts. + with torch._inductor.config.patch(inplace_buffers=False): + tiling_factors, tiling_indices = select_tiling(vec_dtype) + assert len(tiling_factors) == len(tiling_indices) + if len(tiling_indices) == 1: + main_loop, tail_loop = self.loop_nest.split_with_tiling( + tiling_indices[0], factor=tiling_factors[0] + ) + main_loop.set_kernel( + codegen_kernel( + CppVecKernel, tiling_factors[0], tiling_indices[0], vec_dtype + ) + ) + tail_loop.set_kernel(scalar_kernel) + main_loop.simd_vec = True + tail_loop.simd_omp = True + # We chop the loop into two cubes by the nelements - main loop and tail loop. + # Regarding the main loop, it is straightforward that it could be vectorized with + # nelements. But for the tail loop, it still could be vectorized. For example, + # if the nelements is 8(256bits), then the tail loop still could be vectorized + # as 4(128bits). 
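            # Editorial sketch (not from the original source): with tiling_factors[0] == 8 the split
            # corresponds roughly to emitted C++ of the form
            #     for (long i0 = 0; i0 < (N / 8) * 8; i0 += 8) { /* CppVecKernel body */ }
            #     #pragma omp simd simdlen(4)
            #     for (long i0 = (N / 8) * 8; i0 < N; i0 += 1) { /* scalar CppKernel body */ }
            # where N is the range of the tiled itervar and simd_nelements below sets the tail simdlen.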
+ tail_loop.simd_nelements = tiling_factors[0] // 2 + elif len(tiling_indices) == 2: + assert ( + tiling_indices[1] == len(self.itervars) - 1 + and tiling_factors[0] == tiling_factors[1] + ) + outer_main_loop, outer_tail_loop = self.loop_nest.split_with_tiling( + tiling_indices[0], factor=tiling_factors[0] + ) + outer_tail_loop.set_kernel(scalar_kernel) + inner_main_loop, inner_tail_loop = outer_main_loop.split_with_tiling( + tiling_indices[1] - tiling_indices[0], factor=tiling_factors[0] + ) + inner_main_loop.set_kernel( + codegen_kernel( + CppTile2DKernel, tiling_factors[0], tiling_indices, vec_dtype + ) + ) + inner_tail_loop.set_kernel( + codegen_kernel( + CppVecKernel, tiling_factors[0], tiling_indices[0], vec_dtype + ) + ) + + def codegen_loops(self, code, worksharing): + self.codegen_loops_impl(self.loop_nest, code, worksharing) + + +class CppScheduling(BaseScheduling): + # ctypes limits the number of args to 1024, refer to: + # https://github.com/python/cpython/commit/a285af7e626d1b81cf09f8b2bf7656f100bc1237 + # We set a conservative threshold here. + MAX_FUSED_KERNEL_ARGS_NUM = 500 + + def __init__(self, scheduler): + self.scheduler = scheduler + self.get_kernel_group() + self._ready_to_flush = False + + def _set_flush_status(self, status: bool): + self._ready_to_flush = status + + def group_fn(self, sizes): + return tuple(tuple(map(V.graph.sizevars.simplify, s)) for s in sizes) + + def get_kernel_group(self): + from .wrapper import CppWrapperCodeGen + + self.kernel_group: Union[CppWrapperKernelGroup, KernelGroup] + if isinstance(V.graph.wrapper_code, CppWrapperCodeGen): + self.kernel_group = CppWrapperKernelGroup() + else: + self.kernel_group = KernelGroup() + + def _can_fuse_horizontal_impl(self, node1, node2): + _, (vars1, reduce1) = node1.group + _, (vars2, reduce2) = node2.group + if vars1 == vars2 and reduce1 == reduce2: + return True + if reduce1 == () and vars1 == vars2 + reduce2: + return True + # TODO(jansel): allow fusion pointwise (vars1, ()) suffix? + return False + + def can_fuse_horizontal(self, node1, node2): + if ( + len(node1.get_nodes()) + len(node2.get_nodes()) + > config.cpp.max_horizontal_fusion_size + ): + return False + + return self._can_fuse_horizontal_impl(node1, node2) + + def can_fuse_vertical(self, node1, node2): + return self._can_fuse_horizontal_impl(node1, node2) and not node1.is_reduction() + + def codegen_nodes(self, nodes): + """ + Turn an set of pre-fused nodes into a C++ kernel. 
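        The nodes are lowered through a CppKernelProxy into the current kernel group;
        once the group's argument count exceeds MAX_FUSED_KERNEL_ARGS_NUM, the
        scheduling is marked ready to flush.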
+ """ + kernel_group = self.kernel_group + + cpp_kernel_proxy = CppKernelProxy(kernel_group) + cpp_kernel_proxy.codegen_nodes(nodes) + + kernel_group.finalize_kernel(cpp_kernel_proxy, nodes) + + args_num = self._get_scheduled_num_args() + if args_num > CppScheduling.MAX_FUSED_KERNEL_ARGS_NUM: + self._set_flush_status(True) + + def _get_scheduled_num_args(self): + return self.kernel_group.get_num_args() + + def ready_to_flush(self): + return self._ready_to_flush + + def codegen_sync(self): + pass + + def flush(self): + self.kernel_group.codegen_define_and_call(V.graph.wrapper_code) + self.get_kernel_group() + self._set_flush_status(False) + + +class KernelGroup: + def __init__(self): + super().__init__() + self.args = KernelArgs() + self.loops_code = BracesBuffer() + self.ws = WorkSharing(self.loops_code) + self.stack = contextlib.ExitStack() + self.stack.enter_context(self.ws) + self.scheduled_nodes = [] + + def new_kernel(self, cls, *args): + return cls(self.args, parallel_num_threads(), *args) + + def finalize_kernel(self, new_kernel, nodes): + self.scheduled_nodes += nodes + code = self.loops_code + ws = self.ws + new_kernel.codegen_loops(code, ws) + + def get_num_args(self): + arg_defs, call_args, arg_types = self.args.cpp_argdefs() + args_num = len(arg_defs) + return args_num + + def codegen_define_and_call(self, wrapper): + self.stack.close() + if not self.scheduled_nodes: + return + + fused_name = ( + get_fused_kernel_name(self.scheduled_nodes, config.cpp.descriptive_names) + if config.cpp.descriptive_names + else "" + ) + kernel_name = "_".join(["cpp", fused_name, wrapper.next_kernel_suffix()]) + arg_defs, call_args, arg_types = self.args.cpp_argdefs() + arg_defs = ",\n".ljust(25).join(arg_defs) + arg_types = ",".join(arg_types) + code = BracesBuffer() + # TODO: support kernel profile on other platforms + enable_kernel_profile = ( + config.cpp.enable_kernel_profile and sys.platform == "linux" + ) + if enable_kernel_profile: + code.writelines(["#include "]) + kernel_decl_name = kernel_name if V.graph.cpp_wrapper else "kernel" + code.writeline(codecache.cpp_prefix()) + + code.writeline(f'extern "C" void {kernel_decl_name}({arg_defs})') + with code.indent(): + if enable_kernel_profile: + graph_id = V.graph.graph_id + prefix = "graph_" + str(graph_id) + "_" if graph_id is not None else "" + code.writelines( + [ + f'RECORD_FUNCTION("{prefix + kernel_name}", c10::ArrayRef({{}}));' + ] + ) + for old, new in self.args.aliases(): + code.writeline(f"auto {old} = {new};") + code.splice(self.loops_code) + + codecache_def = IndentedBuffer() + if not V.graph.cpp_wrapper: + codecache_def.writeline("async_compile.cpp('''") + codecache_def.splice(code) + if not V.graph.cpp_wrapper: + codecache_def.writeline("''')") + + codecache_str = codecache_def.getvalue() + # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does + # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. 
+ codecache_str = codecache_str.replace("#pragma CMT", "//") + wrapper.define_kernel(kernel_name, codecache_str, cuda=False) + # generate the code to call this + wrapper.generate_kernel_call(kernel_name, call_args, cuda=False) + + +class CppWrapperKernelGroup(KernelGroup): + def __init__(self): + super().__init__() + self.args = CppWrapperKernelArgs() + + +class WorkSharing: + def __init__(self, code): + self.code = code + self.in_parallel = False + self.num_threads = None + self.stack = contextlib.ExitStack() + + def parallel(self, threads): + if self.in_parallel and threads != self.num_threads: + # wrong number of threads + self.close() + if not self.in_parallel: + self.num_threads = threads + self.in_parallel = True + if config.cpp.dynamic_threads: + self.code.writeline("#pragma omp parallel") + else: + self.code.writeline(f"#pragma omp parallel num_threads({threads})") + self.stack.enter_context(self.code.indent()) + + def single(self): + if self.in_parallel: + self.code.writeline("#pragma omp single") + return self.in_parallel + + def close(self): + self.stack.close() + self.in_parallel = False + + def __enter__(self): + self.stack.__enter__() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.stack.__exit__(exc_type, exc_val, exc_tb) + + +@dataclasses.dataclass +class LoopLevel: + var: Optional[sympy.Expr] = None + size: Optional[sympy.Expr] = None + offset: sympy.Expr = sympy.Integer(0) + steps: sympy.Expr = sympy.Integer(1) + parallel: int = 0 + simd_omp: bool = False + simd_vec: bool = False + collapsed: bool = False + reduction_var_map: Optional[Dict[str, str]] = None + parent: Optional["LoopLevel"] = None + # the next inner level of the loop, empty if it is inner-most + # contains >1 LoopLevel if the inner level of loop is split + inner: List["LoopLevel"] = dataclasses.field(default_factory=list) + # kernel assigned to this loop level, only valid when it is a leaf + kernel: Optional[CppKernel] = None + + def __post_init__(self): + # Regarding the C++/OpenMP backend, `codecache.pick_vec_isa()` to check + # vectorization ISA is a time-consuming and one-shot operation. It leads + # to taking a longer time to import `codegen.cpp` package because the + # `LoopLevel` of the package is decorated by `@dataclasses.dataclass` while + # the decorator will invoke `codecache.pick_vec_isa()` to initialize the + # `simd_nelements` of the `LoopLevel`. It might introduce additional compilation + # overhead to the Triton backend. Therefore, we moved the `simd_nelements` to + # `__post_init__` + picked_vec_isa: codecache.VecISA = codecache.pick_vec_isa() + self.simd_nelements: int = picked_vec_isa.nelements() if picked_vec_isa else 0 + + def get_kernels(self) -> List[CppKernel]: + """Get all kernel objects under this loop level""" + if self.kernel: + return [self.kernel] + kernels = [] + for loop in self.inner: + kernels += loop.get_kernels() + return kernels + + def set_kernel(self, kernel: CppKernel): + """ + Set the kernel under this loop level. No split is allowed under + this loop level. 
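        The kernel is pushed down to the single inner level if one exists; when it is
        assigned at a reduction level, its reduction_var_map is propagated up through
        the enclosing reduction loops.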
+ """ + if not self.inner: + self.kernel = kernel + loop: Optional[LoopLevel] = self + assert loop is not None + if loop.is_reduction(): + loop.reduction_var_map = kernel.reduction_var_map.copy() + loop = loop.parent + while loop is not None and loop.is_reduction(): + assert loop.reduction_var_map is not None + loop.reduction_var_map.update(kernel.reduction_var_map) + loop = loop.parent + return + assert len(self.inner) == 1 + self.inner[0].set_kernel(kernel) + + def get_loops_at(self, depth) -> List["LoopLevel"]: + if depth == 0: + return [self] + else: + loops = [] + for loop in self.inner: + loops += loop.get_loops_at(depth - 1) + return loops + + def is_reduction(self): + return bool(self.reduction_var_map) + + def split_with_tiling(self, depth, factor): + def clone_inner(): + inner = [] + if self.inner: + for loop in self.inner: + inner.append(loop.clone()) + return inner + + def do_split_with_tiling(): + sympy_factor = sympy.Integer(factor) + + offset = FloorDiv(self.size, sympy_factor) * sympy_factor + main_loop = LoopLevel(self.var, offset) + main_loop.steps = sympy_factor + main_loop.parallel = self.parallel + main_loop.collapsed = False + main_loop.reduction_var_map = self.reduction_var_map + main_loop.inner = clone_inner() + if main_loop.inner: + for loop in main_loop.inner: + loop.parent = main_loop + + tail_loop = LoopLevel(self.var, self.size) + tail_loop.offset = offset + tail_loop.parallel = self.parallel + tail_loop.collapsed = False + tail_loop.reduction_var_map = self.reduction_var_map + tail_loop.inner = clone_inner() + if tail_loop.inner: + for loop in tail_loop.inner: + loop.parent = tail_loop + + return main_loop, tail_loop + + if depth == 0: + main_loop, tail_loop = do_split_with_tiling() + parent = self.parent + if parent: + parent.inner = [main_loop, tail_loop] + main_loop.parent = parent + tail_loop.parent = parent + return main_loop, tail_loop + else: + assert len(self.inner) == 1 + return self.inner[0].split_with_tiling(depth - 1, factor) + + def clone(self): + loop = copy(self) + loop.inner = [] + if self.inner: + for inner_loop in self.inner: + inner_loop_clone = inner_loop.clone() + inner_loop_clone.parent = loop + loop.inner.append(inner_loop_clone) + loop.kernel = deepcopy(self.kernel) + return loop + + def lines(self): + offset_expr = cexpr_index(self.offset) + size_expr = cexpr_index(self.size) + if config.cpp.no_redundant_loops and offset_expr == size_expr: + return None + if self.reduction_var_map: + reduction = " " + " ".join( + f"reduction({RTYPE_TO_CPP[rtype]}:{var})" + for var, rtype in self.reduction_var_map.items() + ) + else: + reduction = "" + simd = ( + f"simd simdlen({self.simd_nelements}) " + if self.simd_omp and self.simd_nelements > 1 + else "" + ) + if self.parallel: + # TODO(jansel): look into chunk size and other schedules + line1 = f"#pragma omp for{reduction} " + if self.parallel > 1: + line1 += f" collapse({self.parallel})" + if self.simd_omp: + line1 = line1.replace(" for ", f" for {simd}") + elif self.simd_vec: + line1 = "" + elif self.simd_omp: + line1 = f"#pragma omp {simd}{reduction}" + elif not self.reduction_var_map and codecache.is_gcc(): + line1 = "#pragma GCC ivdep" + else: + line1 = "" + offset_str = f"{INDEX_TYPE} {self.var}={offset_expr}" + size_str = f"{self.var}<{size_expr}" + steps_str = f"{self.var}+={cexpr_index(self.steps)}" + line2 = f"for({offset_str}; {size_str}; {steps_str})" + if self.collapsed or not line1: + return [line2] + return [line1, line2] + + +@dataclasses.dataclass +class LoopNestWithSplit: + """ + 
A loop-nest like structure but with some loop level split along + the loop range into the main tiling loop and the tail. It is built + with the `build` method as a loop nest and then split with + `split_with_tiling` at some depth. + + A typical case is for vectorization where we typically split at the inner-most + loop level. A more complicated case is 2D tiling where we split at + both inner-most and outer levels. + """ + + root: Optional[List[LoopLevel]] = None + kernel: Optional[CppKernel] = None + + @staticmethod + def build(kernel: CppKernel): + """Build a LoopNest with the given `kernel` as the leaf""" + itervars = kernel.itervars + ranges = kernel.ranges + reduction_depth = kernel.reduction_depth + assert reduction_depth is not None + + root: List[LoopLevel] = [] + levels: List[LoopLevel] = root + loop: Optional[LoopLevel] = None + for loop_idx, (var, size) in enumerate(zip(itervars, ranges)): + loop = LoopLevel(var, size, parent=loop) + if loop_idx >= reduction_depth: + loop.reduction_var_map = kernel.reduction_var_map.copy() + levels.append(loop) + levels = loop.inner + loop_nest = LoopNestWithSplit(root) + if loop: + loop.kernel = kernel + else: + loop_nest.kernel = kernel + return loop_nest + + def __bool__(self): + return bool(self.root) + + def get_loops_at(self, depth) -> List[LoopLevel]: + """Get all the loop levels at the given `depth` (most outer loop has depth 0)""" + loops: List[LoopLevel] = [] + assert self.root is not None + for loop in self.root: + loops += loop.get_loops_at(depth) + return loops + + @cache_on_self + def max_parallel_depth(self): + """ + Maximal allowed depth for parallelism: + 1) Levels without splitting and + 2) All reduction or non-reduction levels + When the loop is split at the top level, the max depth is 1. + """ + max_depth = 0 + assert self.root is not None + loops = self.root + if len(loops) > 1: + return 1 + is_reduction = loops[0].is_reduction() if loops else False + while len(loops) == 1 and loops[0].is_reduction() == is_reduction: + max_depth += 1 + loops = loops[0].inner + return max_depth + + def is_reduction_only(self): + """ + Whether all the loops are for reduction. Reduction loops + are always the inner most ones. + """ + return ( + self.root is not None and len(self.root) > 0 and self.root[0].is_reduction() + ) + + def mark_parallel(self, par_depth): + assert ( + par_depth <= self.max_parallel_depth() + ), "Parallel depth cannot exceed the maximal allowed parallel depth" + assert self.root is not None + loops = self.root + for loop in loops: + loop.parallel = par_depth + for i in range(1, par_depth): + loops = loops[0].inner + loops[0].collapsed = True + + def split_with_tiling(self, depth, factor): + """ + Split the loop into main and tail loops at given `depth` so that the range + of the main loop has range `floor_div(range, factor) * factor` and + the tail loop handles the remainder. The main loop is tiled + according to the `factor`. 
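        For example, splitting a loop of size n at depth 0 with factor 16 yields a
        main loop over [0, (n // 16) * 16) with step 16 and a tail loop over
        [(n // 16) * 16, n) with step 1.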
+ """ + loops = self.get_loops_at(depth) + assert len(loops) == 1 + split_loops = loops[0].split_with_tiling(0, factor) + if depth == 0: + self.root = split_loops + return split_loops diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h new file mode 100644 index 0000000000000000000000000000000000000000..23f72218a0cc1c40b4a752d43b7e59c0839b436e --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h @@ -0,0 +1,410 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_ZVECTOR) +#define INDUCTOR_USE_VECTOR_TYPES() 1 +#else +#define INDUCTOR_USE_VECTOR_TYPES() 0 +#endif + +#if INDUCTOR_USE_VECTOR_TYPES() +#include +#include +#endif + +typedef at::Half half; +typedef at::BFloat16 bfloat16; + +template +struct Welford { + T mean = T(0); + T m2 = T(0); + T weight = T(0); +}; + + +template +struct IsVecType: std::false_type {}; + +#if INDUCTOR_USE_VECTOR_TYPES() +template +struct IsVecType>: std::true_type {}; +#endif + +template +Welford welford_combine(const Welford &a, const Welford &b) { + if constexpr (!IsVecType::value) { + if (a.weight == 0) { + return b; + } + if (b.weight == 0) { + return a; + } + } + auto delta = b.mean - a.mean; + auto new_weight = a.weight + b.weight; + auto wb_over_w = b.weight / new_weight; + if constexpr (IsVecType::value) { + // Guard against division by zero + wb_over_w = T::blendv(wb_over_w, T(0), new_weight == T(0)); + } + auto result = Welford{ + a.mean + delta * wb_over_w, + a.m2 + b.m2 + delta * delta * a.weight * wb_over_w, + new_weight + }; + return result; +} + +template +Welford welford_combine(const Welford &acc, T data) { + // Add a single data point + auto delta = data - acc.mean; + auto new_weight = acc.weight + T(1); + auto new_mean = acc.mean + delta / new_weight; + auto new_delta = data - new_mean; + auto result = Welford{ + new_mean, + acc.m2 + delta * new_delta, + new_weight + }; + return result; +} + + +#if INDUCTOR_USE_VECTOR_TYPES() +template +inline at::vec::Vectorized vec_shuffle_down(at::vec::Vectorized x, size_t n) { + using Vec = at::vec::Vectorized; + alignas(alignof(Vec)) scalar_t array[Vec::size()]; + x.store(array); + for (size_t i = 0; i + n < Vec::size(); i += 2 * n) { + array[i] = array[i + n]; + } + return Vec::loadu(array); +} + +#ifdef CPU_CAPABILITY_AVX2 +inline at::vec::Vectorized vec_shuffle_down(at::vec::Vectorized x, size_t n) { + using vec_t = at::vec::Vectorized; +#define SHUFFLE_MASK(z, y, x, w) ((z << 6) | (y << 4) | (x << 2) | w) + switch (n) { + case 1: + return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(1, 1, 3, 3))); + case 2: + return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(2, 2, 2, 2))); + case 4: + return vec_t(_mm256_permute2f128_ps(x, x, SHUFFLE_MASK(1, 1, 1, 1))); + } + TORCH_CHECK(false, "Unhandled vec_shuffle_down value ", n); +} +#endif + +template +Welford welford_vec_reduce_all(Welford> acc) { + using Vec = at::vec::Vectorized; + for (size_t n = 1; n < Vec::size(); n *= 2) { + auto shuffled = Welford{ + vec_shuffle_down(acc.mean, n), + vec_shuffle_down(acc.m2, n), + vec_shuffle_down(acc.weight, n) + }; + acc = welford_combine(acc, shuffled); + } + + Welford result; + alignas(alignof(Vec)) scalar_t array[Vec::size()]; + acc.mean.store(array); + 
result.mean = array[0]; + + acc.m2.store(array); + result.m2 = array[0]; + + acc.weight.store(array); + result.weight = array[0]; + + return result; +} +#endif + + +template inline T mod(T a, T b) { return a % b; } +template <> inline float mod(float a, float b) { return std::fmod(a, b); } +template <> inline double mod(double a, double b) { return std::fmod(a, b); } + +template +inline scalar_t max_propagate_nan(scalar_t a, scalar_t b) { + if (at::_isnan(a)) { + return a; + } + return a > b ? a : b; +} + +template +inline scalar_t min_propagate_nan(scalar_t a, scalar_t b) { + if (at::_isnan(a)) { + return a; + } + return a < b ? a : b; +} + +constexpr float uint32_to_uniform_float(uint32_t value) { + // maximum value such that `MAX_INT * scale < 1.0` (with float rounding) + constexpr float scale = 4.6566127342e-10; + return static_cast(value & 0x7FFFFFFF) * scale; +} + +float normalized_rand_cpu(uint32_t seed, uint32_t offset) { + return uint32_to_uniform_float(at::Philox4_32(seed, 0, offset)()); +} + +float randn_cpu(uint32_t seed, uint32_t offset) { + at::Philox4_32 engine(seed, 0, offset); + return engine.randn(10); +} + +uint64_t randint64_cpu(uint32_t seed, uint32_t offset, int64_t low, int64_t high) { + auto gen = at::Philox4_32(seed, 0, offset); + uint64_t r0 = gen(); + uint64_t r1 = gen(); + uint64_t result = r0 | (r1 << 32); + return (result % static_cast(high - low)) + low; +} + +template struct AsIntegerType { typedef T type; }; +template <> struct AsIntegerType { typedef uint32_t type; }; +template <> struct AsIntegerType { typedef uint64_t type; }; +template <> struct AsIntegerType { typedef uint16_t type; }; + +template +typename std::enable_if::value, T>::type +inline fetch_value(volatile T *addr) { + return *addr; +} + +template +typename std::enable_if::value, T>::type +inline fetch_value(volatile T *addr) { + return T(addr->x, T::from_bits()); +} + +template +typename std::enable_if::value>::type +atomic_add(volatile T *addr, T offset) { + typedef typename AsIntegerType::type alt_type; + + static_assert(sizeof(std::atomic) == sizeof(T), + "std::atomic issue"); + + alt_type expected; + + alt_type desired; + + std::atomic *atomic_addr = (std::atomic *)addr; + do { + T val = fetch_value(addr); + reinterpret_cast(&expected)[0] = val; + reinterpret_cast(&desired)[0] = val + offset; + } while (!atomic_addr->compare_exchange_weak(expected, desired, + std::memory_order_relaxed)); +} + +// Since C++20 float is supported by fetch_add, but the performance may not +// better than compare_exchange_weak, which can be checked by microbenchmark +// inductor_cpu_atomic.py +template +typename std::enable_if::value>::type +atomic_add(volatile T *addr, T offset) { + static_assert(sizeof(std::atomic) == sizeof(T), + "std::atomic issue"); + std::atomic *atomic_addr = (std::atomic *)addr; + atomic_addr->fetch_add(offset, std::memory_order_relaxed); +} + +// This function is used to convert bool or uint8 to float mask for +// vectorization. The caller needs to make sure the src represents TRUE/FALSE +// correctly. +template +inline float flag_to_float_scalar(T src) { + float ret; + *(uint32_t*)(&ret) = src ? 
0xFFFFFFFF : 0; + return ret; +} + +#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_ZVECTOR) + +inline at::vec::Vectorized masked_load(const float* src, at::vec::Vectorized mask) { +# if defined(CPU_CAPABILITY_AVX512) + at::vec::Vectorized zero_vec(0); + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ); + return _mm512_mask_loadu_ps(zero_vec, mmask, src); +# elif defined(CPU_CAPABILITY_AVX2) + auto all_ones = _mm256_set1_epi32(0xFFFFFFFF); + auto mmask = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones); + return _mm256_maskload_ps(src, mmask); +# elif defined(CPU_CAPABILITY_ZVECTOR) + auto result = at::vec::Vectorized::loadu(src); + return (result & mask); +# else +# error Unsupported vectorization CPU capability +# endif +} + +template +typename std::enable_if::value || std::is_same::value, at::vec::Vectorized>::type +inline masked_load(const T* src, at::vec::Vectorized mask) { +# if defined(CPU_CAPABILITY_AVX512) + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ); + auto zero = _mm256_set1_epi16(0); + auto temp = _mm256_mask_loadu_epi16(zero, mmask, src); + return _mm512_inserti32x8(_mm512_castsi256_si512(temp), zero, 1); +# elif defined(CPU_CAPABILITY_AVX2) + auto all_ones = _mm256_set1_epi32(0xFFFFFFFF); + auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones); + __at_align__ uint32_t mmask[8]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec); + __at_align__ uint16_t result[16]; + for (auto i = 0; i < 8; i++) { + result[i] = mmask[i] == 0xFFFFFFFF ? src[i].x: uint16_t(0); + } + return at::vec::Vectorized::loadu(result); +# elif defined(CPU_CAPABILITY_ZVECTOR) + auto result = at::vec::Vectorized::loadu(src, 8); + uint32_t maskdata[8] = { 0 }; + uint16_t maskdata_dest[16] = { 0 }; + mask.store(maskdata); + for (auto i = 0; i < 8; i++) { + maskdata_dest[i] = (maskdata[i] == 0xFFFFFFFF) ? 0xFFFF: 0; + } + auto maskvector = at::vec::Vectorized::loadu(maskdata_dest); + return (result & maskvector); +# else +# error Unsupported vectorization CPU capability +# endif +} + +inline at::vec::Vectorized masked_load(const uint8_t* src, at::vec::Vectorized mask) { +# if defined(CPU_CAPABILITY_AVX512) + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ); + auto zero = _mm_set1_epi8(0); + auto temp = _mm_mask_loadu_epi8(zero, mmask, src); + return _mm512_inserti64x2(_mm512_set1_epi32(0), temp, 0); +# elif defined(CPU_CAPABILITY_AVX2) + auto all_ones = _mm256_set1_epi32(0xFFFFFFFF); + auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones); + __at_align__ uint32_t mmask[8]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec); + __at_align__ uint8_t result[32]; + for (auto i = 0; i < 8; i++) { + result[i] = mmask[i] == 0xFFFFFFFF ? src[i]: uint8_t(0); + } + return at::vec::Vectorized::loadu(result); +# elif defined(CPU_CAPABILITY_ZVECTOR) + auto result = at::vec::Vectorized::loadu(src, 8); + uint32_t maskdata[8]; + uint8_t maskdata_dest[32] = { 0 }; + mask.store(maskdata); + for (auto i = 0; i < 8; i++) { + maskdata_dest[i] = (maskdata[i] == 0xFFFFFFFF) ? 
0xFF: 0; + } + auto maskvector = at::vec::Vectorized::loadu(maskdata_dest); + return (result & maskvector); +# else +# error Unsupported vectorization CPU capability +# endif +} + +template +inline at::vec::Vectorized flag_to_float_vec(const T* src) { + __at_align__ float dst_tmp[at::vec::Vectorized::size()]; + #pragma unroll + for (int64_t i = 0; i < at::vec::Vectorized::size(); i++) { + dst_tmp[i] = flag_to_float_scalar(src[i]); + } + return at::vec::Vectorized::loadu(dst_tmp); +} + +template +inline at::vec::Vectorized cvt_lowp_fp_to_fp32( + at::vec::Vectorized src) { + at::vec::Vectorized res_vec1(0); + at::vec::Vectorized res_vec2(0); + std::tie(res_vec1, res_vec2) = at::vec::convert_to_float(src); + return res_vec1; +} + +template +inline at::vec::Vectorized cvt_fp32_to_lowp_fp( + at::vec::Vectorized src) { + return at::vec::convert_from_float(src, src); +} + +inline at::vec::Vectorized mask_convert_to_float(at::vec::Vectorized src) { + auto zeros = at::vec::Vectorized(0); + auto ones = at::vec::Vectorized(1); + return at::vec::Vectorized::blendv(zeros, ones, src); +} + +template +inline at::vec::Vectorized vec_convert_to_mask(at::vec::Vectorized src) { + assert( + at::vec::Vectorized::size() == at::vec::Vectorized::size()); + at::vec::Vectorized res_vec(0); + __at_align__ float dst_tmp[at::vec::Vectorized::size()]; + __at_align__ SRC src_tmp[at::vec::Vectorized::size()]; + src.store(src_tmp); + +#pragma unroll + for (int i = 0; i < at::vec::Vectorized::size(); i++) { + *(uint32_t*)(dst_tmp + i) = src_tmp[i] ? 0xFFFFFFFF : 0; + } + + return res_vec.loadu(dst_tmp); +} + +template +inline at::vec::Vectorized to_float_mask(at::vec::Vectorized src) { + return vec_convert_to_mask(src); +} + +#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) +template <> +inline at::vec::Vectorized to_float_mask(at::vec::Vectorized src) { +#if defined(CPU_CAPABILITY_AVX2) + return at::vec::Vectorized(_mm256_castsi256_ps(src)); +#else + return at::vec::Vectorized(_mm512_castsi512_ps(src)); +#endif +} +#endif + +template <> +inline at::vec::Vectorized to_float_mask(at::vec::Vectorized src) { + return src; +} + +inline at::vec::Vectorized to_float_mask(int src) { + float mask; + *(uint32_t*)&mask = src ? 
0xFFFFFFFF : 0; + return at::vec::Vectorized(mask); +} +#endif diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_kernel.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_kernel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8dd6924b2d655e9fbd47ebf3fe9920d4ad5c2988 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_kernel.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py new file mode 100644 index 0000000000000000000000000000000000000000..2e9dc2d8a7e149e1d9023a4aa45f0b6ba166e2d1 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py @@ -0,0 +1,212 @@ +import logging +from typing import cast, List + +from ...._dynamo.utils import counters + +from ... import config, ir +from ...codecache import code_hash, get_path +from ...ir import ComputedBuffer, CUDATemplateBuffer, Pointwise +from ...scheduler import ( + BaseSchedulerNode, + BaseScheduling, + FusedSchedulerNode, + Scheduler, + SchedulerNode, +) +from ...utils import get_fused_kernel_name, get_kernel_metadata, sympy_product +from ...virtualized import V +from ..common import IndentedBuffer + +from .cutlass_epilogue_gen import CUTLASSEVTOpNotImplementedError + +log = logging.getLogger(__name__) + + +class CUDACPPScheduling(BaseScheduling): + """ + Partial Scheduling implementation for CUDA C++ Kernels. + This class is intended to be used in combination with TritonScheduling, + and delegated to by CUDACombinedScheduling. + + It handles fusion decisions and CUDA C++ specific template code generation. + """ + + def __init__(self, scheduler: Scheduler): + super().__init__() + self.scheduler = scheduler + + def group_fn(self, sizes): + return tuple(V.graph.sizevars.simplify(sympy_product(s)) for s in sizes) + + def is_cuda_cpp_template(self, node: BaseSchedulerNode) -> bool: + return isinstance(node, SchedulerNode) and isinstance( + node.node, CUDATemplateBuffer + ) + + def is_cuda_cpp_fused_template(self, node: BaseSchedulerNode) -> bool: + return isinstance(node, FusedSchedulerNode) and self.is_cuda_cpp_template( + node.get_template_node() + ) + + def _can_fuse_epilogue_impl( + self, + cuda_template_buffer: CUDATemplateBuffer, + epilogue_nodes: List[ir.IRNode], + additional_node: ir.IRNode, + ) -> bool: + """ + Check if the given node can be fused with the epilogue. At the moment, Kernels + support fusion with Pointwise operations, wrapped in (named) ComputedBuffer nodes. + + Args: + cuda_template_buffer : A CUDATemplateBuffer object representing the CUDA template and it's result buffer + epilogue_nodes : List[ir.Buffer]: The list of already fused epilogue nodes. + additional_node: The ir.Buffer node to be checked if it can be fused with the epilogue. + Returns: + - bool: True if the given node can be fused with the epilogue, False otherwise. 
+ + """ + if not isinstance(cuda_template_buffer, CUDATemplateBuffer): + return False + if not cuda_template_buffer.template.can_fuse_epilogue: + # The used GEMM op does not support fusing epilogues + return False + if not isinstance(additional_node, ComputedBuffer): + return False + if not isinstance(additional_node.data, Pointwise): + return False + # We can fuse a Pointwise op that depends on the last fused epilogue node + # if any. If there is no epilogue node yet, it needs to depend on the template + # node + node_name = additional_node.get_computed_buffer_name() + if node_name is None: + return False + + if len(epilogue_nodes) == 0: + if cuda_template_buffer.name not in additional_node.get_read_names(): + return False + else: + last_epilogue_node = epilogue_nodes[-1] + assert isinstance(last_epilogue_node, ir.ComputedBuffer) # for mypy + last_epilogue_name = ( + last_epilogue_node.name + if last_epilogue_node.name is not None + else last_epilogue_node.data.name # type: ignore[attr-defined] + ) + if last_epilogue_name not in additional_node.get_read_names(): + return False + if additional_node.layout != cuda_template_buffer.layout: + return False + try: + from torch._inductor.codegen.cuda.cutlass_epilogue_gen import ( + CutlassEVTEpilogueArgumentFormatter, + CutlassEVTEpilogueTypeFormatter, + ) + + CutlassEVTEpilogueTypeFormatter.ir_to_evt_string( + cast(str, cuda_template_buffer.name), "anything", [additional_node] + ) + CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string( + cast(str, cuda_template_buffer.name), [additional_node] + ) + except CUTLASSEVTOpNotImplementedError as e: + not_implemented_op = str(e) + if not_implemented_op.startswith("_op_"): + not_implemented_op = not_implemented_op[4:] + log.warning( + f"Cannot fuse epilogue node {additional_node} into {cuda_template_buffer.name}, likely due to unsupported operation: {not_implemented_op}" # noqa: G004, B950 + ) + return False + else: + # Likely due to unsupported dtype. + log.warning( + f"Cannot fuse epilogue node {additional_node} into {cuda_template_buffer.name}. 
Reason: {not_implemented_op}" # noqa: G004, B950 + ) + return False + return True + + @staticmethod + def _unwrap_epilogue_nodes(fused_node: FusedSchedulerNode) -> List[ir.IRNode]: + nodes = fused_node.get_nodes() + template_node = fused_node.get_template_node() + nodes.remove(template_node) + return [n.node for n in nodes] + + def can_fuse_vertical( + self, node1: BaseSchedulerNode, node2: BaseSchedulerNode + ) -> bool: + if self.is_cuda_cpp_template(node1) and isinstance(node2, SchedulerNode): + return self._can_fuse_epilogue_impl( + cast(CUDATemplateBuffer, node1.node), [], node2.node + ) + elif self.is_cuda_cpp_fused_template(node1) and isinstance( + node2, SchedulerNode + ): + fnode1 = cast(FusedSchedulerNode, node1) + return self._can_fuse_epilogue_impl( + fnode1.get_template_node().node, + self._unwrap_epilogue_nodes(fnode1), + node2.node, + ) + return False + + def define_kernel(self, src_code: str, node_schedule) -> str: + wrapper = V.graph.wrapper_code + if src_code in wrapper.src_to_kernel: + kernel_name = wrapper.src_to_kernel[src_code] + else: + fused_name = ( + get_fused_kernel_name(node_schedule, config.triton.descriptive_names) + if config.triton.descriptive_names + else "" + ) + kernel_name = "_".join(["cuda", fused_name, wrapper.next_kernel_suffix()]) + # use the original src_code as the key + wrapper.src_to_kernel[src_code] = kernel_name + src_code = src_code.replace("KERNEL_NAME", kernel_name) + + _, _, kernel_path = get_path(code_hash(src_code), "py") + + compile_wrapper = IndentedBuffer() + compile_wrapper.writeline("async_compile.cuda(r'''") + compile_wrapper.splice(src_code, strip=True) + compile_wrapper.writeline("''', 'so')") + + metadata_comment = f"# kernel path: {kernel_path}" + origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) + metadata_comment += "\n" + origins + "\n" + detailed_origins + wrapper.define_kernel( + kernel_name, compile_wrapper.getvalue(), metadata_comment + ) + return kernel_name + + def codegen_template( + self, template_node: BaseSchedulerNode, epilogue_nodes: List[SchedulerNode] + ): + """ + Codegen a CUDA template, possibly with fused epilogues + """ + counters["inductor"]["cuda_epilogue_fusion_counter"] += len(epilogue_nodes) + assert self.is_cuda_cpp_template( + template_node + ), "Template node passed to CUDAScheduler.codegen_template must be a SchedulerNode that wraps a CUDATemplateBuffer" + template_node = cast(SchedulerNode, template_node) + _, (numel, rnumel) = template_node.group + assert rnumel == 1 + ctb: CUDATemplateBuffer = cast(CUDATemplateBuffer, template_node.node) + epilogue_ir_nodes: List[ir.Buffer] = [n.node for n in epilogue_nodes] + assert all( + isinstance(n, ir.ComputedBuffer) for n in epilogue_ir_nodes + ), "Epilogue nodes must all be instances of ir.ComputedBuffer" + kernel, render = ctb.make_kernel_render(ctb, epilogue_nodes=epilogue_ir_nodes) + with kernel: + for node in [template_node, *epilogue_nodes]: + node.mark_run() + src_code = render() + + with V.set_kernel_handler(kernel): + node_schedule = [template_node, *epilogue_nodes] + kernel_name = self.define_kernel(src_code, node_schedule) + kernel.call_kernel(kernel_name, ctb, epilogue_ir_nodes) + V.graph.removed_buffers |= kernel.removed_buffers + self.scheduler.free_buffers() diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_env.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_env.py new file mode 100644 index 
0000000000000000000000000000000000000000..2e872861cdac951d2bfd04f2eceee7a1eb17c448 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_env.py @@ -0,0 +1,45 @@ +import functools +import logging +from typing import Optional + +import torch + +from ... import config + +log = logging.getLogger(__name__) + + +def get_cuda_arch() -> Optional[str]: + try: + cuda_arch = config.cuda.arch + if cuda_arch is None: + # Get Compute Capability of the first Visible device + major, minor = torch.cuda.get_device_capability(0) + cuda_arch = major * 10 + minor + return str(cuda_arch) + except Exception as e: + log.error("Error getting cuda arch: %s", e) + return None + + +def get_cuda_version() -> Optional[str]: + try: + cuda_version = config.cuda.version + if cuda_version is None: + cuda_version = torch.version.cuda + return cuda_version + except Exception as e: + log.error("Error getting cuda version: %s", e) + return None + + +@functools.lru_cache(None) +def nvcc_exist(nvcc_path: str = "nvcc") -> bool: + if nvcc_path is None: + return False + import subprocess + + res = subprocess.call( + ["which", nvcc_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL + ) + return res == 0 diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_template.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_template.py new file mode 100644 index 0000000000000000000000000000000000000000..3e106dad84e4f5c97bf083b2eca833dd209840e5 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_template.py @@ -0,0 +1,241 @@ +import functools +import itertools +import logging +from typing import List, Optional +from unittest.mock import patch + +import sympy + +import torch +from ...autotune_process import CUDABenchmarkRequest, TensorMeta +from ...ir import Buffer, CUDATemplateBuffer, IRNode, Layout + +from ...utils import IndentedBuffer, unique +from ...virtualized import V +from ..common import KernelTemplate +from .cuda_kernel import CUDATemplateCaller, CUDATemplateKernel + +log = logging.getLogger(__name__) + + +class CUDATemplate(KernelTemplate): + index_counter = itertools.count() + + def __init__( + self, + name: str, + input_nodes: List[Buffer], + layout: Layout, + input_reorder: Optional[List[int]] = None, + ): + """ + + Baseclass for CUDA C++ Templates, derived from KernelTemplate. Not to be instantiated directly. + + Args: + name (str): The name of the CUDATemplate object. + input_nodes (List[IRNode]): A list of input IRNodes. + layout (Layout): The layout of the output buffer / tensor. + input_reorder (Optional[List[int]]): An optional list that specifies the order of the input nodes. + + """ + super().__init__(name) + self.input_nodes = input_nodes + self.output_node: Buffer = Buffer("buf_out", layout) + self.input_reorder = input_reorder + self.layout = layout + + def generate( # type: ignore[override] + self, + **kwargs, + ) -> CUDATemplateCaller: + """ + Generates the CUDA template caller object for the given GEMM template and operation. This CUDATemplateCaller + may be used to call and benchmark the generated CUDA kernel in a standalone manner to enable Autotuning. + + Args: + kwargs: Additional keyword arguments. + + Returns: + A CUDATemplateCaller object representing the generated CUDA template caller. 
+ """ + kernel_name = f"cuda_{self.name}" + with patch.object( + V.graph, "get_dtype", self._fake_get_dtype(self.output_node) + ), CUDATemplateKernel( + kernel_name=kernel_name, + ) as kernel: + code = self.render(kernel=kernel, **kwargs) + _, call_args, _ = kernel.args.python_argdefs() + log.debug("Generated Code:\n%s", code) + log.debug( + "Args: cpp_argdefs: %s, python_argdefs: %s", + kernel.args.cpp_argdefs(), + kernel.args.python_argdefs(), + ) + + input_reorder = ( + self.input_reorder + if self.input_reorder is not None + else list(range(len(self.input_nodes))) + ) + expected_args = list( + unique(self.input_nodes[idx].get_name() for idx in input_reorder) + ) + expected_args.extend([self.output_node.get_name()]) + assert list(call_args)[: len(expected_args)] == expected_args, ( + call_args, + expected_args, + ) + extra_args = V.graph.sizevars.size_hints( + map(sympy.expand, call_args[len(expected_args) :]) + ) + + kernel_hash_name = f"cuda_{self.name}_{next(self.index_counter)}" + + # create the BenchmarkRequest + bmreq = CUDABenchmarkRequest( + kernel_name=kernel_name, + input_tensor_meta=TensorMeta.from_irnodes(self.input_nodes), + output_tensor_meta=TensorMeta.from_irnodes(self.output_node), + extra_args=extra_args, + source_code=code, + ) + + def make_kernel_render( + template_node: CUDATemplateBuffer, + epilogue_nodes: Optional[List[IRNode]] = None, + ): + kernel = CUDATemplateKernel( + kernel_name="KERNEL_NAME", + ) + render = functools.partial( + self.render, + kernel=kernel, + template_buffer_node=template_node, + epilogue_nodes=epilogue_nodes, + **kwargs, # includes "op" argument in case of CUTLASSGemmTemplate + ) + return kernel, render + + return CUDATemplateCaller( + kernel_hash_name, + self.name, + self.input_nodes, + self.output_node.get_layout(), + make_kernel_render, + bmreq, + self, + ) + + def header(self) -> IndentedBuffer: + res = IndentedBuffer() + res.splice( + """ + #include + #include + #include + #include + #include + """ + ) + return res + + def globals(self) -> IndentedBuffer: + res = IndentedBuffer() + res.splice( + """ + // We compile all models with -fvisibility=hidden. Any symbols that need to be + // exposed in the final shared library must be declared with PT_EXPORT to make + // them visible. + #ifdef __GNUC__ // Applies to any compiler with GNU extensions (clang and g++) + #define PT_EXPORT __attribute__((__visibility__("default"))) + #else + #ifdef _WIN32 + #define PT_EXPORT __declspec(dllexport) + #else + #define PT_EXPORT + #endif + #endif + using bfloat16 = nv_bfloat16; + """ + ) + return res + + def render(self, **kwargs) -> str: + raise NotImplementedError + + +class CUTLASSTemplate(CUDATemplate): + """ + CUTLASSTemplate is a class that provides a template for generating CUTLASS Templates. Used as a baseclass for the + CUTLASSGemmTemplate, providing functionality that might also be relevant for non-GEMM CUTLASS Kernels. 
+ """ + + def header(self) -> IndentedBuffer: + res = super().header() + res.splice( + """ + #include "cute/tensor.hpp" + #include "cutlass/cutlass.h" + #include "cutlass/numeric_types.h" + #include "cutlass/tensor_ref.h" + #include "cutlass/util/host_tensor.h" + #include "cutlass/util/reference/host/tensor_fill.h" + #include "cutlass/util/reference/device/tensor_fill.h" + #include "cutlass/util/device_memory.h" + """ + ) + return res + + def globals(self) -> IndentedBuffer: + res = super().globals() + res.splice( + """ + using namespace cute; + #define CUTLASS_CHECK(status) \\ + { \\ + cutlass::Status error = status; \\ + if (error != cutlass::Status::kSuccess) { \\ + auto msg = std::string("[") + __FILE__ + "] Got cutlass error: " + \\ + cutlassGetStatusString(error) + " at: " + std::to_string(__LINE__); \\ + throw std::runtime_error(msg); \\ + } \\ + } + + // Used as pass-through functor in EVT just for type casting / rounding + template + struct identity_op { + CUTLASS_HOST_DEVICE + T operator()(T val) const { return val; } + }; + + """ + ) + return res + + def cute_int(self, int_str: str, var_name: str) -> str: + res = "" + if int_str in {"1", "1L"}: + res = "cute::Int<1>{}" + else: + res = int_str + + return f"{res} /* {var_name} */" + + _DTYPE_TO_CUTLASS = { + torch.float32: "float", + torch.float64: "double", + torch.float16: "cutlass::half_t", + torch.int32: "int", + torch.int8: "int8_t", + torch.uint8: "uint8_t", + torch.bool: "bool", + torch.bfloat16: "cutlass::bfloat16_t", + } + + def cutlass_type_cast(self, node: IRNode, ptr: str) -> str: + if node is None: + return ptr + else: + return f"({self._DTYPE_TO_CUTLASS.get(node.get_dtype())}*)({ptr})" diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/gemm_operation_extensions.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/gemm_operation_extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0efff1cd5d0bd812ceb30505db5d94d85fc335ae Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/gemm_operation_extensions.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/gemm_operation_extensions.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/gemm_operation_extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..2a386a114e86f9556f0302151b42ae8c37b7e806 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/gemm_operation_extensions.py @@ -0,0 +1,186 @@ +from ..cutlass_utils import try_import_cutlass + +if try_import_cutlass(): + import enum + + from cutlass_library.library import * # noqa: F401, F403 + from cutlass_library.gemm_operation import * # noqa: F401, F403 + + # copied / modified from original at + # https://github.com/NVIDIA/cutlass/blob/8783c41851cd3582490e04e69e0cd756a8c1db7f/tools/library/scripts/gemm_operation.py#L658 + # to support EVT similar to + # https://github.com/NVIDIA/cutlass/blob/8783c41851cd3582490e04e69e0cd756a8c1db7f/examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu#L315C69-L315C69 # noqa: B950 + class EmitGemmUniversal3xInstanceWithEVT: + """Responsible for emitting a CUTLASS 3.x template definition""" + + def 
__init__(self, operation_suffix=""): + self.operation_suffix = operation_suffix + self.includes = [ + "cutlass/cutlass.h", + "cutlass/gemm/gemm.h", + "cutlass/numeric_types.h", + "cutlass/gemm/kernel/gemm_universal.hpp", + "cutlass/gemm/collective/collective_builder.hpp", + "cutlass/epilogue/collective/collective_builder.hpp", + ] + self.builtin_epilogue_functor_template = """ + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + > + """ + self.gemm_template = """ + using EpilogueScheduleType = ${epilogue_schedule}; + static_assert(cute::is_same_v || + cute::is_same_v, + "Epilogue visitor trees are currently only supported by the TMA warp-specialized epilogue"); + static constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest; + using ElementAcc = ${element_accumulator}; + using ElementD = ${element_d}; + ${epilogue_functor}; + using ${operation_name}_epilogue = + typename cutlass::epilogue::collective::CollectiveBuilder< + ${arch}, ${opcode_class}, + cute::Shape, + cute::Shape, + cutlass::epilogue::collective::EpilogueTileAuto, + ${element_accumulator}, ${element_epilogue}, + ${element_c}, ${layout_c}, ${align_c}, + ${element_d}, ${layout_d}, ${align_d}, + EpilogueScheduleType, + ${operation_name}_epilogue_functor + >::CollectiveOp; + + using ${operation_name}_mainloop = + typename cutlass::gemm::collective::CollectiveBuilder< + ${arch}, ${opcode_class}, + ${element_a}, ${layout_a}, ${align_a}, + ${element_b}, ${layout_b}, ${align_b}, + ${element_accumulator}, + cute::Shape, + cute::Shape, + ${stages}, + ${kernel_schedule} + >::CollectiveOp; + + // Gemm operator ${operation_name} + using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal< + cute::Shape, + ${operation_name}_mainloop, + ${operation_name}_epilogue, + ${tile_scheduler}>; + + // Define named type + struct ${operation_name} : + public ${operation_name}_base { }; + + """ + + # + def instance_template(self): + return """ + ${compile_guard_start} + using GemmKernel = cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>; + manifest.append( + new ${gemm_kind}("${operation_name}")); + ${compile_guard_end} + """ + + # + def emit(self, operation): + tile_shape = operation.tile_description.tile_shape + warp_count = operation.tile_description.warp_count + # stage count set to zero indicates builder automatic stage selection + if operation.tile_description.stages > 0: + stage_count_string = f"cutlass::gemm::collective::StageCount<{str(operation.tile_description.stages)}>" + else: + stage_count_string = f"cutlass::gemm::collective::StageCountAutoCarveout" # noqa: B950 + warp_shape = [tile_shape[idx] // warp_count[idx] for idx in range(3)] + + ( + instance_layout_A, + instance_layout_B, + instance_layout_C, + instance_layout_D, + ) = ( + operation.A.layout, + operation.B.layout, + operation.C.layout, + operation.D.layout, + ) + + # 3.0 profiler integration only supports trivial epilogues for now + epilogue_vector_length = 1 + + # Support built-in epilogue functors or user-defined functions + if isinstance(operation.epilogue_functor, enum.Enum): + values = { + "epilogue_vector_length": str(epilogue_vector_length), + "element_epilogue": str(DataTypeTag[operation.element_epilogue]), # type: ignore[name-defined] + "epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor], # type: ignore[name-defined] + } + epilogue_functor = SubstituteTemplate( # type: ignore[name-defined] + self.builtin_epilogue_functor_template, values + ) + + elif 
callable(operation.epilogue_functor): + epilogue_functor = operation.epilogue_functor( + operation.procedural_name() + "_epilogue_functor" + ) + else: + epilogue_functor = str(operation.epilogue_functor) + # + + values = { + "operation_name": operation.procedural_name(), + "operation_suffix": self.operation_suffix, + "element_a": DataTypeTag[operation.A.element], # type: ignore[name-defined] + "layout_a": LayoutTag[instance_layout_A], # type: ignore[name-defined] + "element_b": DataTypeTag[operation.B.element], # type: ignore[name-defined] + "layout_b": LayoutTag[instance_layout_B], # type: ignore[name-defined] + "element_c": DataTypeTag[operation.C.element], # type: ignore[name-defined] + "layout_c": LayoutTag[instance_layout_C], # type: ignore[name-defined] + "element_d": DataTypeTag[operation.D.element], # type: ignore[name-defined] + "layout_d": LayoutTag[instance_layout_D], # type: ignore[name-defined] + "element_accumulator": DataTypeTag[operation.accumulator_type()], # type: ignore[name-defined] + "opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], # type: ignore[name-defined] # noqa: B950 + "arch": "cutlass::arch::Sm%d" % operation.arch, + "tile_shape_m": str(operation.tile_description.tile_shape[0]), + "tile_shape_n": str(operation.tile_description.tile_shape[1]), + "tile_shape_k": str(operation.tile_description.tile_shape[2]), + "cluster_m": str(operation.tile_description.cluster_shape[0]), + "cluster_n": str(operation.tile_description.cluster_shape[1]), + "cluster_k": str(operation.tile_description.cluster_shape[2]), + "warp_shape_m": str(warp_shape[0]), + "warp_shape_n": str(warp_shape[1]), + "warp_shape_k": str(warp_shape[2]), + "instruction_shape_m": str( + operation.tile_description.math_instruction.instruction_shape[0] + ), + "instruction_shape_n": str( + operation.tile_description.math_instruction.instruction_shape[1] + ), + "instruction_shape_k": str( + operation.tile_description.math_instruction.instruction_shape[2] + ), + "kernel_schedule": str(KernelScheduleTag[operation.kernel_schedule]), # type: ignore[name-defined] + "epilogue_schedule": str(EpilogueScheduleTag[operation.epilogue_schedule]), # type: ignore[name-defined] + "epilogue_functor": epilogue_functor, + "stages": stage_count_string, + "align_a": str(operation.A.alignment), + "align_b": str(operation.B.alignment), + "align_c": str(operation.C.alignment), + "align_d": str(operation.C.alignment), + "transform_a": ComplexTransformTag[operation.A.complex_transform], # type: ignore[name-defined] + "transform_b": ComplexTransformTag[operation.B.complex_transform], # type: ignore[name-defined] + "math_operation": MathOperationTag[ # type: ignore[name-defined] + operation.tile_description.math_instruction.math_operation + ], + "epilogue_vector_length": str(epilogue_vector_length), + "element_epilogue": str(DataTypeTag[operation.element_epilogue]), # type: ignore[name-defined] + "tile_scheduler": str(TileSchedulerTag[operation.tile_scheduler]), # type: ignore[name-defined] + } + + return SubstituteTemplate(self.gemm_template, values) # type: ignore[name-defined] diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_utils.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d8d3ec662556812d80e67bc6b881eb1a825cee5e --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_utils.py @@ -0,0 +1,257 
@@ +import functools +import logging +import os +import sys +from dataclasses import dataclass +from typing import Any, List, Optional + +import sympy + +import torch + +from ...codecache import cache_dir +from ...config import cuda as inductor_cuda_config +from ...ir import Layout +from .cuda_env import get_cuda_arch, get_cuda_version + +log = logging.getLogger(__name__) + + +def _rename_cutlass_import(content: str, cutlass_modules: List[str]) -> str: + for cutlass_module in cutlass_modules: + content = content.replace( + f"from {cutlass_module} import ", + f"from cutlass_library.{cutlass_module} import ", + ) + return content + + +def _gen_cutlass_file( + file_name: str, cutlass_modules: List[str], src_dir: str, dst_dir: str +) -> None: + orig_full_path = os.path.abspath(os.path.join(src_dir, file_name)) + text = "" + with open(orig_full_path) as f: + text = f.read() + text = _rename_cutlass_import(text, cutlass_modules) + dst_full_path = os.path.abspath( + os.path.join( + dst_dir, + file_name, + ) + ) + with open(dst_full_path, "w") as f: + f.write(text) + + +@functools.lru_cache(None) +def try_import_cutlass() -> bool: + # Copy CUTLASS python scripts to a temp dir and add the temp dir to Python search path. + # This is a temporary hack to avoid CUTLASS module naming conflicts. + # TODO(ipiszy): remove this hack when CUTLASS solves Python scripts packaging structure issues. + + cutlass_py_full_path = os.path.abspath( + os.path.join(inductor_cuda_config.cutlass_dir, "python/cutlass_library") + ) + tmp_cutlass_py_full_path = os.path.abspath( + os.path.join(cache_dir(), "torch_cutlass_library") + ) + dst_link = os.path.join(tmp_cutlass_py_full_path, "cutlass_library") + + if os.path.isdir(cutlass_py_full_path): + if tmp_cutlass_py_full_path not in sys.path: + if os.path.exists(dst_link): + assert os.path.islink( + dst_link + ), f"{dst_link} is not a symlink. Try to remove {dst_link} manually and try again." + assert os.path.realpath(os.readlink(dst_link)) == os.path.realpath( + cutlass_py_full_path + ), f"Symlink at {dst_link} does not point to {cutlass_py_full_path}" + else: + os.makedirs(tmp_cutlass_py_full_path, exist_ok=True) + os.symlink(cutlass_py_full_path, dst_link) + sys.path.append(tmp_cutlass_py_full_path) + try: + import cutlass_library.generator # noqa: F401 + import cutlass_library.library # noqa: F401 + import cutlass_library.manifest # noqa: F401 + + return True + + except ImportError as e: + log.debug( + "Failed to import CUTLASS packages: %s, ignoring the CUTLASS backend.", + str(e), + ) + else: + log.debug( + "Failed to import CUTLASS packages: CUTLASS repo does not exist: %s", + cutlass_py_full_path, + ) + return False + + +def _normalize_cuda_arch(arch: str) -> str: + if int(arch) >= 90: + return "90" + elif int(arch) >= 80: + return "80" + elif int(arch) >= 75: + return "75" + elif int(arch) >= 70: + return "70" + else: + raise NotImplementedError(f"Unsupported cuda arch: {arch}") + + +@dataclass +class CUTLASSArgs: + """ + CUTLASS args used to initialize a CUTLASS Manifest. 
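+ Only architectures and cuda_version must be supplied; the remaining fields are fixed, class-level defaults.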
+ """ + + architectures: Optional[str] = None + cuda_version: Optional[str] = None + + operations = "all" + build_dir = "" + curr_build_dir = "" + generator_target = "" + kernels = "all" + ignore_kernels = "" + kernel_filter_file = None + selected_kernel_list = None + interface_dir = None + filter_by_cc = True + disable_full_archs_compilation = False + + def __post_init__(self): + if self.architectures is None or self.cuda_version is None: + raise RuntimeError( + f"{self.architectures=} or {self.cuda_version=} is None!" + ) + self.architectures = _normalize_cuda_arch(self.architectures) + + +@functools.lru_cache(None) +def _gen_ops_cached(arch, version) -> List[Any]: + # Note: Cache needs to be specific for cuda architecture and version + + # Import cutlass python scripts. + assert try_import_cutlass() + import cutlass_library.generator as cutlass_generator + import cutlass_library.manifest as cutlass_manifest + + if arch is None or version is None: + log.error( + "Cannot detect cuda arch %s or cuda version %s. " + "Will discard all cutlass ops. " + "Please consider setting _inductor.cuda.arch and _inductor.cuda.version configs.", + arch, + version, + ) + return list() + arch = _normalize_cuda_arch(arch) + args = CUTLASSArgs(architectures=arch, cuda_version=version) + manifest = cutlass_manifest.Manifest(args) + + if arch == "90": + cutlass_generator.GenerateSM90(manifest, args.cuda_version) + cutlass_generator.GenerateSM80(manifest, args.cuda_version) + else: + try: + func = getattr(cutlass_generator, "GenerateSM" + arch) + func(manifest, args.cuda_version) + except AttributeError as e: + raise NotImplementedError( + "Arch " + arch + " is not supported by current cutlass lib." + ) from e + return manifest.operations + + +def gen_ops() -> List[Any]: + """ + Generates all supported CUTLASS operations. + """ + arch = get_cuda_arch() + version = get_cuda_version() + return _gen_ops_cached(arch, version) + + +def dtype_match( + torch_dtype: Optional[torch.dtype], + cutlass_dtype: "cutlass_library.library.DataType", # type: ignore[name-defined] +) -> bool: + # Import cutlass python scripts. + assert try_import_cutlass() + import cutlass_library + + if torch_dtype == torch.float: + return ( + cutlass_dtype == cutlass_library.library.DataType.f32 + or cutlass_dtype == cutlass_library.library.DataType.tf32 + ) + elif torch_dtype == torch.half: + return cutlass_dtype == cutlass_library.library.DataType.f16 + elif torch_dtype == torch.bfloat16: + return cutlass_dtype == cutlass_library.library.DataType.bf16 + else: + return False + + +def get_accumulator_dtype( + input_torch_dtypes: List[torch.dtype], +) -> Optional[torch.dtype]: + """ + Given a list of input torch dtypes, returns the inferred accumulator torch dtype. + """ + + if len(input_torch_dtypes) == 0: + return None + torch_dtype = input_torch_dtypes[0] + for dtype in input_torch_dtypes[1:]: + if torch_dtype != dtype: + raise RuntimeError(f"Unmatched input dtypes: {torch_dtype=}, {dtype=}") + if torch_dtype == torch.half: + if torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction: + return torch_dtype + else: + return torch.float + if torch_dtype in {torch.bfloat16, torch.float}: + return torch.float + raise NotImplementedError(f"Unsupported data type: {input_torch_dtypes=}") + + +def get_alignments(torch_dtype: torch.dtype) -> List[int]: + """ + Returns all possible valid CUTLASS alignments in terms of the number of elements for a given dtype. 
+ CUTLASS gemm / conv SM80 APIs support 16 bytes max alignment, and 2 bytes min alignment. + """ + + if torch_dtype in (torch.half, torch.bfloat16): + return [8, 4, 2, 1] + elif torch_dtype == torch.float: + return [4, 2, 1] + else: + raise NotImplementedError(f"unsupported {torch_dtype=} for alignments") + + +def get_max_alignment(inductor_layout: Layout) -> int: + """ + Returns the max alignment (in terms of number of elements) for a given Inductor Layout. + """ + + dtype = inductor_layout.dtype + size = inductor_layout.size + offset = inductor_layout.offset + + def is_static_int(number): + return isinstance(number, (int, sympy.Integer)) + + if is_static_int(size[-1]) and is_static_int(offset): + alignments = get_alignments(dtype) + for alignment in alignments: + if int(size[-1]) % alignment == 0 and int(offset) % alignment == 0: + return alignment + + return 1 diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/gemm_template.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/gemm_template.py new file mode 100644 index 0000000000000000000000000000000000000000..2fbb00aeb7cc15edd7e14252befc6d70dd9df183 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/gemm_template.py @@ -0,0 +1,706 @@ +import copy +import logging +import re +from typing import cast, Dict, List, Optional, Tuple + +from ...config import cuda as inductor_cuda_config +from ...ir import Buffer, CUDATemplateBuffer, FixedLayout, IRNode, Layout +from ..common import IndentedBuffer + +from . import cutlass_utils +from .cuda_kernel import CUDATemplateKernel +from .cuda_template import CUTLASSTemplate +from .cutlass_epilogue_gen import ( + CutlassEVTEpilogueArgumentFormatter, + CutlassEVTEpilogueTypeFormatter, +) + +log = logging.getLogger(__name__) + +GEMM_TEMPLATE = r""" +{{template.header().getvalue()}} +{{template.globals().getvalue()}} +{{instance_definition}} +// When workspace_size is not a nullptr, populates requested workspace_size and returns. +// Otherwise, computes the Gemm kernel using the given workspace ptr. +extern "C" { +{{kernel.def_kernel(inputs=[X, W, Bias], outputs=[Y], names_str="X, W, Bias, Y", input_reorder=input_reorder)}} { + try { + {{kernel.check_not_null(X)}} + {{kernel.check_not_null(W)}} + {{kernel.check_not_null(Bias)}} + {{kernel.check_not_null(Y)}} + int64_t B = {{kernel.size(Y, 0, -3, default_value=1)}}; + int64_t M = {{kernel.size(X, -2)}}; + int64_t K = {{kernel.size(X, -1)}}; + int64_t N = {{kernel.size(W, -1)}}; + using ElementComputeEpilogue = {{instance_type}}::ElementAccumulator; + using coord_t = cutlass::gemm::GemmCoord::Index; + {{instance_type}}::Arguments arguments; + {{template.render_gemm_arguments(argument_template, epilogue_template, should_swap_xw, + X, W, Bias, Y, alpha, beta, kernel, epilogue_args)}} + {{instance_type}} gemm_op; + if (workspace_size) { + *workspace_size = gemm_op.get_workspace_size(arguments); + return 0; + } + { + auto status = gemm_op.can_implement(arguments); + CUTLASS_CHECK(status); + } + { + auto status = gemm_op.initialize(arguments, workspace, stream); + CUTLASS_CHECK(status); + } + { + auto status = gemm_op(stream); + CUTLASS_CHECK(status); + } + } + catch (std::exception& e) { + std::cerr << "Runtime error: " << e.what() << std::endl; + return -1; + } + catch (...) 
{ + return -1; + } + return 0; +} +} +""" + + +GEMM_ARGS_CUTLASS_2X = r""" + int64_t batch_stride_x = {{kernel.stride(X, -3)}}; + int64_t row_stride_x = {{kernel.row_or_column_stride(X)}}; + int64_t batch_stride_w = {{kernel.stride(W, -3)}}; + int64_t row_stride_w = {{kernel.row_or_column_stride(W)}}; + int64_t batch_stride_bias = {{kernel.stride(Bias, -3)}}; + int64_t row_stride_bias = {{kernel.row_or_column_stride(Bias)}}; + int64_t batch_stride_y = {{kernel.stride(Y, -3)}}; + int64_t row_stride_y = {{kernel.row_or_column_stride(Y)}}; + // Initialize GemmUniversalInstance arguments. + arguments = { + {{template.gemm_mode()}}, // GemmUniversalMode mode + { + static_cast(M), + static_cast(N), + static_cast(K) + }, // GemmCoord problem_size + {{split_k if split_k > 1 else 'B'}}, // int batch_count + {ElementComputeEpilogue({{alpha}}), ElementComputeEpilogue({{beta}})}, // typename EpilogueOutputOp::Params epilogue + {{template.cutlass_type_cast(X, kernel.ptr(X))}}, // void const * ptr_A + {{template.cutlass_type_cast(W, kernel.ptr(W))}}, // void const * ptr_B + {{template.cutlass_type_cast(Bias, kernel.ptr(Bias))}}, // void const * ptr_C + {{template.cutlass_type_cast(Y, kernel.ptr(Y))}}, // void * ptr_D + batch_stride_x, // int64_t batch_stride_A + batch_stride_w, // int64_t batch_stride_B + batch_stride_bias, // int64_t batch_stride_C + batch_stride_y, // int64_t batch_stride_D + row_stride_x, // typename LayoutA::Stride::LongIndex lda + row_stride_w, // typename LayoutB::Stride::LongIndex ldb + row_stride_bias, // typename LayoutC::Stride::LongIndex ldc + row_stride_y, // typename LayoutC::Stride::LongIndex ldd + }; +""" + + +GEMM_ARGS_CUTLASS_3X = r""" + // Initialize GemmUniversal3xInstance arguments. + arguments = { + {{template.gemm_mode()}}, // GemmUniversalMode mode + { + static_cast({{M}}), + static_cast({{N}}), + static_cast(K), + static_cast(B) + }, // ProblemShape problem_shape + { + {{template.cutlass_type_cast(X, kernel.ptr(X))}}, // ElementA const* ptr_A + { + {{template.cute_int(kernel.stride(X, -2), "stride_x0")}}, + {{template.cute_int(kernel.stride(X, -1), "stride_x1")}}, + {{template.cute_int(kernel.stride(X, -3), "batch_stride_x")}} + }, // StrideA dA + {{template.cutlass_type_cast(W, kernel.ptr(W))}}, // ElementB const* ptr_B + { + {{template.cute_int(kernel.stride(W, -1), "stride_w1")}}, + {{template.cute_int(kernel.stride(W, -2), "stride_w0")}}, + {{template.cute_int(kernel.stride(W, -3), "batch_stride_w")}} + }, // StrideB dB + }, // MainloopArguments mainloop + {{epilogue_arguments}} + }; +""" + +GEMM_ARGS_CUTLASS_3X_EPILOGUE = r""" + // see https://tinyurl.com/4rk89z48 + { + {{epilogue_args}}, // thread, typename FusionCallbacks::Arguments ( EVT ) or ThreadEpilogueOp::Params (non-EVT ) + {{template.cutlass_type_cast(Bias, kernel.ptr(Bias))}}, // ElementC const* ptr_C + { + {{template.cute_int(kernel.stride(Bias, -2, 1), "stride_bias0")}}, + {{template.cute_int(kernel.stride(Bias, -1, 1), "stride_bias1")}}, + {{template.cute_int(kernel.stride(Bias, -3), "batch_stride_bias")}} + }, // StrideC dC + {{template.cutlass_type_cast(Y, kernel.ptr(Y))}}, // ElementD const* ptr_D + { + {{template.cute_int(kernel.stride(Y, -2), "stride_y0")}}, + {{template.cute_int(kernel.stride(Y, -1), "stride_y1")}}, + {{template.cute_int(kernel.stride(Y, -3), "batch_stride_y")}} + }, // StrideD dD + }, // EpilogueArguments epilogue +""" + + +class CUTLASSGemmTemplate(CUTLASSTemplate): + """ + CUTLASS GEMM template, which is used to generate CUTLASS GEMM kernels + including those which 
allow flexible fusions with epilogues. + """ + + def __init__( + self, + input_nodes: List[Buffer], + layout: Layout, + alpha: float, + beta: float, + input_reorder: Optional[List[int]] = None, + can_fuse_epilogue: Optional[bool] = None, + ): + """ + Args: + input_nodes: input nodes of the kernel + layout: layout of the output node + alpha: alpha value of the GEMM operation + beta: beta value of the GEMM operation + input_reorder: reorder of the input nodes + can_fuse_epilogue: If set to True, will only list and use operators capable of flexible epilogue fusions. + If False, it will not use those. If None, both may be listed, but it will not allow fusions. + Defaults to None + """ + super().__init__("cutlass_gemm", input_nodes, layout, input_reorder) + self.alpha = alpha + self.beta = beta + self.can_fuse_epilogue = can_fuse_epilogue + + @staticmethod + def add_cutlass_gemm_choices( + choices, + layout, + input_nodes, + alpha=1, + beta=0, + input_reorder=None, + fuseable=True, + non_fuseable=True, + ): + if non_fuseable: + if fuseable: + # list both fuseable and non-fuseable ops, and treat them all as non-fuseable + can_fuse_epilogue = False + else: + can_fuse_epilogue = None + + cutlass_template = CUTLASSGemmTemplate( + input_nodes, + layout, + alpha=alpha, + beta=beta, + input_reorder=input_reorder, + can_fuse_epilogue=can_fuse_epilogue, + ) + ops = cutlass_template.gen_ops() + for op in ops: + cutlass_template.maybe_append_choice( + choices, + op=op, + ) + else: + ops = [] + if fuseable: + cutlass_template_evt = CUTLASSGemmTemplate( + input_nodes, + layout, + alpha=alpha, + beta=beta, + input_reorder=input_reorder, + can_fuse_epilogue=True, + ) + # This will list only ops capable of EVT fusion + ops_evt = cutlass_template_evt.gen_ops() + for op in ops_evt: + cutlass_template_evt.maybe_append_choice( + choices, + op=op, + ) + else: + ops_evt = [] + log.debug( + "Added %d cutlass gemm configs and %d fuseable gemm configs.", + len(ops), + len(ops_evt), + ) + + def header(self) -> IndentedBuffer: + res = super().header() + res.splice( + """ + #include "cutlass/gemm/gemm.h" + #include "cutlass/gemm/device/gemm_universal.h" + #include "cutlass/gemm/device/gemm_universal_adapter.h" + #include "cutlass/gemm/kernel/gemm_universal.hpp" + #include "cutlass/gemm/collective/collective_builder.hpp" + #include "cutlass/epilogue/collective/collective_builder.hpp" + #include "cutlass/epilogue/collective/default_epilogue.hpp" + #include "cutlass/epilogue/thread/linear_combination.h" + #include "cutlass/gemm/dispatch_policy.hpp" + #include "cutlass/gemm/kernel/tile_scheduler.hpp" + #include "cutlass/util/distribution.h" + #include "cutlass/util/packed_stride.hpp" + #include "cutlass/util/tensor_view_io.h" + """ + ) + return res + + @staticmethod + def cutlass_layout(torch_layout) -> "Optional[cutlass_lib.LayoutType]": # type: ignore[name-defined] + assert cutlass_utils.try_import_cutlass() + import cutlass_library.library as cutlass_lib + + if torch_layout.stride[-1] == 1: + return cutlass_lib.LayoutType.RowMajor + elif torch_layout.stride[-2] == 1: + return cutlass_lib.LayoutType.ColumnMajor + else: + return None + + @staticmethod + def flip_cutlass_layout( + cutlass_layout: "cutlass_lib.LayoutType", # type: ignore[name-defined] + ) -> "cutlass_lib.LayoutType": # type: ignore[name-defined] + assert cutlass_utils.try_import_cutlass() + import cutlass_library.library as cutlass_lib + + if cutlass_layout == cutlass_lib.LayoutType.RowMajor: + return cutlass_lib.LayoutType.ColumnMajor + else: + return 
cutlass_lib.LayoutType.RowMajor + + @staticmethod + def layout_match(torch_layout, cutlass_layout) -> bool: + return CUTLASSGemmTemplate.cutlass_layout(torch_layout) == cutlass_layout + + @staticmethod + def set_alignment(torch_layout, op_element) -> bool: + alignment = cutlass_utils.get_max_alignment(torch_layout) + if alignment < op_element.alignment: + return False + else: + op_element.alignment = alignment + return True + + @staticmethod + def has_tma_epilogue(op) -> bool: + assert cutlass_utils.try_import_cutlass() + import cutlass_library.library as cutlass_lib + + result = False + if op.gemm_kind == cutlass_lib.GemmKind.Universal3x: + epilogue_schedule_str = str(op.epilogue_schedule).split(".")[-1] + result = epilogue_schedule_str.lower().startswith("tma") + return result + + @staticmethod + def supports_evt(op: "cutlass_library.gemm_op.GemmOperation") -> bool: # type: ignore[name-defined] + """ + returns True if the op is capable of flexible epilogue fusions + using epilogue visitor trees. + + See https://github.com/NVIDIA/cutlass/blob/e01b9b5029b7caca5a43c29f7d2714d7cf1dcae8/examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu#L283-L285 # noqa: B950 + """ + assert cutlass_utils.try_import_cutlass() + import cutlass_library.library as cutlass_lib + + if op.gemm_kind != cutlass_lib.GemmKind.Universal3x: + return False + if op.epilogue_schedule not in ( + cutlass_lib.EpilogueScheduleType.TmaWarpSpecialized, + cutlass_lib.EpilogueScheduleType.TmaWarpSpecializedCooperative, + ): + return False + + return True + + def render_evt_epilogue_declaration( + self, + template_output_node_name: str, + evt_type_name: str, + epilogue_nodes: List[IRNode], + ) -> str: + """Generates the epilogue for the EVT epilogue fusion""" + return CutlassEVTEpilogueTypeFormatter.ir_to_evt_string( + template_output_node_name, evt_type_name, epilogue_nodes + ) + + def define_gemm_instance( + self, + op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] + output_buffer_name: str, + epilogue_nodes: Optional[List[IRNode]] = None, + ) -> Tuple[str, str]: + assert cutlass_utils.try_import_cutlass() + import cutlass_library.gemm_operation as cutlass_gemm_op + import cutlass_library.library as cutlass_lib + + from torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions import ( + EmitGemmUniversal3xInstanceWithEVT, + ) + + if op.gemm_kind == cutlass_lib.GemmKind.Universal3x: + if epilogue_nodes is not None and len(epilogue_nodes) > 0: + emitter = EmitGemmUniversal3xInstanceWithEVT() + op.epilogue_functor = lambda epilogue_functor_type_name: self.render_evt_epilogue_declaration( + output_buffer_name, epilogue_functor_type_name, epilogue_nodes + ) + else: + emitter = cutlass_gemm_op.EmitGemmUniversal3xInstance() + op_def = emitter.emit(op) + pattern = re.compile(r"\s*struct\s(.*?)\s:") + decl = [line for line in op_def.split("\n") if "struct " in line][-1] + else: + if epilogue_nodes is not None and len(epilogue_nodes) > 0: + raise RuntimeError( + "EVT epilogue fusion is not supported for Cutlass 2.x ops." 
+ ) + emitter = cutlass_gemm_op.EmitGemmInstance() + op_def = emitter.emit(op) + op_def = op_def.replace( + "cutlass::gemm::device::Gemm", "cutlass::gemm::device::GemmUniversal" + ) + op_def = op_def.replace("false,", "") + pattern = re.compile(r"\s*using\s(.*?)\s=") + decl = op_def.split("\n")[2] + match = pattern.match(decl) + if match is None: + raise RuntimeError("Invalid Gemm config: \n" + op_def) + op_type = match.groups()[0] + if op.gemm_kind == cutlass_lib.GemmKind.Universal3x: + op_def += f"\n using {op_type}_device_type = cutlass::gemm::device::GemmUniversalAdapter<{op_type}>;\n" + op_type = f"{op_type}_device_type" + return op_def, op_type + + @staticmethod + def should_swap_XW( + bias: IRNode, + beta: float, + ) -> bool: + return True + + # TODO(ipiszy): Check whether it's necessary to swap X/W. + # strides = bias.get_stride() + # if strides[-1] != 1: + # return True + # for stride in strides[:-1]: + # if stride != 0: + # return True + # return False + + @staticmethod + def swap_XW( + op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] + ) -> "cutlass_library.gemm_op.GemmOperation": # type: ignore[name-defined] + # Swap X and W in GemmOperation. + new_op = copy.deepcopy(op) + new_op.A.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.A.layout) + new_op.B.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.B.layout) + new_op.A, new_op.B = new_op.B, new_op.A + new_op.C.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.C.layout) + new_op.D.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.D.layout) + return new_op + + def filter_op( + self, + op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] + ) -> "cutlass_library.gemm_op.GemmOperation": # type: ignore[name-defined] + assert cutlass_utils.try_import_cutlass() + import cutlass_library.library as cutlass_lib + + # Skip simt kernels + if ( + op.tile_description.math_instruction.opcode_class + == cutlass_lib.OpcodeClass.Simt + ): + return None + + # Only keep GemmUniversal kernels + if op.gemm_kind not in { + cutlass_lib.GemmKind.Universal, + cutlass_lib.GemmKind.Universal3x, + }: + return None + # Filter ops by dtypes. + X = self.input_nodes[0] + W = self.input_nodes[1] + accumulator_torch_dtype = cutlass_utils.get_accumulator_dtype( + [X.get_dtype(), W.get_dtype()], + ) + if not ( + cutlass_utils.dtype_match(X.get_dtype(), op.A.element) + and cutlass_utils.dtype_match(W.get_dtype(), op.B.element) + and cutlass_utils.dtype_match( + self.output_node.get_layout().dtype, op.C.element + ) + and cutlass_utils.dtype_match( + accumulator_torch_dtype, op.accumulator_type() + ) + ): + return None + + # Filter ops by input layouts. + if not ( + self.layout_match(X.get_layout(), op.A.layout) + and self.layout_match(W.get_layout(), op.B.layout) + ): + return None + + # Update op. + op = copy.deepcopy(op) + + # Set output layout. + op.D.layout = CUTLASSGemmTemplate.cutlass_layout(self.output_node.get_layout()) + + # Filter ops by alignments and set alignments. + if not ( + self.set_alignment(X.get_layout(), op.A) + and self.set_alignment(W.get_layout(), op.B) + and self.set_alignment(self.output_node.get_layout(), op.D) + ): + return None + + # Set epilogue. + # TODO: update epilogue functor according to epilogues. + op.element_epilogue = op.accumulator_type() + + # Set bias layout and alignment. 
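+ # Note: for CUTLASS 2.x kernels the bias (source matrix C) layout must match the output (D) layout, while for 3.x kernels a missing bias is represented by a void C element (see below).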
+ if len(self.input_nodes) >= 3 and self.input_nodes[2] is not None: + Bias = self.input_nodes[2] + bias_layout = CUTLASSGemmTemplate.cutlass_layout(Bias.get_layout()) + if op.gemm_kind != cutlass_lib.GemmKind.Universal3x: + if bias_layout != op.D.layout: + # For cutlass2, bias and output layout must match + return None + else: + op.C.layout = bias_layout + if not self.set_alignment(Bias.get_layout(), op.C): + return None + else: + if op.gemm_kind == cutlass_lib.GemmKind.Universal3x: + op.C.element = cutlass_lib.DataType.void + else: + op.C.layout = op.D.layout + supports_evt: bool = self.supports_evt(op) + if (self.can_fuse_epilogue is not None) and ( + self.can_fuse_epilogue != supports_evt + ): + return None + if inductor_cuda_config.cutlass_only_evt_capable_ops and not supports_evt: + return None + return op + + def gen_ops(self) -> "List[cutlass_gemm_op.GemmOperation]": # type: ignore[name-defined] + assert cutlass_utils.try_import_cutlass() + import cutlass_library.gemm_operation as cutlass_gemm_op + import cutlass_library.library as cutlass_lib + + ops = cutlass_utils.gen_ops()[cutlass_lib.OperationKind.Gemm] + res: Dict[str, cutlass_gemm_op.GemmOperation] = dict() + num_3x_ops = 0 + num_2x_ops = 0 + for op_dict in ops.values(): + for op_list in op_dict.values(): + for op in op_list: + assert isinstance(op, cutlass_gemm_op.GemmOperation) + filter_res = self.filter_op(op) + if ( + filter_res is not None + and res.get(filter_res.configuration_name(), None) is None + ): + res[filter_res.configuration_name()] = filter_res + for op in res.values(): + if op.gemm_kind == cutlass_lib.GemmKind.Universal3x: + num_3x_ops += 1 + else: + num_2x_ops += 1 + log.debug( + "Got cutlass configs: total number of ops: %d, " + "total number of 3x ops: %d, total number of 2x ops: %d", + len(res), + num_3x_ops, + num_2x_ops, + ) + return list(res.values())[: inductor_cuda_config.cutlass_max_profiling_configs] + + def gemm_mode(self) -> str: + sizes = self.output_node.get_size() + if len(sizes) > 2: + return "cutlass::gemm::GemmUniversalMode::kBatched" + else: + return "cutlass::gemm::GemmUniversalMode::kGemm" + + def render_gemm_arguments( + self, + argument_template: str, + epilogue_template: str, + should_swap_xw: bool, + X: IRNode, + W: IRNode, + Bias: IRNode, + Y: IRNode, + alpha: float, + beta: float, + kernel: CUDATemplateKernel, + epilogue_args, + ) -> str: + options = dict( + alpha=self.alpha, + beta=self.beta, + X=X, + W=W, + Y=Y, + Bias=Bias, + template=self, + kernel=kernel, + M="M", + N="N", + epilogue_args=epilogue_args, + ) + + if epilogue_template is not None: + if should_swap_xw: + # Swap + def clone_with_transposed_stride(node: IRNode) -> IRNode: + old_layout = node.get_layout() + new_stride = list(old_layout.stride) + new_stride[-2], new_stride[-1] = new_stride[-1], new_stride[-2] + new_layout = FixedLayout( + old_layout.device, + old_layout.dtype, + list(old_layout.size), + new_stride, + old_layout.offset, + ) + return Buffer(node.get_name(), new_layout) + + new_X = clone_with_transposed_stride(X) + new_W = clone_with_transposed_stride(W) + new_Bias = clone_with_transposed_stride(Bias) + new_Y = clone_with_transposed_stride(Y) + options["X"], options["W"], options["Bias"], options["Y"] = ( + new_W, + new_X, + new_Bias, + new_Y, + ) + options["M"], options["N"] = "N", "M" + + epilogue_arguments = self._template_from_string(epilogue_template).render( + **options + ) + arguments = self._template_from_string(argument_template).render( + epilogue_arguments=epilogue_arguments, **options + ) + 
else: + arguments = self._template_from_string(GEMM_ARGS_CUTLASS_2X).render( + split_k=1, **options + ) + return arguments + + def render( # type: ignore[override] + self, + kernel: CUDATemplateKernel, + op: "cutlass_gemm_op.GemmOperation" = None, # type: ignore[name-defined] + template_buffer_node: Optional[CUDATemplateBuffer] = None, + epilogue_nodes: Optional[List[IRNode]] = None, + **kwargs, + ) -> str: + if epilogue_nodes is not None and len(epilogue_nodes) > 0: + assert self.can_fuse_epilogue and CUTLASSGemmTemplate.supports_evt( + op + ), "op does not support EVT epilogue fusion" + assert ( + template_buffer_node is not None + ), "Template node is required for epilogue fusion" + assert isinstance( + template_buffer_node, CUDATemplateBuffer + ), f"Template node has to be a CUDATemplateBuffer, is type {type(template_buffer_node)}" + assert ( + template_buffer_node.name is not None + ), "Output node has to be a Buffer with a name" + # This is the name of the output of the Matmul, before epilogues are applied. + # it is not necessarily materialized in global memory if we have an epilogue + + template_output_node_name = ( + template_buffer_node.name if template_buffer_node is not None else None + ) + + assert cutlass_utils.try_import_cutlass() + import cutlass_library.gemm_operation as cutlass_gemm_op + import cutlass_library.library as cutlass_lib + + assert isinstance( + op, cutlass_gemm_op.GemmOperation + ), "op argument is required and has to be an instance of GemmOperation" + if template_buffer_node is not None: + self.output_node = template_buffer_node + if epilogue_nodes is not None and len(epilogue_nodes) > 0: + self.output_node = cast(Buffer, epilogue_nodes[-1]) + + assert len(self.input_nodes) >= 2 and self.output_node is not None + X, W = self.input_nodes[0], self.input_nodes[1] + Y = self.output_node + Bias = None if len(self.input_nodes) == 2 else self.input_nodes[2] + + epilogue_template: Optional[str] = None + should_swap_xw: bool = False + epilogue_args = f"{{ElementComputeEpilogue({self.alpha}), ElementComputeEpilogue({self.beta})}}" + if op.gemm_kind == cutlass_lib.GemmKind.Universal3x: + if Bias is not None and self.has_tma_epilogue(op): + if self.should_swap_XW(Bias, self.beta): + # TMA epilogue requires bias vector in column major to get best perf. + op = self.swap_XW(op) + should_swap_xw = True + if epilogue_nodes is not None and len(epilogue_nodes) > 0: + epilogue_args = ( + CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string( + cast(str, template_output_node_name), epilogue_nodes + ) + ) + epilogue_template = GEMM_ARGS_CUTLASS_3X_EPILOGUE + argument_template = GEMM_ARGS_CUTLASS_3X + else: + # TODO: Support split_k. 
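+ # CUTLASS 2.x path: EVT epilogue fusion is not available here, and render_gemm_arguments currently renders these arguments with split_k fixed to 1.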
+ argument_template = GEMM_ARGS_CUTLASS_2X + + instance_definition, instance_type = self.define_gemm_instance( + op, cast(str, template_output_node_name), epilogue_nodes + ) + options = dict( + alpha=self.alpha, + beta=self.beta, + X=X, + W=W, + Y=Y, + Bias=Bias, + epilogue_template=epilogue_template, + argument_template=argument_template, + should_swap_xw=should_swap_xw, + template=self, + kernel=kernel, + instance_definition=instance_definition, + instance_type=instance_type, + input_reorder=self.input_reorder, + epilogue_args=epilogue_args, + ) + res = self._template_from_string(GEMM_TEMPLATE).render(**options) + return res diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py new file mode 100644 index 0000000000000000000000000000000000000000..9df1bc8df07ccee677e62434a14b22b8afa7a4b3 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py @@ -0,0 +1,3008 @@ +from __future__ import annotations + +import collections +import contextlib +import dataclasses +import functools +import itertools +import logging +import math +import operator +import os +import textwrap +from typing import Any, Counter, Dict, Iterable, List, Optional, Set, Tuple, Union + +import sympy + +import torch + +import torch._logging +from torch._prims_common import is_integer_dtype +from torch.utils._sympy.functions import FloorDiv, ModularIndexing +from torch.utils._sympy.value_ranges import ValueRanges + +from ..._dynamo.utils import counters +from .. import config, ir, scheduler +from ..codecache import code_hash, get_path, PyCodeCache +from ..dependencies import MemoryDep, StarDep +from ..ir import IRNode, ReductionHint, TritonTemplateBuffer +from ..optimize_indexing import indexing_dtype_strength_reduction +from ..scheduler import BaseScheduling, WhyNoFuse +from ..triton_heuristics import AutotuneHint +from ..utils import ( + do_bench, + get_fused_kernel_name, + get_kernel_metadata, + green_text, + is_welford_reduction, + next_power_of_2, + Placeholder, + sympy_product, + sympy_subs, + sympy_symbol, + unique, + yellow_text, +) +from ..virtualized import ops, V +from ..wrapper_benchmark import get_kernel_category_by_source_code +from .common import ( + CSEVariable, + DeferredLine, + free_symbol_startswith, + IndentedBuffer, + index_prevent_reordering, + Kernel, + OpOverrides, + PythonPrinter, + SizeArg, + TensorArg, +) +from .triton_utils import config_of, signature_of, signature_to_meta + +log = logging.getLogger(__name__) +perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints") +schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") +fusion_log = torch._logging.getArtifactLogger(__name__, "fusion") + + +class TritonPrinter(PythonPrinter): + def _print_floor(self, expr): + assert len(expr.args) == 1 + return f"tl.math.floor({self.paren(self._print(expr.args[0]))})" + + def _helper_sqrt(self, expr): + return f"tl.math.sqrt({self.paren(self._print(expr))}.to(tl.float32))" + + def _print_Where(self, expr): + c = self.doprint(expr.args[0]) + p = self.doprint(expr.args[1]) + q = self.doprint(expr.args[2]) + return f"tl.where({c}, {p}, {q})" + + def _print_Min(self, expr): + nargs = len(expr.args) + if len(expr.args) == 1: + return self._print(expr.args[0]) + + mid = len(expr.args) // 2 + a = self._print(sympy.Min(*expr.args[:mid])) + b = self._print(sympy.Min(*expr.args[mid:])) + return f"tl.math.min({a}, {b})" + + def _print_Max(self, 
expr): + nargs = len(expr.args) + if len(expr.args) == 1: + return self._print(expr.args[0]) + + mid = len(expr.args) // 2 + a = self._print(sympy.Max(*expr.args[:mid])) + b = self._print(sympy.Max(*expr.args[mid:])) + return f"tl.math.max({a}, {b})" + + def _print_Abs(self, expr): + assert len(expr.args) == 1 + return f"tl.abs({self._print(expr.args[0])})" + + +texpr = TritonPrinter().doprint +pexpr = PythonPrinter().doprint + + +def triton_compute_type(dtype): + triton_type_name = str(dtype).split(".")[-1] + if triton_type_name == "bool": + triton_type_name = "int1" + elif triton_type_name in ("float16", "bfloat16"): + # float16 math is done in float32 inside the kernel + triton_type_name = "float32" + elif triton_type_name == "float8_e4m3fn": + triton_type_name = "float8e4nv" + elif triton_type_name == "float8_e5m2": + triton_type_name = "float8e5" + return f"tl.{triton_type_name}" + + +def triton_acc_type(dtype): + if is_integer_dtype(dtype) and dtype.is_signed: + nbits = 64 if dtype == torch.int64 else 32 + return f"tl.int{nbits}" + return triton_compute_type(dtype) + + +def triton_constant(value): + if value == float("inf"): + return 'float("inf")' + elif value == float("-inf"): + return 'float("-inf")' + elif math.isnan(value): + return 'float("nan")' + return repr(value) + + +class TritonCSEVariable(CSEVariable): + def __init__(self, name, bounds: ValueRanges): + super().__init__(name, bounds) + # We'll use this to track which masks the variable needs when used for indirect indexing + self.mask_vars: Set[str] = set() + + def update_on_args(self, name, args, kwargs): + # When making a variable that is going to be used in indirect indexing + # if a where clause is used it should mean that the result is always a + # valid index, so you shouldn't include any of the dependent variables + # in the resulting load mask + if name == "where": + return + for arg in args: + if isinstance(arg, TritonCSEVariable): + self.mask_vars.update(arg.mask_vars) + elif isinstance(arg, sympy.Symbol) and arg.name[0] in "xyr": + # most of the time index vars don't need masks associated with them + # however, when index vars are used to compute indices for indirect reads + # those reads should subsequently be masked, + self.mask_vars.update({f"{arg.name[0]}mask"}) + + +class TritonOverrides(OpOverrides): + """Map element-wise ops to Triton""" + + @staticmethod + def to_dtype(x, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None): + def _get_min_elements_per_thread( + src_dtype: torch.dtype, dst_dtype: torch.dtype + ) -> int: + if src_dtype == dst_dtype: + # No data type conversion is needed. No requirements on min_elem_per_thread. + return 0 + + # fp8 data type conversions has min_elem_per_thread requirements. + # Refer to Triton implementations here: + # https://github.com/openai/triton/blob/10f59d8ce04052521c1bc0cb3a3f8b98918fc7e3/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp#L10. + fp8_dtypes = { + torch.float8_e4m3fn, + torch.float8_e5m2, + } + # Triton doesn't support type conversions between fp8_e4m3 and fp8_e5m2. + assert not ( + src_dtype in fp8_dtypes + and dst_dtype in fp8_dtypes + and src_dtype != dst_dtype + ), "Conversions between float8_e5m2 and float8_e4m3fn is not supported!" + if src_dtype == torch.float8_e5m2 or dst_dtype == torch.float8_e5m2: + return 4 + if src_dtype == torch.float8_e4m3fn or dst_dtype == torch.float8_e4m3fn: + return 2 + # No requirements on min_elem_per_thread. + return 0 + + if src_dtype is not None: + # Both dtype and src_dtype are set. 
This is used by torch to(dtype=dtype). + # It takes the maximum min_elem_per_thread if there are multiple fp8 conversions + # in the same kernel. + V.kernel.min_elem_per_thread = max( + _get_min_elements_per_thread(src_dtype, dtype), + V.kernel.min_elem_per_thread, + ) + + if dtype == torch.bool: + return f"({x} != 0)" + elif dtype == torch.uint8: + # to work around llvm uint conversion semantics + # that produces 0's for negative values + return f"{x}.to(tl.int8).to(tl.uint8)" + return f"{x}.to({triton_compute_type(dtype)})" + + @staticmethod + def to_dtype_bitcast(x, dtype: torch.dtype): + return f"{x}.to({triton_compute_type(dtype)}, bitcast=True)" + + @classmethod + def constant(cls, value, dtype): + if dtype == torch.uint8: + # tl.full is broken for uint8, remove once triton is fixed. + # See openai/triton#1919 + tmp = cls.constant(value, torch.int16) + return cls.to_dtype(tmp, dtype) + + type_ = torch._prims_common.dtype_to_type(dtype) + triton_val = triton_constant(type_(value)) + triton_type = triton_compute_type(dtype) + + if triton_type == "tl.float32": + # Float constants are always f32 in triton + return triton_val + + # NOTE: We use a tensor here in order to get the expected type. + # Otherwise, e.g. float64 constants would be trunctated to float32. + # Also, we could just use shape=[1] here but starting with the correct + # ndim avoids extra `tt.expand_dim` ops appearing in the triton IR. + ndim = V.kernel.triton_tensor_ndim() + shape = [1] * ndim + return f"tl.full({shape}, {triton_val}, {triton_type})" + + @staticmethod + def abs(x): + return f"tl.abs({x})" + + @staticmethod + def libdevice_abs(x): + return f"tl.math.abs({x})" + + @staticmethod + def exp(x): + return f"tl.exp({x})" + + @staticmethod + def libdevice_exp(x): + return f"tl.math.exp({x})" + + @staticmethod + def exp2(x): + return f"tl.math.exp2({x})" + + @staticmethod + def expm1(x): + return f"tl.math.expm1({x})" + + @staticmethod + def sqrt(x): + return f"tl.sqrt({x})" + + @staticmethod + def libdevice_sqrt(x): + return f"tl.math.sqrt({x})" + + @staticmethod + def relu(x): + bug = config.triton.inject_relu_bug_TESTING_ONLY + if bug == "compile_error": + return "compile error!" 
+ elif bug == "runtime_error": + # NB: this only triggers runtime error as long as input + # is not all zero + return f'triton_helpers.device_assert_then({x} == 0, "injected assert fail", {x})' + elif bug == "accuracy": + return f"{x} + 1" + elif bug is None: + return ops.maximum("0", x) + else: + raise AssertionError( + f"unrecognized config triton.inject_relu_bug_TESTING_ONLY = {bug!r}" + ) + + @staticmethod + def minimum(a, b): + return f"triton_helpers.minimum({a}, {b})" + + @staticmethod + def maximum(a, b): + return f"triton_helpers.maximum({a}, {b})" + + @staticmethod + def where(a, b, c): + return f"tl.where({a}, {b}, {c})" + + @staticmethod + def cos(x): + return f"tl.cos({x})" + + @staticmethod + def libdevice_cos(x): + return f"tl.math.cos({x})" + + @staticmethod + def sin(x): + return f"tl.sin({x})" + + @staticmethod + def libdevice_sin(x): + return f"tl.math.sin({x})" + + @classmethod + def index_expr(cls, expr, dtype): + index_str, mask_vars, mask, expand_str = V.kernel.indexing(expr) + # This is called from CSEProxy.__getattr__, so we'll set the bounds there + var = V.kernel.cse.generate(V.kernel.compute, index_str) + + if dtype not in {torch.int32, torch.int64}: + var = V.kernel.cse.generate(V.kernel.compute, cls.to_dtype(var, dtype)) + var.mask_vars = mask_vars + return var + + @staticmethod + def masked(mask, body, other): + with V.kernel.mask_loads(mask) as new_mask: + result = body() + + # Take dtype from result to prevent accidental promotion + other = V.kernel.cse.generate( + V.kernel.compute, + f"tl.full({result}.shape, {triton_constant(other)}, {result}.dtype)", + ) + return ops.where(new_mask, result, other) + + @staticmethod + def lgamma(x): + return f"tl.math.lgamma({x})" + + @staticmethod + def erf(x): + return f"tl.math.erf({x})" + + @staticmethod + def cosh(x): + return f"tl.math.cosh({x})" + + @staticmethod + def sinh(x): + return f"tl.math.sinh({x})" + + @staticmethod + def acos(x): + return f"tl.math.acos({x})" + + @staticmethod + def acosh(x): + return f"tl.math.acosh({x})" + + @staticmethod + def asin(x): + return f"tl.math.asin({x})" + + @staticmethod + def asinh(x): + return f"tl.math.asinh({x})" + + @staticmethod + def atan2(x, y): + return f"tl.math.atan2({x}, {y})" + + @staticmethod + def atan(x): + return f"tl.math.atan({x})" + + @staticmethod + def atanh(x): + return f"tl.math.atanh({x})" + + @staticmethod + def copysign(x, y): + return f"tl.math.copysign({x}, {y})" + + @staticmethod + def erfc(x): + return f"tl.math.erfc({x})" + + @staticmethod + def erfinv(x): + return f"tl.math.erfinv({x})" + + @staticmethod + def hypot(x, y): + return f"tl.math.hypot({x}, {y})" + + @staticmethod + def log10(x): + return f"tl.math.log10({x})" + + @staticmethod + def nextafter(x, y): + return f"tl.math.nextafter({x}, {y})" + + @staticmethod + def logical_and(a, b): + return f"{a} & {b}" + + @staticmethod + def logical_not(a): + return f"{a} == 0" + + @staticmethod + def logical_or(a, b): + return f"{a} | {b}" + + @staticmethod + def logical_xor(a, b): + return f"({a} ^ {b})" + + @staticmethod + def bitwise_and(a, b): + return f"{a} & {b}" + + @staticmethod + def bitwise_not(a): + return f"~{a}" + + @staticmethod + def bitwise_or(a, b): + return f"{a} | {b}" + + @staticmethod + def bitwise_xor(a, b): + return f"{a} ^ {b}" + + @staticmethod + def bitwise_left_shift(a, b): + return f"{a} << {b}" + + @staticmethod + def bitwise_right_shift(a, b): + return f"{a} >> {b}" + + @staticmethod + def rand(seed, offset): + offset = f"({offset}).to(tl.uint32)" + return 
f"tl.rand({seed}, {offset})" + + @staticmethod + def randn(seed, offset): + offset = f"({offset}).to(tl.uint32)" + return f"tl.randn({seed}, {offset})" + + @staticmethod + def randint64(seed, offset, low, high): + offset = f"({offset}).to(tl.uint32)" + return f"triton_helpers.randint64({seed}, {offset}, {low}, {high})" + + @staticmethod + def load_seed(name, offset): + var = V.kernel.args.input(name) + return ( + f"tl.load({var} + {V.kernel.args.seed_offset('load_seed_offset', offset)})" + ) + + @staticmethod + def rsqrt(x): + return f"tl.math.rsqrt({x})" + + @staticmethod + def log1p(x): + return f"tl.math.log1p({x})" + + @staticmethod + def tan(x): + return f"tl.math.tan({x})" + + @staticmethod + def tanh(x): + return f"tl.math.tanh({x})" + + @staticmethod + def sigmoid(x): + return f"tl.sigmoid({x})" + + @staticmethod + def libdevice_sigmoid(x): + return f"1/(1 + tl.math.exp(-({x})))" + + @staticmethod + def signbit(x): + # XX: This is wrong for the value -0.0 in floating point + return f"tl.math.signbit({x}) if ({x}).dtype is tl.float32 else {x} < 0" + + @staticmethod + def fmod(a, b): + return f"tl.math.fmod({a}, {b})" + + @staticmethod + def pow(a, b): + return f"tl.math.pow({a}, {b})" + + @staticmethod + def log(x): + return f"tl.log({x})" + + @staticmethod + def libdevice_log(x): + return f"tl.math.log({x})" + + @staticmethod + def isinf(x): + return f"tl.math.isinf({x}).to(tl.int1)" + + @staticmethod + def isnan(x): + return f"tl.math.isnan({x}).to(tl.int1)" + + @staticmethod + def round(x): + return f"tl.math.nearbyint({x})" + + @staticmethod + def floor(x): + return f"tl.math.floor({x})" + + @staticmethod + def floordiv(a, b): + # See the comment in lowering.div_mode. a and b are integer type. + # Similar to div_floor_kernel_cuda in pytorch core. + # Notice that // in triton behaves as truncdiv instead of floordiv + quot = f"{a} // {b}" + rem = f"{a} % {b}" + return f"tl.where(({a} < 0) != ({b} < 0), tl.where({rem} != 0, {quot} - 1, {quot}), {quot})" + + @staticmethod + def sign(x): + def to_int(s): + return f"{s}.to(tl.int8)" + + left = to_int(ops.lt("0", x)) + right = to_int(ops.lt(x, "0")) + sub = ops.sub(left, right) + return f"{sub}.to({x}.dtype)" + + @staticmethod + def trunc(x): + return f"tl.math.trunc({x})" + + @staticmethod + def truncdiv(a, b): + # See the comment in lowering.div_mode. a and b are integer type. + # Notice that // in triton behaves as truncdiv instead of floordiv + return f"{a} // {b}" + + @staticmethod + def ceil(x): + return f"tl.math.ceil({x})" + + +@dataclasses.dataclass +class IterationRanges: + """ + Each range tree represents multiple sets of iteration indexing + in a single tiled dimension in the output kernel. + + If you have two loops ranges one (4, 3, 2) and another (4, 6), + then the range tree will be: + 4 (i0) + 3 (i1) 6 (i3) + 2 (i2) + Where i0 is shared between both loops, but then the split into + different indexing vars. All loop ranges must iterate over + the same number of elements. 
+ """ + + def __init__( + self, + name: str, + var_list: List[sympy.Symbol], + var_ranges: Dict[sympy.Symbol, sympy.Expr], + numel: sympy.Expr, + prefix: str, + *, + kernel: TritonKernel, + divisor=sympy.Integer(1), + length=sympy.Integer(1), + ): + super().__init__() + self.name = name + self.var_list = var_list + self.var_ranges = var_ranges + self.numel = numel + self.prefix = prefix + self.divisor = divisor + self.length = length + self.kernel = kernel + + def is_loop(self): + return self.prefix == "r" and not self.kernel.persistent_reduction + + +class IterationRangesRoot(IterationRanges): + def __init__( + self, + name: str, + numel: sympy.Expr, + prefix: str, + index: int, + kernel: TritonKernel, + pid_cache=None, + ): + if pid_cache is None: + pid_cache = {} + super().__init__( + name=name, + var_list=[], + var_ranges={}, + numel=numel, + prefix=prefix, + kernel=kernel, + ) + self.index = index + # Store all the nodes in one flat list + self.nodes: Dict[sympy.Expr, IterationRangesEntry] = {} + # This is for re-ordering program ID in triton mm template + # pid_cache["tl.program_id(0)"] = pid_m + self.pid_cache: Dict[str, str] = pid_cache + + def cache_clear(self): + for node in self.nodes.values(): + node.cache_clear() + + def lookup(self, divisor, length): + """ + Lookup a given RangeTreeEntry, creating it if needed + """ + if V.graph.sizevars.statically_known_equals(divisor * length, self.numel): + expr = FloorDiv(sympy_symbol(f"{self.prefix}index"), divisor) + else: + expr = ModularIndexing(sympy_symbol(f"{self.prefix}index"), divisor, length) + + if expr not in self.nodes: + node = IterationRangesEntry( + f"{self.prefix}{next(V.kernel.iter_vars_count)}", + divisor, + length, + expr, + self, + ) + V.kernel.range_tree_nodes[node.symbol()] = node + self.var_list.append(node.symbol()) + self.var_ranges[node.symbol()] = length + self.nodes[expr] = node + return self.nodes[expr] + + def construct_entries(self, lengths: List[sympy.Expr]): + divisor = sympy.Integer(1) + itervars = [] + for length in reversed(lengths): + itervars.append(self.lookup(divisor, length)) + divisor = divisor * length + return list(reversed(itervars)) + + def construct(self, lengths: List[sympy.Expr]): + return [e.symbol() for e in self.construct_entries(lengths)] + + def vars_and_sizes(self, index: sympy.Expr): + """Figure out vars from this tree used in index""" + nodes = [V.kernel.range_tree_nodes.get(s) for s in index.free_symbols] + nodes = [n for n in nodes if n and n.prefix == self.prefix] + nodes.sort(key=lambda x: V.graph.sizevars.size_hint(x.divisor)) + divisor = sympy.Integer(1) + index_vars = [] + sizes = [] + + def add(node): + nonlocal divisor + index_vars.append(node.symbol()) + sizes.append(node.length) + divisor = divisor * node.length + + for node in nodes: + if not V.graph.sizevars.statically_known_equals(node.divisor, divisor): + # fill in unused index var + add(self.lookup(divisor, FloorDiv(node.divisor, divisor))) + divisor = node.divisor + add(node) + if not V.graph.sizevars.statically_known_equals(self.numel, divisor): + # fill in unused index var + add(self.lookup(divisor, FloorDiv(self.numel, divisor))) + + return list(reversed(index_vars)), list(reversed(sizes)) + + def ranges_code(self): + size = self.kernel.indexing_size_str(self.index, self.prefix) + index_dtype = self.kernel.index_dtype + convert = f".to({index_dtype})" if index_dtype != "tl.int32" else "" + return f"tl.arange(0, {self.prefix.upper()}BLOCK){size}{convert}" + + def scalar_code(self, value): + index_dtype = 
self.kernel.index_dtype + ndim = self.kernel.triton_tensor_ndim() + size = [1] * ndim + return f"tl.full({size}, {value}, {index_dtype})" + + def get_pid(self): + key = f"tl.program_id({self.index})" + pid = self.pid_cache.get(key, key) + if self.kernel.index_dtype != "tl.int32": + return f"{pid}.to({self.kernel.index_dtype})" + return pid + + def codegen_header(self, code, no_x_dim=False): + x = self.prefix + if self.is_loop(): + code.writeline(f"{self.name} = {x}offset + {x}base") + elif x == "r" and self.kernel.persistent_reduction: + # no need to "roffset = " + code.writeline( + f"{self.name} = {self.ranges_code()}", + ) + else: + if not no_x_dim: + line = f"{x}offset + {self.ranges_code()}" + else: + line = self.scalar_code(f"{x}offset") + code.writelines( + [ + f"{x}offset = {self.get_pid()} * {x.upper()}BLOCK", + f"{self.name} = {line}", + ] + ) + code.writeline(f"{x}mask = {self.name} < {x}numel") + + +class IterationRangesEntry(IterationRanges): + def __init__( + self, + name: str, + divisor: sympy.Expr, + length: sympy.Expr, + expr: sympy.Expr, + parent: IterationRanges, + ): + super().__init__( + name=name, + numel=parent.numel / length, + var_list=parent.var_list, + var_ranges=parent.var_ranges, + prefix=parent.prefix, + divisor=divisor, + length=length, + kernel=parent.kernel, + ) + self.parent = parent + self.codegen = functools.lru_cache(None)(self._codegen) + self.expr = expr + + def set_name(self, name): + self.codegen = lambda: name # type: ignore[assignment] + self.codegen.cache_clear = lambda: None # type: ignore[method-assign] + self.name = name + + def cache_clear(self): + self.codegen.cache_clear() + + def writeline(self, line): + if self.is_loop(): + V.kernel.indexing_code.writeline(line) + else: + # lift non-reduction stores outside loop + V.kernel.body.writeline(line) + + def _codegen(self): + self.writeline(f"{self.name} = " + texpr(V.kernel.rename_indexing(self.expr))) + return self.name + + def precomputed_args(self): + # for dynamic shapes, find parts of indexing expressions that have to be precomputed + precomputed_args: List[sympy.Expr] = [] + if isinstance(self.expr, sympy.Symbol): + return precomputed_args + assert isinstance(self.expr, (FloorDiv, ModularIndexing)), type(self.expr) + for arg in self.expr.args[1:]: + if not isinstance(arg, (sympy.Integer, sympy.Symbol)): + symbols = arg.free_symbols + if len(symbols) > 0 and all(s.name.startswith("s") for s in symbols): + precomputed_args.append(arg) + return precomputed_args + + def symbol(self): + return sympy_symbol(self.name) + + def __hash__(self): + return hash(self.name) + + def __eq__(self, other): + return self.name == other.name + + +class TritonKernel(Kernel): + overrides = TritonOverrides # type: ignore[assignment] + sexpr = pexpr + + def __init__( + self, + *groups, + index_dtype: str, + mutations: Optional[Set[str]] = None, + pid_cache=None, + reduction_hint=ReductionHint.DEFAULT, + min_elem_per_thread=0, + ): + if pid_cache is None: + pid_cache = {} + super().__init__() + self.numels = [V.graph.sizevars.simplify(s) for s in groups] + self.mutations: Set[str] = mutations if mutations is not None else set() + self.range_trees: List[IterationRangesRoot] = [] + self.range_tree_nodes: Dict[sympy.Symbol, IterationRangesEntry] = {} + self.iter_vars_count = itertools.count() + self.inside_reduction = self.numels[-1] != 1 + self.body = IndentedBuffer() + self.indexing_code = IndentedBuffer() + self.suffix: IndentedBuffer = IndentedBuffer() # type: ignore[assignment] + self.outside_loop_vars: Set[Any] 
= set() + self.reduction_hint = reduction_hint + self.index_dtype: str = index_dtype + self.min_elem_per_thread = min_elem_per_thread + self.last_usage: Set[str] = set() + + self.persistent_reduction: bool = self.should_use_persistent_reduction() + self.no_x_dim = ( + self.reduction_hint == ReductionHint.INNER + and self.persistent_reduction + and len(self.numels) == 2 + and self.numels[-1] >= 256 + ) + self.initialize_range_tree(pid_cache) + + # A set of autotuning hints to pass as part of triton_meta + self.autotune_hints: Set[AutotuneHint] = set() + + # define this in a closure to make cache local to object + @functools.lru_cache(None) + def simplify_indexing(index: sympy.Expr): + index = V.graph.sizevars.simplify_with_ranges(index, self.var_ranges()) + for tree in self.range_trees: + index = self.combine_contiguous_dims(index, tree) + return index + + self.simplify_indexing = simplify_indexing + + def need_numel_args(self): + r""" + Indicate whether we need provide numel as arguments for the generated + kernel calls in the benchmark. + + Should be true for pointwise/reduction kernels but false for triton + matmul kernels. + """ + return True + + def should_use_persistent_reduction(self) -> bool: + """ + Heuristic to set self.persistent_reduction and add guards + if needed. + """ + if not (self.inside_reduction and config.triton.persistent_reductions): + return False + threshold = { + ReductionHint.INNER: 1024, + }.get(self.reduction_hint, 64) + last_numel = self.numels[-1] + if not isinstance(last_numel, (int, sympy.Integer)): + # Not static + return False + hint = V.graph.sizevars.size_hint(last_numel) + if hint > threshold: + return False + # will need to recompile if we cross a larger power of 2 boundary + V.graph.sizevars.guard_leq(self.numels[-1], next_power_of_2(hint)) + return True + + def set_last_usage(self, nodes): + if not self.inside_reduction or self.persistent_reduction: + return + self.last_usage = set( + itertools.chain.from_iterable( + n.last_usage for n in nodes if n is not EnableReduction + ) + ) + + def initialize_range_tree(self, pid_cache): + names = list( + reversed(["xindex", "yindex", "zindex"][: len(self.numels) - 1]) + ) + ["rindex"] + for i in range(len(self.numels)): + pid_idx = i if names[i][0] == "r" else "xyz".find(names[i][0]) + self.range_trees.append( + IterationRangesRoot( + names[i], self.numels[i], names[i][0], pid_idx, self, pid_cache + ) + ) + for tree in self.range_trees: + # reduction indexing goes inside a loop + if not tree.is_loop(): + tree.codegen_header(self.body, self.no_x_dim) + if self.inside_reduction and self.range_trees[-1].is_loop(): + # workaround for this issue: + # https://gist.github.com/jansel/6527126f781559095c5531f98a4235a7 + self.body.writeline(f"rbase = {self.range_trees[-1].ranges_code()}") + + def disable_reduction(self): + @contextlib.contextmanager + def ctx(): + if self.numels[-1] == 1: + assert not self.inside_reduction + yield + return + if not self.persistent_reduction: + # calling codegen_body() will flush all the pending buffers + # and write out a reduction loop + self.codegen_body() + self.inside_reduction = False + try: + yield + if not self.persistent_reduction: + # flush out any code before opening the next loop + self.codegen_body() + finally: + self.inside_reduction = True + + return ctx() + + def set_ranges(self, *lengths): + assert len(lengths) == len(self.range_trees) + return [ + ranges.construct(length) + for length, ranges in zip(lengths, self.range_trees) + ] + + @staticmethod + def 
_split_iteration_ranges( + groups: Iterable[sympy.Expr], lengths: List[List[sympy.Expr]] + ): + sv = V.graph.sizevars + new_ranges: List[List[sympy.Expr]] = [[] for _ in groups] + remaining = [sv.simplify(g) for g in groups] + var_count = itertools.count() + + def add_range(i, expr): + expr = sv.simplify(expr) + if not sv.statically_known_multiple_of(remaining[i], expr): + raise CantSplit() + # guard on the last item out + remaining[i] = FloorDiv(remaining[i], expr) + new_ranges[i].append(expr) + return next(var_count) + + def make_combined(size, idx1, idx2): + def getter(flat_vars): + return size * flat_vars[idx1] + flat_vars[idx2] + + return getter + + return_getters_groups = [] + current_group = 0 + for length_group in lengths: + return_getters = [] + for size in length_group: + if sv.statically_known_equals(size, 1): + return_getters.append(lambda _: sympy.Integer(0)) + continue + + while ( + current_group < len(remaining) + and sv.size_hint(remaining[current_group]) == 1 + ): + # scroll to next group with remaining elements + current_group += 1 + + if sv.size_hint(size) > sv.size_hint(remaining[current_group]): + # need to break size in two + if not sv.statically_known_multiple_of( + size, remaining[current_group] + ): + raise CantSplit() + size1 = remaining[current_group] + size2 = FloorDiv(size, remaining[current_group]) + return_getters.append( + make_combined( + size2, + add_range(current_group, size1), + add_range(current_group + 1, size2), + ) + ) + else: + return_getters.append( + operator.itemgetter(add_range(current_group, size)) + ) + return_getters_groups.append(return_getters) + + assert all( + V.graph.sizevars.size_hint(s) == 1 for s in remaining + ), f"failed to set ranges {remaining} {lengths}" + + return new_ranges, return_getters_groups + + @classmethod + def is_compatible( + cls, groups: Iterable[sympy.Expr], lengths: List[List[sympy.Expr]] + ): + try: + cls._split_iteration_ranges(groups, lengths) + return True + except CantSplit: + return False + + def split_and_set_ranges(self, lengths: List[List[sympy.Expr]]): + """ + We may want to fuse `for i0 in s0*s1` into a tiled kernel with groups (s0, s1). + + To do this we need to split up the iteration space of i0 into something like: + for i1 in s0: + for i2 in s1: + i0 = i1*s1 + i2 + .... + + This function matches and resplits lengths to the groups of + this kernel to enable tiled + non-tiled fusions. + """ + groups = [rt.numel for rt in self.range_trees] + if not self.inside_reduction: + groups[-1] = sympy.Integer(1) + + if len(lengths) == len(self.range_trees) and all( + V.graph.sizevars.simplify(sympy_product(x) - g) == 0 + for x, g in zip(lengths, groups) + ): + return self.set_ranges(*lengths) + + new_ranges, return_getters_groups = self._split_iteration_ranges( + groups, lengths + ) + itervars = list(itertools.chain(*self.set_ranges(*new_ranges))) + return [[fn(itervars) for fn in fns] for fns in return_getters_groups] + + def is_indirect_indexing(self, index: sympy.Expr): + # tmpX means indirect indexing + return free_symbol_startswith(index, "tmp") + + def is_broadcasted(self, index: sympy.Expr): + # Note. This may not be correct when there is indirect indexing + if self.is_indirect_indexing(index): + return False + + index_numels = [1] * len(self.numels) + for symbol in index.free_symbols: + if symbol not in self.range_tree_nodes: + # Non-iterated variables, e.g. 
strides + continue + entry = self.range_tree_nodes[symbol] + assert isinstance(entry.parent, IterationRangesRoot) + index_numels[entry.parent.index] *= entry.length + + # If the index variables only iterate over a subset of the kernel + # numels, then it must be broadcasted. + simplify = V.graph.sizevars.simplify + return any( + simplify(idx_range) != simplify(iter_range) + for idx_range, iter_range in zip(index_numels, self.numels) + ) + + def combine_contiguous_dims(self, index: sympy.Expr, tree: IterationRangesRoot): + """ + More aggressive simplification to merge contiguous dims + """ + if isinstance(index, (sympy.Integer, sympy.Symbol)): + return index + index_vars, sizes = tree.vars_and_sizes(index) + if len(sizes) <= 1: + return index + new_sizes, reindex, prune = V.graph.sizevars._simplify_loops( + index_vars, sizes, index_prevent_reordering([index], index_vars, sizes) + ) + if new_sizes == sizes: + return index + new_index_vars = tree.construct(new_sizes) + new_index = sympy_subs(index, dict(zip(index_vars, reindex(new_index_vars)))) + return new_index + + def index_to_str(self, index: sympy.Expr) -> str: + """ + Convert an index expr to a string that can be used in triton code. + e.g. a sympy expression "s2" may actually appear as "ks1" in the triton kernel. + + Index expressions often need to be passed in as arguments to the triton kernel. + Rename_indexing and codegen_indexing keep track of the needed indices and add + new parameters to the function signature. + """ + return texpr(self.rename_indexing(self.codegen_indexing(index))) + + def indexing( + self, + index: sympy.Expr, + *, + copy_shape=None, + dense_indexing=False, + override_mask=None, + ): + """ + Compute the index and mask to pass to tl.load() or tl.store() + """ + index = self.simplify_indexing(index) + index = sympy_subs(index, V.graph.sizevars.precomputed_replacements) + # if simple replacements didn't get rid of floor/ceil, try full subs + if len(index.atoms(sympy.floor)) or len(index.atoms(sympy.ceiling)): + index = index.subs(V.graph.sizevars.precomputed_replacements) + # last resort, if no range vars are in the expr, hoist it + # TODO instead of trying to blindly find complicated exprs, we should hoist the + # inputs/outputs sizes and strides, but at the time indexing is generated + # kernel inputs and outputs are not set yet, we'd need a deeper refactor + # to do it this way + + if len(index.atoms(sympy.ceiling)): + for a in index.atoms(sympy.ceiling): + # for nested exprs, atoms yields top level first (?) 
+ # so if everything goes fine, lower level replacements will come up empty + symbols = a.free_symbols + if len(symbols) > 0 and all( + s.name.startswith("s") or s.name.startswith("ps") for s in symbols + ): + replacements = {a: V.graph.sizevars.lookup_precomputed_size(a)} + index = sympy_subs(index, replacements) + + index_vars = index.free_symbols + index = self.simplify_indexing(index) + index_str = self.index_to_str(index) + + mask_vars: Set[str] = set() + for var in index_vars: + assert isinstance(var, sympy.Symbol) + if override_mask: + pass + elif var.name.startswith("tmp"): + # indirect indexing + cse_var = self.cse.varname_map[var.name] + mask_vars.update(cse_var.mask_vars) + elif var.name.startswith(("s", "ps", "i")): + pass + else: + # var is one of xN, yN or rN + assert var.name[0] in "xyr", var.name + mask_vars.add(f"{var.name[0]}mask") + + need_dense = ( + config.triton.dense_indexing + or dense_indexing + or self._load_mask is not None + ) and index != 0 + + have_dense = True + have_loop_vars = False + dense_mask_vars = set() + + for tree in self.range_trees: + if tree.prefix == "r" and not self.inside_reduction: + continue + if index_vars.intersection(tree.var_list): + have_loop_vars = True + else: + have_dense = False + dense_mask_vars.add(f"{tree.prefix}mask") + + expand_str = None + + if isinstance(index, sympy.Integer): + expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() + index_str = f"tl.full({expand_str}, {index_str}, tl.int32)" + return index_str, set(), "None", expand_str + + if need_dense and not have_dense: + expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() + index_str = f"tl.broadcast_to({index_str}, {expand_str})" + mask_vars = dense_mask_vars + elif not have_loop_vars and copy_shape: + index_str = f"tl.broadcast_to({index_str}, {copy_shape}.shape)" + mask_vars = dense_mask_vars + + if override_mask: + mask_vars = {override_mask} + + if self._load_mask: + mask_vars.add(self._load_mask) + + self.filter_masks(mask_vars) + + mask_str = " & ".join(sorted(map(str, mask_vars))) if mask_vars else "None" + return index_str, mask_vars, mask_str, expand_str + + def filter_masks(self, mask_vars): + for tree in self.range_trees: + # Masks are superfluous if we only have one element + if V.graph.sizevars.statically_known_equals(tree.numel, 1): + mask_vars.discard(f"{tree.prefix}mask") + continue + # Masks are superfluous if numel is a multiple of BLOCK + # (We use the fact that BLOCK is required by triton to be a power of 2) + if tree.prefix.upper() not in config.triton.max_block: + continue + max_block = config.triton.max_block[tree.prefix.upper()] + # Optional optimization: if block divides numel exactly, we will + # never need to do a masked load to handle stragglers at the end. + # It's faster to avoid masking at all. But it is sound to always + # mask. 
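+ # For example, assuming the configured max XBLOCK is 1024: with xnumel = 8192 every
+ # program instance covers a full block, `xindex < xnumel` is always true, and
+ # xmask can be discarded without changing the result.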
+ if V.graph.sizevars.statically_known_multiple_of(tree.numel, max_block): + mask_vars.discard(f"{tree.prefix}mask") + + def var_ranges(self): + return dict( + itertools.chain.from_iterable( + tree.var_ranges.items() for tree in self.range_trees + ) + ) + + def codegen_indexing(self, expr: sympy.Expr): + expr = V.graph.sizevars.simplify_with_ranges(expr, self.var_ranges()) + for sym in sorted(expr.free_symbols, key=str): + if sym in self.range_tree_nodes: + # if indexing expression is complicated, we precompute it on the host side + # and send the result as a kernel argument + replacements = {} + for ps in self.range_tree_nodes[sym].precomputed_args(): + replacements[ps] = V.graph.sizevars.lookup_precomputed_size(ps) + if len(replacements) > 0: + self.range_tree_nodes[sym].expr = sympy_subs( + self.range_tree_nodes[sym].expr, replacements + ) + self.range_tree_nodes[sym].codegen() + return expr + + @contextlib.contextmanager + def mask_loads(self, mask): + """Context manager to add an additional mask to tl.load/store""" + prior = self._load_mask + if prior: + mask = self.cse.generate(self.compute, f"{mask} & {prior}") + + self._load_mask = mask + try: + # TODO(jansel): do we need a reshape here? + yield mask + finally: + self._load_mask = prior + + def generate_assert(self, check): + return torch.version.hip is None and super().generate_assert(check) + + def load_mask(self, var): + mask = "" + mask_vars = set(var.mask_vars) + if self._load_mask: + mask_vars.add(self._load_mask) + + if mask_vars: + mask = ( + f"{next(iter(mask_vars))}" + if len(mask_vars) == 1 + else f"({' & '.join(str(v) for v in mask_vars)})" + ) + return mask + + @property + def assert_function(self) -> str: + return "tl.device_assert" + + def get_strides_of_load(self, index: sympy.Expr): + """ + This gets the stride of the index for each of the tiling variables + (technically, it does it at index 0) + + For example, if + xindex = x0 + 512*x1 + 1024*r0 + x0 = (xindex//512) + x1 = (xindex % 512) + r0 = rindex // 1024 + + this function would return + {xindex: 512, rindex: 1024} + """ + index_to_tile_indexes = {k: v.expr for k, v in self.range_tree_nodes.items()} + index_in_tile_vars = sympy_subs(index, index_to_tile_indexes) + strides = {} + for range_tree in self.range_trees: + s = sympy_symbol(range_tree.name) + strides[s] = sympy_subs(index_in_tile_vars, {s: 1}) - sympy_subs( + index_in_tile_vars, {s: 0} + ) + return strides + + def load(self, name: str, index: sympy.Expr): + var = self.args.input(name) + indirect_indexing = self.is_indirect_indexing(index) + original_index = index + index, mask_vars, mask, expand_str = self.indexing(index) + + # Keep the variable in cache if were going to reuse it. Equiv., if any of the following hold + # 1) We are doing broadcasting + # 2) It is a non-coalesced load. The intuition is that if it's + # non-coalesced, we will likely load each element multiple times in + # practice. + # 3) It will be used later and it won't be CSE'd. 
Equiv., if all the following hold + # 3.1) We are in a reduction loop + # 3.2) Its not its last use + # 3.3) This load will not be lifted to the body + # + is_coalesced = any( + i == 1 for i in self.get_strides_of_load(original_index).values() + ) + if self.is_broadcasted(original_index): + ep = ", eviction_policy='evict_last'" + elif not is_coalesced: + ep = ", eviction_policy='evict_last'" + elif self.inside_reduction and not self.persistent_reduction: + if name in self.args.inplace_buffers: + names = set(self.args.inplace_buffers[name].other_names) + else: + names = {name} + last_use = len(names & self.last_usage) > 0 + evict_last = not last_use and ("rmask" in mask or indirect_indexing) + if evict_last: + ep = ", eviction_policy='evict_last'" + else: + ep = ", eviction_policy='evict_first'" + else: + ep = "" + # "other" below is a workaround for https://github.com/openai/triton/issues/737 + # for bool, even though it's likely subject to the same bug, setting `other` leads + # to LLVM errors so we are skipping it for now + if ("tmp" in mask or "rmask" in mask) and V.graph.get_dtype(name) != torch.bool: + other = ", other=0.0" + else: + other = "" + + append_broadcast = None + if V.graph.is_unspec_arg(name): + line = var + else: + if isinstance(original_index, sympy.Integer): + line = f"tl.load({var} + ({original_index}))" + append_broadcast = expand_str + else: + line = f"tl.load({var} + ({index}), {mask}{ep}{other})" + + dtype = V.graph.get_dtype(name) + if dtype in (torch.float16, torch.bfloat16): + line += ".to(tl.float32)" + if dtype == torch.bool and torch.version.hip is None: + # Workaround for https://github.com/openai/triton/issues/2151 + # tl.load returns int8 when loading from pointer to int1 + # NOTE: Currently causes hangs on bool UTs for ROCm + line += ".to(tl.int1)" + + if "tmp" in mask: + # Masked loads must come after the mask is computed + load_buffer = self.compute + elif ( + self.inside_reduction + and not self.persistent_reduction + and "rmask" not in mask + and not indirect_indexing + ): + # can lift a common load outside of reduction loop + # One exception is when this is an indirect_load. + load_buffer = self.body + else: + load_buffer = self.loads + + result_var = self.cse.generate(load_buffer, line) + assert isinstance(result_var, TritonCSEVariable) + result_var.mask_vars = mask_vars + + if append_broadcast: + line = f"tl.broadcast_to({result_var}, {append_broadcast})" + result_var = self.cse.generate(load_buffer, line) + + if not self.inside_reduction or "rmask" not in mask: + self.outside_loop_vars.add(result_var) + + return result_var + + def store(self, name, index, value, mode=None): + var = self.args.output(name) + indirect_indexing = self.is_indirect_indexing(index) + original_index = index + index, mask_vars, mask, expand_str = self.indexing(index, dense_indexing=True) + + # Guard against write-after-read corruption in triton. + # See # https://github.com/openai/triton/issues/1615 + # This triton bug means that a load which is broadcasted over multiple + # warps may see the result of a store that happens later in the triton + # program. The workaround is to add a barrier before storing, which + # enforces that all warps have already read the data. 
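+ # For example (hypothetical buffer/variable names): when the output is an in-place
+ # buffer and the store index is broadcasted, the emitted store looks roughly like
+ #     tl.debug_barrier()
+ #     tl.store(in_out_ptr0 + (x0), tmp4, xmask)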
+ is_inplace = name in self.args.inplace_buffers + is_broadcasted = self.is_broadcasted(original_index) + if is_inplace and is_broadcasted: + self.stores.writeline(DeferredLine(name, "tl.debug_barrier()")) + + if mode is None: + line = f"tl.store({var} + ({index}), {value}, {mask})" + elif mode == "atomic_add": + line = f"tl.atomic_add({var} + ({index}), {value}, {mask})" + else: + raise NotImplementedError(f"store mode={mode}") + self.stores.writeline(DeferredLine(name, line)) + if not self.inside_reduction: + self.outside_loop_vars.add(value) + + def bucketize( + self, + values: CSEVariable, + offsets_name: str, + offsets_size: sympy.Expr, + indexing_dtype: torch.dtype, + right: bool, + ): + """ + See [Note: Inductor bucketize op] + """ + + # Triton performance for bucketize_binary_search is much better when the number + # of threads equals the number of elements. + # If we're trying to use a bucketize kernel, we should make sure that an + # autotuning config with num_elements_per_warp=32 exists. + self.autotune_hints.add(AutotuneHint.ELEMENTS_PER_WARP_32) + + offsets_ptr = self.args.input(offsets_name) + block_size = self.dense_size_str() + offsets_size_str = self.index_to_str(offsets_size) + + if indexing_dtype == torch.int32: + triton_dtype = "tl.int32" + elif indexing_dtype == torch.int64: + triton_dtype = "tl.int64" + else: + raise NotImplementedError( + "Bucketize only supports indexing with int32 and int64" + ) + + result = self.cse.generate( + self.compute, + f"triton_helpers.bucketize_binary_search({values}, {offsets_ptr}, {triton_dtype}, {right}, {offsets_size_str}, {block_size})", # noqa: B950 line too long + ) + + return result + + def reduction_resize(self, value): + ndims = self.triton_tensor_ndim() + if ndims == 1: + return f"triton_helpers.promote_to_tensor({value})" + + sizes = [":"] * ndims + sizes[-1] = "None" + return f"{value}[{', '.join(sizes)}]" + + @staticmethod + def _map_tuple_or_scalar(fn, value): + if isinstance(value, tuple): + return tuple(map(fn, value)) + return fn(value) + + def reduction(self, dtype, src_dtype, reduction_type, value): + assert self.inside_reduction + masks = {f"{tree.prefix}mask" for tree in self.range_trees} + self.filter_masks(masks) + masks = sorted(masks) + if self._load_mask: + masks.append(self._load_mask) + reduction_range_prefix = self.range_trees[-1].prefix + reduction_sizes = ["None" for _ in self.range_trees] + reduction_sizes[-1] = ":" + + # Say we have + # tmp0 = ops.constant(1, torch.int64) + # tmp1 = ops.reduction(torch.int64, torch.int64, "sum", tmp0) + # tmp0 in the triton code is either a scalar, or single-element tensor + # so if we emit tl.sum directly, it will only give 1 instead of RBLOCK * 1 + # To avoid this, we broadcast to the expected shape first. 
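+ # Concretely (shapes are illustrative): tl.sum(tl.full([1, 1], 1, tl.int64), 1) yields 1,
+ # whereas tl.sum(tl.broadcast_to(tl.full([1, 1], 1, tl.int64), [XBLOCK, RBLOCK]), 1)
+ # yields RBLOCK per row, which is the intended value when summing a constant.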
+ dense_size_str = self.dense_size_str() + value = self._map_tuple_or_scalar( + lambda v: self.cse.generate( + self.compute, f"tl.broadcast_to({v}, {dense_size_str})" + ), + value, + ) + + dim: int + root_op: str + + def final_reduction(value): + use_helper = reduction_type in {"any", "max", "min", "prod"} + module = "triton_helpers" if use_helper else "tl" + if reduction_type in {"max", "min"}: + return self.reduction_resize( + f"{module}.{reduction_type}2({value}, {dim})" + ) + return self.reduction_resize(f"{module}.{reduction_type}({value}, {dim})") + + def final_argreduce(buffer, result_var, value, index): + buffer.splice( + f"""\ + _, {result_var}_tmp = triton_helpers.{root_op}_with_index({value}, {index}, {dim}) + {result_var} = {self.reduction_resize(f'{result_var}_tmp')} + """ + ) + + cache_key = (src_dtype, reduction_type, value) + if cache_key in self.cse.reduction_cache: + return self.cse.reduction_cache[cache_key] + + dim = len(self.range_trees) - 1 - int(bool(self.no_x_dim)) + acc_type = triton_acc_type(src_dtype) + result_var: Any = self.cse.newvar() + result_var.mask_vars = {var for var in masks if var[0] != "r"} + cond = " & ".join(masks) + + if self.persistent_reduction: + default = ir.Reduction.default_value(reduction_type, src_dtype) + default = self._map_tuple_or_scalar(triton_constant, default) + + def _mask_value(value, default): + return self.cse.generate( + self.compute, f"tl.where({cond}, {value}, {default})" + ) + + if isinstance(value, tuple): + masked_value = [_mask_value(v, d) for v, d in zip(value, default)] + else: + masked_value = _mask_value(value, default) + + if reduction_type in {"argmax", "argmin"}: + accumulator_index = str( + self.cse.generate( + self.compute, + f"tl.broadcast_to({reduction_range_prefix}index, {masked_value}.shape)", + ) + ) + root_op = {"argmax": "max", "argmin": "min"}[reduction_type] + final_argreduce( + self.compute, result_var, masked_value, accumulator_index + ) + elif reduction_type == "welford_reduce": + # For persistent reductions, don't bother with + # welford's algorithm since it uses more registers, and + # taking two reductions doesn't increase memory usage. 
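+ # The two-pass form computed below is, in pseudocode:
+ #     mean = sum(x) / rnumel
+ #     m2   = sum((x - mean) ** 2)
+ # which matches Welford's (mean, m2, weight) outputs when the whole reduction
+ # fits in a single block.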
+ sum_ = ops.reduction(dtype, dtype, "sum", value) + self.inside_reduction = False + rnumel = ops.index_expr(self.numels[-1], dtype) + mean = ops.truediv(sum_, rnumel) + + self.inside_reduction = True + dx = ops.sub(value, mean) + dx2 = ops.mul(dx, dx) + m2 = ops.reduction(dtype, dtype, "sum", dx2) + result_var = (mean, m2, rnumel) + elif reduction_type == "welford_combine": + mean, m2, weight = masked_value + welford = f"triton_helpers.welford({mean}, {m2}, {weight}, {dim})" + mean, m2, weight = (self.cse.newvar() for _ in range(3)) + self.compute.writeline(f"{mean}, {m2}, {weight} = {welford}") + + result_var = tuple( + self.cse.generate(self.compute, self.reduction_resize(var_name)) + for var_name in (mean, m2, weight) + ) + else: + result_var = self.cse.generate( + self.compute, final_reduction(masked_value) + ) + else: + accumulator = f"_{result_var}" + default = ir.Reduction.default_accumulator(reduction_type, src_dtype) + default = self._map_tuple_or_scalar(triton_constant, default) + if not isinstance(default, tuple): + self.body.writeline( + f"{accumulator} = tl.full({self.dense_size_str()}, {default}, {acc_type})" + ) + + if reduction_type in {"argmax", "argmin"}: + accumulator_index = f"_{result_var}_index" + long_max = torch.iinfo(torch.int64).max + self.body.writeline( + f"{accumulator_index} = tl.full({self.dense_size_str()}, {long_max}, tl.int64)" + ) + root_op = {"argmax": "max", "argmin": "min"}[reduction_type] + + self.compute.splice( + f"""\ + {accumulator}_next, {accumulator_index}_next = triton_helpers.{root_op}imum_with_index( + {accumulator}, {accumulator_index}, {value}, {reduction_range_prefix}index + ) + {accumulator} = tl.where({cond}, {accumulator}_next, {accumulator}) + {accumulator_index} = tl.where({cond}, {accumulator_index}_next, {accumulator_index}) + """ + ) + final_argreduce(self.suffix, result_var, accumulator, accumulator_index) + elif is_welford_reduction(reduction_type): + accumulator = f"{result_var}_mean" + accumulator_m2 = f"{result_var}_m2" + accumulator_weight = f"{result_var}_weight" + self.body.writeline( + f"{accumulator} = tl.zeros({self.dense_size_str()}, {acc_type})" + ) + self.body.writeline( + f"{accumulator_m2} = tl.zeros({self.dense_size_str()}, {acc_type})" + ) + self.body.writeline( + f"{accumulator_weight} = tl.zeros({self.dense_size_str()}, {acc_type})" + ) + + if reduction_type == "welford_combine": + mean, m2, weight = value + self.compute.splice( + f"""\ + {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_combine( + {accumulator}, {accumulator_m2}, {accumulator_weight}, + {mean}, {m2}, {weight} + ) + """ + ) + else: + assert reduction_type == "welford_reduce" + self.compute.splice( + f"""\ + {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_reduce( + {value}, {accumulator}, {accumulator_m2}, {accumulator_weight}, + ) + """ + ) + + self.compute.splice( + f"""\ + {accumulator} = tl.where({cond}, {accumulator}_next, {accumulator}) + {accumulator_m2} = tl.where({cond}, {accumulator_m2}_next, {accumulator_m2}) + {accumulator_weight} = tl.where({cond}, {accumulator_weight}_next, {accumulator_weight}) + """ + ) + + result_mean = result_var + result_m2 = self.cse.newvar() + result_weight = self.cse.newvar() + self.suffix.splice( + f"""\ + {result_mean}_tmp, {result_m2}_tmp, {result_weight}_tmp = triton_helpers.welford( + {accumulator}, {accumulator_m2}, {accumulator_weight}, {dim} + ) + {result_mean} = {self.reduction_resize(f'{result_mean}_tmp')} + 
{result_m2} = {self.reduction_resize(f'{result_m2}_tmp')} + {result_weight} = {self.reduction_resize(f'{result_weight}_tmp')} + """ + ) + result_var = result_mean, result_m2, result_weight + else: + combine_fn = ir.get_reduction_combine_fn(reduction_type, src_dtype) + updated = combine_fn(accumulator, value) + self.compute.writeline( + f"{accumulator} = tl.where({cond}, {updated}, {accumulator})" + ) + + if src_dtype == torch.bool: + # This is only really used for aten.any. It changes the + # final reduction of a non-persistent reduction from + # tmp5 = triton_helpers.max(_tmp5, 1)[:, None] + # to + # tmp5 = triton_helpers.max(_tmp5.to(tl.int8), 1)[:, None].to(tl.int1) + # which is needed because tl.reduce doesn't support tl.int1 + accumulator = f"{accumulator}.to(tl.int8)" + result_type = triton_compute_type(dtype) + self.suffix.writeline( + f"{result_var} = {final_reduction(accumulator)}.to({result_type})" + ) + else: + self.suffix.writeline( + f"{result_var} = {final_reduction(accumulator)}" + ) + + self.cse.reduction_cache[cache_key] = result_var + + if isinstance(result_var, tuple): + self.outside_loop_vars |= set(result_var) + else: + self.outside_loop_vars.add(result_var) + + return result_var + + def store_reduction(self, name, index, value): + assert self.inside_reduction + self.inside_reduction = False + index, mask_vars, mask, _ = self.indexing(index) + assert "rmask" not in index + self.inside_reduction = True + + var = self.args.output(name) + self.suffix.writeline( + DeferredLine(name, f"tl.store({var} + ({index}), {value}, {mask})") + ) + + def codegen_body(self): + """ + Concat output code from index_code, loads, compute, stores, + suffix into self.body. + + For pointwise kernels, this is called just once at the end. + + For reduction kernels, this generates a loop over the reduction + axis. 
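+
+ Persistent reductions skip that loop; RBLOCK is set to cover the whole
+ reduction range, so the body is emitted just once.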
+ """ + if not ( + self.indexing_code + or self.loads + or self.stores + or self.compute + or self.suffix + ): + return + + if self.inside_reduction and not self.persistent_reduction: + self.body.writeline("for roffset in range(0, rnumel, RBLOCK):") + with self.body.indent(): + # last range tree is always reduction + self.range_trees[-1].codegen_header(self.body) + self.body.splice(self.indexing_code) + self.body.splice(self.loads) + self.body.splice(self.compute) + self.body.splice(self.stores) + + # invalidate any caches that came from inside the reduction loop + self.cse.invalidate(self.outside_loop_vars) + self.range_trees[-1].cache_clear() + else: + self.body.splice(self.indexing_code) + self.body.splice(self.loads) + self.body.splice(self.compute) + self.body.splice(self.stores) + self.body.splice(self.suffix) + self.indexing_code.clear() + self.loads.clear() + self.compute.clear() + self.stores.clear() + self.suffix.clear() + + def codegen_kernel_benchmark(self): + result = IndentedBuffer() + argdefs, call_args, signature = self.args.python_argdefs() + + result.writelines(["", "", "def get_args():"]) + with result.indent(): + name_cnt = itertools.count() + var_names = [] + for arg_name, arg_sig in zip(call_args, signature): + var_name = f"arg_{next(name_cnt)}" + buf = V.graph.get_buffer(arg_name) + if buf: + result.writeline( + f"{var_name} = rand_strided({V.graph.sizevars.size_hints(buf.get_size())}, {V.graph.sizevars.size_hints(buf.get_stride())}, device='{buf.get_device()}', dtype={buf.get_dtype()})" # noqa: B950 line too long + ) + elif arg_name in V.graph.constants: + # note that random seed is put in V.graph.constants + const_tensor = V.graph.constants[arg_name] + result.writeline( + f"{var_name} = rand_strided({V.graph.sizevars.size_hints(const_tensor.size())}, {V.graph.sizevars.size_hints(const_tensor.stride())}, device='{const_tensor.device}', dtype={const_tensor.dtype})" # noqa: B950 line too long + ) + elif isinstance(arg_sig, SizeArg): + symval_hint = V.graph.sizevars.size_hint(arg_sig.expr) + + # Force the seed_offset to be 0 so calls to the same kernel + # using different seed offset will have the same benchmark harness. + # We can dedup kernel definitions in this case. 
+ if "seed_offset" in arg_sig.name: + symval_hint = 0 + result.writeline(f"{var_name} = {symval_hint}") + else: + raise KeyError( + f"Don't find the buffer or const tensor for {arg_name}" + ) + var_names.append(var_name) + result.writeline(f"return {', '.join(var_names)},") + + result.writelines(["\n", "\n", "def call(args):"]) + grid = [] + extra_args = [] + extra_args_str = None + index = V.graph.scheduler.current_device.index + with result.indent(): + result.writeline(f"with torch.cuda._DeviceGuard({index}):") + with result.indent(): + result.writeline( + f"torch.cuda.set_device({index})" + ) # no-op to ensure context + for tree in self.range_trees: + expr = pexpr(V.graph.sizevars.size_hint(tree.numel)) + if tree.prefix != "r" or self.inside_reduction: + extra_args.append(expr) + if tree.prefix != "r": + grid.append(expr) + + stream_name = f"stream{index}" + result.writeline(f"{stream_name} = get_cuda_stream({index})") + + if self.need_numel_args(): + extra_args_str = ", ".join(map(str, extra_args)) + ", " + else: + extra_args_str = "" + + result.writeline( + f"{str(Placeholder.KERNEL_NAME)}.run(*args, {extra_args_str}grid=grid({', '.join(grid)}), stream={stream_name})" + ) + + # benchmark all configs + result.writelines(["\n", "\n", "def benchmark_all_configs(args):"]) + with result.indent(): + result.writeline(f"with torch.cuda._DeviceGuard({index}):") + with result.indent(): + result.writeline( + f"torch.cuda.set_device({index})" + ) # no-op to ensure context + result.writeline( + f"return {str(Placeholder.KERNEL_NAME)}.benchmark_all_configs(*args, {extra_args_str}grid=grid({', '.join(grid)}))" # noqa: B950 line too long + ) + + ninplace_args = len(unique(self.args.inplace_buffers.values())) + result.writelines(["\n", "\n", "if __name__ == '__main__':"]) + with result.indent(): + result.writeline("from torch._inductor.utils import get_num_bytes") + result.writeline("from triton.testing import do_bench") + result.writeline("") + + result.writeline("args = get_args()") + result.writeline( + "ms = do_bench(lambda: call(args), rep=40, fast_flush=True)" + ) + result.writeline( + f"num_gb = get_num_bytes(*args, num_in_out_args={ninplace_args}) / 1e9" + ) + result.writeline("gb_per_s = num_gb / (ms / 1e3)") + result.writeline( + 'print(f"{ms:.3f}ms {num_gb:.3f}GB {gb_per_s:.2f}GB/s")' + ) + + return result + + def imports_for_benchmark_kernel(self): + return textwrap.dedent( + """ + from torch._dynamo.testing import rand_strided + from torch._C import _cuda_getCurrentRawStream as get_cuda_stream + import torch + from torch._inductor.triton_heuristics import grid + """ + ) + + def codegen_kernel(self, name=None): + from triton import next_power_of_2 + + code = IndentedBuffer() + + size_hints = [] + for numel in self.numels: + numel_hint = V.graph.sizevars.symbolic_hint(numel) + if not isinstance(numel_hint, (int, sympy.Integer)): + # This default heuristic hint was picked carefully: it is + # large, to ensure that we don't shrink the block size (since + # if you don't have many elements, it'd be wasteful to pick a + # large block size). Since we don't know how many elements we + # might have, we should be OK with some inefficiency to make + # sure we handle the large case well. 8192 is the largest + # block size we support, so we pick that. + # + # If we have a better hint for unbacked SymInts (e.g., because + # a user told us, or we are tracking upper bounds) we could + # use that here. 
+ size_hint = 8192 + else: + size_hint = next_power_of_2(int(numel_hint)) + size_hints.append(size_hint) + if self.persistent_reduction: + assert self.inside_reduction + heuristics = "persistent_reduction" + elif self.inside_reduction: + heuristics = "reduction" + else: + size_hints.pop() + heuristics = "pointwise" + + if name is None: + code.splice( + f""" + import triton + import triton.language as tl + from torch._inductor.ir import ReductionHint + from torch._inductor.ir import TileHint + from torch._inductor.triton_heuristics import AutotuneHint, {heuristics} + from torch._inductor.utils import instance_descriptor + from torch._inductor import triton_helpers + """ + ) + if config.benchmark_kernel: + code.splice(self.imports_for_benchmark_kernel()) + + argdefs, _, signature = self.args.python_argdefs() + # maps actual expression to SizeArg if its in sizevars replacements + for i, arg in enumerate(signature): + if ( + isinstance(arg, SizeArg) + and arg.expr in V.graph.sizevars.inv_precomputed_replacements + ): + signature[i] = SizeArg( + arg.name, V.graph.sizevars.inv_precomputed_replacements[arg.expr] + ) + + mutated_args = set() + for mutation in self.mutations: + if mutation in self.args.input_buffers: + mutated_args.add(self.args.input_buffers[mutation]) + if ( + mutation in self.args.inplace_buffers + and mutation not in V.graph.removed_buffers + and mutation not in self.removed_buffers + ): + mutated_args.add(self.args.inplace_buffers[mutation].inner_name) + if mutation in self.args.output_buffers: + mutated_args.add(self.args.output_buffers[mutation]) + mutated_args = sorted(mutated_args) + + triton_meta_signature = signature_to_meta( + signature, size_dtype=self.index_dtype + ) + triton_meta = { + "signature": triton_meta_signature, + "device": V.graph.scheduler.current_device.index, + "device_type": V.graph.scheduler.current_device.type, + "constants": {}, + } + + inductor_meta = { + "autotune_hints": set(self.autotune_hints), + "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), + "mutated_arg_names": mutated_args, + } + + for tree in self.range_trees: + if tree.prefix != "r" or self.inside_reduction: + sizearg = SizeArg(f"{tree.prefix}numel", tree.numel) + signature.append(sizearg) + triton_meta_signature[len(argdefs)] = signature_of( + sizearg, size_dtype=self.index_dtype + ) + argdefs.append(f"{tree.prefix}numel") + # constexpr version causes issues, see + # https://github.com/pytorch/torchdynamo/pull/1362 + # triton_meta["constants"][len(argdefs)] = V.graph.sizevars.size_hint( + # tree.numel + # ) + # argdefs.append(f"{tree.prefix}numel: tl.constexpr") + triton_meta["configs"] = [config_of(signature)] + + for tree in self.range_trees: + if tree.prefix == "r" and ( + not self.inside_reduction or self.persistent_reduction + ): + continue + if tree.prefix == "x" and self.no_x_dim: + continue + argdefs.append(f"{tree.prefix.upper()}BLOCK : tl.constexpr") + + if self.inside_reduction: + reduction_hint = self.reduction_hint + heuristics_line = f""" + @{heuristics}( + size_hints={size_hints!r}, + reduction_hint={reduction_hint}, + filename=__file__, + triton_meta={triton_meta!r}, + inductor_meta={inductor_meta!r} + ) + @triton.jit + """ + else: + tile_hint = "" + if len(size_hints) == 2: + if len(signature) == 4: # input, output and 2 args + tile_hint = "tile_hint=TileHint.SQUARE," + else: + tile_hint = "tile_hint=TileHint.DEFAULT," + heuristics_line = f""" + @{heuristics}( + size_hints={size_hints!r}, {tile_hint} + filename=__file__, + triton_meta={triton_meta!r}, + 
inductor_meta={inductor_meta!r}, + min_elem_per_thread={self.min_elem_per_thread} + ) + @triton.jit + """ + code.splice(heuristics_line) + code.writeline( + f"def {name or str(Placeholder.KERNEL_NAME)}({', '.join(argdefs)}):" + ) + self.codegen_body() + with code.indent(): + self.codegen_static_numels(code) + for old, new in self.args.aliases(): + code.writeline(f"{old} = {new}") + code.splice(self.body) + + if config.benchmark_kernel: + code.splice(self.codegen_kernel_benchmark()) + + return code.getvalue() + + def codegen_static_numels(self, code): + """ + We get a small speedup from hard coding numels if they are static. + + This code stomps on the passed-in values by writing an constant to the top of the kernel. + + In a kernel like: + def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): + + We would add + xnumel = 4096 + rnumel = 768 + + After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes + a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream + knows that its a static numel, as that you just plop a constant into the kernel. + """ + for tree in self.range_trees: + if tree.prefix != "r" or self.inside_reduction: + simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) + if isinstance(simplified_tree_numel, (sympy.Integer, int)): + code.writeline(f"{tree.prefix}numel = {int(simplified_tree_numel)}") + + if tree.prefix == "r" and self.persistent_reduction: + simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) + if isinstance(simplified_tree_numel, (sympy.Integer, int)): + val = int(simplified_tree_numel) + else: + continue + val = next_power_of_2(val) + code.writeline(f"RBLOCK: tl.constexpr = {val}") + + if tree.prefix == "x" and self.no_x_dim: + code.writeline("XBLOCK: tl.constexpr = 1") + + def triton_tensor_ndim(self): + no_x_dim = int(bool(self.no_x_dim)) + no_r_dim = self.numels[-1] == 1 + return len(self.range_trees) - no_x_dim - no_r_dim + + def indexing_size_str(self, i=None, x=None): + # no_x_dim is sympy.logic.boolalg.BooleanTrue + no_x_dim = int(bool(self.no_x_dim)) + sizes = ["None"] * self.triton_tensor_ndim() + if i is not None: + idx = i - no_x_dim + sizes[idx] = ":" + return f"[{', '.join(sizes)}]" + + def dense_size_str(self): + sizes = [] + for tree in self.range_trees: + if self.no_x_dim and tree.prefix == "x": + continue + if tree.prefix != "r" or self.inside_reduction: + sizes.append(f"{tree.prefix.upper()}BLOCK") + elif tree.prefix == "r" and tree.numel != 1: + sizes.append("1") + + if sizes[0:3] == ["ZBLOCK", "YBLOCK", "XBLOCK"]: + sizes[0:3] = reversed(sizes[0:3]) + + if sizes[0:2] == ["YBLOCK", "XBLOCK"]: + sizes[0:2] = reversed(sizes[0:2]) + + return f"[{', '.join(sizes)}]" + + def call_kernel(self, name: str, node: Optional[IRNode] = None): + wrapper = V.graph.wrapper_code + _, call_args, _ = self.args.python_argdefs() + # dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar + for i in range(len(call_args)): + if V.graph.is_unspec_arg(call_args[i]): + call_args[i] = call_args[i] + ".item()" + grid = [] + # TODO(jansel): if there are constants, we shouldn't bother passing them as args + for tree in self.range_trees: + if isinstance(tree.numel, (sympy.Integer, sympy.Symbol)): + expr = tree.numel + else: + expr = wrapper.generate_numel_expr(name, tree) + + if tree.prefix != "r" or self.inside_reduction: + call_args.append(expr) + if tree.prefix != "r": + 
grid.append(expr) + + grid = wrapper.generate_default_grid(name, grid) + wrapper.generate_kernel_call( + name, + call_args, + grid, + V.graph.scheduler.current_device.index, + cuda=True, + triton=True, + ) + + def codegen_nan_check(self): + if not config.nan_asserts: + return + + wrapper = V.graph.wrapper_code + _, call_args, arg_types = self.args.python_argdefs() + for arg, arg_type in zip(call_args, arg_types): + if isinstance(arg_type, TensorArg): + line = f"assert not {arg}.isnan().any().item()" + wrapper.writeline(line) + line = f"assert not {arg}.isinf().any().item()" + wrapper.writeline(line) + + def warn_mix_layout(self, kernel_name): + """ + Print message if the kernel have mixed layout inputs. + Only care about 4D tensor for now. + """ + if ( + len(self.args.input_buffers) == 1 + and len(self.args.output_buffers) == 1 + and len(self.args.inplace_buffers) == 0 + ): + # even if input buffer and output buffer have different layout, + # this can be a layout conversion kernel. No need to warn for + # the mix layouts. + return + + argdefs, call_args, signature = self.args.python_argdefs() + uniform_stride_order = None + for arg_name in call_args: + buf = V.graph.get_buffer(arg_name) + if buf and len(buf.layout.size) == 4: + # ignore the tensor if only 1 dimension is non-zero + if len([x for x in buf.layout.size if x == 1]) == 3: + continue + stride_order = ir.get_stride_order(buf.layout.stride) + if uniform_stride_order is None: + uniform_stride_order = stride_order + elif uniform_stride_order != stride_order: + msg = yellow_text( + f"Expected stride order {uniform_stride_order}, but found stride order" + + f" {stride_order} for kernel {kernel_name}" + ) + log.warning(msg) + + stride_order_list = [ + ir.get_stride_order(V.graph.get_buffer(name).layout.stride) + if V.graph.get_buffer(name) + else None + for name in call_args + ] + size_list = [ + V.graph.get_buffer(name).layout.size + if V.graph.get_buffer(name) + else None + for name in call_args + ] + source_list = [ + "GraphInput" + if name in V.graph.graph_inputs + else "IntermediateBuffer" + if name in V.graph.name_to_buffer + else None + for name in call_args + ] + + msg = yellow_text( + f" param names {argdefs}\n buf names {call_args}\n strides {stride_order_list}" + + f"\n sizes {size_list}\n sources {source_list}\n" + ) + log.warning(msg) + return + msg = green_text( + f"All the inputs for the triton kernel {kernel_name} have uniform layout" + ) + log.warning(msg) + + def create_cse_var(self, *args, **kwargs): + return TritonCSEVariable(*args, **kwargs) + + +class TritonScheduling(BaseScheduling): + def __init__(self, scheduler): + self.scheduler = scheduler + + def group_fn(self, sizes): + return tuple(V.graph.sizevars.simplify(sympy_product(s)) for s in sizes) + + def can_fuse(self, node1, node2): + """ + Hook called by Scheduler to determine if the Triton backend + can fuse node1 and node2. These nodes might already be + FusedSchedulerNodes. 
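+ Compatibility is decided from the nodes' (numel, rnumel) groups and, when
+ tiling is enabled, from whether their selected tilings agree.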
+ """ + if isinstance(node1, scheduler.ForeachKernelSchedulerNode) or isinstance( + node2, scheduler.ForeachKernelSchedulerNode + ): + return scheduler.ForeachKernelSchedulerNode.can_fuse(node1, node2) + + _, (numel1, rnumel1) = node1.group + _, (numel2, rnumel2) = node2.group + why = WhyNoFuse(node1, node2) + + if node1.is_reduction() and node2.is_reduction(): + reduction_can_fuse = numel1 == numel2 and rnumel1 == rnumel2 + if not reduction_can_fuse: + why( + "numel/rnumel mismatch (reduce) (%s, %s), (%s, %s)", + numel1, + numel2, + rnumel1, + rnumel2, + ) + return reduction_can_fuse + + if not node1.is_reduction() and not node2.is_reduction(): + if not (numel1 == numel2 and rnumel1 == rnumel2): + why( + "numel/rnumel mismatch (non-reduce) (%s, %s), (%s, %s)", + numel1, + numel2, + rnumel1, + rnumel2, + ) + return False + + if node1.is_template(): + # Only allow fusion for TritonTemplates for now. + # Fusion for CUDATemplates are not supported. + is_triton_template = isinstance(node1.node, TritonTemplateBuffer) + if not is_triton_template: + why("node1 is not TritonTemplateBuffer") + return is_triton_template + + # check for a bad combined tiling + tiling1 = self.select_tiling(node1.get_nodes(), numel1, rnumel1) + tiling2 = self.select_tiling(node2.get_nodes(), numel1, rnumel1) + tiling3 = self.select_tiling( + node1.get_nodes() + node2.get_nodes(), numel1, rnumel1 + ) + if config.triton.tiling_prevents_pointwise_fusion: + cond = True + if len(tiling1) > 2: + if len(tiling2) > 2: + cond = tiling1 == tiling2 == tiling3 + else: + cond = tiling1 == tiling3 + elif len(tiling2) > 2: + cond = tiling2 == tiling3 + if not cond: + why( + "tiling mismatch (%s, %s, %s)", + tiling1, + tiling2, + tiling3, + ) + return False + + return True + + if not node1.is_reduction() and node2.is_reduction(): + assert rnumel1 == 1 and rnumel2 != 1 + if numel1 == numel2 * rnumel2: + if not all( + TritonKernel.is_compatible((numel2, rnumel2), n.get_ranges()) + for n in node1.get_nodes() + ): + why("nodes numel/rnumel incompatibility") + return False + if ( + config.triton.tiling_prevents_reduction_fusion + and not node1.is_template() + ): + is_reduction_tiling_valid = self.select_tiling( + node1.get_nodes(), numel1 + ) in ( + (numel1, 1), + (numel2, rnumel2, 1), + ) + if not is_reduction_tiling_valid: + why("invalid tiling for reduction") + return is_reduction_tiling_valid + return True + + if numel1 != numel2: + why("nodes numel incompatibility") + return numel1 == numel2 + + assert node1.is_reduction() and not node2.is_reduction() + # swap args to hit the case above + return self.can_fuse_horizontal(node2, node1) + + can_fuse_vertical = can_fuse + can_fuse_horizontal = can_fuse + + def generate_node_schedule(self, nodes, numel, rnumel): + node_schedule: List[Any] = [] + current_loop_writes: Set[str] = set() + is_current_reductions = set() + done = set() + + def fits_in_main_body(n): + _, (node_numel, node_rnumel) = n.group + return (node_numel == numel and node_rnumel == rnumel) or ( + node_numel == numel * rnumel and node_rnumel == 1 + ) + + def fits_outside_reduction(n): + _, (node_numel, node_rnumel) = n.group + return node_numel == numel and node_rnumel == 1 and rnumel != 1 + + @contextlib.contextmanager + def end_current_reduction_loop(): + if current_loop_writes: + # flush out any other runnable nodes to reduce number of loops + for other_node in nodes[index + 1 :]: + if ( + node not in done + and fits_in_main_body(other_node) + and not (current_loop_writes & other_node.ancestors) + ): + done.add(node) + 
current_loop_writes.add(node.get_name()) + is_current_reductions.add(node.is_reduction()) + node_schedule.append(node) + + if node_schedule and node_schedule[-1] is EnableReduction: + node_schedule.pop() + else: + node_schedule.append(DisableReduction) + yield + node_schedule.append(EnableReduction) + current_loop_writes.clear() + is_current_reductions.clear() + + for index, node in enumerate(nodes): + if node in done: + continue + done.add(node) + + def requires_closing_previous_reduction(node, node_schedule): + if rnumel == 1: + return False + if not current_loop_writes & node.ancestors: + return False + assert node_schedule and not isinstance( + node_schedule[-1], (EnableReduction, DisableReduction) + ) + return True in is_current_reductions + + if fits_in_main_body(node): + if requires_closing_previous_reduction(node, node_schedule): + with end_current_reduction_loop(): + pass # need to start a new reduction loop + current_loop_writes.add(node.get_name()) + is_current_reductions.add(node.is_reduction()) + node_schedule.append(node) + elif fits_outside_reduction(node): + with end_current_reduction_loop(): + node_schedule.append(node) + else: + raise NotImplementedError( + f"unexpected group: ({numel}, {rnumel}) != {node.group[1]}" + ) + + return node_schedule + + def codegen_nodes(self, nodes): + """ + Given a set of pre-fused nodes, generate a Triton kernel. + """ + _, (numel, rnumel) = max(nodes, key=lambda x: int(x.is_reduction())).group + + node_schedule = self.generate_node_schedule(nodes, numel, rnumel) + + schedule_log.debug("Schedule:\n %s", node_schedule) + + return self.codegen_node_schedule(node_schedule, numel, rnumel) + + @staticmethod + def reduction_hint(node): + assert node.is_reduction() + if all( + dep.is_contiguous() + for dep in itertools.chain(node.read_writes.reads, node.read_writes.writes) + ): + return ReductionHint.INNER + else: + return node.node.data.reduction_hint + + @staticmethod + def can_use_32bit_indexing( + numel: sympy.Expr, buffers: Iterable[Union[ir.Buffer, ir.TensorBox]] + ) -> bool: + int_max = torch.iinfo(torch.int32).max + size_hint = V.graph.sizevars.size_hint + has_hint = V.graph.sizevars.shape_env.has_hint + + def within_32bit(e): + # Allow for unhinted e as long as we can still statically prove + # (e.g., via ValueRanges) that it is still in bounds + if V.graph.sizevars.is_expr_static_and_true(e <= int_max): + return True + # Otherwise, the hint MUST exist and be in range + return has_hint(e) and size_hint(e) <= int_max + + if not within_32bit(numel): + return False + + # Any use of a MultiOutputLayout will create a buffer with a + # Layout whose sizes are accounted for + buf_sizes = [ + buf.get_layout().storage_size() + for buf in buffers + if not isinstance(buf.get_layout(), ir.MultiOutputLayout) + ] + + if not all(within_32bit(size) for size in buf_sizes): + return False + + # Only install guards for 32-bit indexing as there is no correctness + # issue with using 64-bit for everything + V.graph.sizevars.guard_leq(numel, int_max) + for size in buf_sizes: + V.graph.sizevars.guard_leq(size, int_max) + return True + + @staticmethod + def select_index_dtype(node_schedule, numel, reduction_numel): + # Gather all used buffer names + buffer_names = set() + for node in node_schedule: + if not isinstance(node, scheduler.BaseSchedulerNode): + continue + + buffer_names.update(node.get_names()) + buffer_names.update(node.used_buffer_names()) + + # Get buffers objects + def _get_buffer(name: str) -> Union[ir.Buffer, ir.TensorBox]: + if name in 
V.graph.name_to_buffer: + return V.graph.name_to_buffer[name] + elif name in V.graph.graph_inputs: + return V.graph.graph_inputs[name] + elif name in V.graph.constants: + data = V.graph.constants[name] + return ir.ConstantBuffer( + name, + ir.FixedLayout( + data.device, data.dtype, *V.graph.static_sizes_strides(data) + ), + ) + raise RuntimeError(f"Failed to find buffer matching name {name}") + + buffers = [_get_buffer(name) for name in buffer_names] + + # In theory we can separately check xnumel and rnumel are <= int_max + # but some indexers do use the full linear index so we need to be + # conservative here. + total_numel = numel * reduction_numel + + if TritonScheduling.can_use_32bit_indexing(total_numel, buffers): + return "tl.int32" + return "tl.int64" + + def get_kernel_args(self, node_schedule, numel, reduction_numel): + reductions = list( + filter( + lambda n: n not in (EnableReduction, DisableReduction) + and n.is_reduction(), + node_schedule, + ) + ) + if len(reductions) > 0: + hints = [self.reduction_hint(n) for n in reductions] + if hints.count(hints[0]) == len(hints): + reduction_hint_val = hints[0] + else: + reduction_hint_val = ReductionHint.DEFAULT + else: + reduction_hint_val = ReductionHint.DEFAULT + + mutations = set() + for node in node_schedule: + if hasattr(node, "get_mutations"): + mutations.update(node.get_mutations()) + + index_dtype = self.select_index_dtype(node_schedule, numel, reduction_numel) + + return reduction_hint_val, mutations, index_dtype + + def codegen_comment(self, node_schedule): + wrapper = V.graph.wrapper_code + origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) + if origins: + wrapper.writeline(origins) + + if config.debug_fusion: + from torch._inductor.scheduler import ( + BaseSchedulerNode, + ForeachKernelSchedulerNode, + ) + + if not any( + isinstance(n, ForeachKernelSchedulerNode) for n in node_schedule + ): + # We probably should look what are the nodes inside a foreach + # schedule node + node_names = [ + n.get_name() + for n in node_schedule + if isinstance(n, BaseSchedulerNode) + ] + wrapper.writeline( + f"{wrapper.comment} Fused node name list: {', '.join(node_names)}" + ) + + def codegen_node_schedule(self, node_schedule, numel, reduction_numel): + tiled_groups = self.select_tiling(node_schedule, numel, reduction_numel) + reduction_hint_val, mutations, index_dtype = self.get_kernel_args( + node_schedule, numel, reduction_numel + ) + + kernel = TritonKernel( + *tiled_groups, + reduction_hint=reduction_hint_val, + mutations=mutations, + index_dtype=index_dtype, + ) + + self.codegen_node_schedule_with_kernel(node_schedule, kernel) + + with V.set_kernel_handler(kernel): + src_code = kernel.codegen_kernel() + + for node in node_schedule: + if node not in (EnableReduction, DisableReduction): + node.mark_run() + + kernel_name = self.define_kernel(src_code, node_schedule) + log.debug("Generating kernel code with kernel_name: %s", kernel_name) + self.codegen_comment(node_schedule) + kernel.call_kernel(kernel_name) + kernel.codegen_nan_check() + V.graph.removed_buffers |= kernel.removed_buffers + V.graph.inplaced_to_remove |= kernel.inplaced_to_remove + + if config.warn_mix_layout: + kernel.warn_mix_layout(kernel_name) + + if ( + V.graph.wrapper_code.supports_intermediate_hooks + and config.generate_intermediate_hooks + ): + # Not every node in the schedule will actually be live on output; + # we can't check dead buffers. 
+ live_outs = kernel.args.live_output_buffers() + for node in node_schedule: + if not isinstance(node, scheduler.BaseSchedulerNode): + continue + name = node.get_name() + if name not in live_outs: + continue + origin_node = node.node.get_origin_node() + if origin_node is not None: + counters["inductor"]["intermediate_hooks"] += 1 + V.graph.wrapper_code.writeline( + f"run_intermediate_hooks({origin_node.name!r}, {name})" + ) + + self.scheduler.free_buffers() + + def codegen_node_schedule_with_kernel(self, node_schedule, kernel): + def current_reduction_nodes(nodes): + return itertools.takewhile(lambda n: n is not DisableReduction, nodes) + + with kernel: + stack = contextlib.ExitStack() + kernel.set_last_usage(current_reduction_nodes(node_schedule)) + + for node in node_schedule: + if node not in (EnableReduction, DisableReduction): + node.decide_inplace_update() + for i, node in enumerate(node_schedule): + if node is DisableReduction: + stack.enter_context(kernel.disable_reduction()) + elif node is EnableReduction: + stack.close() + kernel.set_last_usage(current_reduction_nodes(node_schedule[i:])) + else: + # TODO - use split ranges ? + indexing_dtype_strength_reduction(node._body) + index_vars = kernel.split_and_set_ranges(node.get_ranges()) + node.codegen(index_vars) + + def define_kernel(self, src_code, node_schedule): + wrapper = V.graph.wrapper_code + if src_code in wrapper.src_to_kernel: + kernel_name = wrapper.src_to_kernel[src_code] + else: + fused_name = ( + get_fused_kernel_name(node_schedule, config.triton.descriptive_names) + if config.triton.descriptive_names + else "" + ) + kernel_category = get_kernel_category_by_source_code(src_code)[:3] + kernel_name = "_".join( + ["triton", kernel_category, fused_name, wrapper.next_kernel_suffix()] + ) + # use the original src_code as the key + wrapper.src_to_kernel[src_code] = kernel_name + subs_name = kernel_name if config.triton.unique_kernel_names else "triton_" + + # DESCRIPTIVE_NAME is used for profiling purposes; it shows the full kernel name + # even when unique_kernel_names is turned off. Meanwhile, KERNEL_NAME is sometimes set + # to "triton_" to maximize caching opportunities (when unique_kernel_names = False). + src_code = src_code.replace(str(Placeholder.DESCRIPTIVE_NAME), kernel_name) + src_code = src_code.replace(str(Placeholder.KERNEL_NAME), subs_name) + + # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does + # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. 
+ src_code = src_code.replace("#pragma CMT", "#") + + basename, _, kernel_path = get_path(code_hash(src_code.strip()), "py") + + compile_wrapper = IndentedBuffer() + compile_wrapper.writeline(f"async_compile.triton({subs_name!r}, '''") + compile_wrapper.splice(src_code, strip=True) + compile_wrapper.writeline("''')") + + metadata_comment = f"# kernel path: {kernel_path}" + origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) + metadata_comment += "\n" + origins + "\n" + detailed_origins + wrapper.define_kernel( + kernel_name, compile_wrapper.getvalue(), metadata_comment + ) + return kernel_name + + def codegen_template(self, template_node, epilogue_nodes): + """ + Codegen a triton template + """ + _, (numel, rnumel) = template_node.group + assert rnumel == 1 + kernel, render = template_node.node.make_kernel_render(template_node.node) + with kernel: + for node in [template_node, *epilogue_nodes]: + node.mark_run() + partial_code = render() + for node in epilogue_nodes: + node.codegen(kernel.split_and_set_ranges(node.get_ranges())) + + # finalize must be called after adding epilogue above + with V.set_kernel_handler(kernel): + # TODO: Maybe unify CUDATemplateKernel to also use PartialRender for flexible epilogue fusion. + src_code = ( + partial_code + if isinstance(partial_code, str) + else partial_code.finalize() + ) + node_schedule = [template_node, *epilogue_nodes] + + if config.benchmark_kernel: + src_code = f"{kernel.imports_for_benchmark_kernel()}\n{src_code}\n{kernel.codegen_kernel_benchmark().getvalue()}" + + kernel_name = self.define_kernel(src_code, node_schedule) + self.codegen_comment(node_schedule) + kernel.call_kernel(kernel_name, template_node.node) + V.graph.removed_buffers |= kernel.removed_buffers + V.graph.inplaced_to_remove |= kernel.inplaced_to_remove + self.scheduler.free_buffers() + + def codegen_sync(self): + V.graph.wrapper_code.writeline("torch.cuda.synchronize()") + + def codegen_foreach(self, foreach_node): + from .triton_foreach import ForeachKernel + + for partitions_with_metadata in ForeachKernel.horizontal_partition( + foreach_node.get_subkernel_nodes(), self + ): + kernel = ForeachKernel() + for nodes, tiled_groups, numel, rnumel in partitions_with_metadata: + node_schedule = self.generate_node_schedule(nodes, numel, rnumel) + ( + reduction_hint_val, + mutations, + index_dtype, + ) = self.get_kernel_args(node_schedule, numel, rnumel) + + subkernel = kernel.create_sub_kernel( + *tiled_groups, + reduction_hint=reduction_hint_val, + mutations=mutations, + index_dtype=index_dtype, + ) + + self.codegen_node_schedule_with_kernel( + node_schedule, + subkernel, + ) + + with V.set_kernel_handler(subkernel): + for node in node_schedule: + if node not in (EnableReduction, DisableReduction): + node.mark_run() + V.graph.removed_buffers |= subkernel.removed_buffers + V.graph.inplaced_to_remove |= subkernel.inplaced_to_remove + + src_code = kernel.codegen_kernel() + kernel_name = self.define_kernel(src_code, [foreach_node]) + self.codegen_comment([foreach_node]) + kernel.call_kernel(V.graph.wrapper_code, kernel_name) + + self.scheduler.free_buffers() + + @staticmethod + @functools.lru_cache(32) + def candidate_tilings(node): + ranges, reduction_ranges = node.get_ranges() + if len(ranges) <= 1: + return () + + rw = node.pointwise_read_writes() + assert len(rw.range_vars) == len(ranges) + + # isinstance(dep, MemoryDep): this filters out StarDeps. 
StarDeps refer to reads + # that need to access the entire tensor; they don't contribute read indexing + # information (and practically, they don't have dep.index so they can't be used + # for stride_hints below + dep_sources = [rw.reads, rw.writes] + assert all( + isinstance(dep, (MemoryDep, StarDep)) + for dep in itertools.chain(*dep_sources) + ) + deps = [ + dep + for dep in itertools.chain(*dep_sources) + if dep.name not in V.graph.removed_buffers and isinstance(dep, MemoryDep) + ] + write_names = {dep.name for dep in rw.writes} + + tilings: List[CandidateTiling] = [] + + for dep in deps: + strides = V.graph.sizevars.stride_hints(dep.index, rw.range_vars) + assert len(strides) == len(ranges) + try: + split = strides.index(1) + 1 + if split == len(ranges): + continue + if all(s == 0 for s in strides[split:]): + # if this is a broadcasted tensor and all dimensions after split are broadcast, + # this is not a real split + continue + + except ValueError: + continue + tiled_groups = ( + V.graph.sizevars.simplify(sympy_product(ranges[:split])), + V.graph.sizevars.simplify(sympy_product(ranges[split:])), + ) + # score by number of elements + score = V.graph.sizevars.size_hint( + sympy_product( + size for size, stride in zip(ranges, strides) if stride != 0 + ) + ) + if dep.name in write_names: + # ngimel said contiguous writes is more important than reads + score *= 2 + if CandidateTiling.is_good_size(tiled_groups[0]): + score *= 2 + if CandidateTiling.is_good_size(tiled_groups[1]): + score *= 2 + + if ( + V.graph.sizevars.size_hint( + score - sympy_product(itertools.chain(ranges, reduction_ranges)) + ) + >= 0 + ): + tilings.append(CandidateTiling(tiled_groups, score, dep.name)) + return tilings + + @classmethod + def select_tiling(cls, node_schedule, numel, reduction_numel=sympy.Integer(1)): + """ + Heuristics to decide how to tile kernels. + Currently, we tile based on stride-1 dimensions. + + Returns: + `(tile1, tile2, reduction_numel)` s.t. `tile1 * tile2 == numel` + + """ + if reduction_numel != 1 or config.triton.max_tiles <= 1: + # TODO(jansel): should we tile reductions? + # do perf hint here if stride-1 dim is not being reduced + if perf_hint_log.level <= logging.WARNING: + for node in EnableReduction.filter(node_schedule): + if len(cls.candidate_tilings(node)) > 0: + perf_hint_log.info("reduction over non-contiguous dims") + break + return (numel, reduction_numel) + + seen_names = set() + candidate_tiles: Counter[Any] = collections.Counter() + for node in EnableReduction.filter(node_schedule): + for tiling in cls.candidate_tilings(node): + if tiling.name in seen_names: + continue + seen_names.add(tiling.name) + candidate_tiles[tiling.tiling] += tiling.score + + ranked_tilings = [tiling for tiling, score in candidate_tiles.most_common()] + + if config.triton.max_tiles >= 3: + # Consider adding a third dimension of tiling, but only + # when a1 is a multiple of b1; otherwise, you have a lot + # of stragglers which is annoying to generate code for. + # + # NB: More than three max tiles is not enabled by default. 
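+ # Illustrative example with hypothetical sizes: if ranked_tilings[0] == (s0, 64) and ranked_tilings[i] == (s0*8, 8), then 64 is statically a multiple of 8, so the extra candidate (s0, 64 // 8, 8) == (s0, 8, 8) is prepended to ranked_tilings.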
+ + # Add one 3D tiling choice + for i in range(1, len(ranked_tilings)): + a0, a1 = ranked_tilings[0] + b0, b1 = ranked_tilings[i] + if V.graph.sizevars.size_hint(a1 - b1) == 0: + continue + if V.graph.sizevars.size_hint(a1 - b1) < 0: + # swap so a0 is bigger + a0, a1 = ranked_tilings[i] + b0, b1 = ranked_tilings[0] + assert V.graph.sizevars.size_hint(a1 - b1) > 0 + if V.graph.sizevars.statically_known_multiple_of(a1, b1): + tiling = (a0, FloorDiv(a1, b1), b1) + ranked_tilings = [tiling] + ranked_tilings + break # only 1 choice for now + + if len(ranked_tilings) > 1: + perf_hint_log.info("possibly bad tiling: %s", ranked_tilings) + + for tiled_groups in ranked_tilings: + new_groups = (*tiled_groups, reduction_numel) + if all( + TritonKernel.is_compatible(new_groups, node.get_ranges()) + for node in node_schedule + if isinstance(node, scheduler.SchedulerNode) + ): + return new_groups + + return (numel, reduction_numel) + + def flush(self): + pass + + def ready_to_flush(self) -> bool: + return False + + def benchmark_fused_nodes(self, nodes): + _, (numel, rnumel) = max(nodes, key=lambda x: int(x.is_reduction())).group + node_schedule = self.generate_node_schedule(nodes, numel, rnumel) + tiled_groups = self.select_tiling(node_schedule, numel, rnumel) + reduction_hint_val, mutations, index_dtype = self.get_kernel_args( + node_schedule, numel, rnumel + ) + + kernel = TritonKernel( + *tiled_groups, + reduction_hint=reduction_hint_val, + mutations=mutations, + index_dtype=index_dtype, + ) + + # empty last_usage. May cause more aggressive 'evict_last'. Should be fine. + for n in nodes: + n.last_usage = set() + + self.codegen_node_schedule_with_kernel(node_schedule, kernel) + with config.patch("benchmark_kernel", True), V.set_kernel_handler(kernel): + src_code = kernel.codegen_kernel() + + src_code = src_code.replace(str(Placeholder.KERNEL_NAME), "triton_") + mod = PyCodeCache.load(src_code) + + def cache_file_path(): + assert mod.__file__ is not None + return os.path.splitext(mod.__file__)[0] + ".kernel_perf" + + def load_cache(): + path = cache_file_path() + if os.path.exists(path): + with open(path) as fd: + return float(fd.read()) + return None + + def store_cache(): + path = cache_file_path() + with open(path, "w") as fd: + fd.write(str(ms)) + + log.debug( + "kernel src code for %s written to: %s", + {n.get_name() for n in nodes}, + mod.__file__, + ) + ms = load_cache() + if ms is not None: + return ms, mod.__file__ + + args = mod.get_args() + call = mod.call + wrapped_jit_function = mod.triton_ + + # call once to trigger the compilation + call(wrapped_jit_function.clone_args(*args)[0]) + + launchers = wrapped_jit_function.launchers + assert len(launchers) == 1 + if launchers[0].n_spills > 0: + # skip benchmarking the kernel if there are register spills + ms = float("inf") + else: + # We have to clone the inplace updated arguments to avoid earlier calls + # generating out of range indices for later calls. 
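+ # clone_args is re-invoked inside the lambda, so every do_bench iteration starts from fresh copies of the (possibly in-place mutated) arguments.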
+ ms = do_bench(lambda: call(wrapped_jit_function.clone_args(*args)[0])) + + log.debug( + "The fused kernel for %s took %.3f ms to run", + {n.get_name() for n in nodes}, + ms, + ) + store_cache() + return ms, mod.__file__ + + +@dataclasses.dataclass +class CandidateTiling: + tiling: Tuple[sympy.Expr, sympy.Expr] + score: int # higher is better + name: Optional[str] = None + + @staticmethod + def is_good_size(s): + """Somewhat arbitrary heuristic used to boost scores for some sizes""" + s = V.graph.sizevars.size_hint(s) + return s >= 32 and (s % 32 == 0) + + +class DisableReduction: + """ + Marker to invoke `kernel.disable_reduction()`. This closes a + reduction loop and allows for pointwise ops to occur on the output + of a reduction. + """ + + +class EnableReduction: + """ + Marker to end a DisableReduction block. + """ + + @staticmethod + def filter(node_schedule): + """ + Get the nodes from node_schedule skipping those in a + DisableReduction block. + """ + disabled = False + for node in node_schedule: + if node in (EnableReduction, DisableReduction): + # Don't tile stuff outside the main reduction loop + disabled = node is DisableReduction + elif disabled: + pass + else: + yield node + + +class CantSplit(Exception): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py new file mode 100644 index 0000000000000000000000000000000000000000..8efd9fb6864a8fc8d57f57956eeba6e4df7481e2 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py @@ -0,0 +1,249 @@ +import itertools +from collections import defaultdict +from dataclasses import dataclass +from typing import Dict, List, Tuple + +from sympy import Integer + +from .. 
import metrics +from ..scheduler import SchedulerNode +from ..utils import ceildiv, Placeholder +from ..virtualized import V +from .common import IndentedBuffer, Kernel +from .triton import TritonKernel +from .triton_utils import config_of, signature_to_meta + + +@dataclass +class PartitionState: + partitions: List[ + List[Tuple[List[SchedulerNode], Tuple[Integer, ...], Integer, Integer]] + ] + cur_partition: List[ + Tuple[List[SchedulerNode], Tuple[Integer, ...], Integer, Integer] + ] + cur_count: int + + def finalize(self): + if self.cur_partition: + self.partitions.append(self.cur_partition) + + +class ForeachKernel(Kernel): + MAX_NUM_ARGS = 250 # number where I would no longer get triton errors + + @staticmethod + def _update_partition(partition_state, node_rw_count, node_info): + if partition_state.cur_count + node_rw_count > ForeachKernel.MAX_NUM_ARGS: + partition_state.partitions.append(partition_state.cur_partition) + partition_state.cur_partition = [node_info] + partition_state.cur_count = node_rw_count + else: + partition_state.cur_count += node_rw_count + partition_state.cur_partition.append(node_info) + + @staticmethod + def horizontal_partition(subkernel_nodes, triton_scheduling): + """Generates a list of lists of node info tuples which consist of (fused_nodes, tiling, numel, rnumel) + for each subkernel node where each sublist is guaranteed to not exceed CUDA limits for number of args + (read/writes) and to have the same 2D or 1D blocking strategy.""" + assert len(subkernel_nodes) >= 1 + + partition_state_1d = PartitionState([], [], 0) + yelem_to_partition_state_2d: Dict[Integer, PartitionState] = defaultdict( + lambda: PartitionState([], [], 0) + ) + + for node in subkernel_nodes: + fused_nodes = node.get_nodes() + _, (numel, rnumel) = max( + fused_nodes, key=lambda x: int(x.is_reduction()) + ).group + tiled_groups = triton_scheduling.select_tiling(fused_nodes, numel, rnumel) + node_info = fused_nodes, tiled_groups, numel, rnumel + + read_writes = node.read_writes + read_write_count = len(read_writes.reads) + len(read_writes.writes) + + if tiled_groups[1] == 1: + ForeachKernel._update_partition( + partition_state_1d, read_write_count, node_info + ) + else: + y_elem = tiled_groups[0] + partition_state_2d = yelem_to_partition_state_2d[y_elem] + ForeachKernel._update_partition( + partition_state_2d, read_write_count, node_info + ) + + partition_state_1d.finalize() + all_partitions = partition_state_1d.partitions + for partition_state_2d in yelem_to_partition_state_2d.values(): + partition_state_2d.finalize() + all_partitions.extend(partition_state_2d.partitions) + + return all_partitions + + def __init__(self): + super().__init__() + self.blocking_2d = False + self.block_size_1d = 1024 # Try tuning this value + self.block_size_2d = 32 + self.num_warps = 8 + self.sub_kernels = [] + self.iter_vars_count = itertools.count() + self.x_block_count = 0 + self.y_block_count = 0 + + def get_block_size(self): + if self.blocking_2d: + return self.block_size_2d + else: + return self.block_size_1d + + @staticmethod + def codegen_pid_offsets(code, block_count, lower_bound, prefix): + if block_count == 0: + code.splice(f"{prefix}pid_offset = {prefix}pid") + else: + code.splice(f"{prefix}pid_offset = {prefix}pid - {lower_bound}") + + def codegen_pid_range(self, code, x_elems): + num_x_blocks = ceildiv(x_elems, self.get_block_size()) + upper_bound_x_pid = self.x_block_count + num_x_blocks + lower_bound_x_pid = self.x_block_count + + if self.x_block_count == 0: + cond = "if" + else: + cond = 
"elif" + + x_pid_bounds_check = ( + f"xpid >= {lower_bound_x_pid} and xpid < {upper_bound_x_pid}" + ) + code.splice(f"{cond} {x_pid_bounds_check}:") + + with code.indent(): + ForeachKernel.codegen_pid_offsets( + code, num_x_blocks, lower_bound_x_pid, "x" + ) + self.x_block_count += num_x_blocks + + def create_sub_kernel(self, *groups, index_dtype, mutations, reduction_hint): + sub_kernel = TritonKernel( + *groups, + index_dtype=index_dtype, + mutations=mutations, + pid_cache={ + "tl.program_id(0)": "xpid_offset", + "tl.program_id(1)": "ypid", + }, + reduction_hint=reduction_hint, + ) + if self.blocking_2d: + assert len(groups) == 3 + + self.blocking_2d |= groups[1] != 1 and len(groups) == 3 + metrics.generated_kernel_count -= 1 + sub_kernel.args = self.args + sub_kernel.iter_vars_count = self.iter_vars_count + sub_kernel.cse.iter_buffer_ids = self.cse.iter_buffer_ids + self.sub_kernels.append(sub_kernel) + return sub_kernel + + def jit_line(self): + can_use_32bit = all(k.index_dtype == "tl.int32" for k in self.sub_kernels) + size_dtype = "tl.int32" if can_use_32bit else "tl.int64" + _, _, signature = self.args.python_argdefs() + triton_meta = { + "signature": signature_to_meta(signature, size_dtype=size_dtype), + "device": V.graph.scheduler.current_device.index, + "device_type": V.graph.scheduler.current_device.type, + "constants": {}, + } + triton_meta["configs"] = [config_of(signature)] + inductor_meta = {"kernel_name": str(Placeholder.DESCRIPTIVE_NAME)} + return ( + f"@foreach(num_warps={self.num_warps}, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r})\n" + + "@triton.jit" + ) + + def grid(self): + return ( + self.x_block_count, + ceildiv(int(self.sub_kernels[0].numels[0]), self.block_size_2d) + if self.blocking_2d + else 1, + 1, + ) + + def codegen_kernel(self, name=None): + code = IndentedBuffer() + + code.splice( + """ + import triton + import triton.language as tl + from torch._inductor.triton_heuristics import foreach + from torch._inductor.utils import instance_descriptor + from torch._inductor import triton_helpers + """ + ) + argdefs, _, _ = self.args.python_argdefs() + code.writeline(self.jit_line()) + code.writeline( + f"def {name or str(Placeholder.KERNEL_NAME)}({', '.join(argdefs)}):" + ) + + with code.indent(): + code.splice("xpid = tl.program_id(0)") + if self.blocking_2d: + code.splice("ypid = tl.program_id(1)") + code.splice(f"XBLOCK: tl.constexpr = {self.block_size_2d}") + code.splice(f"YBLOCK: tl.constexpr = {self.block_size_2d}") + else: + code.splice(f"XBLOCK: tl.constexpr = {self.block_size_1d}") + + for sub_kernel in self.sub_kernels: + assert len(sub_kernel.numels) <= 3 + # TODO mlazos: support dynamic shapes + numel_ind = 0 if not self.blocking_2d else 1 + self.codegen_pid_range(code, int(sub_kernel.numels[numel_ind])) + with code.indent(): + if self.blocking_2d: + code.splice(f"ynumel = {sub_kernel.numels[0]}") + code.splice(f"xnumel = {sub_kernel.numels[1]}") + else: + code.splice(f"xnumel = {sub_kernel.numels[0]}") + + sub_kernel.codegen_body() + code.splice(sub_kernel.body) + + code.splice("else:") + with code.indent(): + code.splice("pass") + + return code.getvalue() + + def call_kernel(self, code, name: str): + _, call_args, _ = self.args.python_argdefs() + # dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar + for i in range(len(call_args)): + if V.graph.is_unspec_arg(call_args[i]): + call_args[i] = call_args[i] + ".item()" + if V.graph.cpp_wrapper: + V.graph.wrapper_code.generate_kernel_call( + name, + call_args, + 
device_index=V.graph.scheduler.current_device.index, + grid=self.grid(), + ) + else: + # TODO: refactor generate_kernel_call + call_args_str = ", ".join(call_args) + stream_name = code.write_get_raw_stream( + V.graph.scheduler.current_device.index + ) + code.writeline( + f"{name}.run({call_args_str}, grid=({self.grid()}), stream={stream_name})" + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/wrapper.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..3d00ed51771b29796e64357d8b04ea107bc3fe73 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/wrapper.py @@ -0,0 +1,2692 @@ +import collections +import contextlib +import dataclasses +import functools +import inspect +import os +import re +from itertools import chain, count +from typing import Any, Dict, List, Optional, Set, Tuple, Union + +import sympy +from sympy import Expr + +import torch +from torch._dynamo.utils import counters, dynamo_timed +from torch._inductor.codecache import get_cpp_wrapper_cubin_path_name +from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols, SymTypes + +from torch.fx.node import _get_qualified_name +from torch.utils._sympy.singleton_int import SingletonInt + +from .. import codecache, config, ir +from ..codecache import CudaKernelParamCache +from ..ir import ComputedBuffer, InputBuffer, ReinterpretView +from ..triton_heuristics import grid as default_grid +from ..utils import ( + cache_on_self, + get_benchmark_name, + LineContext, + sympy_product, + sympy_str, +) +from ..virtualized import V +from .common import CodeGen, DeferredLine, IndentedBuffer, PythonPrinter +from .triton_utils import config_of, signature_to_meta + + +pexpr = PythonPrinter().doprint + + +def buffer_reuse_key(node: ir.Buffer): + return ( + node.get_device(), + node.get_dtype(), + # NB: this is symbolic so that we don't try to reuse a buffer + # for s0 for s1, just because they happen to share the same + # size hint + sympy_str(V.graph.sizevars.simplify(node.layout.storage_size())), + ) + + +def is_int(s: str): + # Cpp code gen adds L at the end of ints + # Lets remove it for checking whether we have an int or not + if s and s[-1] == "L": + s = s[:-1] + try: + int(s) + except ValueError: + return False + except TypeError: + return False + return True + + +def is_float(s: str): + try: + float(s) + except ValueError: + return False + return True + + +def convert_arg_type(python_type: str): + from .cpp import CONTAINER_PYTHON_TO_CPP, PYTHON_TO_CPP + + if python_type == "Tensor": + # Conversions rules follow https://github.com/pytorch/pytorch/tree/main/aten/src/ATen/native#func + return f"at::{python_type} const&" + + if python_type in PYTHON_TO_CPP: + return PYTHON_TO_CPP[python_type] + + # Convert args of container types e.g. 
Optional[*] + for py_container, cpp_container in CONTAINER_PYTHON_TO_CPP.items(): + container_match = re.findall(py_container + r"\[([a-zA-Z_]+)]", python_type) + if len(container_match) == 1: + contained_type = container_match[0] + assert ( + contained_type in PYTHON_TO_CPP + ), f"unsupported {py_container} type in convert_arg_type: {contained_type}" + cpp_contained_type = PYTHON_TO_CPP[contained_type] + return f"{cpp_container}<{cpp_contained_type}>" + + raise AssertionError(f"unsupport python_type: {python_type}") + + +def convert_return_type(python_type: str): + # TODO: support alias + python_to_cpp = { + "Tensor": "at::Tensor", + "List[Tensor]": "std::vector", + } + + cpp_type = python_to_cpp.get(python_type, None) + assert cpp_type is not None, f"NYI return type: {python_type}" + return cpp_type + + +def get_cpp_op_schema(kernel): + # use x.real_type instead of x.type so that we get ScalarType instead of int + arg_types = [repr(x.real_type) for x in kernel._schema.arguments] + arg_names = [x.name for x in kernel._schema.arguments] + returns = [repr(x.real_type) for x in kernel._schema.returns] + + num_returns = len(returns) + assert num_returns > 0, "must have at least one return value" + + if num_returns == 1: + cpp_return_value = convert_return_type(returns[0]) + elif num_returns > 1: + tuple_returns = ", ".join([convert_return_type(r) for r in returns]) + cpp_return_value = f"std::tuple<{tuple_returns}>" + + cpp_arg_type = [ + f"{convert_arg_type(arg_type)} {arg_name}" + for arg_type, arg_name in zip(arg_types, arg_names) + ] + return f"{cpp_return_value}({', '.join(cpp_arg_type)})" + + +def user_defined_kernel_grid_fn_code(name, configs, grids): + output = IndentedBuffer() + + fn_name = f"grid_wrapper_for_{name}" + output.writeline(f"def {fn_name}(meta):") + with output.indent(): + if len(grids) == 1: + output.writeline(f"return {grids[0]}") + else: + assert len(grids) == len(configs) + for grid, c in zip(grids, configs): + guards = [f"meta['{name}'] == {val}" for name, val in c.kwargs.items()] + guards = " and ".join(guards) + output.writeline(f"if {guards}: return {grid}") + return fn_name, output.getvalue() + + +@dataclasses.dataclass +class SymbolicCallArg: + inner: Any + # the original symbolic expression represented by inner + inner_expr: sympy.Expr + + def __str__(self): + return str(self.inner) + + +class MemoryPlanningState: + def __init__(self): + super().__init__() + self.reuse_pool: Dict[Any, List[FreeIfNotReusedLine]] = collections.defaultdict( + list + ) + + def __contains__(self, key): + return bool(self.reuse_pool.get(key, None)) + + def pop(self, key) -> "FreeIfNotReusedLine": + item = self.reuse_pool[key].pop() + assert not item.is_reused + return item + + def push(self, key, item: "FreeIfNotReusedLine"): + assert not item.is_reused + self.reuse_pool[key].append(item) + + +@dataclasses.dataclass +class EnterCudaDeviceContextManagerLine: + device_idx: int + last_seen_device_guard_index: Optional[int] + + def codegen(self, code: IndentedBuffer, device_cm_stack: contextlib.ExitStack): + if V.graph.cpp_wrapper: + code.writeline("\n") + if V.graph.aot_mode: + # In AOT mode, we have a stream provided as a param. A stream is + # associated with a device, so we never expect the device to change. + # CUDAStreamGuard sets the stream and the device. 
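+ # The stream/device guard is only emitted the first time a device is entered; subsequent entries just assert the device index is unchanged, since AOTInductor supports a single CUDA device per model.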
+ if self.last_seen_device_guard_index is None: + if config.aot_inductor.abi_compatible: + code.writeline( + "AOTICudaStreamGuard stream_guard(stream, this->device_idx_);" + ) + else: + code.writeline( + "at::cuda::CUDAStreamGuard stream_guard(" + + "at::cuda::getStreamFromExternal(stream, this->device_idx_));" + ) + else: + assert ( + self.last_seen_device_guard_index == self.device_idx + ), "AOTInductor only supports running on one CUDA device" + else: + if self.last_seen_device_guard_index is None: + code.writeline( + f"at::cuda::CUDAGuard device_guard({self.device_idx});" + ) + else: + code.writeline(f"device_guard.set_index({self.device_idx});") + else: + # Note _DeviceGuard has less overhead than device, but only accepts + # integers + code.writeline(f"with torch.cuda._DeviceGuard({self.device_idx}):") + device_cm_stack.enter_context(code.indent()) + code.writeline( + f"torch.cuda.set_device({self.device_idx}) # no-op to ensure context" + ) + + +class ExitCudaDeviceContextManagerLine: + def codegen(self, code: IndentedBuffer, device_cm_stack: contextlib.ExitStack): + if not V.graph.cpp_wrapper: + device_cm_stack.close() + + +@dataclasses.dataclass +class MemoryPlanningLine: + wrapper: "WrapperCodeGen" + + def plan(self, state: MemoryPlanningState) -> "MemoryPlanningLine": + """First pass to find reuse""" + return self + + def codegen(self, code: IndentedBuffer): + """Second pass to output code""" + pass + + def __str__(self): + """ + Emits a string representation that fits on one line. + """ + args: List[str] = [] + for field in dataclasses.fields(self): + if field.name == "wrapper": + continue + val = getattr(self, field.name) + args.append( + f"{field.name}={val.get_name() if field.type is ir.Buffer else val}" + ) + return f"{type(self).__name__}({', '.join(args)})" + + +@dataclasses.dataclass +class AllocateLine(MemoryPlanningLine): + node: ir.Buffer + + def plan(self, state: MemoryPlanningState): + if self.node.get_name() in V.graph.removed_buffers: + return NullLine(self.wrapper) + + # try to reuse a recently freed buffer + key = buffer_reuse_key(self.node) + if config.allow_buffer_reuse and key in state: + free_line = state.pop(key) + free_line.is_reused = True + return ReuseLine(self.wrapper, free_line.node, self.node) + + return self + + def codegen(self, code: IndentedBuffer): + assert self.node.get_name() not in V.graph.removed_buffers + line = self.wrapper.make_buffer_allocation(self.node) + code.writeline(line) + + +@dataclasses.dataclass +class FreeIfNotReusedLine(MemoryPlanningLine): + node: ir.Buffer + is_reused: bool = False + + def plan(self, state: MemoryPlanningState): + if isinstance(self.node.layout, (ir.AliasedLayout, ir.MultiOutputLayout)): + return self + assert not self.is_reused + if self.node.get_name() in V.graph.removed_buffers: + return NullLine(self.wrapper) + if config.allow_buffer_reuse: + state.push(buffer_reuse_key(self.node), self) + return self + + def codegen(self, code: IndentedBuffer): + assert self.node.get_name() not in V.graph.removed_buffers + if not self.is_reused: + code.writeline(self.wrapper.make_buffer_free(self.node)) + + +@dataclasses.dataclass +class ReuseLine(MemoryPlanningLine): + node: ir.Buffer + reused_as: ir.Buffer + delete_old: bool = True + + def plan(self, state: MemoryPlanningState): + if self.node.get_name() in V.graph.removed_buffers: + assert self.reused_as.get_name() in V.graph.removed_buffers + return NullLine(self.wrapper) + assert self.reused_as.get_name() not in V.graph.removed_buffers + return self + + def 
codegen(self, code: IndentedBuffer): + assert self.node.get_name() not in V.graph.removed_buffers + assert self.reused_as.get_name() not in V.graph.removed_buffers + code.writeline( + self.wrapper.make_buffer_reuse(self.node, self.reused_as, self.delete_old) + ) + + +class NullLine(MemoryPlanningLine): + pass + + +class WrapperCodeGen(CodeGen): + """ + Generate outer wrapper in Python that calls the kernels. + """ + + def __init__(self): + super().__init__() + self._names_iter = count() + self.header = IndentedBuffer() + self.prefix = IndentedBuffer() + self.wrapper_call = IndentedBuffer() + self.src_to_kernel = {} + self.kenel_numel_expr = set() + self.lines = [] + self.declare = "" + self.ending = "" + self.open_bracket = "[" + self.closed_bracket = "]" + self.comment = "#" + self.namespace = "" + self.none_str = "None" + self.size = "size()" + self.stride = "stride()" + self.last_seen_device_guard_index = None + self.supports_intermediate_hooks = True + self.expr_printer = pexpr + self.cached_thread_locals = set() + self.user_defined_kernel_cache: Dict[Tuple[Any, ...], str] = {} + self.unbacked_symbol_decls = set() + + self.write_header() + self.write_prefix() + + if not V.graph.aot_mode: + for name, hashed in V.graph.constant_reprs.items(): + # include a hash so our code cache puts different constants into different files + self.write_constant(name, hashed) + + self.allocated = set() + self.freed: Set[str] = set() + + # maps from reusing buffer to reused buffer + self.reuses = dict() + + self.write_get_raw_stream = functools.lru_cache(None)( # type: ignore[assignment] + self.write_get_raw_stream + ) + + @functools.lru_cache(None) + def add_import_once(line): + self.header.writeline(line) + + self.add_import_once = add_import_once + self._metas = {} + + def write_constant(self, name, hashed): + self.header.writeline(f"{name} = None # {hashed}") + + def write_header(self): + self.header.splice( + f""" + from ctypes import c_void_p, c_long + import torch + import math + import random + import os + import tempfile + from math import inf, nan + from torch._inductor.hooks import run_intermediate_hooks + from torch._inductor.utils import maybe_profile + from torch._inductor.codegen.memory_planning import _align as align + + from torch import device, empty, empty_strided + from {codecache.__name__} import AsyncCompile + from torch._inductor.select_algorithm import extern_kernels + + aten = torch.ops.aten + inductor_ops = torch.ops.inductor + assert_size_stride = torch._C._dynamo.guards.assert_size_stride + alloc_from_pool = torch.ops.inductor._alloc_from_pool + reinterpret_tensor = torch.ops.inductor._reinterpret_tensor + async_compile = AsyncCompile() + + """ + ) + + @cache_on_self + def write_triton_header_once(self): + self.header.splice( + """ + import triton + import triton.language as tl + from torch._inductor.triton_heuristics import grid, start_graph, end_graph + from torch._C import _cuda_getCurrentRawStream as get_cuda_stream + """ + ) + + def add_meta_once(self, meta): + meta = repr(meta) + if meta not in self._metas: + var = f"meta{len(self._metas)}" + self._metas[meta] = var + self.header.writeline(f"{var} = {meta}") + return self._metas[meta] + + @cache_on_self + def get_output_refs(self): + return [x.codegen_reference(self.wrapper_call) for x in V.graph.graph_outputs] + + def mark_output_type(self): + return + + def codegen_input_size_asserts(self): + for name, buf in V.graph.graph_inputs.items(): + if isinstance(buf, sympy.Expr): + continue + + # comparing strides for 0 size 
tensor is tricky. Ignore them for now. + if sympy_product(buf.get_size()) == 0: + continue + size = self.codegen_shape_tuple(buf.get_size()) + stride = self.codegen_shape_tuple(buf.get_stride()) + self.prefix.writeline(f"assert_size_stride({name}, {size}, {stride})") + + def write_prefix(self): + self.prefix.splice( + """ + + async_compile.wait(globals()) + del async_compile + + def call(args): + """ + ) + with self.prefix.indent(): + if config.triton.debug_sync_graph: + self.prefix.writeline("torch.cuda.synchronize()") + inp_len = len(V.graph.graph_inputs.keys()) + if inp_len != 0: + lhs = f"{', '.join(V.graph.graph_inputs.keys())}{'' if inp_len != 1 else ','}" + self.prefix.writeline(f"{lhs} = args") + self.prefix.writeline("args.clear()") + + self.codegen_inputs(self.prefix, V.graph.graph_inputs) + if config.size_asserts: + self.codegen_input_size_asserts() + + def write_get_raw_stream(self, index): + self.write_triton_header_once() + name = f"stream{index}" + self.writeline(f"{name} = get_cuda_stream({index})") + return name + + def next_kernel_suffix(self): + return f"{next(self._names_iter)}" + + def codegen_device_guard_enter(self, device_idx): + self.writeline( + EnterCudaDeviceContextManagerLine( + device_idx, self.last_seen_device_guard_index + ) + ) + self.last_seen_device_guard_index = device_idx + + def codegen_device_guard_exit(self): + self.writeline(ExitCudaDeviceContextManagerLine()) + + def generate_return(self, output_refs): + if output_refs: + self.wrapper_call.writeline("return (" + ", ".join(output_refs) + ", )") + else: + self.wrapper_call.writeline("return ()") + + def generate_end(self, result): + return + + def generate_fallback_kernel(self, fallback_kernel, args): + self.generate_extern_kernel_alloc(fallback_kernel, args) + + def generate_extern_kernel_alloc(self, extern_kernel, args): + ending = self.ending + if config.memory_planning and "view_as_complex" in str(extern_kernel.kernel): + # view operation fallbacks cause issues since inductor + # doesn't know the memory is still needed and might reuse it. 
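+ # e.g. (hypothetical names) 'buf0 = aten.view_as_complex(arg0_1)' is emitted as 'buf0 = aten.view_as_complex(arg0_1).clone()' so the view's storage cannot be clobbered when its source buffer is later reused.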
+ ending = f".clone(){ending}" + output_name = extern_kernel.get_name() + origin_node = extern_kernel.get_origin_node() + kernel_name = extern_kernel.codegen_kernel_name() + self.writeline( + f"{self.declare}{output_name} = {kernel_name}({', '.join(args)}){ending}" + ) + if ( + self.supports_intermediate_hooks + and config.generate_intermediate_hooks + and origin_node is not None + ): + counters["inductor"]["intermediate_hooks"] += 1 + self.writeline( + f"run_intermediate_hooks({origin_node.name!r}, {output_name})" + ) + + def generate_extern_kernel_out(self, output_view, codegen_reference, args, kernel): + if output_view: + args.append(f"out={output_view.codegen_reference()}") + else: + args.append(f"out={codegen_reference}") + self.writeline(f"{kernel}({', '.join(args)})") + + def generate_user_defined_triton_kernel(self, kernel_name, grid, configs, args): + grid, code = user_defined_kernel_grid_fn_code(kernel_name, configs, grid) + # Must happen after free symbols are already codegened + with self.prefix.indent(): + self.prefix.splice(code) + + stream_name = self.write_get_raw_stream(V.graph.scheduler.current_device.index) + self.writeline( + f"{kernel_name}.run({', '.join(args)}, grid={grid}, stream={stream_name})" + ) + + def generate_scatter_fallback( + self, output, inputs, kernel, fn, src_is_tensor, reduce, kwargs + ): + line = f"{kernel}({','.join(map(str, inputs))}" + if kernel == "aten.scatter_": + if reduce: + line += f", reduce={repr(reduce)}" + else: + line += ", ".join([""] + kwargs) + line += f"){self.ending}" + self.writeline(line) + + def generate_extern_kernel_alloc_and_find_schema_if_needed( + self, + name, + kernel, + codegen_args, + cpp_op_schema, + cpp_kernel_key, + cpp_kernel_overload_name="", + op_overload=None, + raw_args=None, + outputs=None, + ): + self.writeline(f"{name} = {kernel}({', '.join(codegen_args)})") + + def generate_inf_and_nan_checker(self, node): + # TODO: Add check for python too. + pass + + @dynamo_timed + def generate(self, is_inference): + if config.profile_bandwidth: + self.write_triton_header_once() + result = IndentedBuffer() + result.splice(self.header) + + with contextlib.ExitStack() as stack: + stack.enter_context(self.wrapper_call.indent()) + if config.profiler_mark_wrapper_call: + self.generate_profiler_mark_wrapper_call(stack) + if config.profile_bandwidth: + self.generate_start_graph() + + # We disable planning during training because it presently increases peak memory consumption. 
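+ # memory_plan() rewrites self.lines with the full MemoryPlanner pass; memory_plan_reuse() only replans the AllocateLine/FreeIfNotReusedLine entries already recorded, for simple buffer reuse.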
+ if is_inference and config.memory_planning: + self.memory_plan() + else: + self.memory_plan_reuse() + + device_cm_stack = contextlib.ExitStack() + for line in self.lines: + if isinstance(line, MemoryPlanningLine): + line.codegen(self.wrapper_call) + elif isinstance( + line, + ( + EnterCudaDeviceContextManagerLine, + ExitCudaDeviceContextManagerLine, + ), + ): + line.codegen(self.wrapper_call, device_cm_stack) + else: + self.wrapper_call.writeline(line) + + output_refs = self.get_output_refs() + self.mark_output_type() + if config.triton.debug_sync_graph: + self.wrapper_call.writeline("torch.cuda.synchronize()") + + if config.profile_bandwidth: + self.generate_end_graph() + + self.generate_return(output_refs) + + self.append_precomputed_sizes_to_prefix() + self.finalize_prefix() + result.splice(self.prefix) + + with result.indent(): + result.splice(self.wrapper_call) + + self.generate_end(result) + + self.add_benchmark_harness(result) + + return result.getvaluewithlinemap() + + def memory_plan(self): + from .memory_planning import MemoryPlanner + + self.lines = MemoryPlanner(self).plan(self.lines) + + def memory_plan_reuse(self): + out_names = V.graph.get_output_names() + + while ( + self.lines + and isinstance(self.lines[-1], MemoryPlanningLine) + # TODO: this seems legit, NullLine has no node + and self.lines[-1].node.name not in out_names # type: ignore[attr-defined] + ): + # these lines will be pointless + self.lines.pop() + + # codegen allocations in two passes + planning_state = MemoryPlanningState() + for i in range(len(self.lines)): + if isinstance(self.lines[i], MemoryPlanningLine): + self.lines[i] = self.lines[i].plan(planning_state) + + def codegen_input_size_var_decl(self, code: IndentedBuffer, name): + code.writeline(f"{self.declare}{name}_size = {name}.{self.size}{self.ending}") + + def codegen_input_stride_var_decl(self, code: IndentedBuffer, name): + code.writeline( + f"{self.declare}{name}_stride = {name}.{self.stride}{self.ending}" + ) + + def codegen_inputs( + self, code: IndentedBuffer, graph_inputs: Dict[str, ir.TensorBox] + ): + """Assign all symbolic shapes to locals""" + + @functools.lru_cache(None) + def sizeof(name): + self.codegen_input_size_var_decl(code, name) + return f"{name}_size" + + @functools.lru_cache(None) + def strideof(name): + self.codegen_input_stride_var_decl(code, name) + return f"{name}_stride" + + # Assign all symbolic shapes needed to local variables + needed = V.graph.sizevars.free_symbols() + + def is_expr(x): + return isinstance(x[1], sympy.Expr) + + graph_inputs_expr = list(filter(is_expr, graph_inputs.items())) + graph_inputs_tensors = list( + filter(lambda x: not is_expr(x), graph_inputs.items()) + ) + + for name, shape in graph_inputs_expr: + shape = V.graph.sizevars.simplify(shape) + if shape in needed: + needed.remove(shape) + code.writeline(f"{self.declare}{shape} = {name}{self.ending}") + + for name, value in graph_inputs_tensors: + shapes = value.get_size() + for dim, shape in enumerate(shapes): + shape = V.graph.sizevars.simplify(shape) + if shape in needed: + needed.remove(shape) + code.writeline( + f"{self.declare}{shape} = {sizeof(name)}[{dim}]{self.ending}" + ) + + for name, value in graph_inputs_tensors: + shapes = value.get_stride() + for dim, shape in enumerate(shapes): + shape = V.graph.sizevars.simplify(shape) + if shape in needed: + needed.remove(shape) + code.writeline( + f"{self.declare}{shape} = {strideof(name)}[{dim}]{self.ending}" + ) + + def append_precomputed_sizes_to_prefix(self): + with self.prefix.indent(): + 
for sym, expr in V.graph.sizevars.inv_precomputed_replacements.items(): + self.prefix.writeline( + f"{self.declare}{sym} = {self.expr_printer(expr)}{self.ending}" + ) + + def finalize_prefix(self): + pass + + def codegen_python_sizevar(self, x: Expr) -> str: + return pexpr(V.graph.sizevars.simplify(x)) + + def codegen_sizevar(self, x: Expr) -> str: + return self.codegen_python_sizevar(x) + + def codegen_tuple_access(self, basename: str, name: str, index: str) -> str: + return f"{basename}[{index}]" + + def codegen_python_shape_tuple(self, shape: Tuple[Expr, ...]) -> str: + parts = list(map(self.codegen_python_sizevar, shape)) + if len(parts) == 0: + return "()" + if len(parts) == 1: + return f"({parts[0]}, )" + return f"({', '.join(parts)})" + + def codegen_shape_tuple(self, shape: Tuple[Expr, ...]) -> str: + return self.codegen_python_shape_tuple(shape) + + def codegen_alloc_from_pool(self, name, offset, dtype, shape, stride) -> str: + return "alloc_from_pool({})".format( + ", ".join( + [ + name, + pexpr(offset), # bytes not numel + str(dtype), + self.codegen_shape_tuple(shape), + self.codegen_shape_tuple(stride), + ] + ) + ) + + def codegen_reinterpret_view(self, data, size, stride, offset, writer) -> str: + size = self.codegen_shape_tuple(size) + stride = self.codegen_shape_tuple(stride) + offset = self.codegen_sizevar(offset) + return f"reinterpret_tensor({data.get_name()}, {size}, {stride}, {offset})" + + def codegen_device_copy(self, src, dst): + self.writeline(f"{dst}.copy_({src})") + + def codegen_multi_output(self, name, value): + self.writeline(f"{self.declare}{name} = {value}{self.ending}") + + def benchmark_compiled_module(self, output): + def add_fake_input(name, shape, stride, device, dtype): + output.writeline( + f"{name} = rand_strided(" + f"{self.codegen_python_shape_tuple(shape)}, " + f"{self.codegen_python_shape_tuple(stride)}, " + f"device='{device}', dtype={dtype})" + ) + + def add_expr_input(name, val): + output.writeline(f"{name} = {val}") + + output.writelines( + ["", "", "def benchmark_compiled_module(times=10, repeat=10):"] + ) + with output.indent(): + output.splice( + """ + from torch._dynamo.testing import rand_strided + from torch._inductor.utils import print_performance + """, + strip=True, + ) + + for name, value in V.graph.constants.items(): + # all the constants are global variables, that's why we need + # these 'global var_name' lines + output.writeline(f"global {name}") + add_fake_input( + name, value.size(), value.stride(), value.device, value.dtype + ) + + for name, value in V.graph.graph_inputs.items(): + if isinstance(value, sympy.Symbol) and isinstance( + V.graph.sizevars.var_to_val.get(value, None), SingletonInt + ): + # Inductor should only work with dense -> dense graph, and + # SingletonInts belong to metadata that should only live on + # the subclass. 
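+ # Inputs backed by SingletonInt hints are skipped; every other graph input becomes either a plain scalar (add_expr_input) or a rand_strided fake tensor (add_fake_input) in the benchmark harness.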
+ continue + if isinstance(value, sympy.Expr): # Don't need to add symbolic + add_expr_input(name, V.graph.sizevars.size_hint(value)) + else: + shape = [V.graph.sizevars.size_hint(x) for x in value.get_size()] + stride = [V.graph.sizevars.size_hint(x) for x in value.get_stride()] + add_fake_input( + name, shape, stride, value.get_device(), value.get_dtype() + ) + + call_str = f"call([{', '.join(V.graph.graph_inputs.keys())}])" + output.writeline(f"fn = lambda: {call_str}") + output.writeline("return print_performance(fn, times=times, repeat=repeat)") + + def add_benchmark_harness(self, output): + """ + Append a benchmark harness to generated code for debugging + """ + if not config.benchmark_harness: + return + + self.benchmark_compiled_module(output) + + output.writelines(["", "", 'if __name__ == "__main__":']) + with output.indent(): + output.writelines( + [ + "from torch._inductor.wrapper_benchmark import compiled_module_main", + f"compiled_module_main('{get_benchmark_name()}', benchmark_compiled_module)", + ] + ) + + def define_kernel( + self, name: str, kernel: str, metadata: Optional[str] = None, cuda=True + ): + metadata_comment = f"{metadata}\n" if metadata else "" + self.header.splice(f"\n\n{metadata_comment}{name} = {kernel}") + + def define_user_defined_triton_kernel(self, kernel, configs, kwargs): + original_name = kernel.__name__ + + # Distinguish between different functions using function id + cache_key = [id(kernel.fn)] + for arg in kwargs.values(): + if isinstance(arg, (ir.Buffer, ir.ReinterpretView)): + cache_key.append(arg.get_dtype()) + elif len(configs) > 0: + # We need to key on non tensor arg only in autotune mode + cache_key.append(arg) + cache_key = tuple(cache_key) + + if cache_key in self.user_defined_kernel_cache: + return self.user_defined_kernel_cache[cache_key] + + name = f"{original_name}_{len(self.user_defined_kernel_cache)}" + # Add to the cache for the next use + self.user_defined_kernel_cache[cache_key] = name + + compile_wrapper = IndentedBuffer() + compile_wrapper.writeline(f"async_compile.triton({original_name!r}, '''") + + compile_wrapper.splice( + """ + import triton + import triton.language as tl + from torch._inductor.utils import instance_descriptor + from torch._inductor.triton_heuristics import user_autotune + """, + strip=True, + ) + compile_wrapper.newline() + + from .common import SizeArg, TensorArg + + signature: List[Union[TensorArg, SizeArg]] = [] + constants = {} + for key, arg in kwargs.items(): + idx = kernel.arg_names.index(key) + if idx in kernel.constexprs: + constants[key] = arg + continue + if isinstance(arg, (ir.Buffer, ir.ReinterpretView)): + signature.append( + TensorArg( + key, + arg.codegen_reference(), + arg.get_dtype(), + # For ReinterpretView, we do not want to check alignment + not isinstance(arg, ReinterpretView), + ) + ) + else: + signature.append(SizeArg(key, arg)) + index_dtype = "tl.int32" + inductor_meta = { + "kernel_name": name, + } + triton_meta = { + "signature": signature_to_meta(signature, size_dtype=index_dtype), + "device": V.graph.scheduler.current_device.index, + "device_type": V.graph.scheduler.current_device.type, + "constants": constants, + "configs": [config_of(signature)], + } + configs = [ + { + "kwargs": config.kwargs, + "num_warps": config.num_warps, + "num_stages": config.num_stages, + } + for config in configs + ] + compile_wrapper.splice( + f""" + @user_autotune( + configs={configs!r}, + inductor_meta={inductor_meta!r}, + triton_meta={triton_meta!r}, + filename=__file__ + ) + @triton.jit + """ + 
) + compile_wrapper.splice(kernel.src, strip=True) + + # Also include any possible kernel being called indirectly + from triton import JITFunction + + symbols_included = {original_name} + + def traverse(cur_kernel): + for symbol_name in cur_kernel.fn.__code__.co_names: + if symbol_name in symbols_included: + continue + if symbol_name in cur_kernel.fn.__globals__: + symbol = cur_kernel.fn.__globals__[symbol_name] + if isinstance(symbol, JITFunction): + compile_wrapper.newline() + compile_wrapper.writeline("@triton.jit") + compile_wrapper.splice(symbol.src, strip=True) + symbols_included.add(symbol_name) + traverse(symbol) + elif isinstance(symbol, (int, str, bool)): + compile_wrapper.newline() + compile_wrapper.writeline(f"{symbol_name} = {symbol!r}") + symbols_included.add(symbol_name) + + traverse(kernel) + + compile_wrapper.writeline("''')") + _, lineno = inspect.getsourcelines(kernel.fn) + srcfile = inspect.getsourcefile(kernel.fn) + metadata = f"# Original path: {srcfile}:{lineno}" + self.define_kernel( + name, + compile_wrapper.getvalue(), + metadata, + ) + return name + + def generate_numel_expr(self, kernel_name: str, tree): + expr = f"{kernel_name}_{tree.prefix}numel" + if expr not in self.kenel_numel_expr: + self.kenel_numel_expr.add(expr) + self.writeline( + f"{self.declare}{expr} = {self.expr_printer(tree.numel)}{self.ending}" + ) + else: + self.writeline(f"{expr} = {self.expr_printer(tree.numel)}{self.ending}") + # We can get symbolic expressions here, like s0*64 + # It is fine to have them here, but we need to handle them correctly as their own type + # This is tricky to do, so we wrap in a custom type, distinct from scalars, but also from sympy* + # scalars as well. + # This is handled in `generate_args_decl` which has a correct comment of: TODO: only works for + # constant now, need type info. I agree, this needs type info, and while this is not true type info + # it suffices as a type hint for the purposes of producing the correct code for this type. + return SymbolicCallArg(expr, tree.numel) + + def wrap_kernel_call(self, name, call_args): + return f"{name}({', '.join(call_args)}){self.ending}" + + def generate_profiler_mark_wrapper_call(self, stack): + self.wrapper_call.writeline("from torch.profiler import record_function") + self.wrapper_call.writeline( + f"with record_function('graph_{V.graph.graph_id}_inductor_wrapper_call'):" + ) + stack.enter_context(self.wrapper_call.indent()) + + def generate_start_graph(self): + self.wrapper_call.writeline("start_graph()") + + def generate_end_graph(self): + self.wrapper_call.writeline("end_graph()") + + def generate_default_grid(self, name: str, grid_args: List[Any]): + return grid_args + + def generate_kernel_call( + self, + name, + call_args, + grid=None, + device_index=None, + cuda=True, + triton=True, + ): + """ + Generates kernel call code. + + cuda: Defines whether the backend is GPU. Otherwise the backend is CPU. + + triton: Defines whether the GPU backend uses Triton for codegen. + Otherwise it uses the CUDA language for codegen. + Only valid when cuda == True. 
+ """ + if cuda: + call_args_str = ", ".join(pexpr(item) for item in call_args) + stream_name = self.write_get_raw_stream( + V.graph.scheduler.current_device.index + ) + if triton: + grid_str = ", ".join(pexpr(item) for item in grid) + self.writeline( + f"{name}.run({call_args_str}, grid=grid({grid_str}), stream={stream_name})" + ) + else: + stream_ptr = f"c_void_p({stream_name})" + self.writeline(f"{name}.{name}({call_args_str}, {stream_ptr})") + else: + self.writeline(self.wrap_kernel_call(name, call_args)) + + def writeline(self, line): + self.lines.append(line) + + def enter_context(self, ctx): + self.lines.append(LineContext(ctx)) + + def val_to_cpp_arg_str(self, type_, val, is_legacy_abi) -> str: + raise NotImplementedError() + + def val_to_arg_str(self, s): + if isinstance(s, SymTypes): + return pexpr(sympy.expand(repr(s))) + elif isinstance(s, sympy.Expr): + return pexpr(s) + elif isinstance(s, (tuple, list)): + + @dataclasses.dataclass + class Shim: + ref: Any + + def __repr__(self): + return self.ref + + return repr(type(s)(Shim(self.val_to_arg_str(a)) for a in s)) + elif isinstance(s, torch._ops.OpOverload): + return _get_qualified_name(s) + elif isinstance(s, (ComputedBuffer, InputBuffer, ReinterpretView)): + return s.codegen_reference() + else: + return repr(s) + + # The following methods are for memory management + def make_buffer_allocation(self, buffer): + device = buffer.get_device() + dtype = buffer.get_dtype() + shape = tuple(buffer.get_size()) + stride = tuple(buffer.get_stride()) + return self.make_allocation(buffer.get_name(), device, dtype, shape, stride) + + def make_allocation(self, name, device, dtype, shape, stride): + try: + expected = tuple(ir.make_contiguous_strides_for(shape)) + except Exception: # cannot determine truth value of Relational + expected = None + if stride == expected: + return ( + f"{name} = empty(" + f"{self.codegen_shape_tuple(shape)}, " + f"device='{device.type}', dtype={dtype})" + ) + else: + return ( + f"{name} = empty_strided(" + f"{self.codegen_shape_tuple(shape)}, " + f"{self.codegen_shape_tuple(stride)}, " + f"device='{device.type}', dtype={dtype})" + ) + + def make_tensor_alias(self, new_name, old_name, comment=""): + return f"{self.declare}{new_name} = {old_name}{self.ending} {self.comment} {comment}" + + def make_buffer_free(self, buffer): + return f"del {buffer.get_name()}" + + def make_free_by_names(self, names_to_del: List[str]): + return f"del {', '.join(name for name in names_to_del)}" + + def codegen_exact_buffer_reuse(self, old_name: str, new_name: str, del_line: str): + return f"{self.declare}{new_name} = {old_name}{del_line}{self.ending} {self.comment} reuse" + + def make_buffer_reuse(self, old, new, delete_old: bool): + assert old.get_dtype() == new.get_dtype() + old_name = old.get_name() + new_name = new.get_name() + del_line = ";" + if old_name not in V.graph.get_output_names() and delete_old: + del_line = f"; {self.make_buffer_free(old)}" + + if old.get_size() == new.get_size() and old.get_stride() == new.get_stride(): + if old_name in self.cached_thread_locals: + self.cached_thread_locals.add(new_name) + return self.codegen_exact_buffer_reuse(old_name, new_name, del_line) + + reinterpret_view = self.codegen_reinterpret_view( + old, new.get_size(), new.get_stride(), 0, self.wrapper_call + ) + if reinterpret_view in self.cached_thread_locals: + self.cached_thread_locals.add(new_name) + return f"{self.declare}{new_name} = {reinterpret_view}{del_line} {self.comment} reuse" + + def codegen_deferred_allocation(self, name, 
layout): + self.writeline( + DeferredLine( + name, + f"{self.declare}{name} = {layout.view.codegen_reference()}{self.ending} {self.comment} alias", + ) + ) + + def codegen_allocation(self, buffer): + assert ( + buffer.get_workspace_size() == 0 + ), "Only support zero workspace size for now!" + + name = buffer.get_name() + + if name in V.graph.removed_buffers or name in self.allocated: + return + self.allocated.add(name) + if isinstance( + buffer, + (ir.ExternKernelAlloc, ir.MultiOutput), + ): + return + + layout = buffer.get_layout() + if isinstance(layout, ir.MutationLayout): + return + if isinstance(layout, ir.AliasedLayout): + assert isinstance( + layout.view, ir.ReinterpretView + ), f"unexpected {type(layout.view)}: {layout.view}" + self.codegen_allocation(layout.view.data) + self.codegen_deferred_allocation(name, layout) + return + + self.writeline(AllocateLine(self, buffer)) + + def codegen_free(self, buffer): + assert ( + buffer.get_workspace_size() == 0 + ), "Only support zero workspace size for now!" + + name = buffer.get_name() + + # can be freed but not reused + if isinstance(buffer, ir.InputBuffer): + self.writeline(self.make_buffer_free(buffer)) + return + + if not self.can_reuse(buffer): + return + self.freed.add(name) + + self.writeline(FreeIfNotReusedLine(self, buffer)) + + def can_reuse(self, input_buffer, output_buffer=None): + name = input_buffer.get_name() + if ( + name in V.graph.removed_buffers + or name in V.graph.graph_inputs + or name in V.graph.constants + or name in V.graph.never_reuse_buffers + or name in self.freed + ): + return False + + return True + + def did_reuse(self, buffer, reused_buffer): + # Check whether a given buffer was reused by a possible reuser in the wrapper codegen + # Can be consulted from inside ir codegen, e.g. 
to determine whether a copy is needed + return ( + buffer.get_name() in self.reuses + and self.reuses[buffer.get_name()] == reused_buffer.get_name() + ) + + def codegen_inplace_reuse(self, input_buffer, output_buffer): + assert buffer_reuse_key(input_buffer) == buffer_reuse_key(output_buffer) + self.codegen_allocation(input_buffer) + self.freed.add(input_buffer.get_name()) + self.allocated.add(output_buffer.get_name()) + self.reuses[output_buffer.get_name()] = input_buffer.get_name() + self.writeline(ReuseLine(self, input_buffer, output_buffer)) + + def codegen_unbacked_symbol_decl(self, symbol): + name = str(symbol) + if name in self.unbacked_symbol_decls: + return name + else: + # When in CppWrapperCodeGen, we should only generate the declaration once + self.unbacked_symbol_decls.add(name) + return self.declare + name + + +class CppWrapperCodeGen(WrapperCodeGen): + """ + Generates cpp wrapper for running on CPU and calls cpp kernels + """ + + def __init__(self): + super().__init__() + + self.declare = "auto " + self.ending = ";" + self.open_bracket = "{" + self.closed_bracket = "}" + self.comment = "//" + self.namespace = "at::" + self.none_str = "at::Tensor()" + self.extern_call_ops = set() + self.size = "sizes()" + self.stride = "strides()" + self.call_func_name = "inductor_entry_cpp" + self.cuda = False + self.supports_intermediate_hooks = False + self.outputs_need_copy = set() + self.kernel_callsite_id = count() + self.int_array_id = count() # for int array local variable declarations + self.declared_int_array_vars = set() + self.tmp_tensor_id = count() # for tmp tensor local variable declarations + self.arg_var_id = count() + self.used_cached_dtypes = set() + + from .cpp import cexpr, CppPrinter + + self.expr_printer = cexpr + + # CppPrinter sometimes calls at::native functions which causes problems in + # the ABI-compatible mode. Currently we are hitting this problem when codegen + # Grid computation expressions, but we my need to fix other size computation + # as well. + class GridExprCppPrinter(CppPrinter): + def _print_FloorDiv(self, expr): + x, div = expr.args + x = self.paren(self.doprint(x)) + div = self.paren(self.doprint(div)) + assert expr.is_integer, "Expect integers in GridExprPrinter" + return f"({x}/{div})" + + self.grid_expr_printer = GridExprCppPrinter().doprint + + def generate_kernel_call( + self, + name, + call_args, + grid=None, + device_index=None, + cuda=True, + triton=True, + ): + """ + Generates kernel call code. + + cuda: Defines whether the backend is GPU. Otherwise the backend is CPU. + + triton: Defines whether the GPU backend uses Triton for codegen. + Otherwise it uses the CUDA language for codegen. + Only valid when cuda == True. 
+ """ + if cuda: + return super().generate_kernel_call( + name, call_args, grid, device_index, cuda, triton + ) + else: + if V.graph.aot_mode and config.aot_inductor.abi_compatible: + from .cpp import DTYPE_TO_CPP + + new_args = [] + for arg in call_args: + var_name = f"var_{next(self.arg_var_id)}" + self.writeline(f"void *{var_name}{self.ending}") + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr({arg}, &{var_name}));" + ) + dtype = V.graph.get_dtype(arg) + cpp_dtype = DTYPE_TO_CPP[dtype] + new_args.append(f"({cpp_dtype}*)({var_name})") + self.writeline(self.wrap_kernel_call(name, new_args)) + else: + self.writeline(self.wrap_kernel_call(name, call_args)) + + def write_constant(self, name, hashed): + # include a hash so our code cache gives different constants different files + self.header.writeline(f"// {name} {hashed}") + + def write_header(self): + if V.graph.aot_mode: + with open( + os.path.join(os.path.dirname(__file__), "aoti_runtime", "interface.cpp") + ) as f: + self.header.splice(f.read()) + else: + self.header.splice( + """ + import torch + from torch._inductor.codecache import CppWrapperCodeCache + + cpp_wrapper_src = ( + ''' + """ + ) + + if config.aot_inductor.abi_compatible: + self.header.splice("#include ") + else: + self.header.splice( + """ + #include + #include + #include + #include + #include + #define reinterpret_tensor torch::inductor::_reinterpret_tensor + #define alloc_from_pool torch::inductor::_alloc_from_pool + """ + ) + + self.header.splice("#include ") + + from .memory_planning import ALIGN_BYTES + + # Round up to the nearest multiple of ALIGN_BYTES + # ALIGN_BYTES must be a power of 2 + self.header.splice( + f""" + [[maybe_unused]] static int64_t align(int64_t nbytes) {{ + return (nbytes + {ALIGN_BYTES} - 1) & -{ALIGN_BYTES}; + }} + """ + ) + + def mark_output_type(self): + # mark output type to unwrap tensor back to python scalar + from ..ir import ShapeAsConstantBuffer + + output_is_tensor = dict() + for idx, x in enumerate(V.graph.graph_outputs): + if isinstance(x, ShapeAsConstantBuffer): + output_is_tensor[idx] = False + else: + output_is_tensor[idx] = True + + self.output_is_tensor = output_is_tensor + + def write_prefix(self): + if V.graph.aot_mode: + self.prefix.writeline("namespace torch {") + self.prefix.writeline("namespace aot_inductor {") + + def write_input_output_info( + self, + info_kind: str, + idx: int, + name: str, + ): + self.prefix.writeline(f"""{info_kind}[{idx}].name = "{name}";""") + + def write_wrapper_decl(self): + inputs_len = len(V.graph.graph_inputs.keys()) + if V.graph.aot_mode: + self.prefix.splice( + """ + void AOTInductorModel::run_impl( + AtenTensorHandle* + input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + AtenTensorHandle* + output_handles, // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor + ) { + """ + ) + else: + self.prefix.splice( + f"""std::vector {self.call_func_name}(const std::vector& inputs) {{""" + ) + with self.prefix.indent(): + # assign inputs and outputs in both cases so the later codegen can be simplified + if V.graph.aot_mode: + if config.aot_inductor.abi_compatible: + self.prefix.splice( + """ + auto inputs = steal_from_raw_handles_to_raii_handles(input_handles, num_inputs()); + """ + ) + else: + # This looks dumb, but can avoid creating two versions of code in the AOTInductor runtime. 
+ self.prefix.splice( + """ + auto inputs = alloc_tensors_by_stealing_from_handles(input_handles, num_inputs()); + """ + ) + else: + self.prefix.splice( + """ + py::gil_scoped_release release; + """ + ) + + if inputs_len != 0: + for idx, input_key in enumerate(V.graph.graph_inputs.keys()): + # unwrap input tensor back to scalar + if isinstance(V.graph.graph_inputs[input_key], sympy.Expr): + from ..graph import may_get_constant_buffer_dtype + from .cpp import DTYPE_TO_CPP + + dtype = may_get_constant_buffer_dtype( + V.graph.graph_inputs[input_key] + ) + assert ( + dtype is not None + ), "Fails to get the dtype of the sympy.Expr" + cpp_dtype = DTYPE_TO_CPP[dtype] + assert ( + not config.aot_inductor.abi_compatible + ), "Need to add .item support for abi_compatible AOTInductor codegen" + self.prefix.writeline( + f"{cpp_dtype} {input_key} = inputs[{idx}].item<{cpp_dtype}>();" + ) + else: + self.prefix.writeline( + f"auto {input_key} = std::move(inputs[{idx}]);" + ) + + assert all( + isinstance(v, torch.Tensor) for v in list(V.graph.constants.values()) + ), "Expect all constants to be Tensor" + for idx, constants_key in enumerate(V.graph.constants.keys()): + if V.graph.aot_mode: + # Weights are stored in constants_ and owned by RAIIAtenTensorHandle there. + # Don't call std::move here because it will cause constants_ to lose the ownership. + if config.aot_inductor.abi_compatible: + self.prefix.writeline( + f"""auto {constants_key} = constants_.at({idx});""" + ) + else: + self.prefix.writeline( + f"auto {constants_key} = *tensor_handle_to_tensor_pointer(" + + f"""constants_.at({idx}));""" + ) + else: + # Append constants as inputs to the graph + constants_idx = inputs_len + idx + self.prefix.writeline( + f"auto {constants_key} = inputs[{constants_idx}];" + ) + + self.codegen_inputs(self.prefix, V.graph.graph_inputs) + + if V.graph.aot_mode: + self.prefix.writeline("inputs.clear();") + self.prefix.writeline( + "auto& kernels = *dynamic_cast(this->kernels_.get());" + ) + + def codegen_input_size_var_decl(self, code: IndentedBuffer, name): + if config.aot_inductor.abi_compatible: + code.writeline(f"int64_t* {name}_size;") + code.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_sizes({name}, &{name}_size));" + ) + else: + super().codegen_input_size_var_decl(code, name) + + def codegen_input_stride_var_decl(self, code: IndentedBuffer, name): + if config.aot_inductor.abi_compatible: + code.writeline(f"int64_t* {name}_stride;") + code.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_strides({name}, &{name}_stride));" + ) + else: + super().codegen_input_stride_var_decl(code, name) + + def codegen_model_kernels(self): + self.prefix.writeline("namespace {") + self.prefix.writeline( + "class AOTInductorModelKernels : public AOTInductorModelKernelsBase {" + ) + self.prefix.writeline(" public:") + for kernel in chain( + self.src_to_kernel.values(), self.user_defined_kernel_cache.values() + ): + self.prefix.writeline(f" CUfunction {kernel}{{nullptr}};") + self.prefix.writeline("};") + self.prefix.writeline("} // namespace") + + def codegen_model_constructor(self): + """ + // Generated code example + AOTInductorModel::AOTInductorModel() + : AOTInductorModelBase(4, 1) { + inputs_info_[0].name = "input0"; + inputs_info_[0].dtype = "torch.float16"; + ... + constants_info_[0].name = "L__self___weight"; + constants_info_[0].dtype = at::kFloat; + constants_info_[0].offset = 0; + constants_info_[0].data_size = 8192; + constants_info_[0].shape = {64, 32}; + constants_info_[0].stride = {32, 1}; + ... 
+ outputs_info_[0].name = "output0"; + outputs_info_[0].dtype = "torch.float16"; + } + """ + + num_inputs = len(V.graph.graph_inputs) + num_outputs = len(V.graph.graph_outputs) + num_constants = len(V.graph.constants) + self.prefix.splice( + f""" + AOTInductorModel::AOTInductorModel(std::shared_ptr constants_map, std::optional cubin_dir) + : AOTInductorModelBase({num_inputs}, {num_outputs}, {num_constants}, cubin_dir) {{ + """ + ) + + with self.prefix.indent(): + for idx, (name, inp) in enumerate(V.graph.graph_inputs.items()): + assert not isinstance( + inp, sympy.Expr + ), f"input {name=} cannot be symbolic" + self.write_input_output_info("inputs_info_", idx, name) + + for idx, (name, tensor) in enumerate(V.graph.constants.items()): + assert isinstance(tensor, torch.Tensor) + self.prefix.writeline(f"""constants_info_[{idx}].name = "{name}";""") + self.prefix.writeline( + f"constants_info_[{idx}].dtype = static_cast({self.codegen_dtype(tensor.dtype)});" + ) + self.prefix.writeline( + f"constants_info_[{idx}].offset = {tensor.storage_offset()};" + ) + self.prefix.writeline( + f"constants_info_[{idx}].data_size = {tensor.untyped_storage().nbytes()};" + ) + + size_str = ", ".join([str(s) for s in tensor.size()]) + self.prefix.writeline(f"constants_info_[{idx}].shape = {{{size_str}}};") + + stride_str = ", ".join([str(s) for s in tensor.stride()]) + self.prefix.writeline( + f"constants_info_[{idx}].stride = {{{stride_str}}};" + ) + + self.prefix.writeline("update_constants_map(std::move(constants_map));") + + def escape_string(x): + return ( + x.replace("\\", "\\\\") + .replace('"', '\\"') + .replace("\n", "\\n") + .replace("\t", "\\t") + ) + + self.prefix.writeline( + f'in_spec_ = "{escape_string(config.aot_inductor.serialized_in_spec)}";' + ) + self.prefix.writeline( + f'out_spec_ = "{escape_string(config.aot_inductor.serialized_out_spec)}";' + ) + + for idx, output in enumerate(V.graph.graph_outputs): + assert not isinstance( + output, sympy.Expr + ), f"output {name=} cannot be symbolic" + name = f"output{idx}" + self.write_input_output_info("outputs_info_", idx, name) + + self.prefix.writeline( + "this->kernels_ = std::make_unique();" + ) + + self.prefix.writeline("}") + + def generate(self, is_inference): + if V.graph.aot_mode: + self.codegen_model_kernels() + self.codegen_model_constructor() + self.write_wrapper_decl() + return super().generate(is_inference) + + def finalize_prefix(self): + cached_dtypes_buffer = IndentedBuffer() + if config.aot_inductor.abi_compatible: + for dtype in self.used_cached_dtypes: + cached_dtypes_buffer.writeline(f"CACHE_TORCH_DTYPE({dtype});") + cached_dtypes_buffer.splice(self.prefix) + self.prefix = cached_dtypes_buffer + + def define_kernel( + self, name: str, kernel: str, metadata: Optional[str] = None, cuda=False + ): + self.header.splice(f"\n{kernel}\n") + + def generate_return(self, output_refs): + if V.graph.aot_mode: + cst_names = V.graph.constants.keys() + for idx, output in enumerate(output_refs): + if output in cst_names: + # In some rare cases where we return a constant, we + # have to return a copy of this constant, because + # (1) constants are not owned by the Model instance + # (2) constants remain the same cross inference runs, + # assuming they are not updated at runtime + # Basically, we cannot release or transfer the ownership + # of any origianl constant to the user. 
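For reference, the per-constant fields recorded by the constructor codegen above map directly onto tensor properties. A small illustrative sketch (assuming torch is importable) that reproduces the numbers from the generated-code example in the docstring:

import torch

# weight matching the {64, 32} float example in the docstring above
weight = torch.zeros(64, 32)
constants_info = {
    "offset": weight.storage_offset(),               # 0
    "data_size": weight.untyped_storage().nbytes(),  # 64 * 32 * 4 bytes = 8192
    "shape": list(weight.size()),                    # [64, 32]
    "stride": list(weight.stride()),                 # [32, 1]
}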
+ if config.aot_inductor.abi_compatible: + self.wrapper_call.writeline( + f"aoti_torch_clone({output}, &output_handles[{idx}]);" + ) + else: + self.wrapper_call.writeline( + f"output_handles[{idx}] = reinterpret_cast(" + + f"new at::Tensor(std::move({output}.clone())));" + ) + else: + if config.aot_inductor.abi_compatible: + if output in self.cached_thread_locals: + self.wrapper_call.writeline( + f"aoti_torch_new_uninitialized_tensor(&output_handles[{idx}]);" + ) + self.wrapper_call.writeline( + f"aoti_torch_assign_tensors({output}, output_handles[{idx}]);" + ) + else: + self.wrapper_call.writeline( + f"output_handles[{idx}] = {output}.release();" + ) + + else: + self.wrapper_call.writeline( + f"output_handles[{idx}] = reinterpret_cast(" + + f"new at::Tensor({output}));" + ) + else: + self.wrapper_call.writeline(f"return {{{', '.join(output_refs)}}};\n}}") + + def generate_end(self, result): + if V.graph.aot_mode: + result.writeline("} // AOTInductorModel::run_impl") + result.writeline("} // namespace aot_inductor") + result.writeline("} // namespace torch") + return + + result.writeline("'''\n)") + # get the hash of the wrapper code to name the extension + wrapper_call_hash = codecache.code_hash(result.getvalue()) + result.splice( + f""" + module = CppWrapperCodeCache.load(cpp_wrapper_src, '{self.call_func_name}', '{wrapper_call_hash}', {self.cuda}) + """ + ) + + # unwrap output tensor back to python scalar + if all(x for x in self.output_is_tensor.values()): + # If no ShapeAsConstantBuffer in the output, directly return the output as tensors + return_str = "return f(args_tensor)" + else: + outputs = [ + f"outputs[{i}]" if self.output_is_tensor[i] else f"outputs[{i}].item()" + for i in range(len(V.graph.graph_outputs)) + ] + outputs_str = f"[{', '.join(outputs)}]" + return_str = f""" + outputs = f(args_tensor) + return {outputs_str} + """ + + args_str = "args_tensor = [arg if isinstance(arg, torch.Tensor) else torch.tensor(arg) for arg in args]" + if V.graph.constants: + # Append constants to the input args for cpp wrapper. + # Python wrapper directly gets the value inside the wrapper call + # as a global variable passed when calling exec(code, mod.__dict__, mod.__dict__). + # For cpp wrapper, we need to pass this python value to the inductor_entry_cpp function explicitly. 
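The non-AOT branch of generate_end stitches a small piece of Python glue around the compiled entry point: scalar args are boxed into tensors, constants are appended, and any ShapeAsConstantBuffer output is unboxed again with .item(). A simplified, self-contained model of that glue; inductor_entry_cpp here is a hypothetical stand-in that just echoes its inputs:

import torch

output_is_tensor = {0: True, 1: False}  # hypothetical flags, as mark_output_type would build them

def inductor_entry_cpp(args_tensor):
    # stand-in for the compiled C++ entry point
    return args_tensor

def _wrap_func(f):
    def g(args):
        args_tensor = [a if isinstance(a, torch.Tensor) else torch.tensor(a) for a in args]
        outputs = f(args_tensor)
        return [outputs[i] if output_is_tensor[i] else outputs[i].item()
                for i in range(len(outputs))]
    return g

call = _wrap_func(inductor_entry_cpp)
print(call([torch.ones(2), 3]))  # -> [tensor([1., 1.]), 3]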
+ assert all( + isinstance(v, torch.Tensor) for v in list(V.graph.constants.values()) + ), "Expect all constants to be Tensor" + constants_str = f"[{', '.join(V.graph.constants.keys())}]" + args_str += f""" + constants_tensor = {constants_str} + args_tensor.extend(constants_tensor) + """ + + # Wrap the func to support setting result._boxed_call = True + result.splice( + f""" + def _wrap_func(f): + def g(args): + {args_str} + {return_str} + return g + call = _wrap_func(module.{self.call_func_name}) + """ + ) + + def generate_c_shim_extern_kernel_call(self, kernel, args): + # In the abi_compatible mode, we call fallback aten ops through a C shim layer + kernel_tokens = kernel.split("::") + kernel_suffix = kernel_tokens[-1] + if kernel_suffix == "call": + kernel_suffix = kernel_tokens[-2] + shim_fn = f"aoti_torch_{kernel_suffix}" + self.writeline(f"AOTI_TORCH_ERROR_CODE_CHECK({shim_fn}({', '.join(args)}));") + + def generate_c_shim_extern_kernel_alloc(self, extern_kernel, args): + # registered output buffer name + name = extern_kernel.name + output_handle_name = f"{name}_handle" + self.writeline(f"AtenTensorHandle {output_handle_name};") + output_arg = f"&{output_handle_name}" + self.generate_c_shim_extern_kernel_call( + extern_kernel.codegen_kernel_name(), args + [output_arg] + ) + self.writeline(f"RAIIAtenTensorHandle {name}({output_handle_name});") + + def generate_extern_kernel_alloc(self, extern_kernel, args): + if V.graph.aot_mode and config.aot_inductor.abi_compatible: + self.generate_c_shim_extern_kernel_alloc(extern_kernel, args) + else: + super().generate_extern_kernel_alloc(extern_kernel, args) + + def generate_c_shim_fallback_kernel(self, fallback_kernel, args): + output_args = [] + output_raii_handles = [] + output_name_base = fallback_kernel.get_name() + for idx, output in enumerate(fallback_kernel.outputs): + if isinstance(output, ir.MultiOutput): + name = f"{output.get_name()}" + output_handle_name = f"{name}_handle" + if output.indices: + assert ( + output.indices[0][1] == idx + ), f"expected {output.indices[0][1]=} == {idx=} for {output_name_base=}" + self.writeline(f"AtenTensorHandle {output_handle_name};") + output_args.append(f"&{output_handle_name}") + output_raii_handles.append( + f"RAIIAtenTensorHandle {name}({output_handle_name});" + ) + elif isinstance(output, int): + output_name = f"{output_name_base}_{idx}" + self.writeline(f"int64_t {output_name} = {output};") + output_args.append(f"&{output_name}") + elif output is None: + output_args.append("nullptr") + else: + raise NotImplementedError("unsupported type of {output=}") + args = args + output_args + assert ( + fallback_kernel.abi_compatible_kernel is not None + ), f"abi_compatible_kernel is None for {fallback_kernel.kernel=}" + self.generate_c_shim_extern_kernel_call( + fallback_kernel.abi_compatible_kernel, args + ) + for raii_handle in output_raii_handles: + self.writeline(raii_handle) + + def generate_fallback_kernel(self, fallback_kernel, args): + if V.graph.aot_mode and config.aot_inductor.abi_compatible: + self.generate_c_shim_fallback_kernel(fallback_kernel, args) + else: + super().generate_fallback_kernel(fallback_kernel, args) + + def generate_extern_kernel_out(self, output_view, codegen_reference, args, kernel): + if output_view: + output_as_strided = f"{output_view.codegen_reference()}" + output_name = f"{output_view.get_name()}_as_strided" + self.writeline(f"auto {output_name} = {output_as_strided};") + + args.insert(0, output_name) + else: + args.insert(0, f"{codegen_reference}") + + if 
V.graph.aot_mode and config.aot_inductor.abi_compatible: + self.generate_c_shim_extern_kernel_call(kernel, args) + else: + self.writeline(self.wrap_kernel_call(kernel, args)) + + def generate_user_defined_triton_kernel(self, kernel_name, grid, configs, args): + assert len(grid) != 0 + if len(grid) == 1: + grid_decision = grid[0] + else: + meta = CudaKernelParamCache.get(kernel_name) + assert meta is not None + grid_decision = None + for i, c in enumerate(configs): + if all(arg == meta["meta"][key] for key, arg in c.kwargs.items()): + grid_decision = grid[i] + break + assert grid_decision is not None + + self.generate_kernel_call( + kernel_name, + args, + grid=grid_decision, + device_index=V.graph.scheduler.current_device.index, + cuda=True, + triton=True, + ) + + def generate_scatter_fallback( + self, output, inputs, kernel, fn, src_is_tensor, reduce, kwargs + ): + # TODO: support other overload for cpp wrapper and remove the below assertions + if V.graph.aot_mode and config.aot_inductor.abi_compatible: + # call the ABI shim function instead of the ATen one + kernel = kernel.replace("at::", "aoti_torch_") + line = f"{kernel}({output}, {','.join(map(str, inputs))}" + if fn == "aten.scatter_": + if src_is_tensor: + if reduce: + line += f", {V.graph.wrapper_code.val_to_arg_str(reduce)}" + else: + assert ( + reduce is None + ), "Expect reduce to be None for aten.scatter_ with scalar src" + else: + line += f", {','.join(kwargs)}" + line += f"){self.ending}" + self.writeline(line) + + def add_benchmark_harness(self, output): + if V.graph.aot_mode: + return + super().add_benchmark_harness(output) + + def codegen_sizevar(self, x: Expr) -> str: + return self.expr_printer(V.graph.sizevars.simplify(x)) + + def codegen_tuple_access(self, basename: str, name: str, index: str) -> str: + if V.graph.aot_mode and config.aot_inductor.abi_compatible: + # in the abi_compatible mode, outputs are returned via arguments + return name + else: + return f"std::get<{index}>({basename})" + + def codegen_shape_tuple(self, shape: Tuple[Expr, ...]) -> str: + parts = list(map(self.codegen_sizevar, shape)) + if len(parts) == 0: + return "{}" + if len(parts) == 1: + return f"{{{parts[0]}, }}" + return f"{{{', '.join(parts)}}}" + + def is_statically_known_int(self, x): + try: + val = V.graph._shape_env._maybe_evaluate_static(x) + int(x) + return True + except Exception: + return False + + def is_statically_known_list_of_ints(self, lst): + return all(isinstance(self.is_statically_known_int(x), int) for x in lst) + + def can_prove_buffer_has_static_shape(self, buffer): + return self.is_statically_known_list_of_ints(buffer.get_size()) + + def can_cache_buffer_in_thread_local(self, buffer): + # We are gated off on CUDA because this is intended to reduce overhead in + # overhead-bound CPU use case. 
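To make the C++ brace-initializer convention of codegen_shape_tuple above concrete, here is a trimmed-down sketch with the sizevar printer replaced by str(); this is an illustration, not the method itself:

def codegen_shape_tuple(shape):
    parts = [str(s) for s in shape]
    if len(parts) == 0:
        return "{}"
    if len(parts) == 1:
        return f"{{{parts[0]}, }}"   # trailing comma keeps a one-element braced list
    return f"{{{', '.join(parts)}}}"

assert codegen_shape_tuple(()) == "{}"
assert codegen_shape_tuple((64,)) == "{64, }"
assert codegen_shape_tuple((64, 32)) == "{64, 32}"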
+ return ( + not self.cuda + and config.allow_buffer_reuse + and self.can_prove_buffer_has_static_shape(buffer) + ) + + def make_buffer_free(self, buffer): + return ( + "" + if isinstance(buffer.get_layout(), ir.MultiOutputLayout) + or (V.graph.aot_mode and self.can_cache_buffer_in_thread_local(buffer)) + else f"{buffer.get_name()}.reset();" + ) + + def make_free_by_names(self, names_to_del: List[str]): + return " ".join(f"{name}.reset();" for name in names_to_del) + + def codegen_exact_buffer_reuse(self, old_name: str, new_name: str, del_line: str): + if config.aot_inductor.abi_compatible: + return f"auto {new_name} = std::move({old_name}); // reuse" + else: + return super().codegen_exact_buffer_reuse(old_name, new_name, del_line) + + def generate_profiler_mark_wrapper_call(self, stack): + self.wrapper_call.writeline( + 'RECORD_FUNCTION("inductor_wrapper_call", c10::ArrayRef());' + ) + + def write_triton_header_once(self): + pass + + def generate_start_graph(self): + pass + + def generate_end_graph(self): + pass + + def generate_inf_and_nan_checker(self, nodes): + for buf in nodes.get_names(): + # TODO: Add buf name directly into check_inf_and_nan. + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_check_inf_and_nan({buf}));" + ) + + def codegen_device(self, device): + if config.aot_inductor.abi_compatible: + return f"cached_torch_device_type_{device.type},{device.index if device.index else 0}" + else: + from .cpp import DEVICE_TO_ATEN + + return ( + f"c10::Device({DEVICE_TO_ATEN[device.type]}, {device.index})" + if device.index is not None + else f"{DEVICE_TO_ATEN[device.type]}" + ) + + def codegen_dtype(self, dtype): + if config.aot_inductor.abi_compatible: + dtype_str = str(dtype).split(".")[-1] + self.used_cached_dtypes.add(dtype_str) + return f"cached_torch_dtype_{dtype_str}" + else: + from .cpp import DTYPE_TO_ATEN + + return DTYPE_TO_ATEN[dtype] + + @functools.lru_cache(None) + def codegen_int_array_var(self, int_array: str, writer=None): + # Because the memory planning is done in two passes (see the implementation + # of self.generate), the writeline behavior is different in the two passes. 
+ # As a result, the emitted int array declarations may appear in a later + # position of the generated code, so the second pass codegen should not + # reuse int array declarations generated in the first pass + if writer is None: + # The first pass codegen uses `self` as the writer + writer = self + + var = f"int_array_{next(self.int_array_id)}" + if var not in self.declared_int_array_vars: + self.declared_int_array_vars.add(var) + writer.writeline(f"int64_t {var}[] = {int_array};") + return var + + def make_buffer_allocation(self, buffer): + return self.make_allocation( + buffer.get_name(), + buffer.get_device(), + buffer.get_dtype(), + buffer.get_size(), + buffer.get_stride(), + self.can_cache_buffer_in_thread_local(buffer), + ) + + def make_allocation( + self, name, device, dtype, shape, stride, can_cache_buffer_in_thread_local=False + ): + device = self.codegen_device(device) + dtype = self.codegen_dtype(dtype) + size = self.codegen_shape_tuple(shape) + stride = self.codegen_shape_tuple(stride) + if config.aot_inductor.abi_compatible: + device_type, device_id = device.split(",") + args = [ + str(len(shape)), + self.codegen_int_array_var(size, self.wrapper_call), + self.codegen_int_array_var(stride, self.wrapper_call), + dtype, + device_type, + "this->device_idx_" if V.graph.aot_mode else device_id, + f"&{name}_handle", + ] + + def gen_alloc(wrapper_call, name, args): + wrapper_call.writeline(f"AtenTensorHandle {name}_handle;") + wrapper_call.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_empty_strided({', '.join(args)}));" + ) + + if can_cache_buffer_in_thread_local: + self.cached_thread_locals.add(name) + self.wrapper_call.writeline( + f"thread_local RAIIAtenTensorHandle {name}_handle = ([&] {{" + ) + with self.wrapper_call.indent(): + gen_alloc(self.wrapper_call, name, args) + self.wrapper_call.writeline(f"return {name}_handle;") + self.wrapper_call.writeline("})();") + return f"AtenTensorHandle {name}({name}_handle.get());" + else: + gen_alloc(self.wrapper_call, name, args) + return f"RAIIAtenTensorHandle {name}({name}_handle);" + + if V.graph.aot_mode and device.startswith("c10::Device("): + tensor_device = f"{device.split(',')[0]}, this->device_idx_)" + else: + tensor_device = device + + return ( + f"{self.declare}{name} = {self.namespace}empty_strided(" + f"{size}, {stride}, at::TensorOptions({tensor_device}).dtype({dtype})){self.ending}" + ) + + def codegen_alloc_from_pool(self, name, offset, dtype, shape, stride) -> str: + if config.aot_inductor.abi_compatible: + size = self.codegen_shape_tuple(shape) + stride = self.codegen_shape_tuple(stride) + tmp_name = f"tmp_tensor_handle_{next(self.tmp_tensor_id)}" + args = [ + name, + pexpr(offset), # bytes not numel + self.codegen_dtype(dtype), + str(len(shape)), + self.codegen_int_array_var(size, self.wrapper_call), + self.codegen_int_array_var(stride, self.wrapper_call), + f"&{tmp_name}", + ] + self.wrapper_call.writeline(f"AtenTensorHandle {tmp_name};") + self.wrapper_call.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch__alloc_from_pool({', '.join(args)}));" + ) + return f"RAIIAtenTensorHandle({tmp_name})" + + return "alloc_from_pool({})".format( + ", ".join( + [ + name, + pexpr(offset), # bytes not numel + self.codegen_dtype(dtype), + self.codegen_shape_tuple(shape), + self.codegen_shape_tuple(stride), + ] + ) + ) + + def codegen_reinterpret_view( + self, data, size_list, stride_list, offset, writer + ) -> str: + dim = str(len(size_list)) + size = self.codegen_shape_tuple(size_list) + stride = 
self.codegen_shape_tuple(stride_list) + offset = self.codegen_sizevar(offset) + + if config.aot_inductor.abi_compatible: + tmp_name = f"tmp_tensor_handle_{next(self.tmp_tensor_id)}" + # Because the memory planning is done in two passes (see the implementation + # of self.generate), the writeline behavior is different in the two passes. + if writer is None: + writer = self + + args = [ + f"{data.get_name()}", + dim, + self.codegen_int_array_var(size, writer), + self.codegen_int_array_var(stride, writer), + offset, + f"&{tmp_name}", + ] + + def gen_reinterpret_call(writer, args): + writer.writeline(f"AtenTensorHandle {tmp_name};") + writer.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch__reinterpret_tensor({', '.join(args)}));" + ) + + if ( + self.can_cache_buffer_in_thread_local(data) + and self.is_statically_known_list_of_ints(size_list) + and self.is_statically_known_list_of_ints(stride_list) + ): + self.cached_thread_locals.add(tmp_name) + writer.writeline( + f"thread_local RAIIAtenTensorHandle {tmp_name}_handle = ([&] {{" + ) + if hasattr(writer, "indent"): + indent = writer.indent() + else: + indent = contextlib.nullcontext() + with indent: + gen_reinterpret_call(writer, args) + writer.writeline(f"return {tmp_name};") + writer.writeline("})();") + writer.writeline( + f"AtenTensorHandle {tmp_name}({tmp_name}_handle.get());" + ) + return tmp_name + + gen_reinterpret_call(writer, args) + + # NB, the return handle here represents a temporary tensor, which will be automatically + # released. + # Here's a sample usage in the cpp wrapper code: + # ``` + # aoti_torch_addmm_out( + # buf1, + # arg1_1, + # RAIIAtenTensorHandle(tmp_tensor_handle_0), + # buf0, + # 1L, + # 1L)); + # ``` + # RAIIAtenTensorHandle(tmp_tensor_handle_0) will be released after the call to addmm_out. + # This could be problematic when it's used in a different pattern, for example: + # ```` + # AtenTensorHandle tensor_args[] = {RAIIAtenTensorHandle(tmp_tensor_handle_2), buf5, buf6}; + # aoti_torch_proxy_executor_call_function(..., tensor_args); + # ```` + # RAIIAtenTensorHandle(tmp_tensor_handle_2) will be invalid when it's used in the latter + # kernel call. + # + # This is solved by updating the proxy_executor invocation to + # ``` + # aoti_torch_proxy_executor_call_function(..., + # std::vector{ + # RAIIAtenTensorHandle(tmp_tensor_handle_2), buf5, buf6 + # }.data() + # ); + # ``` + return f"RAIIAtenTensorHandle({tmp_name})" + else: + args = [data.get_name(), size, stride, offset] + return f"reinterpret_tensor({', '.join(args)})" + + def codegen_device_copy(self, src, dst): + if config.aot_inductor.abi_compatible: + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_tensor_copy_({src}, {dst}));" + ) + else: + self.writeline(f"{dst}.copy_({src});") + + def codegen_multi_output(self, name, value): + # in the abi_compatible mode, outputs are retrieved by passing + # output pointers, so we skip its codegen here. 
+ if not config.aot_inductor.abi_compatible: + super().codegen_multi_output(name, value) + + def generate_extern_kernel_args_decl_if_needed( + self, op_overload, raw_args, output_args + ): + arg_types = [x.real_type for x in op_overload._schema.arguments] + return_types = [x.type for x in op_overload._schema.returns] + + new_tensor_args = [] + new_int_args = [] + + def fill_args(arg, arg_type): + static_arg_types = ( + torch.FloatType, + torch.BoolType, + torch.StringType, + torch.Type, + torch.DeviceObjType, + ) + inductor_tensor_buffers = ( + ir.Buffer, + ir.ReinterpretView, + ) + + if isinstance(arg_type, torch.TensorType): + assert isinstance(arg, inductor_tensor_buffers), f"got {type(arg)}" + new_tensor_args.append(f"{arg.codegen_reference()}") + elif isinstance(arg_type, torch.IntType): + # int + new_int_args.append(str(arg)) + elif isinstance(arg_type, torch.SymIntType): + # SymInt + new_int_args.append(str(arg)) + elif isinstance(arg_type, torch.NumberType): + # Scalar of type int + assert isinstance(arg, (int, float, bool)) + # Only treat int Scalar as dynamic + if isinstance(arg, int): + new_int_args.append(str(arg)) + elif isinstance(arg_type, torch.ListType): + assert isinstance(arg, (list, tuple)) + + # List[Tensor] + if isinstance(arg_type.getElementType(), torch.TensorType): + new_tensor_args.extend([f"{a.codegen_reference()}" for a in arg]) + # List[Optional[Tensor]] + elif isinstance( + arg_type.getElementType(), torch.OptionalType + ) and isinstance( + arg_type.getElementType().getElementType(), torch.TensorType + ): + new_tensor_args.extend( + [f"{a.codegen_reference()}" for a in arg if a is not None] + ) + # List [int] or List[SymInt] + elif isinstance( + arg_type.getElementType(), (torch.IntType, torch.SymIntType) + ): + new_int_args.extend([str(a) for a in arg]) + # List[Scalar] + elif isinstance(arg_type.getElementType(), torch.NumberType): + # Only treat int Scalar as dynamic + is_int_type = [isinstance(a, int) for a in arg] + if any(is_int_type): + assert all( + is_int_type + ), "AOTInductor only supports int scalars of the same type" + new_int_args.extend([str(a) for a in arg]) + else: + assert isinstance( + arg_type.getElementType(), static_arg_types # type: ignore[arg-type] + ), f"Fall through arguments must be one of static_arg_types, got {type(arg_type)}" + else: + assert isinstance( + arg_type, static_arg_types # type: ignore[arg-type] + ), f"Fall through arguments must be one of static_arg_types, got {type(arg_type)}" + + for arg, arg_type in zip(raw_args, arg_types): + if arg is not None: + if isinstance(arg_type, torch.OptionalType): + fill_args(arg, arg_type.getElementType()) + else: + fill_args(arg, arg_type) + + def fill_output_arg(arg, return_type): + if isinstance(return_type, torch.TensorType): + self.writeline(f"AtenTensorHandle {arg}_handle; // output buffer") + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_new_uninitialized_tensor(&{arg}_handle));" + ) + self.writeline(f"RAIIAtenTensorHandle {arg}({arg}_handle);") + new_tensor_args.append(f"{arg}") + elif isinstance(return_type, torch.SymIntType): + raise NotImplementedError("NYI support for return type: SymInt") + elif isinstance(return_type, torch.ListType) and isinstance( + return_type.getElementType(), torch.SymIntType + ): + raise NotImplementedError("NYI support for return type: List[SymInt]") + else: + raise AssertionError(f"Unsupported return type found: {return_type}") + + # TODO: Only support tensor(s) returns for now, SymInt is not implemented yet + for return_type in 
return_types: + if isinstance(return_type, (torch.TensorType)): + pass + elif isinstance(return_type, torch.OptionalType): + assert isinstance(return_type.getElementType(), torch.TensorType) + elif isinstance(return_type, torch.ListType): + assert isinstance(return_type.getElementType(), torch.TensorType) + else: + raise NotImplementedError( + f"return type {return_type} is not yet supported." + ) + + for output_arg in output_args: + assert output_arg is not None, "Optional return types are not yet supported" + if isinstance(output_arg, (list, tuple)): + for out in output_arg: + fill_output_arg(out, torch.TensorType.get()) + else: + fill_output_arg(output_arg, torch.TensorType.get()) + + return new_tensor_args, new_int_args + + def generate_extern_kernel_alloc_and_find_schema_if_needed( + self, + name, + kernel, + codegen_args, + cpp_op_schema, + cpp_kernel_key, + cpp_kernel_overload_name="", + op_overload=None, + raw_args=None, + outputs=None, + ): + if config.is_fbcode(): + assert op_overload is not None + assert raw_args is not None + assert outputs is not None + + return self.generate_extern_kernel_alloc_and_find_schema_if_needed_fbcode( + name, + cpp_kernel_key, + op_overload, + raw_args, + outputs, + ) + else: + return self.generate_extern_kernel_alloc_and_find_schema_if_needed_oss( + name, + kernel, + codegen_args, + cpp_op_schema, + cpp_kernel_key, + cpp_kernel_overload_name, + ) + + def generate_extern_kernel_alloc_and_find_schema_if_needed_oss( + self, + name, + kernel, + codegen_args, + cpp_op_schema, + cpp_kernel_key, + cpp_kernel_overload_name="", + ): + if cpp_kernel_key not in self.extern_call_ops: + self.writeline( + f"static auto op_{cpp_kernel_key} = c10::Dispatcher::singleton()" + ) + self.writeline( + f'\t.findSchemaOrThrow("{kernel}", "{cpp_kernel_overload_name}")' + ) + self.writeline(f"\t.typed<{cpp_op_schema}>();") + self.extern_call_ops.add(cpp_kernel_key) + + self.writeline( + f"auto {name} = op_{cpp_kernel_key}.call({', '.join(codegen_args)});" + ) + + def generate_extern_kernel_alloc_and_find_schema_if_needed_fbcode( + self, + name, + cpp_kernel_key, + op_overload, + raw_args, # contains both args and flatten kwargs + outputs, + ): + def extract_output_name(out): + assert out is not None, "None, i.e. 
optional output is not supported" + if isinstance(out, ir.MultiOutput): + return out.get_name() + elif isinstance(out, (list, tuple)): + return type(out)(extract_output_name(o) for o in out) + else: + raise AssertionError(f"Unexpected output: {type(out)}") + + # output_args has the same pytree structure as outputs + output_args = extract_output_name(outputs) + if isinstance(output_args, str): + output_args = [output_args] + + ( + tensor_call_args, + int_call_args, + ) = self.generate_extern_kernel_args_decl_if_needed( + op_overload, raw_args, output_args + ) + + tensor_call_args_str = ", ".join(tensor_call_args) + int_call_args_str = ", ".join(int_call_args) + + extern_kernel_node_index = len(V.graph.extern_kernel_nodes) - 1 + + self.writeline( + f"aoti_torch_proxy_executor_call_function(proxy_executor, " + f"{extern_kernel_node_index}, " + f"{len(int_call_args)}, " + f"std::vector{{{int_call_args_str}}}.data(), " + f"{len(tensor_call_args)}, " + f"std::vector{{{tensor_call_args_str}}}.data());" + ) + + self.extern_call_ops.add(cpp_kernel_key) + + def val_to_cpp_arg_str(self, type_, val, is_legacy_abi) -> str: + if ( + config.aot_inductor.abi_compatible + and not is_legacy_abi + and isinstance(type_, torch.OptionalType) + ): + if val is None: + return "0" # nullptr is not available in C + if isinstance(val, (bool, int, str, float)): + var_name = f"var_{next(self.arg_var_id)}" + self.writeline(f"auto {var_name} = {self.val_to_arg_str(val)};") + return f"&{var_name}" + if not isinstance(type_.getElementType(), torch.TensorType): + return f"&{self.val_to_arg_str(val)}" + + return self.val_to_arg_str(val) + + def val_to_arg_str(self, val) -> str: + if val is None: + # When None is passed as an argument, it represents an optional that does not contain a value. + if config.aot_inductor.abi_compatible: + return "0" # nullptr is not available in C + return "c10::nullopt" + elif isinstance(val, bool): + if config.aot_inductor.abi_compatible: + return "1" if val else "0" + else: + return "true" if val else "false" + elif isinstance(val, int): + return f"{val}L" + elif isinstance(val, str): + return f'"{val}"' + elif isinstance(val, (ComputedBuffer, InputBuffer, ReinterpretView)): + return val.codegen_reference() + elif isinstance(val, torch.device): + return self.codegen_device(val) + elif isinstance(val, torch.dtype): + return self.codegen_dtype(val) + elif isinstance(val, float) and val in [float("inf"), float("-inf")]: + if val == float("inf"): + return "std::numeric_limits::infinity()" + else: + return "-std::numeric_limits::infinity()" + elif isinstance(val, (list, tuple)): + # FIXME handle embedded optional types? 
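val_to_arg_str above maps Python scalars onto C++ literal spellings, and the ordering of the checks matters because bool is a subclass of int. A condensed sketch of the non-ABI-compatible cases; the helper name is ours, for illustration only:

def scalar_to_cpp_literal(val):
    if val is None:
        return "c10::nullopt"
    if isinstance(val, bool):  # must precede the int check: bool is a subclass of int
        return "true" if val else "false"
    if isinstance(val, int):
        return f"{val}L"
    if isinstance(val, str):
        return f'"{val}"'
    if isinstance(val, float) and val in (float("inf"), float("-inf")):
        prefix = "-" if val < 0 else ""
        # full template spelling written out here: std::numeric_limits<float>::infinity()
        return prefix + "std::numeric_limits<float>::infinity()"
    return repr(val)

assert scalar_to_cpp_literal(3) == "3L"
assert scalar_to_cpp_literal(True) == "true"
assert scalar_to_cpp_literal(float("-inf")) == "-std::numeric_limits<float>::infinity()"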
+ result = f"{{{', '.join(self.val_to_arg_str(x) for x in val)}}}" + if config.aot_inductor.abi_compatible: + # Need to pass the array length because we can't use std::vector + return f"{self.codegen_int_array_var(result)}, {len(val)}" + else: + return result + else: + return repr(val) + + +class CudaWrapperCodeGen(CppWrapperCodeGen): + """ + Generates cpp wrapper for running on GPU and calls CUDA kernels + """ + + def __init__(self): + super().__init__() + self.grid_id = count() + self.cuda = True + + def write_header(self): + super().write_header() + + self.header.splice("#include ") + if not config.aot_inductor.abi_compatible: + self.header.splice( + """ + #include + #include + """ + ) + + self.header.splice( + """ + #define CUDA_DRIVER_CHECK(EXPR) \\ + do { \\ + CUresult code = EXPR; \\ + const char *msg; \\ + cuGetErrorString(code, &msg); \\ + if (code != CUDA_SUCCESS) { \\ + throw std::runtime_error( \\ + std::string("CUDA driver error: ") + \\ + std::string(msg)); \\ + } \\ + } while (0); + + namespace { + + struct Grid { + Grid(uint32_t x, uint32_t y, uint32_t z) + : grid_x(x), grid_y(y), grid_z(z) {} + uint32_t grid_x; + uint32_t grid_y; + uint32_t grid_z; + + bool is_non_zero() { + return grid_x > 0 && grid_y > 0 && grid_z > 0; + } + }; + + } // anonymous namespace + + static inline CUfunction loadKernel( + std::string filePath, + const std::string &funcName, + uint32_t sharedMemBytes, + const std::optional &cubinDir = std::nullopt) { + if (cubinDir) { + std::filesystem::path p1{*cubinDir}; + std::filesystem::path p2{filePath}; + filePath = (p1 / p2.filename()).string(); + } + + CUmodule mod; + CUfunction func; + CUDA_DRIVER_CHECK(cuModuleLoad(&mod, filePath.c_str())); + CUDA_DRIVER_CHECK(cuModuleGetFunction(&func, mod, funcName.c_str())); + if (sharedMemBytes > 0) { + CUDA_DRIVER_CHECK(cuFuncSetAttribute( + func, + CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, + sharedMemBytes + )) + } + return func; + } + + static inline void launchKernel( + CUfunction func, + uint32_t gridX, + uint32_t gridY, + uint32_t gridZ, + uint32_t numWarps, + uint32_t sharedMemBytes, + void* args[], + cudaStream_t stream) { + CUDA_DRIVER_CHECK(cuLaunchKernel( + func, gridX, gridY, gridZ, 32*numWarps, 1, 1, sharedMemBytes, stream, args, nullptr + )); + } + """ + ) + + def write_get_raw_stream(self, index): + name = f"stream{index}" + self.writeline( + f"cudaStream_t {name} = at::cuda::getCurrentCUDAStream({index});" + ) + return name + + def define_kernel( + self, name: str, kernel: str, metadata: Optional[str] = None, cuda=True + ): + if not cuda: + return super().define_kernel(name, kernel, metadata, cuda) + + def generate(self, is_inference): + self.prefix.writeline("\n") + if not V.graph.aot_mode: + for kernel in chain( + self.src_to_kernel.values(), self.user_defined_kernel_cache.values() + ): + self.prefix.writeline(f"static CUfunction {kernel} = nullptr;") + self.prefix.writeline("\n") + return super().generate(is_inference) + + @functools.lru_cache(None) + def generate_load_kernel_once( + self, name: str, mangled_name: str, cubin_path: str, shared_mem: int + ): + if V.graph.aot_mode: + self.writeline(f"if (kernels.{name} == nullptr) {{") + self.writeline( + f""" kernels.{name} = loadKernel("{cubin_path}", "{mangled_name}", {shared_mem}, this->cubin_dir_);""" + ) + self.writeline("}") + else: + self.writeline(f"if ({name} == nullptr) {{") + self.writeline( + f""" {name} = loadKernel("{cubin_path}", "{mangled_name}", {shared_mem});""" + ) + self.writeline("}") + + def generate_args_decl(self, 
call_args): + dynamic_symbols = V.graph.sizevars.free_symbols() + # TODO: only works for constant now, need type info + new_args = [] + for arg in call_args: + var_name = f"var_{next(self.arg_var_id)}" + if isinstance(arg, (sympy.Integer, sympy.Symbol, SymbolicCallArg)): + self.writeline(f"auto {var_name} = {arg};") + elif isinstance(arg, sympy.Expr): + self.writeline(f"auto {var_name} = {self.expr_printer(arg)};") + elif is_int(arg): + self.writeline(f"int {var_name} = {arg};") + elif is_float(arg): + self.writeline(f"float {var_name} = {arg};") + elif any(str(arg) == s.name for s in dynamic_symbols): + self.writeline(f"auto {var_name} = {arg};") + elif arg == "nullptr": + self.writeline(f"auto {var_name} = nullptr;") + elif arg == "c10::nullopt": + self.writeline(f"auto {var_name} = c10::nullopt;") + else: + if config.aot_inductor.abi_compatible: + self.writeline(f"CUdeviceptr {var_name};") + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr({arg}, reinterpret_cast(&{var_name})));" + ) + else: + self.writeline( + f"CUdeviceptr {var_name} = reinterpret_cast({arg}.data_ptr());" + ) + new_args.append(f"&{var_name}") + + return ", ".join(new_args) + + def generate_default_grid(self, name: str, grid: List[Any], cuda: bool = True): + """ + Generate grid configs for launching a CUDA kernel using the grid + function from triton_heuristics. + """ + if not cuda: + return grid + assert isinstance(grid, list), f"expected {grid=} to be a list" + grid = [e.inner_expr if isinstance(e, SymbolicCallArg) else e for e in grid] + grid_fn = default_grid(*grid) + params = CudaKernelParamCache.get(name) + assert ( + params is not None + ), f"cuda kernel parameters for {name} should already exist at this moment" + block_cfg = { + "XBLOCK": params["x_block"], + "YBLOCK": params["y_block"], + "ZBLOCK": params["z_block"], + } + return grid_fn(block_cfg) + + def generate_kernel_call( + self, name, call_args, grid=None, device_index=None, cuda=True, triton=True + ): + if not cuda: + # Even in CudaWrapperCodeGen, we may see cpp kernels + return super().generate_kernel_call( + name, call_args, grid, device_index, cuda, triton + ) + + params = CudaKernelParamCache.get(name) + assert ( + params is not None + ), f"cuda kernel parameters for {name} should already exist at this moment" + mangled_name = params.get("mangled_name", None) + assert mangled_name is not None, "missing mangled_name" + cubin_path = params.get(get_cpp_wrapper_cubin_path_name(), None) + assert cubin_path is not None and os.path.exists( + cubin_path + ), f"cubin file should already exist at this moment: {cubin_path}" + shared_mem = params.get("shared_mem", 0) + + self.generate_load_kernel_once(name, mangled_name, cubin_path, shared_mem) + + call_args = self.generate_args_decl(call_args) + kernel_args_var = f"kernel_args_var_{next(self.kernel_callsite_id)}" + self.writeline(f"void* {kernel_args_var}[] = {{{call_args}}};") + stream = ( + "stream" if V.graph.aot_mode else self.write_get_raw_stream(device_index) + ) + grid_name = f"{name}_grid_{next(self.grid_id)}" + assert isinstance( + grid, (list, tuple) + ), f"expected grid to be a list or tuple but got: {grid=}" + + grid = [V.graph.sizevars.simplify(item) for item in grid] + grid_has_unbacked_symbols = any(free_unbacked_symbols(item) for item in grid) + grid_args = [self.grid_expr_printer(item) for item in grid] + grid_args_str = ", ".join(grid_args) + self.writeline(f"Grid {grid_name} = Grid({grid_args_str});") + + if grid_has_unbacked_symbols: + self.writeline(f"if 
({grid_name}.is_non_zero()) {{") + kernel_var_name = f"kernels.{name}" if V.graph.aot_mode else name + self.writeline( + "launchKernel({}, {}, {}, {}, {}, {}, {}, {});".format( + kernel_var_name, + f"{grid_name}.grid_x", + f"{grid_name}.grid_y", + f"{grid_name}.grid_z", + params["num_warps"], + params["shared_mem"], + kernel_args_var, + stream, + ) + ) + if grid_has_unbacked_symbols: + self.writeline("}") diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b17d76e12794b8407063c84d5dbb55b3aac25c99 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__init__.py @@ -0,0 +1 @@ +from . import mm, mm_common, mm_plus_mm, unpack_mixed_mm diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13f82b4ac063d04f997f8a1d2236d41a88637b61 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/bmm.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/bmm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ff55b5a4ae2122455b4f2989f0288bc8ffdbc5d Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/bmm.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/conv.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/conv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83fcf96227d56130a391acb39b2900c6b3bb01f4 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/conv.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_common.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01c93667b4f74f206205cb2254d5e66c9246005d Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_common.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_plus_mm.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_plus_mm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9710bea82e6dc5b723188c1a39e9d38c2d89bdf6 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_plus_mm.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/unpack_mixed_mm.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/unpack_mixed_mm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0213af9d4f21192d1ee00f54dd6b550fd1b314e6 Binary files 
/dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/unpack_mixed_mm.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/conv.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..849058349778ca456265dff94a8c69b4fd2b6d89 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/conv.py @@ -0,0 +1,489 @@ +from __future__ import annotations + +import functools +import logging +from typing import cast, List, Optional, Sequence, Tuple, TypedDict + +import torch +from .. import config, ir +from ..ir import TensorBox + +from ..lowering import ( + add_layout_constraint, + constrain_to_fx_strides, + lowerings as L, + register_lowering, +) +from ..select_algorithm import ( + autotune_select_algorithm, + ExternKernelChoice, + TritonTemplate, +) +from ..utils import ( + ceildiv, + is_ones, + is_zeros, + pad_listlike, + sympy_product, + use_triton_template, +) +from ..virtualized import V +from .mm_common import filtered_configs + +log = logging.getLogger(__name__) + + +aten = torch.ops.aten + + +def conv_grid(n, c, h, w, meta): + return ( + ceildiv(n * h * w, meta["BLOCK_M"]), + ceildiv(c, meta["BLOCK_N"]), + meta["GROUPS"], + ) + + +# List of dictionaries to store the kernel configs. Configs that evaluate to true +# will be utilised on the target platform +kernel_configs = [ + # "BLOCK_M", "BLOCK_N", "BLOCK_K", "num_stages", "num_warps" + {"config": (64, 256, 16, 2, 4), "cond": True}, + {"config": (256, 64, 16, 2, 4), "cond": True}, + {"config": (1024, 16, 16, 1, 8), "cond": True}, + {"config": (128, 128, 32, 2, 8), "cond": True}, + {"config": (64, 64, 32, 2, 4), "cond": True}, + {"config": (64, 256, 32, 2, 8), "cond": True}, + {"config": (256, 64, 32, 2, 8), "cond": True}, +] + +# Create filtered list of configs based on conv +platform_configs = tuple( + cast(Tuple[int, int, int, int, int], config["config"]) + for config in kernel_configs + if config["cond"] +) + +# On ROCm convert num_stages to 1 as pipelining provides no benefit +if torch.version.hip: + platform_configs = tuple( + (config[0], config[1], config[2], 1, config[4]) for config in platform_configs + ) + +conv_configs = functools.partial( + filtered_configs, + configs=platform_configs, +) + +LOOP_BODY = """ + idx_x_h = i - PADDING_H + idx_y_h * STRIDE_H + idx_x_w = j - PADDING_W + idx_y_w * STRIDE_W + idx_x_c = tl.arange(0, BLOCK_K) + k + + x_ptrs = x_base + ( + (idx_x_h * stride_xh)[:, None] + + (idx_x_w * stride_xw)[:, None] + + (idx_x_c * stride_xc)[None, :] + ) + mask_x = ( + (idx_n < BATCH)[:, None] + & (idx_x_h >= 0)[:, None] + & (idx_x_h < IN_H)[:, None] + & (idx_x_w >= 0)[:, None] + & (idx_x_w < IN_W)[:, None] + & (idx_x_c < GROUP_IN_C)[None, :] + ) + matrix_x = tl.load(x_ptrs, mask=mask_x, other=0.0) + + w_ptrs = w_base + ( + (idx_x_c * stride_wc_in)[:, None] + (i * stride_wh) + (j * stride_ww) + ) + mask_w = (idx_x_c[:, None] < GROUP_IN_C) & (idx_y_c[None, :] < GROUP_OUT_C) + matrix_w = tl.load(w_ptrs, mask=mask_w, other=0.0) + acc += tl.dot(matrix_x, matrix_w, allow_tf32=ALLOW_TF32) +""" + +""" +This is a relatively simple conv implementation that can likely be +improved. 
Many alternate conv versions can be found here: +https://github.com/pytorch/torchdynamo/pull/971 +""" +conv2d_template = TritonTemplate( + name="convolution", + grid=conv_grid, + source=r""" +{{def_kernel("X", "W")}} + # Tensor dimensions + BATCH = {{size("X", 0)}} + IN_C = {{size("X", 1)}} + IN_H = {{size("X", 2)}} + IN_W = {{size("X", 3)}} + OUT_C = {{size(None, 1)}} + OUT_H = {{size(None, 2)}} + OUT_W = {{size(None, 3)}} + + # Strides: + stride_xn = {{stride("X", 0)}} + stride_xc = {{stride("X", 1)}} + stride_xh = {{stride("X", 2)}} + stride_xw = {{stride("X", 3)}} + stride_wc_out = {{stride("W", 0)}} + stride_wc_in = {{stride("W", 1)}} + stride_wh = {{stride("W", 2)}} + stride_ww = {{stride("W", 3)}} + + nhw = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M) + idx_y_w = nhw % OUT_W + nh = nhw // OUT_W + idx_y_h = nh % OUT_H + idx_n = nh // OUT_H + idx_y_c = tl.program_id(1) * BLOCK_N + tl.arange(0, BLOCK_N) + +{% if GROUPS == 1 %} + group = 0 + GROUP_IN_C = IN_C + GROUP_OUT_C = OUT_C +{% else %} + group = tl.program_id(2) + GROUP_IN_C = IN_C // GROUPS + GROUP_OUT_C = OUT_C // GROUPS +{% endif %} + + x_base = X + (group * stride_xc * GROUP_IN_C + idx_n * stride_xn)[:, None] + w_base = ( + W + (group * stride_wc_out * GROUP_OUT_C + idx_y_c * stride_wc_out)[None, :] + ) + + acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32) + +{% if UNROLL %} +{% for i in range(KERNEL_H) %} +{% for j in range(KERNEL_W) %} + i = {{i}} + j = {{j}} + for k in range(0, GROUP_IN_C, BLOCK_K): + """ + + LOOP_BODY + + """ +{% endfor %} +{% endfor %} +{% else %} + # Could be simplified, but slightly slower: + # for i in range(KERNEL_H): + # for j in range(KERNEL_W): + # for k in range(0, GROUP_IN_C, BLOCK_K): + BLOCK_K_COUNT = (GROUP_IN_C + BLOCK_K - 1) // BLOCK_K + for ijk in range(KERNEL_H * KERNEL_W * BLOCK_K_COUNT): + k = (ijk % BLOCK_K_COUNT) * BLOCK_K + ij = ijk // BLOCK_K_COUNT + i = ij // KERNEL_W + j = ij % KERNEL_W + """ + + LOOP_BODY + + """ +{% endif %} + + mask = ( + (idx_n < BATCH)[:, None] + & (idx_y_h < OUT_H)[:, None] + & (idx_y_w < OUT_W)[:, None] + & (idx_y_c < GROUP_OUT_C)[None, :] + ) + idx_n = idx_n[:, None] + idx_c = idx_y_c[None, :] + group * GROUP_OUT_C + idx_h = idx_y_h[:, None] + idx_w = idx_y_w[:, None] + + # inductor generates a suffix + {{store_output(("idx_n", "idx_c", "idx_h", "idx_w"), "acc", "mask")}} +""", +) + +aten_convolution = ExternKernelChoice( + torch.convolution, + "at::convolution", + has_out_variant=False, +) + + +def conv1x1_via_mm(x, w, *, out): + w = torch.squeeze(torch.squeeze(w, -1), -1) + return torch.matmul( + x.permute(0, 2, 3, 1), w.permute(1, 0), out=out.permute(0, 2, 3, 1) + ) + + +aten_conv1x1_via_mm = ExternKernelChoice(conv1x1_via_mm, None) + + +class ConvLayoutParams(TypedDict): + stride: tuple[int, ...] + padding: tuple[int, ...] + dilation: tuple[int, ...] + transposed: bool + output_padding: tuple[int, ...] 
+ groups: int + + +def conv_layout( + x: TensorBox, + weight: TensorBox, + bias: Optional[TensorBox], + stride: Sequence[int], + padding: tuple[int, ...], + dilation: tuple[int, ...], + transposed: bool, + output_padding: tuple[int, ...], + groups: int, +) -> ir.Layout: + """Determine output layout for a convolution""" + with V.graph.fake_mode: + output = torch.ops.aten.convolution( + ir.ir_node_to_tensor(x, guard_shape=True), + ir.ir_node_to_tensor(weight, guard_shape=True), + ir.ir_node_to_tensor(bias, guard_shape=True), + stride, + tuple(V.graph.sizevars.size_hint(p) for p in padding), + dilation, + transposed, + tuple(V.graph.sizevars.size_hint(p) for p in output_padding), + groups, + ) + sizes = ir.convert_shape_to_inductor(output.size()) + stride = ir.convert_shape_to_inductor(output.stride()) + + return ir.FixedLayout( + x.get_device(), + x.get_dtype(), + sizes, + stride, + ) + + +def channels_last_order(rank): + order = list(reversed(range(rank))) + order.insert(1, order.pop(-1)) + return order + + +def convert_1x1_conv_to_mm(x, weight, bias): + # special case for 1x1 convolution, which is actually just a matmul + rank = len(weight.get_size()) + for _ in range(rank - 2): + weight = L[aten.squeeze](weight, dim=-1) + weight = L[aten.permute](weight, [1, 0]) + + if x.get_size()[0] != 1: + x = ir.ExternKernel.require_stride_order(x, channels_last_order(rank)) + else: + x.realize() + x.freeze_layout() + + x_permute = list(range(rank)) + x_permute.append(x_permute.pop(1)) + x = L[aten.permute](x, x_permute) + *sizes, in_chan = x.get_size() + x = L[aten.reshape](x, [sympy_product(sizes), in_chan]) + if bias is None: + result = L[aten.mm](x, weight) + else: + result = L[aten.addmm](bias, x, weight) + result = L[aten.reshape](result, [*sizes, -1]) + result_permute = list(range(rank)) + result_permute.insert(1, result_permute.pop(-1)) + return L[aten.permute](result, result_permute) + + +@register_lowering(aten.convolution) +def convolution( + x: TensorBox, + weight: TensorBox, + bias: TensorBox, + stride: List[int], + padding: List[int], + dilation: List[int], + transposed: bool, + output_padding: List[int], + groups: int, +): + stride = tuple(stride) + padding = tuple(padding) + dilation = tuple(dilation) + output_padding = tuple(output_padding) + if not isinstance(groups, int): + groups = V.graph.sizevars.evaluate_static_shape(groups) + assert isinstance(groups, int) + kwargs: ConvLayoutParams = { + "stride": stride, + "padding": padding, + "dilation": dilation, + "transposed": transposed, + "output_padding": output_padding, + "groups": groups, + } + + if len(x.get_size()) == len(weight.get_size()) - 1: + # add batch dimension to simplify rest of function + return L[aten.squeeze]( + convolution(L[aten.expand](x, [1, *x.get_size()]), weight, bias, **kwargs), + dim=0, + ) + + out_chan, in_chan, *kernel_shape = V.graph.sizevars.evaluate_static_shapes( + weight.get_size() + ) + ndim = len(kernel_shape) + stride = pad_listlike(stride, ndim) + padding = pad_listlike(padding, ndim) + dilation = pad_listlike(dilation, ndim) + output_padding = pad_listlike(output_padding, ndim) + + def channels_last_conv(): + if V.graph.layout_opt and ndim == 2: + return True + + layout = conv_layout(x, weight, None, **kwargs) + req_stride_order = ir.get_stride_order( + V.graph.sizevars.size_hints(layout.stride) + ) + return req_stride_order == ir.NHWC_STRIDE_ORDER + + autotuning_gemm = config.max_autotune or config.max_autotune_gemm + + if ( + (config.conv_1x1_as_mm or (autotuning_gemm and channels_last_conv())) 
+ and is_ones(kernel_shape) + and is_ones(stride) + and is_zeros(padding) + and is_ones(dilation) + and not transposed + and is_zeros(output_padding) + and groups == 1 + ): + return convert_1x1_conv_to_mm(x, weight, bias) + + if bias is not None and ir.get_device_type(x) != "cpu": + # peel off the bias, cudnn is slower with it + result = convolution(x, weight, None, **kwargs) + return L[aten.add]( + result, L[aten.view](bias, [result.get_size()[1]] + ndim * [1]) + ) + + x.realize() + weight.realize() + + # ndim can be 1 for convolution in models such as demucs + # TODO: check if it's beneficial to convert Conv1d to Conv2d and then + # apply channels last. + if V.graph.layout_opt and ndim == 2: + V.graph.num_channels_last_conv += 1 + x = ir.ExternKernel.require_channels_last(x) + # TODO maybe we can convert weights to channels last just once before + # running the model. + weight = ir.ExternKernel.require_channels_last(weight) + layout = conv_layout(x, weight, None, **kwargs) + else: + layout = conv_layout(x, weight, None, **kwargs) + req_stride_order = ir.get_stride_order( + V.graph.sizevars.size_hints(layout.stride) + ) + x = ir.ExternKernel.require_stride_order(x, req_stride_order) + weight = ir.ExternKernel.require_stride_order(weight, req_stride_order) + + ordered_kwargs_for_cpp_kernel = [ + "stride", + "padding", + "dilation", + "transposed", + "output_padding", + "groups", + ] + if bias is None: + args = [x, weight] + kwargs["bias"] = None # type: ignore[typeddict-unknown-key] + ordered_kwargs_for_cpp_kernel.insert(0, "bias") + else: + args = [x, weight, bias] + bias.realize() + bias.freeze_layout() + V.graph.sizevars.evaluate_static_shapes(bias.get_size()) + + choices = [ + aten_convolution.bind(args, layout, ordered_kwargs_for_cpp_kernel, **kwargs) + ] + if ( + use_triton_template(layout) + # templates only support these: + and ndim == 2 + and is_ones(dilation) + and not transposed + and is_zeros(output_padding) + # there are some odd models where this check fails (e.g. 
shufflenet_v2_x1_0) + and V.graph.sizevars.statically_known_equals(in_chan, x.get_size()[1]) + ): + if ( + is_ones(kernel_shape) + and is_ones(stride) + and is_zeros(padding) + and groups == 1 + ): + choices.append(aten_conv1x1_via_mm.bind(args, layout)) + + for cfg in conv_configs( + sympy_product([x.get_size()[0], *x.get_size()[2:]]), + out_chan, + in_chan, + ): + conv2d_template.maybe_append_choice( + choices, + input_nodes=(x, weight), + layout=layout, + KERNEL_H=kernel_shape[0], + KERNEL_W=kernel_shape[1], + STRIDE_H=stride[0], + STRIDE_W=stride[1], + PADDING_H=padding[0], + PADDING_W=padding[1], + GROUPS=groups, + # TODO(jansel): try unroll for bigger kernels once fixed: + # https://github.com/openai/triton/issues/1254 + UNROLL=is_ones(kernel_shape), + ALLOW_TF32=torch.backends.cudnn.allow_tf32, + num_stages=cfg.num_stages, + num_warps=cfg.num_warps, + **cfg.kwargs, + ) + + return autotune_select_algorithm("convolution", choices, args, layout) + + +@register_lowering(aten._convolution) +def _convolution( + x, + weight, + bias, + stride, + padding, + dilation, + transposed, + output_padding, + groups, + benchmark, + deterministic, + cudnn_enabled, + allow_tf32, +): + return convolution( + x, weight, bias, stride, padding, dilation, transposed, output_padding, groups + ) + + +def constrain_conv_to_fx_strides(fx_node, *args, **kwargs): + assert fx_node.target == torch.ops.aten.convolution.default + if V.graph.layout_opt: + return args, kwargs + else: + return constrain_to_fx_strides(fx_node, *args, **kwargs) + + +add_layout_constraint(aten.convolution, constrain_conv_to_fx_strides) diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/mm.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/mm.py new file mode 100644 index 0000000000000000000000000000000000000000..6479c2d83c75c3269cb924e74b9dcf6bbe291515 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/mm.py @@ -0,0 +1,305 @@ +import logging +from typing import Any, Dict, List + +import torch +from torch._inductor.virtualized import V +from .. 
import config as inductor_config +from ..codegen.cuda.gemm_template import CUTLASSGemmTemplate +from ..lowering import register_lowering +from ..select_algorithm import ( + autotune_select_algorithm, + ExternKernelChoice, + TritonTemplate, +) +from ..utils import ( + use_aten_gemm_kernels, + use_cutlass_template, + use_max_autotune, + use_triton_template, +) +from .mm_common import ( + addmm_epilogue, + int8_mm_configs, + mm_args, + mm_configs, + mm_grid, + mm_options, +) + +log = logging.getLogger(__name__) +aten = torch.ops.aten + +mm_template = TritonTemplate( + name="mm", + grid=mm_grid, + source=r""" +{{def_kernel("A", "B")}} + M = {{size("A", 0)}} + N = {{size("B", 1)}} + K = {{size("A", 1)}} + if M * N == 0: + # early exit due to zero-size input(s) + return + stride_am = {{stride("A", 0)}} + stride_ak = {{stride("A", 1)}} + stride_bk = {{stride("B", 0)}} + stride_bn = {{stride("B", 1)}} + + # based on triton.ops.matmul + pid = tl.program_id(0) + grid_m = (M + BLOCK_M - 1) // BLOCK_M + grid_n = (N + BLOCK_N - 1) // BLOCK_N + + # re-order program ID for better L2 performance + width = GROUP_M * grid_n + group_id = pid // width + group_size = min(grid_m - group_id * GROUP_M, GROUP_M) + pid_m = group_id * GROUP_M + (pid % group_size) + pid_n = (pid % width) // (group_size) + + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) + rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) + rk = tl.arange(0, BLOCK_K) + A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak) + B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn) + + acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE) + for k in range(K, 0, -BLOCK_K): + if EVEN_K: + a = tl.load(A) + b = tl.load(B) + else: + a = tl.load(A, mask=rk[None, :] < k, other=0.) + b = tl.load(B, mask=rk[:, None] < k, other=0.) + if B_PROLOGUE_CAST_TYPE is not None: + b = b.to(B_PROLOGUE_CAST_TYPE) + acc += tl.dot(a, b, allow_tf32=ALLOW_TF32) + A += BLOCK_K * stride_ak + B += BLOCK_K * stride_bk + + # rematerialize rm and rn to save registers + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + idx_m = rm[:, None] + idx_n = rn[None, :] + mask = (idx_m < M) & (idx_n < N) + + # inductor generates a suffix + {{store_output(("idx_m", "idx_n"), "acc", "mask")}} +""", +) + +aten_mm = ExternKernelChoice(torch.mm, "at::mm_out") + + +aten_addmm = ExternKernelChoice(torch.addmm, "at::addmm_out") + +aten__int_mm = ExternKernelChoice(torch._int_mm, "at::_int_mm") + + +def _is_int8_mat(mat): + return mat.get_dtype() in (torch.int8, torch.uint8) + + +def bias_addmm(inp, mat1, mat2, *, out=None, alpha=1, beta=1): + """ + Giving torch.addmm a 1D tensor calls a different (faster) cublasLt + kernel under the hood. There are a few shapes where this is slower, + but they are rare. 
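+ For illustration (editor's note, not part of the original docstring): an + expanded bias such as inp of shape (m, n) with inp.stride(0) == 0 is + collapsed to the 1D slice inp[0] below, so the faster fused kernel is used.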
+ """ + if inp.stride(0) == 0 or inp.size(0) == 1: + return torch.addmm(inp[0], mat1, mat2, out=out, alpha=alpha, beta=beta) + return torch.addmm(inp, mat1, mat2, out=out, alpha=alpha, beta=beta) + + +aten_bias_addmm = ExternKernelChoice(bias_addmm, None) + + +@register_lowering(aten.mm, type_promotion_kind=None) +def tuned_mm(mat1, mat2, *, layout=None): + m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout) + + # options to tune from + choices = [aten_mm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else [] + + if m * n != 0 and use_triton_template(layout): + for config in mm_configs(m, n, k): + mm_template.maybe_append_choice( + choices, + input_nodes=(mat1, mat2), + layout=layout, + **mm_options(config, k, layout), + ) + + if m * n != 0 and use_cutlass_template(layout): + CUTLASSGemmTemplate.add_cutlass_gemm_choices( + choices, layout, [mat1, mat2], fuseable=True, non_fuseable=True + ) + + from torch._inductor.ir import FixedLayout, FlexibleLayout + + if ( + len(choices) == 1 + and use_aten_gemm_kernels() + and isinstance(layout, FixedLayout) + ): + # If we are not autotuning, we can swap to a FlexibleLayout + # in order to get fusion optimizations to kick in, e.g. ConcatFusion + layout = FlexibleLayout( + device=layout.device, dtype=layout.dtype, size=layout.size + ) + choices = [aten_mm.bind((mat1, mat2), layout)] + + return autotune_select_algorithm("mm", choices, [mat1, mat2], layout) + + +@register_lowering(aten._int_mm, type_promotion_kind=None) +def tuned_int_mm(mat1, mat2, *, layout=None): + m, n, k, layout, mat1, mat2 = mm_args( + mat1, mat2, layout=layout, out_dtype=torch.int32 + ) + choices = ( + [aten__int_mm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else [] + ) + if m * n != 0 and use_triton_template(layout, enable_int32=True): + # TODO: Re-enable eager mode implementation once cuBLAS is fixed + choices = [] + for config in int8_mm_configs(m, n, k): + mm_template.maybe_append_choice( + choices, + input_nodes=(mat1, mat2), + layout=layout, + **mm_options(config, k, layout), + ) + return autotune_select_algorithm("int_mm", choices, [mat1, mat2], layout) + + +@register_lowering(aten.addmm, type_promotion_kind=None) +def tuned_addmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None): + ordered_kwargs_for_cpp_kernel = ("beta", "alpha") + + m, n, k, layout, mat1, mat2, inp_expanded = mm_args(mat1, mat2, inp, layout=layout) + if m * n == 0 or not use_max_autotune(): + choices = ( + [ + aten_addmm.bind( + (inp, mat1, mat2), + layout, + ordered_kwargs_for_cpp_kernel, + alpha=alpha, + beta=beta, + ) + ] + if use_aten_gemm_kernels() + else [] + ) + return autotune_select_algorithm("addmm", choices, [inp, mat1, mat2], layout) + + choices = ( + [ + aten_addmm.bind( + (inp_expanded, mat1, mat2), + layout, + ordered_kwargs_for_cpp_kernel, + alpha=alpha, + beta=beta, + ) + ] + if use_aten_gemm_kernels() + else [] + ) + + if ( + use_aten_gemm_kernels() + and inp_expanded.get_stride()[0] == 0 + and inp_expanded.get_device().type == "cuda" + and inductor_config.triton.autotune_cublasLt + ): + # unexpand inp to make sure fused addmm from cublasLt is used + choices.insert( + 0, + aten_bias_addmm.bind( + (inp_expanded, mat1, mat2), layout, alpha=alpha, beta=beta + ), + ) + + if use_triton_template(layout): + for config in mm_configs(m, n, k): + mm_template.maybe_append_choice( + choices, + input_nodes=(inp_expanded, mat1, mat2), + layout=layout, + **mm_options(config, k, layout), + prefix_args=1, + epilogue_fn=addmm_epilogue(layout.dtype, alpha, beta), + ) + + 
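+ # Editor's note (illustrative): addmm_epilogue(layout.dtype, alpha, beta) above + # builds a closure computing alpha * acc + beta * bias, so the scaling and the + # bias add are fused into the Triton template's output store rather than run + # as a separate kernel.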
if use_cutlass_template(layout): + CUTLASSGemmTemplate.add_cutlass_gemm_choices( + choices, + layout, + [mat1, mat2, inp_expanded], + alpha=alpha, + beta=beta, + input_reorder=[2, 0, 1], + fuseable=False, + ) + + return autotune_select_algorithm( + "addmm", choices, [inp_expanded, mat1, mat2], layout + ) + + +def fallback_mixed_mm(mat1, mat2, *, out): + return torch.mm(mat1, mat2.to(mat1.dtype), out=out) + + +aten_fallback_mixed_mm = ExternKernelChoice(fallback_mixed_mm, None) + + +def tuned_mixed_mm(mat1, mat2, mat2_dtype): + m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=None) + choices = [aten_fallback_mixed_mm.bind((mat1, mat2), layout)] + if mat1.layout.dtype != torch.float32 and not mat2.layout.is_contiguous(): + # can't use triton kernel unless one of these is true + return autotune_select_algorithm("mixed_mm", choices, [mat1, mat2], layout) + if inductor_config.force_mixed_mm: + choices = [] + b_prologue_cast_type = f"tl.{mat2_dtype}".replace("torch.", "") + has_int8_tensor = _is_int8_mat(mat1) or _is_int8_mat(mat2) + for config in mm_configs(m, n, k, has_int8_tensor=has_int8_tensor): + mm_template.maybe_append_choice( + choices, + input_nodes=(mat1, mat2), + layout=layout, + **mm_options(config, k, layout, b_prologue_cast_type), + ) + return autotune_select_algorithm("mixed_mm", choices, [mat1, mat2], layout) + + +# This op is a special case of the int_mm op which we use based on the pattern +# _int_mm -> mul (defined in ../fx_passes/post_grad.py) in order to prevent +# realization of the int32 _int_mm output by forcing fusion with the mul op. +# This is only used when config.force_fuse_int_mm_with_mul = True +def tuned_fused_int_mm_mul(mat1, mat2, mat3, out_dtype, *, layout=None): + out_dtype = ( + torch.promote_types(mat3.get_dtype(), torch.int32) + if out_dtype is None + else out_dtype + ) + m, n, k, layout, mat1, mat2, mat3 = mm_args( + mat1, mat2, mat3, layout=layout, out_dtype=out_dtype + ) + choices: List[Dict[Any, Any]] = [] + for config in int8_mm_configs(m, n, k): + mm_template.maybe_append_choice( + choices, + input_nodes=(mat1, mat2, mat3), + layout=layout, + **dict(mm_options(config, k, layout), ACC_TYPE="tl.int32"), + suffix_args=1, + epilogue_fn=V.ops.mul, + ) + return autotune_select_algorithm("int_mm", choices, [mat1, mat2, mat3], layout) diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/mm_common.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/mm_common.py new file mode 100644 index 0000000000000000000000000000000000000000..26519c86c1192b04045c157cd6e4772864145dd9 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/mm_common.py @@ -0,0 +1,222 @@ +import functools +import logging +from typing import cast, List, Tuple + +import sympy + +import torch +from torch._inductor.select_algorithm import realize_inputs +from torch._inductor.virtualized import V + +from ..utils import ceildiv as cdiv, next_power_of_2 + +log = logging.getLogger(__name__) + + +def triton_config(num_stages, num_warps, **kwargs): + from triton import Config + + return Config(kwargs, num_stages=num_stages, num_warps=num_warps) + + +def filtered_configs( + m: int, + n: int, + k: int, + configs: List[Tuple[int, int, int, int, int]], + has_int8_tensor=False, +): + """Heuristic to shrink configs when they are bigger than the input size""" + + # According to https://github.com/openai/triton/issues/2156#issuecomment-1695897424 + # it's safer to use at least [32, 32] block size for int8/uint8 + # tensors + 
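+ # Editor's note, a worked example with illustrative numbers: for m = n = k = 48 + # the size hints below round up to 64, so a candidate config of + # (BLOCK_M, BLOCK_N, BLOCK_K, stages, warps) = (128, 128, 32, 2, 8) is clamped + # to (64, 64, 32, 2, 8); num_warps stays at min(8, 64 * 64 // 256) = 8.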
min_block_size = 32 if has_int8_tensor else 16 + m = max( + next_power_of_2( + V.graph.sizevars.size_hint( + m, fallback=torch._inductor.config.unbacked_symint_fallback + ) + ), + min_block_size, + ) + n = max( + next_power_of_2( + V.graph.sizevars.size_hint( + n, fallback=torch._inductor.config.unbacked_symint_fallback + ) + ), + min_block_size, + ) + k = max( + next_power_of_2( + V.graph.sizevars.size_hint( + k, fallback=torch._inductor.config.unbacked_symint_fallback + ) + ), + min_block_size, + ) + used = set() + for block_m, block_n, block_k, num_stages, num_warps in configs: + # shrink configs for small sizes + block_m = max(min(block_m, m), min_block_size) + block_n = max(min(block_n, n), min_block_size) + block_k = max(min(block_k, k), min_block_size) + # each warp computes 16x16 tile = 256 + num_warps = min(num_warps, block_m * block_n // 256) + if (block_m, block_n, block_k, num_stages, num_warps) not in used: + used.add((block_m, block_n, block_k, num_stages, num_warps)) + yield triton_config( + BLOCK_M=block_m, + BLOCK_N=block_n, + BLOCK_K=block_k, + num_stages=num_stages, + num_warps=num_warps, + ) + + +# List of dictionaries to store the kernel configs. Configs that evaluate to true +# will be utilised on the target platform +mm_kernel_configs = [ + # "BLOCK_M", "BLOCK_N", "BLOCK_K", "num_stages", "num_warps" + {"config": (64, 64, 32, 2, 4), "cond": True}, + {"config": (64, 128, 32, 3, 4), "cond": True}, + {"config": (128, 64, 32, 3, 4), "cond": True}, + {"config": (64, 128, 32, 4, 8), "cond": True}, + {"config": (128, 64, 32, 4, 8), "cond": True}, + {"config": (64, 32, 32, 5, 8), "cond": True}, + {"config": (32, 64, 32, 5, 8), "cond": True}, + {"config": (128, 128, 32, 2, 8), "cond": True}, + {"config": (64, 64, 64, 3, 8), "cond": True}, + {"config": (32, 32, 128, 2, 4), "cond": torch.version.hip is None}, + {"config": (64, 64, 16, 2, 4), "cond": True}, + {"config": (32, 32, 16, 1, 2), "cond": True}, +] + +int8_mm_kernel_configs = [ + {"config": (64, 64, 32, 2, 4), "cond": True}, + {"config": (64, 128, 32, 3, 4), "cond": True}, + {"config": (128, 64, 32, 3, 4), "cond": True}, + {"config": (64, 128, 32, 4, 8), "cond": True}, + {"config": (128, 64, 32, 4, 8), "cond": True}, + {"config": (64, 32, 32, 5, 8), "cond": True}, + {"config": (32, 64, 32, 5, 8), "cond": True}, + {"config": (128, 128, 32, 2, 8), "cond": True}, + {"config": (64, 64, 64, 3, 8), "cond": True}, + # {"config": (32, 32, 128, 2, 4), "cond": True}, + # {"config": (64, 64, 16, 2, 4), "cond": True}, + # {"config": (32, 32, 16, 1, 2), "cond": True}, + {"config": (128, 256, 128, 3, 8), "cond": torch.version.hip is None}, + {"config": (256, 128, 128, 3, 8), "cond": torch.version.hip is None}, +] + +# Create filtered list of configs based on cond evaluation + + +mm_platform_configs = tuple( + cast(Tuple[int, int, int, int, int], config["config"]) + for config in mm_kernel_configs + if config["cond"] +) +int8_platform_configs = tuple( + cast(Tuple[int, int, int, int, int], config["config"]) + for config in int8_mm_kernel_configs + if config["cond"] +) + +# On ROCm convert num_stages to 1 as pipelining provides no benefit +if torch.version.hip: + mm_platform_configs = tuple( + (config[0], config[1], config[2], 1, config[4]) + for config in mm_platform_configs + ) + int8_platform_configs = tuple( + (config[0], config[1], config[2], 1, config[4]) + for config in mm_platform_configs + ) + +mm_configs = functools.partial( + filtered_configs, + configs=mm_platform_configs, +) + +int8_mm_configs = functools.partial( + 
filtered_configs, + configs=int8_platform_configs, +) + + +def mm_grid(m, n, meta): + """ + The CUDA grid size for matmul triton templates. + """ + return (cdiv(m, meta["BLOCK_M"]) * cdiv(n, meta["BLOCK_N"]), 1, 1) + + +def acc_type(dtype): + if dtype in (torch.float16, torch.bfloat16): + return "tl.float32" + return f"tl.{dtype}".replace("torch.", "") + + +def mm_options(config, sym_k, layout, b_prologue_cast_type=None): + """ + Common options to matmul triton templates. + """ + even_k_symbolic = ( + # it isn't worth guarding on this + sympy.gcd(sym_k, config.kwargs["BLOCK_K"]) + == config.kwargs["BLOCK_K"] + ) + return dict( + GROUP_M=8, + EVEN_K=even_k_symbolic, + ALLOW_TF32=torch.backends.cuda.matmul.allow_tf32, + ACC_TYPE=acc_type(layout.dtype), + B_PROLOGUE_CAST_TYPE=b_prologue_cast_type, + num_stages=config.num_stages, + num_warps=config.num_warps, + **config.kwargs, + ) + + +def mm_args(mat1, mat2, *others, layout=None, out_dtype=None, use_4x2_dim=False): + """ + Common arg processing for mm,bmm,addmm,etc + """ + mat1, mat2 = realize_inputs(mat1, mat2) + *b1, m, k1 = mat1.get_size() + *b2, k2, n = mat2.get_size() + b = [V.graph.sizevars.guard_equals(a, b) for a, b in zip(b1, b2)] + if use_4x2_dim: + k2 = k2 * 2 + k = V.graph.sizevars.guard_equals(k1, k2) + if layout is None: + from torch._inductor.ir import FixedLayout + + if out_dtype is None: + out_dtype = mat1.get_dtype() + layout = FixedLayout( + mat1.get_device(), + out_dtype, + [*b, m, n], + ) + else: + assert out_dtype is None, "out_dtype is ignored if layout is specified." + + from ..lowering import expand + + others = [realize_inputs(expand(x, layout.size)) for x in others] + + return [m, n, k, layout, mat1, mat2, *others] + + +def addmm_epilogue(dtype, alpha, beta): + def epilogue(acc, bias): + if alpha != 1: + acc = V.ops.mul(acc, V.ops.constant(alpha, dtype)) + if beta != 1: + bias = V.ops.mul(bias, V.ops.constant(beta, dtype)) + return V.ops.add(acc, bias) + + return epilogue diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b79948c60780860d85a8d7de1d05217f2182858 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__pycache__/ts_backend.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__pycache__/ts_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6597825e5c4bf873950eb21ecb659fb6daba4bcb Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__pycache__/ts_backend.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/closure.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/closure.py new file mode 100644 index 0000000000000000000000000000000000000000..07f1055ee82783643bf5e57c8713d90aa1d15df6 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/closure.py @@ -0,0 +1,134 @@ +import os +import threading +from queue import Empty as EmptyQueue, Queue + +from torch._lazy.device_context import get_device_context + + +class ClosureHandler: + def __init__(self): + pass + + def run(self, closure): + """Run closure function + + Args: + closure: callable function 
to run + """ + closure() + + def __call__(self, closures): + for closure in closures: + self.run(closure) + + +class AsyncClosureHandler(ClosureHandler): + """Handler for Asynchronous Step Closures + Args: + max_queue_size: The maximum length of the closure queue after which + the training loop will block until closures are evaluated. + By default, a reasonable limit of a maximum of 100 on the queue. + This value can be set using the `XLA_MAX_ASYNC_QUEUE` environment + variable. + """ + + def __init__(self, max_queue_size=100): + super().__init__() + self._closure_queue: Queue = Queue( + int(os.environ.get("LTC_MAX_ASYNC_QUEUE", max_queue_size)) + ) + self._closure_exception: Queue = Queue() + self._closure_lock = threading.Lock() + self._closure_event_loop_finished = threading.Event() + self._closure_event_loop = None + + def start_event_loop(self): + """Start closure event loop if not started""" + if self._closure_event_loop is None: + + def event_loop(): + # Run loop until closure event is set and closure queue is empty + while True: + try: + closure = self._closure_queue.get(block=True, timeout=3) + closure() + self._closure_queue.task_done() + except EmptyQueue: + with self._closure_lock: + if self._closure_queue.empty(): + self._closure_event_loop_finished.set() + return + except Exception as e: + self._closure_exception.put(e) + return + + self._closure_event_loop = threading.Thread(target=event_loop) + self._closure_event_loop.start() + + def run(self, closure): + with self._closure_lock: + self._closure_queue.put(closure, block=True) + if ( + self._closure_event_loop is None + or not self._closure_event_loop.is_alive() + ): + try: + e = self._closure_exception.get(block=False) + raise RuntimeError( + "Cannot run asynchronous closure due to previously raised exception" + ) from e + except EmptyQueue: + self._closure_event_loop = None + self.start_event_loop() + + +def add_step_closure(closure, args=(), run_async=False): + """Adds a closure to the list of the ones to be run at the end of the step. + Many times during model training there is the need to print/report (print to + console, post to tensorboard, etc...) information which require the content of + intermediary tensors to be inspected. + Inspecting different tensors content in different points of the model code + requires many executions and typically causes performance issues. + Adding a step closure will ensure that it will be run after the barrier, when + all the live tensors will be already materialized to device data. + Live tensors which will include the ones captured by the closure arguments. + So using `add_step_closure()` will ensure a single execution will be + performed, even when multiple closures are queued, requiring multiple tensors + to be inspected. + Step closures will be run sequentially in the order they have been queued. + Note that even though using this API the execution will be optimized, it is + advised to throttle the printing/reporting events once every N steps. + Args: + closure (callable): The function to be called. + args (tuple): The arguments to be passed to the closure. + run_async: If True, run the closure asynchronously. 
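+ Example (editor's sketch; assumes a lazy tensor `loss` and that the step + boundary, e.g. torch._lazy.mark_step(), runs the queued closures): + >>> from torch._lazy.closure import add_step_closure + >>> add_step_closure(lambda t: print(t.item()), args=(loss,)) + >>> torch._lazy.mark_step() # closures run once tensors are materialized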
+ """ + devctx = get_device_context() + closures_type = "async_step_closures" if run_async else "step_closures" + step_closures = getattr(devctx, closures_type, None) + if step_closures is None: + step_closures = [] + setattr(devctx, closures_type, step_closures) + step_closures.append(lambda a=args: closure(*a)) + + +def run_step_closures(): + devctx = get_device_context() + async_step_closures = getattr(devctx, "async_step_closures", None) + if async_step_closures is not None: + devctx.async_step_closures = [] + async_closure_handler = getattr(devctx, "async_closure_handler", None) + if async_closure_handler is None: + async_closure_handler = AsyncClosureHandler() + devctx.async_closure_handler = async_closure_handler + async_closure_handler(async_step_closures) + + step_closures = getattr(devctx, "step_closures", None) + if step_closures is not None: + devctx.step_closures = [] + closure_handler = getattr(devctx, "closure_handler", None) + if closure_handler is None: + closure_handler = ClosureHandler() + devctx.closure_handler = closure_handler + closure_handler(step_closures) + return devctx diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/computation.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/computation.py new file mode 100644 index 0000000000000000000000000000000000000000..27b73c42e5c0de39e5112f717796cfce5d808bc1 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/computation.py @@ -0,0 +1,26 @@ +import torch._C._lazy +import torch._C._lazy_ts_backend + + +def get_tensors_ts_device_data_node(tensors): + """Return tensor ids and eager tensors for DeviceData nodes in the + IR for the passed in lazy tensors. + + TODO: This API is currently ts backend specific. We are working on + generalizing it to all backends including XLA. + """ + return torch._C._lazy_ts_backend._get_tensors_ts_device_data_node(tensors) + + +def get_graph_hash(tensors): + """Return the graph hash for the passed in lazy tensors""" + return torch._C._lazy._get_graph_hash(tensors) + + +def run_cached_graph(hash_str, graph_inputs): + """Running the cached computation graph with the given inputs + + TODO: This API is currently ts backend specific. We are working on + generalizing it to all backends including XLA. 
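+ Editor's note for context: in extract_compiled_graph, hash_str comes from + get_graph_hash(tensors) and graph_inputs from a GraphInputMatcher call.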
+ """ + return torch._C._lazy_ts_backend._run_cached_graph(hash_str, graph_inputs) diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/config.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/config.py new file mode 100644 index 0000000000000000000000000000000000000000..e7a4d1dd24f8dbf505995982bbb33b8d90d3de2e --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/config.py @@ -0,0 +1,16 @@ +import torch._C._lazy + + +def get_force_fallback(): + """Get the config used to force LTC fallback""" + return torch._C._lazy._get_force_fallback() + + +def set_force_fallback(configval): + """Set the config used to force LTC fallback""" + torch._C._lazy._set_force_fallback(configval) + + +def set_reuse_ir(val: bool): + """Set the config to reuse IR nodes for faster tracing""" + torch._C._lazy._set_reuse_ir(val) diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/debug.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/debug.py new file mode 100644 index 0000000000000000000000000000000000000000..286aa049280c9d9555f64042f35b4a5fd57d0059 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/debug.py @@ -0,0 +1,21 @@ +import torch._C._lazy + + +def render_ir_graph(tensors): + """Return a text dump of the LTC IR graph in dot format for the tensors. + The text can be processed by tools like dot to be rendered in pdf,png etc.""" + return torch._C._lazy._get_tensors_dot(tensors) + + +def dump_ir(tensors, ir_format): + """Return a dump of the tensors in the specified format. + Valid format are + - text: for LTC IR + - backend: for the activate backend IR + """ + if ir_format == "text": + return torch._C._lazy._get_tensors_text(tensors) + elif ir_format == "backend": + return torch._C._lazy._get_tensors_backend(tensors) + else: + raise RuntimeError(f"Unrecognized IR format: {ir_format}") diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/device_context.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/device_context.py new file mode 100644 index 0000000000000000000000000000000000000000..840c7f8e50d039c9b72f31b16e8d69f706920534 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/device_context.py @@ -0,0 +1,25 @@ +import threading +from typing import Any, Dict + +import torch._C._lazy + + +class DeviceContext: + _CONTEXTS: Dict[str, Any] = dict() + _CONTEXTS_LOCK = threading.Lock() + + def __init__(self, device): + self.device = device + + +def get_device_context(device=None): + if device is None: + device = torch._C._lazy._get_default_device_type() + else: + device = str(device) + with DeviceContext._CONTEXTS_LOCK: + devctx = DeviceContext._CONTEXTS.get(device, None) + if devctx is None: + devctx = DeviceContext(device) + DeviceContext._CONTEXTS[device] = devctx + return devctx diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/extract_compiled_graph.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/extract_compiled_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..033d000c69d858aa1b8264d90c7d3e984229eb23 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/extract_compiled_graph.py @@ -0,0 +1,223 @@ +import copy +import dataclasses +import itertools +import os +from typing import Any, Callable, Dict, List + +import torch +import torch._lazy as lazy +import torch._lazy.metrics as metrics +from torch import fx +from torch._lazy import computation, debug as lazy_debug +from 
torch._lazy.tensor_factory_functions import tensor_factory_functions + +debug = os.environ.get("debug_extract_compiled_graph") is not None + + +@dataclasses.dataclass +class GraphInputMatcher: + """ + The GraphInputMatcher class sets up the graph inputs for future calls after lazy tracing. + Specifically, those graph inputs corresponding to method parameters should be replaced with the + arguments for the current call. + + tensor_id_to_arg_idx maps the tensor id to the parameter index. + graph_input_tensor_ids, graph_input_ivalues list the tensor_id and ivalue for each of the + TS/XLA graph inputs. + """ + + tensor_id_to_arg_idx: Dict[int, int] + graph_input_tensor_ids: List[int] + # there are 2 categories of graph_input_tensors. + # Category 1: those whose id are not found in tensor_id_to_arg_idx. These are + # most likely const tensors and we can get their content from graph_input_tensors + # Category 2: those whose id are found in tensor_id_to_arg_idx. We should get + # the tensor from the method arguments + graph_input_ivalues: List[Any] + + # get the real graph input tensors + def __call__(self, args): + real_input = [] + for tensor_id, traced_ivalue in zip( + self.graph_input_tensor_ids, self.graph_input_ivalues + ): + arg_idx = self.tensor_id_to_arg_idx.get(tensor_id, None) + if arg_idx is None: + inp = traced_ivalue + else: + inp = args[arg_idx] + real_input.append(inp) + return real_input + + +class ReturnValueHandler: + r""" + When ltc_sync_multi is called on multiple tensors, the compiled graph + will contain output only for unique tensors - if a tensor appears multiple + times in the input to _ltc_sync_multi, only the first occurrence matters. + + However, from the Python level we still expect multiple tensors to be returned, with duplication, + even if the TS graph dedups the output, e.g. for the method: + + def forward(self, a): + return a, a + + the TS graph captured by LTC will return a single tensor, but the Python method expects 2. + + This class dedups the lazy tensors first to get the index that will be used + to duplicate the eager tensors later. + """ + + def __init__(self, lazy_out_list): + self.index: List[List[int]] = [] + self.total_count = len(lazy_out_list) + + tensor_id_to_idx: Dict[int, int] = {} + for dup_idx, lazy_tensor in enumerate(lazy_out_list): + uniq_idx = tensor_id_to_idx.get(id(lazy_tensor), None) + if uniq_idx is not None: + self.index[uniq_idx].append(dup_idx) + else: + uniq_idx = len(self.index) + self.index.append([dup_idx]) + tensor_id_to_idx[id(lazy_tensor)] = uniq_idx + + def duplicate_eager_tensors(self, eager_tensor_list): + duplicated_list = [None] * self.total_count + assert len(eager_tensor_list) == len(self.index) + + for uniq_idx, eager_tensor in enumerate(eager_tensor_list): + for dup_idx in self.index[uniq_idx]: + duplicated_list[dup_idx] = eager_tensor + return duplicated_list + + +def force_lazy_device(model: fx.GraphModule): + """ + Factory methods in an Fx graph may create tensors for specific eager devices. + If we take no action, those eager tensors will be mixed with lazy tensors and + cause a crash. This method overwrites those eager devices with the lazy device.
+ """ + + def tolazydevice(dev): + if isinstance(dev, torch.device): + return torch.device("lazy", index=dev.index) + return dev + + def hasDeviceArg(args, kwargs): + return any( + isinstance(arg, torch.device) + for arg in itertools.chain(args, kwargs.values()) + ) + + for nd in model.graph.nodes: + nd.args = tuple(tolazydevice(arg) for arg in nd.args) + nd.kwargs = {k: tolazydevice(v) for k, v in nd.kwargs.items()} + + # For torchbench models like yolov3 and hf_Bart, dynamo generates an Fx graph that returns + # eager tensors on the default device + # (check https://gist.github.com/shunting314/eabdf6c769c59bc384469717b8f9bb7f for yolov3, + # and https://gist.github.com/shunting314/8d5e2d9348a3258959d3954186c48814 for hf_Bart). + # To force those tensors onto the lazy device, we cannot simply override + # the device argument since there is no explicit device argument. + # What we do here is, for the list of covered tensor factory methods, + # add a lazy device argument explicitly. + # + # TODO: This solution is not ideal since we may miss some factory methods. In the future, + # when we support lazy mode, this method can be replaced by that. + if nd.target in tensor_factory_functions and not hasDeviceArg( + nd.args, nd.kwargs + ): + kwargs = dict(nd.kwargs) # nd.kwargs is immutable. make a mutable copy. + kwargs["device"] = torch.device("lazy") + nd.kwargs = kwargs + + model.recompile() + + +def get_fallback_ops(): + fallback_ops = [] + for opname in metrics.counter_names(): + if "aten::" not in opname: + continue + val = int(metrics.counter_value(opname)) + if val > 0: + fallback_ops.append(f"{opname}={val}") + + return fallback_ops + + +def extract_compiled_graph(model: fx.GraphModule, example_inputs) -> Callable: + """ + Optimizes an eager model with LTC and returns a wrapper to execute the + compiled graph directly without retracing. It depends on other mechanisms + like TorchDynamo guards to guarantee the returned wrapper is only called + when it's safe.
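+ Illustrative usage (editor's sketch; fx_module is assumed to be a + torch.fx.GraphModule traced elsewhere): + optimized = extract_compiled_graph(fx_module, example_inputs) + outputs = optimized(*example_inputs)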
+ """ + lazy_args = [arg.to(device="lazy") for arg in example_inputs] + args_tensor_ids = [lazy.get_tensor_id(lazy_arg) for lazy_arg in lazy_args] + tensor_id_to_arg_idx = {tensor_id: i for i, tensor_id in enumerate(args_tensor_ids)} + lazy_model = copy.deepcopy(model).to(device=torch.device("lazy")) + force_lazy_device(lazy_model) + + # This executes lazy tracing and enables us to extract the compiled graph later + metrics.reset() + lazy_out = lazy_model(*lazy_args) + fallback_ops = get_fallback_ops() + metrics.reset() + + if len(fallback_ops) > 0: + raise RuntimeError( + f"Failed to extract the compiled graph because of fallback: {','.join(fallback_ops)}" + ) + + if not isinstance(lazy_out, (tuple, list)): + lazy_out = (lazy_out,) + + args_and_out = tuple(lazy_args) + tuple(lazy_out) + return_value_handler = ReturnValueHandler(args_and_out) + if debug: + print("Fx code:\n", model.code) + print("LTC IR:", lazy_debug.dump_ir(args_and_out, "text")) + + # TODO: this part is TS backend specific for now and will be generalized to + # support XLA + ( + graph_input_tensor_ids, + graph_input_ivalues, + ) = computation.get_tensors_ts_device_data_node(args_and_out) + assert len(graph_input_tensor_ids) == len(graph_input_ivalues) + graph_input_matcher = GraphInputMatcher( + tensor_id_to_arg_idx, graph_input_tensor_ids, graph_input_ivalues + ) + + graph_hash = computation.get_graph_hash(args_and_out) + + if debug: + print("graph_hash", graph_hash) + print(f"args_tensor_ids {args_tensor_ids}") + print("tensor ids from device data:", graph_input_tensor_ids) + + # sync the list of output tensors so the computation graph for these + # tensors will be cached. Those computation graphs can be retrieved + # by graph hash later. + lazy.sync_multi(args_and_out, []) + + def optimized_mod(*args): + if len(args_and_out) == 0: + return () + graph_input = graph_input_matcher(args) + res = return_value_handler.duplicate_eager_tensors( + computation.run_cached_graph(graph_hash, graph_input) + ) + + assert len(res) == len(args_and_out) + for i, arg in enumerate(args): + # only copy those tensors that get updated in place + if arg is not res[i]: + arg.copy_(res[i]) + + # skip the args + return res[len(args) :] + + return optimized_mod diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/ir_cache.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/ir_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..4270684d29434747f53177e48a58fd8dc9c7c44b --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/ir_cache.py @@ -0,0 +1,13 @@ +import torch._C._lazy + + +def dump(dot_file_name: str): + """Dump TrieCache in the dot format""" + return torch._C._lazy._dump_ir_cache(dot_file_name) + + +def reset(): + """Clear TrieCache. This is needed in testing to avoid + node reusing between different tests.
+ """ + return torch._C._lazy._clear_ir_cache() diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/metrics.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..2d7db730556779a353a1bb9f4b2529464d4bfc95 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/metrics.py @@ -0,0 +1,21 @@ +import torch._C._lazy + + +def reset(): + """Resets all metric counters.""" + torch._C._lazy._reset_metrics() + + +def counter_names(): + """Retrieves all the currently active counter names.""" + return torch._C._lazy._counter_names() + + +def counter_value(name: str): + """Return the value of the counter with the speficied name""" + return torch._C._lazy._counter_value(name) + + +def metrics_report(): + """Return the combined (lazy core and backend) metric report""" + return torch._C._lazy._metrics_report() diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/tensor_factory_functions.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/tensor_factory_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..47aa9c500466daadf282633d43f0335e0a8c0b70 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/tensor_factory_functions.py @@ -0,0 +1,48 @@ +import torch + +""" +tensor_factory_functions defines the list of torch functions that create tensors. +The list is grabbed by searching thru native_functions.yaml by the following +regular expression: + + cat native_functions.yaml | grep 'func:' | grep -v "Tensor.*->" | grep "[-]>.*Tensor" + +It's possible that new tensor factory functions are added making this list stale. +Use at your own risk or regenerate the list. +""" +tensor_factory_functions = ( + torch._cudnn_init_dropout_state, + torch.arange, + torch.bartlett_window, + torch.blackman_window, + torch._empty_affine_quantized, + torch.empty_strided, + torch.eye, + torch.full, + torch.from_file, + torch.hann_window, + torch.hamming_window, + torch.kaiser_window, + torch.linspace, + torch.logspace, + torch.ones, + torch.scalar_tensor, + torch.rand, + torch.randint, + torch.randn, + torch.randperm, + torch.range, + torch._efficientzerotensor, + torch.zeros, + torch.tril_indices, + torch.triu_indices, + # Note: the following functions match the regular expression search above but + # they are not available in the torch module. Comment out. 
+ # torch._sparse_coo_tensor_with_dims, + # torch.fft_fftfreq, + # torch.fft_rfftfreq, +) + ( + # torch.tensor is special since it's not in native_functions.yaml + # add it separately + torch.tensor, +) diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/absoft.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/absoft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..751e2de666d7370b2b02d68291c30c7d72bc32b8 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/absoft.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf5ad659d03f69fc69dfbf6cce6686764d937153 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/hpux.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/hpux.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cc4ee45430de7ec734d90b5b3b1decf5de7bf9f Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/hpux.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/intel.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/intel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..611ca3154ff36fbc10f3ccb5fc1aea8ca87a749e Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/intel.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/nag.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/nag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b72b53ad13ea46679cc6a26608ee9e66278e3df Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/nag.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/absoft.py b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/absoft.py new file mode 100644 index 0000000000000000000000000000000000000000..68f516b92751fd12343d0f3c9375b3e43e587247 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/absoft.py @@ -0,0 +1,156 @@ + +# Absoft Corporation ceased operations on 12/31/2022. +# Thus, all links to are invalid. 
+ +# Notes: +# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py +# generated extension modules (works for f2py v2.45.241_1936 and up) +import os + +from numpy.distutils.cpuinfo import cpu +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file +from numpy.distutils.misc_util import cyg2win32 + +compilers = ['AbsoftFCompiler'] + +class AbsoftFCompiler(FCompiler): + + compiler_type = 'absoft' + description = 'Absoft Corp Fortran Compiler' + #version_pattern = r'FORTRAN 77 Compiler (?P[^\s*,]*).*?Absoft Corp' + version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\ + r' (?P[^\s*,]*)(.*?Absoft Corp|)' + + # on windows: f90 -V -c dummy.f + # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 + + # samt5735(8)$ f90 -V -c dummy.f + # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 + # Note that fink installs g77 as f77, so need to use f90 for detection. + + executables = { + 'version_cmd' : None, # set by update_executables + 'compiler_f77' : ["f77"], + 'compiler_fix' : ["f90"], + 'compiler_f90' : ["f90"], + 'linker_so' : [""], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + if os.name=='nt': + library_switch = '/out:' #No space after /out:! + + module_dir_switch = None + module_include_switch = '-p' + + def update_executables(self): + f = cyg2win32(dummy_fortran_file()) + self.executables['version_cmd'] = ['', '-V', '-c', + f+'.f', '-o', f+'.o'] + + def get_flags_linker_so(self): + if os.name=='nt': + opt = ['/dll'] + # The "-K shared" switches are being left in for pre-9.0 versions + # of Absoft though I don't think versions earlier than 9 can + # actually be used to build shared libraries. In fact, version + # 8 of Absoft doesn't recognize "-K shared" and will fail. 
+ elif self.get_version() >= '9.0': + opt = ['-shared'] + else: + opt = ["-K", "shared"] + return opt + + def library_dir_option(self, dir): + if os.name=='nt': + return ['-link', '/PATH:%s' % (dir)] + return "-L" + dir + + def library_option(self, lib): + if os.name=='nt': + return '%s.lib' % (lib) + return "-l" + lib + + def get_library_dirs(self): + opt = FCompiler.get_library_dirs(self) + d = os.environ.get('ABSOFT') + if d: + if self.get_version() >= '10.0': + # use shared libraries, the static libraries were not compiled -fPIC + prefix = 'sh' + else: + prefix = '' + if cpu.is_64bit(): + suffix = '64' + else: + suffix = '' + opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) + return opt + + def get_libraries(self): + opt = FCompiler.get_libraries(self) + if self.get_version() >= '11.0': + opt.extend(['af90math', 'afio', 'af77math', 'amisc']) + elif self.get_version() >= '10.0': + opt.extend(['af90math', 'afio', 'af77math', 'U77']) + elif self.get_version() >= '8.0': + opt.extend(['f90math', 'fio', 'f77math', 'U77']) + else: + opt.extend(['fio', 'f90math', 'fmath', 'U77']) + if os.name =='nt': + opt.append('COMDLG32') + return opt + + def get_flags(self): + opt = FCompiler.get_flags(self) + if os.name != 'nt': + opt.extend(['-s']) + if self.get_version(): + if self.get_version()>='8.2': + opt.append('-fpic') + return opt + + def get_flags_f77(self): + opt = FCompiler.get_flags_f77(self) + opt.extend(['-N22', '-N90', '-N110']) + v = self.get_version() + if os.name == 'nt': + if v and v>='8.0': + opt.extend(['-f', '-N15']) + else: + opt.append('-f') + if v: + if v<='4.6': + opt.append('-B108') + else: + # Though -N15 is undocumented, it works with + # Absoft 8.0 on Linux + opt.append('-N15') + return opt + + def get_flags_f90(self): + opt = FCompiler.get_flags_f90(self) + opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", + "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) + if self.get_version(): + if self.get_version()>'4.6': + opt.extend(["-YDEALLOC=ALL"]) + return opt + + def get_flags_fix(self): + opt = FCompiler.get_flags_fix(self) + opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", + "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) + opt.extend(["-f", "fixed"]) + return opt + + def get_flags_opt(self): + opt = ['-O'] + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='absoft').get_version()) diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/compaq.py b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/compaq.py new file mode 100644 index 0000000000000000000000000000000000000000..01314c136acff7171298dc2819db4b50d7eec091 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/compaq.py @@ -0,0 +1,120 @@ + +#http://www.compaq.com/fortran/docs/ +import os +import sys + +from numpy.distutils.fcompiler import FCompiler +from distutils.errors import DistutilsPlatformError + +compilers = ['CompaqFCompiler'] +if os.name != 'posix' or sys.platform[:6] == 'cygwin' : + # Otherwise we'd get a false positive on posix systems with + # case-insensitive filesystems (like darwin), because we'll pick + # up /bin/df + compilers.append('CompaqVisualFCompiler') + +class CompaqFCompiler(FCompiler): + + compiler_type = 'compaq' + description = 'Compaq Fortran Compiler' + version_pattern = r'Compaq Fortran (?P[^\s]*).*' + + if sys.platform[:5]=='linux': + fc_exe = 
'fort' + else: + fc_exe = 'f90' + + executables = { + 'version_cmd' : ['', "-version"], + 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], + 'compiler_fix' : [fc_exe, "-fixed"], + 'compiler_f90' : [fc_exe], + 'linker_so' : [''], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + module_dir_switch = '-module ' # not tested + module_include_switch = '-I' + + def get_flags(self): + return ['-assume no2underscore', '-nomixed_str_len_arg'] + def get_flags_debug(self): + return ['-g', '-check bounds'] + def get_flags_opt(self): + return ['-O4', '-align dcommons', '-assume bigarrays', + '-assume nozsize', '-math_library fast'] + def get_flags_arch(self): + return ['-arch host', '-tune host'] + def get_flags_linker_so(self): + if sys.platform[:5]=='linux': + return ['-shared'] + return ['-shared', '-Wl,-expect_unresolved,*'] + +class CompaqVisualFCompiler(FCompiler): + + compiler_type = 'compaqv' + description = 'DIGITAL or Compaq Visual Fortran Compiler' + version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler' + r' Version (?P[^\s]*).*') + + compile_switch = '/compile_only' + object_switch = '/object:' + library_switch = '/OUT:' #No space after /OUT:! + + static_lib_extension = ".lib" + static_lib_format = "%s%s" + module_dir_switch = '/module:' + module_include_switch = '/I' + + ar_exe = 'lib.exe' + fc_exe = 'DF' + + if sys.platform=='win32': + from numpy.distutils.msvccompiler import MSVCCompiler + + try: + m = MSVCCompiler() + m.initialize() + ar_exe = m.lib + except DistutilsPlatformError: + pass + except AttributeError as e: + if '_MSVCCompiler__root' in str(e): + print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e)) + else: + raise + except OSError as e: + if not "vcvarsall.bat" in str(e): + print("Unexpected OSError in", __file__) + raise + except ValueError as e: + if not "'path'" in str(e): + print("Unexpected ValueError in", __file__) + raise + + executables = { + 'version_cmd' : ['', "/what"], + 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], + 'compiler_fix' : [fc_exe, "/fixed"], + 'compiler_f90' : [fc_exe], + 'linker_so' : [''], + 'archiver' : [ar_exe, "/OUT:"], + 'ranlib' : None + } + + def get_flags(self): + return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)', + '/names:lowercase', '/assume:underscore'] + def get_flags_opt(self): + return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] + def get_flags_arch(self): + return ['/threads'] + def get_flags_debug(self): + return ['/debug'] + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='compaq').get_version()) diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/environment.py b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/environment.py new file mode 100644 index 0000000000000000000000000000000000000000..ecd4d998927961f185dd0ddb498136a4f3581d0e --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/environment.py @@ -0,0 +1,88 @@ +import os +from distutils.dist import Distribution + +__metaclass__ = type + +class EnvironmentConfig: + def __init__(self, distutils_section='ALL', **kw): + self._distutils_section = distutils_section + self._conf_keys = kw + self._conf = None + self._hook_handler = None + + def dump_variable(self, name): + conf_desc = self._conf_keys[name] + hook, envvar, confvar, convert, append = conf_desc + if not convert: + convert = lambda x : x + 
print('%s.%s:' % (self._distutils_section, name)) + v = self._hook_handler(name, hook) + print(' hook : %s' % (convert(v),)) + if envvar: + v = os.environ.get(envvar, None) + print(' environ: %s' % (convert(v),)) + if confvar and self._conf: + v = self._conf.get(confvar, (None, None))[1] + print(' config : %s' % (convert(v),)) + + def dump_variables(self): + for name in self._conf_keys: + self.dump_variable(name) + + def __getattr__(self, name): + try: + conf_desc = self._conf_keys[name] + except KeyError: + raise AttributeError( + f"'EnvironmentConfig' object has no attribute '{name}'" + ) from None + + return self._get_var(name, conf_desc) + + def get(self, name, default=None): + try: + conf_desc = self._conf_keys[name] + except KeyError: + return default + var = self._get_var(name, conf_desc) + if var is None: + var = default + return var + + def _get_var(self, name, conf_desc): + hook, envvar, confvar, convert, append = conf_desc + if convert is None: + convert = lambda x: x + var = self._hook_handler(name, hook) + if envvar is not None: + envvar_contents = os.environ.get(envvar) + if envvar_contents is not None: + envvar_contents = convert(envvar_contents) + if var and append: + if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': + var.extend(envvar_contents) + else: + # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 + # to keep old (overwrite flags rather than append to + # them) behavior + var = envvar_contents + else: + var = envvar_contents + if confvar is not None and self._conf: + if confvar in self._conf: + source, confvar_contents = self._conf[confvar] + var = convert(confvar_contents) + return var + + + def clone(self, hook_handler): + ec = self.__class__(distutils_section=self._distutils_section, + **self._conf_keys) + ec._hook_handler = hook_handler + return ec + + def use_distribution(self, dist): + if isinstance(dist, Distribution): + self._conf = dist.get_option_dict(self._distutils_section) + else: + self._conf = dist diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/gnu.py b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/gnu.py new file mode 100644 index 0000000000000000000000000000000000000000..3472b5d4c0951cf4501436614a28375bea2a8cef --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/gnu.py @@ -0,0 +1,555 @@ +import re +import os +import sys +import warnings +import platform +import tempfile +import hashlib +import base64 +import subprocess +from subprocess import Popen, PIPE, STDOUT +from numpy.distutils.exec_command import filepath_from_subprocess_output +from numpy.distutils.fcompiler import FCompiler +from distutils.version import LooseVersion + +compilers = ['GnuFCompiler', 'Gnu95FCompiler'] + +TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") + +# XXX: handle cross compilation + + +def is_win64(): + return sys.platform == "win32" and platform.architecture()[0] == "64bit" + + +class GnuFCompiler(FCompiler): + compiler_type = 'gnu' + compiler_aliases = ('g77', ) + description = 'GNU Fortran 77 compiler' + + def gnu_version_match(self, version_string): + """Handle the different versions of GNU fortran compilers""" + # Strip warning(s) that may be emitted by gfortran + while version_string.startswith('gfortran: warning'): + version_string =\ + version_string[version_string.find('\n') + 1:].strip() + + # Gfortran versions from after 2010 will output a simple string + # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older + # gfortrans may still return long version 
strings (``-dumpversion`` was + # an alias for ``--version``) + if len(version_string) <= 20: + # Try to find a valid version string + m = re.search(r'([0-9.]+)', version_string) + if m: + # g77 provides a longer version string that starts with GNU + # Fortran + if version_string.startswith('GNU Fortran'): + return ('g77', m.group(1)) + + # gfortran only outputs a version string such as #.#.#, so check + # if the match is at the start of the string + elif m.start() == 0: + return ('gfortran', m.group(1)) + else: + # Output probably from --version, try harder: + m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) + if m: + return ('gfortran', m.group(1)) + m = re.search( + r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) + if m: + v = m.group(1) + if v.startswith('0') or v.startswith('2') or v.startswith('3'): + # the '0' is for early g77's + return ('g77', v) + else: + # at some point in the 4.x series, the ' 95' was dropped + # from the version string + return ('gfortran', v) + + # If still nothing, raise an error to make the problem easy to find. + err = 'A valid Fortran version was not found in this string:\n' + raise ValueError(err + version_string) + + def version_match(self, version_string): + v = self.gnu_version_match(version_string) + if not v or v[0] != 'g77': + return None + return v[1] + + possible_executables = ['g77', 'f77'] + executables = { + 'version_cmd' : [None, "-dumpversion"], + 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], + 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes + 'compiler_fix' : None, + 'linker_so' : [None, "-g", "-Wall"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"], + 'linker_exe' : [None, "-g", "-Wall"] + } + module_dir_switch = None + module_include_switch = None + + # Cygwin: f771: warning: -fPIC ignored for target (all code is + # position independent) + if os.name != 'nt' and sys.platform != 'cygwin': + pic_flags = ['-fPIC'] + + # use -mno-cygwin for g77 when Python is not Cygwin-Python + if sys.platform == 'win32': + for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: + executables[key].append('-mno-cygwin') + + g2c = 'g2c' + suggested_f90_compiler = 'gnu95' + + def get_flags_linker_so(self): + opt = self.linker_so[1:] + if sys.platform == 'darwin': + target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) + # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value + # and leave it alone. But, distutils will complain if the + # environment's value is different from the one in the Python + # Makefile used to build Python. We let distutils handle this + # error checking. + if not target: + # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, + # we try to get it first from sysconfig and then + # fall back to setting it to 10.9 This is a reasonable default + # even when using the official Python dist and those derived + # from it. + import sysconfig + target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') + if not target: + target = '10.9' + s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}' + warnings.warn(s, stacklevel=2) + os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target) + opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) + else: + opt.append("-shared") + if sys.platform.startswith('sunos'): + # SunOS often has dynamically loaded symbols defined in the + # static library libg2c.a The linker doesn't like this. To + # ignore the problem, use the -mimpure-text flag. It isn't + # the safest thing, but seems to work. 
'man gcc' says: + # ".. Instead of using -mimpure-text, you should compile all + # source code with -fpic or -fPIC." + opt.append('-mimpure-text') + return opt + + def get_libgcc_dir(self): + try: + output = subprocess.check_output(self.compiler_f77 + + ['-print-libgcc-file-name']) + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + return os.path.dirname(output) + return None + + def get_libgfortran_dir(self): + if sys.platform[:5] == 'linux': + libgfortran_name = 'libgfortran.so' + elif sys.platform == 'darwin': + libgfortran_name = 'libgfortran.dylib' + else: + libgfortran_name = None + + libgfortran_dir = None + if libgfortran_name: + find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] + try: + output = subprocess.check_output( + self.compiler_f77 + find_lib_arg) + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + libgfortran_dir = os.path.dirname(output) + return libgfortran_dir + + def get_library_dirs(self): + opt = [] + if sys.platform[:5] != 'linux': + d = self.get_libgcc_dir() + if d: + # if windows and not cygwin, libg2c lies in a different folder + if sys.platform == 'win32' and not d.startswith('/usr/lib'): + d = os.path.normpath(d) + path = os.path.join(d, "lib%s.a" % self.g2c) + if not os.path.exists(path): + root = os.path.join(d, *((os.pardir, ) * 4)) + d2 = os.path.abspath(os.path.join(root, 'lib')) + path = os.path.join(d2, "lib%s.a" % self.g2c) + if os.path.exists(path): + opt.append(d2) + opt.append(d) + # For Macports / Linux, libgfortran and libgcc are not co-located + lib_gfortran_dir = self.get_libgfortran_dir() + if lib_gfortran_dir: + opt.append(lib_gfortran_dir) + return opt + + def get_libraries(self): + opt = [] + d = self.get_libgcc_dir() + if d is not None: + g2c = self.g2c + '-pic' + f = self.static_lib_format % (g2c, self.static_lib_extension) + if not os.path.isfile(os.path.join(d, f)): + g2c = self.g2c + else: + g2c = self.g2c + + if g2c is not None: + opt.append(g2c) + c_compiler = self.c_compiler + if sys.platform == 'win32' and c_compiler and \ + c_compiler.compiler_type == 'msvc': + opt.append('gcc') + if sys.platform == 'darwin': + opt.append('cc_dynamic') + return opt + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + v = self.get_version() + if v and v <= '3.3.3': + # With this compiler version building Fortran BLAS/LAPACK + # with -O3 caused failures in lib.lapack heevr,syevr tests. 
+ opt = ['-O2'] + else: + opt = ['-O3'] + opt.append('-funroll-loops') + return opt + + def _c_arch_flags(self): + """ Return detected arch flags from CFLAGS """ + import sysconfig + try: + cflags = sysconfig.get_config_vars()['CFLAGS'] + except KeyError: + return [] + arch_re = re.compile(r"-arch\s+(\w+)") + arch_flags = [] + for arch in arch_re.findall(cflags): + arch_flags += ['-arch', arch] + return arch_flags + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + if sys.platform == 'win32' or sys.platform == 'cygwin': + # Linux/Solaris/Unix support RPATH, Windows does not + raise NotImplementedError + + # TODO: could use -Xlinker here, if it's supported + assert "," not in dir + + if sys.platform == 'darwin': + return f'-Wl,-rpath,{dir}' + elif sys.platform.startswith(('aix', 'os400')): + # AIX RPATH is called LIBPATH + return f'-Wl,-blibpath:{dir}' + else: + return f'-Wl,-rpath={dir}' + + +class Gnu95FCompiler(GnuFCompiler): + compiler_type = 'gnu95' + compiler_aliases = ('gfortran', ) + description = 'GNU Fortran 95 compiler' + + def version_match(self, version_string): + v = self.gnu_version_match(version_string) + if not v or v[0] != 'gfortran': + return None + v = v[1] + if LooseVersion(v) >= "4": + # gcc-4 series releases do not support -mno-cygwin option + pass + else: + # use -mno-cygwin flag for gfortran when Python is not + # Cygwin-Python + if sys.platform == 'win32': + for key in [ + 'version_cmd', 'compiler_f77', 'compiler_f90', + 'compiler_fix', 'linker_so', 'linker_exe' + ]: + self.executables[key].append('-mno-cygwin') + return v + + possible_executables = ['gfortran', 'f95'] + executables = { + 'version_cmd' : ["", "-dumpversion"], + 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", + "-fno-second-underscore"], + 'compiler_f90' : [None, "-Wall", "-g", + "-fno-second-underscore"], + 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", + "-fno-second-underscore"], + 'linker_so' : ["", "-Wall", "-g"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"], + 'linker_exe' : [None, "-Wall"] + } + + module_dir_switch = '-J' + module_include_switch = '-I' + + if sys.platform.startswith(('aix', 'os400')): + executables['linker_so'].append('-lpthread') + if platform.architecture()[0][:2] == '64': + for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: + executables[key].append('-maix64') + + g2c = 'gfortran' + + def _universal_flags(self, cmd): + """Return a list of -arch flags for every supported architecture.""" + if not sys.platform == 'darwin': + return [] + arch_flags = [] + # get arches the C compiler gets. 
+ c_archs = self._c_arch_flags() + if "i386" in c_archs: + c_archs[c_archs.index("i386")] = "i686" + # check the arches the Fortran compiler supports, and compare with + # arch flags from C compiler + for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]: + if _can_target(cmd, arch) and arch in c_archs: + arch_flags.extend(["-arch", arch]) + return arch_flags + + def get_flags(self): + flags = GnuFCompiler.get_flags(self) + arch_flags = self._universal_flags(self.compiler_f90) + if arch_flags: + flags[:0] = arch_flags + return flags + + def get_flags_linker_so(self): + flags = GnuFCompiler.get_flags_linker_so(self) + arch_flags = self._universal_flags(self.linker_so) + if arch_flags: + flags[:0] = arch_flags + return flags + + def get_library_dirs(self): + opt = GnuFCompiler.get_library_dirs(self) + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + target = self.get_target() + if target: + d = os.path.normpath(self.get_libgcc_dir()) + root = os.path.join(d, *((os.pardir, ) * 4)) + path = os.path.join(root, "lib") + mingwdir = os.path.normpath(path) + if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): + opt.append(mingwdir) + # For Macports / Linux, libgfortran and libgcc are not co-located + lib_gfortran_dir = self.get_libgfortran_dir() + if lib_gfortran_dir: + opt.append(lib_gfortran_dir) + return opt + + def get_libraries(self): + opt = GnuFCompiler.get_libraries(self) + if sys.platform == 'darwin': + opt.remove('cc_dynamic') + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + if "gcc" in opt: + i = opt.index("gcc") + opt.insert(i + 1, "mingwex") + opt.insert(i + 1, "mingw32") + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + return [] + else: + pass + return opt + + def get_target(self): + try: + p = subprocess.Popen( + self.compiler_f77 + ['-v'], + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + stdout, stderr = p.communicate() + output = (stdout or b"") + (stderr or b"") + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + m = TARGET_R.search(output) + if m: + return m.group(1) + return "" + + def _hash_files(self, filenames): + h = hashlib.sha1() + for fn in filenames: + with open(fn, 'rb') as f: + while True: + block = f.read(131072) + if not block: + break + h.update(block) + text = base64.b32encode(h.digest()) + text = text.decode('ascii') + return text.rstrip('=') + + def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, + chained_dlls, is_archive): + """Create a wrapper shared library for the given objects + + Return an MSVC-compatible lib + """ + + c_compiler = self.c_compiler + if c_compiler.compiler_type != "msvc": + raise ValueError("This method only supports MSVC") + + object_hash = self._hash_files(list(objects) + list(chained_dlls)) + + if is_win64(): + tag = 'win_amd64' + else: + tag = 'win32' + + basename = 'lib' + os.path.splitext( + os.path.basename(objects[0]))[0][:8] + root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag + dll_name = root_name + '.dll' + def_name = root_name + '.def' + lib_name = root_name + '.lib' + dll_path = os.path.join(extra_dll_dir, dll_name) + def_path = os.path.join(output_dir, def_name) + lib_path = os.path.join(output_dir, lib_name) + + if os.path.isfile(lib_path): + # Nothing to do + return lib_path, dll_path + + if is_archive: + objects = (["-Wl,--whole-archive"] + list(objects) + + ["-Wl,--no-whole-archive"]) + self.link_shared_object( + objects, + dll_name, + output_dir=extra_dll_dir, + extra_postargs=list(chained_dlls) + [ + '-Wl,--allow-multiple-definition', + '-Wl,--output-def,' + def_path, + '-Wl,--export-all-symbols', + '-Wl,--enable-auto-import', + '-static', + '-mlong-double-64', + ]) + + # No PowerPC! + if is_win64(): + specifier = '/MACHINE:X64' + else: + specifier = '/MACHINE:X86' + + # MSVC specific code + lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] + if not c_compiler.initialized: + c_compiler.initialize() + c_compiler.spawn([c_compiler.lib] + lib_args) + + return lib_path, dll_path + + def can_ccompiler_link(self, compiler): + # MSVC cannot link objects compiled by GNU fortran + return compiler.compiler_type not in ("msvc", ) + + def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): + """ + Convert a set of object files that are not compatible with the default + linker, to a file that is compatible. + """ + if self.c_compiler.compiler_type == "msvc": + # Compile a DLL and return the lib for the DLL as + # the object. Also keep track of previous DLLs that + # we have compiled so that we can link against them. + + # If there are .a archives, assume they are self-contained + # static libraries, and build separate DLLs for each + archives = [] + plain_objects = [] + for obj in objects: + if obj.lower().endswith('.a'): + archives.append(obj) + else: + plain_objects.append(obj) + + chained_libs = [] + chained_dlls = [] + for archive in archives[::-1]: + lib, dll = self._link_wrapper_lib( + [archive], + output_dir, + extra_dll_dir, + chained_dlls=chained_dlls, + is_archive=True) + chained_libs.insert(0, lib) + chained_dlls.insert(0, dll) + + if not plain_objects: + return chained_libs + + lib, dll = self._link_wrapper_lib( + plain_objects, + output_dir, + extra_dll_dir, + chained_dlls=chained_dlls, + is_archive=False) + return [lib] + chained_libs + else: + raise ValueError("Unsupported C compiler") + + +def _can_target(cmd, arch): + """Return true if the architecture supports the -arch flag""" + newcmd = cmd[:] + fid, filename = tempfile.mkstemp(suffix=".f") + os.close(fid) + try: + d = os.path.dirname(filename) + output = os.path.splitext(filename)[0] + ".o" + try: + newcmd.extend(["-arch", arch, "-c", filename]) + p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) + p.communicate() + return p.returncode == 0 + finally: + if os.path.exists(output): + os.remove(output) + finally: + os.remove(filename) + + +if __name__ == '__main__': + from distutils import log + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + + print(customized_fcompiler('gnu').get_version()) + try: + print(customized_fcompiler('g95').get_version()) + except Exception as e: + print(e) diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/ibm.py b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/ibm.py new file mode 100644 index 0000000000000000000000000000000000000000..29927518c703581d7c4bf0aecd06fe2ea0904ed8 --- /dev/null +++ 
b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/ibm.py @@ -0,0 +1,97 @@ +import os +import re +import sys +import subprocess + +from numpy.distutils.fcompiler import FCompiler +from numpy.distutils.exec_command import find_executable +from numpy.distutils.misc_util import make_temp_file +from distutils import log + +compilers = ['IBMFCompiler'] + +class IBMFCompiler(FCompiler): + compiler_type = 'ibm' + description = 'IBM XL Fortran Compiler' + version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P[^\s*]*)' + #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 + + executables = { + 'version_cmd' : ["", "-qversion"], + 'compiler_f77' : ["xlf"], + 'compiler_fix' : ["xlf90", "-qfixed"], + 'compiler_f90' : ["xlf90"], + 'linker_so' : ["xlf95"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_version(self,*args,**kwds): + version = FCompiler.get_version(self,*args,**kwds) + + if version is None and sys.platform.startswith('aix'): + # use lslpp to find out xlf version + lslpp = find_executable('lslpp') + xlf = find_executable('xlf') + if os.path.exists(xlf) and os.path.exists(lslpp): + try: + o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp']) + except (OSError, subprocess.CalledProcessError): + pass + else: + m = re.search(r'xlfcmp:(?P\d+([.]\d+)+)', o) + if m: version = m.group('version') + + xlf_dir = '/etc/opt/ibmcmp/xlf' + if version is None and os.path.isdir(xlf_dir): + # linux: + # If the output of xlf does not contain version info + # (that's the case with xlf 8.1, for instance) then + # let's try another method: + l = sorted(os.listdir(xlf_dir)) + l.reverse() + l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] + if l: + from distutils.version import LooseVersion + self.version = version = LooseVersion(l[0]) + return version + + def get_flags(self): + return ['-qextname'] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_linker_so(self): + opt = [] + if sys.platform=='darwin': + opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') + else: + opt.append('-bshared') + version = self.get_version(ok_status=[0, 40]) + if version is not None: + if sys.platform.startswith('aix'): + xlf_cfg = '/etc/xlf.cfg' + else: + xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version + fo, new_cfg = make_temp_file(suffix='_xlf.cfg') + log.info('Creating '+new_cfg) + with open(xlf_cfg) as fi: + crt1_match = re.compile(r'\s*crt\s*=\s*(?P.*)/crt1.o').match + for line in fi: + m = crt1_match(line) + if m: + fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) + else: + fo.write(line) + fo.close() + opt.append('-F'+new_cfg) + return opt + + def get_flags_opt(self): + return ['-O3'] + +if __name__ == '__main__': + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + print(customized_fcompiler(compiler='ibm').get_version()) diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/intel.py b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/intel.py new file mode 100644 index 0000000000000000000000000000000000000000..1d606590411048e9bebb2dc04d28e56be89783b3 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/intel.py @@ -0,0 +1,211 @@ +# http://developer.intel.com/software/products/compilers/flin/ +import sys + +from numpy.distutils.ccompiler import simple_version_match +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file + +compilers = ['IntelFCompiler', 
'IntelVisualFCompiler', + 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', + 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] + + +def intel_version_match(type): + # Match against the important stuff in the version string + return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) + + +class BaseIntelFCompiler(FCompiler): + def update_executables(self): + f = dummy_fortran_file() + self.executables['version_cmd'] = ['', '-FI', '-V', '-c', + f + '.f', '-o', f + '.o'] + + def runtime_library_dir_option(self, dir): + # TODO: could use -Xlinker here, if it's supported + assert "," not in dir + + return '-Wl,-rpath=%s' % dir + + +class IntelFCompiler(BaseIntelFCompiler): + + compiler_type = 'intel' + compiler_aliases = ('ifort',) + description = 'Intel Fortran Compiler for 32-bit apps' + version_match = intel_version_match('32-bit|IA-32') + + possible_executables = ['ifort', 'ifc'] + + executables = { + 'version_cmd' : None, # set by update_executables + 'compiler_f77' : [None, "-72", "-w90", "-w95"], + 'compiler_f90' : [None], + 'compiler_fix' : [None, "-FI"], + 'linker_so' : ["", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + pic_flags = ['-fPIC'] + module_dir_switch = '-module ' # Don't remove ending space! + module_include_switch = '-I' + + def get_flags_free(self): + return ['-FR'] + + def get_flags(self): + return ['-fPIC'] + + def get_flags_opt(self): # Scipy test failures with -O2 + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + return ['-fp-model', 'strict', '-O1', + '-assume', 'minus0', '-{}'.format(mpopt)] + + def get_flags_arch(self): + return [] + + def get_flags_linker_so(self): + opt = FCompiler.get_flags_linker_so(self) + v = self.get_version() + if v and v >= '8.0': + opt.append('-nofor_main') + if sys.platform == 'darwin': + # Here, it's -dynamiclib + try: + idx = opt.index('-shared') + opt.remove('-shared') + except ValueError: + idx = 0 + opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] + return opt + + +class IntelItaniumFCompiler(IntelFCompiler): + compiler_type = 'intele' + compiler_aliases = () + description = 'Intel Fortran Compiler for Itanium apps' + + version_match = intel_version_match('Itanium|IA-64') + + possible_executables = ['ifort', 'efort', 'efc'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + +class IntelEM64TFCompiler(IntelFCompiler): + compiler_type = 'intelem' + compiler_aliases = () + description = 'Intel Fortran Compiler for 64-bit apps' + + version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') + + possible_executables = ['ifort', 'efort', 'efc'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI"], + 'compiler_fix' : [None, "-FI"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + +# Is there no difference in the version string between the above compilers +# and the Visual compilers? 
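The question raised in that comment is exactly what the version_match attributes above are for: each Intel variant anchors a regex on the compiler's -V banner and only accepts banners that name the matching architecture, so closely related compilers can be told apart even though they share an executable name. Below is a minimal standalone sketch of that idea using plain re and made-up banner strings; the banners, the make_matcher helper, and the match functions are illustrative only (the real matching in this file goes through numpy.distutils.ccompiler.simple_version_match via intel_version_match).

import re

# Hypothetical -V banners, for illustration only; real output varies by release.
banners = {
    "ia32":    "Intel(R) Fortran Compiler for applications running on IA-32, Version 11.1",
    "intel64": "Intel(R) Fortran Intel(R) 64 Compiler, Version 2021.1",
}

def make_matcher(arch_pat):
    # Mirrors the shape of intel_version_match(): require
    # 'Intel...Fortran...<arch>...Version' in the banner, then grab the
    # first dotted number that follows.
    start = re.compile(r'Intel.*?Fortran.*?(?:%s).*?Version' % arch_pat)
    def match(banner):
        m = start.search(banner)
        if not m:
            return None
        v = re.search(r'[-.\d]+', banner[m.end():])
        return v.group(0) if v else None
    return match

match_ia32  = make_matcher('32-bit|IA-32')
match_em64t = make_matcher(r'EM64T-based|Intel\(R\) 64|64|IA-64|64-bit')

print(match_ia32(banners["ia32"]))       # -> '11.1'
print(match_ia32(banners["intel64"]))    # -> None (wrong architecture)
print(match_em64t(banners["intel64"]))   # -> '2021.1'

The IA-32 and Intel 64 classes share the rest of their configuration, so this banner check is effectively the only thing that selects one class over the other.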
+ + +class IntelVisualFCompiler(BaseIntelFCompiler): + compiler_type = 'intelv' + description = 'Intel Visual Fortran Compiler for 32-bit apps' + version_match = intel_version_match('32-bit|IA-32') + + def update_executables(self): + f = dummy_fortran_file() + self.executables['version_cmd'] = ['', '/FI', '/c', + f + '.f', '/o', f + '.o'] + + ar_exe = 'lib.exe' + possible_executables = ['ifort', 'ifl'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None], + 'compiler_fix' : [None], + 'compiler_f90' : [None], + 'linker_so' : [None], + 'archiver' : [ar_exe, "/verbose", "/OUT:"], + 'ranlib' : None + } + + compile_switch = '/c ' + object_switch = '/Fo' # No space after /Fo! + library_switch = '/OUT:' # No space after /OUT:! + module_dir_switch = '/module:' # No space after /module: + module_include_switch = '/I' + + def get_flags(self): + opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', + '/assume:underscore', '/fpp'] + return opt + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['/4Yb', '/d2'] + + def get_flags_opt(self): + return ['/O1', '/assume:minus0'] # Scipy test failures with /O2 + + def get_flags_arch(self): + return ["/arch:IA32", "/QaxSSE3"] + + def runtime_library_dir_option(self, dir): + raise NotImplementedError + + +class IntelItaniumVisualFCompiler(IntelVisualFCompiler): + compiler_type = 'intelev' + description = 'Intel Visual Fortran Compiler for Itanium apps' + + version_match = intel_version_match('Itanium') + + possible_executables = ['efl'] # XXX this is a wild guess + ar_exe = IntelVisualFCompiler.ar_exe + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI", "-4L72", "-w"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : [ar_exe, "/verbose", "/OUT:"], + 'ranlib' : None + } + + +class IntelEM64VisualFCompiler(IntelVisualFCompiler): + compiler_type = 'intelvem' + description = 'Intel Visual Fortran Compiler for 64-bit apps' + + version_match = simple_version_match(start=r'Intel\(R\).*?64,') + + def get_flags_arch(self): + return [] + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='intel').get_version()) diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/mips.py b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/mips.py new file mode 100644 index 0000000000000000000000000000000000000000..a0973804571b1404400e0749533a001d0833f905 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/mips.py @@ -0,0 +1,54 @@ +from numpy.distutils.cpuinfo import cpu +from numpy.distutils.fcompiler import FCompiler + +compilers = ['MIPSFCompiler'] + +class MIPSFCompiler(FCompiler): + + compiler_type = 'mips' + description = 'MIPSpro Fortran Compiler' + version_pattern = r'MIPSpro Compilers: Version (?P[^\s*,]*)' + + executables = { + 'version_cmd' : ["", "-version"], + 'compiler_f77' : ["f77", "-f77"], + 'compiler_fix' : ["f90", "-fixedform"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["f90", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : None + } + module_dir_switch = None #XXX: fix me + module_include_switch = None #XXX: fix me + pic_flags = ['-KPIC'] + + def get_flags(self): + return self.pic_flags + ['-n32'] + def get_flags_opt(self): + return ['-O3'] + def get_flags_arch(self): + opt = [] + for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 
32_10k'.split(): + if getattr(cpu, 'is_IP%s'%a)(): + opt.append('-TARG:platform=IP%s' % a) + break + return opt + def get_flags_arch_f77(self): + r = None + if cpu.is_r10000(): r = 10000 + elif cpu.is_r12000(): r = 12000 + elif cpu.is_r8000(): r = 8000 + elif cpu.is_r5000(): r = 5000 + elif cpu.is_r4000(): r = 4000 + if r is not None: + return ['r%s' % (r)] + return [] + def get_flags_arch_f90(self): + r = self.get_flags_arch_f77() + if r: + r[0] = '-' + r[0] + return r + +if __name__ == '__main__': + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='mips').get_version()) diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/nag.py b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/nag.py new file mode 100644 index 0000000000000000000000000000000000000000..939201f44e024de7b9f3d3858284a1dfce1d1a11 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/nag.py @@ -0,0 +1,87 @@ +import sys +import re +from numpy.distutils.fcompiler import FCompiler + +compilers = ['NAGFCompiler', 'NAGFORCompiler'] + +class BaseNAGFCompiler(FCompiler): + version_pattern = r'NAG.* Release (?P[^(\s]*)' + + def version_match(self, version_string): + m = re.search(self.version_pattern, version_string) + if m: + return m.group('version') + else: + return None + + def get_flags_linker_so(self): + return ["-Wl,-shared"] + def get_flags_opt(self): + return ['-O4'] + def get_flags_arch(self): + return [] + +class NAGFCompiler(BaseNAGFCompiler): + + compiler_type = 'nag' + description = 'NAGWare Fortran 95 Compiler' + + executables = { + 'version_cmd' : ["", "-V"], + 'compiler_f77' : ["f95", "-fixed"], + 'compiler_fix' : ["f95", "-fixed"], + 'compiler_f90' : ["f95"], + 'linker_so' : [""], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags_linker_so(self): + if sys.platform == 'darwin': + return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] + return BaseNAGFCompiler.get_flags_linker_so(self) + def get_flags_arch(self): + version = self.get_version() + if version and version < '5.1': + return ['-target=native'] + else: + return BaseNAGFCompiler.get_flags_arch(self) + def get_flags_debug(self): + return ['-g', '-gline', '-g90', '-nan', '-C'] + +class NAGFORCompiler(BaseNAGFCompiler): + + compiler_type = 'nagfor' + description = 'NAG Fortran Compiler' + + executables = { + 'version_cmd' : ["nagfor", "-V"], + 'compiler_f77' : ["nagfor", "-fixed"], + 'compiler_fix' : ["nagfor", "-fixed"], + 'compiler_f90' : ["nagfor"], + 'linker_so' : ["nagfor"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags_linker_so(self): + if sys.platform == 'darwin': + return ['-unsharedrts', + '-Wl,-bundle,-flat_namespace,-undefined,suppress'] + return BaseNAGFCompiler.get_flags_linker_so(self) + def get_flags_debug(self): + version = self.get_version() + if version and version > '6.1': + return ['-g', '-u', '-nan', '-C=all', '-thread_safe', + '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] + else: + return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + compiler = customized_fcompiler(compiler='nagfor') + print(compiler.get_version()) + print(compiler.get_flags_debug()) diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/nv.py b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/nv.py new file mode 100644 
index 0000000000000000000000000000000000000000..212f34806fc491f5c9ef3ff5148331bea92041c4 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/nv.py @@ -0,0 +1,53 @@ +from numpy.distutils.fcompiler import FCompiler + +compilers = ['NVHPCFCompiler'] + +class NVHPCFCompiler(FCompiler): + """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler + + https://developer.nvidia.com/hpc-sdk + + Since august 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers, + https://www.pgroup.com/index.htm. + See also `numpy.distutils.fcompiler.pg`. + """ + + compiler_type = 'nv' + description = 'NVIDIA HPC SDK' + version_pattern = r'\s*(nvfortran|(pg(f77|f90|fortran)) \(aka nvfortran\)) (?P[\d.-]+).*' + + executables = { + 'version_cmd': ["", "-V"], + 'compiler_f77': ["nvfortran"], + 'compiler_fix': ["nvfortran", "-Mfixed"], + 'compiler_f90': ["nvfortran"], + 'linker_so': [""], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] + } + pic_flags = ['-fpic'] + + module_dir_switch = '-module ' + module_include_switch = '-I' + + def get_flags(self): + opt = ['-Minform=inform', '-Mnosecond_underscore'] + return self.pic_flags + opt + + def get_flags_opt(self): + return ['-fast'] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_linker_so(self): + return ["-shared", '-fpic'] + + def runtime_library_dir_option(self, dir): + return '-R%s' % dir + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='nv').get_version()) diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/vast.py b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/vast.py new file mode 100644 index 0000000000000000000000000000000000000000..92a1647ba43708084ce85e0b986cb9d71329b842 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/vast.py @@ -0,0 +1,52 @@ +import os + +from numpy.distutils.fcompiler.gnu import GnuFCompiler + +compilers = ['VastFCompiler'] + +class VastFCompiler(GnuFCompiler): + compiler_type = 'vast' + compiler_aliases = () + description = 'Pacific-Sierra Research Fortran 90 Compiler' + version_pattern = (r'\s*Pacific-Sierra Research vf90 ' + r'(Personal|Professional)\s+(?P[^\s]*)') + + # VAST f90 does not support -o with -c. So, object files are created + # to the current directory and then moved to build directory + object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' + + executables = { + 'version_cmd' : ["vf90", "-v"], + 'compiler_f77' : ["g77"], + 'compiler_fix' : ["f90", "-Wv,-ya"], + 'compiler_f90' : ["f90"], + 'linker_so' : [""], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = None #XXX Fix me + module_include_switch = None #XXX Fix me + + def find_executables(self): + pass + + def get_version_cmd(self): + f90 = self.compiler_f90[0] + d, b = os.path.split(f90) + vf90 = os.path.join(d, 'v'+b) + return vf90 + + def get_flags_arch(self): + vast_version = self.get_version() + gnu = GnuFCompiler() + gnu.customize(None) + self.version = gnu.get_version() + opt = GnuFCompiler.get_flags_arch(self) + self.version = vast_version + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='vast').get_version())
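All of the fcompiler modules added above follow the same pattern: each registers its classes in a module-level compilers list and is exercised through numpy.distutils.customized_fcompiler, exactly as their __main__ blocks do. The following sketch shows that lookup for a few of the compilers from this diff; it assumes numpy.distutils is still importable (it is deprecated in recent NumPy releases) and that the corresponding Fortran binaries are on PATH, otherwise the lookup simply fails.

# Hedged usage sketch, not part of the patched files themselves.
from distutils import log
from numpy.distutils import customized_fcompiler

log.set_verbosity(2)

for name in ('gnu95', 'intel', 'nv'):
    try:
        fc = customized_fcompiler(compiler=name)   # e.g. Gnu95FCompiler for 'gnu95'
        print(name, fc.get_version(), fc.get_flags_opt())
    except Exception as exc:                       # compiler binary not installed
        print(name, 'unavailable:', exc)

The per-class hooks queried here (get_version, get_flags_opt, get_flags_linker_so, and so on) are the same ones each module's __main__ block prints when run directly.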