ZTWHHH committed
Commit da48e74 · verified · 1 Parent(s): 60c4e44

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/INSTALLER +1 -0
  2. vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/LICENSE.txt +26 -0
  3. vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/RECORD +10 -0
  4. vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/REQUESTED +0 -0
  5. vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/WHEEL +5 -0
  6. vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/pbr.json +1 -0
  7. vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/top_level.txt +1 -0
  8. vllm/lib/python3.10/site-packages/more_itertools/__pycache__/recipes.cpython-310.pyc +0 -0
  9. vllm/lib/python3.10/site-packages/more_itertools/more.py +0 -0
  10. vllm/lib/python3.10/site-packages/torchgen/__init__.py +10 -0
  11. vllm/lib/python3.10/site-packages/torchgen/code_template.py +99 -0
  12. vllm/lib/python3.10/site-packages/torchgen/context.py +130 -0
  13. vllm/lib/python3.10/site-packages/torchgen/executorch/__init__.py +0 -0
  14. vllm/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc +0 -0
  15. vllm/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc +0 -0
  16. vllm/lib/python3.10/site-packages/torchgen/executorch/api/__init__.py +0 -0
  17. vllm/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc +0 -0
  18. vllm/lib/python3.10/site-packages/torchgen/executorch/api/custom_ops.py +149 -0
  19. vllm/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc +0 -0
  20. vllm/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc +0 -0
  21. vllm/lib/python3.10/site-packages/torchgen/executorch/api/unboxing.py +230 -0
  22. vllm/lib/python3.10/site-packages/torchgen/executorch/model.py +220 -0
  23. vllm/lib/python3.10/site-packages/torchgen/executorch/parse.py +153 -0
  24. vllm/lib/python3.10/site-packages/torchgen/gen.py +0 -0
  25. vllm/lib/python3.10/site-packages/torchgen/gen_aoti_c_shim.py +486 -0
  26. vllm/lib/python3.10/site-packages/torchgen/gen_backend_stubs.py +611 -0
  27. vllm/lib/python3.10/site-packages/torchgen/gen_executorch.py +998 -0
  28. vllm/lib/python3.10/site-packages/torchgen/gen_functionalization_type.py +882 -0
  29. vllm/lib/python3.10/site-packages/torchgen/gen_lazy_tensor.py +581 -0
  30. vllm/lib/python3.10/site-packages/torchgen/gen_schema_utils.py +97 -0
  31. vllm/lib/python3.10/site-packages/torchgen/gen_vmap_plumbing.py +271 -0
  32. vllm/lib/python3.10/site-packages/torchgen/local.py +59 -0
  33. vllm/lib/python3.10/site-packages/torchgen/model.py +0 -0
  34. vllm/lib/python3.10/site-packages/torchgen/native_function_generation.py +646 -0
  35. vllm/lib/python3.10/site-packages/torchgen/operator_versions/__init__.py +0 -0
  36. vllm/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc +0 -0
  37. vllm/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders.cpython-310.pyc +0 -0
  38. vllm/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders_constant.cpython-310.pyc +0 -0
  39. vllm/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py +395 -0
  40. vllm/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders_constant.py +7 -0
  41. vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/BUILD.bazel +4 -0
  42. vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/__init__.py +0 -0
  43. vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_annotated_fn_args.cpython-310.pyc +0 -0
  44. vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd_functions.cpython-310.pyc +0 -0
  45. vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_python_functions.cpython-310.pyc +0 -0
  46. vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_factories.cpython-310.pyc +0 -0
  47. vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/load_derivatives.cpython-310.pyc +0 -0
  48. vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/derivatives.yaml +0 -0
  49. vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_annotated_fn_args.py +132 -0
  50. vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_autograd.py +147 -0
vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+pip
vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,26 @@
+Copyright (c) 2005-2018, Michele Simionato
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+  Redistributions in bytecode form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in
+  the documentation and/or other materials provided with the
+  distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/RECORD ADDED
@@ -0,0 +1,10 @@
+__pycache__/decorator.cpython-310.pyc,,
+decorator-5.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+decorator-5.1.1.dist-info/LICENSE.txt,sha256=_RFmDKvwUyCCxFcGhi-vwpSQfsf44heBgkCkmZgGeC4,1309
+decorator-5.1.1.dist-info/METADATA,sha256=XAr2zbYpRxCkcPbsmg1oaiS5ea7mhTq-j-wb0XjuVho,3955
+decorator-5.1.1.dist-info/RECORD,,
+decorator-5.1.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+decorator-5.1.1.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+decorator-5.1.1.dist-info/pbr.json,sha256=AL84oUUWQHwkd8OCPhLRo2NJjU5MDdmXMqRHv-posqs,47
+decorator-5.1.1.dist-info/top_level.txt,sha256=Kn6eQjo83ctWxXVyBMOYt0_YpjRjBznKYVuNyuC_DSI,10
+decorator.py,sha256=el5cAEgoTEpRQN65tOxGhElue-CccMv0xol-J2MwOc0,16752
vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/REQUESTED ADDED
File without changes
vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/pbr.json ADDED
@@ -0,0 +1 @@
+{"is_release": false, "git_version": "8608a46"}
vllm/lib/python3.10/site-packages/decorator-5.1.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+decorator
vllm/lib/python3.10/site-packages/more_itertools/__pycache__/recipes.cpython-310.pyc ADDED
Binary file (29.7 kB).
 
vllm/lib/python3.10/site-packages/more_itertools/more.py ADDED
The diff for this file is too large to render. See raw diff
 
vllm/lib/python3.10/site-packages/torchgen/__init__.py ADDED
@@ -0,0 +1,10 @@
+"""torchgen
+
+This module contains codegeneration utilities for PyTorch. It is used to
+build PyTorch from source, but may also be used for out-of-tree projects
+that extend PyTorch.
+
+Note well that we provide no BC guarantees for torchgen. If you're interested
+in using torchgen and want the PyTorch team to be aware, please reach out
+on GitHub.
+"""
vllm/lib/python3.10/site-packages/torchgen/code_template.py ADDED
@@ -0,0 +1,99 @@
+from __future__ import annotations
+
+import re
+from typing import Mapping, Sequence
+
+
+# match $identifier or ${identifier} and replace with value in env
+# If this identifier is at the beginning of whitespace on a line
+# and its value is a list then it is treated as
+# block substitution by indenting to that depth and putting each element
+# of the list on its own line
+# if the identifier is on a line starting with non-whitespace and a list
+# then it is comma separated ${,foo} will insert a comma before the list
+# if this list is not empty and ${foo,} will insert one after.
+
+
+class CodeTemplate:
+    substitution_str = r"(^[^\n\S]*)?\$([^\d\W]\w*|\{,?[^\d\W]\w*\,?})"
+    substitution = re.compile(substitution_str, re.MULTILINE)
+
+    pattern: str
+    filename: str
+
+    @staticmethod
+    def from_file(filename: str) -> CodeTemplate:
+        with open(filename) as f:
+            return CodeTemplate(f.read(), filename)
+
+    def __init__(self, pattern: str, filename: str = "") -> None:
+        self.pattern = pattern
+        self.filename = filename
+
+    def substitute(
+        self, env: Mapping[str, object] | None = None, **kwargs: object
+    ) -> str:
+        if env is None:
+            env = {}
+
+        def lookup(v: str) -> object:
+            assert env is not None
+            return kwargs[v] if v in kwargs else env[v]
+
+        def indent_lines(indent: str, v: Sequence[object]) -> str:
+            return "".join(
+                [indent + l + "\n" for e in v for l in str(e).splitlines()]
+            ).rstrip()
+
+        def replace(match: re.Match[str]) -> str:
+            indent = match.group(1)
+            key = match.group(2)
+            comma_before = ""
+            comma_after = ""
+            if key[0] == "{":
+                key = key[1:-1]
+                if key[0] == ",":
+                    comma_before = ", "
+                    key = key[1:]
+                if key[-1] == ",":
+                    comma_after = ", "
+                    key = key[:-1]
+            v = lookup(key)
+            if indent is not None:
+                if not isinstance(v, list):
+                    v = [v]
+                return indent_lines(indent, v)
+            elif isinstance(v, list):
+                middle = ", ".join([str(x) for x in v])
+                if len(v) == 0:
+                    return middle
+                return comma_before + middle + comma_after
+            else:
+                return str(v)
+
+        return self.substitution.sub(replace, self.pattern)
+
+
+if __name__ == "__main__":
+    c = CodeTemplate(
+        """\
+    int foo($args) {
+
+        $bar
+            $bar
+        $a+$b
+    }
+    int commatest(int a${,stuff})
+    int notest(int a${,empty,})
+    """
+    )
+    print(
+        c.substitute(
+            args=["hi", 8],
+            bar=["what", 7],
+            a=3,
+            b=4,
+            stuff=["things...", "others"],
+            empty=[],
+        )
+    )
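For reference, a minimal usage sketch of the substitution rules above (illustrative only, not part of this commit; assumes torchgen is importable, e.g. from an installed PyTorch). A list bound to an identifier at the start of a line is block-substituted at that indentation, while ${,name} inlines the list with a leading comma when it is non-empty:

    from torchgen.code_template import CodeTemplate

    t = CodeTemplate("""\
    void f() {
        $body
    }
    call(a${,extra})
    """)

    # $body starts a line, so the list becomes an indented block;
    # ${,extra} is inlined comma-separated with a comma before it.
    print(t.substitute(body=["int x = 0;", "use(x);"], extra=["b", "c"]))
    # void f() {
    #     int x = 0;
    #     use(x);
    # }
    # call(a, b, c)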
vllm/lib/python3.10/site-packages/torchgen/context.py ADDED
@@ -0,0 +1,130 @@
+from __future__ import annotations
+
+import contextlib
+import functools
+from typing import Any, Callable, Iterator, List, Optional, Tuple, TypeVar, Union
+
+import torchgen.local as local
+from torchgen.model import (
+    BackendIndex,
+    DispatchKey,
+    NativeFunction,
+    NativeFunctionsGroup,
+    NativeFunctionsViewGroup,
+)
+from torchgen.utils import context, S, T
+
+
+# Helper functions for defining generators on things in the model
+
+F = TypeVar(
+    "F",
+    NativeFunction,
+    NativeFunctionsGroup,
+    NativeFunctionsViewGroup,
+    Union[NativeFunction, NativeFunctionsGroup],
+    Union[NativeFunction, NativeFunctionsViewGroup],
+)
+
+F2 = TypeVar(
+    "F2",
+    NativeFunction,
+    NativeFunctionsGroup,
+    Optional[NativeFunction],
+    bool,
+    str,
+)
+
+F3 = TypeVar("F3", Tuple[NativeFunction, Any], List[NativeFunction])
+
+
+@contextlib.contextmanager
+def native_function_manager(
+    g: NativeFunctionsGroup | NativeFunctionsViewGroup | NativeFunction,
+) -> Iterator[None]:
+    if isinstance(g, NativeFunctionsGroup):
+        # By default, we associate all errors with structured native functions
+        # with the out variant. In some cases, it might be better to have
+        # a more specific place to hang things; if so, use
+        # native_function_manager again on the inside
+        f = g.out
+    elif isinstance(g, NativeFunctionsViewGroup):
+        # We associate errors with the view operator
+        f = g.view
+    else:
+        f = g
+    with context(lambda: f"in native_functions.yaml line {f.loc}:\n  {f.func}"):
+        with local.parametrize(
+            use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors,
+            use_ilistref_for_tensor_lists=f.part_of_structured_group,
+        ):
+            yield
+
+
+# Given a function that operates on NativeFunction, wrap it into a new function
+# that sets some appropriate context managers for that native function.
+# YOU MUST WRAP FUNCTIONS IN THIS for calls to api modules to be sound
+# (you will get an error if we try to access the local variables without having
+# set them).
+def with_native_function(func: Callable[[F], T]) -> Callable[[F], T]:
+    @functools.wraps(func)
+    def wrapper(f: F) -> T:
+        with native_function_manager(f):
+            return func(f)
+
+    return wrapper
+
+
+def with_native_function_and(func: Callable[[F, F2], T]) -> Callable[[F, F2], T]:
+    @functools.wraps(func)
+    def wrapper(f: F, f2: F2) -> T:
+        # The first native_function is assumed to be the one with the appropriate context.
+        with native_function_manager(f):
+            return func(f, f2)
+
+    return wrapper
+
+
+def method_with_native_function(func: Callable[[S, F], T]) -> Callable[[S, F], T]:
+    @functools.wraps(func)
+    def wrapper(slf: S, f: F) -> T:
+        with native_function_manager(f):
+            return func(slf, f)
+
+    return wrapper
+
+
+def method_with_nested_native_function(
+    func: Callable[[S, F3], T]
+) -> Callable[[S, F3], T]:
+    @functools.wraps(func)
+    def wrapper(slf: S, f: F3) -> T:
+        with native_function_manager(f[0]):
+            return func(slf, f)
+
+    return wrapper
+
+
+# Convenience decorator for functions that explicitly take in a BackendIndex,
+# instead of indirectly taking one in as a closure
+def with_native_function_and_index(
+    func: Callable[[F, BackendIndex], T]
+) -> Callable[[F, BackendIndex], T]:
+    @functools.wraps(func)
+    def wrapper(f: F, backend_index: BackendIndex) -> T:
+        with native_function_manager(f):
+            return func(f, backend_index)
+
+    return wrapper
+
+
+# Convenience decorator for functions that explicitly take in a Dict of BackendIndices
+def with_native_function_and_indices(
+    func: Callable[[F, dict[DispatchKey, BackendIndex]], T]
+) -> Callable[[F, dict[DispatchKey, BackendIndex]], T]:
+    @functools.wraps(func)
+    def wrapper(f: F, backend_indices: dict[DispatchKey, BackendIndex]) -> T:
+        with native_function_manager(f):
+            return func(f, backend_indices)
+
+    return wrapper
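As a hedged illustration of the decorators above (not from the commit; compute_decl is a hypothetical helper), any codegen function that calls into torchgen's api modules should be wrapped so that errors are attributed to the right native_functions.yaml entry:

    from torchgen.context import with_native_function
    from torchgen.model import NativeFunction

    @with_native_function
    def compute_decl(f: NativeFunction) -> str:
        # Runs inside native_function_manager(f): exceptions raised here are
        # reported with the YAML location of `f`, and the torchgen.local flags
        # (e.g. use_const_ref_for_mutable_tensors) are set for api calls.
        return f"TORCH_API void {f.func.name.unambiguous_name()}();"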
vllm/lib/python3.10/site-packages/torchgen/executorch/__init__.py ADDED
File without changes
vllm/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc ADDED
Binary file (7.39 kB).
 
vllm/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc ADDED
Binary file (4.45 kB).
 
vllm/lib/python3.10/site-packages/torchgen/executorch/api/__init__.py ADDED
File without changes
vllm/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc ADDED
Binary file (6.51 kB).
 
vllm/lib/python3.10/site-packages/torchgen/executorch/api/custom_ops.py ADDED
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+from collections import defaultdict
+from dataclasses import dataclass
+from typing import Sequence, TYPE_CHECKING
+
+from torchgen import dest
+
+
+# disable import sorting to avoid circular dependency.
+from torchgen.api.types import DispatcherSignature  # usort: skip
+from torchgen.context import method_with_native_function
+from torchgen.model import BaseTy, BaseType, DispatchKey, NativeFunction, Variant
+from torchgen.utils import concatMap, Target
+
+
+if TYPE_CHECKING:
+    from torchgen.executorch.model import ETKernelIndex
+    from torchgen.selective_build.selector import SelectiveBuilder
+
+
+# Generates RegisterKernelStub.cpp, which provides placeholder kernels for custom operators. This will be used at
+# model authoring side.
+@dataclass(frozen=True)
+class ComputeNativeFunctionStub:
+    @method_with_native_function
+    def __call__(self, f: NativeFunction) -> str | None:
+        if Variant.function not in f.variants:
+            return None
+
+        sig = DispatcherSignature.from_schema(
+            f.func, prefix=f"wrapper_CPU_{f.func.name.overload_name}_", symint=False
+        )
+        assert sig is not None
+        if len(f.func.returns) == 0:
+            ret_name = ""
+        elif len(f.func.returns) == 1:
+            if f.func.arguments.out:
+                ret_name = f.func.arguments.out[0].name
+            else:
+                ret_name = next(
+                    (
+                        a.name
+                        for a in f.func.arguments.flat_non_out
+                        if a.type == f.func.returns[0].type
+                    ),
+                    "",
+                )
+            if not ret_name:
+                # if return type is tensor
+                if f.func.returns[0].type == BaseType(BaseTy.Tensor):
+                    # Returns an empty tensor
+                    ret_name = "at::Tensor()"
+                else:
+                    raise Exception(  # noqa: TRY002
+                        f"Can't handle this return type {f.func}"
+                    )  # noqa: TRY002
+        elif len(f.func.arguments.out) == len(f.func.returns):
+            # Returns a tuple of out arguments
+            tensor_type = "at::Tensor &"
+            comma = ", "
+            ret_name = f"""::std::tuple<{comma.join([tensor_type] * len(f.func.returns))}>(
+                {comma.join([r.name for r in f.func.arguments.out])}
+            )"""
+        else:
+            assert all(
+                a.type == BaseType(BaseTy.Tensor) for a in f.func.returns
+            ), f"Only support tensor returns but got {f.func.returns}"
+            # Returns a tuple of empty tensors
+            tensor_type = "at::Tensor"
+            comma = ", "
+            ret_name = f"""::std::tuple<{comma.join([tensor_type] * len(f.func.returns))}>(
+                {comma.join(["at::Tensor()" for _ in f.func.returns])}
+            )"""
+        ret_str = f"return {ret_name};" if len(f.func.returns) > 0 else ""
+        return f"""
+{sig.defn()} {{
+    {ret_str}
+}}
+    """
+
+
+def gen_custom_ops_registration(
+    *,
+    native_functions: Sequence[NativeFunction],
+    selector: SelectiveBuilder,
+    kernel_index: ETKernelIndex,
+    rocm: bool,
+) -> tuple[str, str]:
+    """
+    Generate custom ops registration code for dest.RegisterDispatchKey.
+
+    :param native_functions: a sequence of `NativeFunction`
+    :param selector: for selective build.
+    :param kernel_index: kernels for all the ops.
+    :param rocm: bool for dest.RegisterDispatchKey.
+    :return: generated C++ code to register custom operators into PyTorch
+    """
+
+    # convert kernel index to BackendIndex. This is because we can't handle ETKernelIndex yet.
+    # TODO larryliu: evaluate if this code is still needed. If yes let it handle ETKernelIndex.
+
+    dispatch_key = DispatchKey.CPU
+    backend_index = kernel_index._to_backend_index()
+    static_init_dispatch_registrations = ""
+    ns_grouped_native_functions: dict[str, list[NativeFunction]] = defaultdict(list)
+    for native_function in native_functions:
+        ns_grouped_native_functions[native_function.namespace].append(native_function)
+
+    for namespace, functions in ns_grouped_native_functions.items():
+        if len(functions) == 0:
+            continue
+        dispatch_registrations_body = "\n".join(
+            list(
+                concatMap(
+                    dest.RegisterDispatchKey(
+                        backend_index,
+                        Target.REGISTRATION,
+                        selector,
+                        rocm=rocm,
+                        symint=False,
+                        class_method_name=None,
+                        skip_dispatcher_op_registration=False,
+                    ),
+                    functions,
+                )
+            )
+        )
+        static_init_dispatch_registrations += f"""
+TORCH_LIBRARY_IMPL({namespace}, {dispatch_key}, m) {{
+{dispatch_registrations_body}
+}};"""
+    anonymous_definition = "\n".join(
+        list(
+            concatMap(
+                dest.RegisterDispatchKey(
+                    backend_index,
+                    Target.ANONYMOUS_DEFINITION,
+                    selector,
+                    rocm=rocm,
+                    symint=False,
+                    class_method_name=None,
+                    skip_dispatcher_op_registration=False,
+                ),
+                native_functions,
+            )
+        )
+    )
+    return anonymous_definition, static_init_dispatch_registrations
vllm/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (283 Bytes).
 
vllm/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc ADDED
Binary file (3.16 kB).
 
vllm/lib/python3.10/site-packages/torchgen/executorch/api/unboxing.py ADDED
@@ -0,0 +1,230 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Callable, Sequence, TYPE_CHECKING
+
+from torchgen.model import (
+    Argument,
+    BaseTy,
+    BaseType,
+    ListType,
+    NativeFunction,
+    OptionalType,
+    Type,
+)
+
+
+if TYPE_CHECKING:
+    from torchgen.api.types import Binding, CType, NamedCType
+
+
+connector = "\n\t"
+
+
+# Return unboxing function name for a NativeFunction
+def name(f: NativeFunction) -> str:
+    return f.func.name.unambiguous_name()
+
+
+@dataclass(frozen=True)
+class Unboxing:
+    """
+    Takes a sequence of Bindings and unbox EValues to these Bindings. Return generated code that performs correct unboxing.
+    A sample generated code:
+    // aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    void mul_out(EValue** stack) {
+        EValue& self = *stack[0];
+        EValue& other = *stack[1];
+        EValue& out = *stack[2];
+        const torch::executor::Tensor & self_base = self.to<torch::executor::Tensor>();
+        const torch::executor::Tensor & other_base = other.to<torch::executor::Tensor>();
+        torch::executor::Tensor & out_base = out.to<torch::executor::Tensor>();
+
+        EXECUTORCH_SCOPE_PROF("native_call_mul.out");
+        torch::executor::mul_outf(self_base, other_base, out_base);
+
+
+    }
+    """
+
+    # this is a callable that converts a JIT argument, into its C++ type.
+    # Translates (type, mutability, binds) to NamedCType. E.g., torchgen.api.cpp.argumenttype_type.
+    argument_type_gen: Callable[
+        ...,
+        NamedCType,
+    ]
+
+    # Convert all the arguments in a NativeFunction to C++ code
+    def convert_arguments(
+        self, args: Sequence[Binding]
+    ) -> tuple[list[Binding], list[str]]:
+        code_list = [f"EValue& {args[i].name} = *stack[{i}];" for i in range(len(args))]
+        binding_list = []
+        for arg in args:
+            # expecting only Argument
+            if not isinstance(arg.argument, Argument):
+                raise Exception(  # noqa: TRY002
+                    f"Unexpected argument type, expecting `Argument` but got {arg}"
+                )
+            argument: Argument = arg.argument
+            unboxed_name, _, code, decl = self.argumenttype_evalue_convert(
+                argument.type, argument.name, mutable=argument.is_write
+            )
+            code_list.extend(decl)
+            code_list.extend(code)
+            binding_list.append(arg.with_name(unboxed_name))
+        return binding_list, code_list
+
+    def argumenttype_evalue_convert(
+        self, t: Type, arg_name: str, *, mutable: bool = False
+    ) -> tuple[str, CType, list[str], list[str]]:
+        """
+        Takes in the type, name and mutability corresponding to an argument, and generates a tuple of:
+        (1) the C++ code necessary to unbox the argument
+        (2) A Binding corresponding to the newly created unboxed variable, including variable name and its CType
+        :param t: a `Type` of an argument
+        :param arg_name: argument name
+        :param mutable: boolean for whether this argument type is mutable
+        :return: unboxed result
+        """
+        ctype = self.argument_type_gen(t, mutable=mutable, binds=arg_name).type
+
+        if isinstance(t, BaseType):
+            out_name = f"{arg_name}_base"
+            code, decl = self._gen_code_base_type(
+                arg_name=arg_name, out_name=out_name, ctype=ctype
+            )
+        elif isinstance(t, OptionalType):
+            out_name = f"{arg_name}_opt_out"
+            code, decl = self._gen_code_optional_type(
+                arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
+            )
+        elif isinstance(t, ListType):
+            out_name = f"{arg_name}_list_out"
+            code, decl = self._gen_code_list_type(
+                arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
+            )
+        else:
+            raise Exception(  # noqa: TRY002
+                f"Cannot handle type {t}. arg_name: {arg_name}"
+            )  # noqa: TRY002
+        return out_name, ctype, code, decl
+
+    def _gen_code_base_type(
+        self, arg_name: str, out_name: str, ctype: CType
+    ) -> tuple[list[str], list[str]]:
+        return [
+            f"{ctype.cpp_type()} {out_name} = {arg_name}.to<{ctype.cpp_type(strip_ref=True)}>();"
+        ], []
+
+    def _gen_code_optional_type(
+        self, arg_name: str, out_name: str, t: OptionalType, ctype: CType
+    ) -> tuple[list[str], list[str]]:
+        in_name = f"{arg_name}_opt_in"
+        res_name, base_type, res_code, decl = self.argumenttype_evalue_convert(
+            t.elem, in_name
+        )
+        return (
+            f"""
+    auto {out_name} = {arg_name}.toOptional<{base_type.cpp_type(strip_ref=True)}>();
+            """.split(
+                "\n"
+            ),
+            decl,
+        )
+
+    def _gen_code_list_type(
+        self, arg_name: str, out_name: str, t: ListType, ctype: CType
+    ) -> tuple[list[str], list[str]]:
+        in_name = f"{arg_name}_list_in"
+        elem_name = f"{arg_name}_elem"
+        code = []
+        res_name, res_ctype, res_code, decl = self.argumenttype_evalue_convert(
+            t.elem, elem_name
+        )
+
+        if isinstance(t.elem, BaseType) and t.elem.name == BaseTy.Tensor:
+            code.extend(
+                f"""
+    auto {out_name} = {arg_name}.toTensorList();
+                """.split(
+                    "\n"
+                )
+            )
+        elif isinstance(t.elem, BaseType) and (
+            t.elem.name == BaseTy.int or t.elem.name == BaseTy.SymInt
+        ):
+            code.extend(
+                f"""
+    auto {out_name} = {arg_name}.toIntList();
+                """.split(
+                    "\n"
+                )
+            )
+        elif isinstance(t.elem, BaseType) and t.elem.name == BaseTy.float:
+            code.extend(
+                f"""
+    auto {out_name} = {arg_name}.toDoubleList();
+                """.split(
+                    "\n"
+                )
+            )
+        elif isinstance(t.elem, BaseType) and t.elem.name == BaseTy.bool:
+            # handle list type with size, e.g., bool[4]
+            code.extend(
+                f"""
+#ifdef USE_ATEN_LIB
+std::array<bool, {t.size}> {out_name};
+auto {in_name} = {arg_name}.toBoolList();
+size_t _i = 0;
+for (auto {elem_name}: {in_name}) {{
+    {out_name}[_i++] = {elem_name};
+}}
+#else
+auto {out_name} = {arg_name}.toBoolList();
+#endif
+                """.split(
+                    "\n"
+                )
+            )
+        # pytorch codegen:
+        # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List<::std::optional<at::Tensor>>
+        elif (
+            isinstance(t.elem, OptionalType)
+            and isinstance(t.elem.elem, BaseType)
+            and t.elem.elem.name == BaseTy.Tensor
+        ):
+            code.extend(
+                f"""
+#ifdef USE_ATEN_LIB
+auto {in_name} = {arg_name}.toListOptionalTensor();
+c10::List<::std::optional<at::Tensor>> {out_name};
+for (auto {elem_name}: {in_name}) {{
+    {out_name}.push_back({elem_name});
+}}
+#else
+auto {out_name} = {arg_name}.toListOptionalTensor();
+#endif
+                """.split(
+                    "\n"
+                )
+            )
+        else:
+            # use ArrayRef as default.
+            vec_name = arg_name + "_vec"
+            # need to bring vector instantiation out of scope so that ArrayRef has valid data
+            decl.append(
+                f"std::vector<{res_ctype.cpp_type(strip_ref=True)}> {vec_name};"
+            )
+            code.extend(
+                f"""
+    for (EValue {elem_name}: {in_name}) {{
+        {connector.join(res_code)}
+        {vec_name}.push_back({res_name});
+    }}
+    {ctype.cpp_type(strip_ref=True)} {out_name}({vec_name});
+                """.split(
+                    "\n"
+                )
+            )
+        return code, decl
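A small sketch of how Unboxing is driven (not part of the commit; it uses torchgen.api.cpp.argumenttype_type as the type generator, the example named in the field comment above):

    from torchgen.api import cpp
    from torchgen.executorch.api.unboxing import Unboxing
    from torchgen.model import BaseTy, BaseType

    unboxing = Unboxing(argument_type_gen=cpp.argumenttype_type)
    # Convert a single immutable Tensor argument named "self".
    out_name, ctype, code, decl = unboxing.argumenttype_evalue_convert(
        BaseType(BaseTy.Tensor), "self", mutable=False
    )
    print(code[0])
    # const at::Tensor & self_base = self.to<at::Tensor>();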
vllm/lib/python3.10/site-packages/torchgen/executorch/model.py ADDED
@@ -0,0 +1,220 @@
+# Represents all kernels used by an Executorch model.
+# It maintains a Dict[OperatorName, Dict[ETKernelKey, BackendMetadata]] structure.
+
+from __future__ import annotations
+
+import itertools
+from collections import defaultdict, namedtuple
+from dataclasses import dataclass
+from enum import IntEnum
+
+from torchgen.model import (
+    BackendIndex,
+    BackendMetadata,
+    DispatchKey,
+    NativeFunction,
+    NativeFunctionsGroup,
+    OperatorName,
+)
+from torchgen.utils import assert_never
+
+
+KERNEL_KEY_VERSION = 1
+
+
+# TODO: Duplicated Subset from codegen.tool.gen_oplist, remove declaration in codegen
+class ScalarType(IntEnum):
+    Byte = 0
+    Char = 1
+    Short = 2
+    Int = 3
+    Long = 4
+    Float = 6
+    Double = 7
+    Bool = 11
+
+
+ETParsedYaml = namedtuple("ETParsedYaml", ["native_functions", "kernel_index"])
+
+
+@dataclass(frozen=True)
+class ETKernelKeyOpArgMeta:
+    arg_name: str
+    dtype: str
+    # The order of the dimensions if entry is a Tensor
+    dim_order: tuple[int, ...]
+
+    def to_native_string(self) -> str:
+        dtype_str = ScalarType[self.dtype].value
+        dim_str = str(self.dim_order)[1:-1].replace(" ", "")
+        return f"{dtype_str};{dim_str}"
+
+
+@dataclass(frozen=True)
+class ETKernelKey:
+    # Field undefined is default = True
+    arg_meta: tuple[ETKernelKeyOpArgMeta, ...] = ()
+
+    # Indicator for this kernel being used as a catch all
+    default: bool = False
+
+    version: int = KERNEL_KEY_VERSION
+
+    @staticmethod
+    def gen_from_yaml(
+        args: dict[str, tuple[str, str]],
+        type_alias_map: dict[str, list[str]],  # TODO: Support unwrapped str val
+        dim_order_alias_map: dict[str, list[int]],
+    ) -> list[ETKernelKey]:
+        """Generate ETKernelKeys from arg kernel specs
+        Multiple ETKernelKeys are returned due to dtype permutations from utilizing
+        type_alias_map (actualizing each potential type permutation as a KernelKey)
+
+        Args:
+            args: Mapping from argument name to kernel specs
+                Kernel specs are a tuple of (dtype, dim_order).
+                Currently tuple entries must be aliased via the alias map arguments
+            type_alias_map: Mapping from type alias to potential type enums
+                i.e { T0 : [Double, Int] } means T0 can be either Double or Int
+                Used for lookup by args
+            dim_order_alias_map: Mapping from alias to a list of dimension orders
+                Used for lookup by args
+        """
+        # Cast to dim order to int
+        dim_order_alias_map = {
+            k: [int(alias) for alias in v] for k, v in dim_order_alias_map.items()
+        }
+        kernel_keys = []
+
+        # Get all used Dtype Alias
+        dtype_alias_used = set()
+        for type_alias, dim_order in args.values():
+            # Enforce usage of alias initially
+            # TODO: Support inlined arguments
+            assert type_alias in type_alias_map, "Undefined type alias: " + str(
+                type_alias
+            )
+            assert (
+                dim_order in dim_order_alias_map
+            ), "Undefined dim_order alias: " + str(dim_order)
+            dtype_alias_used.add(type_alias)
+
+        # Generate all permutations of dtype alias values
+        alias_dtypes = [
+            [(alias, dtype) for dtype in type_alias_map[alias]]
+            for alias in dtype_alias_used
+        ]
+        alias_permutations = [
+            dict(permutation) for permutation in list(itertools.product(*alias_dtypes))
+        ]
+
+        # Using each alias value permutation, generate kernel keys
+        op_arg_cache = {}
+        for permutation in alias_permutations:
+            arg_list = []
+            for arg_name, arg_spec in args.items():
+                dtype = permutation[arg_spec[0]]
+                dim_order = dim_order_alias_map[arg_spec[1]]  # type: ignore[assignment]
+                if (
+                    cache_key := (arg_name, dtype, tuple(dim_order))
+                ) not in op_arg_cache:
+                    op_arg_cache[cache_key] = ETKernelKeyOpArgMeta(*cache_key)  # type: ignore[arg-type]
+
+                arg_list.append(op_arg_cache[cache_key])
+            kernel_keys.append(ETKernelKey(tuple(arg_list)))
+
+        return kernel_keys
+
+    def to_native_string(self) -> str:
+        if self.default:
+            return "default"
+        return (
+            "v"
+            + str(KERNEL_KEY_VERSION)
+            + "/"
+            + "|".join([arg.to_native_string() for arg in self.arg_meta])
+        )
+
+
+@dataclass(frozen=True)
+class ETKernelIndex:
+    index: dict[OperatorName, dict[ETKernelKey, BackendMetadata]]
+
+    def has_kernels(self, g: NativeFunction | NativeFunctionsGroup) -> bool:
+        m = self.get_kernels(g)
+        return m is not None
+
+    def get_kernels(
+        self, g: NativeFunction | NativeFunctionsGroup
+    ) -> dict[ETKernelKey, BackendMetadata]:
+        if isinstance(g, NativeFunction):
+            f = g
+        elif isinstance(g, NativeFunctionsGroup):
+            f = g.functional
+        else:
+            assert_never(g)
+        if f.func.name not in self.index:
+            return {}
+        return self.index[f.func.name]
+
+    @staticmethod
+    def grow_from_backend_indices(
+        kernel_index: dict[OperatorName, dict[ETKernelKey, BackendMetadata]],
+        backend_indices: dict[DispatchKey, dict[OperatorName, BackendMetadata]],
+    ) -> None:
+        for dk in backend_indices:
+            index = backend_indices[dk]
+            for op, backend_metadata in index.items():
+                if op in kernel_index:
+                    kernel_index[op][ETKernelKey(default=True)] = backend_metadata
+                else:
+                    kernel_index[op] = {ETKernelKey(default=True): backend_metadata}
+
+    @staticmethod
+    def from_backend_indices(
+        backend_indices: dict[DispatchKey, dict[OperatorName, BackendMetadata]]
+    ) -> ETKernelIndex:
+        kernel_index: dict[
+            OperatorName, dict[ETKernelKey, BackendMetadata]
+        ] = defaultdict(dict)
+        ETKernelIndex.grow_from_backend_indices(kernel_index, backend_indices)
+        return ETKernelIndex(kernel_index)
+
+    def grow(
+        self, backend_indices: dict[DispatchKey, dict[OperatorName, BackendMetadata]]
+    ) -> ETKernelIndex:
+        ETKernelIndex.grow_from_backend_indices(self.index, backend_indices)
+        return self
+
+    def _to_backend_index(self) -> BackendIndex:
+        """
+        WARNING: this will be deprecated once all the codegen places know how to handle ETKernelIndex.
+        """
+        index: dict[OperatorName, BackendMetadata] = {}
+        for op in self.index:
+            kernel_dict = self.index[op]
+            assert (
+                len(kernel_dict.values()) == 1
+            ), f"Can't convert ETKernelIndex to BackendIndex because {op} has more than one kernels. Got {kernel_dict}"
+            index[op] = kernel_dict.get(
+                ETKernelKey(default=True),
+                BackendMetadata(kernel="", structured=False, cpp_namespace=""),
+            )
+        return BackendIndex(
+            dispatch_key=DispatchKey.CPU,
+            use_out_as_primary=False,
+            device_guard=False,
+            external=False,
+            index=index,
+        )
+
+    # Note duplicate ETKernelKey from index_b will clobber the metadata from index_a
+    @staticmethod
+    def merge_indices(index_a: ETKernelIndex, index_b: ETKernelIndex) -> ETKernelIndex:
+        combined = defaultdict(dict, index_a.index.copy())
+
+        for op, entry in index_b.index.items():
+            for key, metadata in entry.items():
+                combined[op][key] = metadata
+
+        return ETKernelIndex(combined)
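A sketch of ETKernelKey.gen_from_yaml with hypothetical aliases (not part of the commit): one tensor argument whose dtype alias T0 expands to two ScalarType names, yielding one kernel key per dtype permutation:

    from torchgen.executorch.model import ETKernelKey

    keys = ETKernelKey.gen_from_yaml(
        args={"self": ("T0", "D0")},               # (dtype alias, dim_order alias)
        type_alias_map={"T0": ["Float", "Int"]},   # T0 may be Float or Int
        dim_order_alias_map={"D0": [0, 1, 2, 3]},  # contiguous dim order
    )
    for k in keys:
        print(k.to_native_string())
    # v1/6;0,1,2,3   (Float is ScalarType 6)
    # v1/3;0,1,2,3   (Int is ScalarType 3)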
vllm/lib/python3.10/site-packages/torchgen/executorch/parse.py ADDED
@@ -0,0 +1,153 @@
+from __future__ import annotations
+
+from collections import defaultdict, namedtuple
+from typing import Any
+
+import yaml
+
+from torchgen.executorch.model import ETKernelIndex, ETKernelKey
+from torchgen.gen import LineLoader, parse_native_yaml
+from torchgen.model import (
+    BackendMetadata,
+    DispatchKey,
+    FunctionSchema,
+    NativeFunction,
+    OperatorName,
+)
+from torchgen.utils import NamespaceHelper
+
+
+# Parse native_functions.yaml into a sequence of NativeFunctions and ET Backend Indices.
+ETParsedYaml = namedtuple("ETParsedYaml", ["native_functions", "et_kernel_indices"])
+
+# Fields in native_functions.yaml used to determine which kernels should be used
+ET_FIELDS = ["kernels", "type_alias", "dim_order_alias"]
+
+
+def parse_from_yaml(ei: dict[str, object]) -> dict[ETKernelKey, BackendMetadata]:
+    """Given a loaded yaml representing kernel assignment information, extract the
+    mapping from `kernel keys` to `BackendMetadata` (the latter representing the kernel instance)
+
+    Args:
+        ei: Dict keys {kernels, type_alias, dim_order_alias}
+            See ETKernelKey for description of arguments
+    """
+    e = ei.copy()
+    if (kernels := e.pop("kernels", None)) is None:
+        return {}
+
+    type_alias: dict[str, list[str]] = e.pop("type_alias", {})  # type: ignore[assignment]
+    dim_order_alias: dict[str, list[str]] = e.pop("dim_order_alias", {})  # type: ignore[assignment]
+    dim_order_alias.pop("__line__", None)
+
+    kernel_mapping: dict[ETKernelKey, BackendMetadata] = {}
+
+    for entry in kernels:  # type: ignore[attr-defined]
+        arg_meta = entry.get("arg_meta")
+        if arg_meta is not None:
+            arg_meta.pop("__line__")
+
+        kernel_name = entry.get("kernel_name")
+        namespace_helper = NamespaceHelper.from_namespaced_entity(
+            kernel_name, max_level=3
+        )
+        kernel_namespace = namespace_helper.get_cpp_namespace(default="at")
+        backend_metadata = BackendMetadata(
+            kernel=namespace_helper.entity_name,
+            structured=False,
+            cpp_namespace=(kernel_namespace + "::native"),
+        )
+
+        kernel_keys = (
+            [ETKernelKey((), default=True)]
+            if arg_meta is None
+            else ETKernelKey.gen_from_yaml(arg_meta, type_alias, dim_order_alias)  # type: ignore[arg-type]
+        )
+
+        for kernel_key in kernel_keys:
+            assert kernel_key not in kernel_mapping, (
+                "Duplicate kernel key: " + str(kernel_key) + " " + str(e)
+            )
+            kernel_mapping[kernel_key] = backend_metadata
+
+    return kernel_mapping
+
+
+def parse_et_yaml_struct(es: object) -> ETKernelIndex:
+    """Given a loaded yaml representing a list of operators, for each op extract the mapping
+    of `kernel keys` to `BackendMetadata` (the latter representing the kernel instance
+    that should be used by the kernel key).
+    """
+    indices: dict[OperatorName, dict[ETKernelKey, BackendMetadata]] = {}
+    for ei in es:  # type: ignore[attr-defined]
+        e = ei.copy()
+
+        funcs = e.pop("func")
+        assert isinstance(funcs, str), f"not a str: {funcs}"
+        namespace_helper = NamespaceHelper.from_namespaced_entity(
+            namespaced_entity=funcs, max_level=1
+        )
+        opname = FunctionSchema.parse(namespace_helper.entity_name).name
+
+        assert opname not in indices, f"Duplicate func found in yaml: {opname} already"
+
+        if len(index := parse_from_yaml(e)) != 0:
+            indices[opname] = index
+
+    return ETKernelIndex(indices)
+
+
+def extract_kernel_fields(es: object) -> dict[OperatorName, dict[str, Any]]:
+    """Given a loaded yaml representing a list of operators, extract the
+    kernel key related fields indexed by the operator name.
+    """
+    fields: dict[OperatorName, dict[str, Any]] = defaultdict(dict)
+    for ei in es:  # type: ignore[attr-defined]
+        funcs = ei.get("func")
+        assert isinstance(funcs, str), f"not a str: {funcs}"
+        namespace_helper = NamespaceHelper.from_namespaced_entity(
+            namespaced_entity=funcs, max_level=1
+        )
+        opname = FunctionSchema.parse(namespace_helper.entity_name).name
+
+        for field in ET_FIELDS:
+            if (value := ei.get(field)) is not None:
+                fields[opname][field] = value
+
+    return fields
+
+
+def parse_et_yaml(
+    path: str,
+    tags_yaml_path: str,
+    ignore_keys: set[DispatchKey] | None = None,
+    skip_native_fns_gen: bool = False,
+) -> tuple[list[NativeFunction], dict[OperatorName, dict[str, Any]]]:
+    """Parse native_functions.yaml into NativeFunctions and an Operator Indexed Dict
+    of fields to persist from native_functions.yaml to functions.yaml
+    """
+    with open(path) as f:
+        es = yaml.load(f, Loader=LineLoader)
+
+    et_kernel = extract_kernel_fields(es)
+
+    # Remove ET specific fields from entries for BC compatibility
+    strip_et_fields(es)
+
+    native_yaml = parse_native_yaml(
+        path,
+        tags_yaml_path,
+        ignore_keys,
+        skip_native_fns_gen=skip_native_fns_gen,
+        loaded_yaml=es,
+    )
+    return native_yaml.native_functions, et_kernel
+
+
+def strip_et_fields(es: object) -> None:
+    """Given a loaded yaml representing a list of operators,
+    remove ET specific fields from every entries for BC compatibility
+    """
+    for entry in es:  # type: ignore[attr-defined]
+        for field in ET_FIELDS:
+            entry.pop(field, None)
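A minimal sketch of parse_from_yaml on a hand-written entry (not part of the commit; the kernel name custom::add_out is hypothetical). With no arg_meta, the kernel is registered under the catch-all default key:

    from torchgen.executorch.parse import parse_from_yaml

    entry = {"kernels": [{"arg_meta": None, "kernel_name": "custom::add_out"}]}
    mapping = parse_from_yaml(entry)
    for key, meta in mapping.items():
        print(key.to_native_string(), meta.kernel, meta.cpp_namespace)
    # default add_out custom::native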
vllm/lib/python3.10/site-packages/torchgen/gen.py ADDED
The diff for this file is too large to render. See raw diff
 
vllm/lib/python3.10/site-packages/torchgen/gen_aoti_c_shim.py ADDED
@@ -0,0 +1,486 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import textwrap
4
+ from dataclasses import dataclass
5
+ from typing import Sequence
6
+
7
+ from torchgen.api.types import DispatcherSignature
8
+ from torchgen.api.types.signatures import CppSignature, CppSignatureGroup
9
+ from torchgen.context import method_with_native_function
10
+ from torchgen.model import (
11
+ Argument,
12
+ BackendIndex,
13
+ BaseTy,
14
+ BaseType,
15
+ DispatchKey,
16
+ FunctionSchema,
17
+ ListType,
18
+ NativeFunction,
19
+ NativeFunctionsGroup,
20
+ OperatorName,
21
+ OptionalType,
22
+ Type,
23
+ )
24
+ from torchgen.utils import mapMaybe
25
+
26
+
27
+ base_type_to_c_type = {
28
+ BaseTy.Tensor: "AtenTensorHandle",
29
+ BaseTy.bool: "int32_t", # Use int to pass bool
30
+ BaseTy.int: "int64_t",
31
+ BaseTy.SymInt: "int64_t", # Inductor-generated code won't see a SymInt
32
+ BaseTy.Scalar: "double", # Use double to pass both integer and floating point
33
+ BaseTy.float: "double", # TODO: how about other floating point types?
34
+ BaseTy.str: "const char*",
35
+ BaseTy.DeviceIndex: "int32_t",
36
+ BaseTy.Layout: "int32_t", # Represent enum as int
37
+ BaseTy.MemoryFormat: "int32_t", # Represent enum as int
38
+ BaseTy.ScalarType: "int32_t", # Represent enum as int
39
+ BaseTy.Generator: "AtenGeneratorHandle",
40
+ }
41
+
42
+ base_type_to_aten_type = {
43
+ BaseTy.Tensor: "at::Tensor",
44
+ BaseTy.bool: "bool",
45
+ BaseTy.int: "int64_t",
46
+ BaseTy.SymInt: "c10::SymInt",
47
+ BaseTy.Scalar: "c10::Scalar",
48
+ BaseTy.float: "double",
49
+ BaseTy.str: "c10::string_view",
50
+ BaseTy.DeviceIndex: "c10::DeviceIndex",
51
+ BaseTy.Layout: "c10::Layout",
52
+ BaseTy.MemoryFormat: "c10::MemoryFormat",
53
+ BaseTy.ScalarType: "c10::ScalarType",
54
+ BaseTy.Generator: "at::Generator",
55
+ }
56
+
57
+ base_type_to_callsite_expr = {
58
+ BaseTy.Tensor: "*tensor_handle_to_tensor_pointer",
59
+ BaseTy.bool: "",
60
+ BaseTy.int: "",
61
+ BaseTy.SymInt: "",
62
+ BaseTy.Scalar: "",
63
+ BaseTy.float: "",
64
+ BaseTy.str: "",
65
+ BaseTy.DeviceIndex: "static_cast<c10::DeviceIndex>",
66
+ BaseTy.Layout: "static_cast<c10::Layout>",
67
+ BaseTy.MemoryFormat: "static_cast<c10::MemoryFormat>",
68
+ BaseTy.ScalarType: "static_cast<c10::ScalarType>",
69
+ BaseTy.Generator: "*generator_handle_to_generator_pointer",
70
+ }
71
+
72
+
73
+ # convert args to C types, names in declarations, and expressions in function bodies
74
+ def convert_arg_type_and_name(typ: Type, name: str) -> tuple[list[str], list[str], list[str], list[str]]: # type: ignore[return]
75
+ if isinstance(typ, BaseType):
76
+ if typ.name in base_type_to_c_type:
77
+ return (
78
+ [base_type_to_c_type[typ.name]],
79
+ [name],
80
+ [base_type_to_aten_type[typ.name]],
81
+ [
82
+ f"{base_type_to_callsite_expr[typ.name]}({name})"
83
+ if base_type_to_callsite_expr[typ.name]
84
+ else name
85
+ ],
86
+ )
87
+ elif typ.name == BaseTy.Device:
88
+ return (
89
+ ["int32_t", "int32_t"],
90
+ [name, name + "_index_"],
91
+ ["c10::Device"],
92
+ [
93
+ f"c10::Device(static_cast<c10::DeviceType>({name}), static_cast<c10::DeviceIndex>({name}_index_))"
94
+ ],
95
+ )
96
+ else:
97
+ # TODO: BaseTy.Dimname, etc.
98
+ raise NotImplementedError(f"TODO: add support for arg type {repr(typ)}")
99
+ elif isinstance(typ, OptionalType):
100
+ c_types, names, aten_types, callsite_exprs = convert_arg_type_and_name(
101
+ typ.elem, name
102
+ )
103
+ j = 0 # index for names
104
+ new_aten_types = []
105
+ new_callsite_exprs = []
106
+ for aten_type in aten_types:
107
+ # Use pointer to denote optional type
108
+ c_types[j] = c_types[j] + "*"
109
+ if aten_type.startswith("c10::ArrayRef<"):
110
+ # ArrayRef is passed as pointer + size, but no need to add "*" to the size argument
111
+ new_aten_types.append(f"::std::optional<{aten_type}>")
112
+ base_type = aten_type[len("c10::ArrayRef<") : -1]
113
+ new_callsite_exprs.append(
114
+ f"pointer_to_optional_list<{base_type}>({names[j]}, {names[j+1]})"
115
+ )
116
+ j += 2
117
+ elif aten_type == "c10::Device":
118
+ # Device is passed as device_type + device_index
119
+ new_aten_types.append("::std::optional<c10::Device>")
120
+ new_callsite_exprs.append(
121
+ f"pointer_to_optional_device({names[j]}, {names[j+1]})"
122
+ )
123
+ j += 2
124
+ else:
125
+ new_aten_types.append(f"::std::optional<{aten_type}>")
126
+ new_callsite_exprs.append(
127
+ f"pointer_to_optional<{aten_type}>({names[j]})"
128
+ )
129
+ j += 1
130
+
131
+ return (
132
+ c_types,
133
+ names,
134
+ new_aten_types,
135
+ new_callsite_exprs,
136
+ )
137
+ elif isinstance(typ, ListType):
138
+ # Need to explictly pass the list as pointer + length
139
+ c_types, names, aten_types, _ = convert_arg_type_and_name(typ.elem, name)
140
+ assert len(c_types) == 1, "ListType with unsupported element type " + repr(typ)
141
+
142
+ # The list content should never be modified
143
+ c_types[0] = f"const {c_types[0]}*"
144
+ c_types.append("int64_t")
145
+ name = names[0]
146
+ names.append(name + "_len_")
147
+
148
+ atype = aten_types[0]
149
+ callsite_exprs = []
150
+ if atype == "bool":
151
+ # no converter from std::vector<bool> to c10::ArrayRef<bool>
152
+ # construct std::array<bool, N> instead
153
+ assert typ.size is not None
154
+ callsite_exprs.append(f"pointer_to_list<{typ.size}>({name})")
155
+ elif atype == "::std::optional<at::Tensor>":
156
+ # convert from std::vector<::std::optional<at::Tensor>> to c10::List<::std::optional<at::Tensor>>
157
+ callsite_exprs.append(
158
+ f"c10::List<{atype}>(c10::ArrayRef<{atype}>(pointer_to_list<{atype}>({name}, {name}_len_)))"
159
+ )
160
+ else:
161
+ callsite_exprs.append(f"pointer_to_list<{atype}>({name}, {name}_len_)")
162
+
163
+ aten_types = [f"c10::ArrayRef<{t}>" for t in aten_types]
164
+ return (
165
+ c_types,
166
+ names,
167
+ aten_types,
168
+ callsite_exprs,
169
+ )
170
+
171
+
172
+ def zip_type_and_name(types: list[str], names: list[str]) -> list[str]:
173
+ return [typ + " " + name for typ, name in zip(types, names)]
174
+
175
+
176
+ # Generate argument declarations and callsite expressions
177
+ def gen_arguments(flat_arguments: Sequence[Argument]) -> tuple[list[str], list[str]]:
178
+ types = []
179
+ new_names = []
180
+ callsite_exprs = []
181
+ for arg in flat_arguments:
182
+ new_types, names, _, new_callsite_exprs = convert_arg_type_and_name(
183
+ arg.type, arg.name
184
+ )
185
+ types.extend(new_types)
186
+ new_names.extend(names)
187
+ callsite_exprs.extend(new_callsite_exprs)
188
+ return zip_type_and_name(types, new_names), callsite_exprs
189
+
190
+
191
+ # Return values are passed out as pointer arguments because all the C shim functions
192
+ # are expected to return AOTITorchError.
193
+ # Generate returns as declarations and callsite expressions
194
+ def gen_returns(schema: FunctionSchema) -> tuple[list[str], list[str]]:
195
+ types = []
196
+ names = []
197
+ for idx, ret in enumerate(schema.returns):
198
+ names.append(f"ret{idx}")
199
+ if isinstance(ret.type, BaseType) and ret.type.name in base_type_to_c_type:
200
+ types.append(base_type_to_c_type[ret.type.name] + "*")
201
+ else:
202
+ raise NotImplementedError(
203
+ f"TODO: add support for return type {repr(ret.type)}"
204
+ )
205
+
206
+ def convert_return(typ: BaseType, val: str) -> str:
207
+ if typ.name == BaseTy.Tensor:
208
+ return f"new_tensor_handle(std::move({val}));"
209
+ elif typ.name == BaseTy.SymInt:
210
+ return f"{val}.expect_int()"
211
+ elif typ.name == BaseTy.Scalar:
212
+ return f"{val}.toDouble()"
213
+ else:
214
+ return val
215
+
216
+ ret_pointer_can_be_null = False
217
+ unambiguous_name = schema.name.unambiguous_name()
218
+ for name in [
219
+ "_scaled_dot_product_flash_attention",
220
+ "_scaled_dot_product_efficient_attention",
221
+ "_scaled_dot_product_cudnn_attention",
222
+ "convolution_backward",
223
+ ]:
224
+ if name in unambiguous_name:
225
+ ret_pointer_can_be_null = True
226
+ break
227
+
228
+ callsite_exprs: list[str] = []
229
+ for idx, ret in enumerate(schema.returns):
230
+ tmp = "tmp_result" if len(names) == 1 else f"std::get<{idx}>(tmp_result)"
231
+ assert isinstance(ret.type, BaseType)
232
+ rval = convert_return(ret.type, tmp)
233
+ if ret_pointer_can_be_null:
234
+ callsite_exprs.append(f"if ({names[idx]}) {{ *{names[idx]} = {rval}; }}")
235
+ else:
236
+ callsite_exprs.append(f"*{names[idx]} = {rval};")
237
+
238
+ return zip_type_and_name(types, names), callsite_exprs
239
+
240
+
241
+ # gen.py generates header first and then src, so caching the result here to avoid duplicate work
242
+ declaration_definition_cache: dict[tuple[str, str, str], tuple[str, str]] = {}
243
+
244
+
245
+ def gen_declaration_and_definition(
246
+ schema: FunctionSchema, device: str, backend_call: str
247
+ ) -> tuple[str, str]:
248
+ func_name = schema.name.unambiguous_name()
249
+
250
+ global declaration_definition_cache
251
+ if (func_name, device, backend_call) in declaration_definition_cache:
252
+ return declaration_definition_cache[(func_name, device, backend_call)]
253
+
254
+ if schema.is_out_fn():
255
+ # out_variant has out arguments in the front, and it's ok to ignore return values
256
+ # because C shim functions only return AOTITorchError
257
+ args, callsite_exprs = gen_arguments(
258
+ [*schema.arguments.out, *schema.arguments.flat_non_out]
259
+ )
260
+ ret_assignments: list[str] = []
261
+ else:
262
+ args, callsite_exprs = gen_arguments(schema.arguments.flat_all)
263
+ # ignore return values for inplace ops
264
+ ret_declarations, ret_assignments = (
265
+ ([], []) if schema.name.name.inplace else gen_returns(schema)
266
+ )
267
+ args.extend(ret_declarations)
268
+
269
+ declaration = f"AOTITorchError aoti_torch_{device}_{func_name}({', '.join(args)})"
270
+
271
+ tmp_result = "auto tmp_result = " if ret_assignments else ""
272
+ ret_assignments_str = "\n" + "\n".join(ret_assignments) if ret_assignments else ""
273
+ definition = f"""
274
+ {declaration} {{
275
+ AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE({{
276
+ {tmp_result}{backend_call}(
277
+ {textwrap.indent(', '.join(callsite_exprs), " ")}
278
+ );{textwrap.indent(ret_assignments_str, " ")}
279
+ }});
280
+ }}
281
+ """
282
+ declaration_definition_cache[(func_name, device, backend_call)] = (
283
+ declaration,
284
+ definition,
285
+ )
286
+ return declaration, definition
287
+
288
+
289
+ def gen_static_dispatch_backend_call_signature(
290
+ sig: CppSignature | DispatcherSignature,
291
+ f: NativeFunction,
292
+ ) -> CppSignature:
293
+ sig = DispatcherSignature.from_schema(f.func)
294
+ cpp_sigs = CppSignatureGroup.from_native_function(
295
+ f, method=False, fallback_binding=False
296
+ )
297
+ if sig.symint and f.func.has_symint():
298
+ cpp_sig = cpp_sigs.symint_signature
299
+ else:
300
+ cpp_sig = cpp_sigs.signature
301
+ assert cpp_sig is not None
302
+ return cpp_sig
303
+
304
+
305
+ def gen_static_dispatch_backend_call(
306
+ f: NativeFunction,
307
+ backend_index: BackendIndex,
308
+ ) -> str:
309
+ sig = DispatcherSignature.from_schema(f.func)
310
+ cpp_sig = gen_static_dispatch_backend_call_signature(sig, f)
311
+ return f"at::{backend_index.dispatch_key.lower()}::{cpp_sig.name()}"
312
+
313
+
314
+ def get_backend_index_for_aoti(
+     func: NativeFunction,
+     func_group_mapping: dict[OperatorName, NativeFunctionsGroup],
+     dispatch_key: DispatchKey,
+     backend_indices: dict[DispatchKey, BackendIndex],
+ ) -> BackendIndex | None:
+     backend_index = None
+     if backend_indices[dispatch_key].has_kernel(func) or (
+         func.structured_delegate is not None
+         and func.structured_delegate in func_group_mapping
+         and backend_indices[dispatch_key].has_kernel(
+             func_group_mapping[func.structured_delegate]
+         )
+     ):
+         backend_index = backend_indices[dispatch_key]
+     elif backend_indices[DispatchKey.CompositeExplicitAutograd].has_kernel(func):
+         # We need to create C shim wrappers for CompositeExplicitAutograd kernels
+         backend_index = backend_indices[DispatchKey.CompositeExplicitAutograd]
+     elif backend_indices[DispatchKey.CompositeExplicitAutogradNonFunctional].has_kernel(
+         func
+     ):
+         # We need to create C shim wrappers for CompositeExplicitAutogradNonFunctional kernels
+         backend_index = backend_indices[
+             DispatchKey.CompositeExplicitAutogradNonFunctional
+         ]
+     elif backend_indices[DispatchKey.CompositeImplicitAutograd].has_kernel(func):
+         backend_index = backend_indices[DispatchKey.CompositeImplicitAutograd]
+
+     return backend_index
+
+
+ def get_header_for_aoti(
+     func: NativeFunction,
+     func_group_mapping: dict[OperatorName, NativeFunctionsGroup],
+     dispatch_key: DispatchKey,
+     backend_indices: dict[DispatchKey, BackendIndex],
+ ) -> str | None:
+     backend_index = get_backend_index_for_aoti(
+         func, func_group_mapping, dispatch_key, backend_indices
+     )
+     return (
+         None
+         if backend_index is None
+         else f"#include <ATen/ops/{func.root_name}_{backend_index.dispatch_key.lower()}_dispatch.h>"
+     )
+
+
+ def get_fallback_op_name(func: NativeFunction) -> str:
+     return (
+         f"{func.namespace}.{func.func.name.name}.{func.func.name.overload_name}"
+         if func.func.name.overload_name
+         else f"{func.namespace}.{func.func.name.name}.default"
+     )
+
+
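+ # Editor's note (illustration only): for a NativeFunction whose schema name is
+ # "aten::add.Tensor", get_fallback_op_name returns "aten.add.Tensor"; for one
+ # without an overload name, e.g. "aten::abs", it returns "aten.abs.default".
+
+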
+ def gen_c_shim(
+     func: NativeFunction,
+     func_group_mapping: dict[OperatorName, NativeFunctionsGroup],
+     dispatch_key: DispatchKey,
+     backend_indices: dict[DispatchKey, BackendIndex],
+     header: bool,
+ ) -> str | None:
+     backend_index = get_backend_index_for_aoti(
+         func, func_group_mapping, dispatch_key, backend_indices
+     )
+     if backend_index is None:
+         return None
+
+     schema = func.func
+     device = dispatch_key.lower()
+     backend_call = gen_static_dispatch_backend_call(
+         func,
+         backend_index,
+     )
+
+     try:
+         if header:
+             declaration, _ = gen_declaration_and_definition(
+                 schema, device, backend_call
+             )
+             return f"AOTI_TORCH_EXPORT {declaration};"
+         else:
+             _, definition = gen_declaration_and_definition(schema, device, backend_call)
+             return definition
+
+     except NotImplementedError:
+         return None
+
+
+ @dataclass(frozen=True)
+ class ShimGenerator:
+     func_group_mapping: dict[OperatorName, NativeFunctionsGroup]
+     dispatch_key: DispatchKey
+     backend_indices: dict[DispatchKey, BackendIndex]
+     header: bool  # True to generate .h and False to generate .cpp
+
+     @method_with_native_function
+     def __call__(
+         self,
+         func: NativeFunction,
+     ) -> str | None:
+         result = gen_c_shim(
+             func,
+             self.func_group_mapping,
+             self.dispatch_key,
+             self.backend_indices,
+             self.header,
+         )
+         return result
+
+
+ def gen_aoti_c_shim(
+     native_functions: Sequence[NativeFunction],
+     func_group_mapping: dict[OperatorName, NativeFunctionsGroup],
+     dispatch_key: DispatchKey,
+     backend_indices: dict[DispatchKey, BackendIndex],
+     header: bool,
+     includes: str = "",
+ ) -> str:
+     body = "\n".join(
+         list(
+             mapMaybe(
+                 ShimGenerator(
+                     func_group_mapping, dispatch_key, backend_indices, header
+                 ),
+                 native_functions,
+             )
+         )
+     )
+     device = dispatch_key.lower()
+
+     warning = """
+ // WARNING: THIS FILE IS AUTOGENERATED BY torchgen. DO NOT MODIFY BY HAND.
+ // See https://github.com/pytorch/pytorch/blob/7e86a7c0155295539996e0cf422883571126073e/torchgen/gen.py#L2424-L2436 for details"""
+
+     if header:
+         return f"""
+ {warning}
+
+ #pragma once
+
+ #include <torch/csrc/inductor/aoti_torch/c/shim.h>
+
+ #ifdef __cplusplus
+ extern "C" {{
+ #endif
+
+ {body}
+
+ #ifdef __cplusplus
+ }} // extern "C"
+ #endif
+ """
+
+     else:
+         return f"""
+ {warning}
+
+ #include <torch/csrc/inductor/aoti_torch/generated/c_shim_{device}.h>
+ #include <torch/csrc/inductor/aoti_torch/utils.h>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/{str(dispatch_key)}Functions.h>
+ #include <ATen/CompositeExplicitAutogradFunctions.h>
+ #include <ATen/CompositeExplicitAutogradNonFunctionalFunctions.h>
+ #include <ATen/CompositeImplicitAutogradFunctions.h>
+ #else
+ {includes}
+ #endif
+
+ using namespace torch::aot_inductor;
+
+ {body}"""
vllm/lib/python3.10/site-packages/torchgen/gen_backend_stubs.py ADDED
@@ -0,0 +1,611 @@
+ from __future__ import annotations
+
+ import argparse
+ import os
+ import re
+ from collections import Counter, defaultdict, namedtuple
+ from pathlib import Path
+ from typing import Sequence
+
+ import yaml
+
+ import torchgen.api.dispatcher as dispatcher
+ import torchgen.dest as dest
+ from torchgen.api.types import DispatcherSignature
+ from torchgen.code_template import CodeTemplate
+ from torchgen.context import native_function_manager
+ from torchgen.gen import get_grouped_native_functions, parse_native_yaml
+ from torchgen.model import (
+     BackendIndex,
+     BackendMetadata,
+     DispatchKey,
+     NativeFunction,
+     NativeFunctionsGroup,
+     OperatorName,
+ )
+ from torchgen.selective_build.selector import SelectiveBuilder
+ from torchgen.utils import concatMap, context, FileManager, NamespaceHelper, Target
+ from torchgen.yaml_utils import YamlLoader
+
+
+ # Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
+ # Returns a tuple of (backend_key, autograd_key, class_name, cpp_namespace, updated BackendIndex mapping).
+ ParsedExternalYaml = namedtuple(
+     "ParsedExternalYaml",
+     ["backend_key", "autograd_key", "class_name", "cpp_namespace", "backend_indices"],
+ )
+
+
+ def parse_backend_yaml(
+     backend_yaml_path: str,
+     grouped_native_functions: Sequence[NativeFunction | NativeFunctionsGroup],
+     backend_indices: dict[DispatchKey, BackendIndex],
+ ) -> ParsedExternalYaml:
+     native_functions_map: dict[OperatorName, NativeFunction] = {
+         f.func.name: f
+         for f in concatMap(
+             lambda f: [f] if isinstance(f, NativeFunction) else list(f.functions()),
+             grouped_native_functions,
+         )
+     }
+
+     with open(backend_yaml_path) as f:
+         yaml_values = yaml.load(f, Loader=YamlLoader)
+     assert isinstance(yaml_values, dict)
+
+     valid_keys = [
+         "backend",
+         "class_name",
+         "cpp_namespace",
+         "extra_headers",
+         "supported",
+         "autograd",
+         "full_codegen",
+         "non_native",
+         "ir_gen",
+         "symint",
+     ]
+
+     backend = yaml_values.pop("backend", None)
+     assert backend is not None, 'You must provide a value for "backend"'
+
+     class_name = yaml_values.pop("class_name", None)
+
+     cpp_namespace = yaml_values.pop("cpp_namespace", None)
+     assert cpp_namespace is not None, 'You must provide a value for "cpp_namespace"'
+
+     # Mostly just defaulting to false to stick with LazyTensor convention.
+     use_out_as_primary = yaml_values.pop("use_out_as_primary", False)
+     assert isinstance(
+         use_out_as_primary, bool
+     ), f"You must provide either True or False for use_out_as_primary. Provided: {use_out_as_primary}"
+
+     use_device_guard = yaml_values.pop("device_guard", False)
+     assert isinstance(
+         use_device_guard, bool
+     ), f"You must provide either True or False for device_guard. Provided: {use_device_guard}"
+
+     supported = yaml_values.pop("supported", [])
+     if supported is None:
+         supported = []  # Allow an empty list of supported ops
+     assert isinstance(
+         supported, list
+     ), f'expected "supported" to be a list, but got: {supported} (of type {type(supported)})'
+
+     symint = yaml_values.pop("symint", [])
+     if symint is None:
+         symint = []  # Allow an empty list of symint ops
+     assert isinstance(
+         symint, list
+     ), f'expected "symint" to be a list, but got: {symint} (of type {type(symint)})'
+     symint_set = set(symint)
+
+     supported_autograd = yaml_values.pop("autograd", [])
+     assert isinstance(
+         supported_autograd, list
+     ), f'expected "autograd" to be a list, but got: {supported_autograd}'
+
+     # full_codegen is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
+     full_codegen = yaml_values.pop("full_codegen", [])
+     supported.extend(full_codegen)
+
+     # non_native is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
+     yaml_values.pop("non_native", {})
+
+     # ir_gen is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
+     yaml_values.pop("ir_gen", {})
+
+     assert (
+         len(yaml_values.keys()) == 0
+     ), f'{backend_yaml_path} contains unexpected keys: {", ".join(yaml_values.keys())}. \
+ Only the following keys are supported: {", ".join(valid_keys)}'
+
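+     # Editor's illustration (not upstream code): a minimal backend yaml accepted
+     # by this parser might look like
+     #
+     #     backend: XLA
+     #     cpp_namespace: torch_xla
+     #     supported:
+     #     - abs
+     #     - add.Tensor
+     #     autograd:
+     #     - tanh
+     #
+     # Every op listed under "supported"/"autograd" must name an operator from
+     # native_functions.yaml; create_backend_index below asserts this.
+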
+     def create_backend_index(
+         backend_ops: list[str],
+         symint_ops: set[str],
+         dispatch_key: DispatchKey,
+         *,
+         use_out_as_primary: bool,
+         use_device_guard: bool,
+     ) -> BackendIndex:
+         metadata: dict[OperatorName, BackendMetadata] = {}
+         for op in backend_ops:
+             op_name = OperatorName.parse(op)
+             assert (
+                 op_name in native_functions_map
+             ), f"Found an invalid operator name: {op_name}"
+             # See Note [External Backends Follow Dispatcher API]
+             kernel_name = dispatcher.name(native_functions_map[op_name].func)
+             if op in symint_ops:
+                 kernel_name += "_symint"
+             # TODO: allow structured external backends later.
+             m = BackendMetadata(
+                 kernel=kernel_name, structured=False, cpp_namespace=cpp_namespace
+             )
+             metadata[op_name] = m
+         return BackendIndex(
+             dispatch_key=dispatch_key,
+             use_out_as_primary=use_out_as_primary,
+             external=True,
+             device_guard=use_device_guard,
+             index=metadata,
+         )
+
+     backend_key: DispatchKey | None = None
+     if len(supported) > 0:
+         with context(
+             lambda: f'The provided value for "backend" must be a valid DispatchKey, but got {backend}.'
+         ):
+             backend_key = DispatchKey.parse(backend)
+
+         backend_idx = create_backend_index(
+             supported,
+             symint_set,
+             backend_key,
+             use_out_as_primary=use_out_as_primary,
+             use_device_guard=use_device_guard,
+         )
+         assert backend_key not in backend_indices
+         backend_indices[backend_key] = backend_idx
+
+     autograd_key: DispatchKey | None = None
+     if len(supported_autograd) > 0:
+         with context(
+             lambda: f'The "autograd" key was specified, which indicates that you would like to override \
+ the behavior of autograd for some operators on your backend. However "Autograd{backend}" is not a valid DispatchKey.'
+         ):
+             autograd_key = DispatchKey.parse(f"Autograd{backend}")
+
+         autograd_idx = create_backend_index(
+             supported_autograd,
+             symint_set,
+             autograd_key,
+             use_out_as_primary=use_out_as_primary,
+             use_device_guard=use_device_guard,
+         )
+         assert autograd_key not in backend_indices
+         backend_indices[autograd_key] = autograd_idx
+
+     for g in grouped_native_functions:
+         if isinstance(g, NativeFunction):
+             forward_kernels = (
+                 []
+                 if backend_key is None
+                 else [
+                     m
+                     for m in [backend_indices[backend_key].get_kernel(g)]
+                     if m is not None
+                 ]
+             )
+             backward_kernels = (
+                 []
+                 if autograd_key is None
+                 else [
+                     m
+                     for m in [backend_indices[autograd_key].get_kernel(g)]
+                     if m is not None
+                 ]
+             )
+         else:
+             forward_kernels = (
+                 []
+                 if backend_key is None
+                 else [
+                     m
+                     for m in [
+                         backend_indices[backend_key].get_kernel(f)
+                         for f in g.functions()
+                     ]
+                     if m is not None
+                 ]
+             )
+             backward_kernels = (
+                 []
+                 if autograd_key is None
+                 else [
+                     m
+                     for m in [
+                         backend_indices[autograd_key].get_kernel(f)
+                         for f in g.functions()
+                     ]
+                     if m is not None
+                 ]
+             )
+
+         forward_kernels = [f for f in forward_kernels if f is not None]
+         backward_kernels = [f for f in backward_kernels if f is not None]
+         assert (
+             len(forward_kernels) == 0 or len(backward_kernels) == 0
+         ), f'Currently, all variants of an op must either be registered to a backend key, or to a backend\'s \
+ autograd key. They cannot be mixed and matched. If this is something you need, feel free to create an issue! \
+ {forward_kernels[0].kernel} is listed under "supported", but {backward_kernels[0].kernel} is listed under "autograd".'
+
+     return ParsedExternalYaml(
+         backend_key, autograd_key, class_name, cpp_namespace, backend_indices
+     )
+
+
+ def error_on_missing_kernels(
+     native_functions: Sequence[NativeFunction],
+     backend_indices: dict[DispatchKey, BackendIndex],
+     backend_key: DispatchKey,
+     autograd_key: DispatchKey | None,
+     class_name: str,
+     kernel_defn_file_path: str,
+     full_codegen: list[OperatorName] | None = None,
+ ) -> None:
+     try:
+         with open(kernel_defn_file_path) as f:
+             backend_defns = f.read()
+     except OSError as e:
+         raise AssertionError(
+             f"Unable to read from the specified impl_path file: {kernel_defn_file_path}"
+         ) from e
+
+     if full_codegen is None:
+         full_codegen = []
+
+     indices = [backend_indices[backend_key].index] + (
+         [] if autograd_key is None else [backend_indices[autograd_key].index]
+     )
+     # Quick mapping from each OperatorName used by the external backend
+     # to its backend kernel name
+     expected_backend_op_names: dict[OperatorName, str] = dict(
+         list(
+             concatMap(
+                 lambda index: [
+                     (op_name, metadata.kernel) for op_name, metadata in index.items()
+                 ],
+                 indices,
+             )
+         )
+     )
+     expected_backend_native_funcs: list[NativeFunction] = [
+         f
+         for f in native_functions
+         if f.func.name in expected_backend_op_names.keys()
+         and f.func.name not in full_codegen
+     ]
+     expected_backend_kernel_name_counts: dict[str, list[NativeFunction]] = defaultdict(
+         list
+     )
+     for native_f in expected_backend_native_funcs:
+         expected_backend_kernel_name_counts[
+             expected_backend_op_names[native_f.func.name]
+         ].append(native_f)
+
+     # This just looks for lines containing "foo(", and assumes that the kernel foo has been implemented.
+     # It might cause false negatives (we won't catch all cases), but that's ok - if we catch a missing kernel
+     # here, then we get a nicer error message. If we miss it, you get a linker error.
+     kernel_defn_regex = rf"(.*){class_name}::\s*([\w\d]*)\("
+     actual_backend_kernel_name_counts = Counter(
+         # A bit unwieldy (this could probably be moved into regex),
+         # but we don't want to include kernel names that come from function calls,
+         # like "return torch_xla::XLANativeFunctions::empty_strided_symint(...)".
+         # Easy check is to ignore any lines with colons before the class name.
+         [
+             y
+             for (x, y) in re.findall(kernel_defn_regex, backend_defns)
+             if not x.endswith(":")
+         ]
+     )
+
+     missing_kernels_err_msg = ""
+     for expected_name, funcs in expected_backend_kernel_name_counts.items():
+         expected_overload_count = len(funcs)
+         actual_overload_count = actual_backend_kernel_name_counts[expected_name]
+         if expected_overload_count != actual_overload_count:
+
+             def create_decl(f: NativeFunction) -> str:
+                 with native_function_manager(f):
+                     return DispatcherSignature.from_schema(f.func).decl()
+
+             expected_schemas_str = "\n".join([create_decl(f) for f in funcs])
+             missing_kernels_err_msg += f"""
+ {class_name} is missing a kernel definition for {expected_name}. We found {actual_overload_count} kernel(s) with that name,
+ but expected {expected_overload_count} kernel(s). The expected function schemas for the missing operator are:
+ {expected_schemas_str}
+
+ """
+     assert missing_kernels_err_msg == "", missing_kernels_err_msg
+
+
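+ # Editor's illustration of the matching logic in error_on_missing_kernels (not
+ # upstream code): with class_name "XLANativeFunctions", a definition line such as
+ #
+ #     at::Tensor XLANativeFunctions::add(const at::Tensor& self, ...) {
+ #
+ # matches kernel_defn_regex and counts one "add" overload, while a call site like
+ #
+ #     return torch_xla::XLANativeFunctions::empty_strided_symint(...);
+ #
+ # is discarded because the text captured before the class name ends with ":".
+
+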
+ def main() -> None:
+     parser = argparse.ArgumentParser(description="Generate backend stub files")
+     parser.add_argument(
+         "-s",
+         "--source-yaml",
+         "--source_yaml",
+         help="path to source yaml file containing operator external definitions",
+     )
+     parser.add_argument("-o", "--output-dir", "--output_dir", help="output directory")
+     parser.add_argument(
+         "--dry-run", "--dry_run", type=bool, default=False, help="dry run; do not write any files"
+     )
+     parser.add_argument(
+         "--impl-path",
+         "--impl_path",
+         type=str,
+         default=None,
+         help="path to the source C++ file containing kernel definitions",
+     )
+     options = parser.parse_args()
+
+     run(options.source_yaml, options.output_dir, options.dry_run, options.impl_path)
+
+
+ def gen_dispatchkey_nativefunc_headers(
+     fm: FileManager,
+     class_name: str,
+     cpp_namespace: str,
+     backend_indices: dict[DispatchKey, BackendIndex],
+     grouped_native_functions: Sequence[NativeFunction | NativeFunctionsGroup],
+     backend_dispatch_key: DispatchKey,
+     autograd_dispatch_key: DispatchKey | None,
+     backend_name: str = "",
+ ) -> None:
+     assert class_name is not None
+     generated_comment = (
+         "Autogenerated file by gen_backend_stubs.py. Do not edit directly!"
+     )
+
+     # Convert to a set first to remove duplicate kernel names.
+     # Backends are allowed to repeat kernel names; only generate the declaration once!
+     # Sort for deterministic output.
+     backend_declarations = sorted(
+         set(
+             concatMap(
+                 lambda f: dest.compute_native_function_declaration(
+                     f, backend_indices[backend_dispatch_key]
+                 ),
+                 grouped_native_functions,
+             )
+         )
+     )
+     autograd_declarations = sorted(
+         set(
+             concatMap(
+                 lambda f: []
+                 if autograd_dispatch_key is None
+                 else dest.compute_native_function_declaration(
+                     f, backend_indices[autograd_dispatch_key]
+                 ),
+                 grouped_native_functions,
+             )
+         )
+     )
+
+     ns_helper = NamespaceHelper(cpp_namespace)
+     fm.write_with_template(
+         f"{backend_dispatch_key}NativeFunctions.h",
+         "DispatchKeyNativeFunctions.h",
+         lambda: {
+             "generated_comment": generated_comment,
+             "namespace_prologue": ns_helper.prologue,
+             "class_name": class_name,
+             "namespace_epilogue": ns_helper.epilogue,
+             "dispatch_declarations": backend_declarations + autograd_declarations,
+             "BackendName": backend_name,
+             "DispatchKey": backend_dispatch_key,
+         },
+     )
+
+
+ def gen_dispatcher_registrations(
+     fm: FileManager,
+     output_dir: str,
+     class_name: str,
+     backend_indices: dict[DispatchKey, BackendIndex],
+     grouped_native_functions: Sequence[NativeFunction | NativeFunctionsGroup],
+     backend_dispatch_key: DispatchKey,
+     dispatch_key: DispatchKey,
+     selector: SelectiveBuilder,
+     # build_in_tree is true for lazy TS backend and affects include paths, not used for external backends
+     build_in_tree: bool = False,
+     per_operator_headers: bool = False,
+     backend_name: str = "",
+     eager_registration: bool = True,
+ ) -> None:
+     headers = [
+         f"{output_dir}/{backend_dispatch_key}NativeFunctions.h",
+     ]
+     if build_in_tree:
+         external_backend_headers_str = "\n".join(f"#include <{h}>" for h in headers)
+     else:
+         external_backend_headers_str = "\n".join(f'#include "{h}"' for h in headers)
+
+     assert class_name is not None
+     backend_index = backend_indices[dispatch_key]
+
+     dispatch_registrations_body = list(
+         concatMap(
+             dest.RegisterDispatchKey(
+                 backend_index,
+                 Target.REGISTRATION,
+                 selector,
+                 rocm=False,
+                 symint=True,
+                 class_method_name=f"{class_name}",
+                 skip_dispatcher_op_registration=False,
+             ),
+             grouped_native_functions,
+         )
+     )
+     newline = "\n"
+     ns_helper = NamespaceHelper(namespace_str="at")
+     deferred_dispatch_registrations = ""
+     static_init_dispatch_registrations = ""
+     if eager_registration:
+         static_template = CodeTemplate(
+             """\
+ TORCH_LIBRARY_IMPL(aten, $dispatch_key, m) {
+     $dispatch_registrations_body
+ };"""
+         )
+         static_init_dispatch_registrations = static_template.substitute(
+             dispatch_key=dispatch_key,
+             dispatch_registrations_body=dispatch_registrations_body,
+         )
+     else:
+         deferred_template = CodeTemplate(
+             """\
+ TORCH_API void Register${backend_name}${dispatch_key}NativeFunctions();
+ TORCH_API void Register${backend_name}${dispatch_key}NativeFunctions() {
+     static auto m = MAKE_TORCH_LIBRARY_IMPL(aten, $dispatch_key);
+     $dispatch_registrations_body
+ }"""
+         )
+         deferred_dispatch_registrations = deferred_template.substitute(
+             backend_name=backend_name,
+             dispatch_key=dispatch_key,
+             dispatch_registrations_body=dispatch_registrations_body,
+         )
+
+     fm.write_with_template(
+         f"Register{dispatch_key}.cpp",
+         "RegisterDispatchKey.cpp",
+         lambda: {
+             "extra_cuda_headers": "",
+             "external_backend_headers": external_backend_headers_str,
+             "ops_headers": "#include <ATen/Functions.h>"
+             if not per_operator_headers
+             else "",
+             "DispatchKey": dispatch_key,
+             "dispatch_namespace": dispatch_key.lower(),
+             "dispatch_headers": dest.gen_registration_headers(
+                 backend_index, per_operator_headers=per_operator_headers, rocm=False
+             ),
+             "dispatch_definitions": fm.substitute_with_template(
+                 "RegisterDispatchDefinitions.ini",
+                 lambda: {
+                     "ns_prologue": ns_helper.prologue,
+                     "ns_epilogue": ns_helper.epilogue,
+                     "static_init_dispatch_registrations": static_init_dispatch_registrations,
+                     "deferred_dispatch_registrations": deferred_dispatch_registrations,
+                     "dispatch_helpers": dest.gen_registration_helpers(backend_index),
+                     "dispatch_namespace": dispatch_key.lower(),
+                     "dispatch_namespaced_definitions": "",
+                     "dispatch_anonymous_definitions": list(
+                         concatMap(
+                             dest.RegisterDispatchKey(
+                                 backend_index,
+                                 Target.ANONYMOUS_DEFINITION,
+                                 selector,
+                                 rocm=False,
+                                 symint=True,
+                                 class_method_name=f"{class_name}",
+                                 skip_dispatcher_op_registration=False,
+                             ),
+                             grouped_native_functions,
+                         )
+                     ),
+                 },
+             ).split(newline),
+         },
+     )
+
+
+ def run(
+     source_yaml: str, output_dir: str, dry_run: bool, impl_path: str | None = None
+ ) -> None:
+     # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
+     pytorch_root = Path(__file__).parent.parent.absolute()
+     template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")
+
+     def make_file_manager(install_dir: str) -> FileManager:
+         return FileManager(
+             install_dir=install_dir, template_dir=template_dir, dry_run=dry_run
+         )
+
+     fm = make_file_manager(output_dir)
+
+     native_yaml_path = os.path.join(
+         pytorch_root, "aten/src/ATen/native/native_functions.yaml"
+     )
+     tags_yaml_path = os.path.join(pytorch_root, "aten/src/ATen/native/tags.yaml")
+     parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
+     native_functions, backend_indices = (
+         parsed_yaml.native_functions,
+         parsed_yaml.backend_indices,
+     )
+     grouped_native_functions = get_grouped_native_functions(native_functions)
+     parsed_backend_yaml = parse_backend_yaml(
+         source_yaml, grouped_native_functions, backend_indices
+     )
+     backend_key = parsed_backend_yaml.backend_key
+     autograd_key = parsed_backend_yaml.autograd_key
+     cpp_namespace = parsed_backend_yaml.cpp_namespace
+     class_name = parsed_backend_yaml.class_name
+     backend_indices = parsed_backend_yaml.backend_indices
+
+     selector = SelectiveBuilder.get_nop_selector()
+
+     if backend_key is None:
+         # This could be useful if a backend wants to quickly set up a noop yaml file but doesn't have any kernels ready yet.
+         return
+
+     if class_name is None:
+         # class_name is an optional argument in the backend yaml file.
+         # if specified, it allows an external backend to override
+         # the name of the class that all generated kernel definitions live under.
+         # if not specified, its value is taken from native_function_class_name.
+         class_name = backend_indices[backend_key].native_function_class_name()
+     assert class_name is not None
+
+     if impl_path is not None:
+         error_on_missing_kernels(
+             native_functions,
+             backend_indices,
+             backend_key,
+             autograd_key,
+             class_name,
+             impl_path,
+         )
+
+     gen_dispatchkey_nativefunc_headers(
+         fm,
+         class_name,
+         cpp_namespace,
+         backend_indices,
+         grouped_native_functions,
+         backend_key,
+         autograd_key,
+     )
+
+     for dispatch_key in (
+         [backend_key] if autograd_key is None else [backend_key, autograd_key]
+     ):
+         gen_dispatcher_registrations(
+             fm,
+             output_dir,
+             class_name,
+             backend_indices,
+             grouped_native_functions,
+             backend_key,
+             dispatch_key,
+             selector,
+         )
+
+
+ if __name__ == "__main__":
+     main()
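+
+
+ # Typical invocation (editor's sketch; flag names taken from the argparse setup
+ # in main() above, paths are placeholders):
+ #
+ #     python -m torchgen.gen_backend_stubs \
+ #         --source-yaml my_backend.yaml \
+ #         --output-dir generated/ \
+ #         --impl-path my_backend_kernels.cpp  # optional; enables the missing-kernel check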
vllm/lib/python3.10/site-packages/torchgen/gen_executorch.py ADDED
@@ -0,0 +1,998 @@
+ from __future__ import annotations
+
+ import argparse
+ import os
+ from collections import defaultdict
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Any, Callable, Sequence, TextIO, TYPE_CHECKING
+
+ import yaml
+
+ # Parse native_functions.yaml into a sequence of NativeFunctions and Backend Indices.
+ from torchgen import dest
+ from torchgen.api import cpp as aten_cpp
+ from torchgen.api.types import CppSignature, CppSignatureGroup, CType, NamedCType
+ from torchgen.context import (
+     method_with_native_function,
+     method_with_nested_native_function,
+     with_native_function_and_index,
+ )
+ from torchgen.executorch.api import et_cpp
+ from torchgen.executorch.api.custom_ops import (
+     ComputeNativeFunctionStub,
+     gen_custom_ops_registration,
+ )
+ from torchgen.executorch.api.types import contextArg, ExecutorchCppSignature
+ from torchgen.executorch.api.unboxing import Unboxing
+ from torchgen.executorch.model import ETKernelIndex, ETKernelKey, ETParsedYaml
+ from torchgen.executorch.parse import ET_FIELDS, parse_et_yaml, parse_et_yaml_struct
+ from torchgen.gen import (
+     get_custom_build_selector,
+     get_native_function_declarations,
+     get_native_function_declarations_from_ns_grouped_kernels,
+     get_native_function_schema_registrations,
+     LineLoader,
+     parse_native_yaml,
+ )
+ from torchgen.model import (
+     BackendIndex,
+     BackendMetadata,
+     DEFAULT_KERNEL_NAMESPACE,
+     DispatchKey,
+     FunctionSchema,
+     Location,
+     NativeFunction,
+     NativeFunctionsGroup,
+     OperatorName,
+     Variant,
+ )
+ from torchgen.utils import (
+     context,
+     FileManager,
+     make_file_manager,
+     mapMaybe,
+     NamespaceHelper,
+ )
+
+
+ if TYPE_CHECKING:
+     from torchgen.selective_build.selector import SelectiveBuilder
+
+
+ def _sig_decl_wrapper(sig: CppSignature | ExecutorchCppSignature) -> str:
+     """
+     A wrapper function to get `sig.decl(include_context=True)`.
+     For an ATen kernel, the codegen has no idea about the ET contextArg, so we
+     use this wrapper to add it.
+     """
+     if isinstance(sig, ExecutorchCppSignature):
+         return sig.decl()
+
+     returns_type = aten_cpp.returns_type(sig.func.returns).cpp_type()
+     cpp_args = [a.decl() for a in sig.arguments()]
+     cpp_args_str = ", ".join([contextArg.decl()] + cpp_args)
+     sig_decl = f"{returns_type} {sig.name()}({cpp_args_str})"
+     return sig_decl
+
+
+ def static_dispatch(
+     sig: CppSignature | ExecutorchCppSignature,
+     f: NativeFunction,
+     backend_indices: list[BackendIndex],
+ ) -> str:
+     """
+     For a given `NativeFunction`, find the corresponding native function and dispatch to it. If zero or more than one
+     native function exists, error out. A simplified version of register_dispatch_key.py.
+     Arguments:
+         sig: A CppSignature for this native function we want to use.
+         f: NativeFunction to generate static dispatch for.
+         backend_indices: All available backends.
+     Return:
+         C++ code to call backend-specific functions, e.g., "return at::native::add(self, other, scale);"
+     """
+     if len(backend_indices) == 0 or f.manual_kernel_registration:
+         return ""
+
+     backends = [b for b in backend_indices if b.has_kernel(f)]
+     static_block = None
+     if len(backends) == 1:
+         backend_metadata = backends[0].get_kernel(f)
+         if backend_metadata:
+             args = ", ".join(a.name for a in sig.arguments())
+             # Here we are assuming there's no difference between CppSignature and NativeSignature for Executorch.
+             static_block = f"return ::{backend_metadata.cpp_namespace}::{backend_metadata.kernel}({args});"
+     else:
+         static_block = f"""
+ ET_ASSERT_UNREACHABLE_MSG("The number of native function(s) binding to {f.func.name} is {len(backends)}.");
+ """
+     return f"""
+ // {f.namespace}::{f.func}
+ TORCH_API inline {_sig_decl_wrapper(sig)} {{
+     {static_block}
+ }}
+ """
+
+
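+ # Editor's illustration (not upstream code): with exactly one matching backend,
+ # static_dispatch above emits roughly
+ #
+ #     // custom::foo.out(...)
+ #     TORCH_API inline <ret> foo_out(<context arg>, <args>) {
+ #         return ::custom_namespace::foo_out(<args>);
+ #     }
+ #
+ # while zero or multiple matches emit an ET_ASSERT_UNREACHABLE_MSG body instead,
+ # deferring the failure from codegen time to call time.
+
+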
+ # Generates Functions.h, which provides the functional public C++ API,
+ # and the scaffolding to call into the dispatcher from these functions.
+ @dataclass(frozen=True)
+ class ComputeFunction:
+     static_dispatch_backend_indices: list[BackendIndex]
+
+     selector: SelectiveBuilder
+
+     use_aten_lib: bool
+
+     is_custom_op: Callable[[NativeFunction], bool]
+
+     @method_with_native_function
+     def __call__(self, f: NativeFunction) -> str | None:
+         is_method_variant = False
+         if not self.selector.is_root_operator(f"{f.namespace}::{f.func.name}"):
+             return None
+
+         if Variant.function not in f.variants and Variant.method in f.variants:
+             is_method_variant = True
+
+         # the only valid remaining case is that only the function variant is in f.variants
+         elif not (Variant.function in f.variants and Variant.method not in f.variants):
+             raise Exception(  # noqa: TRY002
+                 f"Can't handle native function {f.func} with the following variant specification {f.variants}."
+             )
+
+         sig: CppSignature | ExecutorchCppSignature = (
+             CppSignatureGroup.from_native_function(
+                 f, method=False, fallback_binding=f.manual_cpp_binding
+             ).most_faithful_signature()
+             if self.use_aten_lib
+             else ExecutorchCppSignature.from_native_function(f)
+         )
+         if self.use_aten_lib and not self.is_custom_op(f):
+             comma = ", "
+
+             if is_method_variant:
+                 return f"""
+ // {f.namespace}::{f.func}
+ TORCH_API inline {_sig_decl_wrapper(sig)} {{
+     return {sig.arguments()[0].name}.{sig.name()}({comma.join(e.name for e in sig.arguments()[1:])});
+ }}
+ """
+             else:
+                 return f"""
+ // {f.namespace}::{f.func}
+ TORCH_API inline {_sig_decl_wrapper(sig)} {{
+     return at::{sig.name()}({comma.join(e.name for e in sig.arguments())});
+ }}
+ """
+
+         else:
+             return static_dispatch(
+                 sig,
+                 f,
+                 backend_indices=self.static_dispatch_backend_indices,
+             )
+
+
+ # Generates RegisterCodegenUnboxedKernels.cpp.
+ @dataclass(frozen=True)
+ class ComputeCodegenUnboxedKernels:
+     selector: SelectiveBuilder
+
+     use_aten_lib: bool
+
+     @method_with_nested_native_function
+     def __call__(
+         self,
+         unbox_kernel_entry: tuple[NativeFunction, tuple[ETKernelKey, BackendMetadata]],
+     ) -> str:
+         f: NativeFunction = unbox_kernel_entry[0]
+         kernel_key: ETKernelKey | list[ETKernelKey] = unbox_kernel_entry[1][0]
+         kernel_meta: BackendMetadata = unbox_kernel_entry[1][1]
+
+         op_name = f"{f.namespace}::{f.func.name}"
+         if not self.selector.is_root_operator(op_name):
+             return ""
+
+         if not isinstance(kernel_key, list):
+             kernel_key = [kernel_key]
+         used_kernel_keys = self.selector.et_get_selected_kernels(
+             op_name, [k.to_native_string() for k in kernel_key]
+         )
+         if not used_kernel_keys:
+             return ""
+         sig: CppSignature | ExecutorchCppSignature
+         argument_type_gen: Callable[..., NamedCType]
+         return_type_gen: Callable[..., CType]
+         if self.use_aten_lib:
+             sig = CppSignatureGroup.from_native_function(
+                 f, method=False, fallback_binding=f.manual_cpp_binding
+             ).most_faithful_signature()
+             argument_type_gen = aten_cpp.argumenttype_type
+             return_type_gen = aten_cpp.returns_type
+             arguments = sig.arguments()
+             kernel_call = f"torch::executor::{f.namespace}::{sig.name()}"
+         else:
+             sig = ExecutorchCppSignature.from_native_function(f)
+             argument_type_gen = et_cpp.argumenttype_type
+             return_type_gen = et_cpp.returns_type
+             arguments = sig.arguments(include_context=False)
+             kernel_call = f"{kernel_meta.cpp_namespace}::{kernel_meta.kernel}"
+         # parse arguments into C++ code
+         binding_list, code_list = Unboxing(
+             argument_type_gen=argument_type_gen
+         ).convert_arguments(arguments)
+
+         # for each C++ argument, generate the conversion code
+         code_connector = "\n\t"
+         arg_connector = ", "
+
+         args_str = f"{arg_connector.join(e.name for e in binding_list)}"
+         event_tracer_output_logging = ""
+         output_ids = []
+
+         if len(f.func.returns) == 0:
+             if len(f.func.arguments.out) == 0:
+                 raise Exception(  # noqa: TRY002
+                     f"Can't handle native function {f.func} with no returns and no out yet."
+                 )
+             out = f.func.arguments.out[0]
+             return_assignment = f"""stack[{len(binding_list)}] = &{out.name};"""
+             ret_prefix = ""
+             output_ids = [len(binding_list)]
+         else:
+             if len(f.func.arguments.out) == 0:
+                 return_assignment = (
+                     f"""*stack[{len(binding_list)}] = EValue(result_);"""
+                 )
+                 ret_prefix = return_type_gen(f.func.returns).cpp_type() + " result_ = "
+                 output_ids = [len(binding_list)]
+             else:
+                 return_assignment = ""
+                 ret_prefix = ""
+                 output_ids = [
+                     len(binding_list) - (i + 1)
+                     for i in reversed(range(len(f.func.arguments.out)))
+                 ]
+
+         for output_id in output_ids:
+             event_tracer_output_logging += (
+                 f"internal::event_tracer_log_evalue("
+                 f"context.internal_event_tracer(), "
+                 f"*stack[{output_id}]);\n"
+             )
+
+         newline = "\n    "
+         return "\n".join(
+             [
+                 f"""
+ Kernel(
+     "{f.namespace}::{f.func.name}",{newline + '"' + (k + '",') if k != 'default' else ''}
+     []({contextArg.defn()}, EValue** stack) {{
+         {code_connector.join(code_list)}
+
+         internal::EventTracerProfileScope event_tracer_scope(context.internal_event_tracer(), "native_call_{f.func.name}");
+         EXECUTORCH_SCOPE_PROF("native_call_{f.func.name}");
+         {ret_prefix}{kernel_call}(context, {args_str});
+         {event_tracer_output_logging}
+         {return_assignment}
+     }}
+ ),
+ """
+                 for k in used_kernel_keys
+             ]
+         )
+
+
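+ # Editor's illustration (not upstream code): each selected (op, kernel key) pair
+ # above expands into one Kernel(...) registry entry whose lambda unboxes EValues
+ # from the stack, runs the kernel inside profiling/event-tracer scopes, then logs
+ # and writes back the outputs, shaped roughly like
+ #
+ #     Kernel(
+ #         "aten::add.out",
+ #         "<kernel key string>",  # this line is omitted for the 'default' key
+ #         [](<context arg>, EValue** stack) { /* unbox, call, write back */ }),
+
+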
+ def gen_unboxing(
+     *,
+     native_functions: Sequence[NativeFunction],
+     cpu_fm: FileManager,
+     selector: SelectiveBuilder,
+     use_aten_lib: bool,
+     kernel_index: ETKernelIndex,
+     manual_registration: bool,
+ ) -> None:
+     # Iterable type for write_sharded is a Tuple of (native_function, (kernel_key, metadata))
+     def key_func(
+         item: tuple[NativeFunction, tuple[ETKernelKey, BackendMetadata]]
+     ) -> str:
+         return item[0].root_name + ":" + item[1][0].to_native_string()
+
+     items: list[tuple[NativeFunction, tuple[ETKernelKey, BackendMetadata]]] = [
+         (native_function, (kernel_key, metadata))
+         for native_function in native_functions
+         for kernel_key, metadata in kernel_index.get_kernels(native_function).items()
+     ]
+
+     header = ["Functions.h" if use_aten_lib else "NativeFunctions.h"]
+     filename = (
+         "RegisterKernels.cpp"
+         if manual_registration
+         else "RegisterCodegenUnboxedKernels.cpp"
+     )
+     cpu_fm.write_sharded(
+         filename,
+         items,
+         key_fn=key_func,
+         env_callable=lambda unbox_kernel_entry: {
+             "unboxed_kernels": [
+                 ComputeCodegenUnboxedKernels(selector, use_aten_lib)(unbox_kernel_entry)
+             ],
+             "fn_header": header
+             if unbox_kernel_entry == items[0]
+             else [],  # Only write header once
+         },
+         num_shards=1,
+         sharded_keys={"unboxed_kernels", "fn_header"},
+     )
+
+
+ @with_native_function_and_index  # type: ignore[arg-type]
+ def compute_native_function_declaration(
+     g: NativeFunctionsGroup | NativeFunction, kernel_index: ETKernelIndex
+ ) -> list[str]:
+     assert isinstance(g, NativeFunction)
+     sig = ExecutorchCppSignature.from_native_function(f=g)
+     metadata_list = kernel_index.get_kernels(g).values()
+     if metadata_list is None:
+         return []
+
+     # for kernels in lean mode, we declare two versions, one with context and one without.
+     # In the end we will clean up the unused one.
+     def gen_decl(metadata: BackendMetadata, include_context: bool) -> str:
+         return f"{sig.decl(name=metadata.kernel, include_context=include_context)};"
+
+     return [
+         gen_decl(metadata, include_context)
+         for include_context in [False, True]
+         for metadata in metadata_list
+     ]
+
+
+ def gen_functions_declarations(
+     *,
+     native_functions: Sequence[NativeFunction],
+     kernel_index: ETKernelIndex,
+     selector: SelectiveBuilder,
+     use_aten_lib: bool,
+     custom_ops_native_functions: Sequence[NativeFunction] | None = None,
+ ) -> str:
+     """
+     Generates namespace-separated C++ function API inline declarations/definitions.
+     Native functions are grouped by namespace, and the generated code is wrapped inside
+     namespace blocks.
+
+     E.g., for `custom_1::foo.out` in the yaml file we will generate a C++ API as a symbol
+     in `torch::executor::custom_1::foo_out`. This way we avoid a symbol conflict when
+     another `custom_2::foo.out` is available.
+     """
+
+     # convert kernel index to BackendIndex. This is because we can't handle ETKernelIndex yet.
+     # TODO larryliu: evaluate if this code is still needed. If yes let it handle ETKernelIndex.
+
+     backend_index = kernel_index._to_backend_index()
+
+     ns_grouped_functions = defaultdict(list)
+     for native_function in native_functions:
+         ns_grouped_functions[native_function.namespace].append(native_function)
+     functions_declarations = ""
+     newline = "\n"
+     for namespace in ns_grouped_functions:
+         ns_helper = NamespaceHelper(
+             namespace_str=namespace,
+             entity_name="",
+             max_level=3,
+         )
+         declarations = list(
+             mapMaybe(
+                 ComputeFunction(
+                     static_dispatch_backend_indices=[backend_index],
+                     selector=selector,
+                     use_aten_lib=use_aten_lib,
+                     is_custom_op=lambda f: custom_ops_native_functions is not None
+                     and f in custom_ops_native_functions,
+                 ),
+                 ns_grouped_functions[namespace],
+             )
+         )
+         functions_declarations += f"""
+ {ns_helper.prologue}
+ {newline.join(declarations)}
+ {ns_helper.epilogue}
+ """
+     return functions_declarations
+
+
+ def get_ns_grouped_kernels(
+     *,
+     native_functions: Sequence[NativeFunction],
+     kernel_index: ETKernelIndex,
+     native_function_decl_gen: Callable[
+         [
+             NativeFunctionsGroup | NativeFunction,
+             ETKernelIndex,
+         ],
+         list[str],
+     ],
+ ) -> dict[str, list[str]]:
+     ns_grouped_kernels: dict[str, list[str]] = defaultdict(list)
+     for f in native_functions:
+         native_function_namespaces = set()
+         op_kernels = kernel_index.get_kernels(f)
+         for backend_metadata in op_kernels.values():
+             if backend_metadata:
+                 namespace = backend_metadata.cpp_namespace
+                 native_function_namespaces.add(namespace)
+             else:
+                 namespace = DEFAULT_KERNEL_NAMESPACE
+             assert (
+                 len(native_function_namespaces) <= 1
+             ), f"Codegen only supports one namespace per operator, got {native_function_namespaces}"
+             ns_grouped_kernels[namespace].extend(
+                 native_function_decl_gen(f, kernel_index)
+             )
+     return ns_grouped_kernels
+
+
+ def gen_headers(
+     *,
+     native_functions: Sequence[NativeFunction],
+     gen_custom_ops_header: bool,
+     custom_ops_native_functions: Sequence[NativeFunction],
+     selector: SelectiveBuilder,
+     kernel_index: ETKernelIndex,
+     cpu_fm: FileManager,
+     use_aten_lib: bool,
+ ) -> None:
+     """Generate headers.
+
+     Args:
+         native_functions (Sequence[NativeFunction]): a collection of NativeFunction for ATen ops.
+         gen_custom_ops_header (bool): whether we should generate CustomOpsNativeFunctions.h
+         custom_ops_native_functions (Sequence[NativeFunction]): a collection of NativeFunction for custom ops.
+         selector (SelectiveBuilder): for selective build.
+         kernel_index (ETKernelIndex): kernel collection
+         cpu_fm (FileManager): file manager that manages the output stream
+         use_aten_lib (bool): whether we are generating for PyTorch types or Executorch types.
+     """
+     aten_headers = ["#include <ATen/Functions.h>"]
+     backend_indices = {DispatchKey.CPU: kernel_index._to_backend_index()}
+     if gen_custom_ops_header:
+         cpu_fm.write_with_template(
+             "CustomOpsNativeFunctions.h",
+             "NativeFunctions.h",
+             lambda: {
+                 "nativeFunctions_declarations": get_native_function_declarations(
+                     grouped_native_functions=custom_ops_native_functions,
+                     backend_indices=backend_indices,
+                     native_function_decl_gen=dest.compute_native_function_declaration,
+                 ),
+                 "headers": [
+                     "#include <ATen/ATen.h>",
+                     "#include <torch/torch.h>",
+                 ],
+             },
+         )
+         aten_headers.append('#include "CustomOpsNativeFunctions.h"')
+     cpu_fm.write(
+         "Functions.h",
+         lambda: {
+             "static_dispatch_extra_headers": aten_headers
+             if use_aten_lib
+             else ['#include "NativeFunctions.h"'],
+             "Functions_declarations": gen_functions_declarations(
+                 native_functions=native_functions,
+                 kernel_index=kernel_index,
+                 selector=selector,
+                 use_aten_lib=use_aten_lib,
+                 custom_ops_native_functions=custom_ops_native_functions,
+             ),
+         },
+     )
+     cpu_fm.write(
+         "RegisterKernels.h",
+         lambda: {
+             "generated_comment": "@" + "generated by torchgen/gen_executorch.py",
+         },
+     )
+     headers = {
+         "headers": [
+             "#include <executorch/runtime/core/exec_aten/exec_aten.h> // at::Tensor etc.",
+             "#include <executorch/runtime/kernel/kernel_runtime_context.h>",
+         ],
+     }
+     if use_aten_lib:
+         headers["headers"].append("#include <executorch/codegen/macros.h> // TORCH_API")
+         cpu_fm.write(
+             "NativeFunctions.h",
+             lambda: dict(
+                 {
+                     "nativeFunctions_declarations": get_native_function_declarations(
+                         grouped_native_functions=native_functions,
+                         backend_indices=backend_indices,
+                         native_function_decl_gen=dest.compute_native_function_declaration,
+                     ),
+                 },
+                 **headers,
+             ),
+         )
+     else:
+         ns_grouped_kernels = get_ns_grouped_kernels(
+             native_functions=native_functions,
+             kernel_index=kernel_index,
+             native_function_decl_gen=compute_native_function_declaration,  # type: ignore[arg-type]
+         )
+         cpu_fm.write(
+             "NativeFunctions.h",
+             lambda: dict(
+                 {
+                     "nativeFunctions_declarations": get_native_function_declarations_from_ns_grouped_kernels(
+                         ns_grouped_kernels=ns_grouped_kernels,
+                     ),
+                 },
+                 **headers,
+             ),
+         )
+
+
+ def gen_custom_ops(
+     *,
+     native_functions: Sequence[NativeFunction],
+     selector: SelectiveBuilder,
+     kernel_index: ETKernelIndex,
+     cpu_fm: FileManager,
+     rocm: bool,
+ ) -> None:
+     dispatch_key = DispatchKey.CPU
+     (
+         anonymous_definition,
+         static_init_dispatch_registrations,
+     ) = gen_custom_ops_registration(
+         native_functions=native_functions,
+         selector=selector,
+         kernel_index=kernel_index,
+         rocm=rocm,
+     )
+     cpu_fm.write_with_template(
+         f"Register{dispatch_key}CustomOps.cpp",
+         "RegisterDispatchKeyCustomOps.cpp",
+         lambda: {
+             "ops_headers": '#include "CustomOpsNativeFunctions.h"',
+             "DispatchKey": dispatch_key,
+             "dispatch_namespace": dispatch_key.lower(),
+             "dispatch_namespaced_definitions": "",
+             "dispatch_anonymous_definitions": anonymous_definition,
+             "static_init_dispatch_registrations": static_init_dispatch_registrations,
+         },
+     )
+     cpu_fm.write_with_template(
+         f"Register{dispatch_key}Stub.cpp",
+         "RegisterDispatchKeyCustomOps.cpp",
+         lambda: {
+             "ops_headers": "",
+             "DispatchKey": dispatch_key,
+             "dispatch_namespace": dispatch_key.lower(),
+             "dispatch_namespaced_definitions": "",
+             "dispatch_anonymous_definitions": list(
+                 mapMaybe(ComputeNativeFunctionStub(), native_functions)
+             ),
+             "static_init_dispatch_registrations": static_init_dispatch_registrations,
+         },
+     )
+
+     (
+         aten_schema_registrations,
+         schema_registrations,
+     ) = get_native_function_schema_registrations(
+         native_functions=native_functions,
+         schema_selector=selector,
+     )
+     cpu_fm.write(
+         "RegisterSchema.cpp",
+         lambda: {
+             "schema_registrations": schema_registrations,
+             "aten_schema_registrations": aten_schema_registrations,
+         },
+     )
+
+
+ def translate_native_yaml(
+     tags_yaml_path: str,
+     aten_yaml_path: str,
+     native_yaml_path: str | None,
+     use_aten_lib: bool,
+     out_file: TextIO,
+ ) -> None:
+     """Translates the Executorch DSL dialect to use the same syntax as
+     native_functions.yaml. The major difference is that the Executorch DSL dialect
+     supports an "op" key, which refers to the operator name in native_functions.yaml.
+
+     For example, a functions.yaml may have the following entry:
+
+     - op: add.out
+       ...
+
+     It needs to be translated to the following:
+
+     - func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+       ...
+
+     We look in aten_yaml_path, find the operator schema for "add.out", and add it
+     to the original functions.yaml. We also add the required field "variants", which for
+     Executorch is always "function".
+
+     For ATen mode we don't have to do the translation because native_yaml_path is
+     the same as native_functions.yaml.
+
+     Args:
+         tags_yaml_path: Path to a tags.yaml file to satisfy codegen parsing.
+             It is not optional.
+         aten_yaml_path: Path to the ATen operator yaml file native_functions.yaml.
+         native_yaml_path: Path to a functions.yaml file to parse.
+             If the path does not exist in the filesystem, it is treated as an
+             empty file. If `custom_ops_yaml_path` exists, the contents of that
+             file are appended to the yaml input to be parsed.
+         use_aten_lib: We use this flag to determine if we want to generate native
+             functions. In ATen mode we should generate out= variants.
+         out_file: The IO object that we are writing into.
+     Returns:
+         None
+     """
+     if use_aten_lib:
+         with open(aten_yaml_path) as aten_yaml:
+             out_file.writelines(aten_yaml.readlines())
+         return
+
+     native_functions, persisted_fields = parse_et_yaml(
+         aten_yaml_path,
+         tags_yaml_path,
+         None,
+         skip_native_fns_gen=False,
+     )
+
+     func_to_scoped_name: dict[FunctionSchema, str] = {
+         f.func: f"{f.namespace}::{f.func.name}" for f in native_functions
+     }
+     op_to_scoped_name: dict[OperatorName, str] = {
+         func.name: name for func, name in func_to_scoped_name.items()
+     }
+
+     schema_dict = {name: str(func) for func, name in func_to_scoped_name.items()}
+     kernel_persist_dict: dict[str, dict[str, Any]] = {
+         op_to_scoped_name[op]: v for op, v in persisted_fields.items()
+     }
+
+     if (
+         not native_yaml_path
+         or not os.path.exists(native_yaml_path)
+         or os.stat(native_yaml_path).st_size == 0
+     ):
+         return
+     with open(native_yaml_path) as native_yaml:
+         native_es = yaml.load(native_yaml, Loader=LineLoader)
+     if not native_es:
+         return
+     for e in native_es:
+         assert isinstance(e.get("__line__"), int), e
+         loc = Location(native_yaml_path, e.pop("__line__"))
+         with context(lambda: f"in {loc}:\n  "):
+             if "variants" not in e:
+                 e["variants"] = "function"
+             if "func" in e:
+                 continue
+             assert isinstance(e.get("op"), str), e
+             opname = e.pop("op")
+             if "::" not in opname:
+                 opname = "aten::" + opname
+             assert opname in schema_dict
+             e["func"] = schema_dict.get(opname)
+
+             # Write out persisted kernel information
+             if opname in kernel_persist_dict:
+                 for k, v in kernel_persist_dict[opname].items():
+                     e[k] = v
+
+     yaml.dump(native_es, out_file, width=1000)
+
+
+ def parse_yaml(
+     path: str | None,
+     tags_yaml_path: str,
+     function_filter: Callable[[NativeFunction], bool],
+     skip_native_fns_gen: bool = False,
+ ) -> tuple[
+     list[NativeFunction],
+     dict[DispatchKey, dict[OperatorName, BackendMetadata]] | ETKernelIndex,
+ ]:
+     if path and os.path.exists(path) and os.stat(path).st_size > 0:
+         with open(path) as f:
+             es = yaml.load(f, Loader=LineLoader)
+
+         # Check for kernel index structure
+         kernel_index = (
+             parse_et_yaml_struct(es) if any("kernels" in e for e in es) else None
+         )
+
+         # Remove ET specific fields from entries for BC compatibility
+         for entry in es:
+             for field in ET_FIELDS:
+                 entry.pop(field, None)
+
+         parsed_yaml = parse_native_yaml(
+             path,
+             tags_yaml_path,
+             None,
+             skip_native_fns_gen=skip_native_fns_gen,
+             loaded_yaml=es,
+         )
+         native_functions = list(filter(function_filter, parsed_yaml.native_functions))
+         op_names = [f.func.name for f in native_functions]
+
+         # (1) Return ETKernelIndex if kernel index is present
+         if kernel_index is not None:
+             filtered_index = {
+                 op_name: kernel_mapping
+                 for op_name, kernel_mapping in kernel_index.index.items()
+                 if op_name in op_names
+             }
+             return native_functions, ETKernelIndex(index=filtered_index)
+
+         # (2) Return BackendIndices if kernel index is absent
+         def map_index(
+             m: dict[OperatorName, BackendMetadata]
+         ) -> dict[OperatorName, BackendMetadata]:
+             return {op: m[op] for op in m if op in op_names}
+
+         backend_indices = {
+             k: map_index(b.index) for (k, b) in parsed_yaml.backend_indices.items()
+         }
+
+         return native_functions, backend_indices
+     else:
+         return [], {}
+
+
+ def parse_yaml_files(
756
+ tags_yaml_path: str,
757
+ aten_yaml_path: str,
758
+ native_yaml_path: str | None,
759
+ custom_ops_yaml_path: str | None,
760
+ selector: SelectiveBuilder,
761
+ use_aten_lib: bool,
762
+ ) -> tuple[ETParsedYaml, ETParsedYaml | None]:
763
+ """Parses functions.yaml and custom_ops.yaml files.
764
+
765
+ Args:
766
+ tags_yaml_path: Path to a tags.yaml file to satisfy codegen parsing.
767
+ It is not optional.
768
+ aten_yaml_path: Path to ATen operator yaml file native_functions.yaml.
769
+ native_yaml_path: Path to a functions.yaml file to parse.
770
+ If the path does not exist in the filesystem, it is treated as an
771
+ empty file. If `custom_ops_yaml_path` exists, the contents of that
772
+ file are appended to the yaml input to be parsed.
773
+ custom_ops_yaml_path: Path to a custom_ops.yaml file to parse. If
774
+ the path does not exist in the filesystem, it is ignored.
775
+ selector: For selective build.
776
+ use_aten_lib: We use this flag to determine if we want to generate native
777
+ functions. In ATen mode we should generate out= variants.
778
+ Returns:
779
+ A tuple with two elements:
780
+ [0]: The parsed results of concatenating the contents of
781
+ `native_yaml_path` and `custom_ops_yaml_path`.
782
+ [1]: The parsed results of the contents of `custom_ops_yaml_path`, if
783
+ present. If not present, None.
784
+ """
785
+ import tempfile
786
+
787
+ # only include selected ops, this is because we want to avoid
788
+ def function_filter(f: NativeFunction) -> bool:
789
+ return selector.is_native_function_selected(f)
790
+
791
+ with tempfile.TemporaryDirectory() as tmpdirname:
792
+ translated_yaml_path = os.path.join(tmpdirname, "translated.yaml")
793
+ with open(translated_yaml_path, "w") as translated:
794
+ translate_native_yaml(
795
+ tags_yaml_path,
796
+ aten_yaml_path,
797
+ native_yaml_path,
798
+ use_aten_lib,
799
+ translated,
800
+ )
801
+
802
+ translated_functions, translated_indices = parse_yaml(
803
+ translated_yaml_path, tags_yaml_path, function_filter, not use_aten_lib
804
+ )
805
+ custom_ops_functions, custom_ops_indices = parse_yaml(
806
+ custom_ops_yaml_path, tags_yaml_path, function_filter, True
807
+ )
808
+
809
+ # Convert BackendIndices to ETKernelIndex
810
+ if not isinstance(translated_indices, ETKernelIndex):
811
+ translated_indices = ETKernelIndex.from_backend_indices(translated_indices)
812
+ if not isinstance(custom_ops_indices, ETKernelIndex):
813
+ custom_ops_indices = ETKernelIndex.from_backend_indices(custom_ops_indices)
814
+
815
+ combined_functions = translated_functions + custom_ops_functions
816
+ combined_kernel_index = ETKernelIndex.merge_indices(
817
+ translated_indices, custom_ops_indices
818
+ )
819
+ combined_yaml = ETParsedYaml(combined_functions, combined_kernel_index)
820
+ custom_ops_parsed_yaml = ETParsedYaml(custom_ops_functions, custom_ops_indices)
821
+
822
+ return combined_yaml, custom_ops_parsed_yaml
823
+
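A minimal sketch of driving parse_yaml_files directly, assuming a no-op selector and placeholder yaml paths (both are illustrative, not files this diff ships):

    from torchgen.selective_build.selector import SelectiveBuilder

    selector = SelectiveBuilder.get_nop_selector()  # selects every operator
    combined, custom_ops = parse_yaml_files(
        tags_yaml_path="aten/src/ATen/native/tags.yaml",  # placeholder path
        aten_yaml_path="aten/src/ATen/native/native_functions.yaml",
        native_yaml_path="functions.yaml",
        custom_ops_yaml_path=None,  # no custom ops in this sketch
        selector=selector,
        use_aten_lib=False,
    )
    # combined.native_functions holds the selected ops; combined.kernel_index
    # is an ETKernelIndex merging the translated and custom-op indices.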
824
+
825
+ def main() -> None:
826
+ parser = argparse.ArgumentParser(description="Generate operator source files")
827
+ # Although we don't refer to --source-path directly, make_file_manager()
828
+ # expects it to point to a directory that contains a templates/ subdirectory
829
+ # containing the file templates.
830
+ parser.add_argument(
831
+ "-s",
832
+ "--source-path",
833
+ help="path to source directory for kernel templates",
834
+ )
835
+ parser.add_argument(
836
+ "--functions-yaml-path",
837
+ "--functions_yaml_path",
838
+ help="path to the functions.yaml file to use. Optional, but at least "
839
+ "one of --functions-yaml-path and --custom-ops-yaml-path must be "
840
+ "specified.",
841
+ )
842
+ parser.add_argument(
843
+ "--custom-ops-yaml-path",
844
+ "--custom_ops_yaml_path",
845
+ help="path to the custom_ops.yaml file to use. Optional, but at least "
846
+ "one of --functions-yaml-path and --custom-ops-yaml-path must be "
847
+ "specified.",
848
+ )
849
+ parser.add_argument(
850
+ "--aten-yaml-path",
851
+ "--aten_yaml_path",
852
+ help="path to native_functions.yaml file.",
853
+ )
854
+ # Note that make_file_manager() also looks at --install-dir.
855
+ parser.add_argument(
856
+ "-d",
857
+ "--install-dir",
858
+ "--install_dir",
859
+ help="output directory",
860
+ default="build/generated",
861
+ )
862
+ parser.add_argument(
863
+ "-o",
864
+ "--output-dependencies",
865
+ help="output a list of dependencies into the given file and exit",
866
+ )
867
+ # Although we don't refer to --dry-run directly, make_file_manager() looks
868
+ # for it.
869
+ parser.add_argument(
870
+ "--dry-run",
871
+ action="store_true",
872
+ help="run without writing any files (still updates outputs)",
873
+ )
874
+ parser.add_argument(
875
+ "--static-dispatch-backend",
876
+ "--static_dispatch_backend",
877
+ nargs="*",
878
+ help="generate static dispatch code for the specific backend (if set)",
879
+ )
880
+ parser.add_argument(
881
+ "--op-registration-whitelist",
882
+ "--op_registration_whitelist",
883
+ nargs="*",
884
+ help="filter op registrations by the whitelist (if set); "
885
+ "each item is `namespace`::`operator name` without overload name; "
886
+ "e.g.: aten::empty aten::conv2d ...",
887
+ )
888
+ parser.add_argument(
889
+ "--op-selection-yaml-path",
890
+ "--op_selection_yaml_path",
891
+ help="Provide a path to the operator selection (for custom build) YAML "
892
+ "that contains the information about the set of selected operators "
893
+ "and their categories (training, ...). Each operator is either a "
894
+ "full operator name with overload or just a bare operator name. "
895
+ "The operator names also contain the namespace prefix (e.g. aten::)",
896
+ )
897
+ parser.add_argument(
898
+ "--tags-path",
899
+ help="Path to tags.yaml. Required by yaml parsing in codegen system.",
900
+ )
901
+ parser.add_argument(
902
+ "--rocm",
903
+ action="store_true",
904
+ help="reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly",
905
+ )
906
+ parser.add_argument(
907
+ "--use-aten-lib",
908
+ "--use_aten_lib",
909
+ action="store_true",
910
+ help="a boolean flag to indicate whether we use ATen kernels or not, in the future this flag will be per "
911
+ "operator",
912
+ )
913
+ parser.add_argument(
914
+ "--manual_registration",
915
+ "--manual-registration",
916
+ action="store_true",
917
+ help="a boolean flag to indicate whether we want to manually call"
918
+ "register_kernels() or rely on static init. ",
919
+ )
920
+ parser.add_argument(
921
+ "--generate",
922
+ type=str,
923
+ nargs="*",
924
+ choices=["headers", "sources"],
925
+ default=["headers", "sources"],
926
+ help="Generate only a subset of files",
927
+ )
928
+ options = parser.parse_args()
929
+ assert options.tags_path, "tags.yaml is required by codegen yaml parsing."
930
+
931
+ selector = get_custom_build_selector(
932
+ options.op_registration_whitelist,
933
+ options.op_selection_yaml_path,
934
+ )
935
+
936
+ parsed_yaml, custom_ops_parsed_yaml = parse_yaml_files(
937
+ aten_yaml_path=options.aten_yaml_path,
938
+ tags_yaml_path=options.tags_path,
939
+ native_yaml_path=options.functions_yaml_path,
940
+ custom_ops_yaml_path=options.custom_ops_yaml_path,
941
+ selector=selector,
942
+ use_aten_lib=options.use_aten_lib,
943
+ )
944
+ native_functions, kernel_index = (
945
+ parsed_yaml.native_functions,
946
+ parsed_yaml.kernel_index,
947
+ )
948
+ custom_ops_native_functions = (
949
+ custom_ops_parsed_yaml.native_functions if custom_ops_parsed_yaml else []
950
+ )
951
+
952
+ cpu_fm = make_file_manager(options=options)
953
+
954
+ if "headers" in options.generate:
955
+ # generate CustomOpsNativeFunctions.h when custom_ops.yaml is present, to match the build system.
956
+ gen_headers(
957
+ native_functions=native_functions,
958
+ gen_custom_ops_header=options.custom_ops_yaml_path,
959
+ custom_ops_native_functions=custom_ops_native_functions,
960
+ selector=selector,
961
+ kernel_index=kernel_index,
962
+ cpu_fm=cpu_fm,
963
+ use_aten_lib=options.use_aten_lib,
964
+ )
965
+
966
+ if "sources" in options.generate:
967
+ gen_unboxing(
968
+ native_functions=native_functions,
969
+ cpu_fm=cpu_fm,
970
+ selector=selector,
971
+ use_aten_lib=options.use_aten_lib,
972
+ kernel_index=kernel_index,
973
+ manual_registration=options.manual_registration,
974
+ )
975
+ if custom_ops_native_functions:
976
+ gen_custom_ops(
977
+ native_functions=custom_ops_native_functions,
978
+ selector=selector,
979
+ kernel_index=kernel_index,
980
+ cpu_fm=cpu_fm,
981
+ rocm=options.rocm,
982
+ )
983
+
984
+ if options.output_dependencies:
985
+ depfile_path = Path(options.output_dependencies).resolve()
986
+ depfile_name = depfile_path.name
987
+ depfile_stem = depfile_path.stem
988
+
989
+ for fm, prefix in [
990
+ (cpu_fm, ""),
991
+ ]:
992
+ varname = prefix + depfile_stem
993
+ path = depfile_path.parent / (prefix + depfile_name)
994
+ fm.write_outputs(varname, str(path))
995
+
996
+
997
+ if __name__ == "__main__":
998
+ main()
vllm/lib/python3.10/site-packages/torchgen/gen_functionalization_type.py ADDED
@@ -0,0 +1,882 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+ from typing import Callable, TYPE_CHECKING
5
+
6
+ from torchgen.api import cpp, dispatcher
7
+ from torchgen.api.translate import translate
8
+ from torchgen.api.types import (
9
+ BaseCType,
10
+ Binding,
11
+ CType,
12
+ DispatcherSignature,
13
+ FunctionalizationLambda,
14
+ iTensorListRefT,
15
+ NativeSignature,
16
+ OptionalCType,
17
+ optionalSymIntArrayRefT,
18
+ symIntArrayRefT,
19
+ SymIntT,
20
+ tensorListT,
21
+ tensorT,
22
+ VectorCType,
23
+ ViewInverseSignature,
24
+ )
25
+ from torchgen.context import (
26
+ method_with_native_function,
27
+ native_function_manager,
28
+ with_native_function,
29
+ with_native_function_and,
30
+ )
31
+ from torchgen.model import (
32
+ Argument,
33
+ BackendIndex,
34
+ BaseTy,
35
+ BaseType,
36
+ FunctionSchema,
37
+ ListType,
38
+ NativeFunction,
39
+ NativeFunctionsGroup,
40
+ NativeFunctionsViewGroup,
41
+ Return,
42
+ SchemaKind,
43
+ SelfArgument,
44
+ TensorOptionsArguments,
45
+ )
46
+ from torchgen.native_function_generation import (
47
+ INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY,
48
+ MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT,
49
+ OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY,
50
+ )
51
+ from torchgen.utils import dataclass_repr
52
+
53
+
54
+ if TYPE_CHECKING:
55
+ from torchgen.selective_build.selector import SelectiveBuilder
56
+
57
+
58
+ # Note: [Mutable Ops Not Using Functionalization]
59
+ # Ops in this list currently do not work with functionalization and should be fixed.
60
+ MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION = (
61
+ OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY
62
+ + MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
63
+ + INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY
64
+ + [
65
+ # It will be BC-breaking, but we should fix their schemas.
66
+ # should be inplace?
67
+ "record_stream",
68
+ # See Note [resize_ in Functionalization]
69
+ "resize_",
70
+ "resize_as_",
71
+ # This function is used for testing purposes only.
72
+ "_fill_mem_eff_dropout_mask_",
73
+ ]
74
+ )
75
+
76
+ # This file contains codegen that relates to the functionalization pass.
77
+ # It includes:
78
+ # - gen_functionalization_definition
79
+ # Generates dispatcher kernel definitions for the functionalization pass.
80
+ # - gen_functionalization_registration
81
+ # Generates dispatcher kernel registrations for the functionalization pass.
82
+ # - gen_functionalization_view_inverse_declaration
83
+ # Generates a declaration for an "inverse view", for every view op
84
+ # that is needed in functionalization. We manually implement their definitions.
85
+ # - gen_composite_view_copy_kernel
86
+ # Generates view_copy() composite kernels for all view_copy operators.
87
+
88
+
89
+ # Generates the body of the default composite C++ kernel for a {view}_copy NativeFunction
90
+ # See Note [view_copy NativeFunctions]
91
+ @dataclass(frozen=True)
92
+ class GenCompositeViewCopyKernel:
93
+ backend_index: BackendIndex
94
+
95
+ @method_with_native_function
96
+ def __call__(self, g: NativeFunctionsViewGroup) -> str | None:
97
+ if g.view_copy is None:
98
+ return None
99
+ elif g.view_copy.func.name.name.base != f"{g.view.func.name.name}_copy":
100
+ # If the view_copy doesn't match the standard naming scheme of <op>_copy,
101
+ # assume it already exists and doesn't need to be generated.
102
+ # Example: slice_inverse() with the copy variant named slice_scatter()
103
+ # instead of slice_inverse_copy()
104
+ return None
105
+
106
+ metadata = self.backend_index.get_kernel(g.view_copy)
107
+ assert metadata is not None
108
+
109
+ # We can make view_copy work in more cases by using reshape()
110
+ # when a normal view call would ordinarily fail.
111
+ # This also makes LTC more efficient, because LTC backends don't need to include
112
+ # clone() calls in their graph (which is normally needed by reshape).
113
+ if str(g.view_copy.func.name) == "view_copy":
114
+ assert metadata.kernel == "view_copy_symint"
115
+ return """\
116
+ at::Tensor view_copy_symint(const at::Tensor & self, at::SymIntArrayRef size) {
117
+ c10::SymDimVector shape = infer_size_dv(size, self.sym_numel());
118
+ if (!at::detail::computeStride(self.sym_sizes(), self.sym_strides(), shape).has_value()) {
119
+ return self.reshape_symint(size);
120
+ } else {
121
+ auto output = at::_ops::view::call(self, size);
122
+ return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
123
+ }
124
+ }
125
+ """
126
+ # view_copy is a native signature, since we're generating an at::native:: kernel
127
+ # Functionalization always operates on symints though
128
+ view_copy_sig = NativeSignature(
129
+ g.view_copy.func, symint=metadata.supports_symint()
130
+ )
131
+
132
+ # view is a dispatcher signature, since we're calling into the at::_ops API
133
+ view_sig = DispatcherSignature(g.view.func)
134
+
135
+ view_api_name = g.view.func.name.unambiguous_name()
136
+ exprs = ", ".join(
137
+ [e.expr for e in translate(view_copy_sig.arguments(), view_sig.arguments())]
138
+ )
139
+
140
+ # view ops today always return either a Tensor or a list of Tensors
141
+ assert len(g.view.func.returns) == 1
142
+ assert g.view.func.returns[0].type == BaseType(
143
+ BaseTy.Tensor
144
+ ) or g.view.func.returns[0].type == ListType(BaseType(BaseTy.Tensor), None)
145
+
146
+ if g.view.func.returns[0].type == BaseType(BaseTy.Tensor):
147
+ return_cloned_output = """\
148
+ return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);"""
149
+ else:
150
+ # If the return type is a list, we need to clone each tensor in the list.
151
+ return_cloned_output = f"""\
152
+ {view_copy_sig.returns_type().cpp_type()} out_clone;
153
+ for (const auto i : c10::irange(output.size())) {{
154
+ out_clone.push_back(output[i].clone(/*memory_format=*/at::MemoryFormat::Contiguous));
155
+ }}
156
+ return out_clone;"""
157
+
158
+ # The default generated composite kernel for {view}_copy() operators just clones
159
+ # the input tensor, and runs the underlying view on the clone.
160
+ return f"""
161
+ {view_copy_sig.defn(name=metadata.kernel)} {{
162
+ auto output = at::_ops::{view_api_name}::call({exprs});
163
+ {return_cloned_output}
164
+ }}
165
+ """
166
+
167
+
168
+ def return_str(rets: tuple[Return, ...], names: list[str]) -> str:
169
+ assert len(rets) == len(names)
170
+ if len(rets) == 0:
171
+ return ""
172
+ elif len(rets) == 1:
173
+ return f"return {names[0]};"
174
+ else:
175
+ return f"return {dispatcher.returns_type(rets).cpp_type()}({', '.join(names)});"
176
+
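The zero- and single-return branches above never inspect the Return objects themselves, so they can be sanity-checked without constructing any torchgen types; a quick sketch:

    # Follows directly from the branches of return_str.
    assert return_str((), []) == ""
    # With exactly one return, only the name is used:
    # return_str((r,), ["out"]) == "return out;"  for any Return r
    # The multi-return branch additionally formats dispatcher.returns_type(rets).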
177
+
178
+ def modifies_arguments(f: NativeFunction) -> bool:
179
+ return any(
180
+ a.annotation is not None and a.annotation.is_write
181
+ for a in f.func.arguments.flat_all
182
+ )
183
+
184
+
185
+ def wrapper_name(func: FunctionSchema) -> str:
186
+ if func.name.overload_name:
187
+ return f"{cpp.name(func)}_{func.name.overload_name}"
188
+ else:
189
+ return cpp.name(func)
190
+
191
+
192
+ def is_tensor_like(a: Argument | TensorOptionsArguments | SelfArgument) -> bool:
193
+ return isinstance(a, SelfArgument) or (
194
+ isinstance(a, Argument) and a.type.is_tensor_like()
195
+ )
196
+
197
+
198
+ # We need to wrap / unwrap various arguments from the op in the functionalization kernels.
199
+ # Some op schemas include non-owning types though (like TensorList),
200
+ # and when we unwrap them, we expect to get out an owning type!
201
+ # We also return a lambda that tells you how to convert the non-owning type argument into the owning type.
202
+ def get_owning_type(t: CType) -> tuple[CType, Callable[[str], str]]:
203
+ if t == BaseCType(tensorListT):
204
+ return VectorCType(BaseCType(tensorT)), lambda x: f"{x}.vec()"
205
+ if t == BaseCType(iTensorListRefT):
206
+ return VectorCType(BaseCType(tensorT)), lambda x: f"{{{x}.begin(), {x}.end()}}"
207
+ # There are technically other non-owning types out there (like IntArrayRef),
208
+ # but functionalization only actually cares about the ones involving tensors.
209
+ return t, lambda x: x
210
+
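A short illustration of get_owning_type on the two non-owning containers it special-cases, derived from the branches above (tensorListT, iTensorListRefT, and friends are already imported at the top of this file):

    owning, convert = get_owning_type(BaseCType(tensorListT))
    # owning == VectorCType(BaseCType(tensorT)); convert("xs") == "xs.vec()"

    owning, convert = get_owning_type(BaseCType(iTensorListRefT))
    # convert("xs") == "{xs.begin(), xs.end()}"

    owning, convert = get_owning_type(BaseCType(tensorT))
    # anything else passes through: owning == BaseCType(tensorT), convert("x") == "x"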
211
+
212
+ # unwraps all tensor-like arguments, returning:
213
+ # (1) a string containing all of the logic that does the unwrapping
214
+ # (2) a context, to be used by translate(), with all of the relevant bindings.
215
+ def unwrap_tensor_args(
216
+ sig: DispatcherSignature, *, is_view_op: bool
217
+ ) -> tuple[str, list[Binding]]:
218
+ context: list[Binding] = []
219
+ unwrapped_tensor_args: list[str] = []
220
+ for arg in sig.arguments():
221
+ if is_tensor_like(arg.argument):
222
+ # for tensor inputs, we want to unwrap them before passing them into the redispatch calls.
223
+ unwrapped_name = f"{arg.name}_"
224
+ # For most ops, the functionalization needs to sync any pending updates on the input tensors
225
+ # before calling the operator, since otherwise the operator will act on stale data.
226
+ # For view ops though, we can continue to defer syncing until the tensor is used by
227
+ # a non-view operator.
228
+ maybe_sync_input = (
229
+ "" if is_view_op else f"at::functionalization::impl::sync({arg.name});"
230
+ )
231
+ unwrapped_type, conversion_fn = get_owning_type(
232
+ arg.nctype.remove_const_ref().type
233
+ )
234
+ unwrapped_tensor_args.append(
235
+ f"""
236
+ {unwrapped_type.cpp_type()} {unwrapped_name};
237
+ if (at::functionalization::impl::isFunctionalTensor({arg.name})) {{
238
+ {maybe_sync_input}
239
+ {unwrapped_name} = at::functionalization::impl::from_functional_tensor({arg.name});
240
+ }} else {{
241
+ {unwrapped_name} = {conversion_fn(arg.name)};
242
+ }}"""
243
+ )
244
+ context.append(arg.with_name(unwrapped_name))
245
+ else:
246
+ # for non-tensor inputs, we want to pass them directly into the redispatch calls.
247
+ context.append(arg)
248
+ unwrap_tensor_args_str = "\n ".join(unwrapped_tensor_args)
249
+ return unwrap_tensor_args_str, context
250
+
251
+
252
+ # converts all tensor-like arguments to meta tensors, which are used to compute stride info. Returns:
253
+ # (1) a string containing all of the logic that does the conversions.
254
+ # (2) a context, to be used by translate(), with all of the relevant bindings.
255
+ def convert_to_meta_tensors(sig: DispatcherSignature) -> tuple[str, list[Binding]]:
256
+ context: list[Binding] = []
257
+ unwrapped_tensor_args: list[str] = []
258
+ for arg in sig.arguments():
259
+ if is_tensor_like(arg.argument):
260
+ # for tensor inputs, we want to unwrap them before passing them into the redispatch calls.
261
+ a_ = arg.name
262
+ unwrapped_name = f"{arg.name}_meta"
263
+ unwrapped_tensor_args.append(f"auto {unwrapped_name} = to_meta({a_});")
264
+ context.append(arg.with_name(unwrapped_name))
265
+ else:
266
+ # for non-tensor inputs, we want to pass them directly into the redispatch calls.
267
+ context.append(arg)
268
+ unwrap_tensor_args_str = "\n ".join(unwrapped_tensor_args)
269
+ return unwrap_tensor_args_str, context
270
+
271
+
272
+ # The functionalization codegen currently expects view op schemas to have this form:
273
+ # foo(Tensor(a), ...) -> Tensor(a) (e.g. transpose)
274
+ # foo(Tensor(a!), ...) -> Tensor(a!) (e.g. transpose_)
275
+ def assert_view_op_properties(func: FunctionSchema) -> None:
276
+ def is_alias(a: Argument) -> bool:
277
+ return a.annotation is not None
278
+
279
+ args = func.arguments.flat_non_out
280
+ # The first argument is a tensor with alias semantics (annotations)
281
+ assert len(args) > 0 and args[0].type == BaseType(
282
+ BaseTy.Tensor
283
+ ), f"""In the functionalization codegen, we expect the first argument of every view operator to be a tensor,
284
+ but found an argument of type {str(args[0].type)} for operator: {str(func.name)}."""
285
+ # No other arguments have aliasing semantics
286
+ assert is_alias(args[0]) and not any(
287
+ is_alias(a) for a in args[1:]
288
+ ), """In the functionalization codegen, we expect the first argument of every view operator to alias the output.
289
+ View operators with multiple aliasing inputs aren't supported yet. Found an operator that doesn't satisfy this constraint."""
290
+
291
+
292
+ # One-liner expression for checking if an expression expr of type type has any
293
+ # symbolic values.
294
+ def emit_expr_has_symbolic_values(expr: str, type: CType) -> str:
295
+ if type == BaseCType(SymIntT):
296
+ return f"{expr}.is_symbolic()"
297
+
298
+ if isinstance(type, OptionalCType):
299
+ innerexpr = f"(*{expr})"
300
+ return f"{expr}.has_value() ? {emit_expr_has_symbolic_values(innerexpr, type.elem)} : false"
301
+
302
+ if type == BaseCType(optionalSymIntArrayRefT):
303
+ return emit_expr_has_symbolic_values(
304
+ expr, OptionalCType(BaseCType(symIntArrayRefT))
305
+ )
306
+
307
+ if type in (BaseCType(symIntArrayRefT), VectorCType(BaseCType(SymIntT))):
308
+ argname = "arg"
309
+ lambda_check = emit_expr_has_symbolic_values(argname, BaseCType(SymIntT))
310
+ return (
311
+ "std::any_of("
312
+ f"{expr}.begin(), {expr}.end(), "
313
+ f"[=](auto& {argname}) {{ return {lambda_check}; }})"
314
+ )
315
+
316
+ raise ValueError(
317
+ "unsupported type for has_symbolic_values check. "
318
+ "It should be a SymInt or a collection of those. "
319
+ f"Got: {type.cpp_type()}"
320
+ )
321
+
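Derived from the branches above, the optional case composes the scalar check behind a has_value() guard; a self-contained check (all of these types are imported at the top of this file):

    expr = emit_expr_has_symbolic_values("stride", OptionalCType(BaseCType(SymIntT)))
    assert expr == "stride.has_value() ? (*stride).is_symbolic() : false"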
322
+
323
+ # Detects whether any of the SymInt arguments are, in fact, symbolic values.
324
+ # This is used in the constructor of ViewMeta.
325
+ def emit_has_symbolic_inputs(sig: DispatcherSignature) -> tuple[str, str]:
326
+ name = "has_symbolic_inputs"
327
+ statements = [
328
+ f"{name} = {name} | ({emit_expr_has_symbolic_values(binding.name, binding.nctype.type)});"
329
+ for binding in sig.arguments()
330
+ if (
331
+ isinstance(binding.argument, Argument)
332
+ and binding.argument.type.is_symint_like()
333
+ )
334
+ ]
335
+ body = "\n ".join(statements)
336
+ return (
337
+ name,
338
+ f"""
339
+ bool {name} = false;
340
+ {body}""",
341
+ )
342
+
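A hedged end-to-end illustration using torchgen's public entry points; the schema string below is only an example and carries no special meaning in this file:

    from torchgen.model import FunctionSchema

    schema = FunctionSchema.parse(
        "narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor"
    )
    sig = DispatcherSignature.from_schema(schema)
    name, check = emit_has_symbolic_inputs(sig)
    # name == "has_symbolic_inputs"; `check` declares the bool and ORs in
    # start.is_symbolic() and length.is_symbolic() for the two SymInt args.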
343
+
344
+ # Generates the Functionalization kernel for:
345
+ # - ops that create aliases (e.g. transpose())
346
+ # - ops that are views AND mutations (e.g. transpose_())
347
+ def emit_view_functionalization_body(
348
+ g: NativeFunctionsViewGroup, *, view_inplace: bool
349
+ ) -> str:
350
+ if view_inplace:
351
+ # This op is both an inplace op AND a view op.
352
+ # See Note [Functionalization Pass - Inplace View Ops] for details.
353
+ # I currently have the view meta call into the out-of-place variant of the view, to avoid
354
+ # having to define an extra ~20 inplace {view}_inverse_ functions.
355
+ # Most view ops don't belong to a NativeFunctionsGroup, because we don't define out= variants for view ops.
356
+ # I'm assuming that every inplace-view op has a corresponding out-of-place view op,
357
+ # with the same name but the trailing underscore removed.
358
+ # This is currently asserted at parse time in gen.py (see error_check_native_functions).
359
+ assert g.view_inplace is not None
360
+ f = g.view_inplace
361
+ else:
362
+ f = g.view
363
+
364
+ assert g.view_copy is not None
365
+ with native_function_manager(f):
366
+ call_sig = DispatcherSignature.from_schema(g.view_copy.func)
367
+
368
+ # the "view_copy" op name that the functionalization kernels need to call
369
+ api_name = g.view_copy.func.name.unambiguous_name()
370
+ # Sometimes the functionalization pass needs to no-op (e.g. if it was passed non-functional tensors)
371
+ # "no-op"ing in this context is just redispatching to the original op.
372
+ noop_api_name = f.func.name.unambiguous_name()
373
+
374
+ dispatcher_sig = DispatcherSignature.from_schema(f.func)
375
+ assert_view_op_properties(f.func)
376
+ view_tensor_name = dispatcher_sig.arguments()[0].name
377
+
378
+ return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type()
379
+
380
+ unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(
381
+ dispatcher_sig, is_view_op=True
382
+ )
383
+ view_redispatch_args = [
384
+ e.expr
385
+ for e in translate(unwrapped_args_ctx, call_sig.arguments(), method=False)
386
+ ]
387
+
388
+ forward_lambda = FunctionalizationLambda.from_func(g, is_reverse=False)
389
+ reverse_lambda = FunctionalizationLambda.from_func(g, is_reverse=True)
390
+
391
+ # The meta API call should use the same arguments, but convert all tensors to meta tensors first.
392
+ meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
393
+ meta_call_args = [
394
+ e.expr for e in translate(meta_call_ctx, call_sig.arguments(), method=False)
395
+ ]
396
+
397
+ (
398
+ symbolic_inputs_varname,
399
+ symbolic_inputs_check,
400
+ ) = emit_has_symbolic_inputs(call_sig)
401
+
402
+ if "inplace_view" in f.tags:
403
+ # See Note [Functionalization Pass - Inplace View Ops] for more details
404
+ return f"""
405
+ {dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
406
+ if (!at::functionalization::impl::isFunctionalTensor({view_tensor_name})) {{
407
+ // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
408
+ {unwrap_tensor_args_str}
409
+ at::AutoDispatchSkipFunctionalize guard;
410
+ return at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
411
+ }}
412
+ auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
413
+ auto inverse_return_mode = (
414
+ reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
415
+ : at::functionalization::InverseReturnMode::NeverView
416
+ );
417
+ {symbolic_inputs_check}
418
+ at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
419
+ {forward_lambda.decl()} {{
420
+ if (reapply_views) {{
421
+ return {forward_lambda.inner_call(reapply_views=True)}
422
+ }} else {{
423
+ return {forward_lambda.inner_call(reapply_views=False)}
424
+ }}
425
+ }},
426
+ {reverse_lambda.decl()} {{
427
+ return {reverse_lambda.inner_call()}
428
+ }},
429
+ /*has_symbolic_inputs=*/{symbolic_inputs_varname}
430
+ );
431
+ auto compute_reference_meta =
432
+ {view_tensor_name}.key_set().has_backend(c10::BackendComponent::XLABit) ||
433
+ {view_tensor_name}.key_set().has_backend(c10::BackendComponent::LazyBit);
434
+ {return_type} reference_tensor_output;
435
+ if (compute_reference_meta) {{
436
+ {meta_conversion_str}
437
+ at::AutoDispatchSkipFunctionalize func_guard;
438
+ c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
439
+ reference_tensor_output = at::_ops::{noop_api_name}::call({', '.join(meta_call_args)});
440
+ }}
441
+ // This function adds the above view meta to the current tensor and replays them off the base,
442
+ // mutating the size/stride info of the current FunctionalTensorWrapper.
443
+ // Because of this, we need to make sure to run the reference shape function above,
444
+ # BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides)
445
+ at::functionalization::impl::mutate_view_meta({view_tensor_name}, view_meta);
446
+ // See Note [Propagating strides in the functionalization pass]
447
+ // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
448
+ // on a reference implementation here (instead of relying on the output from the forward lambda
449
+ // having the correct stride info)
450
+ if (compute_reference_meta) {{
451
+ at::functionalization::impl::set_sizes_strides_offset({view_tensor_name}, reference_tensor_output);
452
+ }}
453
+ return {view_tensor_name};
454
+ }}
455
+ """
456
+
457
+ else:
458
+ is_multi_output_view = isinstance(f.func.returns[0].type, ListType)
459
+ return f"""
460
+ {dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
461
+ {unwrap_tensor_args_str}
462
+ if (!at::functionalization::impl::isFunctionalTensor({view_tensor_name})) {{
463
+ // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
464
+ at::AutoDispatchSkipFunctionalize guard;
465
+ return at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
466
+ }}
467
+ auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
468
+ auto inverse_return_mode = (
469
+ reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
470
+ : at::functionalization::InverseReturnMode::NeverView
471
+ );
472
+ auto compute_reference_meta =
473
+ {view_tensor_name}.key_set().has_backend(c10::BackendComponent::XLABit) ||
474
+ {view_tensor_name}.key_set().has_backend(c10::BackendComponent::LazyBit);
475
+ {return_type} reference_tensor_output;
476
+ if (compute_reference_meta) {{
477
+ {meta_conversion_str}
478
+ at::AutoDispatchSkipFunctionalize func_guard;
479
+ c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
480
+ reference_tensor_output = at::_ops::{noop_api_name}::call({', '.join(meta_call_args)});
481
+ }}
482
+ {return_type} tmp_output;
483
+ {{
484
+ at::AutoDispatchSkipFunctionalize guard;
485
+ if (reapply_views) {{
486
+ tmp_output = at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
487
+ }} else {{
488
+ tmp_output = at::_ops::{api_name}::call({', '.join(view_redispatch_args)});
489
+ }}
490
+ }}
491
+ {symbolic_inputs_check}
492
+ at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
493
+ {forward_lambda.decl()} {{
494
+ if (reapply_views) {{
495
+ return {forward_lambda.inner_call(reapply_views=True)}
496
+ }} else {{
497
+ return {forward_lambda.inner_call(reapply_views=False)}
498
+ }}
499
+ }},
500
+ {reverse_lambda.decl()} {{
501
+ return {reverse_lambda.inner_call()}
502
+ }},
503
+ /*has_symbolic_inputs=*/{symbolic_inputs_varname},
504
+ /*is_multi_output=*/{str(is_multi_output_view).lower()},
505
+ /*is_as_strided=*/{str(str(f.func.name) == 'as_strided').lower()}
506
+ );
507
+ auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, {view_tensor_name}, view_meta);
508
+ // See Note [Propagating strides in the functionalization pass]
509
+ if (compute_reference_meta) {{
510
+ at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
511
+ }}
512
+ return out;
513
+ }}
514
+ """
515
+
516
+
517
+ def maybe_create_output(f: NativeFunction, var_name: str) -> str:
518
+ if len(f.func.returns) == 0:
519
+ return ""
520
+ return_type = dispatcher.returns_type(f.func.returns).remove_const_ref().cpp_type()
521
+ return f"{return_type} {var_name} = "
522
+
523
+
524
+ # Given a NativeFunction, and a variable name corresponding to the output of redispatching on the function,
525
+ # this returns two lists of names, consisting of:
526
+ # - the names of returns corresponding to the original (mutable) inputs of the outer function
527
+ # - the names of returns corresponding to the (immutable) outputs of the inner redispatched function
528
+ def get_mutable_redispatch_return_names(
529
+ f: NativeFunction, inner_return_var: str
530
+ ) -> tuple[list[str], list[str]]:
531
+ aliased_returns = []
532
+ non_aliased_returns = []
533
+ for i, name in enumerate(f.func.aliased_return_names()):
534
+ if name is not None:
535
+ aliased_returns.append(name)
536
+ else:
537
+ non_aliased_returns.append(
538
+ inner_return_var
539
+ if len(f.func.returns) == 1
540
+ else f"std::get<{i}>({inner_return_var})"
541
+ )
542
+ return aliased_returns, non_aliased_returns
543
+
544
+
545
+ # When functionalization "no-op's" and redispatches on a mutable operator, we need to take care so that:
546
+ # - For fresh outputs, we return the result of the redispatch (without wrapping outputs)
547
+ # - For outputs that were aliased to inputs, we return the inputs directly (since some of them might have been wrapped)
548
+ def return_from_mutable_noop_redispatch(
549
+ f: NativeFunction, inner_return_var: str
550
+ ) -> str:
551
+ aliased, non_aliased = get_mutable_redispatch_return_names(f, inner_return_var)
552
+ # Just get all of the return names, and immediately return them
553
+ return return_str(f.func.returns, aliased + non_aliased)
554
+
555
+
556
+ def wrap_propagate_mutations_and_return(
557
+ f: NativeFunction, functional_op: NativeFunction, inner_return_var: str
558
+ ) -> str:
559
+ mutable_arg_names = f.func.arguments.mutable_arg_names()
560
+ (
561
+ aliased_outer_rets,
562
+ non_aliased_outer_rets,
563
+ ) = get_mutable_redispatch_return_names(f, inner_return_var)
564
+ _, non_aliased_inner_rets = get_mutable_redispatch_return_names(
565
+ functional_op, inner_return_var
566
+ )
567
+ # The outer function may have a mix of aliased and non-aliased outputs,
568
+ # but the inner functional op that we're transforming to should only have non-aliased outputs.
569
+ assert len(mutable_arg_names) + len(non_aliased_outer_rets) == len(
570
+ non_aliased_inner_rets
571
+ )
572
+
573
+ # First, take all of the newly created outputs from the inner call and wrap them into functional tensors
574
+ updates = []
575
+ non_aliased_wrapped_ret_names = []
576
+ for i, inner_ret in enumerate(
577
+ non_aliased_inner_rets[: len(non_aliased_outer_rets)]
578
+ ):
579
+ ret_name = f"output_{i}"
580
+ updates.append(
581
+ f"""\
582
+ auto output_{i} = at::functionalization::impl::to_functional_tensor({inner_ret});"""
583
+ )
584
+ non_aliased_wrapped_ret_names.append(ret_name)
585
+
586
+ # Next, take all of the mutated outputs from the inner call corresponding to mutated inputs,
587
+ # and propagate the mutations
588
+ for outer_arg, inner_ret in zip(
589
+ mutable_arg_names, non_aliased_inner_rets[len(non_aliased_outer_rets) :]
590
+ ):
591
+ updates.append(
592
+ f"""\
593
+ auto {outer_arg}_inner = at::functionalization::impl::from_functional_tensor({outer_arg});
594
+ at::functionalization::impl::replace_({outer_arg}, {inner_ret});
595
+ at::functionalization::impl::commit_update({outer_arg});
596
+ at::functionalization::impl::sync({outer_arg});
597
+ auto {outer_arg}_inner_updated = at::functionalization::impl::from_functional_tensor({outer_arg});
598
+ at::functionalization::impl::propagate_xla_data_direct({outer_arg}_inner, {outer_arg}_inner_updated);"""
599
+ )
600
+
601
+ # Finally, we return:
602
+ # - Any mutable arguments that are also returned
603
+ # - Any immutable returns that were created wrapping the output from the inner call
604
+ returns_str = return_str(
605
+ f.func.returns, aliased_outer_rets + non_aliased_wrapped_ret_names
606
+ )
607
+ updates_str = "\n".join(updates)
608
+ return f"""\
609
+ {updates_str}
610
+ {returns_str}"""
611
+
612
+
613
+ # Generates the Functionalization kernel for:
614
+ # - mutation ops (inplace and out= ops)
615
+ @with_native_function_and
616
+ def emit_inplace_functionalization_body(
617
+ f: NativeFunction, g: NativeFunctionsGroup
618
+ ) -> str:
619
+ # mutation case
620
+ assert modifies_arguments(f)
621
+
622
+ dispatcher_sig = DispatcherSignature.from_schema(f.func)
623
+
624
+ unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(
625
+ dispatcher_sig, is_view_op=False
626
+ )
627
+
628
+ mutated_names = [
629
+ a.name
630
+ for a in f.func.arguments.flat_all
631
+ if a.type.is_tensor_like() and a.annotation is not None
632
+ ]
633
+ non_mutated_names = [
634
+ a.name
635
+ for a in f.func.arguments.flat_all
636
+ if a.type.is_tensor_like() and a.annotation is None
637
+ ]
638
+ non_mutated_tensor_names = [
639
+ a.name
640
+ for a in f.func.arguments.flat_all
641
+ if a.type == BaseType(BaseTy.Tensor) and a.annotation is None
642
+ ]
643
+ # all mutable inputs must be functional tensors in order to participate in functionalization
644
+ check_all_mutated_args_are_functional = " && ".join(
645
+ ["true"]
646
+ + [
647
+ f"at::functionalization::impl::isFunctionalTensor({a})"
648
+ for a in mutated_names
649
+ ]
650
+ )
651
+ check_any_non_mutated_args_are_functional = " || ".join(
652
+ ["false"]
653
+ + [
654
+ f"at::functionalization::impl::isFunctionalTensor({a})"
655
+ for a in non_mutated_names
656
+ ]
657
+ )
658
+
659
+ check_any_non_mutated_tensors_are_xla = " || ".join(
660
+ ["false"]
661
+ + [
662
+ f"{a}.device().type() == c10::DeviceType::XLA"
663
+ for a in non_mutated_tensor_names
664
+ ]
665
+ )
666
+ # These are used in the cases where we don't functionalize and redispatch to the inplace op
667
+ # case 1: we hit an inplace op that doesn't have an out-of-place equivalent
668
+ # case 2: we hit an inplace op but our inputs are not functional tensors (in which case our kernel just no-ops)
669
+ inplace_exprs = [
670
+ e.expr
671
+ for e in translate(unwrapped_args_ctx, dispatcher_sig.arguments(), method=False)
672
+ ]
673
+
674
+ # call the out-of-place variant of the op
675
+ return_type = (
676
+ dispatcher.returns_type(g.functional.func.returns).remove_const_ref().cpp_type()
677
+ )
678
+ functional_sig = DispatcherSignature.from_schema(g.functional.func)
679
+ functional_exprs = [
680
+ e.expr
681
+ for e in translate(unwrapped_args_ctx, functional_sig.arguments(), method=False)
682
+ ]
683
+
684
+ if f.func.is_out_fn():
685
+ mutable_input_post_processing = "\n".join(
686
+ [
687
+ f"""
688
+ at::functionalization::impl::replace_(
689
+ {a.name}, {'std::get<' + str(i) + '>(tmp_output)' if len(f.func.returns) > 1 else 'tmp_output'});
690
+ at::functionalization::impl::commit_update({a.name});"""
691
+ for (i, a) in enumerate(f.func.arguments.out)
692
+ if a.annotation and a.annotation.is_write and a.type.is_tensor_like()
693
+ ]
694
+ )
695
+ else:
696
+ mutable_input_post_processing = "\n".join(
697
+ [
698
+ f"""
699
+ at::functionalization::impl::replace_({a.name}, tmp_output);
700
+ at::functionalization::impl::commit_update({a.name});"""
701
+ for a in f.func.arguments.flat_all
702
+ if a.annotation and a.annotation.is_write and a.type.is_tensor_like()
703
+ ]
704
+ )
705
+
706
+ meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
707
+ # We don't want to run the inplace meta func for ops like .set_(), because:
708
+ # (1) they're unnecessary: inplace meta checks are only useful for ops like add_(),
709
+ # where broadcasting will work for the out-of-place case but should fail on the inplace call
710
+ # (2) They'll also fail without adding extra infra: we'd need to convert the input storage argument
711
+ # into a meta storage
712
+ any_storage_args = any(
713
+ a.type == BaseType(BaseTy.Storage) for a in f.func.arguments.flat_all
714
+ )
715
+
716
+ return f"""
717
+ {dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
718
+ if ({str(not any_storage_args and f.func.kind() == SchemaKind.inplace).lower()}) {{
719
+ // Before converting the mutable op to its functional variant, run meta tensors through the original op.
720
+ // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
721
+ // (We can only do this for inplace ops today though, because they technically all support meta tensors).
722
+ {meta_conversion_str}
723
+ at::AutoDispatchSkipFunctionalize func_guard;
724
+ c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
725
+ at::_ops::{f.func.name.unambiguous_name()}::call({', '.join(a.name for a in meta_call_ctx)});
726
+ }}
727
+ {unwrap_tensor_args_str}
728
+ if (!({check_all_mutated_args_are_functional})) {{
729
+ // We want to disable this check if there are any XLA tensors.
730
+ // cpu_tensor.copy_(xla_tensor) is valid code.
731
+ if (!({check_any_non_mutated_tensors_are_xla}) && ({check_any_non_mutated_args_are_functional})) {{
732
+ // case 1: trying to mutate a non functional tensor with a functional tensor is an error
733
+ TORCH_INTERNAL_ASSERT(false,
734
+ "mutating a non-functional tensor with a functional tensor is not allowed.",
735
+ " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
736
+ }} else {{
737
+ // case 2: arguments are not functional tensors, so we no-op and redispatch.
738
+ at::AutoDispatchSkipFunctionalize guard;
739
+ {maybe_create_output(f, 'tmp_output')}at::_ops::{f.func.name.unambiguous_name()}::call({', '.join(inplace_exprs)});
740
+ {return_from_mutable_noop_redispatch(f, 'tmp_output')}
741
+ }}
742
+ }} else {{
743
+ {return_type} tmp_output;
744
+ {{
745
+ at::AutoDispatchSkipFunctionalize guard;
746
+ tmp_output = at::_ops::{g.functional.func.name.unambiguous_name()}::call({', '.join(functional_exprs)});
747
+ }}
748
+ {wrap_propagate_mutations_and_return(f, g.functional, 'tmp_output')}
749
+ }}
750
+ }}"""
751
+
752
+
753
+ # The below functions generate RegisterFunctionalization.cpp
754
+ # These files provide the kernels that run the functionalization pass, which can be opted into
755
+ # per backend (e.g. XLA or Vulkan), or as a composable transform (functionalize() in functorch).
756
+
757
+
758
+ # See Note [Functionalization Pass: View Inverses].
759
+ def gen_functionalization_view_inverse_declaration(
760
+ selector: SelectiveBuilder, g: NativeFunctionsViewGroup
761
+ ) -> str | None:
762
+ # For every (non-composite) view op, we need a corresponding "inverse view" function.
763
+ # This generates the declarations so we get a good compiler error when someone adds a new view.
764
+ @with_native_function
765
+ def emit_decl_helper(g: NativeFunctionsViewGroup) -> str | None:
766
+ if g.view.has_composite_implicit_autograd_kernel:
767
+ return None
768
+ view_inverse_sig = ViewInverseSignature(g)
769
+ return view_inverse_sig.decl()
770
+
771
+ return emit_decl_helper(g)
772
+
773
+
774
+ def gen_functionalization_registration(
775
+ selector: SelectiveBuilder,
776
+ g: NativeFunction | NativeFunctionsGroup | NativeFunctionsViewGroup,
777
+ composite_implicit_autograd_index: BackendIndex,
778
+ ) -> list[str]:
779
+ @with_native_function
780
+ def emit_registration_helper(f: NativeFunction) -> str:
781
+ assert not f.has_composite_implicit_autograd_kernel
782
+ registration_str = f"TORCH_FN(functionalization::{wrapper_name(f.func)})"
783
+ return f'm.impl("{f.func.name}", {registration_str});'
784
+
785
+ # Don't generate kernels in mobile build
786
+ if not selector.include_all_operators:
787
+ return []
788
+
789
+ if isinstance(g, NativeFunctionsViewGroup):
790
+ # functionalization needs to register kernels for view + view_inplace ops
791
+ # See Note [Functionalization <> torch.Tensor constructor]
792
+ if str(g.view.func.name) == "lift_fresh":
793
+ return []
794
+ view_str = []
795
+ if not g.view.has_composite_implicit_autograd_kernel:
796
+ view_str.append(emit_registration_helper(g.view))
797
+ if (
798
+ g.view_inplace is not None
799
+ and not g.view_inplace.has_composite_implicit_autograd_kernel
800
+ ):
801
+ assert g.view_inplace.is_view_op
802
+ view_str.append(emit_registration_helper(g.view_inplace))
803
+ return view_str
804
+
805
+ elif isinstance(g, NativeFunctionsGroup):
806
+ # Gets a hand-written functionalization kernel
807
+ if g.inplace is not None and str(g.inplace.func.name) == "set_.source_Tensor":
808
+ fns = []
809
+ else:
810
+ fns = list(g.functions())
811
+ else:
812
+ if str(g.func.name) in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION:
813
+ return []
814
+ fns = [g]
815
+
816
+ registrations = []
817
+ for f in fns:
818
+ if f.has_composite_implicit_autograd_kernel:
819
+ continue
820
+ if str(f.func.name) == "lift":
821
+ # See Note [Functionalization <> torch.Tensor constructor]
822
+ return []
823
+ if str(f.func.name) == "resize_":
824
+ # See Note [resize_ in Functionalization]
825
+ return []
826
+ if str(f.func.name.name) != "set_":
827
+ assert not f.is_view_op
828
+ # functionalization needs to generate and register kernels for inplace ops.
829
+ # We *also* need to directly register CompositeImplicitAutograd kernels
830
+ # so that they decompose properly before functionalization.
831
+ if modifies_arguments(f):
832
+ registrations.append(emit_registration_helper(f))
833
+ return registrations
834
+
835
+
836
+ def gen_functionalization_definition(
837
+ selector: SelectiveBuilder,
838
+ # Note: Ideally this code should never have to look at NativeFunction
839
+ # (and instead only need to operate on grouped NativeFunctions).
840
+ # The only reason currently is because we need to emit direct dispatch registrations
841
+ # For CompositeImplicitAutograd operators, which are potentially ungrouped.
842
+ g: NativeFunction | NativeFunctionsGroup | NativeFunctionsViewGroup,
843
+ ) -> list[str]:
844
+ # Don't generate kernels in mobile build
845
+ if not selector.include_all_operators:
846
+ return []
847
+
848
+ if isinstance(g, NativeFunctionsViewGroup):
849
+ # Case 1: emit view -> view_copy kernels for the functionalization pass
850
+ view_defs = []
851
+ if not g.composite:
852
+ # invariant: NativeFunctionsViewGroup's always have a view_copy operator
853
+ # if the view is not composite (implicit autograd)
854
+ assert g.view_copy is not None, dataclass_repr(g, indent=1)
855
+ view_defs.append(emit_view_functionalization_body(g, view_inplace=False))
856
+ if g.view_inplace is not None:
857
+ view_defs.append(emit_view_functionalization_body(g, view_inplace=True))
858
+ return view_defs
859
+ elif isinstance(g, NativeFunction):
860
+ # Invariant: all mutable operators that we need to handle in functionalization
861
+ # should have been properly grouped up.
862
+ # TODO: The below ops all have "problematic" schemas that prevent them from
863
+ # getting functionalized. Instead of bending over backwards to get things to work,
864
+ # I think we should either:
865
+ # (1) fix their schemas (BC-breaking)
866
+ # (2) hand-write their functionalization kernels
867
+ if (
868
+ str(g.func.name) not in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION
869
+ and str(g.func.name.name) not in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION
870
+ ):
871
+ assert g.has_composite_implicit_autograd_kernel or not modifies_arguments(g)
872
+ return []
873
+ else:
874
+ # Case 2: emit inplace -> out-of-place kernels for the functionalization pass
875
+ mutation_defs = []
876
+ mutation_defs.append(emit_inplace_functionalization_body(g.out, g))
877
+ if g.inplace is not None:
878
+ mutation_defs.append(emit_inplace_functionalization_body(g.inplace, g))
879
+ if g.mutable is not None:
880
+ mutation_defs.append(emit_inplace_functionalization_body(g.mutable, g))
881
+ return mutation_defs
882
+ return []
vllm/lib/python3.10/site-packages/torchgen/gen_lazy_tensor.py ADDED
@@ -0,0 +1,581 @@
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ import os
5
+ from collections import namedtuple
6
+ from pathlib import Path
7
+ from typing import Any, Callable, Iterable, Iterator, Sequence
8
+
9
+ import yaml
10
+
11
+ import torchgen.dest as dest
12
+ from torchgen.api.lazy import setValueT
13
+ from torchgen.api.types import BaseCppType
14
+ from torchgen.dest.lazy_ir import GenLazyIR, GenLazyNativeFuncDefinition, GenTSLazyIR
15
+ from torchgen.gen import get_grouped_native_functions, parse_native_yaml
16
+ from torchgen.gen_backend_stubs import (
17
+ error_on_missing_kernels,
18
+ gen_dispatcher_registrations,
19
+ gen_dispatchkey_nativefunc_headers,
20
+ parse_backend_yaml,
21
+ )
22
+ from torchgen.model import NativeFunction, NativeFunctionsGroup, OperatorName
23
+ from torchgen.selective_build.selector import SelectiveBuilder
24
+ from torchgen.utils import FileManager, NamespaceHelper
25
+ from torchgen.yaml_utils import YamlLoader
26
+
27
+
28
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
29
+ #
30
+ # Lazy Tensor Codegen
31
+ #
32
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
33
+ # Overview
34
+ # ~~~~~~~~
35
+ #
36
+ # This codegen script builds on existing data models and helpers used
37
+ # by all ATen backends, and adds new functionality specific to lazy
38
+ # tensor backends.
39
+ #
40
+ # Inputs:
41
+ # - <backend>_native_functions.yaml: controls which operators are
42
+ # supported by the backend.
43
+ #
44
+ # Outputs:
45
+ # (for all backends)
46
+ # <DispatchKey>Ir.h defines Lazy IR classes to be constructed during tracing
47
+ # - opt-in: also generate 'lowering' methods for the TorchScript backend only
48
+ # <DispatchKey>NativeFunctions.cpp defines implementations of native functions which perform lazy tracing
49
+ # - opt-in: 'full_codegen' section of backend yaml; 'supported' section omits these implementations
50
+ # <DispatchKey>NativeFunctions.h declares implementations of native functions for both 'supported' and 'full_codegen'
51
+ # ops
52
+ #
53
+ # Register<DispatchKey>.cpp registers all op implementations with the dispatcher
54
+ # RegisterAutograd<DispatchKey>.cpp registers all autograd implementations with the dispatcher
55
+ #
56
+ # Validation Helpers:
57
+ # - Shape Inference: errs if any ops in backend yaml require shape inference not provided by meta kernels or
58
+ # implementations in torch/csrc/lazy/core/shape_inference.*
59
+ # - native function impls: errs if any 'supported' ops do not have an implementation defined in the backend
60
+ # (non-codegen) implementation file
61
+ #
62
+ #
63
+ # About the Data Model
64
+ # ~~~~~~~~~~~~~~~~~~~~
65
+ #
66
+ # Modeled after ATen codegen, the first step is to parse yaml and build a data model for the operators
67
+ # we care about. In this case, the <backend>_native_functions yaml defines a subset of the core operators
68
+ # (defined in more detail in the main native_functions.yaml), which will be supported by your backend.
69
+ # Backends can list ops in two categories:
70
+ # - `supported` ops require hand-implementations but still get codegenned declarations and registrations
71
+ # - `full_codegen` ops get implementations (and IR classes) generated too
72
+ #
73
+ # Each native function is modeled as an object with a schema, and each schema has objects representing their
74
+ # arguments. Much of the codegen is manipulation of the arguments and their types. For example, lazy tensor
75
+ # backends need to transform 'at::Tensor' arguments into 'lazy::Value' objects, as well as replacing reference
76
+ # types (stringref) with actual string objects, and this is done by manipulating the data model objects.
77
+ # - see api/lazy.py for the lazy data model
78
+ #
79
+ # Once the data model is set up, the rest of this script processes a number of templates for output CPP file
80
+ # and fills in the template values using helpers in `dest/lazy_ir.py` and `dest/lazy_ts_lowering.py`. These
81
+ # helpers mostly iterate over functions and their arguments, outputting different c++ snippets.
82
+ #
83
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
84
+
85
+
86
+ # Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
87
+ # Returns a Tuple of (backend_key, autograd_key, cpp_namespace, updated BackendIndex mapping, full_codegen)
88
+ ParsedExternalYaml = namedtuple(
89
+ "ParsedExternalYaml",
90
+ ["backend_key", "autograd_key", "cpp_namespace", "backend_indices", "full_codegen"],
91
+ )
92
+
93
+
94
+ def parse_native_functions_keys(
95
+ backend_yaml_path: str,
96
+ grouped_native_functions: Sequence[NativeFunction | NativeFunctionsGroup],
97
+ ) -> tuple[list[OperatorName], list[Any], list[OperatorName]]:
98
+ with open(backend_yaml_path) as f:
99
+ yaml_values = yaml.load(f, Loader=YamlLoader)
100
+ assert isinstance(yaml_values, dict)
101
+
102
+ full_codegen = yaml_values.pop("full_codegen", [])
103
+ non_native = yaml_values.pop("non_native", [])
104
+ ir_gen = yaml_values.pop("ir_gen", [])
105
+ assert isinstance(full_codegen, list)
106
+ assert isinstance(non_native, list)
107
+ assert isinstance(ir_gen, list)
108
+ full_codegen_opnames = [OperatorName.parse(name) for name in full_codegen]
109
+ ir_gen_opnames = [OperatorName.parse(name) for name in ir_gen]
110
+ return full_codegen_opnames, non_native, ir_gen_opnames
111
+
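For orientation, a minimal sketch of the <backend>_native_functions.yaml shape this function consumes; the keys mirror the ones popped above, and the op names are illustrative:

    example = yaml.safe_load("""
    backend: Lazy
    cpp_namespace: torch::lazy
    supported:
      - as_strided
    full_codegen:
      - add.Tensor
    ir_gen: []
    """)
    # parse_native_functions_keys pops full_codegen, non_native, and ir_gen;
    # backend/cpp_namespace/supported are consumed by parse_backend_yaml.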
112
+
113
+ def validate_shape_inference_header(
114
+ shape_inference_hdr: str, expected_shape_infr_decls: list[str]
115
+ ) -> None:
116
+ try:
117
+ with open(shape_inference_hdr) as f:
118
+ shape_infr_decls = f.read()
119
+ shape_infr_decl_lines = set(shape_infr_decls.split("\n"))
120
+ except OSError as e:
121
+ raise AssertionError(
122
+ f"Unable to read from the specified shape_inference_hdr file: {shape_inference_hdr}"
123
+ ) from e
124
+
125
+ # TODO(whc) add a check for shape inference functions that have meta kernels implemented and should be retired.
126
+
127
+ missing_decls = [
128
+ decl for decl in expected_shape_infr_decls if decl not in shape_infr_decl_lines
129
+ ]
130
+ if missing_decls:
131
+ raise Exception( # noqa: TRY002
132
+ f"""Missing shape inference function.\n
133
+ Please declare this function in {shape_inference_hdr}:\n
134
+ and implement it in the corresponding shape_inference.cpp file.\n
135
+ {os.linesep.join(missing_decls)}"""
136
+ )
137
+
138
+
139
+ # Some helper functions for the codegen.
140
+ def get_ltc_helper_fns() -> str:
141
+ return """\
142
+ at::Tensor to_meta(const at::Tensor& tensor) {
143
+ // undefined tensors can't be converted to the meta device, since they don't have sizes/strides
144
+ if (!tensor.defined()) return tensor;
145
+ auto out = at::native::empty_strided_meta_symint(tensor.sym_sizes(), tensor.sym_strides(), \
146
+ /*dtype=*/std::make_optional(tensor.scalar_type()), /*layout=*/std::make_optional(tensor.layout()), \
147
+ /*device=*/std::make_optional(c10::Device(c10::kMeta)), /*pin_memory=*/std::nullopt);
148
+ // needs to handle wrapped numbers, so dtype promotion works properly.
149
+ if (tensor.unsafeGetTensorImpl()->is_wrapped_number()) {
150
+ out.unsafeGetTensorImpl()->set_wrapped_number(true);
151
+ }
152
+ return out;
153
+ }
154
+ std::optional<at::Tensor> to_meta(const std::optional<at::Tensor>& tensor) {
155
+ if (tensor.has_value()) {
156
+ return to_meta(*tensor);
157
+ }
158
+ return std::nullopt;
159
+ }
160
+
161
+ std::vector<at::Tensor> to_meta(at::ITensorListRef t_list) {
162
+ std::vector<at::Tensor> outs;
163
+ outs.reserve(t_list.size());
164
+ for (const auto& tensor : t_list) {
165
+ outs.push_back(to_meta(tensor));
166
+ }
167
+ return outs;
168
+ }
169
+ """
170
+
171
+
172
+ class default_args:
173
+ node_base: str = "Node"
174
+ node_base_hdr: str | None = None
175
+ shape_inference_hdr: str = "torch/csrc/lazy/core/shape_inference.h"
176
+ tensor_class: str = "torch::lazy::LazyTensor"
177
+ tensor_class_hdr: str = "torch/csrc/lazy/core/tensor.h"
178
+ lazy_ir_generator: type[GenLazyIR] = GenLazyIR
179
+ native_func_definition_generator: type[
180
+ GenLazyNativeFuncDefinition
181
+ ] = GenLazyNativeFuncDefinition
182
+ backend_name: str = "TorchScript"
183
+
184
+
185
+ def main() -> None:
186
+ parser = argparse.ArgumentParser(description="Generate Lazy Tensor backend files")
187
+ parser.add_argument(
188
+ "-s",
189
+ "--source-yaml",
190
+ "--source_yaml",
191
+ help="path to source yaml file containing operator external definitions",
192
+ )
193
+ parser.add_argument("-o", "--output-dir", "--output_dir", help="output directory")
194
+ parser.add_argument(
195
+ "--dry-run", "--dry_run", type=bool, default=False, help="do not write output files"
196
+ )
197
+ parser.add_argument(
198
+ "--impl-path",
199
+ "--impl_path",
200
+ type=str,
201
+ default=None,
202
+ help="path to the source C++ file containing kernel definitions",
203
+ )
204
+ parser.add_argument(
205
+ "--gen-ts-lowerings",
206
+ "--gen_ts_lowerings",
207
+ action="store_true",
208
+ help="Generate TorchScript lowerings in addition to Lazy IR and NativeFunctions",
209
+ )
210
+ parser.add_argument(
211
+ "--node-base",
212
+ "--node_base",
213
+ type=str,
214
+ default=default_args.node_base,
215
+ help="Name of backend specific custom Lazy IR Node base class",
216
+ )
217
+ parser.add_argument(
218
+ "--node-base-hdr",
219
+ "--node_base_hdr",
220
+ type=str,
221
+ default=default_args.node_base_hdr,
222
+ help="Path to header file defining custom Lazy IR Node base class",
223
+ )
224
+ parser.add_argument(
225
+ "--shape-inference-hdr",
226
+ "--shape_inference_hdr",
227
+ type=str,
228
+ default=default_args.shape_inference_hdr,
229
+ help="Path to header file defining custom Lazy shape inference functions",
230
+ )
231
+ parser.add_argument(
232
+ "--tensor-class",
233
+ "--tensor_class",
234
+ type=str,
235
+ default=default_args.tensor_class,
236
+ help="Name of backend specific custom Lazy Tensor class",
237
+ )
238
+ parser.add_argument(
239
+ "--tensor-class-hdr",
240
+ "--tensor_class_hdr",
241
+ type=str,
242
+ default=default_args.tensor_class_hdr,
243
+ help="Path to header file defining custom Lazy Tensor class",
244
+ )
245
+ parser.add_argument(
246
+ "--backend-name",
247
+ "--backend_name",
248
+ type=str,
249
+ default=default_args.backend_name,
250
+ help="Name of the backend to generate",
251
+ )
252
+ options = parser.parse_args()
253
+
254
+ # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
255
+ torch_root = Path(__file__).parent.parent.parent.absolute()
256
+ aten_path = str(torch_root / "aten" / "src" / "ATen")
257
+ lazy_ir_generator: type[GenLazyIR] = default_args.lazy_ir_generator
258
+ if options.gen_ts_lowerings:
259
+ lazy_ir_generator = GenTSLazyIR
260
+ native_func_definition_generator: type[
261
+ GenLazyNativeFuncDefinition
262
+ ] = default_args.native_func_definition_generator
263
+
264
+ run_gen_lazy_tensor(
265
+ aten_path,
266
+ options.source_yaml,
267
+ options.output_dir,
268
+ options.dry_run,
269
+ options.impl_path,
270
+ options.node_base,
271
+ options.node_base_hdr,
272
+ options.tensor_class,
273
+ options.tensor_class_hdr,
274
+ options.shape_inference_hdr,
275
+ lazy_ir_generator,
276
+ native_func_definition_generator,
277
+ options.backend_name,
278
+ )
279
+
280
+
281
+ def run_gen_lazy_tensor(
282
+ aten_path: str,
283
+ source_yaml: str,
284
+ output_dir: str,
285
+ dry_run: bool,
286
+ impl_path: str | None,
287
+ node_base: str = default_args.node_base,
288
+ node_base_hdr: str | None = default_args.node_base_hdr,
289
+ tensor_class: str = default_args.tensor_class,
290
+ tensor_class_hdr: str = default_args.tensor_class_hdr,
291
+ shape_inference_hdr: str = default_args.shape_inference_hdr,
292
+ lazy_ir_generator: type[GenLazyIR] = default_args.lazy_ir_generator,
293
+ native_func_definition_generator: type[
294
+ GenLazyNativeFuncDefinition
295
+ ] = default_args.native_func_definition_generator,
296
+ # build_in_tree is true for TS backend and affects include paths
297
+ build_in_tree: bool = False,
298
+ # per_operator_headers changes whether ATen/Functions.h or individual operator headers are used
299
+ # it must match how ATen was built
300
+ per_operator_headers: bool = False,
301
+ backend_name: str = default_args.backend_name,
302
+ gen_forced_fallback_code: bool = False,
303
+ use_lazy_shape: bool = True,
304
+ # the following arguments are temporary customization points for xla backend migration.
305
+ # do not rely on them otherwise; they should be removed once the migration is complete
306
+ backend_namespace: str = "torch::lazy",
307
+ get_tensorlist: str = "GetTensorList",
308
+ get_tensor_or_wrap_number: str = "GetLtcTensorOrCreateForWrappedNumber",
309
+ try_get_tensor: str = "TryGetLtcTensor",
310
+ metrics_counter: str = 'TORCH_LAZY_FN_COUNTER("lazy::")',
311
+ create_tensor: str = "LazyTensor::Create",
312
+ create_from_first_tensor: bool = False,
313
+ create_aten_from_ltc_tensor: str = "torch::lazy::CreateAtenFromLtcTensor",
314
+ tuple_aten_from_ltc_tensors: str = "torch::lazy::TupleAtenFromLtcTensors",
315
+ lazy_value_class: str = "torch::lazy::Value",
316
+ lazy_tensor_ptr: str = "LazyTensorPtr",
317
+ get_device_fn: str = "torch::lazy::GetBackendDevice",
318
+ ) -> None:
319
+ lv_tokens = lazy_value_class.split("::")
320
+ lv_class = lv_tokens[-1]
321
+ lv_ns = "::".join(lv_tokens[:-1])
322
+ setValueT(BaseCppType(lv_ns, lv_class))
323
+ template_dir = os.path.join(aten_path, "templates")
324
+
325
+ def make_file_manager(install_dir: str) -> FileManager:
326
+ return FileManager(
327
+ install_dir=install_dir, template_dir=template_dir, dry_run=dry_run
328
+ )
329
+
330
+ fm = make_file_manager(output_dir)
331
+
332
+ native_yaml_path = os.path.join(aten_path, "native/native_functions.yaml")
333
+ tags_yaml_path = os.path.join(aten_path, "native/tags.yaml")
334
+ parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
335
+ native_functions, backend_indices = (
336
+ parsed_yaml.native_functions,
337
+ parsed_yaml.backend_indices,
338
+ )
339
+ grouped_native_functions = get_grouped_native_functions(native_functions)
340
+
341
+ def sort_native_function(f: NativeFunctionsGroup | NativeFunction) -> str:
342
+ """
343
+ We sort the native functions because of the note in concat_map_codegen.
344
+ TODO(alanwaketan): Remove this sorting hack once all ops are grouped properly.
345
+ """
346
+ func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
347
+ return str(func.name.name)
348
+
349
+ grouped_native_functions = sorted(
350
+ grouped_native_functions, key=sort_native_function
351
+ )
352
+
353
+ parsed_backend_yaml = parse_backend_yaml(
354
+ source_yaml, grouped_native_functions, backend_indices
355
+ )
356
+ backend_key = parsed_backend_yaml.backend_key
357
+ autograd_key = parsed_backend_yaml.autograd_key
358
+ cpp_namespace = parsed_backend_yaml.cpp_namespace
359
+ backend_indices = parsed_backend_yaml.backend_indices
360
+ # the following 3 keys are all processed differently
361
+ # for full_codegen, we generate IR, kernels, etc
362
+ # for ir_gen, we generate only IR
363
+ # non_native is used to register kernels not declared in
364
+ # native_functions.yaml
365
+ full_codegen, non_native, ir_gen = parse_native_functions_keys(
366
+ source_yaml, grouped_native_functions
367
+ )
368
+
369
+ def concat_map_codegen(
370
+ func: Callable[[NativeFunction], Sequence[str]],
371
+ xs: Iterable[NativeFunctionsGroup | NativeFunction],
372
+ ops_list: list[OperatorName] = full_codegen,
373
+ ) -> Iterator[str]:
374
+ """
375
+ We code-gen for the functional variant, which is all we need for IR classes/lowerings/shape inferences, but we
376
+ code-gen additional entries for the inplace variant only for the native function definitions.
377
+ """
378
+
379
+ for x in xs:
380
+ fs = list(x.functions()) if isinstance(x, NativeFunctionsGroup) else [x]
381
+ for f in fs:
382
+ if f.func.name in ops_list:
383
+ yield from func(f)
384
+
385
+ selector = SelectiveBuilder.get_nop_selector()
386
+
387
+ assert backend_key is not None
388
+ class_name = backend_indices[backend_key].native_function_class_name()
389
+
390
+ if impl_path is not None:
391
+ error_on_missing_kernels(
392
+ native_functions,
393
+ backend_indices,
394
+ backend_key,
395
+ autograd_key,
396
+ class_name,
397
+ impl_path,
398
+ full_codegen,
399
+ )
400
+
401
+ """ Validate Shape Inference Definitions
402
+
403
+ Generated lazy native functions all perform shape inference, by first using a meta:: kernel
404
+ if available for that op, and otherwise using a 'compute_shape_{op}' function instead. The generator
405
+ knows the call signature for compute_shape_{op} because it matches the nativefunction (and meta::) signature,
406
+ so it just has to check whether the op is structured and generate a call for one or the other. It's up to the dev
407
+ to supply the missing compute_shape_{op} function, but the codegen at least warns you about this and provides
408
+ the expected signature which can be copy-pasted into shape_inference.h.
409
+
410
+ compute_shape_{op} functions are handwritten and should be replaced over time as ops get ported
411
+ to structured kernels.
412
+
413
+ See torch/csrc/lazy/core/shape_inference.cpp #READ THIS! for more information.
414
+ """
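+ # For a hypothetical op `my_op`, a missing declaration reported by
+ # validate_shape_inference_header would look roughly like:
+ #   TORCH_API std::vector<torch::lazy::Shape> compute_shape_my_op(const at::Tensor& self);
+ # and can be copy-pasted into shape_inference.h, with a matching definition in
+ # shape_inference.cpp.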
415
+ if shape_inference_hdr is not None:
416
+ expected_shape_infr_decls = list(
417
+ concat_map_codegen(
418
+ dest.GenLazyShapeInferenceDefinition(
419
+ backend_indices[backend_key], tensor_class
420
+ ),
421
+ grouped_native_functions,
422
+ )
423
+ )
424
+
425
+ validate_shape_inference_header(shape_inference_hdr, expected_shape_infr_decls)
426
+ assert class_name is not None
427
+
428
+ # Generate nativefunction declarations
429
+ # Note: eager registration is set to False for the lazy TS backend, as another LTC backend
430
+ # may want to register their own lazy kernels instead of registering the TS ones.
431
+ # The registration will lazily happen when init_ts_backend is called.
432
+ gen_dispatchkey_nativefunc_headers(
433
+ fm,
434
+ class_name,
435
+ cpp_namespace,
436
+ backend_indices,
437
+ grouped_native_functions,
438
+ backend_key,
439
+ autograd_key,
440
+ backend_name,
441
+ )
442
+
443
+ # Generate Dispatcher registrations which hook up the nativefunctions
444
+ for dispatch_key in (
445
+ [backend_key] if autograd_key is None else [backend_key, autograd_key]
446
+ ):
447
+ gen_dispatcher_registrations(
448
+ fm,
449
+ output_dir,
450
+ class_name,
451
+ backend_indices,
452
+ grouped_native_functions,
453
+ backend_key,
454
+ dispatch_key,
455
+ selector,
456
+ build_in_tree=build_in_tree,
457
+ per_operator_headers=per_operator_headers,
458
+ backend_name=backend_name,
459
+ eager_registration=False,
460
+ )
461
+
462
+ # Generate native function impls that build IR nodes
463
+ ns_helper = NamespaceHelper(cpp_namespace)
464
+ fm.write_with_template(
465
+ f"{backend_key}NativeFunctions.cpp",
466
+ "DispatchKeyNativeFunctions.cpp",
467
+ lambda: {
468
+ "includes": [
469
+ f"#include <{path}>"
470
+ for path in [
471
+ tensor_class_hdr,
472
+ shape_inference_hdr,
473
+ "ATen/Functions.h",
474
+ "ATen/native/TensorConversions.h",
475
+ "ATen/NativeFunctions.h",
476
+ "ATen/CompositeExplicitAutogradNonFunctionalFunctions.h",
477
+ "ATen/MetaFunctions.h",
478
+ "ATen/Operators.h",
479
+ "ATen/native/CPUFallback.h",
480
+ "torch/csrc/lazy/core/ir_builder.h",
481
+ "torch/csrc/lazy/core/lazy_graph_executor.h",
482
+ "torch/csrc/lazy/core/metrics.h",
483
+ "torch/csrc/lazy/core/shape.h",
484
+ f"{output_dir}/{backend_key}NativeFunctions.h",
485
+ f"{output_dir}/LazyIr.h",
486
+ ]
487
+ + (
488
+ ["torch/csrc/lazy/ts_backend/ts_eager_fallback.h"]
489
+ if gen_forced_fallback_code
490
+ else []
491
+ )
492
+ ],
493
+ "helper_fns": get_ltc_helper_fns(),
494
+ "native_functions_include": "",
495
+ "namespace_prologue": ns_helper.prologue,
496
+ "namespace_epilogue": ns_helper.epilogue,
497
+ "native_function_definitions": list(
498
+ concat_map_codegen(
499
+ native_func_definition_generator(
500
+ f"{backend_key}NativeFunctions",
501
+ backend_indices[backend_key],
502
+ tensor_class,
503
+ gen_forced_fallback_code,
504
+ backend_namespace,
505
+ get_tensorlist,
506
+ get_tensor_or_wrap_number,
507
+ try_get_tensor,
508
+ metrics_counter,
509
+ create_tensor,
510
+ create_from_first_tensor,
511
+ create_aten_from_ltc_tensor,
512
+ tuple_aten_from_ltc_tensors,
513
+ lazy_tensor_ptr,
514
+ get_device_fn,
515
+ ),
516
+ grouped_native_functions,
517
+ )
518
+ ),
519
+ },
520
+ )
521
+ # Generate IR node classes
522
+ lazy_ir_obj = lazy_ir_generator(
523
+ backend_indices[backend_key], backend_name, node_base, use_lazy_shape
524
+ )
525
+
526
+ fm.write_with_template(
527
+ "LazyIr.h",
528
+ "LazyIr.h",
529
+ lambda: {
530
+ "lazy_ir_sysinc": [
531
+ f"#include <{path}>"
532
+ for path in [
533
+ "ATen/core/Formatting.h",
534
+ "c10/core/ScalarType.h",
535
+ "torch/csrc/lazy/core/hash.h",
536
+ "torch/csrc/lazy/core/ir.h",
537
+ "torch/csrc/lazy/core/shape.h",
538
+ "optional",
539
+ "vector",
540
+ ]
541
+ ],
542
+ "lazy_ir_inc": [f'#include "{node_base_hdr}"']
543
+ if node_base_hdr is not None
544
+ else [],
545
+ "ir_declarations": list(
546
+ concat_map_codegen(
547
+ lazy_ir_obj, grouped_native_functions, full_codegen + ir_gen
548
+ )
549
+ ),
550
+ "namespace_prologue": ns_helper.prologue,
551
+ "namespace_epilogue": ns_helper.epilogue,
552
+ },
553
+ )
554
+
555
+ # Generate Non Native IR Node classes
556
+ fm.write_with_template(
557
+ "LazyNonNativeIr.h",
558
+ "LazyNonNativeIr.h",
559
+ lambda: {
560
+ "lazy_non_native_ir_inc": [
561
+ f"#include <{path}>"
562
+ for path in [
563
+ "torch/csrc/lazy/core/ir.h",
564
+ "torch/csrc/lazy/core/ir_builder.h",
565
+ "torch/csrc/lazy/core/internal_ops/ltc_ops.h",
566
+ "torch/csrc/lazy/core/shape_inference.h",
567
+ ]
568
+ + ([node_base_hdr] if node_base_hdr else [])
569
+ if path
570
+ ],
571
+ "non_native_ir_nodes": dest.generate_non_native_lazy_ir_nodes(
572
+ non_native, lazy_ir_obj
573
+ ),
574
+ "namespace_prologue": ns_helper.prologue,
575
+ "namespace_epilogue": ns_helper.epilogue,
576
+ },
577
+ )
578
+
579
+
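+ # Hypothetical invocation (the yaml path and output dir are illustrative):
+ #   python gen_lazy_tensor.py -s ts_native_functions.yaml -o generated/ --gen-ts-lowerings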
580
+ if __name__ == "__main__":
581
+ main()
vllm/lib/python3.10/site-packages/torchgen/gen_schema_utils.py ADDED
@@ -0,0 +1,97 @@
1
+ from typing import Any, Optional, Tuple, Union
2
+
3
+ from torchgen.model import (
4
+ Annotation,
5
+ Argument,
6
+ Arguments,
7
+ BaseOperatorName,
8
+ BaseTy,
9
+ BaseType,
10
+ CustomClassType,
11
+ FunctionSchema,
12
+ ListType,
13
+ OperatorName,
14
+ Return,
15
+ )
16
+
17
+
18
+ # Note: These aren't actually used in torchgen; they're utilities for generating a schema
19
+ # from real arguments. For example, this is used to generate HigherOrderOperators' schema since
20
+ # their schemas can vary for different instances of the same HOP.
21
+
22
+
23
+ class TypeGen:
24
+ convert_to_base_ty = {
25
+ int: BaseTy.int,
26
+ float: BaseTy.float,
27
+ str: BaseTy.str,
28
+ bool: BaseTy.bool,
29
+ }
30
+
31
+ @staticmethod
32
+ def from_example(obj: Any) -> Union[BaseType, ListType, CustomClassType]:
33
+ import torch
34
+
35
+ if isinstance(obj, torch.fx.GraphModule):
36
+ return BaseType(BaseTy.GraphModule)
37
+ elif isinstance(obj, torch.Tensor):
38
+ return BaseType(BaseTy.Tensor)
39
+ elif isinstance(obj, torch.SymInt):
40
+ return BaseType(BaseTy.SymInt)
41
+ elif isinstance(obj, torch.SymBool):
42
+ return BaseType(BaseTy.SymBool)
43
+ elif isinstance(obj, torch.ScriptObject):
44
+ return CustomClassType(obj._type().name()) # type: ignore[attr-defined]
45
+ elif isinstance(obj, (list, tuple)):
46
+ assert len(obj) > 0
47
+ all_base_tys = [TypeGen.from_example(x) for x in obj]
48
+ if len(set(all_base_tys)) > 1:
49
+ raise RuntimeError(
50
+ f"Cannot generate schema for a sequence of args of heterogeneous types: {all_base_tys}. "
51
+ "Consider unpacking the argument and giving its elements proper names if possible "
52
+ "instead of using *args."
53
+ )
54
+ return ListType(all_base_tys[0], len(obj))
55
+ tp = type(obj)
56
+ if tp not in TypeGen.convert_to_base_ty:
57
+ raise RuntimeError(f"unsupported type {tp}")
58
+ return BaseType(TypeGen.convert_to_base_ty[tp])
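+
+ # A hedged usage sketch (these calls aren't exercised by torchgen itself):
+ #   TypeGen.from_example(3)           # -> BaseType(BaseTy.int)
+ #   TypeGen.from_example([1.0, 2.0])  # -> ListType(BaseType(BaseTy.float), 2)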
59
+
60
+
61
+ class ReturnGen:
62
+ @staticmethod
63
+ def from_example(
64
+ name: Optional[str], obj: Any, annotation: Optional[Annotation]
65
+ ) -> Return:
66
+ return Return(name, TypeGen.from_example(obj), annotation)
67
+
68
+
69
+ class ArgumentGen:
70
+ @staticmethod
71
+ def from_example(
72
+ name: str, obj: Any, default: Optional[str], annotation: Optional[Annotation]
73
+ ) -> Argument:
74
+ return Argument(
75
+ name, TypeGen.from_example(obj), default=default, annotation=annotation
76
+ )
77
+
78
+
79
+ class FunctionSchemaGen:
80
+ @staticmethod
81
+ def from_example(
82
+ op_name: str,
83
+ example_inputs: Tuple[Tuple[str, Any], ...],
84
+ example_outputs: Tuple[Any, ...],
85
+ ) -> FunctionSchema:
86
+ args = []
87
+ for name, inp in example_inputs:
88
+ args.append(ArgumentGen.from_example(name, inp, None, None))
89
+ # Ignore the annotations and other attributes for now; we could add more when needed.
90
+ arguments = Arguments(
91
+ tuple(), None, tuple(args), tuple(), None, tuple(), tuple()
92
+ )
93
+ returns = tuple(
94
+ ReturnGen.from_example(None, out, None) for out in example_outputs
95
+ )
96
+ op_name = OperatorName(BaseOperatorName(op_name, False, False, False), "")
97
+ return FunctionSchema(op_name, arguments, returns)
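+
+ # A minimal usage sketch, assuming a made-up HOP name:
+ #   import torch
+ #   x = torch.randn(2)
+ #   schema = FunctionSchemaGen.from_example("my_hop", (("x", x),), (x.sin(),))
+ #   str(schema)  # roughly: my_hop(Tensor x) -> Tensor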
vllm/lib/python3.10/site-packages/torchgen/gen_vmap_plumbing.py ADDED
@@ -0,0 +1,271 @@
1
+ from __future__ import annotations
2
+
3
+ import textwrap
4
+ from dataclasses import dataclass
5
+ from typing import Sequence
6
+
7
+ from torchgen.api.translate import translate
8
+ from torchgen.api.types import DispatcherSignature
9
+ from torchgen.context import method_with_native_function
10
+ from torchgen.model import (
11
+ Argument,
12
+ BaseTy,
13
+ BaseType,
14
+ FunctionSchema,
15
+ ListType,
16
+ NativeFunction,
17
+ OptionalType,
18
+ Return,
19
+ SchemaKind,
20
+ Type,
21
+ )
22
+ from torchgen.utils import mapMaybe
23
+
24
+
25
+ def is_tensor(typ: Type) -> bool:
26
+ return isinstance(typ, BaseType) and typ.name == BaseTy.Tensor
27
+
28
+
29
+ def is_optional_tensor(typ: Type) -> bool:
30
+ return isinstance(typ, OptionalType) and is_tensor(typ.elem)
31
+
32
+
33
+ def is_tensor_list(typ: Type) -> bool:
34
+ return isinstance(typ, ListType) and is_tensor(typ.elem)
35
+
36
+
37
+ def unwrap_tensor(name: str, cur_level_var: str) -> list[str]:
38
+ result = f"""\
39
+ auto [{name}_value, {name}_bdim] = unwrapTensorAtLevel({name}, {cur_level_var});"""
40
+ return textwrap.dedent(result).split("\n")
41
+
42
+
43
+ def unwrap_optional_tensor(name: str, cur_level_var: str) -> list[str]:
44
+ result = f"""\
45
+ std::optional<Tensor> {name}_value;
46
+ std::optional<int64_t> {name}_bdim;
47
+ if ({name}) {{
48
+ std::tie({name}_value, {name}_bdim) = unwrapTensorAtLevel({name}.value(), {cur_level_var});
49
+ }}"""
50
+ return textwrap.dedent(result).split("\n")
51
+
52
+
53
+ def gen_unwraps(
54
+ flat_arguments: Sequence[Argument], cur_level_var: str
55
+ ) -> tuple[str, list[str]]:
56
+ arg_names = [a.name for a in flat_arguments]
57
+ arg_types = [a.type for a in flat_arguments]
58
+
59
+ tensors = [name for typ, name in zip(arg_types, arg_names) if is_tensor(typ)]
60
+ optional_tensors = [
61
+ name for typ, name in zip(arg_types, arg_names) if is_optional_tensor(typ)
62
+ ]
63
+
64
+ unwraps = []
65
+ for tensor in tensors:
66
+ unwraps += unwrap_tensor(tensor, cur_level_var)
67
+
68
+ for opt_tensor in optional_tensors:
69
+ unwraps += unwrap_optional_tensor(opt_tensor, cur_level_var)
70
+ unwrap_code = "\n".join(unwraps)
71
+
72
+ unwrapped_arg_list = []
73
+ for arg in arg_names:
74
+ if arg in tensors or arg in optional_tensors:
75
+ unwrapped_arg_list += [f"{arg}_value", f"{arg}_bdim"]
76
+ else:
77
+ unwrapped_arg_list.append(arg)
78
+ return unwrap_code, unwrapped_arg_list
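+
+ # For a single Tensor argument named "self" (illustrative), the emitted unwrap code is
+ #   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+ # and the unwrapped argument list becomes ["self_value", "self_bdim"].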
79
+
80
+
81
+ def gen_case_where_all_bdims_are_none(
82
+ outer_sig: DispatcherSignature, schema: FunctionSchema, cur_level_var: str
83
+ ) -> str:
84
+ conditions = []
85
+ flat_args = schema.arguments.flat_all
86
+ for arg in flat_args:
87
+ if not arg.type.is_tensor_like():
88
+ continue
89
+ conditions.append(f"!isBatchedAtLevel({arg.name}, {cur_level_var})")
90
+
91
+ sig = DispatcherSignature.from_schema(schema)
92
+ translated_args = ", ".join(
93
+ e.expr for e in translate(outer_sig.arguments(), sig.arguments())
94
+ )
95
+ return f"""\
96
+ if ({' && '.join(conditions)}) {{
97
+ return at::_ops::{sig.func.name.unambiguous_name()}::call({translated_args});
98
+ }}"""
99
+
100
+
101
+ def gen_returns(
102
+ returns: tuple[Return, ...], cur_level_var: str, results_var: str
103
+ ) -> str:
104
+ idx = 0
105
+ wrapped_returns = []
106
+ for ret in returns:
107
+ if is_tensor(ret.type):
108
+ wrapped_returns.append(
109
+ f"makeBatched(std::get<{idx}>({results_var}), std::get<{idx + 1}>({results_var}), {cur_level_var})"
110
+ )
111
+ idx += 2
112
+ elif is_tensor_list(ret.type):
113
+ wrapped_returns.append(
114
+ f"makeBatchedVector(std::get<{idx}>({results_var}), std::get<{idx+1}>({results_var}), {cur_level_var})"
115
+ )
116
+ idx += 2
117
+ else:
118
+ wrapped_returns.append(f"std::get<{idx}>({results_var})")
119
+ idx += 1
120
+ if len(wrapped_returns) == 1:
121
+ result = f"return {wrapped_returns[0]};"
122
+ else:
123
+ result = f'return std::make_tuple({", ".join(wrapped_returns)});'
124
+ return result
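+
+ # Sketch: for returns (Tensor, int) this emits roughly
+ #   return std::make_tuple(
+ #       makeBatched(std::get<0>(results), std::get<1>(results), cur_level),
+ #       std::get<2>(results));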
125
+
126
+
127
+ def accepts_at_least_one_tensor_input(schema: FunctionSchema) -> bool:
128
+ return any(a.type.is_tensor_like() for a in schema.arguments.flat_all)
129
+
130
+
131
+ def is_mutated_arg(argument: Argument) -> bool:
132
+ return argument.annotation is not None and argument.annotation.is_write
133
+
134
+
135
+ def gen_vmap_inplace_plumbing(native_function: NativeFunction) -> str | None:
136
+ # Assumptions:
137
+ # - only one argument is being modified in-place
138
+ # - the argument that is being modified in-place is the first argument
139
+ # - all returns are either Tensor, tuple of Tensor, or TensorList
140
+ schema = native_function.func
141
+ sig = DispatcherSignature.from_schema(schema)
142
+ returns = schema.returns
143
+
144
+ # Check assumptions. If these are invalid we return None
145
+ # and punt handling them to the future.
146
+ assert schema.kind() == SchemaKind.inplace
147
+ if not is_mutated_arg(schema.arguments.flat_all[0]):
148
+ return None
149
+ if not len([arg for arg in schema.arguments.flat_all if is_mutated_arg(arg)]) == 1:
150
+ return None
151
+
152
+ # Only support cases where all returns are Tensors or vector<Tensor>
153
+ if len(returns) == 0:
154
+ return None
155
+ if not all(is_tensor(ret.type) or is_tensor_list(ret.type) for ret in returns):
156
+ return None
157
+ if not accepts_at_least_one_tensor_input(schema):
158
+ return None
159
+
160
+ cur_level_var = "cur_level"
161
+
162
+ unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
163
+ bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)
164
+
165
+ return f"""\
166
+ template <typename batch_rule_t, batch_rule_t batch_rule>
167
+ {sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
168
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
169
+ auto maybe_layer = maybeCurrentDynamicLayer();
170
+ vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
171
+ int64_t {cur_level_var} = maybe_layer->layerId();
172
+ {textwrap.indent(bdims_all_none_case, " ")}
173
+ {textwrap.indent(unwraps, " ")}
174
+ batch_rule({', '.join(unwrapped_arg_list)});
175
+ return {schema.arguments.flat_all[0].name};
176
+ }}"""
177
+
178
+
179
+ def gen_vmap_plumbing_no_returns(native_function: NativeFunction) -> str:
180
+ schema = native_function.func
181
+ sig = DispatcherSignature.from_schema(schema)
182
+ cur_level_var = "cur_level"
183
+
184
+ unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
185
+ bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)
186
+
187
+ return f"""\
188
+ template <typename batch_rule_t, batch_rule_t batch_rule>
189
+ {sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
190
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
191
+ auto maybe_layer = maybeCurrentDynamicLayer();
192
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
193
+ int64_t {cur_level_var} = maybe_layer->layerId();
194
+ {textwrap.indent(bdims_all_none_case, " ")}
195
+ {textwrap.indent(unwraps, " ")}
196
+ batch_rule({', '.join(unwrapped_arg_list)});
197
+ }}"""
198
+
199
+
200
+ def gen_vmap_plumbing(native_function: NativeFunction) -> str | None:
201
+ schema = native_function.func
202
+ sig = DispatcherSignature.from_schema(schema)
203
+ returns = schema.returns
204
+
205
+ # Only support cases where all returns are Tensors or vector<Tensor>
206
+ if not accepts_at_least_one_tensor_input(schema):
207
+ return None
208
+ if len(returns) == 0:
209
+ return gen_vmap_plumbing_no_returns(native_function)
210
+ return_symint_overrides = [
211
+ "_scaled_dot_product_flash_attention",
212
+ "_scaled_dot_product_cudnn_attention",
213
+ ]
214
+ if (
215
+ not all(ret.type.is_tensor_like() for ret in returns)
216
+ and schema.name.unambiguous_name() not in return_symint_overrides
217
+ ):
218
+ return None
219
+ # in-place views need special handling
220
+ if "inplace_view" in native_function.tags:
221
+ return None
222
+
223
+ if schema.kind() == SchemaKind.inplace:
224
+ return gen_vmap_inplace_plumbing(native_function)
225
+
226
+ # Don't support these (mutable, out, scratch)
227
+ if schema.kind() != SchemaKind.functional:
228
+ return None
229
+
230
+ results_var = "results"
231
+ cur_level_var = "cur_level"
232
+
233
+ unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
234
+ bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)
235
+
236
+ wrapped_returns = gen_returns(returns, cur_level_var, results_var)
237
+ return f"""\
238
+ template <typename batch_rule_t, batch_rule_t batch_rule>
239
+ {sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
240
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
241
+ auto maybe_layer = maybeCurrentDynamicLayer();
242
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
243
+ int64_t {cur_level_var} = maybe_layer->layerId();
244
+ {textwrap.indent(bdims_all_none_case, " ")}
245
+ {textwrap.indent(unwraps, " ")}
246
+ auto {results_var} = batch_rule({', '.join(unwrapped_arg_list)});
247
+ {wrapped_returns}
248
+ }}"""
249
+
250
+
251
+ @dataclass(frozen=True)
252
+ class ComputeBatchRulePlumbing:
253
+ @method_with_native_function
254
+ def __call__(self, f: NativeFunction) -> str | None:
255
+ result = gen_vmap_plumbing(f)
256
+ return result
257
+
258
+
259
+ def gen_all_vmap_plumbing(native_functions: Sequence[NativeFunction]) -> str:
260
+ body = "\n".join(list(mapMaybe(ComputeBatchRulePlumbing(), native_functions)))
261
+ return f"""
262
+ #pragma once
263
+ #include <ATen/Operators.h>
264
+ #include <ATen/functorch/PlumbingHelper.h>
265
+
266
+ namespace at {{ namespace functorch {{
267
+
268
+ {body}
269
+
270
+ }}}} // namespace at::functorch
271
+ """
vllm/lib/python3.10/site-packages/torchgen/local.py ADDED
@@ -0,0 +1,59 @@
1
+ from __future__ import annotations
2
+
3
+ import threading
4
+ from contextlib import contextmanager
5
+ from typing import Iterator
6
+
7
+
8
+ # Simple dynamic scoping implementation. The name "parametrize" comes
9
+ # from Racket.
10
+ #
11
+ # WARNING WARNING: LOOKING TO EDIT THIS FILE? Think carefully about
12
+ # why you need to add a toggle to the global behavior of code
13
+ # generation. The parameters here should really only be used
14
+ # for "temporary" situations, where we need to temporarily change
15
+ # the codegen in some cases because we cannot conveniently update
16
+ # all call sites, and are slated to be eliminated once all call
17
+ # sites are eliminated. If you don't have a plan for how to get there,
18
+ # DON'T add a new entry here.
19
+
20
+
21
+ class Locals(threading.local):
22
+ use_const_ref_for_mutable_tensors: bool | None = None
23
+ use_ilistref_for_tensor_lists: bool | None = None
24
+
25
+
26
+ _locals = Locals()
27
+
28
+
29
+ def use_const_ref_for_mutable_tensors() -> bool:
30
+ assert _locals.use_const_ref_for_mutable_tensors is not None, (
31
+ "need to initialize local.use_const_ref_for_mutable_tensors with "
32
+ "local.parametrize"
33
+ )
34
+ return _locals.use_const_ref_for_mutable_tensors
35
+
36
+
37
+ def use_ilistref_for_tensor_lists() -> bool:
38
+ assert _locals.use_ilistref_for_tensor_lists is not None, (
39
+ "need to initialize local.use_ilistref_for_tensor_lists with "
40
+ "local.parametrize"
41
+ )
42
+ return _locals.use_ilistref_for_tensor_lists
43
+
44
+
45
+ @contextmanager
46
+ def parametrize(
47
+ *, use_const_ref_for_mutable_tensors: bool, use_ilistref_for_tensor_lists: bool
48
+ ) -> Iterator[None]:
49
+ old_use_const_ref_for_mutable_tensors = _locals.use_const_ref_for_mutable_tensors
50
+ old_use_ilistref_for_tensor_lists = _locals.use_ilistref_for_tensor_lists
51
+ try:
52
+ _locals.use_const_ref_for_mutable_tensors = use_const_ref_for_mutable_tensors
53
+ _locals.use_ilistref_for_tensor_lists = use_ilistref_for_tensor_lists
54
+ yield
55
+ finally:
56
+ _locals.use_const_ref_for_mutable_tensors = (
57
+ old_use_const_ref_for_mutable_tensors
58
+ )
59
+ _locals.use_ilistref_for_tensor_lists = old_use_ilistref_for_tensor_lists
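+
+ # Hypothetical usage of the context manager above:
+ #   with parametrize(
+ #       use_const_ref_for_mutable_tensors=False, use_ilistref_for_tensor_lists=False
+ #   ):
+ #       assert not use_const_ref_for_mutable_tensors()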
vllm/lib/python3.10/site-packages/torchgen/model.py ADDED
The diff for this file is too large to render. See raw diff
 
vllm/lib/python3.10/site-packages/torchgen/native_function_generation.py ADDED
@@ -0,0 +1,646 @@
1
+ from __future__ import annotations
2
+
3
+ from collections import defaultdict
4
+ from typing import Sequence
5
+
6
+ import torchgen.api.dispatcher as dispatcher
7
+ from torchgen.api.translate import translate
8
+ from torchgen.api.types import Binding, DispatcherSignature, Expr
9
+ from torchgen.context import with_native_function
10
+ from torchgen.model import (
11
+ Annotation,
12
+ Argument,
13
+ BackendIndex,
14
+ BackendMetadata,
15
+ BaseOperatorName,
16
+ BaseTy,
17
+ BaseType,
18
+ DEFAULT_KERNEL_NAMESPACE,
19
+ DeviceCheckType,
20
+ DispatchKey,
21
+ FunctionSchema,
22
+ NativeFunction,
23
+ NativeFunctionsGroup,
24
+ OperatorName,
25
+ Return,
26
+ SchemaKind,
27
+ Variant,
28
+ )
29
+ from torchgen.utils import concatMap
30
+
31
+
32
+ # See Note: [Out ops with functional variants that don't get grouped properly]
33
+ OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY = [
34
+ # This has a functional variant, but it's currently marked private.
35
+ # This function should be marked private as well (*_backward ops aren't exposed to python anyway).
36
+ "adaptive_avg_pool3d_backward.grad_input",
37
+ # There's a functional variant, _slow_conv2d_backward.output_mask, that isn't grouped properly.
38
+ # Maybe we can kill this operator in favor of convolution_backward?
39
+ "_slow_conv2d_backward.grad_input",
40
+ ]
41
+
42
+
43
+ # See Note: [Mutable ops that cannot get an out variant]
44
+ MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT = [
45
+ # should be out=?
46
+ "_cummax_helper",
47
+ # should be out=?
48
+ "_cummin_helper",
49
+ ]
50
+
51
+ # All of these operators don't have any tensor like returns
52
+ FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT = [
53
+ "_assert_async", # no return
54
+ "_assert_async.msg", # no return
55
+ "_cslt_sparse_mm_search", # returns an int
56
+ "_assert_scalar", # no return
57
+ "_dimI", # returns an int
58
+ "_dimV", # returns an int
59
+ "_has_same_storage_numel", # returns a boolean
60
+ "_linalg_check_errors", # no return
61
+ "_local_scalar_dense", # returns a Scalar
62
+ "_nested_tensor_from_mask_left_aligned", # returns a boolean
63
+ "_nnz", # returns an int
64
+ "_use_cudnn_ctc_loss", # returns a boolean
65
+ "_use_cudnn_ctc_loss.Tensor", # returns a boolean
66
+ "_validate_compressed_sparse_indices", # no return
67
+ "allclose", # returns a boolean
68
+ "dense_dim", # returns an int
69
+ "equal", # returns a boolean
70
+ "is_coalesced", # returns a boolean
71
+ "is_pinned", # returns a boolean
72
+ "is_same_size", # returns a boolean
73
+ "is_set_to", # returns a boolean
74
+ "q_per_channel_axis", # returns an int
75
+ "q_scale", # returns a float
76
+ "q_zero_point", # returns an int
77
+ "qscheme", # returns a QScheme
78
+ "record_stream", # no return
79
+ "sparse_dim", # returns an int
80
+ "sym_constrain_range", # no return
81
+ "sym_constrain_range_for_size", # no return
82
+ "_nested_tensor_storage_offsets", # returns a vector of ints
83
+ "_chunk_grad_outputs_efficient_attention", # returns a bool
84
+ "_fused_sdp_choice", # returns an int
85
+ "_print", # no return
86
+ "_sink_tokens", # no return
87
+ "_nested_get_ragged_idx", # returns an int
88
+ ]
89
+
90
+ INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY = [
91
+ # polygamma and polygamma.out both exist, but have a
92
+ # pre-self arg (while polygamma_ does not)
93
+ # We should either fix this schema so it can be grouped properly,
94
+ # or allow the codegen to generate new functional/out= NativeFunctions for this op
95
+ # (which would require changing its overload name to prevent overload ambiguity).
96
+ "polygamma_"
97
+ ]
98
+
99
+
100
+ # Groups "similar" NativeFunctions together
101
+ # example add.Tensor, add_.Tensor, add.out
102
+ # "similar" NativeFunctions are all expected to have an identical `signature()`,
103
+ # but have differing SchemaKinds.
104
+ def pre_group_native_functions(
105
+ native_functions: Sequence[NativeFunction],
106
+ ) -> dict[FunctionSchema, dict[SchemaKind, NativeFunction]]:
107
+ pre_grouped_native_functions: dict[
108
+ FunctionSchema, dict[SchemaKind, NativeFunction]
109
+ ] = defaultdict(dict)
110
+ for f in native_functions:
111
+ d = pre_grouped_native_functions[f.func.signature()]
112
+ assert f.func.kind() not in d
113
+ d[f.func.kind()] = f
114
+ return pre_grouped_native_functions
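+
+ # Sketch of the resulting structure for the add overloads mentioned above (illustrative):
+ #   {<signature of add>: {
+ #       SchemaKind.functional: <add.Tensor>,
+ #       SchemaKind.inplace:    <add_.Tensor>,
+ #       SchemaKind.out:        <add.out>,
+ #   }}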
115
+
116
+
117
+ # Returns the out variant overload name given a base function overload name
118
+ def get_expected_out_variant_overload_name(overload_name: str | None) -> str:
119
+ return "out" if not overload_name else f"{overload_name}_out"
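+
+ # e.g. None or "" -> "out", "Scalar" -> "Scalar_out"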
120
+
121
+
122
+ # Helper function: given an inplace FunctionSchema, generate its corresponding out= variant
123
+ # Example before:
124
+ # _add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
125
+ # Example after:
126
+ # _add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out)
127
+ def self_to_out_signature(func: FunctionSchema) -> FunctionSchema:
128
+ # Generating an out= schema from an inplace schema.
129
+ assert func.kind() == SchemaKind.inplace
130
+ assert func.arguments.self_arg is not None
131
+ # The new out= schema has:
132
+ # - a new out argument with the same type as "func" (but with a mutable annotation)
133
+ # - The returns (if any) now alias the out= argument instead of "func"
134
+ # - an "out" overload name
135
+ return FunctionSchema(
136
+ name=func.name.remove_inplace().with_overload(
137
+ get_expected_out_variant_overload_name(func.name.overload_name)
138
+ ),
139
+ arguments=func.arguments.remove_self_annotation().with_out_args(
140
+ [
141
+ Argument(
142
+ name="out",
143
+ type=func.arguments.self_arg.argument.type,
144
+ default=None,
145
+ annotation=func.arguments.self_arg.argument.annotation,
146
+ )
147
+ ]
148
+ ),
149
+ returns=func.returns,
150
+ )
151
+
152
+
153
+ # Helper function: given a functional FunctionSchema, generate its corresponding out= variant
154
+ # Example before:
155
+ # _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None,
156
+ # bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
157
+ # Example after:
158
+ # _to_copy._out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None,
159
+ # Tensor(a!) out) -> Tensor(a!)
160
+ def functional_to_out_signature(func: FunctionSchema) -> FunctionSchema:
161
+ # Generating an out= schema from a functional schema.
162
+ assert func.kind() == SchemaKind.functional
163
+
164
+ new_returns, new_out_args = generate_out_args_from_schema(func)
165
+ # The new out= schema has:
166
+ # - one or more new out argument(s) with the same type as returns (but with a mutable annotation)
167
+ # - The returns now alias the out= arguments
168
+ # - an "_out" overload name
169
+ return FunctionSchema(
170
+ name=func.name.with_overload(
171
+ get_expected_out_variant_overload_name(func.name.overload_name)
172
+ ),
173
+ arguments=func.arguments.signature().with_out_args(
174
+ new_out_args,
175
+ ),
176
+ returns=tuple(new_returns),
177
+ )
178
+
179
+
180
+ # Helper function: given a function schema, generate corresponding out arguments, also the updated return annotations.
181
+ def generate_out_args_from_schema(
182
+ func: FunctionSchema,
183
+ ) -> tuple[list[Return], list[Argument]]:
184
+ # More of a sanity check - our existing restrictions on schemas should enforce that
185
+ # mutable schema kinds never return their mutable arguments.
186
+ assert not any(
187
+ r.annotation is not None and r.annotation.is_write for r in func.returns
188
+ )
189
+
190
+ tensorlike_rets = [r for r in func.returns if r.type.is_tensor_like()]
191
+ assert len(tensorlike_rets) > 0
192
+
193
+ used_annotations = concatMap(
194
+ lambda a: [] if a.annotation is None else a.annotation.alias_set,
195
+ func.arguments.flat_all,
196
+ )
197
+ valid_annotations = [
198
+ x for x in "abcdefghijklmnopqrstuvwxyz" if x not in used_annotations
199
+ ]
200
+
201
+ all_rets_are_tensors = all(r.type == BaseType(BaseTy.Tensor) for r in func.returns)
202
+
203
+ new_out_args: list[Argument] = []
204
+ # The end result of new_returns is that:
205
+ # - If every return is a plain tensor, then the new returns == the old returns, but with the out= alias annotations added.
206
+ # - Otherwise, none of the out arguments show up in the returns (and we're only left with non-tensor-like returns, if any).
207
+ new_returns: list[Return] = []
208
+ for i, r in enumerate(func.returns):
209
+ if r.type.is_tensor_like():
210
+ new_out = Argument(
211
+ name="out" if len(func.returns) == 1 else f"out{i}",
212
+ type=r.type,
213
+ default=None,
214
+ annotation=Annotation.parse(f"{valid_annotations[i]}!"),
215
+ )
216
+ new_out_args.append(new_out)
217
+ if all_rets_are_tensors:
218
+ # The convention for out= schemas is that they only return their out arguments
219
+ # if the return is a plain Tensor (or if it's a tuple of plain Tensors)
220
+ new_ret = Return(
221
+ name=None, type=new_out.type, annotation=new_out.annotation
222
+ )
223
+ new_returns.append(new_ret)
224
+ else:
225
+ new_returns.append(r)
226
+ return new_returns, new_out_args
227
+
228
+
229
+ # Helper function: given a mutable FunctionSchema, generate its corresponding out= variant
230
+ # Example before:
231
+ # _fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask) # noqa: B950
232
+ # Example after:
233
+ # _fused_moving_avg_obs_fq_helper._out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!)) # noqa: B950
234
+ def mutable_to_out_signature(func: FunctionSchema) -> FunctionSchema:
235
+ # Generating an out= schema from a mutable schema.
236
+ assert func.kind() == SchemaKind.mutable
237
+ # The new out= schema has:
238
+ # - Any non-aliased tensor-like returns are converted to mutable, aliased out= arguments
239
+ # (if the argument is a tensor then we also return it for method chaining,
240
+ # otherwise we return nothing)
241
+ # - an "out" overload name
242
+ #
243
+ # Note that:
244
+ # (1) This also means that we can *only* generate an out= variant from a mutable schema
245
+ # if the mutable schema has at least one tensor-like non-aliasing return.
246
+ # (2) The generated out= variant still has mutable positional arguments,
247
+ # but if necessary we could probably add another out= variant that also
248
+ # functionalizes the mutable arguments (a functional_out variant)
249
+
250
+ new_returns, new_out_args = generate_out_args_from_schema(func)
251
+
252
+ return FunctionSchema(
253
+ name=func.name.remove_inplace().with_overload(
254
+ get_expected_out_variant_overload_name(func.name.overload_name)
255
+ ),
256
+ arguments=func.arguments.with_out_args(new_out_args),
257
+ returns=tuple(new_returns),
258
+ )
259
+
260
+
261
+ # This function, given function of one SchemaKind, as well as a target SchemaKind,
262
+ # generates a new NativeFunction with the same properties, but using the target SchemaKind.
263
+ # We only actually generate functions for either functional or out= SchemaKinds.
264
+ # This function returns a tuple, with:
265
+ # - The generated NativeFunction
266
+ # - a dictionary of `BackendIndex` objects, describing which dispatch keys
267
+ # we will generate kernels for, for the new NativeFunction.
268
+ # Details are in the function, but we only generate composite kernels (in some cases) today.
269
+ def generate_function(
270
+ f: NativeFunction, k: SchemaKind
271
+ ) -> tuple[NativeFunction, dict[DispatchKey, dict[OperatorName, BackendMetadata]]]:
272
+ from torchgen.api import cpp
273
+
274
+ if k == SchemaKind.functional:
275
+ assert f.func.kind() != SchemaKind.functional
276
+ # The new "functional" NativeFunction has:
277
+ # - any mutable arguments have been converted into (immutable) returns.
278
+ # (if a mutable argument was not also a return, it gets converted to one)
279
+ # - "_functional" appended to the base name, ONLY IF this op has a mutable variant.
280
+ # See Note [Overload Ambiguity With Functional Variants]
281
+ # The default grouping logic in signature() actually already does this,
282
+ # so we can piggy-back off it (but we still want return names)
283
+ func = f.func.signature(keep_return_names=True).with_name(
284
+ OperatorName(
285
+ name=BaseOperatorName(
286
+ base=f.func.name.name.base,
287
+ inplace=False,
288
+ dunder_method=f.func.name.name.dunder_method,
289
+ # See Note [Overload Ambiguity With Functional Variants]
290
+ functional_overload=f.func.kind() == SchemaKind.mutable,
291
+ ),
292
+ overload_name=f.func.name.overload_name,
293
+ )
294
+ )
295
+ elif k == SchemaKind.out:
296
+ # We generate out= ops mostly just so that we can pair up NativeFunctions into groups easily,
297
+ # but at least today, there is no good reason to actually use them.
298
+ # We'll generate a dispatcher entry for them, but won't actually register any kernels for them.
299
+ if f.func.kind() == SchemaKind.inplace:
300
+ func = self_to_out_signature(f.func)
301
+ elif f.func.kind() == SchemaKind.mutable:
302
+ func = mutable_to_out_signature(f.func)
303
+ elif f.func.kind() == SchemaKind.functional:
304
+ func = functional_to_out_signature(f.func)
305
+ else:
306
+ raise AssertionError(
307
+ "We only bother generating out= functions from either inplace or mutable or functional variants"
308
+ )
309
+ else:
310
+ raise AssertionError(
311
+ "We currently only generate either functional or out= NativeFunctions"
312
+ )
313
+
314
+ # Generated kernel naming convention for out: <op_name>_<overload_name>. The reason for this is to
315
+ # disambiguate operator with the same name but different overload name, e.g., `randn.names_out` and
316
+ # `randn.generator_with_names_out`.
317
+ kernel_name = (
318
+ func.name.unambiguous_name()
319
+ if func.kind() == SchemaKind.out
320
+ else cpp.name(func)
321
+ )
322
+ if f.func.has_symint():
323
+ kernel_name += "_symint"
324
+ backend_metadata = {
325
+ DispatchKey.CompositeExplicitAutograd: {
326
+ func.name: BackendMetadata(
327
+ kernel=kernel_name,
328
+ structured=False,
329
+ cpp_namespace=DEFAULT_KERNEL_NAMESPACE,
330
+ )
331
+ }
332
+ }
333
+ tags = {"generated"} | set(
334
+ f.tags & {"nondeterministic_seeded", "view_copy", "pt2_compliant_tag"}
335
+ )
336
+
337
+ return (
338
+ NativeFunction(
339
+ func=func,
340
+ use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors,
341
+ # These generated fn's aren't meant to be user friendly; don't generate methods.
342
+ variants={Variant.function},
343
+ structured=False,
344
+ structured_delegate=None,
345
+ structured_inherits=None,
346
+ precomputed=None,
347
+ autogen=[],
348
+ ufunc_inner_loop={},
349
+ manual_kernel_registration=False,
350
+ manual_cpp_binding=False,
351
+ python_module=None,
352
+ category_override=None,
353
+ device_guard=False,
354
+ device_check=DeviceCheckType.NoCheck,
355
+ loc=f.loc,
356
+ cpp_no_default_args=set(),
357
+ is_abstract=f.is_abstract,
358
+ has_composite_implicit_autograd_kernel=False,
359
+ has_composite_implicit_autograd_nested_tensor_kernel=False,
360
+ has_composite_explicit_autograd_kernel=True,
361
+ has_composite_explicit_autograd_non_functional_kernel=False,
362
+ # Every generated NativeFunction gets a "generated" tag, so it's easy to tell
363
+ # which NativeFunction objects did not come directly from native_functions.yaml.
364
+ tags=tags,
365
+ namespace=f.namespace,
366
+ ),
367
+ backend_metadata,
368
+ )
369
+
370
+
371
+ # This function is responsible for adding generated NativeFunctions which don't appear
372
+ # explicitly in the codegen.
373
+ # You can inspect the full list of NativeFunctions yourself with the torchgen package, by running
374
+ # torchgen.parse_native_yaml("aten/src/ATen/native/native_functions.yaml", "aten/src/ATen/native/tags.yaml")
375
+ # (Maybe we should make a friendly API for this)
376
+ #
377
+ # Note: this function *mutates* its two inputs,
378
+ # adding the new NativeFunctions / BackendMetadata to them
379
+ def add_generated_native_functions(
380
+ rs: list[NativeFunction],
381
+ indices: dict[DispatchKey, dict[OperatorName, BackendMetadata]],
382
+ ) -> None:
383
+ # The main code for generating new NativeFunctions
384
+ # First we group of NativeFunctions by schema kind,
385
+ # then we detect which ones are missing and generate them.
386
+ pre_grouped_native_functions = pre_group_native_functions(rs)
387
+ for d in pre_grouped_native_functions.values():
388
+ has_functional = SchemaKind.functional in d
389
+ has_inplace = SchemaKind.inplace in d
390
+ has_mutable = SchemaKind.mutable in d
391
+ has_out = SchemaKind.out in d
392
+
393
+ # We automatically generate a few native functions that don't exist in the yaml, for a few reasons:
394
+ # (1) If an operator has an inplace/out= variant but no functional variant, we can generate
395
+ # a simple functional variant that the functionalization pass can consume.
396
+ # (2) If an operator has an inplace or functional but no out= variant, we generate an out=
397
+ # variant, mostly so we can easily pair up functions into NativeFunctionsGroup,
398
+ # while maintaining the constraint that the out= variant is "required".
399
+ if has_mutable or has_inplace or has_out or has_functional:
400
+ # Don't bother generating function trios for native functions that bypass the dispatcher.
401
+ are_manual = all(f.manual_cpp_binding for f in d.values())
402
+ # Don't bother generating functional + out= variants for view operators
403
+ # set_ is technically an inplace_view, but for now it is treated
404
+ # as a normal inplace op in the codegen
405
+ has_view_ops = any(
406
+ f.is_view_op and str(f.func.name.name) != "set_" for f in d.values()
407
+ )
408
+ # Don't generate the other variants for CompositeImplicitAutograd operators.
409
+ # We could probably do this, but the main benefit of generating the function triplets
410
+ # is for transforms that need them, and transforms don't need to act directly
411
+ # on CompositeImplicitAutograd operators (since we let them decompose).
412
+ are_composite_implicit = all(
413
+ f.has_composite_implicit_autograd_kernel for f in d.values()
414
+ )
415
+ if are_manual or has_view_ops or are_composite_implicit:
416
+ continue
417
+ if has_out and len(d.values()) == 1:
418
+ # Note: [Out ops with functional variants that don't get grouped properly]
419
+ # In theory we could validly have an out= operator in native_functions.yaml
420
+ # that has no other variants.
421
+ # But today, all of the operators where that's the case actually do have
422
+ # functional variants, that we are just unable to pair up properly.
423
+ # I think banning this altogether is probably safer
424
+ # (you can always add a functional variant yourself if you want to add a new out= operator).
425
+ #
426
+ # We should probably fix the existing cases; this check is to prevent us from adding more over time.
427
+ if (
428
+ str(d[SchemaKind.out].func.name)
429
+ not in OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY
430
+ ):
431
+ raise AssertionError(
432
+ f"Found an out= operator that we could not find any other variants of: {str(d[SchemaKind.out].func)}"
433
+ )
434
+ continue
435
+
436
+ # Some inplace ops that have problematic schemas (that we should fix), which prevent us
437
+ # from generating out= and functional variants
438
+ if (
439
+ has_inplace
440
+ and str(d[SchemaKind.inplace].func.name)
441
+ in INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY
442
+ ):
443
+ continue
444
+
445
+ base_fn = (
446
+ d[SchemaKind.inplace]
447
+ if has_inplace
448
+ else d[SchemaKind.mutable]
449
+ if has_mutable
450
+ else d[SchemaKind.out]
451
+ if has_out
452
+ else d[SchemaKind.functional]
453
+ )
454
+
455
+ # Note: [Mutable ops that cannot get an out variant]
456
+ # We can only generate an out= variant if either:
457
+ # - the original function has tensor-like returns (since we can convert them to out kwargs)
458
+ # - or it's inplace (since we can convert `self` to an out kwarg)
459
+ # There are only two functions that don't fit this criteria today though,
460
+ # and they both look like they should be fixed to be out= variants,
461
+ # so it feels safer to ban this schema altogether
462
+ base_fn_valid = base_fn.func.kind() == SchemaKind.inplace or any(
463
+ r.type.is_tensor_like() for r in base_fn.func.returns
464
+ )
465
+ # Note: [Loosen the assertion that all functional should have out variant]
466
+ # By design all functional operators should have out= variants. The needs_out check
468
+ # loosens this requirement to only generate an out= variant if there's
469
+ # an `autogen` block in the native function; in the long run it should be removed.
469
+ # FIXME: Remove this after figuring out CI job failures related to min, max, mean
470
+ needs_out = any("out" in str(op_name) for op_name in base_fn.autogen)
471
+ gets_out_variant = not has_out and base_fn_valid and needs_out
472
+ if not has_out and not base_fn_valid:
473
+ if (
474
+ str(base_fn.func.name)
475
+ not in MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
476
+ and str(base_fn.func.name)
477
+ not in FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
478
+ ):
479
+ raise AssertionError(
480
+ f"""Found an operator that we could not generate an out= variant for: {str(base_fn.func)}.
481
+ This type of operator has no tensor-like return, making it difficult to generate a proper out= variant. If an
482
+ out= variant is not needed, please add the function name to the FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT list."""
483
+ )
484
+
485
+ # Generate an out= variant
486
+ if gets_out_variant:
487
+ fn, metadata = generate_function(base_fn, SchemaKind.out)
488
+ d[SchemaKind.out] = fn
489
+ BackendIndex.grow_index(indices, metadata)
490
+ rs.append(fn)
491
+
492
+ # Generate a functional variant, but only do it if the operator got an out= variant
493
+ # (Functional variants are only useful if we can group up the variants,
494
+ # which we can only do if they have an out= variant)
495
+ if not has_functional and (has_out or gets_out_variant):
496
+ fn, metadata = generate_function(base_fn, SchemaKind.functional)
497
+ d[SchemaKind.functional] = fn
498
+ BackendIndex.grow_index(indices, metadata)
499
+ rs.append(fn)
500
+
501
+
502
+ def return_str(rets: tuple[Return, ...], names: list[str]) -> str:
503
+ assert len(rets) == len(names)
504
+ if len(rets) == 0:
505
+ return ""
506
+ elif len(rets) == 1:
507
+ return f"return {names[0]};"
508
+ else:
509
+ return f"return {dispatcher.returns_type(rets).cpp_type()}({', '.join(names)});"
510
+
511
+
512
+ # Given a function, and the name of a variable corresponding to the output of that function,
513
+ # gather up all of the individual returns that are not aliased
514
+ def gather_nonaliased_inner_rets(func: FunctionSchema, out_var: str) -> list[str]:
515
+ aliased_rets = func.aliased_return_names()
516
+ non_aliased_names = []
517
+ is_out_var_a_tuple = len(func.returns) > 1
518
+ for i, r in enumerate(aliased_rets):
519
+ if r is None:
520
+ non_aliased_names.append(
521
+ f"std::get<{i}>({out_var})" if is_out_var_a_tuple else out_var
522
+ )
523
+ return non_aliased_names
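+
+ # e.g. with returns (Tensor(a!), Tensor) and out_var == "output", only the second,
+ # non-aliased return is gathered: ["std::get<1>(output)"].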
524
+
525

# Generates functional kernels in terms of their inplace.mutable counterparts.
# We only do this for "generated" NativeFunctions
@with_native_function
def gen_composite_functional_kernel(g: NativeFunctionsGroup) -> str | None:
    # We should only be generating these for code-generated NativeFunctions
    if "generated" not in g.functional.tags:
        return None
    # And we always write the kernel for a generated op in terms of a non-generated op.
    if g.inplace is not None and "generated" not in g.inplace.tags:
        target_f = g.inplace
    elif g.mutable is not None and "generated" not in g.mutable.tags:
        target_f = g.mutable
    else:
        # We should be guaranteed to have a valid inplace/mutable variant to call into.
        # See Note: [Mutable Ops Not Using Functionalization]
        raise AssertionError(str(g.functional.func))

    sig = DispatcherSignature(g.functional.func)
    target_sig = DispatcherSignature(target_f.func)

    context: list[Binding | Expr] = []
    clone_mutable_inputs = []
    cloned_return_names = []
    # We can't just directly pass all of the arguments from the functional op into the mutating op.
    # We need to check for which inputs to the mutating operator are mutable,
    # and clone those inputs first.
    for a_curr, a_tgt in zip(
        dispatcher.jit_arguments(g.functional.func),
        dispatcher.jit_arguments(target_f.func),
    ):
        if a_tgt.annotation is not None and a_tgt.annotation.is_write:
            clone_mutable_inputs.append(
                f"auto {a_curr.name}_clone = clone_arg({a_curr.name});"
            )
            context.append(
                Expr(
                    expr=f"{a_curr.name}_clone",
                    type=dispatcher.argument_type(a_curr, binds=a_curr.name),
                )
            )
            # Invariant: mutable arguments on the inner mutable op are always returns on the functional op.
            cloned_return_names.append(f"{a_curr.name}_clone")
        else:
            context.append(dispatcher.argument(a_curr))
    exprs = ", ".join([e.expr for e in translate(context, target_sig.arguments())])

    out_name = "output"
    maybe_assign = f"auto {out_name} = " if len(target_f.func.returns) > 0 else ""
    inner_return_names = gather_nonaliased_inner_rets(target_f.func, out_name)
    ret_str = return_str(
        g.functional.func.returns, inner_return_names + cloned_return_names
    )

    clone_mutable_inputs_str = "\n".join(clone_mutable_inputs)
    return f"""
{sig.defn(name=sig.name() + ("_symint" if g.out.func.has_symint() else ""))} {{
  {clone_mutable_inputs_str}
  {maybe_assign}at::_ops::{target_f.func.name.unambiguous_name()}::call({exprs});
  {ret_str}
}}
"""

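# For orientation, a hypothetical expansion of the template above (names are
# illustrative, not taken from real generated output): for an inplace op
# "foo_(Tensor(a!) self) -> Tensor(a!)", the generated functional kernel would
# look roughly like:
#
#   at::Tensor foo(const at::Tensor & self) {
#     auto self_clone = clone_arg(self);
#     at::_ops::foo_::call(self_clone);
#     return self_clone;
#   }
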
# Generates out= kernels in terms of their functional counterparts.
# We only do this for "generated" NativeFunctions
@with_native_function
def gen_composite_out_kernel(g: NativeFunctionsGroup) -> str | None:
    # We should only be generating these for code-generated NativeFunctions
    if "generated" not in g.out.tags:
        return None
    # And we always write the kernel for the out= op in terms of the functional.
    # Note that the functional op might have also been generated, but we don't have to
    # worry about cycles, because the generated functional kernels are always implemented
    # in terms of non-generated kernels (see gen_composite_functional_kernel).

    sig = DispatcherSignature(g.out.func)
    target_sig = DispatcherSignature(g.functional.func)

    exprs = ", ".join(
        [e.expr for e in translate(sig.arguments(), target_sig.arguments())]
    )

    copy_outs = []
    out_name = "tmp_output"
    for i, out_arg in enumerate(g.out.func.arguments.out):
        functional_return_name = (
            out_name
            if len(g.functional.func.returns) == 1
            else f"std::get<{i}>({out_name})"
        )
        copy_outs.append(
            f"""\
  resize_out_helper({out_arg.name}, {functional_return_name});
  copy_arg({out_arg.name}, {functional_return_name});"""
        )

    rets = []
    # For each return arg in the calling (out=) operator,
    # if it corresponds to an aliased input, return the input;
    # otherwise, return the corresponding output from calling the functional operator.
    for i, ret_name in enumerate(g.out.func.aliased_return_names()):
        if ret_name is not None:
            rets.append(ret_name)
        else:
            functional_return_name = (
                out_name
                if len(g.functional.func.returns) == 1
                else f"std::get<{i}>({out_name})"
            )
            rets.append(functional_return_name)

    copy_outs_str = "\n".join(copy_outs)

    # Kernel name needs to follow the naming convention defined in `generate_function()`
    return f"""
{sig.defn(name=g.out.func.name.unambiguous_name() + ("_symint" if g.out.func.has_symint() else ""))} {{
  auto {out_name} = at::_ops::{g.functional.func.name.unambiguous_name()}::call({exprs});
  {copy_outs_str}
  {return_str(g.out.func.returns, rets)}
}}
"""
vllm/lib/python3.10/site-packages/torchgen/operator_versions/__init__.py ADDED
File without changes
vllm/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (176 Bytes). View file
 
vllm/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders.cpython-310.pyc ADDED
Binary file (9.85 kB). View file
 
vllm/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders_constant.cpython-310.pyc ADDED
Binary file (435 Bytes). View file
 
vllm/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py ADDED
@@ -0,0 +1,395 @@
#!/usr/bin/env python3

from __future__ import annotations

import os
from enum import Enum
from operator import itemgetter
from pathlib import Path
from typing import Any

import torch
from torch.jit.generate_bytecode import generate_upgraders_bytecode
from torchgen.code_template import CodeTemplate
from torchgen.operator_versions.gen_mobile_upgraders_constant import (
    MOBILE_UPGRADERS_HEADER_DESCRIPTION,
)


class ByteCode(Enum):
    instructions = 1
    constants = 2
    types = 3
    operators = 4
    register_size = 5

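# Note: the ByteCode member names deliberately mirror the table keys in the
# upgrader bytecode dicts produced by generate_upgraders_bytecode(); write_cpp
# below dispatches on them via ByteCode[table_name].
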
EXCLUDED_OP_SET = [
    "aten::full.names",
    "aten::full.out",
    "aten::full",
]

EXCLUDED_UPGRADER_SET = ["full_0_4", "full_out_0_4"]

ONE_INSTRUCTION = CodeTemplate(
    """
    Instruction{OpCode::${operator_name}, ${X}, ${N}},"""
)

INSTRUCTION_LIST = CodeTemplate(
    """std::vector<Instruction>({
        ${instruction_list}
    }), // instructions list"""
)

ONE_CONSTANT = CodeTemplate(
    """
    c10::IValue(${constant}),"""
)

CONSTANT_LIST = CodeTemplate(
    """std::vector<c10::IValue>({
        ${constant_list}
    }), // constants list"""
)

CONSTANTS_LIST_EMPTY = """std::vector<c10::IValue>(), // constants list"""

ONE_TYPE = CodeTemplate("""c10::parseType("${type_str}"),""")

TYPE_LIST = CodeTemplate(
    """std::vector<c10::TypePtr>({
        ${type_list}
    }), // types list"""
)

TYPE_LIST_EMPTY = """std::vector<c10::TypePtr>(), // types list"""

ONE_OPERATOR_STRING = CodeTemplate(
    """
    OperatorString({"${operator_name}", "${overload_name}", ${num_of_args}}),"""
)

OPERATOR_STRING_LIST = CodeTemplate(
    """
    std::vector<OperatorString>({
        ${operator_string_list}
    }), // operators list"""
)

ONE_UPGRADER_FUNCTION = CodeTemplate(
    """
    mobile::Function::registerFunc(
        "${upgrader_name}",
        ${instruction_list},
        ${constant_list},
        ${type_list},
        ${register_size}
    )"""
)

ONE_UPGRADER_SRC = CodeTemplate(
    """
    ByteCodeFunctionWithOperator({
        ${bytecode_function},
        ${operator_string_list}
    }),"""
)


ONE_UPGRADER_IN_VERSION_MAP = CodeTemplate(
    """Upgrader({${upgrader_min_version}, ${upgrader_max_version}, "${upgrader_name}", ${bytecode_func_index}})"""
)  # noqa: E501

ONE_OPERATOR_IN_VERSION_MAP = CodeTemplate(
    """
    {std::string("${operator_name}"),
        std::vector<Upgrader>({
            ${upgrader_list_in_version_map}
        })},"""
)


OPERATOR_VERSION_MAP = CodeTemplate(
    """
const std::unordered_map<std::string, std::vector<Upgrader>>
getOperatorVersionMapForMobile() {
  static std::unordered_map<std::string, std::vector<Upgrader>>
        operatorVersionMapForMobile({
            ${operator_list_in_version_map}
      });
  return operatorVersionMapForMobile;
}
"""
)


UPGRADER_CPP_SRC = CodeTemplate(
    MOBILE_UPGRADERS_HEADER_DESCRIPTION
    + """
#include <caffe2/serialize/versions.h>
#include <torch/csrc/jit/mobile/upgrader_mobile.h>

namespace c10 {
TypePtr parseType(const std::string& pythonStr);
} // namespace c10

namespace torch {
namespace jit {

// clang-format off

// From operator_versions_map
${operator_version_map}

const std::vector<ByteCodeFunctionWithOperator>& getUpgraderBytecodeList() {
  auto generate_upgrader_bytecode_list = []() {
    std::vector<ByteCodeFunctionWithOperator> upgrader_function_list({
      ${upgrader_bytecode}
    });
    for (const auto& upgrader_function : upgrader_function_list) {
      for (const auto& op : upgrader_function.operators) {
        upgrader_function.function.append_operator(
            op.name,
            op.overload_name,
            op.num_specified_args);
      }
    }
    return upgrader_function_list;
  };
  static std::vector<ByteCodeFunctionWithOperator> upgraderBytecodeList =
      generate_upgrader_bytecode_list();
  return upgraderBytecodeList;
}

// clang-format on

} // namespace jit
} // namespace torch
"""
)

UPGRADER_MOBILE_FILE_NAME = "upgrader_mobile.cpp"

UPGRADER_ELEMENT = CodeTemplate(
    """\
Upgrader({${min_version}, ${max_version}, ${operator_name}, ${index}}),
"""
)

PER_OPERATOR_UPGRADER_LIST = CodeTemplate(
    """\
{
  std::string(${operator_name}),
  std::vector<Upgrader>({${upgrader_list}});
}
"""
)

192
+ instruction_list_part = []
193
+ for instruction in instruction_list_from_yaml:
194
+ instruction_list_part.append(
195
+ ONE_INSTRUCTION.substitute(
196
+ operator_name=instruction[0],
197
+ X=instruction[1],
198
+ N=instruction[2],
199
+ )
200
+ )
201
+ return INSTRUCTION_LIST.substitute(
202
+ instruction_list="".join(instruction_list_part).lstrip("\n")
203
+ )
204
+
205
+
206
+ def construct_constants(constants_list_from_yaml: list[Any]) -> str:
207
+ constants_list_part = []
208
+ for constant_from_yaml in constants_list_from_yaml:
209
+ convert_constant = None
210
+ if isinstance(constant_from_yaml, str):
211
+ # Add quotes if it's string
212
+ convert_constant = f'"{constant_from_yaml}"'
213
+ elif isinstance(constant_from_yaml, bool):
214
+ convert_constant = "true" if constant_from_yaml else "false"
215
+ elif constant_from_yaml is None:
216
+ convert_constant = ""
217
+ elif isinstance(constant_from_yaml, int):
218
+ convert_constant = str(constant_from_yaml)
219
+ else:
220
+ raise ValueError(
221
+ f"The type of {constant_from_yaml} is {type(constant_from_yaml)}. "
222
+ "Please add change in construct_constants function in gen_mobile_upgraders.py."
223
+ )
224
+ constants_list_part.append(ONE_CONSTANT.substitute(constant=convert_constant))
225
+ if len(constants_list_part) == 0:
226
+ return CONSTANTS_LIST_EMPTY
227
+ return CONSTANT_LIST.substitute(
228
+ constant_list="".join(constants_list_part).lstrip("\n")
229
+ )
230
+
231
+
232
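# Illustrative mapping (hypothetical input): [1, True, None, "cpu"] renders as
#   c10::IValue(1), c10::IValue(true), c10::IValue(), c10::IValue("cpu"),
# wrapped in the std::vector<c10::IValue>({...}) constants list above.
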
def construct_operators(operator_list_from_yaml: list[Any]) -> str:
    operator_list_part = []
    for operator in operator_list_from_yaml:
        operator_list_part.append(
            ONE_OPERATOR_STRING.substitute(
                operator_name=operator[0],
                overload_name=operator[1],
                num_of_args=operator[2],
            )
        )
    return OPERATOR_STRING_LIST.substitute(
        operator_string_list="".join(operator_list_part).lstrip("\n")
    )

def construct_types(types_tr_list_from_yaml: list[Any]) -> str:
    types_tr_list_part = []
    for types_tr in types_tr_list_from_yaml:
        types_tr_list_part.append(ONE_TYPE.substitute(type_str=types_tr))
    if len(types_tr_list_part) == 0:
        return TYPE_LIST_EMPTY
    return TYPE_LIST.substitute(type_list="".join(types_tr_list_part).lstrip("\n"))

def construct_register_size(register_size_from_yaml: int) -> str:
    if not isinstance(register_size_from_yaml, int):
        raise ValueError(
            f"Input register size is {register_size_from_yaml} and "
            f"its type is {type(register_size_from_yaml)}. An int type is expected."
        )
    return str(register_size_from_yaml)

def construct_version_maps(
    upgrader_bytecode_function_to_index_map: dict[str, Any],
) -> str:
    version_map = torch._C._get_operator_version_map()
    sorted_version_map_ = sorted(version_map.items(), key=itemgetter(0))  # type: ignore[no-any-return]
    sorted_version_map = dict(sorted_version_map_)

    operator_list_in_version_map_part = []
    for op_name in sorted_version_map:
        upgraders_in_version_map_part = []
        # TODO: remove the skip after these two operators' schemas are fixed
        if op_name in EXCLUDED_OP_SET:
            continue
        upgrader_ranges = torch._C._get_upgrader_ranges(op_name)
        upgrader_entries = sorted_version_map[op_name]
        assert len(upgrader_ranges) == len(upgrader_entries)
        for idx, upgrader_entry in enumerate(upgrader_entries):
            upgrader_name = upgrader_entry.upgrader_name
            bytecode_function_index = upgrader_bytecode_function_to_index_map[
                upgrader_name
            ]
            upgraders_in_version_map_part.append(
                ONE_UPGRADER_IN_VERSION_MAP.substitute(
                    upgrader_min_version=upgrader_ranges[idx].min_version,
                    upgrader_max_version=upgrader_ranges[idx].max_version,
                    upgrader_name=upgrader_name,
                    bytecode_func_index=bytecode_function_index,
                )
            )
        operator_list_in_version_map_part.append(
            ONE_OPERATOR_IN_VERSION_MAP.substitute(
                operator_name=op_name,
                upgrader_list_in_version_map="".join(upgraders_in_version_map_part),
            )
        )
    return OPERATOR_VERSION_MAP.substitute(
        operator_list_in_version_map="".join(operator_list_in_version_map_part).lstrip(
            "\n"
        )
    )

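# Illustrative entry (hypothetical op, versions, and index): for an operator
# "aten::div.Tensor" with one upgrader valid for file format versions 0
# through 3, the rendered map entry would look roughly like:
#
#   {std::string("aten::div.Tensor"),
#       std::vector<Upgrader>({
#           Upgrader({0, 3, "div_Tensor_0_3", 2})
#       })},
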
def get_upgrader_bytecode_function_to_index_map(
    upgrader_dict: list[dict[str, Any]],
) -> dict[str, Any]:
    upgrader_bytecode_function_to_index_map = {}
    index = 0
    for upgrader_bytecode in upgrader_dict:
        for upgrader_name in upgrader_bytecode.keys():
            if upgrader_name in EXCLUDED_UPGRADER_SET:
                continue
            upgrader_bytecode_function_to_index_map[upgrader_name] = index
            index += 1
    return upgrader_bytecode_function_to_index_map

def write_cpp(cpp_path: str, upgrader_dict: list[dict[str, Any]]) -> None:
    body_parts = []
    upgrader_bytecode_function_to_index_map = (
        get_upgrader_bytecode_function_to_index_map(upgrader_dict)
    )
    version_map_src = construct_version_maps(upgrader_bytecode_function_to_index_map)
    all_upgrader_src_string = []
    for upgrader_bytecode in upgrader_dict:
        for upgrader_name, bytecode in upgrader_bytecode.items():
            # TODO: remove the skip after these two operators' schemas are fixed
            if upgrader_name in EXCLUDED_UPGRADER_SET:
                continue
            instruction_list_str = ""
            constant_list_str = ""
            type_list_str = ""
            register_size_str = ""
            operator_list_str = ""
            for table_name, contents in bytecode.items():
                element = ByteCode[table_name]
                if element is ByteCode.instructions:
                    instruction_list_str = construct_instruction(contents)
                elif element is ByteCode.constants:
                    constant_list_str = construct_constants(contents)
                elif element is ByteCode.operators:
                    operator_list_str = construct_operators(contents)
                elif element is ByteCode.types:
                    type_list_str = construct_types(contents)
                elif element is ByteCode.register_size:
                    register_size_str = construct_register_size(contents)

            one_upgrader_function_string = ONE_UPGRADER_FUNCTION.substitute(
                upgrader_name=upgrader_name,
                instruction_list=instruction_list_str,
                constant_list=constant_list_str,
                type_list=type_list_str,
                register_size=register_size_str,
            )
            one_upgrader_src_string = ONE_UPGRADER_SRC.substitute(
                bytecode_function=one_upgrader_function_string.lstrip("\n"),
                operator_string_list=operator_list_str.lstrip("\n"),
            )
            all_upgrader_src_string.append(one_upgrader_src_string)

    upgrader_file_content = UPGRADER_CPP_SRC.substitute(
        operator_version_map=version_map_src,
        upgrader_bytecode="".join(all_upgrader_src_string).lstrip("\n"),
    )
    body_parts.append(upgrader_file_content)
    print("writing file to : ", os.path.join(cpp_path, UPGRADER_MOBILE_FILE_NAME))
    with open(os.path.join(cpp_path, UPGRADER_MOBILE_FILE_NAME), "wb") as out_file:
        final_output = "".join(body_parts)
        out_file.write(final_output.encode("utf-8"))

def sort_upgrader(upgrader_list: list[dict[str, Any]]) -> list[dict[str, Any]]:
    sorted_upgrader_list = sorted(
        upgrader_list, key=lambda one_upgrader: next(iter(one_upgrader))
    )
    return sorted_upgrader_list

def main() -> None:
    upgrader_list = generate_upgraders_bytecode()
    sorted_upgrader_list = sort_upgrader(upgrader_list)
    for up in sorted_upgrader_list:
        print("after sort upgrader : ", next(iter(up)))

    pytorch_dir = Path(__file__).resolve().parents[2]
    upgrader_path = pytorch_dir / "torch" / "csrc" / "jit" / "mobile"
    write_cpp(str(upgrader_path), sorted_upgrader_list)


if __name__ == "__main__":
    main()
vllm/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders_constant.py ADDED
@@ -0,0 +1,7 @@
MOBILE_UPGRADERS_HEADER_DESCRIPTION = """/**
 * @generated
 * This is an auto-generated file. Please do not modify it by hand.
 * To re-generate, please run:
 * cd ~/pytorch && python torchgen/operator_versions/gen_mobile_upgraders.py
 */
"""
vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/BUILD.bazel ADDED
@@ -0,0 +1,4 @@
load("//:tools/bazel.bzl", "rules")
load(":build.bzl", "define_targets")

define_targets(rules = rules)
vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/__init__.py ADDED
File without changes
vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_annotated_fn_args.cpython-310.pyc ADDED
Binary file (4.34 kB). View file
 
vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd_functions.cpython-310.pyc ADDED
Binary file (21 kB). View file
 
vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_python_functions.cpython-310.pyc ADDED
Binary file (28.7 kB). View file
 
vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_factories.cpython-310.pyc ADDED
Binary file (3.98 kB). View file
 
vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/load_derivatives.cpython-310.pyc ADDED
Binary file (24.8 kB). View file
 
vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/derivatives.yaml ADDED
The diff for this file is too large to render. See raw diff
 
vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_annotated_fn_args.py ADDED
@@ -0,0 +1,132 @@
"""
For procedural tests needed for __torch_function__, we use this function
to export method names and signatures as needed by the tests in
test/test_overrides.py.

python -m tools.autograd.gen_annotated_fn_args \
       aten/src/ATen/native/native_functions.yaml \
       aten/src/ATen/native/tags.yaml \
       $OUTPUT_DIR \
       tools/autograd

Where $OUTPUT_DIR is where you would like the files to be
generated. In the full build system, OUTPUT_DIR is
torch/testing/_internal/generated
"""

from __future__ import annotations

import argparse
import os
import textwrap
from collections import defaultdict
from typing import Any, Sequence, TYPE_CHECKING

import torchgen.api.python as python
from torchgen.context import with_native_function
from torchgen.gen import parse_native_yaml
from torchgen.utils import FileManager

from .gen_python_functions import (
    is_py_fft_function,
    is_py_linalg_function,
    is_py_nn_function,
    is_py_special_function,
    is_py_torch_function,
    is_py_variable_method,
    should_generate_py_binding,
)


if TYPE_CHECKING:
    from torchgen.model import Argument, BaseOperatorName, NativeFunction

def gen_annotated(
    native_yaml_path: str, tags_yaml_path: str, out: str, autograd_dir: str
) -> None:
    native_functions = parse_native_yaml(
        native_yaml_path, tags_yaml_path
    ).native_functions
    mappings = (
        (is_py_torch_function, "torch._C._VariableFunctions"),
        (is_py_nn_function, "torch._C._nn"),
        (is_py_linalg_function, "torch._C._linalg"),
        (is_py_special_function, "torch._C._special"),
        (is_py_fft_function, "torch._C._fft"),
        (is_py_variable_method, "torch.Tensor"),
    )
    annotated_args: list[str] = []
    for pred, namespace in mappings:
        groups: dict[BaseOperatorName, list[NativeFunction]] = defaultdict(list)
        for f in native_functions:
            if not should_generate_py_binding(f) or not pred(f):
                continue
            groups[f.func.name.name].append(f)
        for group in groups.values():
            for f in group:
                annotated_args.append(f"{namespace}.{gen_annotated_args(f)}")

    template_path = os.path.join(autograd_dir, "templates")
    fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
    fm.write_with_template(
        "annotated_fn_args.py",
        "annotated_fn_args.py.in",
        lambda: {
            "annotated_args": textwrap.indent("\n".join(annotated_args), "    "),
        },
    )

@with_native_function
def gen_annotated_args(f: NativeFunction) -> str:
    def _get_kwargs_func_exclusion_list() -> list[str]:
        # functions that currently don't work with kwargs in test_overrides.py
        return [
            "diagonal",
            "round_",
            "round",
            "scatter_",
        ]

    def _add_out_arg(
        out_args: list[dict[str, Any]], args: Sequence[Argument], *, is_kwarg_only: bool
    ) -> None:
        for arg in args:
            if arg.default is not None:
                continue
            out_arg: dict[str, Any] = {}
            out_arg["is_kwarg_only"] = str(is_kwarg_only)
            out_arg["name"] = arg.name
            out_arg["simple_type"] = python.argument_type_str(
                arg.type, simple_type=True
            )
            size_t = python.argument_type_size(arg.type)
            if size_t:
                out_arg["size"] = size_t
            out_args.append(out_arg)

    out_args: list[dict[str, Any]] = []
    _add_out_arg(out_args, f.func.arguments.flat_positional, is_kwarg_only=False)
    if f"{f.func.name.name}" not in _get_kwargs_func_exclusion_list():
        _add_out_arg(out_args, f.func.arguments.flat_kwarg_only, is_kwarg_only=True)

    return f"{f.func.name.name}: {repr(out_args)},"

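# Illustrative output (hypothetical op): gen_annotated_args might render a line
# such as
#   add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'},
#         {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}],
# which gen_annotated above then prefixes with a namespace such as
# torch._C._VariableFunctions.
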
def main() -> None:
    parser = argparse.ArgumentParser(description="Generate annotated_fn_args script")
    parser.add_argument(
        "native_functions", metavar="NATIVE", help="path to native_functions.yaml"
    )
    parser.add_argument("tags", metavar="TAGS", help="path to tags.yaml")
    parser.add_argument("out", metavar="OUT", help="path to output directory")
    parser.add_argument(
        "autograd", metavar="AUTOGRAD", help="path to template directory"
    )
    args = parser.parse_args()
    gen_annotated(args.native_functions, args.tags, args.out, args.autograd)


if __name__ == "__main__":
    main()
vllm/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_autograd.py ADDED
@@ -0,0 +1,147 @@
"""
To run this file by hand from the root of the PyTorch
repository, run:

python -m tools.autograd.gen_autograd \
       aten/src/ATen/native/native_functions.yaml \
       aten/src/ATen/native/tags.yaml \
       $OUTPUT_DIR \
       tools/autograd

Where $OUTPUT_DIR is where you would like the files to be
generated. In the full build system, OUTPUT_DIR is
torch/csrc/autograd/generated/
"""

# gen_autograd.py generates C++ autograd functions and Python bindings.
#
# It delegates to the following scripts:
#
#  gen_autograd_functions.py: generates subclasses of torch::autograd::Node
#  gen_variable_type.py: generates VariableType.h which contains all tensor methods
#  gen_python_functions.py: generates Python bindings to THPVariable
#

from __future__ import annotations

import argparse
import os

from torchgen.api import cpp
from torchgen.api.autograd import (
    match_differentiability_info,
    NativeFunctionWithDifferentiabilityInfo,
)
from torchgen.gen import parse_native_yaml
from torchgen.selective_build.selector import SelectiveBuilder

from . import gen_python_functions
from .gen_autograd_functions import (
    gen_autograd_functions_lib,
    gen_autograd_functions_python,
)
from .gen_inplace_or_view_type import gen_inplace_or_view_type
from .gen_trace_type import gen_trace_type
from .gen_variable_factories import gen_variable_factories
from .gen_variable_type import gen_variable_type
from .gen_view_funcs import gen_view_funcs
from .load_derivatives import load_derivatives

def gen_autograd(
    native_functions_path: str,
    tags_path: str,
    out: str,
    autograd_dir: str,
    operator_selector: SelectiveBuilder,
    disable_autograd: bool = False,
) -> None:
    # Parse and load derivatives.yaml
    differentiability_infos, used_dispatch_keys = load_derivatives(
        os.path.join(autograd_dir, "derivatives.yaml"), native_functions_path, tags_path
    )

    template_path = os.path.join(autograd_dir, "templates")

    native_funcs = parse_native_yaml(native_functions_path, tags_path).native_functions
    fns = sorted(
        filter(
            operator_selector.is_native_function_selected_for_training, native_funcs
        ),
        key=lambda f: cpp.name(f.func),
    )
    fns_with_diff_infos: list[
        NativeFunctionWithDifferentiabilityInfo
    ] = match_differentiability_info(fns, differentiability_infos)

    # Generate VariableType.h/cpp
    if not disable_autograd:
        gen_variable_type(
            out,
            native_functions_path,
            tags_path,
            fns_with_diff_infos,
            template_path,
            used_dispatch_keys,
        )

        gen_inplace_or_view_type(
            out, native_functions_path, tags_path, fns_with_diff_infos, template_path
        )

        # operator filter not applied as tracing sources are excluded in selective build
        gen_trace_type(out, native_funcs, template_path)
    # Generate Functions.h/cpp
    gen_autograd_functions_lib(out, differentiability_infos, template_path)

    # Generate variable_factories.h
    gen_variable_factories(out, native_functions_path, tags_path, template_path)

    # Generate ViewFuncs.h/cpp
    gen_view_funcs(out, fns_with_diff_infos, template_path)

def gen_autograd_python(
    native_functions_path: str,
    tags_path: str,
    out: str,
    autograd_dir: str,
) -> None:
    differentiability_infos, _ = load_derivatives(
        os.path.join(autograd_dir, "derivatives.yaml"), native_functions_path, tags_path
    )

    template_path = os.path.join(autograd_dir, "templates")

    # Generate Functions.h/cpp
    gen_autograd_functions_python(out, differentiability_infos, template_path)

    # Generate Python bindings
    deprecated_path = os.path.join(autograd_dir, "deprecated.yaml")
    gen_python_functions.gen(
        out, native_functions_path, tags_path, deprecated_path, template_path
    )

def main() -> None:
    parser = argparse.ArgumentParser(description="Generate autograd C++ files script")
    parser.add_argument(
        "native_functions", metavar="NATIVE", help="path to native_functions.yaml"
    )
    parser.add_argument("tags", metavar="TAGS", help="path to tags.yaml")
    parser.add_argument("out", metavar="OUT", help="path to output directory")
    parser.add_argument(
        "autograd", metavar="AUTOGRAD", help="path to autograd directory"
    )
    args = parser.parse_args()
    gen_autograd(
        args.native_functions,
        args.tags,
        args.out,
        args.autograd,
        SelectiveBuilder.get_nop_selector(),
    )


if __name__ == "__main__":
    main()