Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- minigpt2/lib/python3.10/site-packages/torchgen/code_template.py +99 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/__init__.py +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/__init__.py +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/custom_ops.py +149 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/et_cpp.py +370 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py +4 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/types.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/types/signatures.py +76 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/types/types.py +83 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/unboxing.py +230 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/model.py +220 -0
- minigpt2/lib/python3.10/site-packages/torchgen/executorch/parse.py +153 -0
- minigpt2/lib/python3.10/site-packages/torchgen/gen.py +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/gen_aoti_c_shim.py +486 -0
- minigpt2/lib/python3.10/site-packages/torchgen/gen_backend_stubs.py +611 -0
- minigpt2/lib/python3.10/site-packages/torchgen/gen_functionalization_type.py +882 -0
- minigpt2/lib/python3.10/site-packages/torchgen/gen_lazy_tensor.py +581 -0
- minigpt2/lib/python3.10/site-packages/torchgen/gen_schema_utils.py +97 -0
- minigpt2/lib/python3.10/site-packages/torchgen/gen_vmap_plumbing.py +271 -0
- minigpt2/lib/python3.10/site-packages/torchgen/model.py +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/native_function_generation.py +646 -0
- minigpt2/lib/python3.10/site-packages/torchgen/operator_versions/__init__.py +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders_constant.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py +395 -0
- minigpt2/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders_constant.py +7 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/README.md +3 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/__init__.py +0 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/context.py +31 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/deprecated.yaml +134 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_autograd.py +147 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_autograd_functions.py +925 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_inplace_or_view_type.py +675 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_trace_type.py +536 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_type.py +2180 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/ADInplaceOrViewType.cpp +38 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.cpp +20 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.h +51 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/TraceType.cpp +40 -0
- minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/VariableType.h +59 -0
minigpt2/lib/python3.10/site-packages/torchgen/code_template.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import re
|
| 4 |
+
from typing import Mapping, Sequence
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# match $identifier or ${identifier} and replace with value in env
|
| 8 |
+
# If this identifier is at the beginning of whitespace on a line
|
| 9 |
+
# and its value is a list then it is treated as
|
| 10 |
+
# block substitution by indenting to that depth and putting each element
|
| 11 |
+
# of the list on its own line
|
| 12 |
+
# if the identifier is on a line starting with non-whitespace and a list
|
| 13 |
+
# then it is comma separated ${,foo} will insert a comma before the list
|
| 14 |
+
# if this list is not empty and ${foo,} will insert one after.
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class CodeTemplate:
    """Lightweight ``$``-substitution templating for code generation.

    Placeholders are ``$identifier`` or ``${identifier}`` and are resolved
    from the ``env`` mapping / keyword arguments passed to :meth:`substitute`:

    - If the placeholder is the first non-whitespace text on its line and the
      value is a list, each element is emitted on its own line at the
      placeholder's indentation (block substitution).
    - Otherwise a list value is joined with ``", "``; ``${,foo}`` prepends a
      comma-space when the list is non-empty and ``${foo,}`` appends one.
    """

    substitution_str = r"(^[^\n\S]*)?\$([^\d\W]\w*|\{,?[^\d\W]\w*\,?})"
    substitution = re.compile(substitution_str, re.MULTILINE)

    pattern: str
    filename: str

    @staticmethod
    def from_file(filename: str) -> CodeTemplate:
        """Construct a template from the contents of *filename*."""
        with open(filename) as f:
            return CodeTemplate(f.read(), filename)

    def __init__(self, pattern: str, filename: str = "") -> None:
        self.pattern = pattern
        self.filename = filename

    def substitute(
        self, env: Mapping[str, object] | None = None, **kwargs: object
    ) -> str:
        """Expand every placeholder in the pattern and return the result.

        Keyword arguments shadow entries in ``env``; a key present in
        neither raises ``KeyError``.
        """
        mapping: Mapping[str, object] = {} if env is None else env

        def resolve(name: str) -> object:
            # kwargs take precedence over the env mapping.
            if name in kwargs:
                return kwargs[name]
            return mapping[name]

        def render_block(prefix: str, items: Sequence[object]) -> str:
            # One output line per line of every item, each indented by
            # `prefix`; trailing whitespace (incl. final newline) stripped.
            joined = "".join(
                prefix + line + "\n"
                for item in items
                for line in str(item).splitlines()
            )
            return joined.rstrip()

        def expand(match: re.Match[str]) -> str:
            prefix = match.group(1)
            token = match.group(2)
            lead = trail = ""
            if token.startswith("{"):
                token = token[1:-1]
                if token.startswith(","):
                    lead = ", "
                    token = token[1:]
                if token.endswith(","):
                    trail = ", "
                    token = token[:-1]
            value = resolve(token)
            if prefix is not None:
                # Placeholder starts a (possibly indented) line: treat the
                # value as a block, one element per line.
                items = value if isinstance(value, list) else [value]
                return render_block(prefix, items)
            if isinstance(value, list):
                if not value:
                    # Empty lists elide the surrounding commas entirely.
                    return ""
                return lead + ", ".join(str(x) for x in value) + trail
            return str(value)

        return self.substitution.sub(expand, self.pattern)
|
| 77 |
+
# Ad-hoc smoke test: renders a small template exercising block substitution
# ($args/$bar), scalar substitution ($a/$b), comma-prefixed lists (${,stuff}),
# and empty-list elision (${,empty,}), then prints the result for eyeballing.
if __name__ == "__main__":
    c = CodeTemplate(
        """\
int foo($args) {

    $bar
        $bar
    $a+$b
}
int commatest(int a${,stuff})
int notest(int a${,empty,})
"""
    )
    print(
        c.substitute(
            args=["hi", 8],
            bar=["what", 7],
            a=3,
            b=4,
            stuff=["things...", "others"],
            empty=[],
        )
    )
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/__init__.py
ADDED
|
File without changes
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (173 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc
ADDED
|
Binary file (7.4 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc
ADDED
|
Binary file (4.45 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/__init__.py
ADDED
|
File without changes
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (177 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc
ADDED
|
Binary file (4.31 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc
ADDED
|
Binary file (7.69 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc
ADDED
|
Binary file (6.52 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/custom_ops.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Sequence, TYPE_CHECKING
|
| 6 |
+
|
| 7 |
+
from torchgen import dest
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# disable import sorting to avoid circular dependency.
|
| 11 |
+
from torchgen.api.types import DispatcherSignature # usort: skip
|
| 12 |
+
from torchgen.context import method_with_native_function
|
| 13 |
+
from torchgen.model import BaseTy, BaseType, DispatchKey, NativeFunction, Variant
|
| 14 |
+
from torchgen.utils import concatMap, Target
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
if TYPE_CHECKING:
|
| 18 |
+
from torchgen.executorch.model import ETKernelIndex
|
| 19 |
+
from torchgen.selective_build.selector import SelectiveBuilder
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# Generates RegisterKernelStub.cpp, which provides placeholder kernels for custom operators. This will be used at
|
| 23 |
+
# model authoring side.
|
| 24 |
+
@dataclass(frozen=True)
class ComputeNativeFunctionStub:
    """Emit a placeholder C++ kernel definition for a custom operator.

    Used to generate RegisterKernelStub.cpp at model-authoring time: the stub
    matches the dispatcher signature of the op but returns a trivial value
    (an existing argument, an empty tensor, or a tuple thereof) instead of
    computing anything.
    """

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> str | None:
        # Only function-variant ops get a stub; method-only ops are skipped.
        if Variant.function not in f.variants:
            return None

        sig = DispatcherSignature.from_schema(
            f.func, prefix=f"wrapper_CPU_{f.func.name.overload_name}_", symint=False
        )
        assert sig is not None
        if len(f.func.returns) == 0:
            # void-returning op: stub body is empty.
            ret_name = ""
        elif len(f.func.returns) == 1:
            if f.func.arguments.out:
                # out-variant: return the (first) out argument by reference.
                ret_name = f.func.arguments.out[0].name
            else:
                # Otherwise return the first input whose type matches the
                # return type, if any.
                ret_name = next(
                    (
                        a.name
                        for a in f.func.arguments.flat_non_out
                        if a.type == f.func.returns[0].type
                    ),
                    "",
                )
            if not ret_name:
                # if return type is tensor
                if f.func.returns[0].type == BaseType(BaseTy.Tensor):
                    # Returns an empty tensor
                    ret_name = "at::Tensor()"
                else:
                    raise Exception(  # noqa: TRY002
                        f"Can't handle this return type {f.func}"
                    )  # noqa: TRY002
        elif len(f.func.arguments.out) == len(f.func.returns):
            # Returns a tuple of out arguments
            tensor_type = "at::Tensor &"
            comma = ", "
            ret_name = f"""::std::tuple<{comma.join([tensor_type] * len(f.func.returns))}>(
                {comma.join([r.name for r in f.func.arguments.out])}
            )"""
        else:
            # Multi-return without matching out args: only all-tensor returns
            # are supported; stub returns a tuple of empty tensors.
            assert all(
                a.type == BaseType(BaseTy.Tensor) for a in f.func.returns
            ), f"Only support tensor returns but got {f.func.returns}"
            # Returns a tuple of empty tensors
            tensor_type = "at::Tensor"
            comma = ", "
            ret_name = f"""::std::tuple<{comma.join([tensor_type] * len(f.func.returns))}>(
                {comma.join(["at::Tensor()" for _ in f.func.returns])}
            )"""
        ret_str = f"return {ret_name};" if len(f.func.returns) > 0 else ""
        return f"""
{sig.defn()} {{
    {ret_str}
}}
"""
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def gen_custom_ops_registration(
    *,
    native_functions: Sequence[NativeFunction],
    selector: SelectiveBuilder,
    kernel_index: ETKernelIndex,
    rocm: bool,
) -> tuple[str, str]:
    """
    Generate custom ops registration code for dest.RegisterDispatchKey.

    :param native_functions: a sequence of `NativeFunction`
    :param selector: for selective build.
    :param kernel_index: kernels for all the ops.
    :param rocm: bool for dest.RegisterDispatchKey.
    :return: a pair ``(anonymous_definition, static_init_dispatch_registrations)``
        of generated C++ code to register custom operators into PyTorch.
    """

    # convert kernel index to BackendIndex. This is because we can't handle ETKernelIndex yet.
    # TODO larryliu: evaluate if this code is still needed. If yes let it handle ETKernelIndex.

    dispatch_key = DispatchKey.CPU
    backend_index = kernel_index._to_backend_index()
    static_init_dispatch_registrations = ""
    # Group functions by namespace: each namespace gets its own
    # TORCH_LIBRARY_IMPL block.
    ns_grouped_native_functions: dict[str, list[NativeFunction]] = defaultdict(list)
    for native_function in native_functions:
        ns_grouped_native_functions[native_function.namespace].append(native_function)

    for namespace, functions in ns_grouped_native_functions.items():
        if len(functions) == 0:
            continue
        # One m.impl(...) line per function, emitted by RegisterDispatchKey
        # in REGISTRATION mode.
        dispatch_registrations_body = "\n".join(
            list(
                concatMap(
                    dest.RegisterDispatchKey(
                        backend_index,
                        Target.REGISTRATION,
                        selector,
                        rocm=rocm,
                        symint=False,
                        class_method_name=None,
                        skip_dispatcher_op_registration=False,
                    ),
                    functions,
                )
            )
        )
        static_init_dispatch_registrations += f"""
TORCH_LIBRARY_IMPL({namespace}, {dispatch_key}, m) {{
{dispatch_registrations_body}
}};"""
    # Anonymous-namespace wrapper definitions for all functions (not grouped
    # by namespace), emitted in ANONYMOUS_DEFINITION mode.
    anonymous_definition = "\n".join(
        list(
            concatMap(
                dest.RegisterDispatchKey(
                    backend_index,
                    Target.ANONYMOUS_DEFINITION,
                    selector,
                    rocm=rocm,
                    symint=False,
                    class_method_name=None,
                    skip_dispatcher_op_registration=False,
                ),
                native_functions,
            )
        )
    )
    return anonymous_definition, static_init_dispatch_registrations
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/et_cpp.py
ADDED
|
@@ -0,0 +1,370 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Sequence
|
| 4 |
+
|
| 5 |
+
from torchgen import local
|
| 6 |
+
from torchgen.api.types import (
|
| 7 |
+
ArgName,
|
| 8 |
+
BaseCType,
|
| 9 |
+
Binding,
|
| 10 |
+
ConstRefCType,
|
| 11 |
+
CType,
|
| 12 |
+
MutRefCType,
|
| 13 |
+
NamedCType,
|
| 14 |
+
SpecialArgName,
|
| 15 |
+
TupleCType,
|
| 16 |
+
VectorCType,
|
| 17 |
+
voidT,
|
| 18 |
+
)
|
| 19 |
+
from torchgen.executorch.api.types import (
|
| 20 |
+
ArrayRefCType,
|
| 21 |
+
BaseTypeToCppMapping,
|
| 22 |
+
OptionalCType,
|
| 23 |
+
scalarT,
|
| 24 |
+
tensorListT,
|
| 25 |
+
tensorT,
|
| 26 |
+
)
|
| 27 |
+
from torchgen.model import (
|
| 28 |
+
Argument,
|
| 29 |
+
Arguments,
|
| 30 |
+
BaseTy,
|
| 31 |
+
BaseType,
|
| 32 |
+
ListType,
|
| 33 |
+
NativeFunction,
|
| 34 |
+
OptionalType,
|
| 35 |
+
Return,
|
| 36 |
+
SelfArgument,
|
| 37 |
+
TensorOptionsArguments,
|
| 38 |
+
Type,
|
| 39 |
+
)
|
| 40 |
+
from torchgen.utils import assert_never
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
"""
|
| 44 |
+
This file describes the translation of JIT schema to the public C++ API, which is what people use when they call
|
| 45 |
+
functions like at::add. It also serves as a native function API, which is the signature of kernels,
|
| 46 |
+
since in Executorch CppSignature is the same as NativeSignature.
|
| 47 |
+
|
| 48 |
+
Difference between this file and torchgen.api.cpp.py:
|
| 49 |
+
|
| 50 |
+
- Executorch doesn't support TensorOptions, however in this file we still keep the logic here to be compatible with
|
| 51 |
+
torchgen.api.cpp, so that we can do stuff like ATen mode (running ATen kernels in Executorch).
|
| 52 |
+
|
| 53 |
+
- Executorch doesn't support Dimname.
|
| 54 |
+
|
| 55 |
+
- Executorch runtime doesn't support SymInt, will treat it as int.
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# Translation of "value types" in JIT schema to C++ API type. Value
|
| 60 |
+
# types look the same no matter if they are argument types or return
|
| 61 |
+
# types. Returns None if the type in question is not a value type.
|
| 62 |
+
def valuetype_type(
    t: Type,
    *,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
) -> NamedCType | None:
    """Translate a JIT "value type" into its C++ NamedCType.

    Value types render identically whether they appear as arguments or as
    returns. Returns ``None`` when ``t`` is not a value type (e.g. Tensor,
    Scalar, most lists), so the caller falls through to reference-type
    handling.
    """
    if isinstance(t, BaseType):
        # Tensor and Scalar are reference types, not value types.
        if t.name == BaseTy.Tensor or t.name == BaseTy.Scalar:
            return None
        # For SymInt we simply treat it as int.
        elif str(t) == "SymInt":
            return NamedCType(binds, BaseCType(BaseTypeToCppMapping[BaseTy.int]))
        if remove_non_owning_ref_types:
            if t.name == BaseTy.str:
                raise AssertionError(
                    "string ref->value conversion: not implemented yet"
                )
        # All other BaseType currently map directly to BaseCppTypes.
        return NamedCType(binds, BaseCType(BaseTypeToCppMapping[t.name]))
    elif isinstance(t, OptionalType):
        elem = valuetype_type(t.elem, binds=binds)
        if elem is None:
            return None
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        # Only fixed-size bool lists are treated as value types here.
        if str(t.elem) == "bool":
            assert t.size is not None
            return NamedCType(
                binds, ArrayRefCType(BaseCType(BaseTypeToCppMapping[BaseTy.bool]))
            )
        else:
            return None
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# Translation of types occurring in JIT arguments to a C++ argument type.
|
| 99 |
+
# If remove_non_owning_ref_types is set, we'll guarantee that the outputed CType is not a non-owning reference type.
|
| 100 |
+
# For example, we'll return std::vector<int> instead of IntArrayRef.
|
| 101 |
+
# See Note [translation from C++ reference to value types]
|
| 102 |
+
def argumenttype_type(
    t: Type,
    *,
    mutable: bool,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
) -> NamedCType:
    """Translate a JIT argument type into its C++ argument NamedCType.

    Value types are delegated to :func:`valuetype_type`; reference types
    (Tensor, Scalar, optionals and lists thereof) are handled here. Raises
    for types Executorch does not support (e.g. Dimname lists).
    """
    # If it's a value type, do the value type translation
    r = valuetype_type(
        t,
        binds=binds,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
    )
    if r is not None:
        return r
    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            # Mutable tensors are passed by mutable ref unless the local
            # config asks for const refs.
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(binds, MutRefCType(BaseCType(tensorT)))
            else:
                return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
        elif t.name == BaseTy.Scalar:
            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
        else:
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        # Tensor? and Scalar? are special-cased before the generic optional
        # fallthrough below.
        if str(t.elem) == "Tensor":
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(
                    binds, MutRefCType(BaseCType(tensorT))
                )  # TODO: fix this discrepancy
            else:
                return NamedCType(
                    binds, ConstRefCType(OptionalCType(BaseCType(tensorT)))
                )
        elif str(t.elem) == "Scalar":
            return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        # TODO: keeping these special cases for Tensor[] and Tensor?[] so that we can hookup with ATen kernels.
        if str(t.elem) == "Tensor":
            return NamedCType(binds, BaseCType(tensorListT))
        elif str(t.elem) == "Dimname":
            raise NotImplementedError("Executorch doesn't support Dimname")
        elif str(t.elem) == "Tensor?":
            return NamedCType(binds, ArrayRefCType(OptionalCType(BaseCType(tensorT))))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, ArrayRefCType(elem.type))
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
# Translate a JIT argument into its C++ type
|
| 156 |
+
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
    """Translate a single JIT argument into its C++ NamedCType."""
    # An argument is mutable iff the schema marks it as written to.
    mutable = a.is_write
    return argumenttype_type(a.type, mutable=mutable, binds=binds)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
# Translation of a (non-multi) return type from JIT to C++
|
| 161 |
+
# N.B: returntype_type returns a CType, not a NamedCType.
|
| 162 |
+
# This is mostly because of the mismatch between return types and return names.
|
| 163 |
+
# e.g. a function with a return type of 'void' has 0 return names,
|
| 164 |
+
# and a function with a return type of 'std::tuple' has >1 return name.
|
| 165 |
+
def returntype_type(t: Type, *, mutable: bool) -> CType:
    """Translate a (non-multi) JIT return type into a C++ CType.

    Returns a plain CType rather than a NamedCType because return types and
    return names do not line up one-to-one: a 'void' return has no name and
    a tuple return has several.
    """
    # placeholder is ignored
    r = valuetype_type(t, binds="__placeholder__")
    if r is not None:
        return r.type

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable:
                if local.use_const_ref_for_mutable_tensors():
                    return ConstRefCType(BaseCType(tensorT))
                else:
                    return MutRefCType(BaseCType(tensorT))
            else:
                # Note [Tensor Copy Returns]
                # Currently, we use "Argument.is_write" to determine
                # whether or not Tensor return types should be copies or references.
                # If that ever changes, take a look at other locations of this note!
                return BaseCType(tensorT)
        elif t.name == BaseTy.Scalar:
            return BaseCType(scalarT)
    elif isinstance(t, ListType):
        assert (
            not mutable
        ), "Native functions should never return a mutable tensor list. They should return void."
        elem = returntype_type(t.elem, mutable=False)
        assert t.size is None, f"fixed size list returns not supported: {t}"
        return VectorCType(elem)

    # Any type not handled above (including unhandled BaseTy variants that
    # fall through the branches) is an error.
    raise AssertionError(f"unrecognized return type {t}")
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
# Translation of a single return to its C++ type
|
| 198 |
+
def return_type(r: Return) -> CType:
    """Translate a single JIT return into its C++ CType."""
    # A return is "mutable" iff the schema marks it as a write (e.g. out args).
    is_mutable = r.is_write
    return returntype_type(r.type, mutable=is_mutable)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
# Translation of a full (possibly multi) return from JIT to its C++ type
|
| 203 |
+
def returns_type(rs: Sequence[Return]) -> CType:
    """Translate a full (possibly multi) JIT return list into one C++ CType.

    Zero returns map to ``void``, one return maps to its own CType, and
    several returns map to a ``std::tuple`` of their CTypes.
    """
    if not rs:
        return BaseCType(voidT)
    ctypes = [return_type(r) for r in rs]
    if len(ctypes) == 1:
        return ctypes[0]
    return TupleCType(ctypes)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequence[str]:
    """Compute the C++ name for each return of ``f``, in schema order.

    Precedence: inplace functions return ``self``; out functions reuse the
    corresponding out-argument name; explicitly named returns keep their
    schema name (suffixed ``_return`` on collision with an argument name);
    otherwise ``fallback_name`` (indexed when there are multiple returns).
    """
    returns: list[str] = []
    for i, r in enumerate(f.func.returns):
        # If we have an inplace function, the return argument is
        # implicitly named self.
        # TODO: Consider incorporating this into the data model
        if f.func.name.name.inplace:
            assert i == 0, "illegal inplace function with multiple returns"
            name = "self"
        # If we are out function, the name is the name of the
        # corresponding output function (r.name will get recorded
        # in field_name later.)
        elif f.func.is_out_fn():
            name = f.func.arguments.out[i].name
        # If the return argument is explicitly named...
        elif r.name:
            # Avoid emitting a return name that shadows an argument name.
            name_conflict = any(
                r.name == a.name for a in f.func.schema_order_arguments()
            )
            if name_conflict and not f.func.is_out_fn():
                name = f"{r.name}_return"
            else:
                name = r.name
        # If there is no explicit name and no fallback name was passed in, we just name the output result,
        # unless it's a multi-return, in which case it's result0,
        # result1, etc (zero-indexed)
        else:
            name = fallback_name if len(f.func.returns) == 1 else f"{fallback_name}{i}"
        returns.append(name)
    return returns
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
# Mapping from JIT schema default-value literals to their C++ expression
# form; used by default_expr() as the final fallback for defaults that are
# not strings, optionals, or lists.
# NOTE(review): entries here use the `torch::executorch` namespace while
# default_expr() emits `torch::executor::nullopt` for optional None — one of
# the two spellings is presumably stale; confirm against the Executorch
# runtime headers.
JIT_TO_CPP_DEFAULT = {
    "False": "false",
    "True": "true",
    "None": "torch::executorch::nullopt",  # UGH this one is type directed
    "[]": "{}",
    "contiguous_format": "torch::executorch::MemoryFormat::Contiguous",
    "long": "torch::executorch::kLong",
}
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
# Convert a JIT default into C++ expression representing the default
|
| 255 |
+
def default_expr(d: str, t: Type) -> str:
    """Convert a JIT schema default value ``d`` into a C++ expression.

    Handles optional-None, string re-quoting (single → double quotes with
    escaping), list-literal bracket conversion, and finally the
    JIT_TO_CPP_DEFAULT lookup (returning ``d`` unchanged if unmapped).
    """
    if d == "None" and str(t) == "Tensor?":
        return "{}"
    if isinstance(t, BaseType) and t.name is BaseTy.str:
        # Schema allows single quotes but C++ needs double
        if len(d) >= 2 and d[0] == "'" and d[-1] == "'":
            s = ""
            i = 1
            # Walk the quoted body (excluding the surrounding quotes),
            # escaping double quotes and unescaping \' while keeping other
            # backslash escapes verbatim.
            while i + 1 < len(d):
                if d[i] != "\\":
                    if d[i] == '"':
                        s += '\\"'
                    else:
                        s += d[i]
                    i += 1
                else:
                    if d[i + 1] == "'":
                        s += "'"
                    else:
                        s += d[i : i + 2]
                    i += 2

            return f'"{s}"'

    if isinstance(t, OptionalType):
        if d == "None":
            return "torch::executor::nullopt"

        # Non-None default on an optional: render as the element type's default.
        return default_expr(d, t.elem)

    if isinstance(t, ListType):
        if d.startswith("[") and d.endswith("]"):
            return "{" + d[1:-1] + "}"
        elif t.size is None:
            # NOTE: Sized lists can have scalar defaults
            raise ValueError(f"Expected a list default '[...]' but found: '{d}'")

    return JIT_TO_CPP_DEFAULT.get(d, d)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
# Convert an argument into its C++ API form
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
def argument(
    a: Argument | TensorOptionsArguments | SelfArgument,
    *,
    cpp_no_default_args: set[str],
    method: bool,
    faithful: bool,
    has_tensor_options: bool,
) -> list[Binding]:
    """Convert one JIT argument into its C++ API Binding(s).

    Plain Arguments yield exactly one Binding; a SelfArgument yields either
    nothing (method context, where `this` is implicit) or its wrapped
    argument's Binding; TensorOptionsArguments are unsupported in Executorch.
    """
    def sub_argument(
        a: Argument | TensorOptionsArguments | SelfArgument,
    ) -> list[Binding]:
        # Recurse with the same configuration (used to unwrap SelfArgument).
        return argument(
            a,
            cpp_no_default_args=cpp_no_default_args,
            method=method,
            faithful=faithful,
            has_tensor_options=has_tensor_options,
        )

    if isinstance(a, Argument):
        binds: ArgName
        # memory_format is bound under a special name when tensor options
        # are present, since it may be redundant with them.
        if a.name == "memory_format" and has_tensor_options:
            binds = SpecialArgName.possibly_redundant_memory_format
        else:
            binds = a.name
        default: str | None = None
        if a.name not in cpp_no_default_args and a.default is not None:
            default = default_expr(a.default, a.type)
        return [
            Binding(
                nctype=argument_type(a, binds=binds),
                name=a.name,
                default=default,
                argument=a,
            )
        ]
    elif isinstance(a, TensorOptionsArguments):
        raise NotImplementedError("Need to implement type resolution for TensorOptions")
    elif isinstance(a, SelfArgument):
        if method:
            # Caller is responsible for installing implicit this in context!
            return []
        else:
            return sub_argument(a.argument)
    else:
        assert_never(a)
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
def arguments(
    arguments: Arguments,
    *,
    faithful: bool,
    method: bool,
    cpp_no_default_args: set[str],
) -> list[Binding]:
    """Lower all schema arguments to bindings.

    Faithful signatures keep schema order (out arguments last) and drop
    defaults; the convenient form moves out arguments to the front.
    """
    if faithful:
        ordered: list[Argument | TensorOptionsArguments | SelfArgument] = [
            *arguments.non_out,
            *arguments.out,
        ]
    else:
        ordered = [*arguments.out, *arguments.non_out]

    bindings: list[Binding] = []
    for a in ordered:
        for r in argument(
            a,
            faithful=faithful,
            method=method,
            has_tensor_options=arguments.tensor_options is not None,
            cpp_no_default_args=cpp_no_default_args,
        ):
            bindings.append(r.no_default() if faithful else r)
    return bindings
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torchgen.executorch.api.types.types import *
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
from torchgen.executorch.api.types.signatures import * # usort: skip
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (287 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc
ADDED
|
Binary file (3.17 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/types.cpython-310.pyc
ADDED
|
Binary file (2.67 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/types/signatures.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import TYPE_CHECKING
|
| 5 |
+
|
| 6 |
+
import torchgen.api.cpp as aten_cpp
|
| 7 |
+
from torchgen.executorch.api.types.types import contextArg
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
if TYPE_CHECKING:
|
| 11 |
+
from torchgen.api.types import Binding, CType
|
| 12 |
+
from torchgen.model import FunctionSchema, NativeFunction
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@dataclass(frozen=True)
class ExecutorchCppSignature:
    """
    This signature is merely a CppSignature with Executorch types (optionally
    contains KernelRuntimeContext as well). The inline definition of
    CppSignature is generated in Functions.h and it's used by unboxing
    functions.
    """

    # The schema this signature is derived from
    func: FunctionSchema

    # The set of C++ arguments which should not have defaults applied to them
    cpp_no_default_args: set[str]

    # Allows you to prepend an arbitrary prefix to the signature name.
    # This is useful for parts of the codegen that generate wrappers around kernels,
    # and need to avoid naming collisions.
    prefix: str = ""

    def arguments(self, *, include_context: bool = True) -> list[Binding]:
        # Bindings for this signature, optionally prefixed with the synthetic
        # `context` (KernelRuntimeContext) binding. NOTE: `et_cpp` is imported
        # at the bottom of this module, not at the top.
        return ([contextArg] if include_context else []) + et_cpp.arguments(
            self.func.arguments,
            faithful=True,  # always faithful, out argument at the end
            method=False,  # method not supported
            cpp_no_default_args=self.cpp_no_default_args,
        )

    def name(self) -> str:
        # Delegates naming to the ATen C++ namer, with the faithful name
        # requested for out overloads, then applies the optional prefix.
        return self.prefix + aten_cpp.name(
            self.func,
            faithful_name_for_out_overloads=True,
        )

    def decl(self, name: str | None = None, *, include_context: bool = True) -> str:
        """C++ declaration string; arguments keep their defaults (a.decl())."""
        args_str = ", ".join(
            a.decl() for a in self.arguments(include_context=include_context)
        )
        if name is None:
            name = self.name()
        return f"{self.returns_type().cpp_type()} {name}({args_str})"

    def defn(self, name: str | None = None) -> str:
        """C++ definition string; arguments omit defaults (a.defn())."""
        args = [a.defn() for a in self.arguments()]
        args_str = ", ".join(args)
        if name is None:
            name = self.name()
        return f"{self.returns_type().cpp_type()} {name}({args_str})"

    def returns_type(self) -> CType:
        # Return CType computed from the schema returns via the ET type mapping.
        return et_cpp.returns_type(self.func.returns)

    @staticmethod
    def from_native_function(
        f: NativeFunction, *, prefix: str = ""
    ) -> ExecutorchCppSignature:
        """Alternate constructor from a parsed NativeFunction."""
        return ExecutorchCppSignature(
            func=f.func, prefix=prefix, cpp_no_default_args=f.cpp_no_default_args
        )
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
from torchgen.executorch.api import et_cpp
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/types/types.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
|
| 5 |
+
from torchgen.api.types import (
|
| 6 |
+
BaseCppType,
|
| 7 |
+
BaseCType,
|
| 8 |
+
Binding,
|
| 9 |
+
boolT,
|
| 10 |
+
CType,
|
| 11 |
+
doubleT,
|
| 12 |
+
Expr,
|
| 13 |
+
longT,
|
| 14 |
+
MutRefCType,
|
| 15 |
+
NamedCType,
|
| 16 |
+
)
|
| 17 |
+
from torchgen.model import BaseTy
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Executorch counterparts of the ATen C++ types. All live in the
# `torch::executor` namespace in the generated code.
halfT = BaseCppType("torch::executor", "Half")
bfloat16T = BaseCppType("torch::executor", "BFloat16")
stringT = BaseCppType("torch::executor", "string_view")
scalarTypeT = BaseCppType("torch::executor", "ScalarType")
tensorT = BaseCppType("torch::executor", "Tensor")
tensorListT = BaseCppType("torch::executor", "TensorList")
scalarT = BaseCppType("torch::executor", "Scalar")
memoryFormatT = BaseCppType("torch::executor", "MemoryFormat")
intArrayRefT = BaseCppType("torch::executor", "IntArrayRef")
optionalT = BaseCppType("torch::executor", "optional")
contextT = BaseCppType("torch::executor", "KernelRuntimeContext")

# Expression that names the runtime context passed into every kernel call.
contextExpr = Expr(
    expr="context",
    type=NamedCType(name="context", type=MutRefCType(BaseCType(contextT))),
)

# Synthetic `context` parameter prepended to Executorch kernel signatures.
# It does not correspond to any schema Argument, hence argument=None.
contextArg = Binding(
    name="context",
    nctype=contextExpr.type,
    argument=None,  # type: ignore[arg-type]
    default=None,
)

# Mapping from JIT base types to their Executorch C++ representation.
# (longT/doubleT/boolT come from torchgen.api.types; the rest are above.)
BaseTypeToCppMapping: dict[BaseTy, BaseCppType] = {
    BaseTy.int: longT,
    BaseTy.float: doubleT,
    BaseTy.bool: boolT,
    BaseTy.str: stringT,
    BaseTy.ScalarType: scalarTypeT,
    BaseTy.Tensor: tensorT,
    BaseTy.Scalar: scalarT,
    BaseTy.MemoryFormat: memoryFormatT,
}
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@dataclass(frozen=True)
class OptionalCType(CType):
    """Executorch optional wrapper: renders as `torch::executor::optional<T>`."""

    elem: CType

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is accepted for interface parity but deliberately not
        # forwarded to the element type.
        inner = self.elem.cpp_type()
        return f"torch::executor::optional<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"torch::executor::optional<{inner}>"

    def remove_const_ref(self) -> CType:
        # Strip const/ref from the wrapped element, keeping the optional shell.
        return OptionalCType(self.elem.remove_const_ref())
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@dataclass(frozen=True)
class ArrayRefCType(CType):
    """Executorch array view: renders as `torch::executor::ArrayRef<T>`."""

    elem: CType

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is accepted for interface parity but deliberately not
        # forwarded to the element type.
        inner = self.elem.cpp_type()
        return f"torch::executor::ArrayRef<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"torch::executor::ArrayRef<{inner}>"

    def remove_const_ref(self) -> CType:
        # Strip const/ref from the wrapped element, keeping the ArrayRef shell.
        return ArrayRefCType(self.elem.remove_const_ref())
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/api/unboxing.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import Callable, Sequence, TYPE_CHECKING
|
| 5 |
+
|
| 6 |
+
from torchgen.model import (
|
| 7 |
+
Argument,
|
| 8 |
+
BaseTy,
|
| 9 |
+
BaseType,
|
| 10 |
+
ListType,
|
| 11 |
+
NativeFunction,
|
| 12 |
+
OptionalType,
|
| 13 |
+
Type,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
if TYPE_CHECKING:
|
| 18 |
+
from torchgen.api.types import Binding, CType, NamedCType
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# Separator used when joining nested generated C++ lines (see
# Unboxing._gen_code_list_type); the tab keeps them indented in the loop body.
connector = "\n\t"
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Return unboxing function name for a NativeFunction
def name(f: NativeFunction) -> str:
    """Overload-qualified name of the generated unboxing wrapper for `f`."""
    schema_name = f.func.name
    return schema_name.unambiguous_name()
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass(frozen=True)
class Unboxing:
    """
    Takes a sequence of Bindings and unbox EValues to these Bindings. Return generated code that performs correct unboxing.
    A sample generated code:
    // aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
    void mul_out(EValue** stack) {
        EValue& self = *stack[0];
        EValue& other = *stack[1];
        EValue& out = *stack[2];
        const torch::executor::Tensor & self_base = self.to<torch::executor::Tensor>();
        const torch::executor::Tensor & other_base = other.to<torch::executor::Tensor>();
        torch::executor::Tensor & out_base = out.to<torch::executor::Tensor>();

        EXECUTORCH_SCOPE_PROF("native_call_mul.out");
        torch::executor::mul_outf(self_base, other_base, out_base);


    }
    """

    # this is a callable that converts a JIT argument, into its C++ type.
    # Translates (type, mutability, binds) to NamedCType. E.g., torchgen.api.cpp.argumenttype_type.
    argument_type_gen: Callable[
        ...,
        NamedCType,
    ]

    # Convert all the arguments in a NativeFunction to C++ code
    def convert_arguments(
        self, args: Sequence[Binding]
    ) -> tuple[list[Binding], list[str]]:
        """Unbox every stack EValue into a typed C++ local.

        Returns a pair: (bindings renamed to the unboxed locals, the C++
        source lines that declare and populate those locals).
        """
        code_list = [f"EValue& {args[i].name} = *stack[{i}];" for i in range(len(args))]
        binding_list = []
        for arg in args:
            # expecting only Argument
            if not isinstance(arg.argument, Argument):
                raise Exception(  # noqa: TRY002
                    f"Unexpected argument type, expecting `Argument` but got {arg}"
                )
            argument: Argument = arg.argument
            unboxed_name, _, code, decl = self.argumenttype_evalue_convert(
                argument.type, argument.name, mutable=argument.is_write
            )
            # Declarations must precede the conversion code that fills them.
            code_list.extend(decl)
            code_list.extend(code)
            binding_list.append(arg.with_name(unboxed_name))
        return binding_list, code_list

    def argumenttype_evalue_convert(
        self, t: Type, arg_name: str, *, mutable: bool = False
    ) -> tuple[str, CType, list[str], list[str]]:
        """
        Takes in the type, name and mutability corresponding to an argument, and generates a tuple of:
        (1) the C++ code necessary to unbox the argument
        (2) A Binding corresponding to the newly created unboxed variable, including variable name and its CType
        :param t: a `Type` of an argument
        :param arg_name: argument name
        :param mutable: boolean for whether this argument type is mutable
        :return: unboxed result
        """
        # Resolve the C++ type for this JIT type via the injected generator.
        ctype = self.argument_type_gen(t, mutable=mutable, binds=arg_name).type

        # Dispatch on the JIT type shape; each helper returns (code, decls).
        if isinstance(t, BaseType):
            out_name = f"{arg_name}_base"
            code, decl = self._gen_code_base_type(
                arg_name=arg_name, out_name=out_name, ctype=ctype
            )
        elif isinstance(t, OptionalType):
            out_name = f"{arg_name}_opt_out"
            code, decl = self._gen_code_optional_type(
                arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
            )
        elif isinstance(t, ListType):
            out_name = f"{arg_name}_list_out"
            code, decl = self._gen_code_list_type(
                arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
            )
        else:
            raise Exception(  # noqa: TRY002
                f"Cannot handle type {t}. arg_name: {arg_name}"
            )  # noqa: TRY002
        return out_name, ctype, code, decl

    def _gen_code_base_type(
        self, arg_name: str, out_name: str, ctype: CType
    ) -> tuple[list[str], list[str]]:
        # Scalar/base types unbox with a single EValue::to<T>() call.
        return [
            f"{ctype.cpp_type()} {out_name} = {arg_name}.to<{ctype.cpp_type(strip_ref=True)}>();"
        ], []

    def _gen_code_optional_type(
        self, arg_name: str, out_name: str, t: OptionalType, ctype: CType
    ) -> tuple[list[str], list[str]]:
        # Recurse on the element type; only its CType (and any decls) are
        # used here — the element's conversion code itself is discarded.
        in_name = f"{arg_name}_opt_in"
        res_name, base_type, res_code, decl = self.argumenttype_evalue_convert(
            t.elem, in_name
        )
        return (
            f"""
    auto {out_name} = {arg_name}.toOptional<{base_type.cpp_type(strip_ref=True)}>();
            """.split(
                "\n"
            ),
            decl,
        )

    def _gen_code_list_type(
        self, arg_name: str, out_name: str, t: ListType, ctype: CType
    ) -> tuple[list[str], list[str]]:
        """Generate unboxing code for a list-typed argument.

        Known element types map to dedicated EValue accessors
        (toTensorList/toIntList/...); anything else falls back to building a
        std::vector and wrapping it in an ArrayRef.
        """
        in_name = f"{arg_name}_list_in"
        elem_name = f"{arg_name}_elem"
        code = []
        res_name, res_ctype, res_code, decl = self.argumenttype_evalue_convert(
            t.elem, elem_name
        )

        if isinstance(t.elem, BaseType) and t.elem.name == BaseTy.Tensor:
            code.extend(
                f"""
    auto {out_name} = {arg_name}.toTensorList();
                """.split(
                    "\n"
                )
            )
        elif isinstance(t.elem, BaseType) and (
            t.elem.name == BaseTy.int or t.elem.name == BaseTy.SymInt
        ):
            code.extend(
                f"""
    auto {out_name} = {arg_name}.toIntList();
                """.split(
                    "\n"
                )
            )
        elif isinstance(t.elem, BaseType) and t.elem.name == BaseTy.float:
            code.extend(
                f"""
    auto {out_name} = {arg_name}.toDoubleList();
                """.split(
                    "\n"
                )
            )
        elif isinstance(t.elem, BaseType) and t.elem.name == BaseTy.bool:
            # handle list type with size, e.g., bool[4]
            code.extend(
                f"""
    #ifdef USE_ATEN_LIB
    std::array<bool, {t.size}> {out_name};
    auto {in_name} = {arg_name}.toBoolList();
    size_t _i = 0;
    for (auto {elem_name}: {in_name}) {{
        {out_name}[_i++] = {elem_name};
    }}
    #else
    auto {out_name} = {arg_name}.toBoolList();
    #endif
            """.split(
                    "\n"
                )
            )
        # pytorch codegen:
        # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List<::std::optional<at::Tensor>>
        elif (
            isinstance(t.elem, OptionalType)
            and isinstance(t.elem.elem, BaseType)
            and t.elem.elem.name == BaseTy.Tensor
        ):
            code.extend(
                f"""
    #ifdef USE_ATEN_LIB
    auto {in_name} = {arg_name}.toListOptionalTensor();
    c10::List<::std::optional<at::Tensor>> {out_name};
    for (auto {elem_name}: {in_name}) {{
        {out_name}.push_back({elem_name});
    }}
    #else
    auto {out_name} = {arg_name}.toListOptionalTensor();
    #endif
            """.split(
                    "\n"
                )
            )
        else:
            # use ArrayRef as default.
            vec_name = arg_name + "_vec"
            # need to bring vector instantiation out of scope so that ArrayRef has valid data
            decl.append(
                f"std::vector<{res_ctype.cpp_type(strip_ref=True)}> {vec_name};"
            )
            # NOTE(review): the emitted loop iterates `{in_name}`, but no line
            # generated by this branch declares it — presumably a surrounding
            # template provides it; confirm against the unboxing wrapper.
            code.extend(
                f"""
    for (EValue {elem_name}: {in_name}) {{
        {connector.join(res_code)}
        {vec_name}.push_back({res_name});
    }}
    {ctype.cpp_type(strip_ref=True)} {out_name}({vec_name});
            """.split(
                    "\n"
                )
            )
        return code, decl
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/model.py
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Represents all kernels used by an Executorch model.
|
| 2 |
+
# It maintains a Dict[OperatorName, Dict[ETKernelKey, BackendMetadata]] structure.
|
| 3 |
+
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import itertools
|
| 7 |
+
from collections import defaultdict, namedtuple
|
| 8 |
+
from dataclasses import dataclass
|
| 9 |
+
from enum import IntEnum
|
| 10 |
+
|
| 11 |
+
from torchgen.model import (
|
| 12 |
+
BackendIndex,
|
| 13 |
+
BackendMetadata,
|
| 14 |
+
DispatchKey,
|
| 15 |
+
NativeFunction,
|
| 16 |
+
NativeFunctionsGroup,
|
| 17 |
+
OperatorName,
|
| 18 |
+
)
|
| 19 |
+
from torchgen.utils import assert_never
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# Version prefix ("v1/...") baked into serialized kernel keys by
# ETKernelKey.to_native_string; bump when the key string format changes.
KERNEL_KEY_VERSION = 1
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# TODO: Duplicated Subset from codegen.tool.gen_oplist, remove declaration in codegen
class ScalarType(IntEnum):
    """Subset of scalar-type codes used when serializing kernel keys.

    The numeric values are a wire format (see ETKernelKeyOpArgMeta
    .to_native_string), which is presumably why the sequence has gaps
    (5, 8-10 are codes this subset does not list) — do not renumber.
    """

    Byte = 0
    Char = 1
    Short = 2
    Int = 3
    Long = 4
    Float = 6
    Double = 7
    Bool = 11
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# Pair returned when parsing the ET ops yaml: the native functions plus the
# kernel index. NOTE(review): parse.py defines another ETParsedYaml with a
# field named "et_kernel_indices" — confirm which definition callers use.
ETParsedYaml = namedtuple("ETParsedYaml", ["native_functions", "kernel_index"])
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@dataclass(frozen=True)
class ETKernelKeyOpArgMeta:
    """Per-argument specialization (dtype + dim order) inside a kernel key."""

    arg_name: str
    # Name of a ScalarType member, e.g. "Float".
    dtype: str
    # The order of the dimensions if entry is a Tensor
    dim_order: tuple[int, ...]

    def to_native_string(self) -> str:
        """Serialize as "<dtype code>;<d0,d1,...>"."""
        dtype_code = ScalarType[self.dtype].value
        # str((0, 1)) == "(0, 1)": stripping parens and spaces yields "0,1".
        # (A 1-tuple keeps its trailing comma: "(0,)" -> "0,".)
        dims = str(self.dim_order)[1:-1].replace(" ", "")
        return f"{dtype_code};{dims}"
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
@dataclass(frozen=True)
class ETKernelKey:
    """Identifies one specialized kernel variant of an operator: either the
    catch-all (`default=True`) or a tuple of per-argument dtype/dim-order
    constraints (`arg_meta`)."""

    # Field undefined is default = True
    arg_meta: tuple[ETKernelKeyOpArgMeta, ...] = ()

    # Indicator for this kernel being used as a catch all
    default: bool = False

    # Serialization format version; emitted as the "v<N>/" prefix.
    version: int = KERNEL_KEY_VERSION

    @staticmethod
    def gen_from_yaml(
        args: dict[str, tuple[str, str]],
        type_alias_map: dict[str, list[str]],  # TODO: Support unwrapped str val
        dim_order_alias_map: dict[str, list[int]],
    ) -> list[ETKernelKey]:
        """Generate ETKernelKeys from arg kernel specs
        Multiple ETKernelKeys are returned due to dtype permutations from utilizing
        type_alias_map (actualizing each potential type permutation as a KernelKey)

        Args:
            args: Mapping from argument name to kernel specs
                Kernel specs are a tuple of (dtype, dim_order).
                Currently tuple entries must be aliased via the alias map arguments
            type_alias_map: Mapping from type alias to potential type enums
                i.e { T0 : [Double, Int] } means T0 can be either Double or Int
                Used for lookup by args
            dim_order_alias_map: Mapping from alias to a list of dimension orders
                Used for lookup by args
        """
        # Cast to dim order to int
        dim_order_alias_map = {
            k: [int(alias) for alias in v] for k, v in dim_order_alias_map.items()
        }
        kernel_keys = []

        # Get all used Dtype Alias
        dtype_alias_used = set()
        for type_alias, dim_order in args.values():
            # Enforce usage of alias initially
            # TODO: Support inlined arguments
            assert type_alias in type_alias_map, "Undefined type alias: " + str(
                type_alias
            )
            assert (
                dim_order in dim_order_alias_map
            ), "Undefined dim_order alias: " + str(dim_order)
            dtype_alias_used.add(type_alias)

        # Generate all permutations of dtype alias values
        alias_dtypes = [
            [(alias, dtype) for dtype in type_alias_map[alias]]
            for alias in dtype_alias_used
        ]
        alias_permutations = [
            dict(permutation) for permutation in list(itertools.product(*alias_dtypes))
        ]

        # Using each alias value permutation, generate kernel keys
        # (op_arg_cache dedupes identical per-arg metas across permutations).
        op_arg_cache = {}
        for permutation in alias_permutations:
            arg_list = []
            for arg_name, arg_spec in args.items():
                dtype = permutation[arg_spec[0]]
                dim_order = dim_order_alias_map[arg_spec[1]]  # type: ignore[assignment]
                if (
                    cache_key := (arg_name, dtype, tuple(dim_order))
                ) not in op_arg_cache:
                    op_arg_cache[cache_key] = ETKernelKeyOpArgMeta(*cache_key)  # type: ignore[arg-type]

                arg_list.append(op_arg_cache[cache_key])
            kernel_keys.append(ETKernelKey(tuple(arg_list)))

        return kernel_keys

    def to_native_string(self) -> str:
        # Serialized form: "default" for the catch-all, otherwise
        # "v<version>/<arg>|<arg>|..." with per-arg metas joined by "|".
        if self.default:
            return "default"
        return (
            "v"
            + str(KERNEL_KEY_VERSION)
            + "/"
            + "|".join([arg.to_native_string() for arg in self.arg_meta])
        )
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
@dataclass(frozen=True)
class ETKernelIndex:
    """Kernel registry for an Executorch model.

    Maps each operator to the kernel variants registered for it, keyed by
    ETKernelKey (a dtype/dim-order specialization, or the catch-all
    default key).
    """

    index: dict[OperatorName, dict[ETKernelKey, BackendMetadata]]

    def has_kernels(self, g: NativeFunction | NativeFunctionsGroup) -> bool:
        """Return True iff at least one kernel is registered for `g`."""
        # Fix: get_kernels returns {} (never None) for unknown operators, so
        # the previous `is not None` comparison always evaluated to True.
        return len(self.get_kernels(g)) > 0

    def get_kernels(
        self, g: NativeFunction | NativeFunctionsGroup
    ) -> dict[ETKernelKey, BackendMetadata]:
        """Return the kernels registered for `g` ({} when there are none).

        For a NativeFunctionsGroup, lookup is keyed on the functional
        variant's operator name.
        """
        if isinstance(g, NativeFunction):
            f = g
        elif isinstance(g, NativeFunctionsGroup):
            f = g.functional
        else:
            assert_never(g)
        if f.func.name not in self.index:
            return {}
        return self.index[f.func.name]

    @staticmethod
    def grow_from_backend_indices(
        kernel_index: dict[OperatorName, dict[ETKernelKey, BackendMetadata]],
        backend_indices: dict[DispatchKey, dict[OperatorName, BackendMetadata]],
    ) -> None:
        """Fold plain backend indices into `kernel_index`, in place.

        Each (op, metadata) entry is registered under the catch-all default
        ETKernelKey; the dispatch key itself is discarded.
        """
        for index in backend_indices.values():
            for op, backend_metadata in index.items():
                kernel_index.setdefault(op, {})[
                    ETKernelKey(default=True)
                ] = backend_metadata

    @staticmethod
    def from_backend_indices(
        backend_indices: dict[DispatchKey, dict[OperatorName, BackendMetadata]]
    ) -> ETKernelIndex:
        """Build a fresh ETKernelIndex from plain backend indices."""
        kernel_index: dict[
            OperatorName, dict[ETKernelKey, BackendMetadata]
        ] = defaultdict(dict)
        ETKernelIndex.grow_from_backend_indices(kernel_index, backend_indices)
        return ETKernelIndex(kernel_index)

    def grow(
        self, backend_indices: dict[DispatchKey, dict[OperatorName, BackendMetadata]]
    ) -> ETKernelIndex:
        """Fold `backend_indices` into this index (mutates self.index) and
        return self for chaining."""
        ETKernelIndex.grow_from_backend_indices(self.index, backend_indices)
        return self

    def _to_backend_index(self) -> BackendIndex:
        """
        WARNING: this will be deprecated once all the codegen places know how to handle ETKernelIndex.
        """
        index: dict[OperatorName, BackendMetadata] = {}
        for op in self.index:
            kernel_dict = self.index[op]
            # A BackendIndex can only hold one kernel per op, so conversion is
            # only defined when exactly one variant is registered.
            assert (
                len(kernel_dict.values()) == 1
            ), f"Can't convert ETKernelIndex to BackendIndex because {op} has more than one kernels. Got {kernel_dict}"
            # Falls back to empty metadata if the single entry is not the
            # catch-all default key.
            index[op] = kernel_dict.get(
                ETKernelKey(default=True),
                BackendMetadata(kernel="", structured=False, cpp_namespace=""),
            )
        return BackendIndex(
            dispatch_key=DispatchKey.CPU,
            use_out_as_primary=False,
            device_guard=False,
            external=False,
            index=index,
        )

    # Note duplicate ETKernelKey from index_b will clobber the metadata from index_a
    @staticmethod
    def merge_indices(index_a: ETKernelIndex, index_b: ETKernelIndex) -> ETKernelIndex:
        """Return a new index combining both inputs; on key collisions the
        entry from `index_b` wins. Neither input is mutated."""
        # Fix: copy one level deep. A shallow `index_a.index.copy()` aliases
        # index_a's inner dicts, so the merge loop below would mutate index_a
        # for any operator present in both indices.
        combined: dict[OperatorName, dict[ETKernelKey, BackendMetadata]] = defaultdict(
            dict, {op: dict(entry) for op, entry in index_a.index.items()}
        )

        for op, entry in index_b.index.items():
            for key, metadata in entry.items():
                combined[op][key] = metadata

        return ETKernelIndex(combined)
|
minigpt2/lib/python3.10/site-packages/torchgen/executorch/parse.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from collections import defaultdict, namedtuple
|
| 4 |
+
from typing import Any
|
| 5 |
+
|
| 6 |
+
import yaml
|
| 7 |
+
|
| 8 |
+
from torchgen.executorch.model import ETKernelIndex, ETKernelKey
|
| 9 |
+
from torchgen.gen import LineLoader, parse_native_yaml
|
| 10 |
+
from torchgen.model import (
|
| 11 |
+
BackendMetadata,
|
| 12 |
+
DispatchKey,
|
| 13 |
+
FunctionSchema,
|
| 14 |
+
NativeFunction,
|
| 15 |
+
OperatorName,
|
| 16 |
+
)
|
| 17 |
+
from torchgen.utils import NamespaceHelper
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Parse native_functions.yaml into a sequence of NativeFunctions and ET Backend Indices.
# Result bundle returned by the ET yaml parsing entry points.
ETParsedYaml = namedtuple("ETParsedYaml", ["native_functions", "et_kernel_indices"])

# Fields in native_functions.yaml used to determine which kernels should be used
# (these are ExecuTorch-specific and are stripped before regular parsing).
ET_FIELDS = ["kernels", "type_alias", "dim_order_alias"]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def parse_from_yaml(ei: dict[str, object]) -> dict[ETKernelKey, BackendMetadata]:
    """Given a loaded yaml representing kernel assignment information, extract the
    mapping from `kernel keys` to `BackendMetadata` (the latter representing the kernel instance)

    Args:
        ei: Dict keys {kernels, type_alias, dim_order_alias}
            See ETKernelKey for description of arguments

    Returns:
        An empty dict when the entry declares no kernels.
    """
    # Work on a copy so the caller's dict is not mutated by the pops below.
    e = ei.copy()
    if (kernels := e.pop("kernels", None)) is None:
        return {}

    type_alias: dict[str, list[str]] = e.pop("type_alias", {})  # type: ignore[assignment]
    dim_order_alias: dict[str, list[str]] = e.pop("dim_order_alias", {})  # type: ignore[assignment]
    # "__line__" is injected by the LineLoader for diagnostics; drop it.
    dim_order_alias.pop("__line__", None)

    kernel_mapping: dict[ETKernelKey, BackendMetadata] = {}

    for entry in kernels:  # type: ignore[attr-defined]
        arg_meta = entry.get("arg_meta")
        if arg_meta is not None:
            arg_meta.pop("__line__")

        kernel_name = entry.get("kernel_name")
        namespace_helper = NamespaceHelper.from_namespaced_entity(
            kernel_name, max_level=3
        )
        kernel_namespace = namespace_helper.get_cpp_namespace(default="at")
        backend_metadata = BackendMetadata(
            kernel=namespace_helper.entity_name,
            structured=False,
            cpp_namespace=(kernel_namespace + "::native"),
        )

        # No arg_meta means this kernel is the catch-all/default key.
        kernel_keys = (
            [ETKernelKey((), default=True)]
            if arg_meta is None
            else ETKernelKey.gen_from_yaml(arg_meta, type_alias, dim_order_alias)  # type: ignore[arg-type]
        )

        for kernel_key in kernel_keys:
            assert kernel_key not in kernel_mapping, (
                "Duplicate kernel key: " + str(kernel_key) + " " + str(e)
            )
            kernel_mapping[kernel_key] = backend_metadata

    return kernel_mapping
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def parse_et_yaml_struct(es: object) -> ETKernelIndex:
    """Build an ETKernelIndex from a loaded yaml operator list.

    For every operator entry, extract the mapping of kernel keys to
    BackendMetadata (the kernel instance a key should resolve to).
    Operators that declare no kernels are omitted from the index.
    """
    op_to_kernels: dict[OperatorName, dict[ETKernelKey, BackendMetadata]] = {}
    for raw_entry in es:  # type: ignore[attr-defined]
        entry = raw_entry.copy()

        funcs = entry.pop("func")
        assert isinstance(funcs, str), f"not a str: {funcs}"
        helper = NamespaceHelper.from_namespaced_entity(
            namespaced_entity=funcs, max_level=1
        )
        opname = FunctionSchema.parse(helper.entity_name).name

        assert opname not in op_to_kernels, f"Duplicate func found in yaml: {opname} already"

        kernel_index = parse_from_yaml(entry)
        if kernel_index:
            op_to_kernels[opname] = kernel_index

    return ETKernelIndex(op_to_kernels)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def extract_kernel_fields(es: object) -> dict[OperatorName, dict[str, Any]]:
    """Collect the ExecuTorch-specific kernel fields (ET_FIELDS) from a
    loaded yaml operator list, keyed by each operator's name."""
    per_op_fields: dict[OperatorName, dict[str, Any]] = defaultdict(dict)
    for entry in es:  # type: ignore[attr-defined]
        funcs = entry.get("func")
        assert isinstance(funcs, str), f"not a str: {funcs}"
        helper = NamespaceHelper.from_namespaced_entity(
            namespaced_entity=funcs, max_level=1
        )
        op_name = FunctionSchema.parse(helper.entity_name).name

        for field_name in ET_FIELDS:
            field_value = entry.get(field_name)
            if field_value is not None:
                per_op_fields[op_name][field_name] = field_value

    return per_op_fields
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def parse_et_yaml(
    path: str,
    tags_yaml_path: str,
    ignore_keys: set[DispatchKey] | None = None,
    skip_native_fns_gen: bool = False,
) -> tuple[list[NativeFunction], dict[OperatorName, dict[str, Any]]]:
    """Parse native_functions.yaml into NativeFunctions and an Operator Indexed Dict
    of fields to persist from native_functions.yaml to functions.yaml
    """
    with open(path) as f:
        es = yaml.load(f, Loader=LineLoader)

    # Capture the ET-only fields before they are stripped below.
    et_kernel = extract_kernel_fields(es)

    # Remove ET specific fields from entries for BC compatibility
    strip_et_fields(es)

    native_yaml = parse_native_yaml(
        path,
        tags_yaml_path,
        ignore_keys,
        skip_native_fns_gen=skip_native_fns_gen,
        loaded_yaml=es,  # reuse the already-loaded (and stripped) yaml
    )
    return native_yaml.native_functions, et_kernel
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def strip_et_fields(es: object) -> None:
    """Delete the ExecuTorch-specific keys (ET_FIELDS) from every operator
    entry, mutating the loaded yaml in place for BC compatibility."""
    for op_entry in es:  # type: ignore[attr-defined]
        for et_field in ET_FIELDS:
            op_entry.pop(et_field, None)
|
minigpt2/lib/python3.10/site-packages/torchgen/gen.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/gen_aoti_c_shim.py
ADDED
|
@@ -0,0 +1,486 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import textwrap
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Sequence
|
| 6 |
+
|
| 7 |
+
from torchgen.api.types import DispatcherSignature
|
| 8 |
+
from torchgen.api.types.signatures import CppSignature, CppSignatureGroup
|
| 9 |
+
from torchgen.context import method_with_native_function
|
| 10 |
+
from torchgen.model import (
|
| 11 |
+
Argument,
|
| 12 |
+
BackendIndex,
|
| 13 |
+
BaseTy,
|
| 14 |
+
BaseType,
|
| 15 |
+
DispatchKey,
|
| 16 |
+
FunctionSchema,
|
| 17 |
+
ListType,
|
| 18 |
+
NativeFunction,
|
| 19 |
+
NativeFunctionsGroup,
|
| 20 |
+
OperatorName,
|
| 21 |
+
OptionalType,
|
| 22 |
+
Type,
|
| 23 |
+
)
|
| 24 |
+
from torchgen.utils import mapMaybe
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# How each schema base type crosses the C shim ABI boundary.
base_type_to_c_type = {
    BaseTy.Tensor: "AtenTensorHandle",
    BaseTy.bool: "int32_t",  # Use int to pass bool
    BaseTy.int: "int64_t",
    BaseTy.SymInt: "int64_t",  # Inductor-generated code won't see a SymInt
    BaseTy.Scalar: "double",  # Use double to pass both integer and floating point
    BaseTy.float: "double",  # TODO: how about other floating point types?
    BaseTy.str: "const char*",
    BaseTy.DeviceIndex: "int32_t",
    BaseTy.Layout: "int32_t",  # Represent enum as int
    BaseTy.MemoryFormat: "int32_t",  # Represent enum as int
    BaseTy.ScalarType: "int32_t",  # Represent enum as int
    BaseTy.Generator: "AtenGeneratorHandle",
}

# The ATen C++ type each base type maps to on the kernel side of the shim.
base_type_to_aten_type = {
    BaseTy.Tensor: "at::Tensor",
    BaseTy.bool: "bool",
    BaseTy.int: "int64_t",
    BaseTy.SymInt: "c10::SymInt",
    BaseTy.Scalar: "c10::Scalar",
    BaseTy.float: "double",
    BaseTy.str: "c10::string_view",
    BaseTy.DeviceIndex: "c10::DeviceIndex",
    BaseTy.Layout: "c10::Layout",
    BaseTy.MemoryFormat: "c10::MemoryFormat",
    BaseTy.ScalarType: "c10::ScalarType",
    BaseTy.Generator: "at::Generator",
}

# Conversion applied at the call site to turn the C value back into the
# ATen value ("" means the value is passed through unchanged).
base_type_to_callsite_expr = {
    BaseTy.Tensor: "*tensor_handle_to_tensor_pointer",
    BaseTy.bool: "",
    BaseTy.int: "",
    BaseTy.SymInt: "",
    BaseTy.Scalar: "",
    BaseTy.float: "",
    BaseTy.str: "",
    BaseTy.DeviceIndex: "static_cast<c10::DeviceIndex>",
    BaseTy.Layout: "static_cast<c10::Layout>",
    BaseTy.MemoryFormat: "static_cast<c10::MemoryFormat>",
    BaseTy.ScalarType: "static_cast<c10::ScalarType>",
    BaseTy.Generator: "*generator_handle_to_generator_pointer",
}
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
# convert args to C types, names in declarations, and expressions in function bodies
def convert_arg_type_and_name(typ: Type, name: str) -> tuple[list[str], list[str], list[str], list[str]]:  # type: ignore[return]
    """Lower one schema argument into the C shim ABI.

    Returns four parallel lists: C types, C argument names, ATen C++ types,
    and the call-site C++ expressions that rebuild the ATen value. A single
    schema argument may expand to several C arguments (e.g. Device becomes
    type + index; a list becomes pointer + length).
    """
    if isinstance(typ, BaseType):
        if typ.name in base_type_to_c_type:
            return (
                [base_type_to_c_type[typ.name]],
                [name],
                [base_type_to_aten_type[typ.name]],
                [
                    f"{base_type_to_callsite_expr[typ.name]}({name})"
                    if base_type_to_callsite_expr[typ.name]
                    else name
                ],
            )
        elif typ.name == BaseTy.Device:
            # Device is flattened into two int32_t C arguments (type, index)
            # and reassembled into a c10::Device at the call site.
            return (
                ["int32_t", "int32_t"],
                [name, name + "_index_"],
                ["c10::Device"],
                [
                    f"c10::Device(static_cast<c10::DeviceType>({name}), static_cast<c10::DeviceIndex>({name}_index_))"
                ],
            )
        else:
            # TODO: BaseTy.Dimname, etc.
            raise NotImplementedError(f"TODO: add support for arg type {repr(typ)}")
    elif isinstance(typ, OptionalType):
        # Recurse on the element type, then mark each C argument optional.
        c_types, names, aten_types, callsite_exprs = convert_arg_type_and_name(
            typ.elem, name
        )
        j = 0  # index for names
        new_aten_types = []
        new_callsite_exprs = []
        for aten_type in aten_types:
            # Use pointer to denote optional type
            c_types[j] = c_types[j] + "*"
            if aten_type.startswith("c10::ArrayRef<"):
                # ArrayRef is passed as pointer + size, but no need to add "*" to the size argument
                new_aten_types.append(f"::std::optional<{aten_type}>")
                base_type = aten_type[len("c10::ArrayRef<") : -1]
                new_callsite_exprs.append(
                    f"pointer_to_optional_list<{base_type}>({names[j]}, {names[j+1]})"
                )
                j += 2
            elif aten_type == "c10::Device":
                # Device is passed as device_type + device_index
                new_aten_types.append("::std::optional<c10::Device>")
                new_callsite_exprs.append(
                    f"pointer_to_optional_device({names[j]}, {names[j+1]})"
                )
                j += 2
            else:
                new_aten_types.append(f"::std::optional<{aten_type}>")
                new_callsite_exprs.append(
                    f"pointer_to_optional<{aten_type}>({names[j]})"
                )
                j += 1

        return (
            c_types,
            names,
            new_aten_types,
            new_callsite_exprs,
        )
    elif isinstance(typ, ListType):
        # Need to explictly pass the list as pointer + length
        c_types, names, aten_types, _ = convert_arg_type_and_name(typ.elem, name)
        assert len(c_types) == 1, "ListType with unsupported element type " + repr(typ)

        # The list content should never be modified
        c_types[0] = f"const {c_types[0]}*"
        c_types.append("int64_t")
        name = names[0]
        names.append(name + "_len_")

        atype = aten_types[0]
        callsite_exprs = []
        if atype == "bool":
            # no converter from std::vector<bool> to c10::ArrayRef<bool>
            # construct std::array<bool, N> instead
            assert typ.size is not None
            callsite_exprs.append(f"pointer_to_list<{typ.size}>({name})")
        elif atype == "::std::optional<at::Tensor>":
            # convert from std::vector<::std::optional<at::Tensor>> to c10::List<::std::optional<at::Tensor>>
            callsite_exprs.append(
                f"c10::List<{atype}>(c10::ArrayRef<{atype}>(pointer_to_list<{atype}>({name}, {name}_len_)))"
            )
        else:
            callsite_exprs.append(f"pointer_to_list<{atype}>({name}, {name}_len_)")

        aten_types = [f"c10::ArrayRef<{t}>" for t in aten_types]
        return (
            c_types,
            names,
            aten_types,
            callsite_exprs,
        )
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def zip_type_and_name(types: list[str], names: list[str]) -> list[str]:
    """Pair each C type with its argument name, producing one
    "type name" declaration string per argument."""
    declarations = []
    for c_type, arg_name in zip(types, names):
        declarations.append(f"{c_type} {arg_name}")
    return declarations
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
# Generate argument declarations and callsite expressions
def gen_arguments(flat_arguments: Sequence[Argument]) -> tuple[list[str], list[str]]:
    """Lower a flat argument list into C declarations plus the matching
    call-site expressions, concatenating the expansion of each argument."""
    all_types: list[str] = []
    all_names: list[str] = []
    all_exprs: list[str] = []
    for argument in flat_arguments:
        arg_types, arg_names, _, arg_exprs = convert_arg_type_and_name(
            argument.type, argument.name
        )
        all_types += arg_types
        all_names += arg_names
        all_exprs += arg_exprs
    return zip_type_and_name(all_types, all_names), all_exprs
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
# Return values are passed out as pointer arguments because all the C shim functions
# are expected to return AOTITorchError.
# Generate returns as declarations and callsite expressions
def gen_returns(schema: FunctionSchema) -> tuple[list[str], list[str]]:
    """Lower the schema's returns into out-pointer parameter declarations and
    the C++ statements that store each kernel result through its pointer.

    Raises NotImplementedError for return types outside base_type_to_c_type.
    """
    types = []
    names = []
    for idx, ret in enumerate(schema.returns):
        names.append(f"ret{idx}")
        if isinstance(ret.type, BaseType) and ret.type.name in base_type_to_c_type:
            types.append(base_type_to_c_type[ret.type.name] + "*")
        else:
            raise NotImplementedError(
                f"TODO: add support for return type {repr(ret.type)}"
            )

    def convert_return(typ: BaseType, val: str) -> str:
        # Translate an ATen return expression into its C ABI representation.
        if typ.name == BaseTy.Tensor:
            return f"new_tensor_handle(std::move({val}));"
        elif typ.name == BaseTy.SymInt:
            return f"{val}.expect_int()"
        elif typ.name == BaseTy.Scalar:
            return f"{val}.toDouble()"
        else:
            return val

    # For the ops listed below, stores are guarded with a null check on the
    # out pointer (callers may pass null for outputs they don't need —
    # NOTE(review): inferred from the guard; confirm against callers).
    ret_pointer_can_be_null = False
    unambiguous_name = schema.name.unambiguous_name()
    for name in [
        "_scaled_dot_product_flash_attention",
        "_scaled_dot_product_efficient_attention",
        "_scaled_dot_product_cudnn_attention",
        "convolution_backward",
    ]:
        if name in unambiguous_name:
            ret_pointer_can_be_null = True
            break

    callsite_exprs: list[str] = []
    for idx, ret in enumerate(schema.returns):
        # A single return is the value itself; multiple returns arrive as a tuple.
        tmp = "tmp_result" if len(names) == 1 else f"std::get<{idx}>(tmp_result)"
        assert isinstance(ret.type, BaseType)
        rval = convert_return(ret.type, tmp)
        if ret_pointer_can_be_null:
            callsite_exprs.append(f"if ({names[idx]}) {{ *{names[idx]} = {rval}; }}")
        else:
            callsite_exprs.append(f"*{names[idx]} = {rval};")

    return zip_type_and_name(types, names), callsite_exprs
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
# gen.py generates header first and then src, so caching the result here to avoid duplicate work
# Keyed by (function name, device, backend call); value is (declaration, definition).
declaration_definition_cache: dict[tuple[str, str, str], tuple[str, str]] = {}
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def gen_declaration_and_definition(
    schema: FunctionSchema, device: str, backend_call: str
) -> tuple[str, str]:
    """Build the C shim declaration and definition strings for one op.

    Results are memoized in declaration_definition_cache because gen.py
    asks for the header and the source of the same op separately.
    """
    func_name = schema.name.unambiguous_name()

    global declaration_definition_cache
    if (func_name, device, backend_call) in declaration_definition_cache:
        return declaration_definition_cache[(func_name, device, backend_call)]

    if schema.is_out_fn():
        # out_variant has out arguments in the front, and it's ok to ignore return values
        # because C shim functions only return AOTITorchError
        args, callsite_exprs = gen_arguments(
            [*schema.arguments.out, *schema.arguments.flat_non_out]
        )
        ret_assignments: list[str] = []
    else:
        args, callsite_exprs = gen_arguments(schema.arguments.flat_all)
        # ignore return values for inplace ops
        ret_declarations, ret_assignments = (
            ([], []) if schema.name.name.inplace else gen_returns(schema)
        )
        args.extend(ret_declarations)

    declaration = f"AOTITorchError aoti_torch_{device}_{func_name}({', '.join(args)})"

    # Only capture the kernel result when there is something to assign from it.
    tmp_result = "auto tmp_result = " if ret_assignments else ""
    ret_assignments_str = "\n" + "\n".join(ret_assignments) if ret_assignments else ""
    definition = f"""
{declaration} {{
    AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE({{
        {tmp_result}{backend_call}(
{textwrap.indent(', '.join(callsite_exprs), "            ")}
        );{textwrap.indent(ret_assignments_str, "        ")}
    }});
}}
"""
    declaration_definition_cache[(func_name, device, backend_call)] = (
        declaration,
        definition,
    )
    return declaration, definition
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
def gen_static_dispatch_backend_call_signature(
    sig: CppSignature | DispatcherSignature,
    f: NativeFunction,
) -> CppSignature:
    """Choose the C++ signature (symint or plain) for a static dispatch call.

    NOTE(review): the incoming *sig* is immediately replaced by a
    DispatcherSignature built from f.func, so the passed-in value is
    effectively unused — confirm whether the parameter can be dropped
    upstream.
    """
    sig = DispatcherSignature.from_schema(f.func)
    cpp_sigs = CppSignatureGroup.from_native_function(
        f, method=False, fallback_binding=False
    )
    # Use the SymInt variant only when both the rebuilt dispatcher
    # signature and the schema itself are symint-capable.
    if sig.symint and f.func.has_symint():
        cpp_sig = cpp_sigs.symint_signature
    else:
        cpp_sig = cpp_sigs.signature
    assert cpp_sig is not None
    return cpp_sig
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def gen_static_dispatch_backend_call(
    f: NativeFunction,
    backend_index: BackendIndex,
) -> str:
    """Return the fully-qualified C++ function name used to call *f*
    statically through the given backend (e.g. "at::cpu::add_out")."""
    dispatcher_sig = DispatcherSignature.from_schema(f.func)
    cpp_sig = gen_static_dispatch_backend_call_signature(dispatcher_sig, f)
    backend_namespace = backend_index.dispatch_key.lower()
    return f"at::{backend_namespace}::{cpp_sig.name()}"
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def get_backend_index_for_aoti(
    func: NativeFunction,
    func_group_mapping: dict[OperatorName, NativeFunctionsGroup],
    dispatch_key: DispatchKey,
    backend_indices: dict[DispatchKey, BackendIndex],
) -> BackendIndex | None:
    """Find the BackendIndex whose kernel should back the C shim for *func*.

    Checks the requested dispatch key first — either directly or through the
    op's structured delegate group — then falls back through the composite
    dispatch keys. Returns None when no suitable kernel exists.
    """
    backend_index = None
    if backend_indices[dispatch_key].has_kernel(func) or (
        func.structured_delegate is not None
        and func.structured_delegate in func_group_mapping
        and backend_indices[dispatch_key].has_kernel(
            func_group_mapping[func.structured_delegate]
        )
    ):
        backend_index = backend_indices[dispatch_key]
    elif backend_indices[DispatchKey.CompositeExplicitAutograd].has_kernel(func):
        # We need to create C shim wrappers for CompositeExplicitAutograd kernels
        backend_index = backend_indices[DispatchKey.CompositeExplicitAutograd]
    elif backend_indices[DispatchKey.CompositeExplicitAutogradNonFunctional].has_kernel(
        func
    ):
        # We need to create C shim wrappers for CompositeExplicitAutogradNonFunctional kernels
        backend_index = backend_indices[
            DispatchKey.CompositeExplicitAutogradNonFunctional
        ]
    elif backend_indices[DispatchKey.CompositeImplicitAutograd].has_kernel(func):
        backend_index = backend_indices[DispatchKey.CompositeImplicitAutograd]

    return backend_index
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def get_header_for_aoti(
    func: NativeFunction,
    func_group_mapping: dict[OperatorName, NativeFunctionsGroup],
    dispatch_key: DispatchKey,
    backend_indices: dict[DispatchKey, BackendIndex],
) -> str | None:
    """Return the per-operator dispatch #include line for *func*, or None
    when no backend kernel is available for it."""
    backend_index = get_backend_index_for_aoti(
        func, func_group_mapping, dispatch_key, backend_indices
    )
    if backend_index is None:
        return None
    key_name = backend_index.dispatch_key.lower()
    return f"#include <ATen/ops/{func.root_name}_{key_name}_dispatch.h>"
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def get_fallback_op_name(func: NativeFunction) -> str:
    """Return the fallback-op identifier "namespace.opname.overload",
    using "default" when the op has no overload name."""
    overload = func.func.name.overload_name
    if not overload:
        overload = "default"
    return f"{func.namespace}.{func.func.name.name}.{overload}"
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
def gen_c_shim(
    func: NativeFunction,
    func_group_mapping: dict[OperatorName, NativeFunctionsGroup],
    dispatch_key: DispatchKey,
    backend_indices: dict[DispatchKey, BackendIndex],
    header: bool,
) -> str | None:
    """Render the C shim declaration (header=True) or definition
    (header=False) for one op, or None when no backend kernel exists or
    the signature is not yet supported."""
    backend_index = get_backend_index_for_aoti(
        func, func_group_mapping, dispatch_key, backend_indices
    )
    if backend_index is None:
        return None

    backend_call = gen_static_dispatch_backend_call(func, backend_index)
    try:
        declaration, definition = gen_declaration_and_definition(
            func.func, dispatch_key.lower(), backend_call
        )
    except NotImplementedError:
        # Unsupported argument/return types: skip this op entirely.
        return None

    return f"AOTI_TORCH_EXPORT {declaration};" if header else definition
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
@dataclass(frozen=True)
class ShimGenerator:
    """Callable that renders the C shim text for a single NativeFunction,
    producing a declaration when header is True and a definition otherwise."""

    func_group_mapping: dict[OperatorName, NativeFunctionsGroup]
    dispatch_key: DispatchKey
    backend_indices: dict[DispatchKey, BackendIndex]
    header: bool  # True to generate .h and False to generate .cpp

    @method_with_native_function
    def __call__(
        self,
        func: NativeFunction,
    ) -> str | None:
        # Delegate to gen_c_shim; None means this op gets no shim entry.
        result = gen_c_shim(
            func,
            self.func_group_mapping,
            self.dispatch_key,
            self.backend_indices,
            self.header,
        )
        return result
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
def gen_aoti_c_shim(
    native_functions: Sequence[NativeFunction],
    func_group_mapping: dict[OperatorName, NativeFunctionsGroup],
    dispatch_key: DispatchKey,
    backend_indices: dict[DispatchKey, BackendIndex],
    header: bool,
    includes: str = "",
) -> str:
    """Assemble the full C shim file for a dispatch key: the header (.h)
    when header is True, otherwise the source (.cpp).

    *includes* supplies the per-operator #include lines used when
    AT_PER_OPERATOR_HEADERS is defined (source variant only).
    """
    # Shim entries for every op that has a usable kernel; ops without one
    # are silently dropped by ShimGenerator returning None.
    body = "\n".join(
        list(
            mapMaybe(
                ShimGenerator(
                    func_group_mapping, dispatch_key, backend_indices, header
                ),
                native_functions,
            )
        )
    )
    device = dispatch_key.lower()

    warning = """
// WARNING: THIS FILE IS AUTOGENERATED BY torchgen. DO NOT MODIFY BY HAND.
// See https://github.com/pytorch/pytorch/blob/7e86a7c0155295539996e0cf422883571126073e/torchgen/gen.py#L2424-L2436 for details"""

    if header:
        return f"""
{warning}

#pragma once

#include <torch/csrc/inductor/aoti_torch/c/shim.h>

#ifdef __cplusplus
extern "C" {{
#endif

{body}

#ifdef __cplusplus
}} // extern "C"
#endif
"""

    else:
        return f"""
{warning}

#include <torch/csrc/inductor/aoti_torch/generated/c_shim_{device}.h>
#include <torch/csrc/inductor/aoti_torch/utils.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/{str(dispatch_key)}Functions.h>
#include <ATen/CompositeExplicitAutogradFunctions.h>
#include <ATen/CompositeExplicitAutogradNonFunctionalFunctions.h>
#include <ATen/CompositeImplicitAutogradFunctions.h>
#else
{includes}
#endif

using namespace torch::aot_inductor;

{body}"""
|
minigpt2/lib/python3.10/site-packages/torchgen/gen_backend_stubs.py
ADDED
|
@@ -0,0 +1,611 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import argparse
|
| 4 |
+
import os
|
| 5 |
+
import re
|
| 6 |
+
from collections import Counter, defaultdict, namedtuple
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from typing import Sequence
|
| 9 |
+
|
| 10 |
+
import yaml
|
| 11 |
+
|
| 12 |
+
import torchgen.api.dispatcher as dispatcher
|
| 13 |
+
import torchgen.dest as dest
|
| 14 |
+
from torchgen.api.types import DispatcherSignature
|
| 15 |
+
from torchgen.code_template import CodeTemplate
|
| 16 |
+
from torchgen.context import native_function_manager
|
| 17 |
+
from torchgen.gen import get_grouped_native_functions, parse_native_yaml
|
| 18 |
+
from torchgen.model import (
|
| 19 |
+
BackendIndex,
|
| 20 |
+
BackendMetadata,
|
| 21 |
+
DispatchKey,
|
| 22 |
+
NativeFunction,
|
| 23 |
+
NativeFunctionsGroup,
|
| 24 |
+
OperatorName,
|
| 25 |
+
)
|
| 26 |
+
from torchgen.selective_build.selector import SelectiveBuilder
|
| 27 |
+
from torchgen.utils import concatMap, context, FileManager, NamespaceHelper, Target
|
| 28 |
+
from torchgen.yaml_utils import YamlLoader
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
|
| 32 |
+
# Returns a Tuple of (backend_key, autograd_key, cpp_namespace, updated BackendIndex mapping)
|
| 33 |
+
# Result record for parse_backend_yaml: the resolved backend/autograd dispatch
# keys, the optional kernel class name, the C++ namespace, and the (mutated)
# DispatchKey -> BackendIndex mapping.
ParsedExternalYaml = namedtuple(
    "ParsedExternalYaml",
    "backend_key autograd_key class_name cpp_namespace backend_indices",
)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def parse_backend_yaml(
    backend_yaml_path: str,
    grouped_native_functions: Sequence[NativeFunction | NativeFunctionsGroup],
    backend_indices: dict[DispatchKey, BackendIndex],
) -> ParsedExternalYaml:
    """Parse an external backend's yaml file and register its dispatch keys.

    Mutates ``backend_indices`` in place, adding a BackendIndex for the
    backend's dispatch key (and for its ``Autograd<Backend>`` key when
    "autograd" ops are listed).

    Args:
        backend_yaml_path: path to the backend's yaml configuration file.
        grouped_native_functions: all native functions, possibly grouped.
        backend_indices: mapping updated in place with the new backend entries.

    Returns:
        A ParsedExternalYaml with the resolved backend/autograd dispatch keys,
        optional class name, C++ namespace, and the updated index mapping.
    """
    # Flatten groups so every operator can be looked up by name.
    native_functions_map: dict[OperatorName, NativeFunction] = {
        f.func.name: f
        for f in concatMap(
            lambda f: [f] if isinstance(f, NativeFunction) else list(f.functions()),
            grouped_native_functions,
        )
    }

    with open(backend_yaml_path) as f:
        yaml_values = yaml.load(f, Loader=YamlLoader)
    assert isinstance(yaml_values, dict)

    valid_keys = [
        "backend",
        "class_name",
        "cpp_namespace",
        "extra_headers",
        "supported",
        "autograd",
        "full_codegen",
        "non_native",
        "ir_gen",
        "symint",
        # Bug fix: these two keys are accepted (popped below) but were
        # previously missing from the list shown in the "unexpected keys"
        # error message.
        "use_out_as_primary",
        "device_guard",
    ]

    backend = yaml_values.pop("backend", None)
    assert backend is not None, 'You must provide a value for "backend"'

    class_name = yaml_values.pop("class_name", None)

    cpp_namespace = yaml_values.pop("cpp_namespace", None)
    assert cpp_namespace is not None, 'You must provide a value for "cpp_namespace"'

    # Mostly just defaulting to false to stick with LazyTensor convention.
    use_out_as_primary = yaml_values.pop("use_out_as_primary", False)
    assert isinstance(
        use_out_as_primary, bool
    ), f"You must provide either True or False for use_out_as_primary. Provided: {use_out_as_primary}"

    use_device_guard = yaml_values.pop("device_guard", False)
    assert isinstance(
        use_device_guard, bool
    ), f"You must provide either True or False for device_guard. Provided: {use_device_guard}"

    supported = yaml_values.pop("supported", [])
    if supported is None:
        supported = []  # Allow an empty list of supported ops
    assert isinstance(
        supported, list
    ), f'expected "supported" to be a list, but got: {supported} (of type {type(supported)})'

    symint = yaml_values.pop("symint", [])
    if symint is None:
        symint = []  # Allow an empty list of symint ops
    # Bug fix: the message previously interpolated `supported` instead of
    # `symint`, so a malformed "symint" entry produced a misleading error.
    assert isinstance(
        symint, list
    ), f'expected "symint" to be a list, but got: {symint} (of type {type(symint)})'
    symint_set = set(symint)

    supported_autograd = yaml_values.pop("autograd", [])
    assert isinstance(
        supported_autograd, list
    ), f'expected "autograd" to be a list, but got: {supported_autograd}'

    # full_codegen is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
    full_codegen = yaml_values.pop("full_codegen", [])
    supported.extend(full_codegen)

    # non_native is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
    yaml_values.pop("non_native", {})

    # ir_gen is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
    yaml_values.pop("ir_gen", {})

    # Every recognized key was popped above; anything left over is a typo or
    # an unsupported option.
    assert (
        len(yaml_values.keys()) == 0
    ), f'{backend_yaml_path} contains unexpected keys: {", ".join(yaml_values.keys())}. \
Only the following keys are supported: {", ".join(valid_keys)}'

    def create_backend_index(
        backend_ops: list[str],
        symint_ops: set[str],
        dispatch_key: DispatchKey,
        *,
        use_out_as_primary: bool,
        use_device_guard: bool,
    ) -> BackendIndex:
        """Build a BackendIndex mapping each listed op to its kernel metadata."""
        metadata: dict[OperatorName, BackendMetadata] = {}
        for op in backend_ops:
            op_name = OperatorName.parse(op)
            assert (
                op_name in native_functions_map
            ), f"Found an invalid operator name: {op_name}"
            # See Note [External Backends Follow Dispatcher API]
            kernel_name = dispatcher.name(native_functions_map[op_name].func)
            if op in symint_ops:
                kernel_name += "_symint"
            # TODO: allow structured external backends later.
            m = BackendMetadata(
                kernel=kernel_name, structured=False, cpp_namespace=cpp_namespace
            )
            metadata[op_name] = m
        return BackendIndex(
            dispatch_key=dispatch_key,
            use_out_as_primary=use_out_as_primary,
            external=True,
            device_guard=use_device_guard,
            index=metadata,
        )

    backend_key: DispatchKey | None = None
    if len(supported) > 0:
        with context(
            lambda: f'The provided value for "backend" must be a valid DispatchKey, but got {backend}.'
        ):
            backend_key = DispatchKey.parse(backend)

        backend_idx = create_backend_index(
            supported,
            symint_set,
            backend_key,
            use_out_as_primary=use_out_as_primary,
            use_device_guard=use_device_guard,
        )
        assert backend_key not in backend_indices
        backend_indices[backend_key] = backend_idx

    autograd_key: DispatchKey | None = None
    if len(supported_autograd) > 0:
        with context(
            lambda: f'The "autograd" key was specified, which indicates that you would like to override \
the behavior of autograd for some operators on your backend. However "Autograd{backend}" is not a valid DispatchKey.'
        ):
            autograd_key = DispatchKey.parse(f"Autograd{backend}")

        autograd_idx = create_backend_index(
            supported_autograd,
            symint_set,
            autograd_key,
            use_out_as_primary=use_out_as_primary,
            use_device_guard=use_device_guard,
        )
        assert autograd_key not in backend_indices
        backend_indices[autograd_key] = autograd_idx

    # Sanity check: an op (and all of its grouped variants) must be listed
    # entirely under "supported" or entirely under "autograd" -- never both.
    for g in grouped_native_functions:
        if isinstance(g, NativeFunction):
            forward_kernels = (
                []
                if backend_key is None
                else [
                    m
                    for m in [backend_indices[backend_key].get_kernel(g)]
                    if m is not None
                ]
            )
            backward_kernels = (
                []
                if autograd_key is None
                else [
                    m
                    for m in [backend_indices[autograd_key].get_kernel(g)]
                    if m is not None
                ]
            )
        else:
            forward_kernels = (
                []
                if backend_key is None
                else [
                    m
                    for m in [
                        backend_indices[backend_key].get_kernel(f)
                        for f in g.functions()
                    ]
                    if m is not None
                ]
            )
            backward_kernels = (
                []
                if autograd_key is None
                else [
                    m
                    for m in [
                        backend_indices[autograd_key].get_kernel(f)
                        for f in g.functions()
                    ]
                    if m is not None
                ]
            )

        forward_kernels = [f for f in forward_kernels if f is not None]
        backward_kernels = [f for f in backward_kernels if f is not None]
        assert (
            len(forward_kernels) == 0 or len(backward_kernels) == 0
        ), f'Currently, all variants of an op must either be registered to a backend key, or to a backend\'s \
autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! \
{forward_kernels[0].kernel} is listed under "supported", but {backward_kernels[0].kernel} is listed under "autograd".'

    return ParsedExternalYaml(
        backend_key, autograd_key, class_name, cpp_namespace, backend_indices
    )
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def error_on_missing_kernels(
    native_functions: Sequence[NativeFunction],
    backend_indices: dict[DispatchKey, BackendIndex],
    backend_key: DispatchKey,
    autograd_key: DispatchKey | None,
    class_name: str,
    kernel_defn_file_path: str,
    full_codegen: list[OperatorName] | None = None,
) -> None:
    """Assert that every kernel the backend yaml promises is defined in C++.

    Scans the backend's implementation file (``kernel_defn_file_path``) with a
    regex and compares, per kernel name, the number of ``ClassName::kernel(``
    definitions found against the number of overloads the yaml expects.

    Args:
        native_functions: all parsed native functions.
        backend_indices: per-dispatch-key kernel metadata.
        backend_key: the backend's primary dispatch key.
        autograd_key: the backend's autograd dispatch key, if any.
        class_name: C++ class that all kernel definitions live under.
        kernel_defn_file_path: path to the C++ file holding the definitions.
        full_codegen: ops that are fully code-generated and therefore exempt
            from the manual-definition check.

    Raises:
        AssertionError: if the file cannot be read, or if any expected kernel
            definition is missing (with the expected schemas in the message).
    """
    try:
        with open(kernel_defn_file_path) as f:
            backend_defns = f.read()
    except OSError as e:
        raise AssertionError(
            f"Unable to read from the specified impl_path file: {kernel_defn_file_path}"
        ) from e

    if full_codegen is None:
        full_codegen = []

    # Check the backend index, plus the autograd index when one exists.
    indices = [backend_indices[backend_key].index] + (
        [] if autograd_key is None else [backend_indices[autograd_key].index]
    )
    # Quick mapping from each OperatorName used by the external backend
    # to its backend kernel name
    expected_backend_op_names: dict[OperatorName, str] = dict(
        list(
            concatMap(
                lambda index: [
                    (op_name, metadata.kernel) for op_name, metadata in index.items()
                ],
                indices,
            )
        )
    )
    # full_codegen ops are generated, not hand-written, so they are skipped.
    expected_backend_native_funcs: list[NativeFunction] = [
        f
        for f in native_functions
        if f.func.name in expected_backend_op_names.keys()
        and f.func.name not in full_codegen
    ]
    # Group the expected native functions by kernel name: several overloads
    # may map to the same C++ kernel name.
    expected_backend_kernel_name_counts: dict[str, list[NativeFunction]] = defaultdict(
        list
    )
    for native_f in expected_backend_native_funcs:
        expected_backend_kernel_name_counts[
            expected_backend_op_names[native_f.func.name]
        ].append(native_f)

    # This just looks for lines containing "foo(", and assumes that the kernel foo has been implemented.
    # It might cause false negatives (we won't catch all cases), but that's ok - if we catch a missing kernel
    # here, then we get a nicer error message. If we miss it, you get a linker error.
    kernel_defn_regex = rf"(.*){class_name}::\s*([\w\d]*)\("
    actual_backend_kernel_name_counts = Counter(
        # A bit unwieldy (this could probably be moved into regex),
        # but we don't want to include kernel names that come from function calls,
        # like "return torch_xla::XLANativeFunctions::empty_strided_symint(...)".
        # Easy check is to ignore any lines with colons before the class name.
        [
            y
            for (x, y) in re.findall(kernel_defn_regex, backend_defns)
            if not x.endswith(":")
        ]
    )

    missing_kernels_err_msg = ""
    for expected_name, funcs in expected_backend_kernel_name_counts.items():
        expected_overload_count = len(funcs)
        actual_overload_count = actual_backend_kernel_name_counts[expected_name]
        if expected_overload_count != actual_overload_count:

            def create_decl(f: NativeFunction) -> str:
                # Render the dispatcher-API signature the backend must define.
                with native_function_manager(f):
                    return DispatcherSignature.from_schema(f.func).decl()

            expected_schemas_str = "\n".join([create_decl(f) for f in funcs])
            missing_kernels_err_msg += f"""
{class_name} is missing a kernel definition for {expected_name}. We found {actual_overload_count} kernel(s) with that name,
but expected {expected_overload_count} kernel(s). The expected function schemas for the missing operator are:
{expected_schemas_str}

"""
    # Accumulate all failures first so the user sees every missing kernel in
    # one error rather than fixing them one at a time.
    assert missing_kernels_err_msg == "", missing_kernels_err_msg
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def main() -> None:
    """Parse command-line arguments and run backend stub generation."""

    def _str2bool(value: str) -> bool:
        # Bug fix: the original used ``type=bool``, which is a classic
        # argparse pitfall -- bool("False") is True because any non-empty
        # string is truthy, so ``--dry-run False`` enabled dry-run mode.
        # Parse the common spellings explicitly instead.
        lowered = value.lower()
        if lowered in ("true", "1", "yes", "y"):
            return True
        if lowered in ("false", "0", "no", "n", ""):
            return False
        raise argparse.ArgumentTypeError(f"expected a boolean value, got {value!r}")

    parser = argparse.ArgumentParser(description="Generate backend stub files")
    parser.add_argument(
        "-s",
        "--source-yaml",
        "--source_yaml",
        help="path to source yaml file containing operator external definitions",
    )
    parser.add_argument("-o", "--output-dir", "--output_dir", help="output directory")
    parser.add_argument(
        "--dry-run",
        "--dry_run",
        type=_str2bool,
        default=False,
        # Bug fix: the help text was a copy-paste of "output directory".
        help="if true, report what would be generated without writing files",
    )
    parser.add_argument(
        "--impl-path",
        "--impl_path",
        type=str,
        default=None,
        help="path to the source C++ file containing kernel definitions",
    )
    options = parser.parse_args()

    run(options.source_yaml, options.output_dir, options.dry_run, options.impl_path)
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
def gen_dispatchkey_nativefunc_headers(
    fm: FileManager,
    class_name: str,
    cpp_namespace: str,
    backend_indices: dict[DispatchKey, BackendIndex],
    grouped_native_functions: Sequence[NativeFunction | NativeFunctionsGroup],
    backend_dispatch_key: DispatchKey,
    autograd_dispatch_key: DispatchKey | None,
    backend_name: str = "",
) -> None:
    """Write the ``<DispatchKey>NativeFunctions.h`` header for a backend.

    Collects the native-function declarations for the backend dispatch key
    (plus the autograd key, when present) and renders them through the
    ``DispatchKeyNativeFunctions.h`` template via ``fm``.
    """
    assert class_name is not None
    generated_comment = (
        "Autogenerated file by gen_backend_stubs.py. Do not edit directly!"
    )

    def declarations_for(key: DispatchKey | None) -> list[str]:
        # Backends are allowed to repeat kernel names; collect into a set so
        # each declaration is generated only once, then sort for
        # deterministic output. A missing (None) key yields no declarations.
        if key is None:
            return []
        index = backend_indices[key]
        unique_decls: set[str] = set()
        for group in grouped_native_functions:
            unique_decls.update(
                dest.compute_native_function_declaration(group, index)
            )
        return sorted(unique_decls)

    backend_declarations = declarations_for(backend_dispatch_key)
    autograd_declarations = declarations_for(autograd_dispatch_key)

    ns_helper = NamespaceHelper(cpp_namespace)
    fm.write_with_template(
        f"{backend_dispatch_key}NativeFunctions.h",
        "DispatchKeyNativeFunctions.h",
        lambda: {
            "generated_comment": generated_comment,
            "namespace_prologue": ns_helper.prologue,
            "class_name": class_name,
            "namespace_epilogue": ns_helper.epilogue,
            "dispatch_declarations": backend_declarations + autograd_declarations,
            "BackendName": backend_name,
            "DispatchKey": backend_dispatch_key,
        },
    )
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
def gen_dispatcher_registrations(
    fm: FileManager,
    output_dir: str,
    class_name: str,
    backend_indices: dict[DispatchKey, BackendIndex],
    grouped_native_functions: Sequence[NativeFunction | NativeFunctionsGroup],
    backend_dispatch_key: DispatchKey,
    dispatch_key: DispatchKey,
    selector: SelectiveBuilder,
    # build_in_tree is true for lazy TS backend and affects include paths, not used for external backends
    build_in_tree: bool = False,
    per_operator_headers: bool = False,
    backend_name: str = "",
    eager_registration: bool = True,
) -> None:
    """Write ``Register<DispatchKey>.cpp``, which registers the backend's
    kernels with the PyTorch dispatcher.

    Args:
        fm: file manager used to render and write the output file.
        output_dir: directory containing the generated backend headers.
        class_name: C++ class that holds the kernel definitions.
        backend_indices: per-dispatch-key kernel metadata.
        grouped_native_functions: all native functions, possibly grouped.
        backend_dispatch_key: the backend's primary dispatch key (names the
            generated header to include).
        dispatch_key: the dispatch key being registered here (may be the
            backend key or its autograd key).
        selector: selective-build filter for which ops to register.
        build_in_tree: use ``<...>`` includes instead of ``"..."`` includes.
        per_operator_headers: if true, skip the monolithic ATen/Functions.h.
        backend_name: used to name the deferred registration entry point.
        eager_registration: if true, register at static-init time via
            TORCH_LIBRARY_IMPL; otherwise emit a Register...() function the
            backend must call explicitly.
    """
    headers = [
        f"{output_dir}/{backend_dispatch_key}NativeFunctions.h",
    ]
    if build_in_tree:
        external_backend_headers_str = "\n".join(f"#include <{h}>" for h in headers)
    else:
        external_backend_headers_str = "\n".join(f'#include "{h}"' for h in headers)

    assert class_name is not None
    backend_index = backend_indices[dispatch_key]

    # The m.impl(...) registration lines, one per op.
    dispatch_registrations_body = list(
        concatMap(
            dest.RegisterDispatchKey(
                backend_index,
                Target.REGISTRATION,
                selector,
                rocm=False,
                symint=True,
                class_method_name=f"{class_name}",
                skip_dispatcher_op_registration=False,
            ),
            grouped_native_functions,
        )
    )
    newline = "\n"
    ns_helper = NamespaceHelper(namespace_str="at")
    # Exactly one of these two strings is filled in, depending on
    # eager_registration; the template receives both.
    deferred_dispatch_registrations = ""
    static_init_dispatch_registrations = ""
    if eager_registration:
        static_template = CodeTemplate(
            """\
TORCH_LIBRARY_IMPL(aten, $dispatch_key, m) {
    $dispatch_registrations_body
};"""
        )
        static_init_dispatch_registrations = static_template.substitute(
            dispatch_key=dispatch_key,
            dispatch_registrations_body=dispatch_registrations_body,
        )
    else:
        deferred_template = CodeTemplate(
            """\
TORCH_API void Register${backend_name}${dispatch_key}NativeFunctions();
TORCH_API void Register${backend_name}${dispatch_key}NativeFunctions() {
    static auto m = MAKE_TORCH_LIBRARY_IMPL(aten, $dispatch_key);
    $dispatch_registrations_body
}"""
        )
        deferred_dispatch_registrations = deferred_template.substitute(
            backend_name=backend_name,
            dispatch_key=dispatch_key,
            dispatch_registrations_body=dispatch_registrations_body,
        )

    fm.write_with_template(
        f"Register{dispatch_key}.cpp",
        "RegisterDispatchKey.cpp",
        lambda: {
            "extra_cuda_headers": "",
            "external_backend_headers": external_backend_headers_str,
            "ops_headers": "#include <ATen/Functions.h>"
            if not per_operator_headers
            else "",
            "DispatchKey": dispatch_key,
            "dispatch_namespace": dispatch_key.lower(),
            "dispatch_headers": dest.gen_registration_headers(
                backend_index, per_operator_headers=per_operator_headers, rocm=False
            ),
            # The inner template renders the anonymous kernel wrapper
            # definitions plus the registration block computed above.
            "dispatch_definitions": fm.substitute_with_template(
                "RegisterDispatchDefinitions.ini",
                lambda: {
                    "ns_prologue": ns_helper.prologue,
                    "ns_epilogue": ns_helper.epilogue,
                    "static_init_dispatch_registrations": static_init_dispatch_registrations,
                    "deferred_dispatch_registrations": deferred_dispatch_registrations,
                    "dispatch_helpers": dest.gen_registration_helpers(backend_index),
                    "dispatch_namespace": dispatch_key.lower(),
                    "dispatch_namespaced_definitions": "",
                    "dispatch_anonymous_definitions": list(
                        concatMap(
                            dest.RegisterDispatchKey(
                                backend_index,
                                Target.ANONYMOUS_DEFINITION,
                                selector,
                                rocm=False,
                                symint=True,
                                class_method_name=f"{class_name}",
                                skip_dispatcher_op_registration=False,
                            ),
                            grouped_native_functions,
                        )
                    ),
                },
            ).split(newline),
        },
    )
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
def run(
    source_yaml: str, output_dir: str, dry_run: bool, impl_path: str | None = None
) -> None:
    """Drive the full backend-stub generation pipeline for one backend yaml.

    Parses the native and backend yaml files, optionally validates that all
    promised kernels exist in ``impl_path``, then writes the backend's
    native-function header and dispatcher registration files.
    """
    # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
    pytorch_root = Path(__file__).parent.parent.absolute()
    template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")

    fm = FileManager(
        install_dir=output_dir, template_dir=template_dir, dry_run=dry_run
    )

    native_yaml_path = os.path.join(
        pytorch_root, "aten/src/ATen/native/native_functions.yaml"
    )
    tags_yaml_path = os.path.join(pytorch_root, "aten/src/ATen/native/tags.yaml")
    parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
    native_functions = parsed_yaml.native_functions
    backend_indices = parsed_yaml.backend_indices
    grouped_native_functions = get_grouped_native_functions(native_functions)

    parsed_backend_yaml = parse_backend_yaml(
        source_yaml, grouped_native_functions, backend_indices
    )
    backend_key = parsed_backend_yaml.backend_key
    autograd_key = parsed_backend_yaml.autograd_key
    cpp_namespace = parsed_backend_yaml.cpp_namespace
    class_name = parsed_backend_yaml.class_name
    backend_indices = parsed_backend_yaml.backend_indices

    selector = SelectiveBuilder.get_nop_selector()

    if backend_key is None:
        # This could be useful if a backend wants to quickly set up a noop
        # yaml file but doesn't have any kernels ready yet.
        return

    if class_name is None:
        # class_name is an optional entry in the backend yaml file. When
        # specified it lets an external backend override the name of the
        # class that all generated kernel definitions live under; otherwise
        # fall back to the index's native_function_class_name.
        class_name = backend_indices[backend_key].native_function_class_name()
    assert class_name is not None

    if impl_path is not None:
        error_on_missing_kernels(
            native_functions,
            backend_indices,
            backend_key,
            autograd_key,
            class_name,
            impl_path,
        )

    gen_dispatchkey_nativefunc_headers(
        fm,
        class_name,
        cpp_namespace,
        backend_indices,
        grouped_native_functions,
        backend_key,
        autograd_key,
    )

    # One registration file per dispatch key: the backend key always, plus
    # the autograd key when the backend overrides autograd behavior.
    dispatch_keys = [backend_key]
    if autograd_key is not None:
        dispatch_keys.append(autograd_key)
    for dispatch_key in dispatch_keys:
        gen_dispatcher_registrations(
            fm,
            output_dir,
            class_name,
            backend_indices,
            grouped_native_functions,
            backend_key,
            dispatch_key,
            selector,
        )
|
| 608 |
+
|
| 609 |
+
|
| 610 |
+
# Script entry point: allow this module to be invoked directly.
if __name__ == "__main__":
    main()
|
minigpt2/lib/python3.10/site-packages/torchgen/gen_functionalization_type.py
ADDED
|
@@ -0,0 +1,882 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import Callable, TYPE_CHECKING
|
| 5 |
+
|
| 6 |
+
from torchgen.api import cpp, dispatcher
|
| 7 |
+
from torchgen.api.translate import translate
|
| 8 |
+
from torchgen.api.types import (
|
| 9 |
+
BaseCType,
|
| 10 |
+
Binding,
|
| 11 |
+
CType,
|
| 12 |
+
DispatcherSignature,
|
| 13 |
+
FunctionalizationLambda,
|
| 14 |
+
iTensorListRefT,
|
| 15 |
+
NativeSignature,
|
| 16 |
+
OptionalCType,
|
| 17 |
+
optionalSymIntArrayRefT,
|
| 18 |
+
symIntArrayRefT,
|
| 19 |
+
SymIntT,
|
| 20 |
+
tensorListT,
|
| 21 |
+
tensorT,
|
| 22 |
+
VectorCType,
|
| 23 |
+
ViewInverseSignature,
|
| 24 |
+
)
|
| 25 |
+
from torchgen.context import (
|
| 26 |
+
method_with_native_function,
|
| 27 |
+
native_function_manager,
|
| 28 |
+
with_native_function,
|
| 29 |
+
with_native_function_and,
|
| 30 |
+
)
|
| 31 |
+
from torchgen.model import (
|
| 32 |
+
Argument,
|
| 33 |
+
BackendIndex,
|
| 34 |
+
BaseTy,
|
| 35 |
+
BaseType,
|
| 36 |
+
FunctionSchema,
|
| 37 |
+
ListType,
|
| 38 |
+
NativeFunction,
|
| 39 |
+
NativeFunctionsGroup,
|
| 40 |
+
NativeFunctionsViewGroup,
|
| 41 |
+
Return,
|
| 42 |
+
SchemaKind,
|
| 43 |
+
SelfArgument,
|
| 44 |
+
TensorOptionsArguments,
|
| 45 |
+
)
|
| 46 |
+
from torchgen.native_function_generation import (
|
| 47 |
+
INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY,
|
| 48 |
+
MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT,
|
| 49 |
+
OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY,
|
| 50 |
+
)
|
| 51 |
+
from torchgen.utils import dataclass_repr
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
if TYPE_CHECKING:
|
| 55 |
+
from torchgen.selective_build.selector import SelectiveBuilder
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# Note: [Mutable Ops Not Using Functionalization]
# Ops in this list currently do not work with functionalization and should be fixed.
# Each entry is an operator base name (string); the three imported lists are
# concatenated with a handful of manual exceptions appended.
MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION = (
    OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY
    + MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
    + INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY
    + [
        # It will be BC-breaking, but we should fix their schemas.
        # should be inplace?
        "record_stream",
        # See Note [resize_ in Functionalization]
        "resize_",
        "resize_as_",
        # This function is used for testing purposes only.
        "_fill_mem_eff_dropout_mask_",
    ]
)
|
| 75 |
+
|
| 76 |
+
# This file contains codegen that relates to the functionalization pass.
|
| 77 |
+
# It includes:
|
| 78 |
+
# - gen_functionalization_definition
|
| 79 |
+
# Generates dispatcher kernel definitions for the functionalization pass.
|
| 80 |
+
# - gen_functionalization_registration
|
| 81 |
+
# Generates dispatcher kernel registrations for the functionalization pass.
|
| 82 |
+
# - gen_functionalization_view_inverse_declaration
|
| 83 |
+
# Generates a declaration for an "inverse view", for every view op
|
| 84 |
+
# that is needed in functionalization. We manually implement their definitions.
|
| 85 |
+
# - gen_composite_view_copy_kernel
|
| 86 |
+
# Generates view_copy() composite kernels for all view_copy operators.
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# Generates the body of the default composite C++ kernel for a {view}_copy NativeFunction
# See Note [view_copy NativeFunctions]
@dataclass(frozen=True)
class GenCompositeViewCopyKernel:
    """Callable codegen object: given a NativeFunctionsViewGroup, emit the C++
    source for the default composite kernel of its {view}_copy operator, or
    None when no kernel should be generated."""

    # Backend whose kernel metadata (kernel name, symint support) drives codegen.
    backend_index: BackendIndex

    @method_with_native_function
    def __call__(self, g: NativeFunctionsViewGroup) -> str | None:
        """Return the C++ kernel definition for g.view_copy, or None if the
        group has no view_copy op or uses a non-standard copy-variant name."""
        if g.view_copy is None:
            return None
        elif g.view_copy.func.name.name.base != f"{g.view.func.name.name}_copy":
            # If the view_copy doesn't match the standard naming scheme of <op>_copy,
            # assume it already exists and doesn't need to be generated.
            # Example: slice_inverse() with the copy variant named slice_scatter()
            # instead of slice_inverse_copy()
            return None

        metadata = self.backend_index.get_kernel(g.view_copy)
        assert metadata is not None

        # We can make view_copy work in more cases by using reshape()
        # when a normal view call would ordinarily fail.
        # This also makes LTC more efficient, because they don't need to include
        # clone() calls in their graph (which is normally needed by reshape).
        if str(g.view_copy.func.name) == "view_copy":
            # view_copy gets a hand-written body; everything else falls through
            # to the generic "clone then view" template below.
            assert metadata.kernel == "view_copy_symint"
            return """\
at::Tensor view_copy_symint(const at::Tensor & self, at::SymIntArrayRef size) {
  c10::SymDimVector shape = infer_size_dv(size, self.sym_numel());
  if (!at::detail::computeStride(self.sym_sizes(), self.sym_strides(), shape).has_value()) {
    return self.reshape_symint(size);
  } else {
    auto output = at::_ops::view::call(self, size);
    return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
  }
}
"""
        # view_copy is a native signature, since we're generating an at::native:: kernel
        # Functionalization always operates on symints though
        view_copy_sig = NativeSignature(
            g.view_copy.func, symint=metadata.supports_symint()
        )

        # view is a dispatcher signature, since we're calling into the at::_ops API
        view_sig = DispatcherSignature(g.view.func)

        view_api_name = g.view.func.name.unambiguous_name()
        exprs = ", ".join(
            [e.expr for e in translate(view_copy_sig.arguments(), view_sig.arguments())]
        )

        # view ops today always return either a Tensor or a list of Tensors
        assert len(g.view.func.returns) == 1
        assert g.view.func.returns[0].type == BaseType(
            BaseTy.Tensor
        ) or g.view.func.returns[0].type == ListType(BaseType(BaseTy.Tensor), None)

        if g.view.func.returns[0].type == BaseType(BaseTy.Tensor):
            return_cloned_output = """\
  return output.clone(/*memory_format=*/at::MemoryFormat::Contiguous);"""
        else:
            # If the return type is a list, we need to clone each tensor in the list.
            return_cloned_output = f"""\
  {view_copy_sig.returns_type().cpp_type()} out_clone;
  for (const auto i : c10::irange(output.size())) {{
    out_clone.push_back(output[i].clone(/*memory_format=*/at::MemoryFormat::Contiguous));
  }}
  return out_clone;"""

        # The default generated composite kernel for {view}_copy() operators just clones
        # the input tensor, and runs the underlying view on the clone.
        return f"""
{view_copy_sig.defn(name=metadata.kernel)} {{
  auto output = at::_ops::{view_api_name}::call({exprs});
  {return_cloned_output}
}}
"""
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def return_str(rets: tuple[Return, ...], names: list[str]) -> str:
    """Render a C++ return statement for the given returns and their C++ names.

    Empty returns yield "", a single return is returned directly, and multiple
    returns are packed into the dispatcher's tuple return type.
    """
    assert len(rets) == len(names)
    if not rets:
        return ""
    if len(rets) == 1:
        return f"return {names[0]};"
    joined_names = ", ".join(names)
    return f"return {dispatcher.returns_type(rets).cpp_type()}({joined_names});"
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def modifies_arguments(f: NativeFunction) -> bool:
    """Return True if any argument of *f* carries a write (mutation) annotation."""
    for arg in f.func.arguments.flat_all:
        if arg.annotation is not None and arg.annotation.is_write:
            return True
    return False
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def wrapper_name(func: FunctionSchema) -> str:
    """Name for the generated functionalization wrapper of *func*.

    The overload name, when present, is appended with an underscore to keep
    wrappers for different overloads distinct.
    """
    base = cpp.name(func)
    overload = func.name.overload_name
    return f"{base}_{overload}" if overload else base
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def is_tensor_like(a: Argument | TensorOptionsArguments | SelfArgument) -> bool:
    """True for a SelfArgument, or a plain Argument whose type is tensor-like."""
    if isinstance(a, SelfArgument):
        return True
    return isinstance(a, Argument) and a.type.is_tensor_like()
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
# We need to wrap / unwrap various arguments from the op in the functionalization kernels.
# Some op schemas include non-owning types though (like TensorList),
# and when we unwrap them we expect to get out an owning type!
# We also return a lambda that tells you how to convert the non-owning type argument into the owning type.
def get_owning_type(t: CType) -> tuple[CType, Callable[[str], str]]:
    """Map a possibly non-owning C++ type to its owning equivalent.

    Returns the owning CType plus a callable that, given a C++ expression of
    the original type, produces an expression of the owning type. Types that
    are already fine pass through unchanged with an identity conversion.
    """
    if t == BaseCType(tensorListT):
        return VectorCType(BaseCType(tensorT)), lambda x: f"{x}.vec()"
    if t == BaseCType(iTensorListRefT):
        return VectorCType(BaseCType(tensorT)), lambda x: f"{{{x}.begin(), {x}.end()}}"
    # There are technically other non-owning types out there (like IntArrayRef),
    # but functionalization only actually cares about the ones involving tensors.
    return t, lambda x: x
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
# unwraps all tensor-like arguments, returning:
# (1) a string containing all of the logic that does the unwrapping
# (2) a context, to be used by translate(), with all of the relevant bindings.
def unwrap_tensor_args(
    sig: DispatcherSignature, *, is_view_op: bool
) -> tuple[str, list[Binding]]:
    """Emit C++ that unwraps every tensor-like argument of *sig* out of its
    FunctionalTensorWrapper.

    For non-view ops, pending updates on each input are synced before the
    redispatch; view ops skip the sync so it can stay deferred. Returns the
    unwrapping code string and the translate() context with the unwrapped
    names substituted in.
    """
    context: list[Binding] = []
    unwrapped_tensor_args: list[str] = []
    for arg in sig.arguments():
        if is_tensor_like(arg.argument):
            # for tensor inputs, we want to unwrap them before passing them into the redispatch calls.
            unwrapped_name = f"{arg.name}_"
            # For most ops, the functionalization needs to sync any pending updates on the input tensors
            # before calling the operator, since otherwise the operator will act on stale data.
            # For view ops though, we can continue to defer syncing until the tensor is used by
            # a non-view operator.
            maybe_sync_input = (
                "" if is_view_op else f"at::functionalization::impl::sync({arg.name});"
            )
            # Non-owning argument types (e.g. TensorList) are converted to an
            # owning local so the unwrapped value stays valid.
            unwrapped_type, conversion_fn = get_owning_type(
                arg.nctype.remove_const_ref().type
            )
            unwrapped_tensor_args.append(
                f"""
      {unwrapped_type.cpp_type()} {unwrapped_name};
      if (at::functionalization::impl::isFunctionalTensor({arg.name})) {{
        {maybe_sync_input}
        {unwrapped_name} = at::functionalization::impl::from_functional_tensor({arg.name});
      }} else {{
        {unwrapped_name} = {conversion_fn(arg.name)};
      }}"""
            )
            context.append(arg.with_name(unwrapped_name))
        else:
            # for non-tensor inputs, we want to pass them directly into the redispatch calls.
            context.append(arg)
    unwrap_tensor_args_str = "\n        ".join(unwrapped_tensor_args)
    return unwrap_tensor_args_str, context
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
# converts all tensor-like arguments to meta tensors, which are used to compute stride info. Returns:
# (1) a string containing all of the logic that does the conversions.
# (2) a context, to be used by translate(), with all of the relevant bindings.
def convert_to_meta_tensors(sig: DispatcherSignature) -> tuple[str, list[Binding]]:
    """Emit C++ that converts each tensor-like argument of *sig* to a meta
    tensor via to_meta(), returning the conversion code and the translate()
    context with the "<name>_meta" bindings substituted in."""
    conversion_lines: list[str] = []
    bindings: list[Binding] = []
    for arg in sig.arguments():
        if not is_tensor_like(arg.argument):
            # Non-tensor arguments are forwarded into the redispatch untouched.
            bindings.append(arg)
            continue
        # Tensor inputs are converted to meta before the redispatch call.
        meta_name = f"{arg.name}_meta"
        conversion_lines.append(f"auto {meta_name} = to_meta({arg.name});")
        bindings.append(arg.with_name(meta_name))
    conversion_code = "\n        ".join(conversion_lines)
    return conversion_code, bindings
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
# The functionalization codegen currently expects view op schemas to have this form:
# foo(Tensor(a), ...) -> Tensor(a) (e.g. transpose)
# foo(Tensor(a!), ...) -> Tensor(a!) (e.g. transpose_)
def assert_view_op_properties(func: FunctionSchema) -> None:
    """Validate that *func* matches the view-op schema shape the codegen
    assumes: first argument is an aliased Tensor, and no other argument
    carries an alias annotation. Raises AssertionError otherwise."""

    def is_alias(a: Argument) -> bool:
        # An argument participates in aliasing iff it has an annotation.
        return a.annotation is not None

    args = func.arguments.flat_non_out
    # The first argument is a tensor with an alias semantics (annotations)
    assert len(args) > 0 and args[0].type == BaseType(
        BaseTy.Tensor
    ), f"""In the functionalization codegen, we expect the first argument of every view operator to be a tensor,
but found an argument of type {str(args[0].type)} for operator: {str(func.name)}."""
    # No other arguments have aliasing semantics
    assert is_alias(args[0]) and not any(
        is_alias(a) for a in args[1:]
    ), """In the functionalization codegen, we expect the first argument of every view operator to alias the output.
View operators with multiple aliasing inputs aren't supported yet. Found an operator that doesn't satisfy this constraint"""
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
# One-liner expression for checking if an expression expr of type type has any
# symbolic values.
def emit_expr_has_symbolic_values(expr: str, type: CType) -> str:
    """Build a C++ boolean expression testing whether *expr* (whose C++ type
    is *type*) contains any symbolic SymInt values.

    Handles SymInt itself, optionals (recursively), optional SymInt array
    refs, and SymInt collections; raises ValueError for anything else.
    """
    if type == BaseCType(SymIntT):
        return f"{expr}.is_symbolic()"

    if isinstance(type, OptionalCType):
        deref_expr = f"(*{expr})"
        inner_check = emit_expr_has_symbolic_values(deref_expr, type.elem)
        return f"{expr}.has_value() ? {inner_check} : false"

    if type == BaseCType(optionalSymIntArrayRefT):
        # Treat it exactly like optional<SymIntArrayRef>.
        equivalent = OptionalCType(BaseCType(symIntArrayRefT))
        return emit_expr_has_symbolic_values(expr, equivalent)

    if type in (BaseCType(symIntArrayRefT), VectorCType(BaseCType(SymIntT))):
        elem_name = "arg"
        elem_check = emit_expr_has_symbolic_values(elem_name, BaseCType(SymIntT))
        return (
            "std::any_of("
            f"{expr}.begin(), {expr}.end(), "
            f"[=](auto& {elem_name}) {{ return {elem_check}; }})"
        )

    raise ValueError(
        "unsupported type for has_symbolic_values check. "
        "It should be a SymInt or a collection of those. "
        f"Got: {type.cpp_type()}"
    )
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
# Detects whether any of the SymInt arguments are, in fact, symbolic values.
# This is used in the constructor of ViewMeta.
def emit_has_symbolic_inputs(sig: DispatcherSignature) -> tuple[str, str]:
    """Return (variable_name, C++ snippet) where the snippet declares a bool
    and ORs in a has-symbolic-values check for every SymInt-like argument of
    *sig*."""
    name = "has_symbolic_inputs"
    # One OR-update statement per SymInt-like plain Argument in the signature.
    statements = [
        f"{name} = {name} | ({emit_expr_has_symbolic_values(binding.name, binding.nctype.type)});"
        for binding in sig.arguments()
        if (
            isinstance(binding.argument, Argument)
            and binding.argument.type.is_symint_like()
        )
    ]
    body = "\n      ".join(statements)
    return (
        name,
        f"""
      bool {name} = false;
      {body}""",
    )
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
# Generates the Functionalization kernel for:
# - ops that create aliases (e.g. transpose())
# - ops that are views AND mutations (e.g. transpose_())
def emit_view_functionalization_body(
    g: NativeFunctionsViewGroup, *, view_inplace: bool
) -> str:
    """Generate the C++ functionalization kernel body for the view op in *g*
    (or its inplace-view variant when *view_inplace* is True).

    The generated kernel unwraps functional tensors, builds a ViewMeta with
    forward/reverse lambdas, optionally computes a meta-tensor reference
    output for stride propagation (XLA/LTC), and either mutates the input's
    view metadata (inplace-view) or wraps a fresh output tensor.
    """
    if view_inplace:
        # This op is both an inplace op AND a view op.
        # See Note [Functionalization Pass - Inplace View Ops] for details.
        # I currently have the view meta call into the out-of-place variant of the view, to avoid
        # having to define an extra ~20 inplace {view}_inverse_ functions.
        # Most view ops don't have NativeFunctionGroup's both, because we don't define out= variants for view ops.
        # I'm assuming that every inplace-view op has a corresponding out-of-place view op,
        # with the same name but the trailing underscore removed.
        # This is currently asserted at parse time in gen.py (see error_check_native_functions).
        assert g.view_inplace is not None
        f = g.view_inplace
    else:
        f = g.view

    assert g.view_copy is not None
    with native_function_manager(f):
        call_sig = DispatcherSignature.from_schema(g.view_copy.func)

        # the "view_copy" op name that the functionalization kernels need to call
        api_name = g.view_copy.func.name.unambiguous_name()
        # Sometimes the functionalization pass needs to no-op (e.g. if it was passed non-functional tensors)
        # "no-op"ing in this context is just redispatching to the original op.
        noop_api_name = f.func.name.unambiguous_name()

        dispatcher_sig = DispatcherSignature.from_schema(f.func)
        assert_view_op_properties(f.func)
        # The first dispatcher argument is the aliased input tensor
        # (guaranteed by assert_view_op_properties above).
        view_tensor_name = dispatcher_sig.arguments()[0].name

        return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type()

        unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(
            dispatcher_sig, is_view_op=True
        )
        view_redispatch_args = [
            e.expr
            for e in translate(unwrapped_args_ctx, call_sig.arguments(), method=False)
        ]

        forward_lambda = FunctionalizationLambda.from_func(g, is_reverse=False)
        reverse_lambda = FunctionalizationLambda.from_func(g, is_reverse=True)

        # The meta API call should use the same arguments, but convert all tensors to meta tensors first.
        meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
        meta_call_args = [
            e.expr for e in translate(meta_call_ctx, call_sig.arguments(), method=False)
        ]

        (
            symbolic_inputs_varname,
            symbolic_inputs_check,
        ) = emit_has_symbolic_inputs(call_sig)

        if "inplace_view" in f.tags:
            # See Note [Functionalization Pass - Inplace View Ops] for more details
            return f"""
    {dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
      if (!at::functionalization::impl::isFunctionalTensor({view_tensor_name})) {{
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        {unwrap_tensor_args_str}
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
      }}
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
                        : at::functionalization::InverseReturnMode::NeverView
      );
      {symbolic_inputs_check}
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        {forward_lambda.decl()} {{
          if (reapply_views) {{
            return {forward_lambda.inner_call(reapply_views=True)}
          }} else {{
            return {forward_lambda.inner_call(reapply_views=False)}
          }}
        }},
        {reverse_lambda.decl()} {{
          return {reverse_lambda.inner_call()}
        }},
        /*has_symbolic_inputs=*/{symbolic_inputs_varname}
      );
      auto compute_reference_meta =
        {view_tensor_name}.key_set().has_backend(c10::BackendComponent::XLABit) ||
        {view_tensor_name}.key_set().has_backend(c10::BackendComponent::LazyBit);
      {return_type} reference_tensor_output;
      if (compute_reference_meta) {{
        {meta_conversion_str}
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::{noop_api_name}::call({', '.join(meta_call_args)});
      }}
      // This function adds the above view meta to the current tensor and replays them off the base,
      // mutating the size/stride info of the current FunctionalTensorWrapper.
      // Because of this, we need to make sure to run the reference shape function above,
      // BEFORE doing this (otherwise we'll end up runnin the reference function using the wrong sizes/strides)
      at::functionalization::impl::mutate_view_meta({view_tensor_name}, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
      // on a reference implementation here (instead of relying on the output from the forward lambda
      // having the correct stride info)
      if (compute_reference_meta) {{
        at::functionalization::impl::set_sizes_strides_offset({view_tensor_name}, reference_tensor_output);
      }}
      return {view_tensor_name};
    }}
"""

        else:
            is_multi_output_view = isinstance(f.func.returns[0].type, ListType)
            return f"""
    {dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
      {unwrap_tensor_args_str}
      if (!at::functionalization::impl::isFunctionalTensor({view_tensor_name})) {{
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
      }}
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
                        : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        {view_tensor_name}.key_set().has_backend(c10::BackendComponent::XLABit) ||
        {view_tensor_name}.key_set().has_backend(c10::BackendComponent::LazyBit);
      {return_type} reference_tensor_output;
      if (compute_reference_meta) {{
        {meta_conversion_str}
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::{noop_api_name}::call({', '.join(meta_call_args)});
      }}
      {return_type} tmp_output;
      {{
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {{
          tmp_output = at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
        }} else {{
          tmp_output = at::_ops::{api_name}::call({', '.join(view_redispatch_args)});
        }}
      }}
      {symbolic_inputs_check}
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        {forward_lambda.decl()} {{
          if (reapply_views) {{
            return {forward_lambda.inner_call(reapply_views=True)}
          }} else {{
            return {forward_lambda.inner_call(reapply_views=False)}
          }}
        }},
        {reverse_lambda.decl()} {{
          return {reverse_lambda.inner_call()}
        }},
        /*has_symbolic_inputs=*/{symbolic_inputs_varname},
        /*is_multi_output=*/{str(is_multi_output_view).lower()},
        /*is_as_strided=*/{str(str(f.func.name) == 'as_strided').lower()}
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, {view_tensor_name}, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta) {{
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }}
      return out;
    }}
"""
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
def maybe_create_output(f: NativeFunction, var_name: str) -> str:
    """Emit "<return type> <var_name> = " for *f*, or "" when *f* returns nothing."""
    if not f.func.returns:
        return ""
    cpp_return_type = (
        dispatcher.returns_type(f.func.returns).remove_const_ref().cpp_type()
    )
    return f"{cpp_return_type} {var_name} = "
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
# Given a NativeFunction, and a variable name corresponding to the output of redispatching on the function,
# this returns two lists of names, consisting of:
# - the names of returns corresponding to the original (mutable) inputs of the outer function
# - the names of returns corresponding to the (immutable) outputs of the inner redispatched function
def get_mutable_redispatch_return_names(
    f: NativeFunction, inner_return_var: str
) -> tuple[list[str], list[str]]:
    """Split *f*'s returns into (aliased-input names, fresh-output C++ exprs).

    Fresh outputs are referenced either directly via *inner_return_var* (single
    return) or via std::get<i> on it (tuple return).
    """
    single_return = len(f.func.returns) == 1
    aliased: list[str] = []
    fresh: list[str] = []
    for i, alias_name in enumerate(f.func.aliased_return_names()):
        if alias_name is None:
            fresh.append(
                inner_return_var
                if single_return
                else f"std::get<{i}>({inner_return_var})"
            )
        else:
            aliased.append(alias_name)
    return aliased, fresh
|
| 543 |
+
|
| 544 |
+
|
| 545 |
+
# When functionalization "no-op's" and redispatches on a mutable operator, we need to take care so that:
# - For fresh outputs, we return the result of the redispatch (without wrapping outputs)
# - For outputs that were aliased to inputs, we return the inputs directly (since some of them might have been wrapped)
def return_from_mutable_noop_redispatch(
    f: NativeFunction, inner_return_var: str
) -> str:
    """Emit the return statement for the no-op (redispatch) path of a mutable op."""
    aliased_names, fresh_names = get_mutable_redispatch_return_names(
        f, inner_return_var
    )
    # Collect every return name in order and return them all at once.
    all_names = aliased_names + fresh_names
    return return_str(f.func.returns, all_names)
|
| 554 |
+
|
| 555 |
+
|
| 556 |
+
def wrap_propagate_mutations_and_return(
    f: NativeFunction, functional_op: NativeFunction, inner_return_var: str
) -> str:
    """Emit C++ that, after calling the functional variant of a mutable op,
    wraps the fresh outputs into functional tensors, propagates the mutated
    outputs back into the original (mutable) input tensors, and returns the
    right mix of aliased inputs and wrapped outputs."""
    mutable_arg_names = f.func.arguments.mutable_arg_names()
    (
        aliased_outer_rets,
        non_aliased_outer_rets,
    ) = get_mutable_redispatch_return_names(f, inner_return_var)
    _, non_aliased_inner_rets = get_mutable_redispatch_return_names(
        functional_op, inner_return_var
    )
    # The outer function may have a mix of aliased and non-aliased outputs,
    # But the inner functional op that we're transforming to should only have non-aliased outputs
    assert len(mutable_arg_names) + len(non_aliased_outer_rets) == len(
        non_aliased_inner_rets
    )

    # First, take all of the newly created outputs from the inner call and wrap them into functional tensors
    updates = []
    non_aliased_wrapped_ret_names = []
    for i, inner_ret in enumerate(
        non_aliased_inner_rets[: len(non_aliased_outer_rets)]
    ):
        ret_name = f"output_{i}"
        updates.append(
            f"""\
      auto output_{i} = at::functionalization::impl::to_functional_tensor({inner_ret});"""
        )
        non_aliased_wrapped_ret_names.append(ret_name)

    # Next, take all of the mutated outputs from the inner call corresponding to mutated inputs,
    # and propagate the mutations
    for outer_arg, inner_ret in zip(
        mutable_arg_names, non_aliased_inner_rets[len(non_aliased_outer_rets) :]
    ):
        updates.append(
            f"""\
      auto {outer_arg}_inner = at::functionalization::impl::from_functional_tensor({outer_arg});
      at::functionalization::impl::replace_({outer_arg}, {inner_ret});
      at::functionalization::impl::commit_update({outer_arg});
      at::functionalization::impl::sync({outer_arg});
      auto {outer_arg}_inner_updated = at::functionalization::impl::from_functional_tensor({outer_arg});
      at::functionalization::impl::propagate_xla_data_direct({outer_arg}_inner, {outer_arg}_inner_updated);"""
        )

    # Finally, we return:
    # - Any mutable arguments that also returns
    # - Any immutable returns that were created wrapping the output from the inner call
    returns_str = return_str(
        f.func.returns, aliased_outer_rets + non_aliased_wrapped_ret_names
    )
    updates_str = "\n".join(updates)
    return f"""\
{updates_str}
    {returns_str}"""
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
# Generates the Functionalization kernel for:
|
| 614 |
+
# - mutation ops (inplace and out= ops)
|
| 615 |
+
@with_native_function_and
|
| 616 |
+
def emit_inplace_functionalization_body(
|
| 617 |
+
f: NativeFunction, g: NativeFunctionsGroup
|
| 618 |
+
) -> str:
|
| 619 |
+
# mutation case
|
| 620 |
+
assert modifies_arguments(f)
|
| 621 |
+
|
| 622 |
+
dispatcher_sig = DispatcherSignature.from_schema(f.func)
|
| 623 |
+
|
| 624 |
+
unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(
|
| 625 |
+
dispatcher_sig, is_view_op=False
|
| 626 |
+
)
|
| 627 |
+
|
| 628 |
+
mutated_names = [
|
| 629 |
+
a.name
|
| 630 |
+
for a in f.func.arguments.flat_all
|
| 631 |
+
if a.type.is_tensor_like() and a.annotation is not None
|
| 632 |
+
]
|
| 633 |
+
non_mutated_names = [
|
| 634 |
+
a.name
|
| 635 |
+
for a in f.func.arguments.flat_all
|
| 636 |
+
if a.type.is_tensor_like() and a.annotation is None
|
| 637 |
+
]
|
| 638 |
+
non_mutated_tensor_names = [
|
| 639 |
+
a.name
|
| 640 |
+
for a in f.func.arguments.flat_all
|
| 641 |
+
if a.type == BaseType(BaseTy.Tensor) and a.annotation is None
|
| 642 |
+
]
|
| 643 |
+
# all mutable inputs must be functional tensors in order to participate in functionalization
|
| 644 |
+
check_all_mutated_args_are_functional = " && ".join(
|
| 645 |
+
["true"]
|
| 646 |
+
+ [
|
| 647 |
+
f"at::functionalization::impl::isFunctionalTensor({a})"
|
| 648 |
+
for a in mutated_names
|
| 649 |
+
]
|
| 650 |
+
)
|
| 651 |
+
check_any_non_mutated_args_are_functional = " || ".join(
|
| 652 |
+
["false"]
|
| 653 |
+
+ [
|
| 654 |
+
f"at::functionalization::impl::isFunctionalTensor({a})"
|
| 655 |
+
for a in non_mutated_names
|
| 656 |
+
]
|
| 657 |
+
)
|
| 658 |
+
|
| 659 |
+
check_any_non_mutated_tensors_are_xla = " || ".join(
|
| 660 |
+
["false"]
|
| 661 |
+
+ [
|
| 662 |
+
f"{a}.device().type() == c10::DeviceType::XLA"
|
| 663 |
+
for a in non_mutated_tensor_names
|
| 664 |
+
]
|
| 665 |
+
)
|
| 666 |
+
# These are used in the cases where we don't functionalize and redispatch to the inplace op
|
| 667 |
+
# case 1: we hit an inplace op that doesn't have an out-of-place equivalent
|
| 668 |
+
# case 2: we hit an inplace ops but our inputs are not functional tensors (in which case our kernel just no-ops)
|
| 669 |
+
inplace_exprs = [
|
| 670 |
+
e.expr
|
| 671 |
+
for e in translate(unwrapped_args_ctx, dispatcher_sig.arguments(), method=False)
|
| 672 |
+
]
|
| 673 |
+
|
| 674 |
+
# call the out-of-place variant of the op
|
| 675 |
+
return_type = (
|
| 676 |
+
dispatcher.returns_type(g.functional.func.returns).remove_const_ref().cpp_type()
|
| 677 |
+
)
|
| 678 |
+
functional_sig = DispatcherSignature.from_schema(g.functional.func)
|
| 679 |
+
functional_exprs = [
|
| 680 |
+
e.expr
|
| 681 |
+
for e in translate(unwrapped_args_ctx, functional_sig.arguments(), method=False)
|
| 682 |
+
]
|
| 683 |
+
|
| 684 |
+
if f.func.is_out_fn():
|
| 685 |
+
mutable_input_post_processing = "\n".join(
|
| 686 |
+
[
|
| 687 |
+
f"""
|
| 688 |
+
at::functionalization::impl::replace_(
|
| 689 |
+
{a.name}, {'std::get<' + str(i) + '>(tmp_output)' if len(f.func.returns) > 1 else 'tmp_output'});
|
| 690 |
+
at::functionalization::impl::commit_update({a.name});"""
|
| 691 |
+
for (i, a) in enumerate(f.func.arguments.out)
|
| 692 |
+
if a.annotation and a.annotation.is_write and a.type.is_tensor_like()
|
| 693 |
+
]
|
| 694 |
+
)
|
| 695 |
+
else:
|
| 696 |
+
mutable_input_post_processing = "\n".join(
|
| 697 |
+
[
|
| 698 |
+
f"""
|
| 699 |
+
at::functionalization::impl::replace_({a.name}, tmp_output);
|
| 700 |
+
at::functionalization::impl::commit_update({a.name});"""
|
| 701 |
+
for a in f.func.arguments.flat_all
|
| 702 |
+
if a.annotation and a.annotation.is_write and a.type.is_tensor_like()
|
| 703 |
+
]
|
| 704 |
+
)
|
| 705 |
+
|
| 706 |
+
meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
|
| 707 |
+
# We don't want to run the inplace meta func for ops like .set_(), because:
|
| 708 |
+
# (1) they're unnecessary: inplace meta checks are only useful for ops like add_(),
|
| 709 |
+
# where broadcasting will work for the out-of-place case but should fail on the inplace call
|
| 710 |
+
# (2) They'll also fail without adding extra infra: we'd need to convert the input storage argument
|
| 711 |
+
# into a meta storage
|
| 712 |
+
any_storage_args = any(
|
| 713 |
+
a.type == BaseType(BaseTy.Storage) for a in f.func.arguments.flat_all
|
| 714 |
+
)
|
| 715 |
+
|
| 716 |
+
return f"""
|
| 717 |
+
{dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
|
| 718 |
+
if ({str(not any_storage_args and f.func.kind() == SchemaKind.inplace).lower()}) {{
|
| 719 |
+
// Before converting the mutable op to its functional variant, run meta tensors through the original op.
|
| 720 |
+
// This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
|
| 721 |
+
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
|
| 722 |
+
{meta_conversion_str}
|
| 723 |
+
at::AutoDispatchSkipFunctionalize func_guard;
|
| 724 |
+
c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
|
| 725 |
+
at::_ops::{f.func.name.unambiguous_name()}::call({', '.join(a.name for a in meta_call_ctx)});
|
| 726 |
+
}}
|
| 727 |
+
{unwrap_tensor_args_str}
|
| 728 |
+
if (!({check_all_mutated_args_are_functional})) {{
|
| 729 |
+
// We want to disable this check if there are any XLA tensors.
|
| 730 |
+
// cpu_tensor.copy_(xla_tensor) is valid code.
|
| 731 |
+
if (!({check_any_non_mutated_tensors_are_xla}) && ({check_any_non_mutated_args_are_functional})) {{
|
| 732 |
+
// case 1: trying to mutate a non functional tensor with a functional tensor is an error
|
| 733 |
+
TORCH_INTERNAL_ASSERT(false,
|
| 734 |
+
"mutating a non-functional tensor with a functional tensor is not allowed.",
|
| 735 |
+
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
|
| 736 |
+
}} else {{
|
| 737 |
+
// case 2: arguments are not functional tensors, so we no-op and redispatch.
|
| 738 |
+
at::AutoDispatchSkipFunctionalize guard;
|
| 739 |
+
{maybe_create_output(f, 'tmp_output')}at::_ops::{f.func.name.unambiguous_name()}::call({', '.join(inplace_exprs)});
|
| 740 |
+
{return_from_mutable_noop_redispatch(f, 'tmp_output')}
|
| 741 |
+
}}
|
| 742 |
+
}} else {{
|
| 743 |
+
{return_type} tmp_output;
|
| 744 |
+
{{
|
| 745 |
+
at::AutoDispatchSkipFunctionalize guard;
|
| 746 |
+
tmp_output = at::_ops::{g.functional.func.name.unambiguous_name()}::call({', '.join(functional_exprs)});
|
| 747 |
+
}}
|
| 748 |
+
{wrap_propagate_mutations_and_return(f, g.functional, 'tmp_output')}
|
| 749 |
+
}}
|
| 750 |
+
}}"""
|
| 751 |
+
|
| 752 |
+
|
| 753 |
+
# The below functions generate RegisterFunctionalization.cpp
|
| 754 |
+
# These files provide the kernels that run the functionalization pass, which can be opted into
|
| 755 |
+
# per backend (e.g. XLA or Vulkan), or as a composable transform (functionalize() in functorch).
|
| 756 |
+
|
| 757 |
+
|
| 758 |
+
# See Note [Functionalization Pass: View Inverses].
|
| 759 |
+
def gen_functionalization_view_inverse_declaration(
    selector: SelectiveBuilder, g: NativeFunctionsViewGroup
) -> str | None:
    """Return the declaration of the "inverse view" function for *g*, or None.

    Every (non-composite) view op needs a corresponding inverse-view function;
    generating these declarations yields a good compiler error when someone
    adds a new view op without providing one.
    """

    @with_native_function
    def _decl_for(view_group: NativeFunctionsViewGroup) -> str | None:
        # Composite-implicit-autograd views decompose before functionalization,
        # so no inverse declaration is needed for them.
        if view_group.view.has_composite_implicit_autograd_kernel:
            return None
        return ViewInverseSignature(view_group).decl()

    return _decl_for(g)
|
| 772 |
+
|
| 773 |
+
|
| 774 |
+
def gen_functionalization_registration(
    selector: SelectiveBuilder,
    g: NativeFunction | NativeFunctionsGroup | NativeFunctionsViewGroup,
    composite_implicit_autograd_index: BackendIndex,
) -> list[str]:
    """Return the `m.impl(...)` registration lines for the Functionalize key.

    Handles three input shapes: a view group (registers view + view_inplace
    kernels), a functional/inplace/out group (registers the mutating variants),
    or an ungrouped NativeFunction. Returns [] for ops that are excluded from
    functionalization (mobile builds, lift/lift_fresh, resize_, and the ops in
    MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION).
    """

    @with_native_function
    def emit_registration_helper(f: NativeFunction) -> str:
        # Composite-implicit-autograd ops decompose before functionalization,
        # so they must never reach this helper.
        assert not f.has_composite_implicit_autograd_kernel
        registration_str = f"TORCH_FN(functionalization::{wrapper_name(f.func)})"
        return f'm.impl("{f.func.name}", {registration_str});'

    # Don't generate kernels in mobile build
    if not selector.include_all_operators:
        return []

    if isinstance(g, NativeFunctionsViewGroup):
        # functionalization needs to register kernels for view + view_inplace ops
        # See Note [Functionalization <> torch.Tensor constructor]
        if str(g.view.func.name) == "lift_fresh":
            return []
        view_str = []
        if not g.view.has_composite_implicit_autograd_kernel:
            view_str.append(emit_registration_helper(g.view))
        if (
            g.view_inplace is not None
            and not g.view_inplace.has_composite_implicit_autograd_kernel
        ):
            assert g.view_inplace.is_view_op
            view_str.append(emit_registration_helper(g.view_inplace))
        return view_str

    elif isinstance(g, NativeFunctionsGroup):
        # Gets a hand-written functionalization kernel
        if g.inplace is not None and str(g.inplace.func.name) == "set_.source_Tensor":
            fns = []
        else:
            fns = list(g.functions())
    else:
        if str(g.func.name) in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION:
            return []
        fns = [g]

    registrations = []
    for f in fns:
        if f.has_composite_implicit_autograd_kernel:
            continue
        if str(f.func.name) == "lift":
            # See Note [Functionalization <> torch.Tensor constructor]
            return []
        if str(f.func.name) == "resize_":
            # See Note [resize_ in Functionalization]
            return []
        if str(f.func.name.name) != "set_":
            assert not f.is_view_op
        # functionalization needs to generate and register kernels for inplace ops.
        # We *also* need to directly register CompositeImplicitAutograd kernels
        # so that they decompose properly before functionalization.
        if modifies_arguments(f):
            registrations.append(emit_registration_helper(f))
    return registrations
|
| 834 |
+
|
| 835 |
+
|
| 836 |
+
def gen_functionalization_definition(
    selector: SelectiveBuilder,
    # Note: Ideally this code should never have to look at NativeFunction
    # (and instead only need to operate on grouped NativeFunctions).
    # The only reason currently is because we need to emit direct dispatch registrations
    # For CompositeImplicitAutograd operators, which are potentially ungrouped.
    g: NativeFunction | NativeFunctionsGroup | NativeFunctionsViewGroup,
) -> list[str]:
    """Return the kernel definitions for the functionalization pass for *g*.

    View groups produce view -> view_copy kernels; functional/inplace/out
    groups produce inplace -> out-of-place kernels; ungrouped NativeFunctions
    produce nothing (they are only sanity-checked here). Mobile builds emit
    nothing at all.
    """
    # Don't generate kernels in mobile build
    if not selector.include_all_operators:
        return []

    if isinstance(g, NativeFunctionsViewGroup):
        # Case 1: emit view -> view_copy kernels for the functionalization pass
        view_defs = []
        if not g.composite:
            # invariant: NativeFunctionsViewGroup's always have a view_copy operator
            # if the view is not composite (implicit autograd)
            assert g.view_copy is not None, dataclass_repr(g, indent=1)
            view_defs.append(emit_view_functionalization_body(g, view_inplace=False))
            if g.view_inplace is not None:
                view_defs.append(emit_view_functionalization_body(g, view_inplace=True))
        return view_defs
    elif isinstance(g, NativeFunction):
        # Invariant: all mutable operators that we need to handle in functionalization
        # should have been properly grouped up.
        # TODO: The below ops all have "problematic" schemas that prevent them from
        # getting functionalized. Instead of bending over backwards to get things to work,
        # I think we should either:
        # (1) fix their schemas (BC-breaking)
        # (2) hand-write their functionalization kernels
        if (
            str(g.func.name) not in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION
            and str(g.func.name.name) not in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION
        ):
            assert g.has_composite_implicit_autograd_kernel or not modifies_arguments(g)
        return []
    else:
        # Case 2: emit inplace -> out-of-place kernels for the functionalization pass
        mutation_defs = []
        mutation_defs.append(emit_inplace_functionalization_body(g.out, g))
        if g.inplace is not None:
            mutation_defs.append(emit_inplace_functionalization_body(g.inplace, g))
        if g.mutable is not None:
            mutation_defs.append(emit_inplace_functionalization_body(g.mutable, g))
        return mutation_defs
    return []
|
minigpt2/lib/python3.10/site-packages/torchgen/gen_lazy_tensor.py
ADDED
|
@@ -0,0 +1,581 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import argparse
|
| 4 |
+
import os
|
| 5 |
+
from collections import namedtuple
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import Any, Callable, Iterable, Iterator, Sequence
|
| 8 |
+
|
| 9 |
+
import yaml
|
| 10 |
+
|
| 11 |
+
import torchgen.dest as dest
|
| 12 |
+
from torchgen.api.lazy import setValueT
|
| 13 |
+
from torchgen.api.types import BaseCppType
|
| 14 |
+
from torchgen.dest.lazy_ir import GenLazyIR, GenLazyNativeFuncDefinition, GenTSLazyIR
|
| 15 |
+
from torchgen.gen import get_grouped_native_functions, parse_native_yaml
|
| 16 |
+
from torchgen.gen_backend_stubs import (
|
| 17 |
+
error_on_missing_kernels,
|
| 18 |
+
gen_dispatcher_registrations,
|
| 19 |
+
gen_dispatchkey_nativefunc_headers,
|
| 20 |
+
parse_backend_yaml,
|
| 21 |
+
)
|
| 22 |
+
from torchgen.model import NativeFunction, NativeFunctionsGroup, OperatorName
|
| 23 |
+
from torchgen.selective_build.selector import SelectiveBuilder
|
| 24 |
+
from torchgen.utils import FileManager, NamespaceHelper
|
| 25 |
+
from torchgen.yaml_utils import YamlLoader
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 29 |
+
#
|
| 30 |
+
# Lazy Tensor Codegen
|
| 31 |
+
#
|
| 32 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 33 |
+
# Overview
|
| 34 |
+
# ~~~~~~~~
|
| 35 |
+
#
|
| 36 |
+
# This codegen script builds on existing data models and helpers used
|
| 37 |
+
# by all ATen backends, and adds new functionality specific to lazy
|
| 38 |
+
# tensor backends.
|
| 39 |
+
#
|
| 40 |
+
# Inputs:
|
| 41 |
+
# - <backend>_native_functions.yaml: controls which operators are
|
| 42 |
+
# supported by the backend.
|
| 43 |
+
#
|
| 44 |
+
# Outputs:
|
| 45 |
+
# (for all backends)
|
| 46 |
+
# <DispatchKey>Ir.h defines Lazy IR classes to be constructed during tracing
|
| 47 |
+
# - opt-in: also generate 'lowering' methods for the TorchScript backend only
|
| 48 |
+
# <DispatchKey>NativeFunctions.cpp defines implementations of native functions which perform lazy tracing
|
| 49 |
+
# - opt-in: 'full_codegen' section of backend yaml; 'supported' section omits these implementations
|
| 50 |
+
# <DispatchKey>NativeFunctions.h declares implementations of native functions for both 'supported' and 'full_codegen'
|
| 51 |
+
# ops
|
| 52 |
+
#
|
| 53 |
+
# Register<DispatchKey>.cpp registers all op implementations with the dispatcher
|
| 54 |
+
# RegisterAutograd<DispatchKey>.cpp registers all autograd implementations with the dispatcher
|
| 55 |
+
#
|
| 56 |
+
# Validation Helpers:
|
| 57 |
+
# - Shape Inference: errs if any ops in backend yaml require shape inference not provided by meta kernels or
|
| 58 |
+
# implementations in torch/csrc/lazy/core/shape_inference.*
|
| 59 |
+
# - native function impls: errs if any 'supported' ops do not have an implementation defined in the backend
|
| 60 |
+
# (non-codegen) implementation file
|
| 61 |
+
#
|
| 62 |
+
#
|
| 63 |
+
# About the Data Model
|
| 64 |
+
# ~~~~~~~~~~~~~~~~~~~~
|
| 65 |
+
#
|
| 66 |
+
# Modeled after ATen codegen, the first step is to parse yaml and build a data model for the operators
|
| 67 |
+
# we care about. In this case, the <backend>_native_functions yaml defines a subset of the core operators
|
| 68 |
+
# (defined in more detail in the main native_functions.yaml), which will be supported by your backend.
|
| 69 |
+
# Backends can list ops in two categories:
|
| 70 |
+
# - `supported` ops require hand-implementations but still get codegenned declarations and registrations
|
| 71 |
+
# - `full_codegen` ops get implementations (and IR classes) generated too
|
| 72 |
+
#
|
| 73 |
+
# Each native function is modeled as an object with a schema, and each schema has objects representing their
|
| 74 |
+
# arguments. Much of the codegen is manipulation of the arguments and their types. For example, lazy tensor
|
| 75 |
+
# backends need to transform 'at::Tensor' arguments into 'lazy::Value' objects, as well as replacing reference
|
| 76 |
+
# types (stringref) with actual string objects, and this is done by manipulating the data model objects.
|
| 77 |
+
# - see api/lazy.py for the lazy data model
|
| 78 |
+
#
|
| 79 |
+
# Once the data model is set up, the rest of this script processes a number of templates for output CPP file
|
| 80 |
+
# and fills in the template values using helpers in `dest/lazy_ir.py` and `dest/lazy_ts_lowering.py`. These
|
| 81 |
+
# helpers mostly iterate over functions and their arguments, outputting different c++ snippets.
|
| 82 |
+
#
|
| 83 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
# Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
|
| 87 |
+
# Returns a Tuple of (backend_key, autograd_key, cpp_namespace, updated BackendIndex mapping, full_codegen)
|
| 88 |
+
# Result of parsing an external backend's yaml: the backend's dispatch key,
# optional autograd dispatch key, C++ namespace, the updated BackendIndex
# mapping, and the list of ops selected for full codegen.
ParsedExternalYaml = namedtuple(
    "ParsedExternalYaml",
    ["backend_key", "autograd_key", "cpp_namespace", "backend_indices", "full_codegen"],
)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def parse_native_functions_keys(
    backend_yaml_path: str,
    grouped_native_functions: Sequence[NativeFunction | NativeFunctionsGroup],
) -> tuple[list[OperatorName], list[Any], list[OperatorName]]:
    """Extract the ``full_codegen``, ``non_native`` and ``ir_gen`` sections
    from the backend yaml, parsing operator names where applicable."""
    with open(backend_yaml_path) as yaml_file:
        parsed = yaml.load(yaml_file, Loader=YamlLoader)
    assert isinstance(parsed, dict)

    raw_full_codegen = parsed.pop("full_codegen", [])
    raw_non_native = parsed.pop("non_native", [])
    raw_ir_gen = parsed.pop("ir_gen", [])
    # Each section is optional but, when present, must be a yaml list.
    for section in (raw_full_codegen, raw_non_native, raw_ir_gen):
        assert isinstance(section, list)
    return (
        [OperatorName.parse(op) for op in raw_full_codegen],
        raw_non_native,
        [OperatorName.parse(op) for op in raw_ir_gen],
    )
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def validate_shape_inference_header(
    shape_inference_hdr: str, expected_shape_infr_decls: list[str]
) -> None:
    """Check that every expected shape-inference declaration is present in the
    shape-inference header.

    Args:
        shape_inference_hdr: path of the header file to scan.
        expected_shape_infr_decls: declaration lines that must appear verbatim
            (each is matched against the header line-by-line).

    Raises:
        AssertionError: if the header file cannot be read.
        Exception: if any expected declaration is missing from the header.
    """
    try:
        with open(shape_inference_hdr) as f:
            shape_infr_decls = f.read()
            shape_infr_decl_lines = set(shape_infr_decls.split("\n"))
    except OSError as e:
        raise AssertionError(
            f"Unable to read from the specified shape_inference_hdr file: {shape_inference_hdr}"
        ) from e

    # TODO(whc) add a check for shape inference functions that have meta kernels implement and should be retired.

    # Exact line matching only: a declaration that differs by whitespace or
    # wrapping is reported as missing.
    missing_decls = [
        decl for decl in expected_shape_infr_decls if decl not in shape_infr_decl_lines
    ]
    if missing_decls:
        raise Exception(  # noqa: TRY002
            f"""Missing shape inference function.\n
Please declare this function in {shape_inference_hdr}:\n
and implement it in the corresponding shape_inference.cpp file.\n
{os.linesep.join(missing_decls)}"""
        )
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
# Some helper functions for the codegen.
|
| 140 |
+
def get_ltc_helper_fns() -> str:
    """Return C++ helper source (as a string) emitted into the generated
    NativeFunctions.cpp: `to_meta` overloads that copy tensors/optional
    tensors/tensor lists onto the meta device for shape inference."""
    return """\
at::Tensor to_meta(const at::Tensor& tensor) {
  // undefined tensors can't be converted to the meta device, since they don't have sizes/strides
  if (!tensor.defined()) return tensor;
  auto out = at::native::empty_strided_meta_symint(tensor.sym_sizes(), tensor.sym_strides(), \
/*dtype=*/std::make_optional(tensor.scalar_type()), /*layout=*/std::make_optional(tensor.layout()), \
/*device=*/std::make_optional(c10::Device(c10::kMeta)), /*pin_memory=*/std::nullopt);
  // needs to handle wrapped numbers, so dtype promotion works properly.
  if (tensor.unsafeGetTensorImpl()->is_wrapped_number()) {
    out.unsafeGetTensorImpl()->set_wrapped_number(true);
  }
  return out;
}
std::optional<at::Tensor> to_meta(const std::optional<at::Tensor>& tensor) {
  if (tensor.has_value()) {
    return to_meta(*tensor);
  }
  return std::nullopt;
}

std::vector<at::Tensor> to_meta(at::ITensorListRef t_list) {
  std::vector<at::Tensor> outs;
  outs.reserve(t_list.size());
  for (const auto& tensor : t_list) {
    outs.push_back(to_meta(tensor));
  }
  return outs;
}
"""
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
class default_args:
    """Shared default values for `main`'s CLI flags and the keyword arguments
    of `run_gen_lazy_tensor` (defaults target the TorchScript backend)."""

    # Name of the backend-specific custom Lazy IR Node base class.
    node_base: str = "Node"
    # Optional header file declaring the custom node base class.
    node_base_hdr: str | None = None
    # Header declaring the fallback compute_shape_* shape-inference functions.
    shape_inference_hdr: str = "torch/csrc/lazy/core/shape_inference.h"
    # Backend-specific lazy tensor class and the header that declares it.
    tensor_class: str = "torch::lazy::LazyTensor"
    tensor_class_hdr: str = "torch/csrc/lazy/core/tensor.h"
    # Generator classes used to emit Lazy IR and native function definitions.
    lazy_ir_generator: type[GenLazyIR] = GenLazyIR
    native_func_definition_generator: type[
        GenLazyNativeFuncDefinition
    ] = GenLazyNativeFuncDefinition
    # Human-readable backend name used in generated output.
    backend_name: str = "TorchScript"
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def main() -> None:
    """Command-line entry point: parse arguments and run the Lazy Tensor codegen.

    All parsed options are forwarded to `run_gen_lazy_tensor`.
    """

    def _str_to_bool(value: str) -> bool:
        # Bug fix: the previous `type=bool` treated ANY non-empty string as
        # True (so `--dry-run False` enabled dry-run). Parse common spellings
        # explicitly; `--dry-run True` keeps working as before.
        lowered = value.lower()
        if lowered in ("1", "true", "yes", "y"):
            return True
        if lowered in ("", "0", "false", "no", "n"):
            return False
        raise argparse.ArgumentTypeError(f"invalid boolean value: {value!r}")

    parser = argparse.ArgumentParser(description="Generate Lazy Tensor backend files")
    parser.add_argument(
        "-s",
        "--source-yaml",
        "--source_yaml",
        help="path to source yaml file containing operator external definitions",
    )
    parser.add_argument("-o", "--output-dir", "--output_dir", help="output directory")
    parser.add_argument(
        "--dry-run",
        "--dry_run",
        type=_str_to_bool,
        default=False,
        help="do not write output files, only compute what would be generated",
    )
    parser.add_argument(
        "--impl-path",
        "--impl_path",
        type=str,
        default=None,
        help="path to the source C++ file containing kernel definitions",
    )
    parser.add_argument(
        "--gen-ts-lowerings",
        "--gen_ts_lowerings",
        action="store_true",
        help="Generate TorchScript lowerings in addition to Lazy IR and NativeFunctions",
    )
    parser.add_argument(
        "--node-base",
        "--node_base",
        type=str,
        default=default_args.node_base,
        help="Name of backend specific custom Lazy IR Node base class",
    )
    parser.add_argument(
        "--node-base-hdr",
        "--node_base_hdr",
        type=str,
        default=default_args.node_base_hdr,
        help="Path to header file defining custom Lazy IR Node base class",
    )
    parser.add_argument(
        "--shape-inference-hdr",
        "--shape_inference_hdr",
        type=str,
        default=default_args.shape_inference_hdr,
        help="Path to header file defining custom Lazy shape inference functions",
    )
    parser.add_argument(
        "--tensor-class",
        "--tensor_class",
        type=str,
        default=default_args.tensor_class,
        help="Name of backend specific custom Lazy Tensor class",
    )
    parser.add_argument(
        "--tensor-class-hdr",
        "--tensor_class_hdr",
        type=str,
        default=default_args.tensor_class_hdr,
        help="Path to header file defining custom Lazy Tensor class",
    )
    parser.add_argument(
        "--backend-name",
        "--backend_name",
        type=str,
        default=default_args.backend_name,
        help="Name of the backend to generate",
    )
    options = parser.parse_args()

    # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
    torch_root = Path(__file__).parent.parent.parent.absolute()
    aten_path = str(torch_root / "aten" / "src" / "ATen")
    lazy_ir_generator: type[GenLazyIR] = default_args.lazy_ir_generator
    if options.gen_ts_lowerings:
        # Opt into TorchScript lowering generation alongside the Lazy IR.
        lazy_ir_generator = GenTSLazyIR
    native_func_definition_generator: type[
        GenLazyNativeFuncDefinition
    ] = default_args.native_func_definition_generator

    run_gen_lazy_tensor(
        aten_path,
        options.source_yaml,
        options.output_dir,
        options.dry_run,
        options.impl_path,
        options.node_base,
        options.node_base_hdr,
        options.tensor_class,
        options.tensor_class_hdr,
        options.shape_inference_hdr,
        lazy_ir_generator,
        native_func_definition_generator,
        options.backend_name,
    )
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def run_gen_lazy_tensor(
|
| 282 |
+
aten_path: str,
|
| 283 |
+
source_yaml: str,
|
| 284 |
+
output_dir: str,
|
| 285 |
+
dry_run: bool,
|
| 286 |
+
impl_path: str | None,
|
| 287 |
+
node_base: str = default_args.node_base,
|
| 288 |
+
node_base_hdr: str | None = default_args.node_base_hdr,
|
| 289 |
+
tensor_class: str = default_args.tensor_class,
|
| 290 |
+
tensor_class_hdr: str = default_args.tensor_class_hdr,
|
| 291 |
+
shape_inference_hdr: str = default_args.shape_inference_hdr,
|
| 292 |
+
lazy_ir_generator: type[GenLazyIR] = default_args.lazy_ir_generator,
|
| 293 |
+
native_func_definition_generator: type[
|
| 294 |
+
GenLazyNativeFuncDefinition
|
| 295 |
+
] = default_args.native_func_definition_generator,
|
| 296 |
+
# build_in_tree is true for TS backend and affects include paths
|
| 297 |
+
build_in_tree: bool = False,
|
| 298 |
+
# per_operator_headers changes whether ATen/Functions.h or individual operator headers are used
|
| 299 |
+
# it must match how ATen was built
|
| 300 |
+
per_operator_headers: bool = False,
|
| 301 |
+
backend_name: str = default_args.backend_name,
|
| 302 |
+
gen_forced_fallback_code: bool = False,
|
| 303 |
+
use_lazy_shape: bool = True,
|
| 304 |
+
# the following arguments are temporary customization points for xla backend migration.
|
| 305 |
+
# do not rely on them otherwise, they should be removed once migration is complete
|
| 306 |
+
backend_namespace: str = "torch::lazy",
|
| 307 |
+
get_tensorlist: str = "GetTensorList",
|
| 308 |
+
get_tensor_or_wrap_number: str = "GetLtcTensorOrCreateForWrappedNumber",
|
| 309 |
+
try_get_tensor: str = "TryGetLtcTensor",
|
| 310 |
+
metrics_counter: str = 'TORCH_LAZY_FN_COUNTER("lazy::")',
|
| 311 |
+
create_tensor: str = "LazyTensor::Create",
|
| 312 |
+
create_from_first_tensor: bool = False,
|
| 313 |
+
create_aten_from_ltc_tensor: str = "torch::lazy::CreateAtenFromLtcTensor",
|
| 314 |
+
tuple_aten_from_ltc_tensors: str = "torch::lazy::TupleAtenFromLtcTensors",
|
| 315 |
+
lazy_value_class: str = "torch::lazy::Value",
|
| 316 |
+
lazy_tensor_ptr: str = "LazyTensorPtr",
|
| 317 |
+
get_device_fn: str = "torch::lazy::GetBackendDevice",
|
| 318 |
+
) -> None:
|
| 319 |
+
lv_tokens = lazy_value_class.split("::")
|
| 320 |
+
lv_class = lv_tokens[-1]
|
| 321 |
+
lv_ns = "::".join(lv_tokens[:-1])
|
| 322 |
+
setValueT(BaseCppType(lv_ns, lv_class))
|
| 323 |
+
template_dir = os.path.join(aten_path, "templates")
|
| 324 |
+
|
| 325 |
+
def make_file_manager(install_dir: str) -> FileManager:
|
| 326 |
+
return FileManager(
|
| 327 |
+
install_dir=install_dir, template_dir=template_dir, dry_run=dry_run
|
| 328 |
+
)
|
| 329 |
+
|
| 330 |
+
fm = make_file_manager(output_dir)
|
| 331 |
+
|
| 332 |
+
native_yaml_path = os.path.join(aten_path, "native/native_functions.yaml")
|
| 333 |
+
tags_yaml_path = os.path.join(aten_path, "native/tags.yaml")
|
| 334 |
+
parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
|
| 335 |
+
native_functions, backend_indices = (
|
| 336 |
+
parsed_yaml.native_functions,
|
| 337 |
+
parsed_yaml.backend_indices,
|
| 338 |
+
)
|
| 339 |
+
grouped_native_functions = get_grouped_native_functions(native_functions)
|
| 340 |
+
|
| 341 |
+
def sort_native_function(f: NativeFunctionsGroup | NativeFunction) -> str:
|
| 342 |
+
"""
|
| 343 |
+
We sort the native function because of the note in concat_map_codegen.
|
| 344 |
+
TODO(alanwaketan): Remove this sorting hack once all ops are grouped properly.
|
| 345 |
+
"""
|
| 346 |
+
func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
|
| 347 |
+
return str(func.name.name)
|
| 348 |
+
|
| 349 |
+
grouped_native_functions = sorted(
|
| 350 |
+
grouped_native_functions, key=sort_native_function
|
| 351 |
+
)
|
| 352 |
+
|
| 353 |
+
parsed_backend_yaml = parse_backend_yaml(
|
| 354 |
+
source_yaml, grouped_native_functions, backend_indices
|
| 355 |
+
)
|
| 356 |
+
backend_key = parsed_backend_yaml.backend_key
|
| 357 |
+
autograd_key = parsed_backend_yaml.autograd_key
|
| 358 |
+
cpp_namespace = parsed_backend_yaml.cpp_namespace
|
| 359 |
+
backend_indices = parsed_backend_yaml.backend_indices
|
| 360 |
+
# the following 3 keys are all processed differently
|
| 361 |
+
# for full_codegen, we generate IR, kernels, etc
|
| 362 |
+
# for ir_gen, we generate only IR
|
| 363 |
+
# non_native is used to register kernels not declared in
|
| 364 |
+
# native_functions.yaml
|
| 365 |
+
full_codegen, non_native, ir_gen = parse_native_functions_keys(
|
| 366 |
+
source_yaml, grouped_native_functions
|
| 367 |
+
)
|
| 368 |
+
|
| 369 |
+
def concat_map_codegen(
|
| 370 |
+
func: Callable[[NativeFunction], Sequence[str]],
|
| 371 |
+
xs: Iterable[NativeFunctionsGroup | NativeFunction],
|
| 372 |
+
ops_list: list[OperatorName] = full_codegen,
|
| 373 |
+
) -> Iterator[str]:
|
| 374 |
+
"""
|
| 375 |
+
We code-gen for the functional variant, which is all we need for IR classes/lowerings/shape inferences, but we
|
| 376 |
+
only code-gen additional entries for the inplace variant for the native functions.
|
| 377 |
+
"""
|
| 378 |
+
|
| 379 |
+
for x in xs:
|
| 380 |
+
fs = list(x.functions()) if isinstance(x, NativeFunctionsGroup) else [x]
|
| 381 |
+
for f in fs:
|
| 382 |
+
if f.func.name in ops_list:
|
| 383 |
+
yield from func(f)
|
| 384 |
+
|
| 385 |
+
selector = SelectiveBuilder.get_nop_selector()
|
| 386 |
+
|
| 387 |
+
assert backend_key is not None
|
| 388 |
+
class_name = backend_indices[backend_key].native_function_class_name()
|
| 389 |
+
|
| 390 |
+
if impl_path is not None:
|
| 391 |
+
error_on_missing_kernels(
|
| 392 |
+
native_functions,
|
| 393 |
+
backend_indices,
|
| 394 |
+
backend_key,
|
| 395 |
+
autograd_key,
|
| 396 |
+
class_name,
|
| 397 |
+
impl_path,
|
| 398 |
+
full_codegen,
|
| 399 |
+
)
|
| 400 |
+
|
| 401 |
+
""" Validate Shape Inference Definitions
|
| 402 |
+
|
| 403 |
+
Generated lazy native functions all perform shape inference, by first using a meta:: kernel
|
| 404 |
+
if available for that op, and otherwise using a 'compute_shape_{op}' function instead. The generator
|
| 405 |
+
knows the call signature for compute_shape_{op} because it matches the nativefunction (and meta::) signature,
|
| 406 |
+
so it just has to check whether the op is structured and generate a call for one or the other. It's up to the dev
|
| 407 |
+
to supply the missing compute_shape_{op} function, but the codegen at least warns you about this and provides
|
| 408 |
+
the expected signature which can be copy-pasted into shape_inference.h.
|
| 409 |
+
|
| 410 |
+
compute_shape_{op} functions are handwritten and should be replaced over time as ops get ported
|
| 411 |
+
to structured kernels.
|
| 412 |
+
|
| 413 |
+
See torch/csrc/lazy/core/shape_inference.cpp #READ THIS! for more information.
|
| 414 |
+
"""
|
| 415 |
+
if shape_inference_hdr is not None:
|
| 416 |
+
expected_shape_infr_decls = list(
|
| 417 |
+
concat_map_codegen(
|
| 418 |
+
dest.GenLazyShapeInferenceDefinition(
|
| 419 |
+
backend_indices[backend_key], tensor_class
|
| 420 |
+
),
|
| 421 |
+
grouped_native_functions,
|
| 422 |
+
)
|
| 423 |
+
)
|
| 424 |
+
|
| 425 |
+
validate_shape_inference_header(shape_inference_hdr, expected_shape_infr_decls)
|
| 426 |
+
assert class_name is not None
|
| 427 |
+
|
| 428 |
+
# Generate nativefunction declarations
|
| 429 |
+
# Note, eager registrations is set to False for the lazy TS backend as another LTC backend
|
| 430 |
+
# may want to register their own lazy kernels instead of registering the TS ones.
|
| 431 |
+
# The registration will lazily happen when init_ts_backend is called.
|
| 432 |
+
gen_dispatchkey_nativefunc_headers(
|
| 433 |
+
fm,
|
| 434 |
+
class_name,
|
| 435 |
+
cpp_namespace,
|
| 436 |
+
backend_indices,
|
| 437 |
+
grouped_native_functions,
|
| 438 |
+
backend_key,
|
| 439 |
+
autograd_key,
|
| 440 |
+
backend_name,
|
| 441 |
+
)
|
| 442 |
+
|
| 443 |
+
# Generate Dispatcher registrations which hook up the nativefunctions
|
| 444 |
+
for dispatch_key in (
|
| 445 |
+
[backend_key] if autograd_key is None else [backend_key, autograd_key]
|
| 446 |
+
):
|
| 447 |
+
gen_dispatcher_registrations(
|
| 448 |
+
fm,
|
| 449 |
+
output_dir,
|
| 450 |
+
class_name,
|
| 451 |
+
backend_indices,
|
| 452 |
+
grouped_native_functions,
|
| 453 |
+
backend_key,
|
| 454 |
+
dispatch_key,
|
| 455 |
+
selector,
|
| 456 |
+
build_in_tree=build_in_tree,
|
| 457 |
+
per_operator_headers=per_operator_headers,
|
| 458 |
+
backend_name=backend_name,
|
| 459 |
+
eager_registration=False,
|
| 460 |
+
)
|
| 461 |
+
|
| 462 |
+
# Generate native function impls that build IR nodes
|
| 463 |
+
ns_helper = NamespaceHelper(cpp_namespace)
|
| 464 |
+
fm.write_with_template(
|
| 465 |
+
f"{backend_key}NativeFunctions.cpp",
|
| 466 |
+
"DispatchKeyNativeFunctions.cpp",
|
| 467 |
+
lambda: {
|
| 468 |
+
"includes": [
|
| 469 |
+
f"#include <{path}>"
|
| 470 |
+
for path in [
|
| 471 |
+
tensor_class_hdr,
|
| 472 |
+
shape_inference_hdr,
|
| 473 |
+
"ATen/Functions.h",
|
| 474 |
+
"ATen/native/TensorConversions.h",
|
| 475 |
+
"ATen/NativeFunctions.h",
|
| 476 |
+
"ATen/CompositeExplicitAutogradNonFunctionalFunctions.h",
|
| 477 |
+
"ATen/MetaFunctions.h",
|
| 478 |
+
"ATen/Operators.h",
|
| 479 |
+
"ATen/native/CPUFallback.h",
|
| 480 |
+
"torch/csrc/lazy/core/ir_builder.h",
|
| 481 |
+
"torch/csrc/lazy/core/lazy_graph_executor.h",
|
| 482 |
+
"torch/csrc/lazy/core/metrics.h",
|
| 483 |
+
"torch/csrc/lazy/core/shape.h",
|
| 484 |
+
f"{output_dir}/{backend_key}NativeFunctions.h",
|
| 485 |
+
f"{output_dir}/LazyIr.h",
|
| 486 |
+
]
|
| 487 |
+
+ (
|
| 488 |
+
["torch/csrc/lazy/ts_backend/ts_eager_fallback.h"]
|
| 489 |
+
if gen_forced_fallback_code
|
| 490 |
+
else []
|
| 491 |
+
)
|
| 492 |
+
],
|
| 493 |
+
"helper_fns": get_ltc_helper_fns(),
|
| 494 |
+
"native_functions_include": "",
|
| 495 |
+
"namespace_prologue": ns_helper.prologue,
|
| 496 |
+
"namespace_epilogue": ns_helper.epilogue,
|
| 497 |
+
"native_function_definitions": list(
|
| 498 |
+
concat_map_codegen(
|
| 499 |
+
native_func_definition_generator(
|
| 500 |
+
f"{backend_key}NativeFunctions",
|
| 501 |
+
backend_indices[backend_key],
|
| 502 |
+
tensor_class,
|
| 503 |
+
gen_forced_fallback_code,
|
| 504 |
+
backend_namespace,
|
| 505 |
+
get_tensorlist,
|
| 506 |
+
get_tensor_or_wrap_number,
|
| 507 |
+
try_get_tensor,
|
| 508 |
+
metrics_counter,
|
| 509 |
+
create_tensor,
|
| 510 |
+
create_from_first_tensor,
|
| 511 |
+
create_aten_from_ltc_tensor,
|
| 512 |
+
tuple_aten_from_ltc_tensors,
|
| 513 |
+
lazy_tensor_ptr,
|
| 514 |
+
get_device_fn,
|
| 515 |
+
),
|
| 516 |
+
grouped_native_functions,
|
| 517 |
+
)
|
| 518 |
+
),
|
| 519 |
+
},
|
| 520 |
+
)
|
| 521 |
+
# Generate IR node classes
|
| 522 |
+
lazy_ir_obj = lazy_ir_generator(
|
| 523 |
+
backend_indices[backend_key], backend_name, node_base, use_lazy_shape
|
| 524 |
+
)
|
| 525 |
+
|
| 526 |
+
fm.write_with_template(
|
| 527 |
+
"LazyIr.h",
|
| 528 |
+
"LazyIr.h",
|
| 529 |
+
lambda: {
|
| 530 |
+
"lazy_ir_sysinc": [
|
| 531 |
+
f"#include <{path}>"
|
| 532 |
+
for path in [
|
| 533 |
+
"ATen/core/Formatting.h",
|
| 534 |
+
"c10/core/ScalarType.h",
|
| 535 |
+
"torch/csrc/lazy/core/hash.h",
|
| 536 |
+
"torch/csrc/lazy/core/ir.h",
|
| 537 |
+
"torch/csrc/lazy/core/shape.h",
|
| 538 |
+
"optional",
|
| 539 |
+
"vector",
|
| 540 |
+
]
|
| 541 |
+
],
|
| 542 |
+
"lazy_ir_inc": [f'#include "{node_base_hdr}"']
|
| 543 |
+
if node_base_hdr is not None
|
| 544 |
+
else [],
|
| 545 |
+
"ir_declarations": list(
|
| 546 |
+
concat_map_codegen(
|
| 547 |
+
lazy_ir_obj, grouped_native_functions, full_codegen + ir_gen
|
| 548 |
+
)
|
| 549 |
+
),
|
| 550 |
+
"namespace_prologue": ns_helper.prologue,
|
| 551 |
+
"namespace_epilogue": ns_helper.epilogue,
|
| 552 |
+
},
|
| 553 |
+
)
|
| 554 |
+
|
| 555 |
+
# Generate Non Native IR Node classes
|
| 556 |
+
fm.write_with_template(
|
| 557 |
+
"LazyNonNativeIr.h",
|
| 558 |
+
"LazyNonNativeIr.h",
|
| 559 |
+
lambda: {
|
| 560 |
+
"lazy_non_native_ir_inc": [
|
| 561 |
+
f"#include <{path}>"
|
| 562 |
+
for path in [
|
| 563 |
+
"torch/csrc/lazy/core/ir.h",
|
| 564 |
+
"torch/csrc/lazy/core/ir_builder.h",
|
| 565 |
+
"torch/csrc/lazy/core/internal_ops/ltc_ops.h",
|
| 566 |
+
"torch/csrc/lazy/core/shape_inference.h",
|
| 567 |
+
]
|
| 568 |
+
+ ([node_base_hdr] if node_base_hdr else [])
|
| 569 |
+
if path
|
| 570 |
+
],
|
| 571 |
+
"non_native_ir_nodes": dest.generate_non_native_lazy_ir_nodes(
|
| 572 |
+
non_native, lazy_ir_obj
|
| 573 |
+
),
|
| 574 |
+
"namespace_prologue": ns_helper.prologue,
|
| 575 |
+
"namespace_epilogue": ns_helper.epilogue,
|
| 576 |
+
},
|
| 577 |
+
)
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
# Script entry point: only run codegen when executed directly, not on import.
if __name__ == "__main__":
    main()
|
minigpt2/lib/python3.10/site-packages/torchgen/gen_schema_utils.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Optional, Tuple, Union
|
| 2 |
+
|
| 3 |
+
from torchgen.model import (
|
| 4 |
+
Annotation,
|
| 5 |
+
Argument,
|
| 6 |
+
Arguments,
|
| 7 |
+
BaseOperatorName,
|
| 8 |
+
BaseTy,
|
| 9 |
+
BaseType,
|
| 10 |
+
CustomClassType,
|
| 11 |
+
FunctionSchema,
|
| 12 |
+
ListType,
|
| 13 |
+
OperatorName,
|
| 14 |
+
Return,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Note: These aren't actually used in torchgen, they're some utilities for generating a schema
|
| 19 |
+
# from real arguments. For example, this is used to generate HigherOrderOperators' schema since
|
| 20 |
+
# their schemas can vary for different instances of the same HOP.
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class TypeGen:
    """Map concrete example values to torchgen schema ``Type`` objects."""

    # Primitive Python types that map directly onto schema base types.
    convert_to_base_ty = {
        int: BaseTy.int,
        float: BaseTy.float,
        str: BaseTy.str,
        bool: BaseTy.bool,
    }

    @staticmethod
    def from_example(obj: Any) -> Union[BaseType, ListType, CustomClassType]:
        """Infer the schema type of ``obj`` from its runtime type.

        Raises:
            RuntimeError: for unsupported types, and for heterogeneous
                sequences (which cannot be expressed as a single ListType).
        """
        import torch

        if isinstance(obj, torch.fx.GraphModule):
            return BaseType(BaseTy.GraphModule)
        elif isinstance(obj, torch.Tensor):
            return BaseType(BaseTy.Tensor)
        elif isinstance(obj, torch.SymInt):
            return BaseType(BaseTy.SymInt)
        elif isinstance(obj, torch.SymBool):
            return BaseType(BaseTy.SymBool)
        elif isinstance(obj, torch.ScriptObject):
            return CustomClassType(obj._type().name())  # type: ignore[attr-defined]
        elif isinstance(obj, (list, tuple)):
            assert len(obj) > 0
            all_base_tys = [TypeGen.from_example(x) for x in obj]
            if len(set(all_base_tys)) > 1:
                # Fix: original message misspelled "sequence" as "seqeunce".
                raise RuntimeError(
                    f"Cannot generate schema for a sequence of args of heterogeneous types: {all_base_tys}. "
                    "Consider unpacking the argument and give proper names to them if possible "
                    "instead of using *args."
                )
            return ListType(all_base_tys[0], len(obj))
        tp = type(obj)
        if tp not in TypeGen.convert_to_base_ty:
            raise RuntimeError(f"unsupported type {tp}")
        return BaseType(TypeGen.convert_to_base_ty[tp])
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class ReturnGen:
    """Build a torchgen ``Return`` from an example output value."""

    @staticmethod
    def from_example(
        name: Optional[str], obj: Any, annotation: Optional[Annotation]
    ) -> Return:
        """Create a Return whose type is inferred from the example ``obj``."""
        inferred_type = TypeGen.from_example(obj)
        return Return(name, inferred_type, annotation)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class ArgumentGen:
    """Build a torchgen ``Argument`` from an example input value."""

    @staticmethod
    def from_example(
        name: str, obj: Any, default: Optional[str], annotation: Optional[Annotation]
    ) -> Argument:
        """Create an Argument named ``name`` whose type is inferred from ``obj``."""
        inferred_type = TypeGen.from_example(obj)
        return Argument(name, inferred_type, default=default, annotation=annotation)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class FunctionSchemaGen:
    """Assemble a full torchgen ``FunctionSchema`` from example inputs/outputs."""

    @staticmethod
    def from_example(
        op_name: str,
        example_inputs: Tuple[Tuple[str, Any], ...],
        example_outputs: Tuple[Any, ...],
    ) -> FunctionSchema:
        """Infer a schema for ``op_name`` from (name, value) input pairs and output values."""
        # Annotations and other argument attributes are ignored for now;
        # extend this when richer schemas are needed.
        args = [
            ArgumentGen.from_example(name, inp, None, None)
            for name, inp in example_inputs
        ]
        arguments = Arguments(
            tuple(), None, tuple(args), tuple(), None, tuple(), tuple()
        )
        returns = tuple(
            ReturnGen.from_example(None, out, None) for out in example_outputs
        )
        full_op_name = OperatorName(BaseOperatorName(op_name, False, False, False), "")
        return FunctionSchema(full_op_name, arguments, returns)
|
minigpt2/lib/python3.10/site-packages/torchgen/gen_vmap_plumbing.py
ADDED
|
@@ -0,0 +1,271 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import textwrap
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Sequence
|
| 6 |
+
|
| 7 |
+
from torchgen.api.translate import translate
|
| 8 |
+
from torchgen.api.types import DispatcherSignature
|
| 9 |
+
from torchgen.context import method_with_native_function
|
| 10 |
+
from torchgen.model import (
|
| 11 |
+
Argument,
|
| 12 |
+
BaseTy,
|
| 13 |
+
BaseType,
|
| 14 |
+
FunctionSchema,
|
| 15 |
+
ListType,
|
| 16 |
+
NativeFunction,
|
| 17 |
+
OptionalType,
|
| 18 |
+
Return,
|
| 19 |
+
SchemaKind,
|
| 20 |
+
Type,
|
| 21 |
+
)
|
| 22 |
+
from torchgen.utils import mapMaybe
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def is_tensor(typ: Type) -> bool:
    """Return True iff ``typ`` is the plain ``Tensor`` base type."""
    if not isinstance(typ, BaseType):
        return False
    return typ.name == BaseTy.Tensor
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def is_optional_tensor(typ: Type) -> bool:
    """Return True iff ``typ`` is ``Tensor?`` (an optional wrapping a plain Tensor)."""
    if isinstance(typ, OptionalType):
        return is_tensor(typ.elem)
    return False
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def is_tensor_list(typ: Type) -> bool:
    """Return True iff ``typ`` is ``Tensor[]`` (a list of plain Tensors)."""
    if isinstance(typ, ListType):
        return is_tensor(typ.elem)
    return False
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def unwrap_tensor(name: str, cur_level_var: str) -> list[str]:
    """Emit C++ that splits tensor ``name`` into value and batch-dim parts.

    Returns the generated code as a list of lines (here, a single line
    declaring ``<name>_value`` and ``<name>_bdim`` via structured bindings).
    """
    line = (
        f"auto [{name}_value, {name}_bdim] = "
        f"unwrapTensorAtLevel({name}, {cur_level_var});"
    )
    return [line]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def unwrap_optional_tensor(name: str, cur_level_var: str) -> list[str]:
    """Emit C++ that splits an optional tensor ``name`` into value/batch-dim parts.

    The generated code declares the two optionals unconditionally and only
    fills them in when the argument is engaged. Returns the code as a list
    of lines.
    """
    return [
        f"std::optional<Tensor> {name}_value;",
        f"std::optional<int64_t> {name}_bdim;",
        f"if ({name}) {{",
        f"    std::tie({name}_value, {name}_bdim) = "
        f"unwrapTensorAtLevel({name}.value(), {cur_level_var});",
        "}",
    ]
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def gen_unwraps(
    flat_arguments: Sequence[Argument], cur_level_var: str
) -> tuple[str, list[str]]:
    """Generate unwrap code for every tensor-ish argument.

    Returns a pair:
      * C++ code that unwraps each Tensor / optional Tensor argument into
        ``<name>_value`` / ``<name>_bdim`` locals, and
      * the argument list to pass to the batch rule, with every unwrapped
        tensor expanded into its two parts (other args pass through as-is).
    """
    tensors = [a.name for a in flat_arguments if is_tensor(a.type)]
    optional_tensors = [a.name for a in flat_arguments if is_optional_tensor(a.type)]

    unwrap_lines: list[str] = []
    for name in tensors:
        unwrap_lines.extend(unwrap_tensor(name, cur_level_var))
    for name in optional_tensors:
        unwrap_lines.extend(unwrap_optional_tensor(name, cur_level_var))
    unwrap_code = "\n".join(unwrap_lines)

    unwrapped_arg_list: list[str] = []
    for a in flat_arguments:
        if a.name in tensors or a.name in optional_tensors:
            unwrapped_arg_list.extend([f"{a.name}_value", f"{a.name}_bdim"])
        else:
            unwrapped_arg_list.append(a.name)
    return unwrap_code, unwrapped_arg_list
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def gen_case_where_all_bdims_are_none(
    outer_sig: DispatcherSignature, schema: FunctionSchema, cur_level_var: str
) -> str:
    """Generate the C++ fast path taken when no tensor argument is batched.

    The emitted ``if`` tests every tensor-like argument with
    ``isBatchedAtLevel(...)``; when none is batched at ``cur_level_var``,
    the plumbing simply forwards the call to the original dispatcher op.
    """
    conditions = []
    flat_args = schema.arguments.flat_all
    for arg in flat_args:
        # Non-tensor args can never carry a batch dim, so they need no check.
        if not arg.type.is_tensor_like():
            continue
        conditions.append(f"!isBatchedAtLevel({arg.name}, {cur_level_var})")

    sig = DispatcherSignature.from_schema(schema)
    # Re-express the outer signature's arguments in terms of the op's own
    # dispatcher signature before forwarding.
    translated_args = ", ".join(
        e.expr for e in translate(outer_sig.arguments(), sig.arguments())
    )
    return f"""\
if ({' && '.join(conditions)}) {{
  return at::_ops::{sig.func.name.unambiguous_name()}::call({translated_args});
}}"""
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def gen_returns(
    returns: tuple[Return, ...], cur_level_var: str, results_var: str
) -> str:
    """Generate the C++ return statement that re-wraps batch-rule results.

    The batch rule yields a (value, bdim) pair for each tensor(-list)
    return and a single slot for everything else; walk the result tuple
    accordingly, wrapping tensors back into batched tensors at
    ``cur_level_var``.
    """
    wrapped_returns = []
    idx = 0
    for ret in returns:
        if is_tensor(ret.type):
            wrapped_returns.append(
                f"makeBatched(std::get<{idx}>({results_var}), std::get<{idx + 1}>({results_var}), {cur_level_var})"
            )
            idx += 2
        elif is_tensor_list(ret.type):
            wrapped_returns.append(
                f"makeBatchedVector(std::get<{idx}>({results_var}), std::get<{idx + 1}>({results_var}), {cur_level_var})"
            )
            idx += 2
        else:
            # Non-tensor returns pass through untouched.
            wrapped_returns.append(f"std::get<{idx}>({results_var})")
            idx += 1
    if len(wrapped_returns) == 1:
        return f"return {wrapped_returns[0]};"
    return f'return std::make_tuple({", ".join(wrapped_returns)});'
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def accepts_at_least_one_tensor_input(schema: FunctionSchema) -> bool:
    """Return True iff some argument of ``schema`` is tensor-like."""
    for arg in schema.arguments.flat_all:
        if arg.type.is_tensor_like():
            return True
    return False
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def is_mutated_arg(argument: Argument) -> bool:
    """Return True iff ``argument`` is annotated as written to (e.g. ``Tensor(a!)``)."""
    ann = argument.annotation
    return ann is not None and ann.is_write
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def gen_vmap_inplace_plumbing(native_function: NativeFunction) -> str | None:
    """Generate vmap plumbing for an in-place op, or None if unsupported.

    The generated C++ unwraps batched tensor arguments, forwards to the
    batch rule, and returns the (mutated) first argument.

    Assumptions:
    - only one argument is being modified in-place
    - the argument that is being modified in-place is the first argument
    - all returns are either Tensor, tuple of Tensor, or TensorList
    """
    schema = native_function.func
    sig = DispatcherSignature.from_schema(schema)
    returns = schema.returns

    # Check assumptions. If these are invalid we return None
    # and punt the work to handle them to the future.
    assert schema.kind() == SchemaKind.inplace
    if not is_mutated_arg(schema.arguments.flat_all[0]):
        return None
    if not len([arg for arg in schema.arguments.flat_all if is_mutated_arg(arg)]) == 1:
        return None

    # Only support cases where all returns are Tensors or vector<Tensor>
    if len(returns) == 0:
        return None
    if not all(is_tensor(ret.type) or is_tensor_list(ret.type) for ret in returns):
        return None
    if not accepts_at_least_one_tensor_input(schema):
        return None

    cur_level_var = "cur_level"

    unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
    bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)

    # NOTE(review): the two-space indent strings below were garbled in the
    # rendered source; reconstructed to match the surrounding templates.
    return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t {cur_level_var} = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, "  ")}
{textwrap.indent(unwraps, "  ")}
  batch_rule({', '.join(unwrapped_arg_list)});
  return {schema.arguments.flat_all[0].name};
}}"""
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def gen_vmap_plumbing_no_returns(native_function: NativeFunction) -> str:
    """Generate vmap plumbing for an op that returns nothing.

    Same structure as the general plumbing, but the batch-rule call's
    result is discarded and the generated function has no return statement.
    """
    schema = native_function.func
    sig = DispatcherSignature.from_schema(schema)
    cur_level_var = "cur_level"

    unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
    bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)

    # NOTE(review): indent strings reconstructed from the garbled rendering.
    return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t {cur_level_var} = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, "  ")}
{textwrap.indent(unwraps, "  ")}
  batch_rule({', '.join(unwrapped_arg_list)});
}}"""
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def gen_vmap_plumbing(native_function: NativeFunction) -> str | None:
    """Generate the batching-rule plumbing template for one native function.

    Returns the C++ source for a ``<name>_generated_plumbing`` function
    template, or None when the op's schema is not supported (non-tensor
    inputs only, non-functional/non-inplace kinds, inplace views, or
    unsupported return types). Ops with no returns and inplace ops are
    delegated to their specialized generators.
    """
    schema = native_function.func
    sig = DispatcherSignature.from_schema(schema)
    returns = schema.returns

    # Only support cases where all returns are Tensors or vector<Tensor>
    if not accepts_at_least_one_tensor_input(schema):
        return None
    if len(returns) == 0:
        return gen_vmap_plumbing_no_returns(native_function)
    # These ops return non-tensor values (e.g. SymInt) but are special-cased
    # as still getting plumbing generated.
    return_symint_overrides = [
        "_scaled_dot_product_flash_attention",
        "_scaled_dot_product_cudnn_attention",
    ]
    if (
        not all(ret.type.is_tensor_like() for ret in returns)
        and schema.name.unambiguous_name() not in return_symint_overrides
    ):
        return None
    # in-place views need special handling
    if "inplace_view" in native_function.tags:
        return None

    if schema.kind() == SchemaKind.inplace:
        return gen_vmap_inplace_plumbing(native_function)

    # Don't support these (mutable, out, scratch)
    if schema.kind() != SchemaKind.functional:
        return None

    results_var = "results"
    cur_level_var = "cur_level"

    unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
    bdims_all_none_case = gen_case_where_all_bdims_are_none(sig, schema, cur_level_var)

    wrapped_returns = gen_returns(returns, cur_level_var, results_var)
    # NOTE(review): indent strings reconstructed from the garbled rendering.
    return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t {cur_level_var} = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, "  ")}
{textwrap.indent(unwraps, "  ")}
  auto {results_var} = batch_rule({', '.join(unwrapped_arg_list)});
  {wrapped_returns}
}}"""
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
@dataclass(frozen=True)
class ComputeBatchRulePlumbing:
    """Callable wrapper so plumbing generation composes with ``mapMaybe``."""

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> str | None:
        """Return the generated plumbing for ``f``, or None if unsupported."""
        return gen_vmap_plumbing(f)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def gen_all_vmap_plumbing(native_functions: Sequence[NativeFunction]) -> str:
    """Render the complete generated-plumbing header for ``native_functions``.

    Functions for which no plumbing can be generated are silently skipped
    (``gen_vmap_plumbing`` returns None and ``mapMaybe`` drops them).
    """
    body = "\n".join(list(mapMaybe(ComputeBatchRulePlumbing(), native_functions)))
    return f"""
#pragma once
#include <ATen/Operators.h>
#include <ATen/functorch/PlumbingHelper.h>

namespace at {{ namespace functorch {{

{body}

}}}} // namespace at::functorch
"""
|
minigpt2/lib/python3.10/site-packages/torchgen/model.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/native_function_generation.py
ADDED
|
@@ -0,0 +1,646 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
from typing import Sequence
|
| 5 |
+
|
| 6 |
+
import torchgen.api.dispatcher as dispatcher
|
| 7 |
+
from torchgen.api.translate import translate
|
| 8 |
+
from torchgen.api.types import Binding, DispatcherSignature, Expr
|
| 9 |
+
from torchgen.context import with_native_function
|
| 10 |
+
from torchgen.model import (
|
| 11 |
+
Annotation,
|
| 12 |
+
Argument,
|
| 13 |
+
BackendIndex,
|
| 14 |
+
BackendMetadata,
|
| 15 |
+
BaseOperatorName,
|
| 16 |
+
BaseTy,
|
| 17 |
+
BaseType,
|
| 18 |
+
DEFAULT_KERNEL_NAMESPACE,
|
| 19 |
+
DeviceCheckType,
|
| 20 |
+
DispatchKey,
|
| 21 |
+
FunctionSchema,
|
| 22 |
+
NativeFunction,
|
| 23 |
+
NativeFunctionsGroup,
|
| 24 |
+
OperatorName,
|
| 25 |
+
Return,
|
| 26 |
+
SchemaKind,
|
| 27 |
+
Variant,
|
| 28 |
+
)
|
| 29 |
+
from torchgen.utils import concatMap
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
# See Note: [Out ops with functional variants that don't get grouped properly]
|
| 33 |
+
OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY = [
|
| 34 |
+
# This has a functional variant, but it's currently marked private.
|
| 35 |
+
# This function should be marked private as well (*_backward ops aren't exposed to python anyway).
|
| 36 |
+
"adaptive_avg_pool3d_backward.grad_input",
|
| 37 |
+
# There's a functional variant, _slow_conv2d_backward.output_mask, that isn't grouped properly.
|
| 38 |
+
# Maybe we can kill this operator in favor of convolution_backward?
|
| 39 |
+
"_slow_conv2d_backward.grad_input",
|
| 40 |
+
]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# See Note: [Mutable ops that cannot get an out variant]
|
| 44 |
+
MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT = [
|
| 45 |
+
# should be out=?
|
| 46 |
+
"_cummax_helper",
|
| 47 |
+
# should be out=?
|
| 48 |
+
"_cummin_helper",
|
| 49 |
+
]
|
| 50 |
+
|
| 51 |
+
# All of these operators don't have any tensor like returns
|
| 52 |
+
FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT = [
|
| 53 |
+
"_assert_async", # no return
|
| 54 |
+
"_assert_async.msg", # no return
|
| 55 |
+
"_cslt_sparse_mm_search", # returns an int
|
| 56 |
+
"_assert_scalar", # no return
|
| 57 |
+
"_dimI", # returns an int
|
| 58 |
+
"_dimV", # returns an int
|
| 59 |
+
"_has_same_storage_numel", # returns a boolean
|
| 60 |
+
"_linalg_check_errors", # no return
|
| 61 |
+
"_local_scalar_dense", # returns a Scalar
|
| 62 |
+
"_nested_tensor_from_mask_left_aligned", # returns a boolean
|
| 63 |
+
"_nnz", # returns an int
|
| 64 |
+
"_use_cudnn_ctc_loss", # returns a boolean
|
| 65 |
+
"_use_cudnn_ctc_loss.Tensor", # returns a boolean
|
| 66 |
+
"_validate_compressed_sparse_indices", # no return
|
| 67 |
+
"allclose", # returns a boolean
|
| 68 |
+
"dense_dim", # returns an int
|
| 69 |
+
"equal", # returns a boolean
|
| 70 |
+
"is_coalesced", # returns an boolean
|
| 71 |
+
"is_pinned", # returns a boolean
|
| 72 |
+
"is_same_size", # returns a boolean
|
| 73 |
+
"is_set_to", # returns a boolean
|
| 74 |
+
"q_per_channel_axis", # returns an int
|
| 75 |
+
"q_scale", # returns a float
|
| 76 |
+
"q_zero_point", # returns an int
|
| 77 |
+
"qscheme", # returns a QScheme
|
| 78 |
+
"record_stream", # no return
|
| 79 |
+
"sparse_dim", # returns an int
|
| 80 |
+
"sym_constrain_range", # no return
|
| 81 |
+
"sym_constrain_range_for_size", # no return
|
| 82 |
+
"_nested_tensor_storage_offsets", # returns a vector of ints
|
| 83 |
+
"_chunk_grad_outputs_efficient_attention", # returns a bool
|
| 84 |
+
"_fused_sdp_choice", # returns an int
|
| 85 |
+
"_print", # no return
|
| 86 |
+
"_sink_tokens", # no return
|
| 87 |
+
"_nested_get_ragged_idx", # returns an int
|
| 88 |
+
]
|
| 89 |
+
|
| 90 |
+
INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY = [
|
| 91 |
+
# polygamma and polygamma.out both exist, but have a
|
| 92 |
+
# pre-self arg (while polygamma_ does not)
|
| 93 |
+
# We should either fix this schema so it can be grouped properly,
|
| 94 |
+
# or allow the codegen to generate new functional/out= NativeFunctions for this op
|
| 95 |
+
# (which would require changing its overload name to prevent overload ambiguity).
|
| 96 |
+
"polygamma_"
|
| 97 |
+
]
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
# Groups "similar" NativeFunctions together
# example add.Tensor, add_.Tensor, add.out
# "similar" NativeFunctions are all expected to have an identical `signature()`,
# But have differing SchemaKinds.
def pre_group_native_functions(
    native_functions: Sequence[NativeFunction],
) -> dict[FunctionSchema, dict[SchemaKind, NativeFunction]]:
    """Bucket NativeFunctions by shared ``signature()``, keyed by SchemaKind.

    Each bucket maps a SchemaKind (functional/inplace/mutable/out) to the single
    NativeFunction of that kind sharing the signature; duplicates are an error.
    """
    grouped: dict[FunctionSchema, dict[SchemaKind, NativeFunction]] = defaultdict(dict)
    for fn in native_functions:
        kind = fn.func.kind()
        bucket = grouped[fn.func.signature()]
        # Two ops with the same signature must not share a SchemaKind.
        assert kind not in bucket
        bucket[kind] = fn
    return grouped
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
# Returns the out variant overload name given a base function overload name
def get_expected_out_variant_overload_name(overload_name: str | None) -> str:
    """Derive the out= overload name: "" / None -> "out", "X" -> "X_out"."""
    if overload_name:
        return f"{overload_name}_out"
    return "out"
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
# Helper function: given an inplace FunctionSchema, generate its corresponding out= variant
# Example before:
#   _add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
# Example after:
#   _add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out)
def self_to_out_signature(func: FunctionSchema) -> FunctionSchema:
    """Convert an inplace schema into the corresponding out= schema.

    The mutable ``self`` argument loses its annotation and a new keyword-only
    ``out`` argument (same type, carrying self's original mutable annotation)
    is appended.  Returns are kept as-is; the returned-alias bookkeeping moves
    to the ``out`` argument.
    """
    # Generating an out= schema from an inplace schema.
    assert func.kind() == SchemaKind.inplace
    # Inplace ops always have a self argument; we rely on it below.
    assert func.arguments.self_arg is not None
    # The new out= schema has:
    # - a new out argument with the same type as "func" (but with a mutable annotation)
    # - The returns (if any) now alias the out= argument instead of "func"
    # - an "out" overload name
    return FunctionSchema(
        name=func.name.remove_inplace().with_overload(
            get_expected_out_variant_overload_name(func.name.overload_name)
        ),
        arguments=func.arguments.remove_self_annotation().with_out_args(
            [
                Argument(
                    name="out",
                    type=func.arguments.self_arg.argument.type,
                    default=None,
                    # Reuse self's annotation so the out arg keeps the same
                    # alias set (e.g. "a!") that self had.
                    annotation=func.arguments.self_arg.argument.annotation,
                )
            ]
        ),
        returns=func.returns,
    )
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
# Helper function: given a functional FunctionSchema, generate its corresponding out= variant
# Example before:
#   _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None,
#       bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
# Example after:
#   _to_copy._out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None,
#       Tensor(a!) out) -> Tensor(a!)
def functional_to_out_signature(func: FunctionSchema) -> FunctionSchema:
    """Convert a functional schema into the corresponding out= schema.

    One mutable out= argument is synthesized per tensor-like return (see
    generate_out_args_from_schema), appended keyword-only; the returns are
    rewritten to alias those out args when all returns are plain Tensors.
    """
    # Generating an out= schema from a functional schema.
    assert func.kind() == SchemaKind.functional

    new_returns, new_out_args = generate_out_args_from_schema(func)
    # The new out= schema has:
    # - one or more new out argument(s) with the same type as returns (but with a mutable annotation)
    # - The returns now alias the out= arguments
    # - an "_out" overload name
    return FunctionSchema(
        name=func.name.with_overload(
            get_expected_out_variant_overload_name(func.name.overload_name)
        ),
        # signature() strips defaults/annotations so the out variant matches
        # the grouping signature of its functional sibling.
        arguments=func.arguments.signature().with_out_args(
            new_out_args,
        ),
        returns=tuple(new_returns),
    )
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
# Helper function: given a function schema, generate corresponding out arguments, also the updated return annotations.
def generate_out_args_from_schema(
    func: FunctionSchema,
) -> tuple[list[Return], list[Argument]]:
    """Synthesize out= arguments (and matching returns) for ``func``.

    Returns a pair ``(new_returns, new_out_args)``:
    - one mutable out arg is created per tensor-like return, using a fresh
      alias letter not already used by any argument annotation;
    - if every return is a plain Tensor, new_returns mirrors the out args
      (with their alias annotations); otherwise only the non-tensor-like
      returns are kept.
    """
    # More of a sanity check - our existing restrictions on schemas should enforce that
    # mutable schema kinds never return their mutable arguments.
    assert not any(
        r.annotation is not None and r.annotation.is_write for r in func.returns
    )

    tensorlike_rets = [r for r in func.returns if r.type.is_tensor_like()]
    assert len(tensorlike_rets) > 0

    # Collect every alias letter already claimed by the arguments, so the new
    # out args get fresh, non-colliding annotations.
    used_annotations = concatMap(
        lambda a: [] if a.annotation is None else a.annotation.alias_set,
        func.arguments.flat_all,
    )
    valid_annotations = [
        x for x in "abcdefghijklmnopqrstuvwxyz" if x not in used_annotations
    ]

    all_rets_are_tensors = all(r.type == BaseType(BaseTy.Tensor) for r in func.returns)

    new_out_args: list[Argument] = []
    # The end result of new_returns is that:
    # - If every return is a plain tensor, then the new returns == the old returns, but with the out= alias annotations added.
    # - Otherwise, none of the out arguments show up in the returns (and we're only left with non-tensor-like returns, if any).
    new_returns: list[Return] = []
    for i, r in enumerate(func.returns):
        if r.type.is_tensor_like():
            new_out = Argument(
                # Single-return ops get "out"; multi-return ops get out0, out1, ...
                name="out" if len(func.returns) == 1 else f"out{i}",
                type=r.type,
                default=None,
                # NOTE: indexed by return position i, not by count of
                # tensor-like returns seen so far — some letters may be skipped
                # when non-tensor returns are interleaved.
                annotation=Annotation.parse(f"{valid_annotations[i]}!"),
            )
            new_out_args.append(new_out)
            if all_rets_are_tensors:
                # The convention for out= schemas is that they only return their out arguments
                # if the return is a plain Tensor (or if it's a tuple of plain Tensors)
                new_ret = Return(
                    name=None, type=new_out.type, annotation=new_out.annotation
                )
                new_returns.append(new_ret)
        else:
            new_returns.append(r)
    return new_returns, new_out_args
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
# Helper function: given a mutable FunctionSchema, generate its corresponding out= variant
# Example before:
#   _fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)  # noqa: B950
# Example after:
#   _fused_moving_avg_obs_fq_helper._out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))  # noqa: B950
def mutable_to_out_signature(func: FunctionSchema) -> FunctionSchema:
    """Convert a mutable schema into the corresponding out= schema.

    Unlike self_to_out_signature, the original mutable positional arguments
    are kept as-is; only the non-aliased tensor-like *returns* are converted
    into appended mutable out= kwargs.
    """
    # Generating an out= schema from a mutable schema.
    assert func.kind() == SchemaKind.mutable
    # The new out= schema has:
    # - Any non-aliased tensor-like returns are converted to mutable, aliased out= arguments
    #   (if the argument is a tensor then we also return it for method chaining,
    #   otherwise we return nothing)
    # - an "out" overload name
    #
    # Note that:
    # (1) This also means that we can *only* generate an out= variant from a mutable schema
    #     if the mutable schema has at least one tensor-like non-aliasing return.
    # (2) The generated out= variant still has mutable positional arguments,
    #     but if necessary we could probably add another out= variant that also
    #     functionalizes the mutable arguments (a functional_out variant)

    new_returns, new_out_args = generate_out_args_from_schema(func)

    return FunctionSchema(
        name=func.name.remove_inplace().with_overload(
            get_expected_out_variant_overload_name(func.name.overload_name)
        ),
        arguments=func.arguments.with_out_args(new_out_args),
        returns=tuple(new_returns),
    )
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
# This function, given function of one SchemaKind, as well as a target SchemaKind,
# generates a new NativeFunction with the same properties, but using the target SchemaKind.
# We only actually generate functions for either functional or out= SchemaKinds.
# This function returns a tuple, with:
# - The generated NativeFunction
# - a dictionary of `BackendIndex` objects, describing which dispatch keys
#   we will generate kernels for, for the new NativeFunction.
# Details are in the function, but we only generate composite kernels (in some cases) today.
def generate_function(
    f: NativeFunction, k: SchemaKind
) -> tuple[NativeFunction, dict[DispatchKey, dict[OperatorName, BackendMetadata]]]:
    """Synthesize a new NativeFunction of SchemaKind ``k`` from ``f``.

    Only ``SchemaKind.functional`` and ``SchemaKind.out`` targets are
    supported; any other target raises AssertionError.  The second element of
    the returned tuple registers a single CompositeExplicitAutograd kernel
    for the generated op.
    """
    # Local import to avoid a circular dependency at module load time.
    from torchgen.api import cpp

    if k == SchemaKind.functional:
        assert f.func.kind() != SchemaKind.functional
        # The new "functional" NativeFunction has:
        # - any mutable arguments have been converted into (immutable) returns.
        #   (if a mutable argument was not also a return, it gets converted to one)
        # - "_functional" appended to the base name, ONLY IF this op has a mutable variant.
        #   See Note [Overload Ambiguity With Functional Variants]
        # The default grouping logic in signature() actually already does this,
        # so we can piggy-back off it (but we still want return names)
        func = f.func.signature(keep_return_names=True).with_name(
            OperatorName(
                name=BaseOperatorName(
                    base=f.func.name.name.base,
                    inplace=False,
                    dunder_method=f.func.name.name.dunder_method,
                    # See Note [Overload Ambiguity With Functional Variants]
                    functional_overload=f.func.kind() == SchemaKind.mutable,
                ),
                overload_name=f.func.name.overload_name,
            )
        )
    elif k == SchemaKind.out:
        # We generate out= ops mostly just so that we can pair up NativeFunctions into groups easily,
        # but at least today, there is no good reason to actually use them.
        # we'll generate a dispatcher entry for them, but won't actually register any kernels for them.
        if f.func.kind() == SchemaKind.inplace:
            func = self_to_out_signature(f.func)
        elif f.func.kind() == SchemaKind.mutable:
            func = mutable_to_out_signature(f.func)
        elif f.func.kind() == SchemaKind.functional:
            func = functional_to_out_signature(f.func)
        else:
            raise AssertionError(
                "We only bother generating out= functions from either inplace or mutable or functional variants"
            )
    else:
        raise AssertionError(
            "We currently only generate either functional or out= NativeFunctions"
        )

    # Generated kernel naming convention for out: <op_name>_<overload_name>. The reason for this is to
    # disambiguate operator with the same name but different overload name, e.g., `randn.names_out` and
    # `randn.generator_with_names_out`.
    kernel_name = (
        func.name.unambiguous_name()
        if func.kind() == SchemaKind.out
        else cpp.name(func)
    )
    # SymInt-taking ops get a distinct "_symint" kernel symbol.
    if f.func.has_symint():
        kernel_name += "_symint"
    backend_metadata = {
        DispatchKey.CompositeExplicitAutograd: {
            func.name: BackendMetadata(
                kernel=kernel_name,
                structured=False,
                cpp_namespace=DEFAULT_KERNEL_NAMESPACE,
            )
        }
    }
    # Propagate only the tags that remain meaningful on the generated op.
    tags = {"generated"} | set(
        f.tags & {"nondeterministic_seeded", "view_copy", "pt2_compliant_tag"}
    )

    return (
        NativeFunction(
            func=func,
            use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors,
            # These generated fn's aren't meant to be user friendly- don't generate methods.
            variants={Variant.function},
            structured=False,
            structured_delegate=None,
            structured_inherits=None,
            precomputed=None,
            autogen=[],
            ufunc_inner_loop={},
            manual_kernel_registration=False,
            manual_cpp_binding=False,
            python_module=None,
            category_override=None,
            device_guard=False,
            device_check=DeviceCheckType.NoCheck,
            loc=f.loc,
            cpp_no_default_args=set(),
            is_abstract=f.is_abstract,
            has_composite_implicit_autograd_kernel=False,
            has_composite_implicit_autograd_nested_tensor_kernel=False,
            has_composite_explicit_autograd_kernel=True,
            has_composite_explicit_autograd_non_functional_kernel=False,
            # Every generated NativeFunction gets a "generated" tag, so it's easy to tell
            # which NativeFunction objects did not come directly from native_functions.yaml.
            tags=tags,
            namespace=f.namespace,
        ),
        backend_metadata,
    )
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
# This function is responsible for adding generated NativeFunctions which don't appear
# explicitly in the codegen.
# You can inspect the full list of NativeFunctions yourself with the torchgen package, by running
# torchgen.parse_native_yaml("aten/src/ATen/native/native_functions.yaml", "aten/src/ATen/native/tags.yaml")
# (Maybe we should make a friendly API for this)
#
# Note: this function *mutates* its two inputs,
# adding the new NativeFunctions / BackendMetadata to them
def add_generated_native_functions(
    rs: list[NativeFunction],
    indices: dict[DispatchKey, dict[OperatorName, BackendMetadata]],
) -> None:
    """Fill in missing functional / out= NativeFunction variants.

    Mutates ``rs`` (appending generated NativeFunctions) and ``indices``
    (growing the BackendIndex with their kernel metadata).
    """
    # The main code for generating new NativeFunctions
    # First we group of NativeFunctions by schema kind,
    # then we detect which ones are missing and generate them.
    pre_grouped_native_functions = pre_group_native_functions(rs)
    for d in pre_grouped_native_functions.values():
        has_functional = SchemaKind.functional in d
        has_inplace = SchemaKind.inplace in d
        has_mutable = SchemaKind.mutable in d
        has_out = SchemaKind.out in d

        # We automatically generate a few native functions that don't exist in the yaml, for a few reasons:
        # (1) If an operator has an inplace/out= variant but no functional variant, we can generate
        #     a simple functional variant that the functionalization pass can consume.
        # (2) If an operator has an inplace or functional but no out= variant, we generate an out=
        #     variant, mostly so we can easily pair up functions into NativeFunctionsGroup,
        #     while maintaining the constraint that the out= variant is "required".
        if has_mutable or has_inplace or has_out or has_functional:
            # Don't bother generating functions trio's for native functions that bypass the dispatcher.
            are_manual = all(f.manual_cpp_binding for f in d.values())
            # Don't bother generating functional + out= variants for view operators
            # set_ is technically an inplace_view, but for now it is treated
            # as a normal inplace op in the codegen
            has_view_ops = any(
                f.is_view_op and str(f.func.name.name) != "set_" for f in d.values()
            )
            # Don't generate the other variants for CompositeImplicitAutograd operators.
            # We could probably do this, but the main benefit of generating the function triplets
            # is for transforms that need them, and transforms don't need to act directly
            # on CompositeImplicitAutograd operators (since we let them decompose).
            are_composite_implicit = all(
                f.has_composite_implicit_autograd_kernel for f in d.values()
            )
            if are_manual or has_view_ops or are_composite_implicit:
                continue
            if has_out and len(d.values()) == 1:
                # Note: [Out ops with functional variants that don't get grouped properly]
                # In theory we could validly have an out= operator in native_functions.yaml
                # that has no other variants.
                # But today, all of the operators where that's the case actually do have
                # functional variants, that we are just unable to pair up properly.
                # I think banning this all together is probably safer
                # (you can always add a functional variant yourself if you want to add a new out= operator).
                #
                # We should probably fix the existing cases; this check is to prevent us from adding more over time.
                if (
                    str(d[SchemaKind.out].func.name)
                    not in OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY
                ):
                    raise AssertionError(
                        f"Found an out= operator that we could not find any other variants of: {str(d[SchemaKind.out].func)}"
                    )
                continue

            # Some inplace ops that have problematic schemas (that we should fix), which prevent us
            # from generating out= and functional variants
            if (
                has_inplace
                and str(d[SchemaKind.inplace].func.name)
                in INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY
            ):
                continue

            # Pick the variant we derive the generated ops from, preferring
            # inplace > mutable > out > functional.
            base_fn = (
                d[SchemaKind.inplace]
                if has_inplace
                else d[SchemaKind.mutable]
                if has_mutable
                else d[SchemaKind.out]
                if has_out
                else d[SchemaKind.functional]
            )

            # Note: [Mutable ops that cannot get an out variant]
            # We can only generate an out= variant if either:
            # - the original function has tensor-like returns (since we can convert them to out kwargs)
            # - or it's inplace (since we can convert `self` to an out kwarg)
            # There are only two functions that don't fit this criteria today though,
            # and they both look like they should be fixed to be out= variants,
            # so if feels safer to ban this schema all-together
            base_fn_valid = base_fn.func.kind() == SchemaKind.inplace or any(
                r.type.is_tensor_like() for r in base_fn.func.returns
            )
            # Note: [Loosen the assertion that all functional should have out variant]
            # By design all functional operators should have our variants. The needs_out check
            # is loosening this requirement, changing it to only generate out variant if there's
            # an `autogen` block in the native function, in the long run it should be removed.
            # FIXME: Remove this after figuring out CI job failures related to min, max, mean
            needs_out = any("out" in str(op_name) for op_name in base_fn.autogen)
            gets_out_variant = not has_out and base_fn_valid and needs_out
            if not has_out and not base_fn_valid:
                if (
                    str(base_fn.func.name)
                    not in MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
                    and str(base_fn.func.name)
                    not in FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
                ):
                    raise AssertionError(
                        f"""Found an operator that we could not generate an out= variant for: {str(base_fn.func)}.
This type of operators don't have tensor-like return, making it difficult to generate a proper out= variant. If
out= variant is not needed, please add the function name into FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT list."""
                    )

            # Generate an out= variant
            if gets_out_variant:
                fn, metadata = generate_function(base_fn, SchemaKind.out)
                d[SchemaKind.out] = fn
                BackendIndex.grow_index(indices, metadata)
                rs.append(fn)

            # Generate a functional variant, but only do it if the operator got an out= variant
            # (Functional variants are only useful if we can group up the variants,
            # which we can only do if they have an out= variant)
            if not has_functional and (has_out or gets_out_variant):
                fn, metadata = generate_function(base_fn, SchemaKind.functional)
                d[SchemaKind.functional] = fn
                BackendIndex.grow_index(indices, metadata)
                rs.append(fn)
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
def return_str(rets: tuple[Return, ...], names: list[str]) -> str:
    """Render a C++ return statement for the given returns/variable names.

    Empty -> "", single -> "return name;", multiple -> a tuple-construction
    return using the dispatcher's C++ return type.
    """
    assert len(rets) == len(names)
    count = len(names)
    if count == 0:
        return ""
    if count == 1:
        return f"return {names[0]};"
    joined = ", ".join(names)
    return f"return {dispatcher.returns_type(rets).cpp_type()}({joined});"
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
# Given a function, and the name of a variable corresponding to the output of that function,
# gather up all of the individual returns that are not aliased
def gather_nonaliased_inner_rets(func: FunctionSchema, out_var: str) -> list[str]:
    """Collect C++ expressions for the non-aliased returns of ``func``.

    When ``func`` has multiple returns, ``out_var`` is a tuple and each
    non-aliased slot is addressed via std::get<i>; otherwise out_var itself
    is the (single) return.
    """
    returns_tuple = len(func.returns) > 1
    exprs: list[str] = []
    for idx, alias in enumerate(func.aliased_return_names()):
        if alias is not None:
            # Aliased returns are handled by the caller; skip them here.
            continue
        exprs.append(f"std::get<{idx}>({out_var})" if returns_tuple else out_var)
    return exprs
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
# Generates functional kernels in terms of their inplace.mutable counterparts.
# We only do this for "generated" NativeFunctions
@with_native_function
def gen_composite_functional_kernel(g: NativeFunctionsGroup) -> str | None:
    """Emit the C++ body of a generated functional op.

    The kernel clones every mutable input, calls the (non-generated) inplace
    or mutable sibling on the clones, and returns the clones alongside any
    non-aliased outputs.  Returns None for non-generated functional ops.
    """
    # We should only be generating these for code-generated NativeFunctions
    if "generated" not in g.functional.tags:
        return None
    # And we always write the kernel for a generated op in terms of a non-generated op.
    if g.inplace is not None and "generated" not in g.inplace.tags:
        target_f = g.inplace
    elif g.mutable is not None and "generated" not in g.mutable.tags:
        target_f = g.mutable
    else:
        # We should be guaranteed to have a valid inplace/mutable variant to call into.
        # See Note: [Mutable Ops Not Using Functionalization]
        raise AssertionError(str(g.functional.func))

    sig = DispatcherSignature(g.functional.func)
    target_sig = DispatcherSignature(target_f.func)

    context: list[Binding | Expr] = []
    clone_mutable_inputs = []
    cloned_return_names = []
    # We can't just directly pass all of the arguments from the functional op into the mutating op.
    # We need to check for which inputs to the mutating operator are mutable,
    # and clone those inputs first.
    for a_curr, a_tgt in zip(
        dispatcher.jit_arguments(g.functional.func),
        dispatcher.jit_arguments(target_f.func),
    ):
        if a_tgt.annotation is not None and a_tgt.annotation.is_write:
            clone_mutable_inputs.append(
                f"auto {a_curr.name}_clone = clone_arg({a_curr.name});"
            )
            context.append(
                Expr(
                    expr=f"{a_curr.name}_clone",
                    type=dispatcher.argument_type(a_curr, binds=a_curr.name),
                )
            )
            # Invariant: mutable arguments on the inner mutable op are always returns on the functional op.
            cloned_return_names.append(f"{a_curr.name}_clone")
        else:
            context.append(dispatcher.argument(a_curr))
    exprs = ", ".join([e.expr for e in translate(context, target_sig.arguments())])

    out_name = "output"
    # Only bind the call result when the target op actually returns something.
    maybe_assign = f"auto {out_name} = " if len(target_f.func.returns) > 0 else ""
    inner_return_names = gather_nonaliased_inner_rets(target_f.func, out_name)
    ret_str = return_str(
        g.functional.func.returns, inner_return_names + cloned_return_names
    )

    clone_mutable_inputs_str = "\n".join(clone_mutable_inputs)
    return f"""
{sig.defn(name=sig.name() + ("_symint" if g.out.func.has_symint() else ""))} {{
  {clone_mutable_inputs_str}
  {maybe_assign}at::_ops::{target_f.func.name.unambiguous_name()}::call({exprs});
  {ret_str}
}}
"""
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
# Generates out= kernels in terms of their functional counterparts.
# We only do this for "generated" NativeFunctions
@with_native_function
def gen_composite_out_kernel(g: NativeFunctionsGroup) -> str | None:
    """Emit the C++ body of a generated out= op.

    The kernel calls the functional sibling, then resizes and copies each
    result into the corresponding out= argument, returning aliased inputs
    (or functional outputs) to match the out= schema.  Returns None for
    non-generated out= ops.
    """
    # We should only be generating these for code-generated NativeFunctions
    if "generated" not in g.out.tags:
        return None
    # And we always write the kernel for the out= op in terms of the functional.
    # Note that the functional op might have also been generated, but we don't have to
    # worry about cycles, because the generated functional kernels are always implemented
    # in terms of non-generated kernels (see gen_composite_functional_kernel).

    sig = DispatcherSignature(g.out.func)
    target_sig = DispatcherSignature(g.functional.func)

    exprs = ", ".join(
        [e.expr for e in translate(sig.arguments(), target_sig.arguments())]
    )

    copy_outs = []
    out_name = "tmp_output"
    for i, out_arg in enumerate(g.out.func.arguments.out):
        # Single-return functional ops yield tmp_output directly; multi-return
        # ones are unpacked with std::get.
        functional_return_name = (
            out_name
            if len(g.functional.func.returns) == 1
            else f"std::get<{i}>({out_name})"
        )
        copy_outs.append(
            f"""\
  resize_out_helper({out_arg.name}, {functional_return_name});
  copy_arg({out_arg.name}, {functional_return_name});"""
        )

    rets = []
    # For each return arg in the calling (out=) operator,
    # If it corresponds to an aliased input, return the input.
    # Otherwise, return the corresponding output from calling the functional operator.
    for i, ret_name in enumerate(g.out.func.aliased_return_names()):
        if ret_name is not None:
            rets.append(ret_name)
        else:
            functional_return_name = (
                out_name
                if len(g.functional.func.returns) == 1
                else f"std::get<{i}>({out_name})"
            )
            rets.append(functional_return_name)

    copy_outs_str = "\n".join(copy_outs)

    # Kernel name needs to follow the naming convention defined in `generate_function()`
    return f"""
{sig.defn(name=g.out.func.name.unambiguous_name() + ("_symint" if g.out.func.has_symint() else ""))} {{
  auto {out_name} = at::_ops::{g.functional.func.name.unambiguous_name()}::call({exprs});
  {copy_outs_str}
  {return_str(g.out.func.returns, rets)}
}}
"""
|
minigpt2/lib/python3.10/site-packages/torchgen/operator_versions/__init__.py
ADDED
|
File without changes
|
minigpt2/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (180 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders.cpython-310.pyc
ADDED
|
Binary file (9.85 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders_constant.cpython-310.pyc
ADDED
|
Binary file (439 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py
ADDED
|
@@ -0,0 +1,395 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
from enum import Enum
|
| 7 |
+
from operator import itemgetter
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from typing import Any
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
from torch.jit.generate_bytecode import generate_upgraders_bytecode
|
| 13 |
+
from torchgen.code_template import CodeTemplate
|
| 14 |
+
from torchgen.operator_versions.gen_mobile_upgraders_constant import (
|
| 15 |
+
MOBILE_UPGRADERS_HEADER_DESCRIPTION,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class ByteCode(Enum):
    # One member per table of a serialized mobile bytecode function.
    # Member names match the YAML table keys (looked up via ByteCode[table_name]
    # in write_cpp); the numeric values are never read in this file and serve
    # only to make the members distinct.
    instructions = 1
    constants = 2
    types = 3
    operators = 4
    register_size = 5
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
EXCLUDED_OP_SET = [
|
| 28 |
+
"aten::full.names",
|
| 29 |
+
"aten::full.out",
|
| 30 |
+
"aten::full",
|
| 31 |
+
]
|
| 32 |
+
|
| 33 |
+
EXCLUE_UPGRADER_SET = ["full_0_4", "full_out_0_4"]
|
| 34 |
+
|
| 35 |
+
ONE_INSTRUCTION = CodeTemplate(
|
| 36 |
+
"""
|
| 37 |
+
Instruction{OpCode::${operator_name}, ${X}, ${N}},"""
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
INSTRUCTION_LIST = CodeTemplate(
|
| 41 |
+
"""std::vector<Instruction>({
|
| 42 |
+
${instruction_list}
|
| 43 |
+
}), // instructions list"""
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
ONE_CONSTANT = CodeTemplate(
|
| 47 |
+
"""
|
| 48 |
+
c10::IValue(${constant}),"""
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
CONSTANT_LIST = CodeTemplate(
|
| 52 |
+
"""std::vector<c10::IValue>({
|
| 53 |
+
${constant_list}
|
| 54 |
+
}), // constants list"""
|
| 55 |
+
)
|
| 56 |
+
|
| 57 |
+
CONSTANTS_LIST_EMPTY = """std::vector<c10::IValue>(), // constants list"""
|
| 58 |
+
|
| 59 |
+
ONE_TYPE = CodeTemplate("""c10::parseType("${type_str}"),""")
|
| 60 |
+
|
| 61 |
+
TYPE_LIST = CodeTemplate(
|
| 62 |
+
"""std::vector<c10::TypePtr>({
|
| 63 |
+
${type_list}
|
| 64 |
+
}), // types list"""
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
TYPE_LIST_EMPTY = """std::vector<c10::TypePtr>(), // types list"""
|
| 68 |
+
|
| 69 |
+
ONE_OPERATOTR_STRING = CodeTemplate(
|
| 70 |
+
"""
|
| 71 |
+
OperatorString({"${operator_name}", "${overload_name}", ${num_of_args}}),"""
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
OPERATOR_STRING_LIST = CodeTemplate(
|
| 75 |
+
"""
|
| 76 |
+
std::vector<OperatorString>({
|
| 77 |
+
${operator_string_list}
|
| 78 |
+
}), // operators list"""
|
| 79 |
+
)
|
| 80 |
+
|
| 81 |
+
ONE_UPGRADER_FUNCTION = CodeTemplate(
|
| 82 |
+
"""
|
| 83 |
+
mobile::Function::registerFunc(
|
| 84 |
+
"${upgrader_name}",
|
| 85 |
+
${instruction_list},
|
| 86 |
+
${constant_list},
|
| 87 |
+
${type_list},
|
| 88 |
+
${register_size}
|
| 89 |
+
)"""
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
ONE_UPGRADER_SRC = CodeTemplate(
|
| 93 |
+
"""
|
| 94 |
+
ByteCodeFunctionWithOperator({
|
| 95 |
+
${bytecode_function},
|
| 96 |
+
${operator_string_list}
|
| 97 |
+
}),"""
|
| 98 |
+
)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
ONE_UPGRADER_IN_VERSION_MAP = CodeTemplate(
|
| 102 |
+
"""Upgrader({${upgrader_min_version}, ${upgrader_max_version}, "${upgrader_name}", ${bytecode_func_index}})"""
|
| 103 |
+
) # noqa: E501
|
| 104 |
+
|
| 105 |
+
ONE_OPERATOR_IN_VERSION_MAP = CodeTemplate(
|
| 106 |
+
"""
|
| 107 |
+
{std::string("${operator_name}"),
|
| 108 |
+
std::vector<Upgrader>({
|
| 109 |
+
${upgrader_list_in_version_map}
|
| 110 |
+
})},"""
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
OPERATOR_VERSION_MAP = CodeTemplate(
|
| 115 |
+
"""
|
| 116 |
+
const std::unordered_map<std::string, std::vector<Upgrader>>
|
| 117 |
+
getOperatorVersionMapForMobile() {
|
| 118 |
+
static std::unordered_map<std::string, std::vector<Upgrader>>
|
| 119 |
+
operatorVersionMapForMobile({
|
| 120 |
+
${operator_list_in_version_map}
|
| 121 |
+
});
|
| 122 |
+
return operatorVersionMapForMobile;
|
| 123 |
+
}
|
| 124 |
+
"""
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
UPGRADER_CPP_SRC = CodeTemplate(
|
| 129 |
+
MOBILE_UPGRADERS_HEADER_DESCRIPTION
|
| 130 |
+
+ """
|
| 131 |
+
#include <caffe2/serialize/versions.h>
|
| 132 |
+
#include <torch/csrc/jit/mobile/upgrader_mobile.h>
|
| 133 |
+
|
| 134 |
+
namespace c10 {
|
| 135 |
+
TypePtr parseType(const std::string& pythonStr);
|
| 136 |
+
} // namespace c10
|
| 137 |
+
|
| 138 |
+
namespace torch {
|
| 139 |
+
namespace jit {
|
| 140 |
+
|
| 141 |
+
// clang-format off
|
| 142 |
+
|
| 143 |
+
// From operator_versions_map
|
| 144 |
+
${operator_version_map}
|
| 145 |
+
|
| 146 |
+
const std::vector<ByteCodeFunctionWithOperator>& getUpgraderBytecodeList() {
|
| 147 |
+
auto generate_upgrader_bytecode_list = []() {
|
| 148 |
+
std::vector<ByteCodeFunctionWithOperator> upgrader_function_list({
|
| 149 |
+
${upgrader_bytecode}
|
| 150 |
+
});
|
| 151 |
+
for (const auto& upgrader_function : upgrader_function_list) {
|
| 152 |
+
for (const auto& op : upgrader_function.operators) {
|
| 153 |
+
upgrader_function.function.append_operator(
|
| 154 |
+
op.name,
|
| 155 |
+
op.overload_name,
|
| 156 |
+
op.num_specified_args);
|
| 157 |
+
}
|
| 158 |
+
}
|
| 159 |
+
return upgrader_function_list;
|
| 160 |
+
};
|
| 161 |
+
static std::vector<ByteCodeFunctionWithOperator> upgraderBytecodeList =
|
| 162 |
+
generate_upgrader_bytecode_list();
|
| 163 |
+
return upgraderBytecodeList;
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
// clang-format on
|
| 167 |
+
|
| 168 |
+
} // namespace jit
|
| 169 |
+
} // namespace torch
|
| 170 |
+
"""
|
| 171 |
+
)
|
| 172 |
+
|
| 173 |
+
UPGRADER_MOBILE_FILE_NAME = "upgrader_mobile.cpp"
|
| 174 |
+
|
| 175 |
+
UPGRADER_ELEMENT = CodeTemplate(
|
| 176 |
+
"""\
|
| 177 |
+
Upgrader({${min_version}, ${max_version}, ${operator_name}, ${index}}),
|
| 178 |
+
"""
|
| 179 |
+
)
|
| 180 |
+
|
| 181 |
+
PER_OPERATOR_UPGRADER_LIST = CodeTemplate(
|
| 182 |
+
"""\
|
| 183 |
+
{
|
| 184 |
+
std::string(${operator_name}),
|
| 185 |
+
std::vector<Upgrader>({${upgrader_list}});
|
| 186 |
+
}
|
| 187 |
+
"""
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def construct_instruction(instruction_list_from_yaml: list[Any]) -> str:
    """Render YAML instruction triples (opcode, X, N) as a C++
    ``std::vector<Instruction>`` literal."""
    rendered = [
        ONE_INSTRUCTION.substitute(
            operator_name=ins[0],
            X=ins[1],
            N=ins[2],
        )
        for ins in instruction_list_from_yaml
    ]
    return INSTRUCTION_LIST.substitute(
        instruction_list="".join(rendered).lstrip("\n")
    )
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def construct_constants(constants_list_from_yaml: list[Any]) -> str:
    """Render YAML constants as a C++ ``std::vector<c10::IValue>`` literal.

    Supports str, bool, None and int constants; anything else raises
    ValueError so the generator fails loudly instead of emitting bad C++.
    """
    rendered_parts = []
    for raw_value in constants_list_from_yaml:
        # NOTE: bool is a subclass of int in Python, so the bool branch must
        # come before the int branch.
        if isinstance(raw_value, str):
            # Quote string constants for the C++ source.
            cpp_literal = f'"{raw_value}"'
        elif isinstance(raw_value, bool):
            cpp_literal = "true" if raw_value else "false"
        elif raw_value is None:
            # Empty argument list: c10::IValue() default-constructs to None.
            cpp_literal = ""
        elif isinstance(raw_value, int):
            cpp_literal = str(raw_value)
        else:
            raise ValueError(
                f"The type of {raw_value} is {type(raw_value)}. "
                "Please add change in construct_constants function in gen_mobile_upgraders.py."
            )
        rendered_parts.append(ONE_CONSTANT.substitute(constant=cpp_literal))
    if not rendered_parts:
        return CONSTANTS_LIST_EMPTY
    return CONSTANT_LIST.substitute(
        constant_list="".join(rendered_parts).lstrip("\n")
    )
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def construct_operators(operator_list_from_yaml: list[Any]) -> str:
    """Render YAML operator triples (name, overload, num args) as a C++
    ``std::vector<OperatorString>`` literal."""
    rendered = [
        ONE_OPERATOTR_STRING.substitute(
            operator_name=op_entry[0],
            overload_name=op_entry[1],
            num_of_args=op_entry[2],
        )
        for op_entry in operator_list_from_yaml
    ]
    return OPERATOR_STRING_LIST.substitute(
        operator_string_list="".join(rendered).lstrip("\n")
    )
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def construct_types(types_tr_list_from_yaml: list[Any]) -> str:
    """Render YAML type strings as a C++ ``std::vector<c10::TypePtr>`` literal."""
    rendered = [
        ONE_TYPE.substitute(type_str=type_str)
        for type_str in types_tr_list_from_yaml
    ]
    if not rendered:
        return TYPE_LIST_EMPTY
    return TYPE_LIST.substitute(type_list="".join(rendered).lstrip("\n"))
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
def construct_register_size(register_size_from_yaml: int) -> str:
    """Validate and stringify the register size read from YAML.

    Raises:
        ValueError: if the value is not an int.
    """
    if not isinstance(register_size_from_yaml, int):
        # Bug fix: the second line of this message was missing the ``f``
        # prefix, so ``{type(...)}`` was emitted literally instead of the
        # actual type.
        raise ValueError(
            f"Input register size is {register_size_from_yaml} and "
            f"its type is {type(register_size_from_yaml)}. An int type is expected."
        )
    return str(register_size_from_yaml)
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def construct_version_maps(
    upgrader_bytecode_function_to_index_map: dict[str, Any]
) -> str:
    """Render the ``getOperatorVersionMapForMobile()`` C++ function body from
    torch's operator version map, sorted by operator name."""
    version_map = torch._C._get_operator_version_map()
    sorted_version_map = dict(sorted(version_map.items(), key=itemgetter(0)))  # type: ignore[no-any-return]

    per_operator_parts = []
    for op_name, upgrader_entries in sorted_version_map.items():
        # TODO: remove the skip after these two operators schemas are fixed
        if op_name in EXCLUDED_OP_SET:
            continue
        upgrader_ranges = torch._C._get_upgrader_ranges(op_name)
        assert len(upgrader_ranges) == len(upgrader_entries)
        upgrader_parts = []
        for idx, entry in enumerate(upgrader_entries):
            name = entry.upgrader_name
            upgrader_parts.append(
                ONE_UPGRADER_IN_VERSION_MAP.substitute(
                    upgrader_min_version=upgrader_ranges[idx].min_version,
                    upgrader_max_version=upgrader_ranges[idx].max_version,
                    upgrader_name=name,
                    bytecode_func_index=upgrader_bytecode_function_to_index_map[name],
                )
            )
        per_operator_parts.append(
            ONE_OPERATOR_IN_VERSION_MAP.substitute(
                operator_name=op_name,
                upgrader_list_in_version_map="".join(upgrader_parts),
            )
        )
    return OPERATOR_VERSION_MAP.substitute(
        operator_list_in_version_map="".join(per_operator_parts).lstrip("\n")
    )
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
def get_upgrader_bytecode_function_to_index_map(
    upgrader_dict: list[dict[str, Any]]
) -> dict[str, Any]:
    """Assign each non-excluded upgrader function name its position index in
    the generated bytecode list."""
    name_to_index: dict[str, Any] = {}
    next_index = 0
    for bytecode_entry in upgrader_dict:
        for name in bytecode_entry:
            if name in EXCLUE_UPGRADER_SET:
                continue
            name_to_index[name] = next_index
            next_index += 1
    return name_to_index
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
def write_cpp(cpp_path: str, upgrader_dict: list[dict[str, Any]]) -> None:
    """Generate ``upgrader_mobile.cpp`` under *cpp_path*.

    For every non-excluded upgrader in *upgrader_dict*, renders its bytecode
    tables (instructions, constants, types, operators, register size) through
    the C++ templates, then writes the combined file together with the
    operator version map.

    Args:
        cpp_path: directory where the generated file is written.
        upgrader_dict: one single-key dict per upgrader, mapping the upgrader
            name to its bytecode tables.
    """
    body_parts = []
    upgrader_bytecode_function_to_index_map = (
        get_upgrader_bytecode_function_to_index_map(upgrader_dict)
    )
    version_map_src = construct_version_maps(upgrader_bytecode_function_to_index_map)
    all_upgrader_src_string = []
    for upgrader_bytecode in upgrader_dict:
        for upgrader_name, bytecode in upgrader_bytecode.items():
            # TODO: remove the skip after these two operators schemas are fixed
            if upgrader_name in EXCLUE_UPGRADER_SET:
                continue
            instruction_list_str = ""
            constant_list_str = ""
            type_list_str = ""
            register_size_str = ""
            operator_list_str = ""
            for table_name, contents in bytecode.items():
                element = ByteCode[table_name]
                if element is ByteCode.instructions:
                    instruction_list_str = construct_instruction(contents)
                elif element is ByteCode.constants:
                    constant_list_str = construct_constants(contents)
                elif element is ByteCode.operators:
                    operator_list_str = construct_operators(contents)
                elif element is ByteCode.types:
                    type_list_str = construct_types(contents)
                elif element is ByteCode.register_size:
                    register_size_str = construct_register_size(contents)

            one_upgrader_function_string = ONE_UPGRADER_FUNCTION.substitute(
                upgrader_name=upgrader_name,
                instruction_list=instruction_list_str,
                constant_list=constant_list_str,
                type_list=type_list_str,
                register_size=register_size_str,
            )
            one_upgrader_src_string = ONE_UPGRADER_SRC.substitute(
                bytecode_function=one_upgrader_function_string.lstrip("\n"),
                operator_string_list=operator_list_str.lstrip("\n"),
            )
            all_upgrader_src_string.append(one_upgrader_src_string)

    upgrader_file_content = UPGRADER_CPP_SRC.substitute(
        operator_version_map=version_map_src,
        upgrader_bytecode="".join(all_upgrader_src_string).lstrip("\n"),
    )
    body_parts.append(upgrader_file_content)
    print("writing file to : ", cpp_path + "/" + UPGRADER_MOBILE_FILE_NAME)
    with open(os.path.join(cpp_path, UPGRADER_MOBILE_FILE_NAME), "wb") as out_file:
        # Bug fix: the joined ``body_parts`` was previously computed into an
        # unused local while the raw ``upgrader_file_content`` was written
        # instead; write the joined parts so every accumulated part reaches
        # the output file. (Also removed the unused ``body_string`` local.)
        out_file.write("".join(body_parts).encode("utf-8"))
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def sort_upgrader(upgrader_list: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Return the upgrader entries sorted by each dict's first key (the
    upgrader name); each entry is expected to hold a single key."""

    def first_key(entry: dict[str, Any]) -> str:
        return next(iter(entry))

    return sorted(upgrader_list, key=first_key)
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
def main() -> None:
    """Regenerate torch/csrc/jit/mobile/upgrader_mobile.cpp from the current
    upgrader bytecode."""
    sorted_upgrader_list = sort_upgrader(generate_upgraders_bytecode())
    for one_upgrader in sorted_upgrader_list:
        print("after sort upgrader : ", next(iter(one_upgrader)))

    # This file lives at <repo>/torchgen/operator_versions/, so two parents up
    # is the repository root.
    pytorch_dir = Path(__file__).resolve().parents[2]
    upgrader_path = pytorch_dir / "torch" / "csrc" / "jit" / "mobile"
    write_cpp(str(upgrader_path), sorted_upgrader_list)
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
if __name__ == "__main__":
|
| 395 |
+
main()
|
minigpt2/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders_constant.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MOBILE_UPGRADERS_HEADER_DESCRIPTION = """/**
|
| 2 |
+
* @generated
|
| 3 |
+
* This is an auto-generated file. Please do not modify it by hand.
|
| 4 |
+
* To re-generate, please run:
|
| 5 |
+
* cd ~/pytorch && python torchgen/operator_versions/gen_mobile_upgraders.py
|
| 6 |
+
*/
|
| 7 |
+
"""
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/README.md
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
If you add a file to this directory, you **MUST** update
|
| 2 |
+
`torch/CMakeLists.txt` and add the file as a dependency to
|
| 3 |
+
the `add_custom_command` call.
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/__init__.py
ADDED
|
File without changes
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/context.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
from typing import Callable
|
| 3 |
+
|
| 4 |
+
from torchgen.api.autograd import NativeFunctionWithDifferentiabilityInfo as NFWDI
|
| 5 |
+
from torchgen.context import native_function_manager
|
| 6 |
+
from torchgen.utils import T
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Like tools.api.context.with_native_function, but for
|
| 10 |
+
# NativeFunctionWithDifferentiabilityInfo.
|
| 11 |
+
def with_native_function_with_differentiability_info(
    func: Callable[[NFWDI], T]
) -> Callable[[NFWDI], T]:
    """Decorator: run *func* with the native-function context manager active
    for the wrapped function's underlying NativeFunction."""

    @functools.wraps(func)
    def inner(fn_with_info: NFWDI) -> T:
        with native_function_manager(fn_with_info.func):
            return func(fn_with_info)

    return inner
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# Like the above but with an additional dispatch key string argument
|
| 23 |
+
def with_native_function_with_differentiability_info_and_key(
    func: Callable[[NFWDI, str], T]
) -> Callable[[NFWDI, str], T]:
    """Decorator: run *func* with the native-function context manager active,
    forwarding an additional dispatch-key string argument."""

    @functools.wraps(func)
    def inner(fn_with_info: NFWDI, dispatch_key: str) -> T:
        with native_function_manager(fn_with_info.func):
            return func(fn_with_info, dispatch_key)

    return inner
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/deprecated.yaml
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Deprecated function signatures. These are exposed in Python, but not included
|
| 2 |
+
# in the error message suggestions.
|
| 3 |
+
|
| 4 |
+
- name: add(Tensor self, Scalar alpha, Tensor other) -> Tensor
|
| 5 |
+
aten: add(self, other, alpha)
|
| 6 |
+
|
| 7 |
+
- name: add_(Tensor(a!) self, Scalar alpha, Tensor other) -> Tensor(a!)
|
| 8 |
+
aten: add_(self, other, alpha)
|
| 9 |
+
|
| 10 |
+
- name: add(Tensor self, Scalar alpha, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
| 11 |
+
aten: add_out(out, self, other, alpha)
|
| 12 |
+
|
| 13 |
+
- name: addbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor
|
| 14 |
+
aten: addbmm(self, batch1, batch2, beta, alpha)
|
| 15 |
+
|
| 16 |
+
- name: addbmm_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor(a!)
|
| 17 |
+
aten: addbmm_(self, batch1, batch2, beta, alpha)
|
| 18 |
+
|
| 19 |
+
- name: addbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2, *, Tensor(a!) out) -> Tensor(a!)
|
| 20 |
+
aten: addbmm_out(out, self, batch1, batch2, beta, alpha)
|
| 21 |
+
|
| 22 |
+
- name: addbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2) -> Tensor
|
| 23 |
+
aten: addbmm(self, batch1, batch2, beta, 1)
|
| 24 |
+
|
| 25 |
+
- name: addbmm_(Scalar beta, Tensor(a!) self, Tensor batch1, Tensor batch2) -> Tensor(a!)
|
| 26 |
+
aten: addbmm_(self, batch1, batch2, beta, 1)
|
| 27 |
+
|
| 28 |
+
- name: addbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2, *, Tensor(a!) out) -> Tensor(a!)
|
| 29 |
+
aten: addbmm_out(out, self, batch1, batch2, beta, 1)
|
| 30 |
+
|
| 31 |
+
- name: addcdiv(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor
|
| 32 |
+
aten: addcdiv(self, tensor1, tensor2, value)
|
| 33 |
+
|
| 34 |
+
- name: addcdiv_(Tensor(a!) self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor(a!)
|
| 35 |
+
aten: addcdiv_(self, tensor1, tensor2, value)
|
| 36 |
+
|
| 37 |
+
- name: addcdiv(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2, *, Tensor(a!) out) -> Tensor(a!)
|
| 38 |
+
aten: addcdiv_out(out, self, tensor1, tensor2, value)
|
| 39 |
+
|
| 40 |
+
- name: addcmul(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor
|
| 41 |
+
aten: addcmul(self, tensor1, tensor2, value)
|
| 42 |
+
|
| 43 |
+
- name: addcmul_(Tensor(a!) self, Scalar value, Tensor tensor1, Tensor tensor2) -> Tensor(a!)
|
| 44 |
+
aten: addcmul_(self, tensor1, tensor2, value)
|
| 45 |
+
|
| 46 |
+
- name: addcmul(Tensor self, Scalar value, Tensor tensor1, Tensor tensor2, *, Tensor(a!) out) -> Tensor(a!)
|
| 47 |
+
aten: addcmul_out(out, self, tensor1, tensor2, value)
|
| 48 |
+
|
| 49 |
+
- name: addmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor
|
| 50 |
+
aten: addmm(self, mat1, mat2, beta, alpha)
|
| 51 |
+
|
| 52 |
+
- name: addmm_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor(a!)
|
| 53 |
+
aten: addmm_(self, mat1, mat2, beta, alpha)
|
| 54 |
+
|
| 55 |
+
- name: addmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
|
| 56 |
+
aten: addmm_out(out, self, mat1, mat2, beta, alpha)
|
| 57 |
+
|
| 58 |
+
- name: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2) -> Tensor
|
| 59 |
+
aten: addmm(self, mat1, mat2, beta, 1)
|
| 60 |
+
|
| 61 |
+
- name: addmm_(Scalar beta, Tensor(a!) self, Tensor mat1, Tensor mat2) -> Tensor(a!)
|
| 62 |
+
aten: addmm_(self, mat1, mat2, beta, 1)
|
| 63 |
+
|
| 64 |
+
- name: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
|
| 65 |
+
aten: addmm_out(out, self, mat1, mat2, beta, 1)
|
| 66 |
+
|
| 67 |
+
- name: sspaddmm(Scalar beta, Tensor self, Scalar alpha, Tensor mat1, Tensor mat2) -> Tensor
|
| 68 |
+
aten: sspaddmm(self, mat1, mat2, beta, alpha)
|
| 69 |
+
|
| 70 |
+
- name: sspaddmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2) -> Tensor
|
| 71 |
+
aten: sspaddmm(self, mat1, mat2, beta, 1)
|
| 72 |
+
|
| 73 |
+
- name: addmv(Scalar beta, Tensor self, Scalar alpha, Tensor mat, Tensor vec) -> Tensor
|
| 74 |
+
aten: addmv(self, mat, vec, beta, alpha)
|
| 75 |
+
|
| 76 |
+
- name: addmv_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor mat, Tensor vec) -> Tensor(a!)
|
| 77 |
+
aten: addmv_(self, mat, vec, beta, alpha)
|
| 78 |
+
|
| 79 |
+
- name: addmv(Scalar beta, Tensor self, Scalar alpha, Tensor mat, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
|
| 80 |
+
aten: addmv_out(out, self, mat, vec, beta, alpha)
|
| 81 |
+
|
| 82 |
+
- name: addmv(Scalar beta, Tensor self, Tensor mat, Tensor vec) -> Tensor
|
| 83 |
+
aten: addmv(self, mat, vec, beta, 1)
|
| 84 |
+
|
| 85 |
+
- name: addmv_(Scalar beta, Tensor(a!) self, Tensor mat, Tensor vec) -> Tensor(a!)
|
| 86 |
+
aten: addmv_(self, mat, vec, beta, 1)
|
| 87 |
+
|
| 88 |
+
- name: addmv(Scalar beta, Tensor self, Tensor mat, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
|
| 89 |
+
aten: addmv_out(out, self, mat, vec, beta, 1)
|
| 90 |
+
|
| 91 |
+
- name: addr(Scalar beta, Tensor self, Scalar alpha, Tensor vec1, Tensor vec2) -> Tensor
|
| 92 |
+
aten: addr(self, vec1, vec2, beta, alpha)
|
| 93 |
+
|
| 94 |
+
- name: addr_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor vec1, Tensor vec2) -> Tensor(a!)
|
| 95 |
+
aten: addr_(self, vec1, vec2, beta, alpha)
|
| 96 |
+
|
| 97 |
+
- name: addr(Scalar beta, Tensor self, Scalar alpha, Tensor vec1, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
|
| 98 |
+
aten: addr_out(out, self, vec1, vec2, beta, alpha)
|
| 99 |
+
|
| 100 |
+
- name: addr(Scalar beta, Tensor self, Tensor vec1, Tensor vec2) -> Tensor
|
| 101 |
+
aten: addr(self, vec1, vec2, beta, 1)
|
| 102 |
+
|
| 103 |
+
- name: addr_(Scalar beta, Tensor(a!) self, Tensor vec1, Tensor vec2) -> Tensor(a!)
|
| 104 |
+
aten: addr_(self, vec1, vec2, beta, 1)
|
| 105 |
+
|
| 106 |
+
- name: addr(Scalar beta, Tensor self, Tensor vec1, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
|
| 107 |
+
aten: addr_out(out, self, vec1, vec2, beta, 1)
|
| 108 |
+
|
| 109 |
+
- name: baddbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor
|
| 110 |
+
aten: baddbmm(self, batch1, batch2, beta, alpha)
|
| 111 |
+
|
| 112 |
+
- name: baddbmm_(Scalar beta, Tensor(a!) self, Scalar alpha, Tensor batch1, Tensor batch2) -> Tensor(a!)
|
| 113 |
+
aten: baddbmm_(self, batch1, batch2, beta, alpha)
|
| 114 |
+
|
| 115 |
+
- name: baddbmm(Scalar beta, Tensor self, Scalar alpha, Tensor batch1, Tensor batch2, *, Tensor(a!) out) -> Tensor(a!)
|
| 116 |
+
aten: baddbmm_out(out, self, batch1, batch2, beta, alpha)
|
| 117 |
+
|
| 118 |
+
- name: baddbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2) -> Tensor
|
| 119 |
+
aten: baddbmm(self, batch1, batch2, beta, 1)
|
| 120 |
+
|
| 121 |
+
- name: baddbmm_(Scalar beta, Tensor(a!) self, Tensor batch1, Tensor batch2) -> Tensor(a!)
|
| 122 |
+
aten: baddbmm_(self, batch1, batch2, beta, 1)
|
| 123 |
+
|
| 124 |
+
- name: baddbmm(Scalar beta, Tensor self, Tensor batch1, Tensor batch2, *, Tensor(a!) out) -> Tensor(a!)
|
| 125 |
+
aten: baddbmm_out(out, self, batch1, batch2, beta, 1)
|
| 126 |
+
|
| 127 |
+
- name: sub(Tensor self, Scalar alpha, Tensor other) -> Tensor
|
| 128 |
+
aten: sub(self, other, alpha)
|
| 129 |
+
|
| 130 |
+
- name: sub_(Tensor(a!) self, Scalar alpha, Tensor other) -> Tensor(a!)
|
| 131 |
+
aten: sub_(self, other, alpha)
|
| 132 |
+
|
| 133 |
+
- name: sub(Tensor self, Scalar alpha, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
| 134 |
+
aten: sub_out(out, self, other, alpha)
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_autograd.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
To run this file by hand from the root of the PyTorch
|
| 3 |
+
repository, run:
|
| 4 |
+
|
| 5 |
+
python -m tools.autograd.gen_autograd \
|
| 6 |
+
aten/src/ATen/native/native_functions.yaml \
|
| 7 |
+
aten/src/ATen/native/tags.yaml \
|
| 8 |
+
$OUTPUT_DIR \
|
| 9 |
+
tools/autograd
|
| 10 |
+
|
| 11 |
+
Where $OUTPUT_DIR is where you would like the files to be
|
| 12 |
+
generated. In the full build system, OUTPUT_DIR is
|
| 13 |
+
torch/csrc/autograd/generated/
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
# gen_autograd.py generates C++ autograd functions and Python bindings.
|
| 17 |
+
#
|
| 18 |
+
# It delegates to the following scripts:
|
| 19 |
+
#
|
| 20 |
+
# gen_autograd_functions.py: generates subclasses of torch::autograd::Node
|
| 21 |
+
# gen_variable_type.py: generates VariableType.h which contains all tensor methods
|
| 22 |
+
# gen_python_functions.py: generates Python bindings to THPVariable
|
| 23 |
+
#
|
| 24 |
+
|
| 25 |
+
from __future__ import annotations
|
| 26 |
+
|
| 27 |
+
import argparse
|
| 28 |
+
import os
|
| 29 |
+
|
| 30 |
+
from torchgen.api import cpp
|
| 31 |
+
from torchgen.api.autograd import (
|
| 32 |
+
match_differentiability_info,
|
| 33 |
+
NativeFunctionWithDifferentiabilityInfo,
|
| 34 |
+
)
|
| 35 |
+
from torchgen.gen import parse_native_yaml
|
| 36 |
+
from torchgen.selective_build.selector import SelectiveBuilder
|
| 37 |
+
|
| 38 |
+
from . import gen_python_functions
|
| 39 |
+
from .gen_autograd_functions import (
|
| 40 |
+
gen_autograd_functions_lib,
|
| 41 |
+
gen_autograd_functions_python,
|
| 42 |
+
)
|
| 43 |
+
from .gen_inplace_or_view_type import gen_inplace_or_view_type
|
| 44 |
+
from .gen_trace_type import gen_trace_type
|
| 45 |
+
from .gen_variable_factories import gen_variable_factories
|
| 46 |
+
from .gen_variable_type import gen_variable_type
|
| 47 |
+
from .gen_view_funcs import gen_view_funcs
|
| 48 |
+
from .load_derivatives import load_derivatives
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def gen_autograd(
    native_functions_path: str,
    tags_path: str,
    out: str,
    autograd_dir: str,
    operator_selector: SelectiveBuilder,
    disable_autograd: bool = False,
) -> None:
    """Generate the autograd C++ sources into *out*.

    Loads derivatives.yaml and the native-function/tags YAML, matches each
    selected native function with its differentiability info, then invokes the
    individual generators (VariableType, InplaceOrView, trace type, autograd
    Functions, variable factories, ViewFuncs).

    Args:
        native_functions_path: path to native_functions.yaml.
        tags_path: path to tags.yaml.
        out: output directory for the generated sources.
        autograd_dir: directory holding derivatives.yaml and its templates/.
        operator_selector: selective-build filter; only functions selected for
            training get differentiability info attached.
        disable_autograd: when True, skip the VariableType, InplaceOrView and
            trace-type generators (the remaining generators still run).
    """
    # Parse and load derivatives.yaml
    differentiability_infos, used_dispatch_keys = load_derivatives(
        os.path.join(autograd_dir, "derivatives.yaml"), native_functions_path, tags_path
    )

    template_path = os.path.join(autograd_dir, "templates")

    native_funcs = parse_native_yaml(native_functions_path, tags_path).native_functions
    # Sort by C++ name so generation order (and thus output) is deterministic.
    fns = sorted(
        filter(
            operator_selector.is_native_function_selected_for_training, native_funcs
        ),
        key=lambda f: cpp.name(f.func),
    )
    fns_with_diff_infos: list[
        NativeFunctionWithDifferentiabilityInfo
    ] = match_differentiability_info(fns, differentiability_infos)

    # Generate VariableType.h/cpp
    if not disable_autograd:
        gen_variable_type(
            out,
            native_functions_path,
            tags_path,
            fns_with_diff_infos,
            template_path,
            used_dispatch_keys,
        )

        gen_inplace_or_view_type(
            out, native_functions_path, tags_path, fns_with_diff_infos, template_path
        )

        # operator filter not applied as tracing sources are excluded in selective build
        gen_trace_type(out, native_funcs, template_path)
    # Generate Functions.h/cpp
    gen_autograd_functions_lib(out, differentiability_infos, template_path)

    # Generate variable_factories.h
    gen_variable_factories(out, native_functions_path, tags_path, template_path)

    # Generate ViewFuncs.h/cpp
    gen_view_funcs(out, fns_with_diff_infos, template_path)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def gen_autograd_python(
    native_functions_path: str,
    tags_path: str,
    out: str,
    autograd_dir: str,
) -> None:
    """Generate the Python-binding side of the autograd codegen.

    Emits the Python grad_fn classes (python_functions.h/cpp) and the
    Python bindings for torch functions, using the yaml inputs under
    ``autograd_dir``.
    """
    # Resolve all inputs relative to the autograd directory up front.
    derivatives_yaml = os.path.join(autograd_dir, "derivatives.yaml")
    deprecated_yaml = os.path.join(autograd_dir, "deprecated.yaml")
    templates_dir = os.path.join(autograd_dir, "templates")

    infos, _ = load_derivatives(derivatives_yaml, native_functions_path, tags_path)

    # Generate Functions.h/cpp
    gen_autograd_functions_python(out, infos, templates_dir)

    # Generate Python bindings
    gen_python_functions.gen(
        out, native_functions_path, tags_path, deprecated_yaml, templates_dir
    )
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def main() -> None:
    """Command-line entry point: parse arguments and run :func:`gen_autograd`.

    Uses a no-op selective-build selector, i.e. all operators are generated.
    """
    parser = argparse.ArgumentParser(description="Generate autograd C++ files script")
    parser.add_argument(
        "native_functions", metavar="NATIVE", help="path to native_functions.yaml"
    )
    # Fix: metavar was copy-pasted as "NATIVE" from the argument above, which
    # made --help display the wrong placeholder for this positional argument.
    parser.add_argument("tags", metavar="TAGS", help="path to tags.yaml")
    parser.add_argument("out", metavar="OUT", help="path to output directory")
    parser.add_argument(
        "autograd", metavar="AUTOGRAD", help="path to autograd directory"
    )
    args = parser.parse_args()
    gen_autograd(
        args.native_functions,
        args.tags,
        args.out,
        args.autograd,
        SelectiveBuilder.get_nop_selector(),
    )


if __name__ == "__main__":
    main()
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_autograd_functions.py
ADDED
|
@@ -0,0 +1,925 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generates C++ autograd functions for the derivatives of ATen operations
|
| 2 |
+
#
|
| 3 |
+
# This writes two files:
|
| 4 |
+
# Functions.h/cpp: subclasses of autograd::Node
|
| 5 |
+
# python_functions.h/cpp: Python bindings for the above classes
|
| 6 |
+
#
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
from typing import Sequence
|
| 11 |
+
|
| 12 |
+
from torchgen.api.autograd import (
|
| 13 |
+
Derivative,
|
| 14 |
+
DifferentiabilityInfo,
|
| 15 |
+
SavedAttribute,
|
| 16 |
+
uses_retain_variables,
|
| 17 |
+
uses_single_grad,
|
| 18 |
+
)
|
| 19 |
+
from torchgen.api.types import (
|
| 20 |
+
ArrayRefCType,
|
| 21 |
+
BaseCppType,
|
| 22 |
+
BaseCType,
|
| 23 |
+
Binding,
|
| 24 |
+
boolT,
|
| 25 |
+
doubleT,
|
| 26 |
+
intArrayRefT,
|
| 27 |
+
iTensorListRefT,
|
| 28 |
+
ListCType,
|
| 29 |
+
longT,
|
| 30 |
+
MutRefCType,
|
| 31 |
+
OptionalCType,
|
| 32 |
+
optionalIntArrayRefT,
|
| 33 |
+
optionalSymIntArrayRefT,
|
| 34 |
+
scalarT,
|
| 35 |
+
stringT,
|
| 36 |
+
symIntArrayRefT,
|
| 37 |
+
SymIntT,
|
| 38 |
+
TENSOR_LIST_LIKE_CTYPES,
|
| 39 |
+
tensorListT,
|
| 40 |
+
tensorT,
|
| 41 |
+
VectorCType,
|
| 42 |
+
)
|
| 43 |
+
from torchgen.code_template import CodeTemplate
|
| 44 |
+
from torchgen.model import Argument, FunctionSchema
|
| 45 |
+
from torchgen.utils import FileManager
|
| 46 |
+
|
| 47 |
+
from .gen_inplace_or_view_type import VIEW_FUNCTIONS
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
FUNCTION_DECLARATION = CodeTemplate(
|
| 51 |
+
"""\
|
| 52 |
+
#ifdef _WIN32
|
| 53 |
+
struct ${op} : public ${superclass} {
|
| 54 |
+
TORCH_API ${op}() = default;
|
| 55 |
+
#else
|
| 56 |
+
struct TORCH_API ${op} : public ${superclass} {
|
| 57 |
+
#endif
|
| 58 |
+
using ${superclass}::${superclass};
|
| 59 |
+
variable_list apply(variable_list&& grads) override;
|
| 60 |
+
std::string name() const override { return "${op}"; }
|
| 61 |
+
void release_variables() override {
|
| 62 |
+
${thread_lock}
|
| 63 |
+
${release_variables}
|
| 64 |
+
}
|
| 65 |
+
${will_release_variables}
|
| 66 |
+
void compiled_args(CompiledNodeArgs& args) override;
|
| 67 |
+
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
|
| 68 |
+
${saved_variables}
|
| 69 |
+
${saved_list_sizes}
|
| 70 |
+
};
|
| 71 |
+
"""
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
WILL_RELEASE_VARIABLES = CodeTemplate(
|
| 75 |
+
"""\
|
| 76 |
+
bool retain_variables = true;
|
| 77 |
+
void will_release_variables() override {
|
| 78 |
+
retain_variables = false;
|
| 79 |
+
}
|
| 80 |
+
"""
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
FUNCTION_DEFINITION = CodeTemplate(
|
| 84 |
+
"""\
|
| 85 |
+
variable_list ${op}::apply(variable_list&& grads) {
|
| 86 |
+
${thread_lock}
|
| 87 |
+
${asserts}
|
| 88 |
+
IndexRangeGenerator gen;
|
| 89 |
+
${compute_index_ranges}
|
| 90 |
+
variable_list grad_inputs(gen.size());
|
| 91 |
+
${body}
|
| 92 |
+
return grad_inputs;
|
| 93 |
+
}
|
| 94 |
+
void ${op}::compiled_args(CompiledNodeArgs& args) {
|
| 95 |
+
${compiled_args}
|
| 96 |
+
}
|
| 97 |
+
variable_list ${op}::apply_with_saved(const variable_list& grads, SwapSavedVariables& saved) {
|
| 98 |
+
${apply_with_saved_before}
|
| 99 |
+
variable_list result = apply(variable_list(grads));
|
| 100 |
+
${apply_with_saved_after}
|
| 101 |
+
return result;
|
| 102 |
+
}
|
| 103 |
+
"""
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
GRAD_INPUT_MASK = CodeTemplate(
|
| 107 |
+
"""\
|
| 108 |
+
auto grad_input_mask = std::array<bool, ${n}>{
|
| 109 |
+
${masks}
|
| 110 |
+
};\
|
| 111 |
+
"""
|
| 112 |
+
)
|
| 113 |
+
|
| 114 |
+
DERIVATIVE_SINGLE = CodeTemplate(
|
| 115 |
+
"""\
|
| 116 |
+
if (task_should_compute_output({ ${name}_ix })) {
|
| 117 |
+
auto grad_result = ${derivative};
|
| 118 |
+
copy_range(grad_inputs, ${name}_ix, grad_result);
|
| 119 |
+
}
|
| 120 |
+
"""
|
| 121 |
+
)
|
| 122 |
+
|
| 123 |
+
# note(crcrpar): `self` argument and other optional positional argument
|
| 124 |
+
# of foreach functions are basically a list of n `Tensor`s thus iterating over
|
| 125 |
+
# `grads` in order to utilize and apply the existing derivative definitions
|
| 126 |
+
# to each `Tensor`(s) of `self`, and the others.
|
| 127 |
+
DERIVATIVE_SINGLE_FOREACH = CodeTemplate(
|
| 128 |
+
"""\
|
| 129 |
+
if (task_should_compute_output({ ${name}_ix })) {
|
| 130 |
+
std::vector<Tensor> grad_result;
|
| 131 |
+
grad_result.reserve(grads.size());
|
| 132 |
+
for (const auto & i : c10::irange(grads.size())) {
|
| 133 |
+
if (grads[i].defined()) {
|
| 134 |
+
grad_result.emplace_back(${derivative});
|
| 135 |
+
} else {
|
| 136 |
+
grad_result.emplace_back(Tensor());
|
| 137 |
+
}
|
| 138 |
+
}
|
| 139 |
+
copy_range(grad_inputs, ${name}_ix, grad_result);
|
| 140 |
+
}
|
| 141 |
+
"""
|
| 142 |
+
)
|
| 143 |
+
|
| 144 |
+
DERIVATIVE_MULTI_COPY_RANGE = CodeTemplate(
|
| 145 |
+
"""\
|
| 146 |
+
if (task_should_compute_output({ ${name}_ix })) {
|
| 147 |
+
copy_range(grad_inputs, ${name}_ix, std::get<${i}>(grad_result));
|
| 148 |
+
}
|
| 149 |
+
"""
|
| 150 |
+
)
|
| 151 |
+
|
| 152 |
+
DERIVATIVE_MULTI = CodeTemplate(
|
| 153 |
+
"""\
|
| 154 |
+
if (task_should_compute_output({ ${idx_ranges} })) {
|
| 155 |
+
${grad_input_mask}
|
| 156 |
+
auto grad_result = ${derivative};
|
| 157 |
+
${copy_ranges}
|
| 158 |
+
}
|
| 159 |
+
"""
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
# Generates python bindings
|
| 163 |
+
#
|
| 164 |
+
# This generates the definitions for:
|
| 165 |
+
# (1) The PyTypeObject for each backward grad_fn subclassing Node
|
| 166 |
+
# (2) The entry for PyTypeObject's tp_getset slot (an array of PyGetSetDef structs)
|
| 167 |
+
# We generate one PyGetSetDef struct for each of grad_fn's saved inputs and outputs
|
| 168 |
+
# Each PyGetSetDef has a function ptr to a getter, also defined here (3).
|
| 169 |
+
# (3) Getters for each of grad_fn's saved inputs and outputs.
|
| 170 |
+
#
|
| 171 |
+
PY_FUNCTION_DEFINITION = CodeTemplate(
|
| 172 |
+
"""\
|
| 173 |
+
static PyTypeObject ${op}Class;
|
| 174 |
+
addClass<${op}>(module, ${op}Class, "${op}", ${op}_properties);
|
| 175 |
+
"""
|
| 176 |
+
)
|
| 177 |
+
|
| 178 |
+
PY_FUNCTION_PROPS_AND_GETTERS = CodeTemplate(
|
| 179 |
+
"""\
|
| 180 |
+
${all_getter_definitions}
|
| 181 |
+
|
| 182 |
+
static struct PyGetSetDef ${op}_properties[] = {
|
| 183 |
+
THP_FUNCTION_DEFAULT_PROPERTIES,
|
| 184 |
+
${all_getsetdef_structs}
|
| 185 |
+
{nullptr} /* sentinel */
|
| 186 |
+
};
|
| 187 |
+
|
| 188 |
+
"""
|
| 189 |
+
)
|
| 190 |
+
|
| 191 |
+
PY_GETSETDEF_STRUCT = CodeTemplate(
|
| 192 |
+
"""\
|
| 193 |
+
{(char*)"_saved_${name}", (getter)THP${op}_${name}_getter, nullptr, nullptr, nullptr}"""
|
| 194 |
+
)
|
| 195 |
+
|
| 196 |
+
PY_RAW_GETSETDEF_STRUCT = CodeTemplate(
|
| 197 |
+
"""\
|
| 198 |
+
{(char*)"_raw_saved_${name}", (getter)THP${op}_${name}_raw_getter, nullptr, nullptr, nullptr}"""
|
| 199 |
+
)
|
| 200 |
+
|
| 201 |
+
# Getter templates
|
| 202 |
+
GETTER_DEFINITION = CodeTemplate(
|
| 203 |
+
"""\
|
| 204 |
+
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
|
| 205 |
+
HANDLE_TH_ERRORS
|
| 206 |
+
auto prop = static_cast<${op}*>(self->cdata.get())->${name};
|
| 207 |
+
${body}
|
| 208 |
+
END_HANDLE_TH_ERRORS
|
| 209 |
+
}
|
| 210 |
+
"""
|
| 211 |
+
)
|
| 212 |
+
|
| 213 |
+
GETTER_DEFINITION_SAVEDVAR = CodeTemplate(
|
| 214 |
+
"""\
|
| 215 |
+
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
|
| 216 |
+
HANDLE_TH_ERRORS
|
| 217 |
+
const auto& prop = static_cast<${op}*>(self->cdata.get())->${name}_;
|
| 218 |
+
${body}
|
| 219 |
+
END_HANDLE_TH_ERRORS
|
| 220 |
+
}
|
| 221 |
+
"""
|
| 222 |
+
)
|
| 223 |
+
|
| 224 |
+
GETTER_DEFINITION_RAW_SAVEDVAR = CodeTemplate(
|
| 225 |
+
"""\
|
| 226 |
+
PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
|
| 227 |
+
HANDLE_TH_ERRORS
|
| 228 |
+
const auto& prop = static_cast<${op}*>(self->cdata.get())->${name}_;
|
| 229 |
+
${body}
|
| 230 |
+
END_HANDLE_TH_ERRORS
|
| 231 |
+
}
|
| 232 |
+
"""
|
| 233 |
+
)
|
| 234 |
+
|
| 235 |
+
GETTER_DEFINITION_VEC_SAVEDVAR = CodeTemplate(
|
| 236 |
+
"""\
|
| 237 |
+
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
|
| 238 |
+
HANDLE_TH_ERRORS
|
| 239 |
+
const auto *node = static_cast<${op}*>(self->cdata.get());
|
| 240 |
+
const auto& prop = node->${name}_;
|
| 241 |
+
if (node->${name}_released_) {
|
| 242 |
+
PyErr_SetString(PyExc_RuntimeError, ERR_BACKWARD_TWICE);
|
| 243 |
+
return nullptr;
|
| 244 |
+
}
|
| 245 |
+
${body}
|
| 246 |
+
END_HANDLE_TH_ERRORS
|
| 247 |
+
}
|
| 248 |
+
"""
|
| 249 |
+
)
|
| 250 |
+
|
| 251 |
+
GETTER_DEFINITION_RAW_VEC_SAVEDVAR = CodeTemplate(
|
| 252 |
+
"""\
|
| 253 |
+
PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
|
| 254 |
+
HANDLE_TH_ERRORS
|
| 255 |
+
const auto *node = static_cast<${op}*>(self->cdata.get());
|
| 256 |
+
const auto& prop = node->${name}_;
|
| 257 |
+
if (node->${name}_released_) {
|
| 258 |
+
PyErr_SetString(PyExc_RuntimeError, ERR_BACKWARD_TWICE);
|
| 259 |
+
return nullptr;
|
| 260 |
+
}
|
| 261 |
+
${body}
|
| 262 |
+
END_HANDLE_TH_ERRORS
|
| 263 |
+
}
|
| 264 |
+
"""
|
| 265 |
+
)
|
| 266 |
+
|
| 267 |
+
GETTER_DEFINITION_OPT = CodeTemplate(
|
| 268 |
+
"""\
|
| 269 |
+
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
|
| 270 |
+
HANDLE_TH_ERRORS
|
| 271 |
+
auto opt_prop = static_cast<${op}*>(self->cdata.get())->${name};
|
| 272 |
+
if (!opt_prop.has_value()) {
|
| 273 |
+
Py_RETURN_NONE;
|
| 274 |
+
}
|
| 275 |
+
auto prop = opt_prop.value();
|
| 276 |
+
${body}
|
| 277 |
+
END_HANDLE_TH_ERRORS
|
| 278 |
+
}
|
| 279 |
+
"""
|
| 280 |
+
)
|
| 281 |
+
|
| 282 |
+
GETTER_DEFINITION_OPT_ARRAYREF = CodeTemplate(
|
| 283 |
+
"""\
|
| 284 |
+
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
|
| 285 |
+
HANDLE_TH_ERRORS
|
| 286 |
+
auto opt_prop = static_cast<${op}*>(self->cdata.get())->${name};
|
| 287 |
+
if (!opt_prop.list.has_value()) {
|
| 288 |
+
Py_RETURN_NONE;
|
| 289 |
+
}
|
| 290 |
+
auto prop = opt_prop.list.value();
|
| 291 |
+
${body}
|
| 292 |
+
END_HANDLE_TH_ERRORS
|
| 293 |
+
}
|
| 294 |
+
"""
|
| 295 |
+
)
|
| 296 |
+
|
| 297 |
+
# Getter body
|
| 298 |
+
GETTER_BODY_SAVEDVAR = """\
|
| 299 |
+
return THPVariable_Wrap(prop.unpack(self->cdata));
|
| 300 |
+
"""
|
| 301 |
+
|
| 302 |
+
GETTER_BODY_RAW_SAVEDVAR = """\
|
| 303 |
+
pybind11::object obj = pybind11::cast(prop, pybind11::return_value_policy::reference);
|
| 304 |
+
return obj.release().ptr();
|
| 305 |
+
"""
|
| 306 |
+
|
| 307 |
+
GETTER_BODY_VEC_SAVEDVAR = """\
|
| 308 |
+
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
|
| 309 |
+
for (auto i: c10::irange(prop.size())) {
|
| 310 |
+
PyTuple_SetItem(tup, (Py_ssize_t) i, THPVariable_Wrap(prop[i].unpack(self->cdata)));
|
| 311 |
+
}
|
| 312 |
+
return tup;
|
| 313 |
+
"""
|
| 314 |
+
|
| 315 |
+
GETTER_BODY_RAW_VEC_SAVEDVAR = """\
|
| 316 |
+
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
|
| 317 |
+
for (auto i : c10::irange(prop.size())) {
|
| 318 |
+
pybind11::object obj = pybind11::cast(prop[i], pybind11::return_value_policy::reference);
|
| 319 |
+
PyTuple_SetItem(tup, (Py_ssize_t) i, obj.release().ptr());
|
| 320 |
+
}
|
| 321 |
+
return tup;
|
| 322 |
+
"""
|
| 323 |
+
|
| 324 |
+
GETTER_BODY_ARRAYREF_LONG = """\
|
| 325 |
+
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
|
| 326 |
+
for (auto i : c10::irange(prop.size())) {
|
| 327 |
+
PyTuple_SetItem(tup, (Py_ssize_t) i, PyLong_FromUnsignedLong((uint64_t) prop[i]));
|
| 328 |
+
}
|
| 329 |
+
return tup;
|
| 330 |
+
"""
|
| 331 |
+
|
| 332 |
+
GETTER_BODY_ARRAYREF_SYMINT = """\
|
| 333 |
+
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
|
| 334 |
+
for (auto i : c10::irange(prop.size())) {
|
| 335 |
+
auto si = prop[i];
|
| 336 |
+
if (auto m = si.maybe_as_int()) {
|
| 337 |
+
PyTuple_SetItem(tup, (Py_ssize_t) i, PyLong_FromUnsignedLong(*m));
|
| 338 |
+
} else {
|
| 339 |
+
auto py_symint = py::cast(si).release().ptr();
|
| 340 |
+
PyTuple_SetItem(tup, (Py_ssize_t) i, py_symint);
|
| 341 |
+
}
|
| 342 |
+
}
|
| 343 |
+
return tup;
|
| 344 |
+
"""
|
| 345 |
+
|
| 346 |
+
GETTER_BODY_ARRAYREF_DOUBLE = """\
|
| 347 |
+
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
|
| 348 |
+
for (auto i : c10::irange(prop.size())) {
|
| 349 |
+
PyTuple_SetItem(tup, (Py_ssize_t) i, PyFloat_FromDouble((double) prop[i]));
|
| 350 |
+
}
|
| 351 |
+
return tup;
|
| 352 |
+
"""
|
| 353 |
+
|
| 354 |
+
GETTER_BODY_INT64_T = """\
|
| 355 |
+
return PyLong_FromUnsignedLong((int64_t) prop);
|
| 356 |
+
"""
|
| 357 |
+
|
| 358 |
+
GETTER_BODY_SYMINT = """\
|
| 359 |
+
if (auto m = prop.maybe_as_int()) {
|
| 360 |
+
return PyLong_FromUnsignedLong(*m);
|
| 361 |
+
} else {
|
| 362 |
+
return py::cast(prop).release().ptr();
|
| 363 |
+
}
|
| 364 |
+
"""
|
| 365 |
+
|
| 366 |
+
GETTER_BODY_DOUBLE = """\
|
| 367 |
+
return PyFloat_FromDouble((double) prop);
|
| 368 |
+
"""
|
| 369 |
+
|
| 370 |
+
GETTER_BODY_BOOL = """\
|
| 371 |
+
if (prop) {
|
| 372 |
+
Py_RETURN_TRUE;
|
| 373 |
+
} else {
|
| 374 |
+
Py_RETURN_FALSE;
|
| 375 |
+
}
|
| 376 |
+
"""
|
| 377 |
+
|
| 378 |
+
GETTER_BODY_STRING = """\
|
| 379 |
+
return PyUnicode_FromStringAndSize(prop.data(), prop.size());
|
| 380 |
+
"""
|
| 381 |
+
|
| 382 |
+
GETTER_BODY_SCALAR = """\
|
| 383 |
+
if (prop.isComplex()) {
|
| 384 |
+
auto cprop = prop.to<c10::complex<double>>();
|
| 385 |
+
return PyComplex_FromDoubles(cprop.real(), cprop.imag());
|
| 386 |
+
} else if (prop.isFloatingPoint()) {
|
| 387 |
+
return PyFloat_FromDouble(prop.to<double>());
|
| 388 |
+
} else if (prop.isIntegral(/*includeBool=*/false)) {
|
| 389 |
+
return PyLong_FromLong(prop.to<int64_t>());
|
| 390 |
+
} else if (prop.isBoolean()) {
|
| 391 |
+
if (prop.to<bool>()) {
|
| 392 |
+
Py_RETURN_TRUE;
|
| 393 |
+
} else {
|
| 394 |
+
Py_RETURN_FALSE;
|
| 395 |
+
}
|
| 396 |
+
} else {
|
| 397 |
+
PyErr_SetString(PyExc_RuntimeError, "Unknown scalar type");
|
| 398 |
+
return nullptr;
|
| 399 |
+
}
|
| 400 |
+
"""
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
GETTER_BODY_VEC_SCALAR = """\
|
| 404 |
+
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
|
| 405 |
+
for (auto i: c10::irange(prop.size())) {
|
| 406 |
+
if (prop[i].isComplex()) {
|
| 407 |
+
auto cprop = prop[i].to<c10::complex<double>>();
|
| 408 |
+
PyTuple_SetItem(tup, (Py_ssize_t) i, PyComplex_FromDoubles(cprop.real(), cprop.imag()));
|
| 409 |
+
} else if (prop[i].isFloatingPoint()) {
|
| 410 |
+
auto double_prop = prop[i].to<double>();
|
| 411 |
+
PyTuple_SetItem(tup, (Py_ssize_t) i, PyFloat_FromDouble(double_prop));
|
| 412 |
+
} else if (prop[i].isIntegral(/*includeBool=*/false)) {
|
| 413 |
+
auto long_prop = prop[i].to<int64_t>();
|
| 414 |
+
PyTuple_SetItem(tup, (Py_ssize_t) i, PyLong_FromLong(long_prop));
|
| 415 |
+
} else if (prop[i].isBoolean()) {
|
| 416 |
+
if (prop[i].to<bool>()) {
|
| 417 |
+
PyTuple_SetItem(tup, (Py_ssize_t) i, Py_True);
|
| 418 |
+
} else {
|
| 419 |
+
PyTuple_SetItem(tup, (Py_ssize_t) i, Py_False);
|
| 420 |
+
}
|
| 421 |
+
} else {
|
| 422 |
+
PyErr_SetString(PyExc_RuntimeError, "Unknown scalar type");
|
| 423 |
+
return nullptr;
|
| 424 |
+
}
|
| 425 |
+
}
|
| 426 |
+
return tup;
|
| 427 |
+
"""
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
# Maps a saved attribute's C++ type to the (getter definition template,
# getter body) pair used to generate its `_saved_*` Python property getter,
# for the simple scalar-like types not handled by a dedicated branch.
MISC_GETTER_DEFS = {
    OptionalCType(BaseCType(longT)): (GETTER_DEFINITION_OPT, GETTER_BODY_INT64_T),
    OptionalCType(BaseCType(SymIntT)): (GETTER_DEFINITION_OPT, GETTER_BODY_SYMINT),
    BaseCType(doubleT): (GETTER_DEFINITION, GETTER_BODY_DOUBLE),
    OptionalCType(BaseCType(doubleT)): (GETTER_DEFINITION_OPT, GETTER_BODY_DOUBLE),
    BaseCType(boolT): (GETTER_DEFINITION, GETTER_BODY_BOOL),
    BaseCType(scalarT): (GETTER_DEFINITION, GETTER_BODY_SCALAR),
    OptionalCType(BaseCType(scalarT)): (GETTER_DEFINITION_OPT, GETTER_BODY_SCALAR),
}
|
| 439 |
+
|
| 440 |
+
# These functions have backwards which cannot be traced, and so must have
|
| 441 |
+
# their backward functions traced opaquely.
|
| 442 |
+
# VIEW_FUNCTIONS are not traceable because they use as_strided, which
|
| 443 |
+
# has an untraceable backwards, see
|
| 444 |
+
# https://github.com/pytorch/pytorch/issues/4250
|
| 445 |
+
# TODO: This is probably not exhaustive, but it's a start
|
| 446 |
+
UNTRACEABLE_FUNCTIONS = VIEW_FUNCTIONS
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
def get_infos_with_derivatives_list(
    differentiability_infos: dict[FunctionSchema, dict[str, DifferentiabilityInfo]]
) -> list[DifferentiabilityInfo]:
    """Flatten the per-schema / per-dispatch-key mapping into one list,
    keeping only the infos that actually have arguments with derivatives."""
    return [
        info
        for per_dispatch_key in differentiability_infos.values()
        for info in per_dispatch_key.values()
        if info.args_with_derivatives
    ]
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
def gen_autograd_functions_lib(
    out: str,
    differentiability_infos: dict[FunctionSchema, dict[str, DifferentiabilityInfo]],
    template_path: str,
) -> None:
    """Functions.h and Functions.cpp body

    These contain the auto-generated subclasses of torch::autograd::Node
    for every differentiable torch function.
    """

    # get a 1D list of diffinfos, we do not need them to be per FunctionSchema/DispatchKey here
    # infos with the diff dispatchkeys but the same name will still be in the same shard.
    infos = get_infos_with_derivatives_list(differentiability_infos)
    declarations = [process_function(f, FUNCTION_DECLARATION) for f in infos]
    definitions = [process_function(f, FUNCTION_DEFINITION) for f in infos]

    file_basename = "Functions"
    fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
    for suffix in [".h", ".cpp"]:
        fname = file_basename + suffix
        fm.write_with_template(
            fname,
            fname,
            # Bind the loop variable as a default argument so the callable does
            # not capture `fname` late (flake8-bugbear B023).  Harmless today
            # if FileManager invokes it synchronously, but safe regardless.
            lambda fname=fname: {
                "generated_comment": "@"
                + f"generated from {fm.template_dir_for_comments()}/"
                + fname,
                "autograd_function_declarations": declarations,
                "autograd_function_definitions": definitions,
            },
        )
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def gen_autograd_functions_python(
    out: str,
    differentiability_infos: dict[FunctionSchema, dict[str, DifferentiabilityInfo]],
    template_path: str,
) -> None:
    """Generate python_functions.h/cpp: Python bindings for the generated
    autograd Node subclasses, with the .cpp implementation split into
    ``num_shards`` files to keep compile times manageable."""
    num_shards = 5
    fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)

    # Header: forward-declare one initializer per shard and call each of them.
    def _header_env() -> dict:
        shards = range(num_shards)
        return {
            "generated_comment": "@"
            + f"generated from {fm.template_dir_for_comments()}/python_functions.h",
            "shard_forward_declare": [
                f"void initialize_autogenerated_functions_{shard}(PyObject* module);"
                for shard in shards
            ],
            "shard_call": [
                f"initialize_autogenerated_functions_{shard}(module);"
                for shard in shards
            ],
        }

    fm.write("python_functions.h", _header_env)

    # get a 1D list of diffinfos, we do not need them to be per FunctionSchema/DispatchKey here
    # infos with the diff dispatchkeys but the same name will still be in the same shard.
    infos = get_infos_with_derivatives_list(differentiability_infos)

    # Per-info environment: type registration plus its property getters.
    def _shard_env(info: DifferentiabilityInfo) -> dict:
        return {
            "py_function_initializers": [
                process_function(info, PY_FUNCTION_DEFINITION)
            ],
            "py_function_props_and_getters": [
                process_function(info, PY_FUNCTION_PROPS_AND_GETTERS)
            ],
        }

    fm.write_sharded(
        "python_functions.cpp",
        infos,
        key_fn=lambda diff_info: diff_info.name,
        base_env={
            "generated_comment": "@"
            + f"generated from {fm.template_dir_for_comments()}/python_functions.cpp",
        },
        env_callable=_shard_env,
        num_shards=num_shards,
        sharded_keys={"py_function_initializers", "py_function_props_and_getters"},
    )
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
def process_function(info: DifferentiabilityInfo, template: CodeTemplate) -> str:
|
| 543 |
+
saved_variables: list[str] = []
|
| 544 |
+
release_variables: list[str] = []
|
| 545 |
+
saved_list_sizes: list[str] = []
|
| 546 |
+
unpack: list[str] = []
|
| 547 |
+
asserts: list[str] = []
|
| 548 |
+
compute_index_ranges: list[str] = []
|
| 549 |
+
getter_definitions: list[str] = []
|
| 550 |
+
py_getsetdef_structs: list[str] = []
|
| 551 |
+
compiled_args: list[str] = []
|
| 552 |
+
apply_with_saved_before: list[str] = []
|
| 553 |
+
apply_with_saved_after: list[str] = []
|
| 554 |
+
|
| 555 |
+
for arg in info.args_with_derivatives:
|
| 556 |
+
if arg.type in TENSOR_LIST_LIKE_CTYPES:
|
| 557 |
+
size = f"{arg.name}_size_"
|
| 558 |
+
saved_list_sizes.append(f"size_t {arg.name}_size_;")
|
| 559 |
+
else:
|
| 560 |
+
size = "1"
|
| 561 |
+
compute_index_ranges.append(f"auto {arg.name}_ix = gen.range({size});")
|
| 562 |
+
|
| 563 |
+
def save_var(var: SavedAttribute, is_output: bool) -> None:
|
| 564 |
+
name = var.nctype.name
|
| 565 |
+
type = var.nctype.type
|
| 566 |
+
should_append_getsetdef = True
|
| 567 |
+
should_append_raw_getsetdef = False
|
| 568 |
+
visit_name = name
|
| 569 |
+
uses_cpp_saved_variable_cls = False
|
| 570 |
+
|
| 571 |
+
if (
|
| 572 |
+
type == BaseCType(tensorT)
|
| 573 |
+
or type == OptionalCType(BaseCType(tensorT))
|
| 574 |
+
or type == MutRefCType(OptionalCType(BaseCType(tensorT)))
|
| 575 |
+
or (type == BaseCType(scalarT) and is_output)
|
| 576 |
+
):
|
| 577 |
+
uses_cpp_saved_variable_cls = True
|
| 578 |
+
saved_variables.append(f"SavedVariable {name}_;")
|
| 579 |
+
release_variables.append(f"{name}_.reset_data();")
|
| 580 |
+
ptr = "shared_from_this()" if is_output else ""
|
| 581 |
+
unpack.append(f"auto {name} = {name}_.unpack({ptr});")
|
| 582 |
+
getter_definitions.append(
|
| 583 |
+
GETTER_DEFINITION_SAVEDVAR.substitute(
|
| 584 |
+
op=info.op, name=name, body=GETTER_BODY_SAVEDVAR
|
| 585 |
+
)
|
| 586 |
+
)
|
| 587 |
+
getter_definitions.append(
|
| 588 |
+
GETTER_DEFINITION_RAW_SAVEDVAR.substitute(
|
| 589 |
+
op=info.op, name=name, body=GETTER_BODY_RAW_SAVEDVAR
|
| 590 |
+
)
|
| 591 |
+
)
|
| 592 |
+
should_append_raw_getsetdef = True
|
| 593 |
+
visit_name = f"{name}_"
|
| 594 |
+
elif (
|
| 595 |
+
type == BaseCType(tensorListT)
|
| 596 |
+
or type == BaseCType(iTensorListRefT)
|
| 597 |
+
or type == VectorCType(BaseCType(tensorT))
|
| 598 |
+
):
|
| 599 |
+
# note(crcrpar): [nuanced return type of out-of-place foreach functions]
|
| 600 |
+
# When an out-of-place foreach function whose return signature is `Tensor[]`
|
| 601 |
+
# spells out its backward definitions in `derivatives.yaml`, and some of them depend on
|
| 602 |
+
# `result`, `result`'s type is interpreted and treated as `std::vector<Tensor>`.
|
| 603 |
+
# An out-of-place foreach whose backwards rely on their output doesn't suffer from this
|
| 604 |
+
# difference if the definitions are codegen'ed.
|
| 605 |
+
# This special case is needed for `_foreach_pow.List` and `_foreach_pow.ScalarAndTensor`
|
| 606 |
+
# as of https://github.com/pytorch/pytorch/pull/105504.
|
| 607 |
+
if type == VectorCType(BaseCType(tensorT)):
|
| 608 |
+
assert (
|
| 609 |
+
info.func.func.name.name.base.startswith("_foreach") and is_output
|
| 610 |
+
)
|
| 611 |
+
uses_cpp_saved_variable_cls = True
|
| 612 |
+
saved_variables.append(f"std::vector<SavedVariable> {name}_;")
|
| 613 |
+
saved_variables.append(f"bool {name}_released_ = false;")
|
| 614 |
+
# Just clear() is sufficient, we don't need to loop and clear each variable.
|
| 615 |
+
# Because the SavedVariable owns a tensor and a grad_fn, removing the SavedVariable makes them go away as well.
|
| 616 |
+
release_variables.append(f"{name}_.clear();")
|
| 617 |
+
release_variables.append(f"{name}_released_ = true;")
|
| 618 |
+
ptr = "shared_from_this()" if is_output else "nullptr"
|
| 619 |
+
unpack.append(f"auto {name} = unpack_list({name}_, {ptr});")
|
| 620 |
+
asserts.append(f"TORCH_CHECK(!{name}_released_, ERR_BACKWARD_TWICE);")
|
| 621 |
+
getter_definitions.append(
|
| 622 |
+
GETTER_DEFINITION_VEC_SAVEDVAR.substitute(
|
| 623 |
+
op=info.op, name=name, body=GETTER_BODY_VEC_SAVEDVAR
|
| 624 |
+
)
|
| 625 |
+
)
|
| 626 |
+
getter_definitions.append(
|
| 627 |
+
GETTER_DEFINITION_RAW_VEC_SAVEDVAR.substitute(
|
| 628 |
+
op=info.op, name=name, body=GETTER_BODY_RAW_VEC_SAVEDVAR
|
| 629 |
+
)
|
| 630 |
+
)
|
| 631 |
+
should_append_raw_getsetdef = True
|
| 632 |
+
visit_name = f"{name}_"
|
| 633 |
+
elif type == ListCType(OptionalCType(BaseCType(tensorT))):
|
| 634 |
+
uses_cpp_saved_variable_cls = True
|
| 635 |
+
saved_variables.append(f"std::vector<SavedVariable> {name}_;")
|
| 636 |
+
saved_variables.append(f"bool {name}_released_ = false;")
|
| 637 |
+
# Just clear() is sufficient, we don't need to loop and clear each variable.
|
| 638 |
+
# Because the SavedVariable owns a tensor and a grad_fn, removing the SavedVariable makes them go away as well.
|
| 639 |
+
release_variables.append(f"{name}_.clear();")
|
| 640 |
+
release_variables.append(f"{name}_released_ = true;")
|
| 641 |
+
unpack.append(f"auto {name} = unpack_opt_list({name}_);")
|
| 642 |
+
asserts.append(f"TORCH_CHECK(!{name}_released_, ERR_BACKWARD_TWICE);")
|
| 643 |
+
getter_definitions.append(
|
| 644 |
+
GETTER_DEFINITION_VEC_SAVEDVAR.substitute(
|
| 645 |
+
op=info.op, name=name, body=GETTER_BODY_VEC_SAVEDVAR
|
| 646 |
+
)
|
| 647 |
+
)
|
| 648 |
+
getter_definitions.append(
|
| 649 |
+
GETTER_DEFINITION_RAW_VEC_SAVEDVAR.substitute(
|
| 650 |
+
op=info.op, name=name, body=GETTER_BODY_RAW_VEC_SAVEDVAR
|
| 651 |
+
)
|
| 652 |
+
)
|
| 653 |
+
should_append_raw_getsetdef = True
|
| 654 |
+
visit_name = f"{name}_"
|
| 655 |
+
elif type == BaseCType(intArrayRefT):
|
| 656 |
+
saved_variables.append(f"std::vector<int64_t> {name};")
|
| 657 |
+
getter_definitions.append(
|
| 658 |
+
GETTER_DEFINITION.substitute(
|
| 659 |
+
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_LONG
|
| 660 |
+
)
|
| 661 |
+
)
|
| 662 |
+
elif type == BaseCType(symIntArrayRefT):
|
| 663 |
+
saved_variables.append(f"std::vector<c10::SymInt> {name};")
|
| 664 |
+
getter_definitions.append(
|
| 665 |
+
GETTER_DEFINITION.substitute(
|
| 666 |
+
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_SYMINT
|
| 667 |
+
)
|
| 668 |
+
)
|
| 669 |
+
elif type == BaseCType(optionalIntArrayRefT):
|
| 670 |
+
saved_variables.append(f"c10::OptionalArray<int64_t> {name};")
|
| 671 |
+
getter_definitions.append(
|
| 672 |
+
GETTER_DEFINITION_OPT_ARRAYREF.substitute(
|
| 673 |
+
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_LONG
|
| 674 |
+
)
|
| 675 |
+
)
|
| 676 |
+
elif type == BaseCType(optionalSymIntArrayRefT):
|
| 677 |
+
saved_variables.append(f"c10::OptionalArray<c10::SymInt> {name};")
|
| 678 |
+
getter_definitions.append(
|
| 679 |
+
GETTER_DEFINITION_OPT_ARRAYREF.substitute(
|
| 680 |
+
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_SYMINT
|
| 681 |
+
)
|
| 682 |
+
)
|
| 683 |
+
elif type == OptionalCType(BaseCType(intArrayRefT)):
|
| 684 |
+
saved_variables.append(f"c10::OptionalArray<int64_t> {name};")
|
| 685 |
+
getter_definitions.append(
|
| 686 |
+
GETTER_DEFINITION_OPT_ARRAYREF.substitute(
|
| 687 |
+
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_LONG
|
| 688 |
+
)
|
| 689 |
+
)
|
| 690 |
+
elif type == OptionalCType(BaseCType(symIntArrayRefT)):
|
| 691 |
+
saved_variables.append(f"c10::OptionalArray<c10::SymInt> {name};")
|
| 692 |
+
getter_definitions.append(
|
| 693 |
+
GETTER_DEFINITION_OPT_ARRAYREF.substitute(
|
| 694 |
+
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_SYMINT
|
| 695 |
+
)
|
| 696 |
+
)
|
| 697 |
+
elif type == OptionalCType(ArrayRefCType(BaseCType(doubleT))):
|
| 698 |
+
saved_variables.append(f"c10::OptionalArray<double> {name};")
|
| 699 |
+
getter_definitions.append(
|
| 700 |
+
GETTER_DEFINITION_OPT_ARRAYREF.substitute(
|
| 701 |
+
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_DOUBLE
|
| 702 |
+
)
|
| 703 |
+
)
|
| 704 |
+
elif type == BaseCType(longT):
|
| 705 |
+
saved_variables.append(f"{type.cpp_type()} {name} = 0;")
|
| 706 |
+
getter_definitions.append(
|
| 707 |
+
GETTER_DEFINITION.substitute(
|
| 708 |
+
op=info.op, name=name, body=GETTER_BODY_INT64_T
|
| 709 |
+
)
|
| 710 |
+
)
|
| 711 |
+
elif type == BaseCType(SymIntT):
|
| 712 |
+
saved_variables.append(f"c10::SymInt {name};")
|
| 713 |
+
getter_definitions.append(
|
| 714 |
+
GETTER_DEFINITION.substitute(
|
| 715 |
+
op=info.op, name=name, body=GETTER_BODY_SYMINT
|
| 716 |
+
)
|
| 717 |
+
)
|
| 718 |
+
elif type == BaseCType(stringT):
|
| 719 |
+
saved_variables.append(f"std::string {name};")
|
| 720 |
+
getter_definitions.append(
|
| 721 |
+
GETTER_DEFINITION.substitute(
|
| 722 |
+
op=info.op, name=name, body=GETTER_BODY_STRING
|
| 723 |
+
)
|
| 724 |
+
)
|
| 725 |
+
elif type == OptionalCType(BaseCType(stringT)):
|
| 726 |
+
saved_variables.append(f"std::optional<std::string> {name};")
|
| 727 |
+
getter_definitions.append(
|
| 728 |
+
GETTER_DEFINITION_OPT.substitute(
|
| 729 |
+
op=info.op, name=name, body=GETTER_BODY_STRING
|
| 730 |
+
)
|
| 731 |
+
)
|
| 732 |
+
elif type == ArrayRefCType(
|
| 733 |
+
elem=BaseCType(type=BaseCppType(ns="at", name="Scalar"))
|
| 734 |
+
):
|
| 735 |
+
saved_variables.append(f"std::vector<at::Scalar> {name};")
|
| 736 |
+
saved_variables.append(f"bool {name}_released_ = false;")
|
| 737 |
+
# Just clear() is sufficient, we don't need to loop and clear each variable.
|
| 738 |
+
# Because the SavedVariable owns a tensor and a grad_fn, removing the SavedVariable makes them go away as well.
|
| 739 |
+
release_variables.append(f"{name}.clear();")
|
| 740 |
+
# release_variables.append(f"{name}_released_ = true;")
|
| 741 |
+
# unpack.append(f"auto {name} = unpack_list({name}_);")
|
| 742 |
+
# asserts.append(f"TORCH_CHECK(!{name}_released_, ERR_BACKWARD_TWICE);")
|
| 743 |
+
getter_definitions.append(
|
| 744 |
+
CodeTemplate(
|
| 745 |
+
"""\
|
| 746 |
+
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
|
| 747 |
+
HANDLE_TH_ERRORS
|
| 748 |
+
const auto *node = static_cast<${op}*>(self->cdata.get());
|
| 749 |
+
const auto& prop = node->${name};
|
| 750 |
+
if (node->${name}_released_) {
|
| 751 |
+
PyErr_SetString(PyExc_RuntimeError, ERR_BACKWARD_TWICE);
|
| 752 |
+
return nullptr;
|
| 753 |
+
}
|
| 754 |
+
${body}
|
| 755 |
+
END_HANDLE_TH_ERRORS
|
| 756 |
+
}
|
| 757 |
+
"""
|
| 758 |
+
).substitute(
|
| 759 |
+
op=info.op,
|
| 760 |
+
name=name,
|
| 761 |
+
body=GETTER_BODY_VEC_SCALAR,
|
| 762 |
+
)
|
| 763 |
+
)
|
| 764 |
+
else:
|
| 765 |
+
# Check for indicators that you're putting a non-owning reference
|
| 766 |
+
# into the saved variable field. If this is spuriously firing,
|
| 767 |
+
# edit this field. Otherwise, you probably need to add a case
|
| 768 |
+
# above.
|
| 769 |
+
assert (
|
| 770 |
+
"ref" not in type.cpp_type().lower()
|
| 771 |
+
and "view" not in type.cpp_type().lower()
|
| 772 |
+
and "*" not in type.cpp_type()
|
| 773 |
+
and "&" not in type.cpp_type()
|
| 774 |
+
), f"{type.cpp_type()} looks like it contains a non-owning reference"
|
| 775 |
+
saved_variables.append(f"{type.cpp_type()} {name};")
|
| 776 |
+
|
| 777 |
+
if type in MISC_GETTER_DEFS:
|
| 778 |
+
getter_def, body = MISC_GETTER_DEFS[type]
|
| 779 |
+
getter_definitions.append(
|
| 780 |
+
getter_def.substitute(op=info.op, name=name, body=body)
|
| 781 |
+
)
|
| 782 |
+
else:
|
| 783 |
+
# Types we don't expose python bindings to yet:
|
| 784 |
+
# TypeAndSize, at::ScalarType, TensorOptions, TensorGeometry,
|
| 785 |
+
# std::vector<std::vector<int64_t>>, std::vector<at::ScalarType>
|
| 786 |
+
should_append_getsetdef = False
|
| 787 |
+
|
| 788 |
+
if should_append_getsetdef:
|
| 789 |
+
py_getsetdef_structs.append(
|
| 790 |
+
PY_GETSETDEF_STRUCT.substitute(op=info.op, name=name)
|
| 791 |
+
)
|
| 792 |
+
if should_append_raw_getsetdef:
|
| 793 |
+
py_getsetdef_structs.append(
|
| 794 |
+
PY_RAW_GETSETDEF_STRUCT.substitute(op=info.op, name=name)
|
| 795 |
+
)
|
| 796 |
+
|
| 797 |
+
if uses_cpp_saved_variable_cls:
|
| 798 |
+
compiled_args.append(
|
| 799 |
+
f"args.collect({visit_name}, {'true' if is_output else 'false'});"
|
| 800 |
+
)
|
| 801 |
+
else:
|
| 802 |
+
compiled_args.append(f"args.collect({visit_name});")
|
| 803 |
+
apply_with_saved_before.append(f"saved.before({visit_name});")
|
| 804 |
+
apply_with_saved_after.append(f"saved.after({visit_name});")
|
| 805 |
+
|
| 806 |
+
for var in sorted(info.all_saved_inputs, key=lambda sa: str(sa.nctype.name)):
|
| 807 |
+
save_var(var, is_output=False)
|
| 808 |
+
for var in sorted(info.all_saved_outputs, key=lambda sa: str(sa.nctype.name)):
|
| 809 |
+
save_var(var, is_output=True)
|
| 810 |
+
|
| 811 |
+
# lock the mutex when we release variables and in Node::apply to protect thread safety
|
| 812 |
+
# see Note [Thread Safety on Autograd Node]
|
| 813 |
+
if len(release_variables) > 0:
|
| 814 |
+
thread_lock = "std::lock_guard<std::mutex> lock(mutex_);"
|
| 815 |
+
else:
|
| 816 |
+
thread_lock = ""
|
| 817 |
+
|
| 818 |
+
if uses_retain_variables(info):
|
| 819 |
+
will_release_variables = WILL_RELEASE_VARIABLES.substitute()
|
| 820 |
+
else:
|
| 821 |
+
will_release_variables = ""
|
| 822 |
+
|
| 823 |
+
body: list[str] = []
|
| 824 |
+
|
| 825 |
+
if uses_single_grad(info):
|
| 826 |
+
body.append("const auto& grad = grads[0];")
|
| 827 |
+
else:
|
| 828 |
+
# Generate aliases for gradients named for returned values.
|
| 829 |
+
body.extend(
|
| 830 |
+
f"const auto& {name} = grads[{info.available_named_gradients.index(name)}];"
|
| 831 |
+
for name in sorted(info.used_named_gradients)
|
| 832 |
+
)
|
| 833 |
+
|
| 834 |
+
def emit_derivative(
|
| 835 |
+
derivative: Derivative,
|
| 836 |
+
args_with_derivatives: Sequence[Binding],
|
| 837 |
+
) -> tuple[bool, str]:
|
| 838 |
+
formula = derivative.formula
|
| 839 |
+
var_names = derivative.var_names
|
| 840 |
+
if len(var_names) == 1:
|
| 841 |
+
checks_any_grad_defined = False
|
| 842 |
+
if "not_implemented" not in formula:
|
| 843 |
+
matching_args = [
|
| 844 |
+
arg for arg in args_with_derivatives if arg.name == var_names[0]
|
| 845 |
+
]
|
| 846 |
+
if len(matching_args) == 1:
|
| 847 |
+
# We can add undefined grad support if the input variable is a Tensor
|
| 848 |
+
arg = matching_args[0]
|
| 849 |
+
if isinstance(arg.argument, Argument) and str(
|
| 850 |
+
arg.argument.type
|
| 851 |
+
) in ("Tensor", "Tensor?"):
|
| 852 |
+
formula = "any_grad_defined ? (" + formula + ") : Tensor()"
|
| 853 |
+
checks_any_grad_defined = True
|
| 854 |
+
if info.name.startswith("_foreach_"):
|
| 855 |
+
derivative_template = DERIVATIVE_SINGLE_FOREACH
|
| 856 |
+
else:
|
| 857 |
+
derivative_template = DERIVATIVE_SINGLE
|
| 858 |
+
return (
|
| 859 |
+
checks_any_grad_defined,
|
| 860 |
+
derivative_template.substitute(name=var_names[0], derivative=formula),
|
| 861 |
+
)
|
| 862 |
+
else:
|
| 863 |
+
if "grad_input_mask" in formula:
|
| 864 |
+
masks = [
|
| 865 |
+
f"task_should_compute_output({{ {n}_ix }})," for n in var_names
|
| 866 |
+
]
|
| 867 |
+
grad_input_mask = GRAD_INPUT_MASK.substitute(
|
| 868 |
+
masks=masks, n=len(var_names)
|
| 869 |
+
)
|
| 870 |
+
else:
|
| 871 |
+
grad_input_mask = ""
|
| 872 |
+
idx_ranges = ", ".join(f"{n}_ix" for n in var_names)
|
| 873 |
+
copy_ranges: list[str] = []
|
| 874 |
+
for i, n in enumerate(var_names):
|
| 875 |
+
copy_ranges.append(DERIVATIVE_MULTI_COPY_RANGE.substitute(name=n, i=i))
|
| 876 |
+
return False, DERIVATIVE_MULTI.substitute(
|
| 877 |
+
idx_ranges=idx_ranges,
|
| 878 |
+
copy_ranges=copy_ranges,
|
| 879 |
+
derivative=formula,
|
| 880 |
+
grad_input_mask=grad_input_mask,
|
| 881 |
+
)
|
| 882 |
+
|
| 883 |
+
body.extend(unpack)
|
| 884 |
+
need_any_grad_defined_var = False
|
| 885 |
+
for derivative in info.derivatives:
|
| 886 |
+
checks_any_grad_defined, derivative_text = emit_derivative(
|
| 887 |
+
derivative, info.args_with_derivatives
|
| 888 |
+
)
|
| 889 |
+
body.append(derivative_text)
|
| 890 |
+
need_any_grad_defined_var |= checks_any_grad_defined
|
| 891 |
+
# Since single-output derivative formulas need to check if grads are
|
| 892 |
+
# defined, only perform the check once, before all the formulas
|
| 893 |
+
if need_any_grad_defined_var:
|
| 894 |
+
body.insert(
|
| 895 |
+
-len(info.derivatives),
|
| 896 |
+
"bool any_grad_defined = any_variable_defined(grads);",
|
| 897 |
+
)
|
| 898 |
+
|
| 899 |
+
if info.name in UNTRACEABLE_FUNCTIONS:
|
| 900 |
+
superclass = "Node"
|
| 901 |
+
else:
|
| 902 |
+
superclass = "TraceableFunction"
|
| 903 |
+
|
| 904 |
+
all_getsetdef_structs = (
|
| 905 |
+
",\n".join(py_getsetdef_structs) + "," if len(py_getsetdef_structs) != 0 else ""
|
| 906 |
+
)
|
| 907 |
+
all_getter_definitions = "\n".join(getter_definitions)
|
| 908 |
+
|
| 909 |
+
return template.substitute(
|
| 910 |
+
op=info.op,
|
| 911 |
+
compute_index_ranges=compute_index_ranges,
|
| 912 |
+
saved_variables=saved_variables,
|
| 913 |
+
release_variables=release_variables,
|
| 914 |
+
saved_list_sizes=saved_list_sizes,
|
| 915 |
+
asserts=asserts,
|
| 916 |
+
thread_lock=thread_lock,
|
| 917 |
+
will_release_variables=will_release_variables,
|
| 918 |
+
body=body,
|
| 919 |
+
superclass=superclass,
|
| 920 |
+
all_getter_definitions=all_getter_definitions,
|
| 921 |
+
all_getsetdef_structs=all_getsetdef_structs,
|
| 922 |
+
compiled_args=compiled_args,
|
| 923 |
+
apply_with_saved_before=apply_with_saved_before,
|
| 924 |
+
apply_with_saved_after=apply_with_saved_after,
|
| 925 |
+
)
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_inplace_or_view_type.py
ADDED
|
@@ -0,0 +1,675 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generates ADInplaceOrViewType.h/cpp
|
| 2 |
+
#
|
| 3 |
+
# NOTE: If any changes are being made to the ADInplaceOrView codegen please also check
|
| 4 |
+
# if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp
|
| 5 |
+
# The fallback is expected to mimic this codegen, so we should keep the two in sync.
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
from torchgen.api import cpp
|
| 10 |
+
from torchgen.api.autograd import (
|
| 11 |
+
dispatch_strategy,
|
| 12 |
+
gen_differentiable_outputs,
|
| 13 |
+
NativeFunctionWithDifferentiabilityInfo,
|
| 14 |
+
)
|
| 15 |
+
from torchgen.api.types import (
|
| 16 |
+
BaseCType,
|
| 17 |
+
Binding,
|
| 18 |
+
boolT,
|
| 19 |
+
ConstRefCType,
|
| 20 |
+
CType,
|
| 21 |
+
DispatcherSignature,
|
| 22 |
+
intArrayRefT,
|
| 23 |
+
longT,
|
| 24 |
+
OptionalCType,
|
| 25 |
+
symIntArrayRefT,
|
| 26 |
+
SymIntT,
|
| 27 |
+
tensorT,
|
| 28 |
+
)
|
| 29 |
+
from torchgen.code_template import CodeTemplate
|
| 30 |
+
from torchgen.context import with_native_function
|
| 31 |
+
from torchgen.model import (
|
| 32 |
+
NativeFunction,
|
| 33 |
+
SchemaKind,
|
| 34 |
+
SelfArgument,
|
| 35 |
+
TensorOptionsArguments,
|
| 36 |
+
Type,
|
| 37 |
+
)
|
| 38 |
+
from torchgen.utils import FileManager
|
| 39 |
+
|
| 40 |
+
from .context import with_native_function_with_differentiability_info
|
| 41 |
+
from .gen_trace_type import (
|
| 42 |
+
get_return_value,
|
| 43 |
+
MANUAL_AUTOGRAD,
|
| 44 |
+
tie_return_values,
|
| 45 |
+
type_wrapper_name,
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# See NOTE [ Autograd View Variables ] in variable.h for details.
|
| 50 |
+
# If you update list VIEW_FUNCTIONS or RETURNS_VIEWS_OF_INPUT,
|
| 51 |
+
# you **MUST** also update the public list of view ops accordingly in
|
| 52 |
+
# docs/source/tensor_view.rst. Note not all ATen functions are exposed to public,
|
| 53 |
+
# e.g alias & sparse_coo_tensor_with_dims_and_tensors.
|
| 54 |
+
#
|
| 55 |
+
# A map: function name => name of the argument that all outputs are view of
|
| 56 |
+
|
| 57 |
+
# View ops whose output aliases the input storage but with *different*
# metadata (dtype reinterpretation, conjugation, negation, nestedness).
# They are folded into VIEW_FUNCTIONS below, mapped to "self".
VIEW_FUNCTIONS_WITH_METADATA_CHANGE = [
    "view_as_complex",
    "view_as_real",
    "_conj",
    "_neg_view",
    "_nested_get_values",
    "_nested_view_from_buffer",
    "_nested_view_from_jagged",
]

# Map: view-op base name => name of the argument that all outputs are views of.
VIEW_FUNCTIONS = {
    "numpy_T": "self",
    "alias": "self",
    "as_strided": "self",
    "diagonal": "self",
    "expand": "self",
    "permute": "self",
    "select": "self",
    "slice": "self",
    "slice_inverse": "self",
    "split": "self",
    "split_with_sizes": "self",
    "squeeze": "self",
    "t": "self",
    "transpose": "self",
    "unfold": "self",
    "unsqueeze": "self",
    "flatten": "self",
    "view": "self",
    "unbind": "self",
    "_indices": "self",
    "_values": "self",
    "indices": "self",
    "values": "self",
    "crow_indices": "self",
    "col_indices": "self",
    "ccol_indices": "self",
    "row_indices": "self",
    # sparse_coo ctor output should really be views of both indices and values,
    # but we only support marking a view of a single variable, and indices is
    # discrete anyway.
    # FIXME: clone indices on construction.
    "sparse_coo_tensor_with_dims_and_tensors": "values",
    "_reshape_alias": "self",
    "_test_autograd_multiple_dispatch_view": "self",
}

# Metadata-changing view ops are views of `self` as well.
for key in VIEW_FUNCTIONS_WITH_METADATA_CHANGE:
    VIEW_FUNCTIONS[key] = "self"

# note: some VIEW_FUNCTIONS are just compositions of the view functions above
# this list contains both the root view functions and any that are purely composed
# of viewing functions, and is used by the JIT to determine when an operator
# may return a view of its inputs; however they may sometimes return a copy.
# (e.g. `contiguous`)
RETURNS_VIEWS_OF_INPUT = set(VIEW_FUNCTIONS.keys()).union(
    {
        "chunk",
        "detach",
        "contiguous",
        "reshape",
        "reshape_as",
        "expand_as",
        "view_as",
        "real",
        "imag",
        "narrow",
        "movedim",
        "tensor_split",
        "swapdims",
        "swapaxes",
        "mT",
        "mH",
        "adjoint",
        "matrix_H",
    }
)

# These are the functions we consider views for the purposes of validating
# StorageImpl and TensorImpl in gen_variable_type.
# `_unsafe_view` is not included in VIEW_FUNCTIONS above because it is not a
# view for the purposes of ADInplaceOrView kernel, we do not want to call as_view
# See NOTE [Unsafe View] for more info.
ALL_VIEW_FUNCTIONS = {
    **VIEW_FUNCTIONS,
    "_unsafe_view": "self",
}
|
| 144 |
+
|
| 145 |
+
# Materialize an ArrayRef argument into an owning std::vector so a lambda /
# ViewFunc can safely close over it by value.
ARRAYREF_TO_VEC = CodeTemplate(
    """\
auto ${vec} = ${arg}.vec();
"""
)

# Unwrap an optional argument into a plain value with a default.
OPTIONAL_TO_VAL = CodeTemplate(
    """\
auto ${val} = ${arg}.value_or(${default});
"""
)

# Full dispatcher call to an op (used e.g. when replaying a view).
CALL_DISPATCH = CodeTemplate(
    """\
at::_ops::${unambiguous_name}::call(${unpacked_args})"""
)

# Call to the generated `<op>_copy_..._inverse` helper for reverse view replay.
REVERSE_VIEW_DISPATCH = CodeTemplate(
    """\
${reverse_name}(${unpacked_args})"""
)

# Iterate over each output of a multi-output view op (e.g. split, unbind).
MULTI_OUTPUT_VIEW_ITERATION = CodeTemplate(
    """\
for (auto ${view_idx} : c10::irange(${var}.size())) {
  ${body}
}
"""
)

# Set up forward/reverse view-replay closures when as_strided cannot be used:
# metadata-changing views, TensorImpls without as_strided support, python
# dispatch subclasses, or when view replay is explicitly enabled.
SETUP_REPLAY_VIEW_IF_NOT_SUPPORT_AS_STRIDED_OR_VIEW_WITH_METADATA_CHANGE = CodeTemplate(
    """\
std::unique_ptr<torch::autograd::ViewFunc> func(nullptr);
std::function<at::Tensor(const at::Tensor&)> rev_func=nullptr;
if (${is_view_with_metadata_change} ||
    !self.unsafeGetTensorImpl()->support_as_strided() ||
    self.unsafeGetTensorImpl()->is_python_dispatch() ||
    c10::AutogradState::get_tls_state().get_view_replay_enabled()) {
  ${replay_view_func}
  ${reverse_replay_view_func}
}
"""
)

# Instantiate the generated ViewFunc subclass used to replay this view.
REPLAY_VIEW_FUNC = CodeTemplate(
    """\
func = std::make_unique<${view_func_name}>(${view_func_args});
"""
)

# Lambda mapping a view tensor back to its base (reverse view replay).
REVERSE_REPLAY_VIEW_LAMBDA_FUNC = CodeTemplate(
    """\
rev_func = [=](const at::Tensor& ${input_view}) {
  return ${reverse_replay_view_call};
};
"""
)

# Body of a generated ADInplaceOrView kernel.
METHOD_DEFINITION = CodeTemplate(
    """\
${return_type} ${type_wrapper_name}(${formals}) {
  ${type_definition_body}
}
"""
)

# TORCH_LIBRARY_IMPL registration of a generated kernel.
WRAPPER_REGISTRATION = CodeTemplate(
    """\
m.impl("${unqual_operator_name_with_overload}",
       TORCH_FN(${class_type}::${type_wrapper_name})
);
"""
)

# Registration of the boxed autograd-not-implemented fallback for an op.
AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION = CodeTemplate(
    """\
m.impl("${unqual_operator_name_with_overload}", torch::autograd::autogradNotImplementedFallback());
"""
)

# Redispatch an in-place op below the ADInplaceOrView dispatch key.
INPLACE_REDISPATCH = CodeTemplate(
    """\
{
  at::AutoDispatchBelowADInplaceOrView guard;
  at::_ops::${unambiguous_name}::redispatch(${unpacked_args});
}
"""
)

# Simple assignment of a redispatched result to the return variable(s).
ASSIGN_RETURN_VALUE = CodeTemplate(
    """\
${return_values} = ${rhs_value};
"""
)

# Redispatch a view op below the ADInplaceOrView key and capture its result.
VIEW_REDISPATCH = CodeTemplate(
    """\
${assign_return_values} ([&]() {
  at::AutoDispatchBelowADInplaceOrView guard;
  return at::_ops::${unambiguous_name}::redispatch(${unpacked_args});
})();
"""
)

# Name of the temporary holding the redispatched result before wrapping.
TMP_VAR = "_tmp"
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
# FIXME: Ideally these functions should be methods on Type class, but we have a
|
| 253 |
+
# comment in codegen/model.py there saying these concepts are not well defined.
|
| 254 |
+
# Thus we put a version that commonly used by autograd codegen here.
|
| 255 |
+
def is_tensor_type(t: Type) -> bool:
    """Return True when ``t`` is a plain (non-list) tensor-like type."""
    # TODO: Should handle optional here?
    if not t.is_tensor_like():
        return False
    return t.is_list_like() is None
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def is_tensor_list_type(t: Type) -> bool:
    """Return True when ``t`` is a tensor-like *list* type."""
    # TODO: Should handle optional here?
    if not t.is_tensor_like():
        return False
    return t.is_list_like() is not None
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
# Checked unpack of a tensor argument: the generated `unpack${suffix}` call
# validates the argument (e.g. that it is defined) and binds it to `<name>_`.
UNPACK_TENSOR = CodeTemplate(
    """\
auto${ref} ${arg_name}_ = unpack${suffix}(${arg_name}, "${arg_name}", ${arg_pos});"""
)
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def unpacked_name(arg_name: str) -> str:
    """Return the local-variable name used for the unpacked form of ``arg_name``."""
    return f"{arg_name}_"
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
# e.g. select.int -> select_copy_int_inverse()
|
| 276 |
+
def inverse_view_name(f: NativeFunction) -> str:
    """Name of the copy-variant inverse helper, e.g. ``select.int`` -> ``select_copy_int_inverse``."""
    overload_suffix = f"{f.func.name.overload_name}"
    if overload_suffix:
        overload_suffix = f"_{overload_suffix}"
    return f"{f.root_name}_copy{overload_suffix}_inverse"
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def extract_bindings(f: NativeFunction) -> list[Binding]:
    """Collect the C++ bindings for every schema-order argument of ``f``."""
    bindings: list[Binding] = []
    for arg in f.func.schema_order_arguments():
        bindings.extend(
            cpp.argument(
                arg,
                method=False,
                symint=True,
                cpp_no_default_args=set(),
                faithful=False,
                has_tensor_options=False,
            )
        )
    return bindings
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
@with_native_function
def unpack_args(f: NativeFunction) -> tuple[list[str], list[Binding]]:
    """Generate the argument-unpacking preamble for a generated kernel of ``f``.

    Returns a pair of:
      * ``body``: C++ statements (via UNPACK_TENSOR) that check and unpack
        each non-nullable tensor-like argument into a ``<name>_`` local, and
      * ``unpacked_bindings``: the bindings to use afterwards — unpacked
        arguments are renamed to ``<name>_``; all others pass through as-is.
    """
    body: list[str] = []
    unpacked_bindings: list[Binding] = []

    for i, binding in enumerate(extract_bindings(f)):
        assert not isinstance(binding.argument, SelfArgument)
        if isinstance(binding.argument, TensorOptionsArguments):
            raise RuntimeError("VariableKernel shouldn't take TensorOptions")

        is_nullable = binding.argument.type.is_nullable()
        # Non-tensor and nullable arguments need no unpacking; pass through.
        if not binding.argument.type.is_tensor_like() or is_nullable:
            unpacked_bindings.append(binding)
            continue

        # NOTE: past the `continue` above, is_nullable is always False, so
        # `ref` reduces to `not is_tensor_list` and `suffix` is always "".
        is_tensor_list = is_tensor_list_type(binding.argument.type)
        ref = (not is_nullable) and not is_tensor_list
        suffix = "_opt" if is_nullable and not is_tensor_list else ""
        body.append(
            UNPACK_TENSOR.substitute(
                arg_name=binding.name,
                arg_pos=i,
                suffix=suffix,
                ref="&" if ref else "",
            )
        )
        # Rebind under the unpacked name (`<name>_`) for downstream codegen.
        unpacked_bindings.append(
            Binding(
                name=unpacked_name(binding.name),
                nctype=binding.nctype,
                argument=binding.argument,
                default=binding.default,
            )
        )

    return body, unpacked_bindings
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def get_base_name(f: NativeFunction) -> str:
    """Return the base operator name of ``f`` (overload name excluded)."""
    # TODO: should this be str(f.func.name.name) instead?
    base = f.func.name.name.base
    return base
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def get_view_info(f: NativeFunction) -> str | None:
    """Return the name of the argument the outputs of ``f`` are views of.

    Returns None when ``f`` is not a view op. Ops that merely *may* return a
    view of their input (RETURNS_VIEWS_OF_INPUT) are treated as views of self.
    """
    base_name = get_base_name(f)
    if base_name in VIEW_FUNCTIONS:
        return VIEW_FUNCTIONS[base_name]
    if base_name in RETURNS_VIEWS_OF_INPUT:
        return "self"
    return None
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def emit_view_func(
    f: NativeFunction, bindings: list[Binding], view_idx: str | None = None
) -> str:
    """Generate an additional lambda function to recover views in backward when as_strided is not supported.
    See Note [View + Inplace update for base tensor] and [View + Inplace update for view tensor] for more details.

    The generated C++ closes over the (materialized) non-self arguments so the
    view can be replayed later; ``view_idx`` is appended for multi-output views.
    """
    # TODO: Clean this logic up if we get rid of reverse view funcs or reify them.
    input_base = "input_base"
    replay_view_func = ""
    updated_args: list[str] = []
    # The only argument C++ types we know how to safely close over by value.
    known_view_arg_simple_types: list[CType] = [
        BaseCType(longT),
        OptionalCType(BaseCType(longT)),
        BaseCType(SymIntT),
        OptionalCType(BaseCType(SymIntT)),
        BaseCType(boolT),
        BaseCType(intArrayRefT),
        BaseCType(symIntArrayRefT),
        ConstRefCType(BaseCType(tensorT)),
        ConstRefCType(OptionalCType(BaseCType(tensorT))),
    ]
    for binding in bindings:
        arg, arg_type = binding.name, binding.nctype.type
        if arg == "self":
            # "self" is replaced by the replay lambda's input_base parameter.
            updated_args.append(input_base)
            continue
        if arg_type not in known_view_arg_simple_types:
            known_types_str = ", ".join([str(t) for t in known_view_arg_simple_types])
            raise TypeError(
                f"You are adding an {arg_type} {arg} argument to op {cpp.name(f.func)} in addition to known types: "
                f"{known_types_str}. Please update the list or materialize it so that it can be closed "
                "over by value, also add a test in pytorch/xla/test/test_operations.py where this code "
                "is exercised."
            )
        if arg_type == BaseCType(intArrayRefT) or arg_type == BaseCType(
            symIntArrayRefT
        ):
            # It's not safe to close over IntArrayRef by value, since this is a
            # reference type, so materialize a vector to close over by value
            arg_vec = arg + "_vec"
            replay_view_func += ARRAYREF_TO_VEC.substitute(arg=arg, vec=arg_vec)
            updated_args.append(arg_vec)
        elif arg_type == OptionalCType(BaseCType(longT)):
            # Materialize int64_t? to int64_t
            arg_value = arg + "_val"
            replay_view_func += OPTIONAL_TO_VAL.substitute(
                arg=arg, val=arg_value, default="0"
            )
            updated_args.append(arg_value)
        elif arg_type == ConstRefCType(BaseCType(tensorT)) or arg_type == ConstRefCType(
            OptionalCType(BaseCType(tensorT))
        ):
            # NB: Closing over a tensor. If a user modifies this tensor, this will be silently
            # incorrect. The proper thing to do is to store the version counter and copy on write.
            updated_args.append(arg)
        else:
            updated_args.append(arg)

    # Local import to avoid a circular dependency at module load time.
    from .gen_view_funcs import view_func_name

    view_func_args = [b.name for b in bindings if b.name != "self"]
    if view_idx is not None:
        view_func_args.append(f"{view_idx}")
    replay_view_func += REPLAY_VIEW_FUNC.substitute(
        view_func_name=view_func_name(f, include_namespace=True),
        view_func_args=view_func_args,
    )

    input_view = "input_view"
    reverse_unpacked_args = [
        "self",
        f"{input_view}",
        # inverse_return_mode=
        "at::functionalization::InverseReturnMode::AlwaysView",
        *(() if view_idx is None else (f"{view_idx}",)),
        # skip input_base arg
        *updated_args[1:],
    ]

    # Local import to avoid a circular dependency at module load time.
    from torchgen.api.functionalization import reverse_name

    reverse_replay_view_call = REVERSE_VIEW_DISPATCH.substitute(
        reverse_name=reverse_name(f, include_namespace=True),
        unpacked_args=reverse_unpacked_args,
    )
    reverse_replay_view_func = REVERSE_REPLAY_VIEW_LAMBDA_FUNC.substitute(
        input_view=input_view, reverse_replay_view_call=reverse_replay_view_call
    )

    is_view_with_metadata_change = (
        "true" if cpp.name(f.func) in VIEW_FUNCTIONS_WITH_METADATA_CHANGE else "false"
    )

    return SETUP_REPLAY_VIEW_IF_NOT_SUPPORT_AS_STRIDED_OR_VIEW_WITH_METADATA_CHANGE.substitute(
        is_view_with_metadata_change=is_view_with_metadata_change,
        replay_view_func=replay_view_func,
        reverse_replay_view_func=reverse_replay_view_func,
    )
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
def emit_view_body(
    fn: NativeFunctionWithDifferentiabilityInfo, var: str
) -> tuple[str, str]:
    """Emit the autograd view-tracking code for a view-returning op.

    Returns ``(call, rhs_value)``: extra statements to emit before the return,
    and the C++ expression that wraps ``var`` in ``as_view``.
    """
    # See NOTE [ Autograd View Variables ] in variable.h for details.
    f = fn.func
    base_name = get_base_name(f)
    view_info = get_view_info(f)
    call = ""
    differentiable_outputs = gen_differentiable_outputs(fn)
    differentiable_output_vars = {r.name for r in differentiable_outputs}
    if not isinstance(view_info, str):
        raise TypeError(
            f"The view info should be a string for {base_name}, but it is: {view_info}"
        )
    if len(differentiable_output_vars) == 0:
        # no output is differentiable (.indices() for SparseTensors for example)
        rhs_value = (
            f"as_view({view_info}, {var}, "
            f"/* is_bw_differentiable */ false, /* is_fw_differentiable */ false)"
        )
    elif len(differentiable_output_vars) == 1:
        # Single differentiable output (Tensor or Tensor[])
        return_info = differentiable_outputs[0]
        # We only support simple Tensor or a TensorList for functions that return views
        if not is_tensor_type(return_info.type) and not is_tensor_list_type(
            return_info.type
        ):
            raise RuntimeError(
                f"{base_name} that return differentiable views can only return Tensor or Tensor[]"
            )

        # See Note [ View + Inplace detection]
        def get_creation_meta_in_mode(original: str) -> str:
            # Wrap the creation meta so inference/no-grad modes are detected at runtime.
            creation_meta_with_grad_mode = f"(at::GradMode::is_enabled() ? {original} : CreationMeta::NO_GRAD_MODE)"
            return f"InferenceMode::is_enabled() ? CreationMeta::INFERENCE_MODE : {creation_meta_with_grad_mode}"

        # Only allow rebasing of the history if we return a single Tensor
        # If we are in a no grad block, raise a warning
        # See NOTE [ View + Inplace detection ] for more details about this logic
        if is_tensor_list_type(return_info.type):
            # Tensor[] output: emit a per-element loop that wraps each view.
            creation_meta = get_creation_meta_in_mode("CreationMeta::MULTI_OUTPUT_NODE")
            view_idx = "view_idx"
            view_func = emit_view_func(
                f, extract_bindings(f), view_idx=view_idx
            ).strip()
            as_view_call = (
                f"as_view(/* base */ {view_info}, /* output */ {var}[{view_idx}], "
                "/* is_bw_differentiable */ true, /* is_fw_differentiable */ true, "
                "/* view_func */ std::move(func), /* rev_view_func */ rev_func, "
                f"/* creation_meta */ {creation_meta});"
            )
            call += MULTI_OUTPUT_VIEW_ITERATION.substitute(
                var=var, view_idx=view_idx, body=f"{view_func}\n{as_view_call}"
            )
            rhs_value = f"std::move({var})"
        else:
            # Single Tensor output.
            call += emit_view_func(f, extract_bindings(f), view_idx=None)
            creation_meta = get_creation_meta_in_mode("CreationMeta::DEFAULT")
            rhs_value = (
                f"as_view(/* base */ {view_info}, /* output */ {var}, /* is_bw_differentiable */ true, "
                "/* is_fw_differentiable */ true, "
                f"/* view_func */ std::move(func), /* rev_view_func */ rev_func, /* creation_meta */ {creation_meta})"
            )
    else:
        # This could be supported but we don't need it at the moment, so keeping things simple.
        raise RuntimeError(
            "Function that return multiple differentiable output "
            "when at least one of them is view is not supported."
        )
    return call, rhs_value
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
def modifies_arguments(f: NativeFunction) -> bool:
    """True for in-place and out= overloads, which mutate their inputs."""
    return f.func.kind() in (SchemaKind.inplace, SchemaKind.out)
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
@with_native_function_with_differentiability_info
def emit_inplace_or_view_body(fn: NativeFunctionWithDifferentiabilityInfo) -> list[str]:
    """Emit the body of the ADInplaceOrView kernel for ``fn``.

    In-place ops redispatch then bump version counters; view ops redispatch
    into a temporary and wrap it with view-tracking via emit_view_body.
    """
    f = fn.func
    inplace_view_body: list[str] = []

    dispatcher_sig = DispatcherSignature.from_schema(f.func)
    dispatcher_exprs = dispatcher_sig.exprs()

    # code-generated ADInplaceOrView kernels plumb and recompute dispatch keys directly through the kernel for performance.
    # See Note [Plumbing Keys Through The Dispatcher] for details.
    dispatch_key_set = "ks & c10::after_ADInplaceOrView_keyset"
    redispatch_args = ", ".join([dispatch_key_set] + [a.expr for a in dispatcher_exprs])

    # Note that this calls the slow, dispatching variants of manual_cpp_binding ops.
    # We could probably work harder to ensure that the fast variants are called instead, but the perf benefit would be minimal.
    if modifies_arguments(f):  # inplace op
        inplace_view_body.append(
            INPLACE_REDISPATCH.substitute(
                unambiguous_name=f.func.name.unambiguous_name(),
                unpacked_args=redispatch_args,
            )
        )
        # Bump the version counter of every mutated return so autograd can
        # detect subsequent in-place modification.
        for r in cpp.return_names(f):
            inplace_view_body.append(f"increment_version({r});")
    else:
        assert get_view_info(f) is not None
        inplace_view_body.append(
            VIEW_REDISPATCH.substitute(
                assign_return_values="auto " + TMP_VAR + " = ",
                unambiguous_name=f.func.name.unambiguous_name(),
                unpacked_args=redispatch_args,
            )
        )
        call, rhs_value = emit_view_body(fn, TMP_VAR)
        inplace_view_body.append(call)
        assert rhs_value is not None
        inplace_view_body.append(
            ASSIGN_RETURN_VALUE.substitute(
                return_values=tie_return_values(f), rhs_value=rhs_value
            )
        )
    if f.func.returns:
        inplace_view_body.append(f"return {get_return_value(f)};")
    return inplace_view_body
|
| 569 |
+
|
| 570 |
+
|
| 571 |
+
@with_native_function
def gen_formals(f: NativeFunction) -> str:
    """Render the C++ formal parameter list for the generated kernel."""
    # code-generated autograd kernels plumb and recompute dispatch keys directly
    # through the kernel for performance.
    # See Note [Plumbing Keys Through The Dispatcher] for details.
    formals = ["c10::DispatchKeySet ks"]
    for a in f.func.schema_order_arguments():
        arg_type = cpp.argument_type(a, binds="__placeholder__", symint=True).cpp_type()
        formals.append(f"{arg_type} {a.name}")
    return ", ".join(formals)
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
@with_native_function_with_differentiability_info
def inplace_or_view_method_definition(
    fn: NativeFunctionWithDifferentiabilityInfo,
) -> str | None:
    """Return the ADInplaceOrView kernel definition, or None if not needed."""
    f = fn.func
    # A kernel is only generated for view ops and for in-place/out ops that
    # return their results.
    # For functions that modify their inputs but don't return them,
    # we can't give them autograd support.
    # See https://github.com/pytorch/pytorch/issues/53796
    needs_kernel = get_view_info(f) is not None or (
        modifies_arguments(f) and len(f.func.returns) > 0
    )
    if not needs_kernel:
        return None
    return METHOD_DEFINITION.substitute(
        return_type=cpp.returns_type(f.func.returns, symint=True).cpp_type(),
        type_wrapper_name=type_wrapper_name(f),
        formals=gen_formals(f),
        type_definition_body=emit_inplace_or_view_body(fn),
    )
|
| 603 |
+
|
| 604 |
+
|
| 605 |
+
@with_native_function_with_differentiability_info
def inplace_or_view_method_registration(
    fn: NativeFunctionWithDifferentiabilityInfo,
) -> str | None:
    """Return the kernel registration line, or None if no kernel is emitted."""
    f = fn.func
    # Mirror the condition in inplace_or_view_method_definition: only view ops
    # and returning in-place/out ops get a registration.
    needs_registration = get_view_info(f) is not None or (
        modifies_arguments(f) and len(f.func.returns) > 0
    )
    if not needs_registration:
        return None
    return WRAPPER_REGISTRATION.substitute(
        unqual_operator_name_with_overload=f.func.name,
        type_wrapper_name=type_wrapper_name(f),
        class_type="ADInplaceOrView",
    )
|
| 619 |
+
|
| 620 |
+
|
| 621 |
+
def use_derived(fn: NativeFunctionWithDifferentiabilityInfo) -> bool:
    """Whether codegen should emit a derived (generated) kernel for this op."""
    op_name = cpp.name(fn.func.func)
    if op_name in MANUAL_AUTOGRAD:
        # Manually registered in VariableTypeManual.cpp; skip codegen.
        return False
    return dispatch_strategy(fn) == "use_derived"
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
def gen_inplace_or_view_type_env(
    fn: NativeFunctionWithDifferentiabilityInfo,
) -> dict[str, list[str]]:
    """Build the per-function template environment for ADInplaceOrViewType.cpp."""
    definition = inplace_or_view_method_definition(fn)
    registration = inplace_or_view_method_registration(fn)

    env: dict[str, list[str]] = {
        "ops_headers": [],
        "inplace_or_view_method_definitions": [],
        "inplace_or_view_wrapper_registrations": [],
    }
    if definition is not None:
        # Only pull in the per-op header when we actually emit a kernel.
        env["ops_headers"].append(f"#include <ATen/ops/{fn.func.root_name}_ops.h>")
        env["inplace_or_view_method_definitions"].append(definition)
    if registration is not None:
        env["inplace_or_view_wrapper_registrations"].append(registration)
    return env
|
| 646 |
+
|
| 647 |
+
|
| 648 |
+
def gen_inplace_or_view_type(
    out: str,
    native_yaml_path: str,
    tags_yaml_path: str,
    fns_with_infos: list[NativeFunctionWithDifferentiabilityInfo],
    template_path: str,
) -> None:
    """Write the sharded ADInplaceOrViewType.cpp files to ``out``.

    Args:
        out: install directory for the generated files.
        native_yaml_path: path to native_functions.yaml (kept for interface
            compatibility; not read here).
        tags_yaml_path: path to tags.yaml (kept for interface compatibility;
            not read here).
        fns_with_infos: all native functions with differentiability info;
            only those passing ``use_derived`` are emitted.
        template_path: directory containing the .cpp templates.
    """
    # NOTE: see Note [Sharded File] at the top of the VariableType.cpp
    # template regarding sharding of the generated files.
    num_shards = 2

    fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
    fm.write_sharded(
        "ADInplaceOrViewType.cpp",
        [fn for fn in fns_with_infos if use_derived(fn)],
        key_fn=lambda fn: fn.func.root_name,
        base_env={
            "generated_comment": "@"
            + f"generated from {fm.template_dir_for_comments()}/ADInplaceOrViewType.cpp",
        },
        env_callable=gen_inplace_or_view_type_env,
        # Pass the named constant instead of duplicating the literal 2, so the
        # shard count is defined in exactly one place.
        num_shards=num_shards,
        sharded_keys={
            "ops_headers",
            "inplace_or_view_method_definitions",
            "inplace_or_view_wrapper_registrations",
        },
    )
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_trace_type.py
ADDED
|
@@ -0,0 +1,536 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import itertools
|
| 4 |
+
from typing import Sequence
|
| 5 |
+
|
| 6 |
+
from torchgen.api import cpp
|
| 7 |
+
from torchgen.api.types import DispatcherSignature
|
| 8 |
+
from torchgen.code_template import CodeTemplate
|
| 9 |
+
from torchgen.context import with_native_function
|
| 10 |
+
from torchgen.model import Argument, NativeFunction, SchemaKind, TensorOptionsArguments
|
| 11 |
+
from torchgen.utils import FileManager
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Note [Manual Backend kernels]
# For these ops, we want to manually register to dispatch key Backend and
# skip codegen-ed registration to all keys before Backend.
# For codegen this means:
#   - op set below must match ops with manual_kernel_registration=True in native_functions.yaml
#     where we skip codegen backend kernels
#   - all ops below are part of MANUAL_AUTOGRAD to skip codegen Autograd kernel registration
#   - all ops below are part of MANUAL_TRACER to skip codegen Tracer kernel registration
# Note: we still register to dispatch key Profiler for these ops, keeping it untouched for now.
# You can find the manual registration in torch/csrc/autograd/VariableTypeManual.cpp
MANUAL_BACKEND = {
    "options",
    "data",
    "set_data",
    "is_leaf",
    "output_nr",
    "_version",
    "retain_grad",
    "_backward",
    "requires_grad_",
}

# For these ops we want to skip the codegen-ed registration to both Autograd and Tracer keys.
# You can find the manual registration in torch/csrc/autograd/VariableTypeManual.cpp
MANUAL_AUTOGRAD_AND_TRACER = {
    "resize_",
    "resize_as_",
    "detach",
    "detach_",
    "copy_",
    "_fw_primal",
    "_make_dual",
}

# Currently MANUAL_AUTOGRAD and MANUAL_TRACER share the same set of ops:
# union(MANUAL_BACKEND, MANUAL_AUTOGRAD_AND_TRACER)
# You can find the manual registration in torch/csrc/autograd/VariableTypeManual.cpp
MANUAL_AUTOGRAD = MANUAL_TRACER = MANUAL_BACKEND | MANUAL_AUTOGRAD_AND_TRACER

# These functions we don't want to record for tracing, because we always want
# to trace their constituent parts.  This is a temporary hack in lieu
# of proper scopes, where subsequent compilation passes can ask for the unfolding
# on demand.  Only concrete ATen methods can be disabled this way; it will have
# NO EFFECT otherwise.
DONT_RECORD_TRACE = {
    "convolution",
    "conv1d",
    "conv2d",
    "conv3d",
    "conv_transpose1d",
    "conv_transpose2d",
    "conv_transpose3d",
    "lstm_cell",
    "gru_cell",
    "rnn_tanh_cell",
    "rnn_relu_cell",
    # FIXME: figure out a better way when we support sparse tensors in jit
    "_coalesced",
}
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def should_trace(f: NativeFunction) -> bool:
    """Whether the JIT tracer should record this op."""
    # Operations involving Storage or Type are not traceable at the moment
    untraceable_arg_types = {"Storage", "Type", "ConstQuantizerPtr"}
    for arg in f.func.schema_order_arguments():
        if str(arg.type) in untraceable_arg_types:
            return False
    # We can't trace functions which don't have any Tensor or TensorList returns
    if all(not r.type.is_tensor_like() for r in f.func.returns):
        return False
    return f.func.name.name.base not in DONT_RECORD_TRACE
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# C++ if/else snippet used to choose between out-of-place and in-place
# tracing variants at runtime.
SELECT = CodeTemplate(
    """\

if (${cond}) {
  ${true}
} else {
  ${false}
}
"""
)

# Assigns the JIT symbol for the traced op to the local `op_name`.
OP_NAME = CodeTemplate(
    """\
op_name = c10::Symbol::fromQualString("aten::${trace_name}");
"""
)
|
| 104 |
+
|
| 105 |
+
# These functions have their names recorded under trace renamed,
# because no out-of-place variant of the in-place op exists in ATen
# (see Note in the RENAME_TRACE_ADD_ARGS comment block below).
RENAME_TRACE = {
    "zero": "zeros_like",  # replacing aten::zero_ with aten::zeros_like
    "fill": "full_like",  # replacing aten::fill_ with aten::full_like
}
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def format_trace_op_name(f: NativeFunction) -> str:
    """Emit C++ that assigns `op_name` to the JIT symbol recorded for this op."""
    # TODO: byte-for-byte compatible with old codegen behavior - should clean up
    uses_single_name = (
        f.func.kind() in (SchemaKind.functional, SchemaKind.out)
        or f.func.name.name.dunder_method
    )
    if uses_single_name:
        # special case for *_out functions: the in-place and out-of-place ops
        # are overloaded with the same name in the JIT
        base = str(f.func.name.name)
        return OP_NAME.substitute(trace_name=RENAME_TRACE.get(base, base))

    # otherwise, this is an in-place op and we need to emit both in- and
    # out-of-place versions
    outplace = f.func.name.name.base
    inplace = cpp.name(f.func)
    outplace = RENAME_TRACE.get(outplace, outplace)
    inplace = RENAME_TRACE.get(inplace, inplace)

    return SELECT.substitute(
        cond="tracer_state->force_outplace",
        true=OP_NAME.substitute(trace_name=outplace),
        false=OP_NAME.substitute(trace_name=inplace),
    )
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
# Emits one `jit::tracer::addInputs` call recording a single input value on the node.
ADD_TRACE_INPUT = CodeTemplate("""jit::tracer::addInputs(node, "${name}", ${input});""")
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def format_trace_inputs(f: NativeFunction) -> str:
    """Emit the C++ statements that record all of the op's inputs on the trace node."""

    def dispatch_trace_input(arg: Argument | TensorOptionsArguments) -> Sequence[str]:
        # TensorOptions is recorded as its four constituent fields.
        if isinstance(arg, TensorOptionsArguments):
            name = "options"
            return [
                ADD_TRACE_INPUT.substitute(
                    name=name, input="c10::optTypeMetaToScalarType(options.dtype_opt())"
                ),
                ADD_TRACE_INPUT.substitute(name=name, input="options.layout()"),
                ADD_TRACE_INPUT.substitute(name=name, input="options.device()"),
                ADD_TRACE_INPUT.substitute(name=name, input="options.pinned_memory()"),
            ]
        else:
            name = arg.name
            if str(arg.type) == "Tensor?[]":
                return [f'jit::tracer::addInputs(node, "{name}", {name});']
            else:
                return [ADD_TRACE_INPUT.substitute(name=name, input=name)]

    args: list[Argument | TensorOptionsArguments] = list(
        f.func.schema_order_arguments()
    )

    if f.func.is_out_fn():
        # *_out functions take the result as a separate argument, but we don't want to
        # trace that argument directly. Instead, we trace its TensorOptions.
        # So first, we need to remove the out argument from the list of arguments to trace.
        num_out_args = len(f.func.arguments.out)
        args = args[:-num_out_args]

    trace_inputs = itertools.chain.from_iterable(
        dispatch_trace_input(arg) for arg in args
    )

    if f.func.is_out_fn():
        # NB: num_out_args was assigned in the earlier is_out_fn() branch,
        # which is guarded by the same condition.
        # for *_out functions, handle the result argument differently for inplace/outplace.
        # For inplace: just add the input to the end to confirm with the JIT schema
        inplace = [
            ADD_TRACE_INPUT.substitute(
                name=f.func.arguments.out[i].name, input=f.func.arguments.out[i].name
            )
            for i in range(num_out_args)
        ]

        # for outplace: do nothing, except if the function is a factory.
        # Factories are a bit special because their out-of-place overloads
        # take an extra TensorOptions argument, which is missing in the _out function
        has_tensor_return = any(r.type.is_tensor_like() for r in f.func.returns)
        has_tensor_input_arg = any(
            a.type.is_tensor_like() for a in f.func.arguments.flat_non_out
        )
        is_factory_method = f.category_override == "factory" or (
            has_tensor_return and not has_tensor_input_arg
        )

        # HACK: preserve old codegen behavior - the old codegen set the `is_factory_method`
        # flag for the whole family of ops with the same basename if any of them is a
        # factory method. For most cases the whole family of ops are indeed all factory
        # method - 'normal' is the only exception. So we handle it specially here to avoid
        # cloning the old logic.
        if f.func.name.name.base == "normal":
            is_factory_method = True

        if is_factory_method:
            outplace = [
                ADD_TRACE_INPUT.substitute(
                    name="out",
                    input="c10::optTypeMetaToScalarType(out.options().dtype_opt())",
                ),
                ADD_TRACE_INPUT.substitute(name="out", input="out.options().layout()"),
                ADD_TRACE_INPUT.substitute(name="out", input="out.options().device()"),
                ADD_TRACE_INPUT.substitute(
                    name="out", input="out.options().pinned_memory()"
                ),
            ]
        else:
            outplace = []

        trace_inputs = itertools.chain(
            trace_inputs,
            [
                SELECT.substitute(
                    cond="tracer_state->force_outplace",
                    true="\n".join(outplace),
                    false="\n".join(inplace),
                )
            ],
        )

    return "\n".join(trace_inputs)
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
# `torch.jit.trace` have undocumented keyword argument `_force_outplace`,
# which force jit to replace functions with outplace variants (for
# example `aten::add_` becomes `aten::add`).
#
# This replacement implemented in-place with minimum modifications of
# arguments stack (as it assumes that outplace call has the same arguments
# as inplace version).
#
# However there are no such substitutions available for `aten::fill_`
# and `aten::zero_` operators, as we never implemented `aten::fill`
# and `aten::zero`. So jit tracing hack replacing `aten::zero_` with
# `aten::zeros_like` and replacing `aten::fill_` with `aten::full_like`.
#
# But as they potentially can have different arguments, we also have
# to hack into the stack and add missing ones.
#
# A possible alternative would be:
#
#  - Add `aten::fill` and `aten::zero`
#
#  - Or keep `aten::zeros_like` arguments aligned with `aten::zero_`
#    arguments (inside of the `native_functions.yaml`)
RENAME_TRACE_ADD_ARGS = {
    "fill": """\
    jit::tracer::addInputs(node, "options", ::std::optional<ScalarType>());
    jit::tracer::addInputs(node, "options", layout_or_default(::std::nullopt));
    jit::tracer::addInputs(node, "options", device_or_default(::std::nullopt));
    jit::tracer::addInputs(node, "options", pinned_memory_or_default(::std::nullopt));
    ::std::optional<MemoryFormat> memory_format = c10::MemoryFormat::Preserve;
    jit::tracer::addInputs(node, "memory_format", memory_format);
""",
    "zero": """\
    jit::tracer::addInputs(node, "options", ::std::optional<ScalarType>());
    jit::tracer::addInputs(node, "options", layout_or_default(::std::nullopt));
    jit::tracer::addInputs(node, "options", device_or_default(::std::nullopt));
    jit::tracer::addInputs(node, "options", pinned_memory_or_default(::std::nullopt));
    ::std::optional<MemoryFormat> memory_format = c10::MemoryFormat::Preserve;
    jit::tracer::addInputs(node, "memory_format", memory_format);
""",
}
|
| 273 |
+
|
| 274 |
+
# Guards an in-place op under tracing: when the tracer forces out-of-place
# replay, the mutated input must be unique (not aliased).
INPLACE_GUARD = CodeTemplate(
    """\
jit::tracer::ensureUniqueIfOutOfPlaced("${name}", ${mutable_input});
"""
)
|
| 279 |
+
|
| 280 |
+
# Code emitted BEFORE the op runs: creates the trace node, records its inputs,
# and temporarily disables tracing so the op's internals aren't traced.
PRE_RECORD_TRACE = CodeTemplate(
    """\
torch::jit::Node* node = nullptr;
std::shared_ptr<jit::tracer::TracingState> tracer_state;
if (jit::tracer::isTracing()) {
  tracer_state = jit::tracer::getTracingState();
  at::Symbol op_name;
  ${set_op_name}
  node = tracer_state->createNode(op_name, /*num_outputs=*/0);
  jit::tracer::recordSourceLocation(node);
  ${add_trace_inputs}
  tracer_state->insertNode(node);
  ${inplace_guard}
  jit::tracer::setTracingState(nullptr);
}
"""
)
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def format_prerecord_trace(f: NativeFunction) -> str:
|
| 300 |
+
if not should_trace(f):
|
| 301 |
+
return ""
|
| 302 |
+
|
| 303 |
+
# TODO: clean up old codegen behavior
|
| 304 |
+
is_inplace = (
|
| 305 |
+
f.func.kind() in (SchemaKind.inplace, SchemaKind.out)
|
| 306 |
+
and not f.func.name.name.dunder_method
|
| 307 |
+
)
|
| 308 |
+
add_args = (
|
| 309 |
+
RENAME_TRACE_ADD_ARGS.get(f.func.name.name.base, "") if is_inplace else ""
|
| 310 |
+
)
|
| 311 |
+
additional_inputs = (
|
| 312 |
+
SELECT.substitute(
|
| 313 |
+
cond="tracer_state->force_outplace",
|
| 314 |
+
true=add_args,
|
| 315 |
+
false="",
|
| 316 |
+
)
|
| 317 |
+
if add_args
|
| 318 |
+
else ""
|
| 319 |
+
)
|
| 320 |
+
|
| 321 |
+
return PRE_RECORD_TRACE.substitute(
|
| 322 |
+
set_op_name=format_trace_op_name(f),
|
| 323 |
+
add_trace_inputs=format_trace_inputs(f) + additional_inputs,
|
| 324 |
+
inplace_guard=INPLACE_GUARD.substitute(
|
| 325 |
+
name=cpp.name(f.func),
|
| 326 |
+
mutable_input=f.func.arguments.out[0].name
|
| 327 |
+
if f.func.arguments.out
|
| 328 |
+
else "self",
|
| 329 |
+
)
|
| 330 |
+
if is_inplace
|
| 331 |
+
else "",
|
| 332 |
+
)
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
POST_RECORD_TRACE = CodeTemplate(
|
| 336 |
+
"""\
|
| 337 |
+
if (tracer_state) {
|
| 338 |
+
jit::tracer::setTracingState(std::move(tracer_state));
|
| 339 |
+
${add_trace_outputs}
|
| 340 |
+
}
|
| 341 |
+
"""
|
| 342 |
+
)
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def format_postrecord_trace(f: NativeFunction) -> str:
|
| 346 |
+
if not should_trace(f):
|
| 347 |
+
return ""
|
| 348 |
+
|
| 349 |
+
# For outplacing ops, *_out overloads require special handling to move the
|
| 350 |
+
# output *argument* to a return value
|
| 351 |
+
if f.func.is_out_fn():
|
| 352 |
+
output_names_outplace = [arg.name for arg in f.func.arguments.out]
|
| 353 |
+
output_names_inplace = cpp.return_names(f)
|
| 354 |
+
|
| 355 |
+
# Code size optimization: the common case is that the return value is
|
| 356 |
+
# the same for both variants
|
| 357 |
+
if output_names_outplace == output_names_inplace:
|
| 358 |
+
outputs = [
|
| 359 |
+
f"jit::tracer::addOutput(node, {n});" for n in output_names_outplace
|
| 360 |
+
]
|
| 361 |
+
return POST_RECORD_TRACE.substitute(add_trace_outputs=outputs)
|
| 362 |
+
|
| 363 |
+
selection = SELECT.substitute(
|
| 364 |
+
cond="force_outplace",
|
| 365 |
+
true="\n".join(
|
| 366 |
+
f"jit::tracer::addOutput(node, {n});" for n in output_names_outplace
|
| 367 |
+
),
|
| 368 |
+
false="\n".join(
|
| 369 |
+
f"jit::tracer::addOutput(node, {n});" for n in output_names_inplace
|
| 370 |
+
),
|
| 371 |
+
)
|
| 372 |
+
return POST_RECORD_TRACE.substitute(add_trace_outputs=selection)
|
| 373 |
+
else:
|
| 374 |
+
output_names = cpp.return_names(f)
|
| 375 |
+
outputs = [f"jit::tracer::addOutput(node, {n});" for n in output_names]
|
| 376 |
+
return POST_RECORD_TRACE.substitute(add_trace_outputs=outputs)
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def tie_return_values(f: NativeFunction) -> str:
|
| 380 |
+
if len(f.func.returns) == 1:
|
| 381 |
+
return f'auto {f.func.returns[0].name or "result"}'
|
| 382 |
+
names = cpp.return_names(f)
|
| 383 |
+
return f'auto [{", ".join(names)}]'
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def get_return_value(f: NativeFunction) -> str:
|
| 387 |
+
names = cpp.return_names(f)
|
| 388 |
+
if len(f.func.returns) == 1:
|
| 389 |
+
return names[0]
|
| 390 |
+
if f.func.kind() == SchemaKind.out:
|
| 391 |
+
return f'std::forward_as_tuple({", ".join(names)})'
|
| 392 |
+
else:
|
| 393 |
+
moved = ", ".join(f"std::move({name})" for name in names)
|
| 394 |
+
return f"std::make_tuple({moved})"
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
TRACE_DISPATCH = CodeTemplate(
|
| 398 |
+
"""\
|
| 399 |
+
${assign_return_values}at::_ops::${unambiguous_name}::redispatch(${unpacked_args});"""
|
| 400 |
+
)
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
def emit_trace_body(f: NativeFunction) -> list[str]:
|
| 404 |
+
trace_body: list[str] = []
|
| 405 |
+
|
| 406 |
+
trace_body.append(format_prerecord_trace(f))
|
| 407 |
+
|
| 408 |
+
dispatcher_sig = DispatcherSignature.from_schema(f.func)
|
| 409 |
+
dispatcher_exprs = dispatcher_sig.exprs()
|
| 410 |
+
|
| 411 |
+
# code-generated tracing kernels plumb and recompute dispatch keys directly through the kernel for performance.
|
| 412 |
+
# See Note [Plumbing Keys Through The Dispatcher] for details.
|
| 413 |
+
dispatch_key_set = "ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::Tracer)"
|
| 414 |
+
redispatch_args = ", ".join([dispatch_key_set] + [a.expr for a in dispatcher_exprs])
|
| 415 |
+
|
| 416 |
+
assign_return_values = (
|
| 417 |
+
f"{tie_return_values(f)} = "
|
| 418 |
+
if f.func.kind() in [SchemaKind.functional, SchemaKind.mutable]
|
| 419 |
+
and f.func.returns
|
| 420 |
+
else ""
|
| 421 |
+
)
|
| 422 |
+
|
| 423 |
+
# Note that this calls the slow, dispatching variants of manual_cpp_binding ops.
|
| 424 |
+
# We could probably work harder to ensure that the fast variants are
|
| 425 |
+
# called instead, but the perf benefit would be minimal.
|
| 426 |
+
trace_body.append(
|
| 427 |
+
TRACE_DISPATCH.substitute(
|
| 428 |
+
assign_return_values=assign_return_values,
|
| 429 |
+
unambiguous_name=f.func.name.unambiguous_name(),
|
| 430 |
+
unpacked_args=redispatch_args,
|
| 431 |
+
)
|
| 432 |
+
)
|
| 433 |
+
|
| 434 |
+
trace_body.append(format_postrecord_trace(f))
|
| 435 |
+
if f.func.returns:
|
| 436 |
+
trace_body.append(f"return {get_return_value(f)};")
|
| 437 |
+
return trace_body
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
METHOD_DEFINITION = CodeTemplate(
|
| 441 |
+
"""\
|
| 442 |
+
${return_type} ${type_wrapper_name}(${formals}) {
|
| 443 |
+
${type_definition_body}
|
| 444 |
+
}
|
| 445 |
+
"""
|
| 446 |
+
)
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
def type_wrapper_name(f: NativeFunction, key: str = "Default") -> str:
|
| 450 |
+
if f.func.name.overload_name:
|
| 451 |
+
name = f"{cpp.name(f.func)}_{f.func.name.overload_name}"
|
| 452 |
+
else:
|
| 453 |
+
name = cpp.name(f.func)
|
| 454 |
+
|
| 455 |
+
# The key argument is only used in gen_variable_type where we need fns per autograd dispatch key.
|
| 456 |
+
# In gen_trace_type and gen_inplace_view_type where only one fn per native_fn must be generated,
|
| 457 |
+
# the key argument should not be passed.
|
| 458 |
+
# We do not append key if it is Default so that generated functions from
|
| 459 |
+
# before per-dispatch-key derivatives were added retain the same names.
|
| 460 |
+
if key != "Default":
|
| 461 |
+
name = name + f"_{key}"
|
| 462 |
+
return name
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
@with_native_function
|
| 466 |
+
def method_definition(f: NativeFunction) -> str:
|
| 467 |
+
assert cpp.name(f.func) not in MANUAL_TRACER
|
| 468 |
+
|
| 469 |
+
formals = ", ".join(
|
| 470 |
+
# code-generated tracing kernels plumb and recompute dispatch keys directly through the kernel for performance.
|
| 471 |
+
# See Note [Plumbing Keys Through The Dispatcher] for details.
|
| 472 |
+
["c10::DispatchKeySet ks"]
|
| 473 |
+
+ [
|
| 474 |
+
f'{cpp.argument_type(a, binds="__placeholder__", symint=True).cpp_type()} {a.name}'
|
| 475 |
+
for a in f.func.schema_order_arguments()
|
| 476 |
+
]
|
| 477 |
+
)
|
| 478 |
+
|
| 479 |
+
return METHOD_DEFINITION.substitute(
|
| 480 |
+
return_type=cpp.returns_type(f.func.returns, symint=True).cpp_type(),
|
| 481 |
+
type_wrapper_name=type_wrapper_name(f),
|
| 482 |
+
formals=formals,
|
| 483 |
+
type_definition_body=emit_trace_body(f),
|
| 484 |
+
)
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
WRAPPER_REGISTRATION = CodeTemplate(
|
| 488 |
+
"""\
|
| 489 |
+
m.impl("${name}",
|
| 490 |
+
TORCH_FN(${class_type}::${type_wrapper_name})
|
| 491 |
+
);
|
| 492 |
+
"""
|
| 493 |
+
)
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
@with_native_function
|
| 497 |
+
def method_registration(f: NativeFunction) -> str:
|
| 498 |
+
assert cpp.name(f.func) not in MANUAL_TRACER
|
| 499 |
+
|
| 500 |
+
return WRAPPER_REGISTRATION.substitute(
|
| 501 |
+
name=f.func.name,
|
| 502 |
+
type_wrapper_name=type_wrapper_name(f),
|
| 503 |
+
class_type="TraceType",
|
| 504 |
+
)
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
def gen_trace_type_func(fn: NativeFunction) -> dict[str, list[str]]:
|
| 508 |
+
return {
|
| 509 |
+
"ops_headers": [f"#include <ATen/ops/{fn.root_name}_ops.h>"],
|
| 510 |
+
"trace_method_definitions": [method_definition(fn)],
|
| 511 |
+
"trace_wrapper_registrations": [method_registration(fn)],
|
| 512 |
+
}
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
def gen_trace_type(
|
| 516 |
+
out: str, native_functions: list[NativeFunction], template_path: str
|
| 517 |
+
) -> None:
|
| 518 |
+
# NOTE: see Note [Sharded File] at the top of the VariableType.cpp
|
| 519 |
+
# template regarding sharding of the generated files.
|
| 520 |
+
fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
|
| 521 |
+
fm.write_sharded(
|
| 522 |
+
"TraceType.cpp",
|
| 523 |
+
[fn for fn in native_functions if cpp.name(fn.func) not in MANUAL_TRACER],
|
| 524 |
+
key_fn=lambda fn: fn.root_name,
|
| 525 |
+
base_env={
|
| 526 |
+
"generated_comment": "@"
|
| 527 |
+
+ f"generated from {fm.template_dir_for_comments()}/TraceType.cpp",
|
| 528 |
+
},
|
| 529 |
+
env_callable=gen_trace_type_func,
|
| 530 |
+
num_shards=5,
|
| 531 |
+
sharded_keys={
|
| 532 |
+
"ops_headers",
|
| 533 |
+
"trace_method_definitions",
|
| 534 |
+
"trace_wrapper_registrations",
|
| 535 |
+
},
|
| 536 |
+
)
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_type.py
ADDED
|
@@ -0,0 +1,2180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generates VariableType.h/cpp
|
| 2 |
+
#
|
| 3 |
+
# **If any changes are being made to the VariableType codegen please also check
|
| 4 |
+
# if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp
|
| 5 |
+
#
|
| 6 |
+
# VariableType is a subclass of at::Type that provides the binding code
|
| 7 |
+
# necessary to provide a differentiable version of ATen operators. There are a
|
| 8 |
+
# number of different things we could mean:
|
| 9 |
+
#
|
| 10 |
+
# - Given a non-differentiable forward implementation, we might
|
| 11 |
+
# directly associate it with a backward implementation to make
|
| 12 |
+
# it differentiable. This is the common case.
|
| 13 |
+
#
|
| 14 |
+
# - Some functions don't need a backwards implementation, because
|
| 15 |
+
# backpropagation will never propagate beyond them. There are a
|
| 16 |
+
# number of different reasons why this may be the case:
|
| 17 |
+
#
|
| 18 |
+
# - The function has no differentiable inputs
|
| 19 |
+
# - The function's output is not differentiable
|
| 20 |
+
# - The function has no data dependency on its input
|
| 21 |
+
#
|
| 22 |
+
# - Some function don't need a backwards implementation because they
|
| 23 |
+
# are implemented as a composition of other (differentiable) ATen
|
| 24 |
+
# functions. These are dispatched directly to the Type superclass,
|
| 25 |
+
# which will in turn dispatch back to VariableType for its
|
| 26 |
+
# differentiable subcomponents.
|
| 27 |
+
#
|
| 28 |
+
|
| 29 |
+
from __future__ import annotations
|
| 30 |
+
|
| 31 |
+
import re
|
| 32 |
+
from typing import Callable, Sequence
|
| 33 |
+
|
| 34 |
+
from torchgen.api import cpp
|
| 35 |
+
from torchgen.api.autograd import (
|
| 36 |
+
DifferentiableInput,
|
| 37 |
+
dispatch_strategy,
|
| 38 |
+
ForwardDerivative,
|
| 39 |
+
gen_differentiable_outputs,
|
| 40 |
+
is_differentiable,
|
| 41 |
+
NativeFunctionWithDifferentiabilityInfo,
|
| 42 |
+
SavedAttribute,
|
| 43 |
+
)
|
| 44 |
+
from torchgen.api.types import (
|
| 45 |
+
ArrayRefCType,
|
| 46 |
+
BaseCppType,
|
| 47 |
+
BaseCType,
|
| 48 |
+
Binding,
|
| 49 |
+
DispatcherSignature,
|
| 50 |
+
intArrayRefT,
|
| 51 |
+
iTensorListRefT,
|
| 52 |
+
ListCType,
|
| 53 |
+
MutRefCType,
|
| 54 |
+
OptionalCType,
|
| 55 |
+
scalarT,
|
| 56 |
+
SpecialArgName,
|
| 57 |
+
stringT,
|
| 58 |
+
symIntArrayRefT,
|
| 59 |
+
TENSOR_LIST_LIKE_CTYPES,
|
| 60 |
+
tensorListT,
|
| 61 |
+
tensorT,
|
| 62 |
+
TupleCType,
|
| 63 |
+
VectorCType,
|
| 64 |
+
)
|
| 65 |
+
from torchgen.code_template import CodeTemplate
|
| 66 |
+
from torchgen.context import (
|
| 67 |
+
native_function_manager,
|
| 68 |
+
with_native_function,
|
| 69 |
+
with_native_function_and,
|
| 70 |
+
)
|
| 71 |
+
from torchgen.model import (
|
| 72 |
+
Argument,
|
| 73 |
+
BaseType,
|
| 74 |
+
ListType,
|
| 75 |
+
NativeFunction,
|
| 76 |
+
SchemaKind,
|
| 77 |
+
SelfArgument,
|
| 78 |
+
TensorOptionsArguments,
|
| 79 |
+
)
|
| 80 |
+
from torchgen.utils import FileManager, mapMaybe
|
| 81 |
+
|
| 82 |
+
from .context import with_native_function_with_differentiability_info_and_key
|
| 83 |
+
from .gen_inplace_or_view_type import (
|
| 84 |
+
ALL_VIEW_FUNCTIONS,
|
| 85 |
+
ASSIGN_RETURN_VALUE,
|
| 86 |
+
AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION,
|
| 87 |
+
gen_formals,
|
| 88 |
+
get_base_name,
|
| 89 |
+
get_view_info,
|
| 90 |
+
is_tensor_list_type,
|
| 91 |
+
is_tensor_type,
|
| 92 |
+
METHOD_DEFINITION,
|
| 93 |
+
modifies_arguments,
|
| 94 |
+
TMP_VAR,
|
| 95 |
+
unpack_args,
|
| 96 |
+
unpacked_name,
|
| 97 |
+
use_derived,
|
| 98 |
+
WRAPPER_REGISTRATION,
|
| 99 |
+
)
|
| 100 |
+
from .gen_trace_type import (
|
| 101 |
+
get_return_value,
|
| 102 |
+
MANUAL_AUTOGRAD_AND_TRACER,
|
| 103 |
+
MANUAL_BACKEND,
|
| 104 |
+
tie_return_values,
|
| 105 |
+
type_wrapper_name,
|
| 106 |
+
)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
# We don't set or modify grad_fn on these methods. Generally, they return
|
| 110 |
+
# tensors that have requires_grad=False. In-place functions listed here will
|
| 111 |
+
# not examine or modify requires_grad or grad_fn.
|
| 112 |
+
# NB: this does NOT include overload name
|
| 113 |
+
DONT_REQUIRE_DERIVATIVE = {
|
| 114 |
+
# These only depend on the input Tensor's shape and device, not the data
|
| 115 |
+
"empty_like",
|
| 116 |
+
"ones_like",
|
| 117 |
+
"full_like",
|
| 118 |
+
"zeros_like",
|
| 119 |
+
"rand_like",
|
| 120 |
+
"randn_like",
|
| 121 |
+
"new_empty",
|
| 122 |
+
"new_empty_strided",
|
| 123 |
+
"new_full",
|
| 124 |
+
"new_zeros",
|
| 125 |
+
"new_ones",
|
| 126 |
+
# These are only implemented on integral types
|
| 127 |
+
"__and__",
|
| 128 |
+
"__iand__",
|
| 129 |
+
"__ilshift__",
|
| 130 |
+
"__ior__",
|
| 131 |
+
"__irshift__",
|
| 132 |
+
"__ixor__",
|
| 133 |
+
"__lshift__",
|
| 134 |
+
"__or__",
|
| 135 |
+
"__rshift__",
|
| 136 |
+
"__xor__",
|
| 137 |
+
# These work on integral data types, and hence don't require derivative
|
| 138 |
+
"_sobol_engine_draw",
|
| 139 |
+
"_sobol_engine_ff",
|
| 140 |
+
"_sobol_engine_scramble_",
|
| 141 |
+
"_sobol_engine_initialize_state_",
|
| 142 |
+
# This is an unsafe method that is meant to be out of reach of autograd.
|
| 143 |
+
"_coalesced_",
|
| 144 |
+
# Quantize functions should not record gradients
|
| 145 |
+
"quantize_per_tensor",
|
| 146 |
+
"quantize_per_channel",
|
| 147 |
+
# Functions that return integers should not have output that require gradients
|
| 148 |
+
"argmax",
|
| 149 |
+
"argmin",
|
| 150 |
+
"argsort",
|
| 151 |
+
"searchsorted",
|
| 152 |
+
"bucketize",
|
| 153 |
+
# Functions that return booleans are not differentiable
|
| 154 |
+
"isnan",
|
| 155 |
+
"isposinf",
|
| 156 |
+
"isneginf",
|
| 157 |
+
"isinf",
|
| 158 |
+
"signbit",
|
| 159 |
+
"isin",
|
| 160 |
+
"allclose",
|
| 161 |
+
# Functions return none are not differentiable
|
| 162 |
+
"record_stream",
|
| 163 |
+
# These functions are not differentiable
|
| 164 |
+
"logical_and",
|
| 165 |
+
"logical_xor",
|
| 166 |
+
"logical_not",
|
| 167 |
+
"logical_or",
|
| 168 |
+
# This function returns nested_tensor shape as a tensor that is non-differentiable
|
| 169 |
+
"_nested_tensor_size",
|
| 170 |
+
"_nested_tensor_strides",
|
| 171 |
+
"_nested_tensor_storage_offsets",
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
# The C -> R functions at the time of adding this are still being audited and tested
|
| 175 |
+
# but will not error out.
|
| 176 |
+
# C -> C, R -> C functions for which backward is correctly implemented and tested
|
| 177 |
+
GRADIENT_IMPLEMENTED_FOR_COMPLEX = {
|
| 178 |
+
"fill",
|
| 179 |
+
"t",
|
| 180 |
+
"t_copy",
|
| 181 |
+
"view",
|
| 182 |
+
"reshape",
|
| 183 |
+
"reshape_as",
|
| 184 |
+
"view_as",
|
| 185 |
+
"view_copy",
|
| 186 |
+
"roll",
|
| 187 |
+
"clone",
|
| 188 |
+
"block_diag",
|
| 189 |
+
"diag_embed",
|
| 190 |
+
"repeat",
|
| 191 |
+
"expand",
|
| 192 |
+
"expand_copy",
|
| 193 |
+
"flip",
|
| 194 |
+
"fliplr",
|
| 195 |
+
"flipud",
|
| 196 |
+
"rot90",
|
| 197 |
+
"nanmean",
|
| 198 |
+
"nansum",
|
| 199 |
+
"transpose",
|
| 200 |
+
"permute",
|
| 201 |
+
"squeeze",
|
| 202 |
+
"unsqueeze",
|
| 203 |
+
"unsqueeze_copy",
|
| 204 |
+
"resize",
|
| 205 |
+
"resize_as",
|
| 206 |
+
"tril",
|
| 207 |
+
"triu",
|
| 208 |
+
"chunk",
|
| 209 |
+
"zero_",
|
| 210 |
+
"eq_",
|
| 211 |
+
"ne_",
|
| 212 |
+
"add",
|
| 213 |
+
"__radd__",
|
| 214 |
+
"sum",
|
| 215 |
+
"_conj",
|
| 216 |
+
"sin",
|
| 217 |
+
"cos",
|
| 218 |
+
"mul",
|
| 219 |
+
"sinc",
|
| 220 |
+
"sinh",
|
| 221 |
+
"cosh",
|
| 222 |
+
"__rmul__",
|
| 223 |
+
"sgn",
|
| 224 |
+
"asin",
|
| 225 |
+
"acos",
|
| 226 |
+
"sub",
|
| 227 |
+
"div",
|
| 228 |
+
"cat",
|
| 229 |
+
"view_as_complex",
|
| 230 |
+
"index_put",
|
| 231 |
+
"neg",
|
| 232 |
+
"complex",
|
| 233 |
+
"select",
|
| 234 |
+
"where",
|
| 235 |
+
"as_strided",
|
| 236 |
+
"as_strided_copy",
|
| 237 |
+
"as_strided_scatter",
|
| 238 |
+
"slice",
|
| 239 |
+
"constant_pad_nd",
|
| 240 |
+
"unbind",
|
| 241 |
+
"split",
|
| 242 |
+
"split_with_sizes",
|
| 243 |
+
"unsafe_split",
|
| 244 |
+
"split_with_sizes_backward",
|
| 245 |
+
"dot",
|
| 246 |
+
"vdot",
|
| 247 |
+
"cholesky",
|
| 248 |
+
"triangular_solve",
|
| 249 |
+
"mm",
|
| 250 |
+
"_unsafe_view",
|
| 251 |
+
"mv",
|
| 252 |
+
"outer",
|
| 253 |
+
"bmm",
|
| 254 |
+
"diagonal",
|
| 255 |
+
"alias",
|
| 256 |
+
"atan",
|
| 257 |
+
"log",
|
| 258 |
+
"log10",
|
| 259 |
+
"log1p",
|
| 260 |
+
"log2",
|
| 261 |
+
"logaddexp",
|
| 262 |
+
"logsumexp",
|
| 263 |
+
"logcumsumexp",
|
| 264 |
+
"reciprocal",
|
| 265 |
+
"tan",
|
| 266 |
+
"pow",
|
| 267 |
+
"rsqrt",
|
| 268 |
+
"tanh",
|
| 269 |
+
"tanh_backward",
|
| 270 |
+
"asinh",
|
| 271 |
+
"acosh",
|
| 272 |
+
"atanh",
|
| 273 |
+
"take",
|
| 274 |
+
"fill_",
|
| 275 |
+
"exp",
|
| 276 |
+
"exp2",
|
| 277 |
+
"expm1",
|
| 278 |
+
"nonzero",
|
| 279 |
+
"mean",
|
| 280 |
+
"std_mean",
|
| 281 |
+
"var_mean",
|
| 282 |
+
"inverse",
|
| 283 |
+
"solve",
|
| 284 |
+
"linalg_cholesky",
|
| 285 |
+
"addcmul",
|
| 286 |
+
"addcdiv",
|
| 287 |
+
"matrix_exp",
|
| 288 |
+
"linalg_matrix_exp",
|
| 289 |
+
"_linalg_eigh",
|
| 290 |
+
"cholesky_solve",
|
| 291 |
+
"linalg_qr",
|
| 292 |
+
"_linalg_svd",
|
| 293 |
+
"_fft_c2c",
|
| 294 |
+
"_fft_r2c",
|
| 295 |
+
"linalg_solve",
|
| 296 |
+
"sqrt",
|
| 297 |
+
"stack",
|
| 298 |
+
"gather",
|
| 299 |
+
"index_select",
|
| 300 |
+
"index_add_",
|
| 301 |
+
"linalg_inv",
|
| 302 |
+
"linalg_inv_ex",
|
| 303 |
+
"baddbmm",
|
| 304 |
+
"addbmm",
|
| 305 |
+
"addmm",
|
| 306 |
+
"addmv",
|
| 307 |
+
"addr",
|
| 308 |
+
"linalg_householder_product",
|
| 309 |
+
"ormqr",
|
| 310 |
+
"reflection_pad1d",
|
| 311 |
+
"reflection_pad2d",
|
| 312 |
+
"reflection_pad3d",
|
| 313 |
+
"linalg_cholesky_ex",
|
| 314 |
+
"linalg_eig",
|
| 315 |
+
"diagonal_copy",
|
| 316 |
+
"diagonal_scatter",
|
| 317 |
+
"alias_copy",
|
| 318 |
+
"select_backward",
|
| 319 |
+
"diagonal_backward",
|
| 320 |
+
"slice_backward",
|
| 321 |
+
"reflection_pad1d_backward",
|
| 322 |
+
"reflection_pad2d_backward",
|
| 323 |
+
"reflection_pad3d_backward",
|
| 324 |
+
"_sparse_sparse_matmul",
|
| 325 |
+
"replication_pad1d",
|
| 326 |
+
"replication_pad2d",
|
| 327 |
+
"replication_pad3d",
|
| 328 |
+
"put",
|
| 329 |
+
"put_",
|
| 330 |
+
"_to_copy",
|
| 331 |
+
"replication_pad1d_backward",
|
| 332 |
+
"replication_pad2d_backward",
|
| 333 |
+
"replication_pad3d_backward",
|
| 334 |
+
"diag",
|
| 335 |
+
"masked_scatter",
|
| 336 |
+
"masked_select",
|
| 337 |
+
"index_add",
|
| 338 |
+
"index_fill",
|
| 339 |
+
"trace",
|
| 340 |
+
"polar",
|
| 341 |
+
"cumsum",
|
| 342 |
+
"rsub",
|
| 343 |
+
"eig",
|
| 344 |
+
"lerp",
|
| 345 |
+
"linalg_vector_norm",
|
| 346 |
+
"cumprod",
|
| 347 |
+
"prod",
|
| 348 |
+
"index_copy",
|
| 349 |
+
"lu",
|
| 350 |
+
"unfold",
|
| 351 |
+
"unfold_backward",
|
| 352 |
+
"index",
|
| 353 |
+
"masked_fill",
|
| 354 |
+
"masked_scatter_backward",
|
| 355 |
+
"linalg_cross",
|
| 356 |
+
"lu_unpack",
|
| 357 |
+
"renorm",
|
| 358 |
+
"_conj_physical",
|
| 359 |
+
"linalg_lu_factor_ex",
|
| 360 |
+
"scatter",
|
| 361 |
+
"scatter_add",
|
| 362 |
+
"sigmoid",
|
| 363 |
+
"sigmoid_backward",
|
| 364 |
+
"sparse_mask",
|
| 365 |
+
"trapezoid",
|
| 366 |
+
"cumulative_trapezoid",
|
| 367 |
+
"conj_physical_",
|
| 368 |
+
"_neg_view",
|
| 369 |
+
"_reshape_alias",
|
| 370 |
+
"_reshape_copy",
|
| 371 |
+
"_linalg_det",
|
| 372 |
+
"lu_solve",
|
| 373 |
+
"linalg_solve_triangular",
|
| 374 |
+
"linalg_pinv",
|
| 375 |
+
"linalg_lstsq",
|
| 376 |
+
"unfold_copy",
|
| 377 |
+
"col2im",
|
| 378 |
+
"im2col",
|
| 379 |
+
"cholesky_inverse",
|
| 380 |
+
"to_sparse",
|
| 381 |
+
"sparse_sampled_addmm",
|
| 382 |
+
"linalg_lu",
|
| 383 |
+
"pixel_shuffle",
|
| 384 |
+
"pixel_unshuffle",
|
| 385 |
+
"channel_shuffle",
|
| 386 |
+
"linalg_lu_solve",
|
| 387 |
+
"_linalg_slogdet",
|
| 388 |
+
"_linalg_solve_ex",
|
| 389 |
+
"_unsafe_index",
|
| 390 |
+
"_unsafe_index_put",
|
| 391 |
+
"_unsafe_masked_index",
|
| 392 |
+
"_unsafe_masked_index_put_accumulate",
|
| 393 |
+
}
|
| 394 |
+
|
| 395 |
+
# Sparse-tensor ops whose backward formulas additionally support complex dtypes.
GRADIENT_IMPLEMENTED_FOR_SPARSE_COMPLEX = {
    "_to_dense",
    "_coalesce",
    "coalesce",
    "values",
    "_sparse_coo_tensor_with_dims_and_tensors",
    "_sparse_addmm",
}

# Merge the sparse allow-list into the main complex-autograd allow-list so a
# single membership test covers both dense and sparse ops.
GRADIENT_IMPLEMENTED_FOR_COMPLEX.update(GRADIENT_IMPLEMENTED_FOR_SPARSE_COMPLEX)

# Some operators invalidate the grad_accumulator. Let's reset it.
RESET_GRAD_ACCUMULATOR = {"set_", "resize_"}
|
| 408 |
+
|
| 409 |
+
# NOTE [ TensorImpl and Storage Pointer Sanity Checks ]
#
# We check the following properties:
# 1) A function should never change the input tensors' underlying c10::TensorImpl
#    pointers or c10::Storage pointers, even if it modifies its input tensors (via
#    inplace or out-variants)
# If the function does not modify its arguments, we also check the following properties
# pertaining to its output:
# 2) Its TensorImpl has use_count of 1
# 3) If the function is a view function, it has the same StorageImpl as that of
#    the input it is aliased with. Otherwise, its StorageImpl has use_count of 1
#
# The following code templates implement the checks for this invariant:

# Snapshot a tensor's Storage (if any) before the op runs, for later comparison.
SAVE_TENSOR_STORAGE = CodeTemplate(
    """\
auto ${tensor_name}_storage_saved =
  ${tensor_name}.has_storage() ? ::std::optional<Storage>(${tensor_name}.storage()) : ::std::nullopt;
"""
)


# If tensor_name == out_tensor_name, used to enforce (1), otherwise used for (2)
ENFORCE_SAME_TENSOR_STORAGE = CodeTemplate(
    """\
if (${tensor_name}_storage_saved.has_value() &&
    !at::impl::dispatch_mode_enabled() &&
    !at::impl::tensor_has_dispatch(${tensor_name}) &&
    !at::impl::tensor_has_dispatch(${out_tensor_name}))
  TORCH_INTERNAL_ASSERT(${tensor_name}_storage_saved.value().is_alias_of(${out_tensor_name}.storage()));
"""
)

# Same snapshot as SAVE_TENSOR_STORAGE, but for every tensor in a TensorList.
SAVE_TENSORLIST_STORAGE = CodeTemplate(
    """\
std::vector<::std::optional<Storage>> ${tensorlist_name}_storage_saved(${tensorlist_name}.size());
for (const Tensor& tensor : ${tensorlist_name})
  ${tensorlist_name}_storage_saved.push_back(
    tensor.has_storage() ? ::std::optional<Storage>(tensor.storage()) : ::std::nullopt);
"""
)

# Element-wise check that each list member still aliases its saved storage.
ENFORCE_SAME_TENSORLIST_STORAGE = CodeTemplate(
    """\
for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
  if (${tensorlist_name}_storage_saved[i].has_value() && !at::impl::tensorlist_has_dispatch(${tensorlist_name}))
    TORCH_INTERNAL_ASSERT(${tensorlist_name}_storage_saved[i].value().is_alias_of(${tensorlist_name}[i].storage()));
}
"""
)

# Variant of the list snapshot for c10::List<::std::optional<Tensor>> arguments.
SAVE_OPTIONALTENSORLIST_STORAGE = CodeTemplate(
    """\
std::vector<::std::optional<Storage>> ${tensorlist_name}_storage_saved(${tensorlist_name}.size());
for (const ::std::optional<Tensor>& tensor : ${tensorlist_name})
  ${tensorlist_name}_storage_saved.push_back(
    tensor.has_value() && tensor->has_storage() ? ::std::optional<Storage>(tensor->storage()) : ::std::nullopt);
"""
)

# Element-wise storage-aliasing check for the optional-tensor-list variant.
ENFORCE_SAME_OPTIONALTENSORLIST_STORAGE = CodeTemplate(
    """\
for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
  if (${tensorlist_name}_storage_saved[i].has_value() && !at::impl::tensorlist_has_dispatch(${tensorlist_name}))
    TORCH_INTERNAL_ASSERT(${tensorlist_name}_storage_saved[i].value().is_alias_of(
        static_cast<::std::optional<Tensor>>(${tensorlist_name}[i])->storage()));
}
"""
)
|
| 477 |
+
|
| 478 |
+
# Snapshot a tensor's TensorImpl pointer (if defined) before the op runs.
SAVE_TENSOR_IMPL = CodeTemplate(
    """\
c10::intrusive_ptr<TensorImpl> ${tensor_name}_impl_saved;
if (${tensor_name}.defined()) ${tensor_name}_impl_saved = ${tensor_name}.getIntrusivePtr();
"""
)

# Enforce property (1) above: the input's TensorImpl pointer is unchanged.
ENFORCE_SAME_TENSOR_IMPL = CodeTemplate(
    """\
if (${tensor_name}_impl_saved && !at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name}))
  TORCH_INTERNAL_ASSERT(${tensor_name}_impl_saved == ${tensor_name}.getIntrusivePtr());
"""
)

# Enforce property (2) above: a freshly returned tensor has use_count <= 1.
ENFORCE_TENSOR_IMPL_USE_COUNT_LT_OR_EQ_ONE = CodeTemplate(
    """\
if (!at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name}))
  TORCH_INTERNAL_ASSERT(${tensor_name}.use_count() <= 1, "function: ${fn_name}");
"""
)

# Enforce property (3) above for non-view outputs: fresh StorageImpl.
ENFORCE_TENSOR_STORAGE_USE_COUNT_EQUALS_ONE = CodeTemplate(
    """\
if (${tensor_name}.has_storage() && !at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name})) {
  TORCH_INTERNAL_ASSERT(${tensor_name}.storage().use_count() == 1, "function: ${fn_name}");
}
"""
)

# TensorImpl snapshot for every member of a TensorList argument.
SAVE_TENSORLIST_IMPL = CodeTemplate(
    """\
std::vector<c10::intrusive_ptr<TensorImpl>> ${tensorlist_name}_impl_saved(${tensorlist_name}.size());
for (size_t i=0; i<${tensorlist_name}.size(); i++)
  if (${tensorlist_name}[i].defined()) ${tensorlist_name}_impl_saved[i] = ${tensorlist_name}[i].getIntrusivePtr();
"""
)

# Element-wise TensorImpl-identity check for TensorList arguments.
ENFORCE_SAME_TENSORLIST_IMPL = CodeTemplate(
    """\
for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
  if (${tensorlist_name}_impl_saved[i] && !at::impl::tensorlist_has_dispatch(${tensorlist_name}))
    TORCH_INTERNAL_ASSERT(${tensorlist_name}_impl_saved[i] == ${tensorlist_name}[i].getIntrusivePtr());
}
"""
)

# TensorImpl snapshot for c10::List<::std::optional<Tensor>> arguments.
SAVE_OPTIONALTENSORLIST_IMPL = CodeTemplate(
    """\
std::vector<c10::intrusive_ptr<TensorImpl>> ${tensorlist_name}_impl_saved(${tensorlist_name}.size());
for (size_t i=0; i<${tensorlist_name}.size(); i++) {
  ::std::optional<Tensor> t = ${tensorlist_name}[i];
  if (t.has_value() && t->defined()) ${tensorlist_name}_impl_saved[i] = t->getIntrusivePtr();
}
"""
)

# Element-wise TensorImpl-identity check for the optional-tensor-list variant.
ENFORCE_SAME_OPTIONALTENSORLIST_IMPL = CodeTemplate(
    """\
for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
  if (${tensorlist_name}_impl_saved[i])
    TORCH_INTERNAL_ASSERT(
      ${tensorlist_name}_impl_saved[i] == static_cast<::std::optional<Tensor>>(${tensorlist_name}[i])->getIntrusivePtr());
}
"""
)
|
| 543 |
+
|
| 544 |
+
# The following list contains functions that we don't enforce the invariant on.
DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE = {
    # These functions are expected to change impl or storage of input tensors
    "set_",
    "_cudnn_rnn_flatten_weight",
    "_unsafe_masked_index",
    "_unsafe_masked_index_put_accumulate",
}
DONT_ENFORCE_TENSOR_IMPL_USE_COUNT = {
    # These non-inplace, non-out functions return tensors with use_count > 1
    # Therefore, they MAY (but not necessarily) return one of its inputs as-is
    # See https://github.com/pytorch/pytorch/issues/60426 for more information
    "_embedding_bag",
    "_embedding_bag_forward_only",
    "q_per_channel_scales",
    "q_per_channel_zero_points",
    "lu_unpack",
    "_cudnn_rnn_backward",
    # The below failed StorageImpl use_count check but we skip tensor_impl check
    # just in case
    "_cudnn_rnn",
    "dequantize_self",
    # lift() should never actually be called with a requires_grad=True tensor,
    "lift",
    "lift_fresh",
    "lift_fresh_copy",
    # Nested Tensors related functions
    # _nested_tensor_size() should never actually be called with requires_grad=True tensor
    "_nested_tensor_size",
    "_nested_tensor_strides",
    "_nested_tensor_storage_offsets",
}

DONT_ENFORCE_STORAGE_IMPL_USE_COUNT = {
    # These non-view functions return tensors with storage use_count != 1
    "_slow_conv2d_forward",
    "slow_conv3d_forward",
    "channel_shuffle",
    # If an input is returned as-is in output, we cannot guarantee its storage_impl
    # use count to be 1 either.
    *DONT_ENFORCE_TENSOR_IMPL_USE_COUNT,
}
# END CHECKS FOR [ TensorImpl and Storage Pointer Sanity Checks ]
|
| 587 |
+
|
| 588 |
+
# Declares the shared_ptr that will hold this op's backward node.
DECLARE_GRAD_FN = CodeTemplate(
    """\
std::shared_ptr<${op}> grad_fn;
"""
)

# Foreach variant: one backward node per list element.
DECLARE_VECTOR_OF_GRAD_FN = CodeTemplate(
    """\
std::vector<std::shared_ptr<${op}>> grad_fns;
"""
)

# Computes whether any differentiable argument requires grad; guards all
# subsequent autograd bookkeeping.
SETUP_ANY_REQUIRES_GRAD = CodeTemplate(
    """\
[[maybe_unused]] auto _any_requires_grad = compute_requires_grad( ${args_with_derivatives} );
${extra_differentiability_conditions}
"""
)

# Wraps grad_fn construction so it only runs when grad is actually needed.
SETUP_DERIVATIVE = CodeTemplate(
    """\
if (_any_requires_grad) {
  ${setup}
}
"""
)

# Used for out= overloads, which have no derivative formula: error out if any
# checked argument requires grad.
SETUP_NONE_REQUIRES_GRAD = CodeTemplate(
    """\
if (compute_requires_grad( ${args_to_check} )) {
  throw_error_out_requires_grad("${base_name}");
}
"""
)

# Constructs the backward node and wires its input edges.
ASSIGN_GRAD_FN = CodeTemplate(
    """\
grad_fn = std::shared_ptr<${op}>(new ${op}(${op_ctor}), deleteNode);
grad_fn->set_next_edges(collect_next_edges( ${args_with_derivatives} ));
"""
)
|
| 629 |
+
|
| 630 |
+
# note(crcrpar): `compute_requires_grad` in the template below is supplied with arguments indexed with `i`
# while the `SETUP_ANY_REQUIRES_GRAD` above takes whole tensors and scalars.
ASSIGN_VECTOR_OF_GRAD_FN = CodeTemplate(
    """\
for (const auto& i : c10::irange( ${irange} )) {
  const auto ith_requires_grad = compute_requires_grad(${args_with_derivatives});
  check_inplace(self[i], ith_requires_grad);
  grad_fns.push_back([&]() -> std::shared_ptr<${op}> {
    if (!ith_requires_grad) {
      return nullptr;
    } else {
      auto grad_fn = std::shared_ptr<${op}>(new ${op}(${op_ctor}), deleteNode);
      grad_fn->set_next_edges(collect_next_edges( ${args_with_derivatives} ));
      return grad_fn;
    }
  }());
}
"""
)

# Redispatches past the Autograd key to the underlying kernel.
CALL_REDISPATCH = CodeTemplate(
    """\
at::redispatch::${api_name}(${unpacked_args})"""
)
# If the non-variable operation has return values, we use the `tmp` variable to hold the
# values temporarily and pass the values to the return variables outside of the
# `at::AutoDispatchBelowAutograd` guard block.
DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES_JVP_DECOMP = CodeTemplate(
    """\
auto ${tmp_var} = ([&]() {
  if (${any_has_forward_grad}) {
    static c10::OperatorName full_name("aten::${op_name}", "${op_overload}");
    static ::std::optional<c10::OperatorHandle> opt_op = c10::Dispatcher::singleton().findSchema(full_name);
    return impl::run_jit_decomposition_with_args_for_jvp<${return_types}>("${op_name}", *opt_op, ks, ${arg_names});
  } else {
    ${guard}
    return ${base_type_call};
  }
})();
"""
)

# Same as above but without the forward-AD JIT-decomposition branch.
DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES = CodeTemplate(
    """\
auto ${tmp_var} = ([&]() {
  ${guard}
  return ${base_type_call};
})();
"""
)

# Void-returning ops only need the guard scope, no tmp variable.
DISPATCH_TO_NON_VAR_TYPE_WITHOUT_RETURN_VALUES = CodeTemplate(
    """\
{
  ${guard}
  ${base_type_call};
}
"""
)

# Attaches the grad_fn to the differentiable outputs (set_history/rebase_history).
SET_HISTORY = CodeTemplate(
    """\
if (grad_fn) {
    ${fn}_history(${differentiable_outputs}, grad_fn);
}
"""
)

# Foreach variant of post-call bookkeeping: run ${statements} for each
# non-null per-element grad_fn.
LOOP_OVER_VECTOR_OF_GRAD_FNS = CodeTemplate(
    """\
if (!grad_fns.empty()) {
  ${preamble}
  for (const auto& i : c10::irange(grad_fns.size())) {
    auto grad_fn = grad_fns[i];
    if (grad_fn != nullptr) {
      ${statements}
    }
  }
}
"""
)
|
| 711 |
+
|
| 712 |
+
# Generic guard: emit ${statements} under an arbitrary C++ condition.
CONDITIONAL = CodeTemplate(
    """\
if (${cond}) {
  ${statements}
}
"""
)

# Emit ${statements} only in debug builds (compiled out under NDEBUG).
RUN_ONLY_IN_DEBUG_MODE = CodeTemplate(
    """\
#ifndef NDEBUG
${statements}
#endif
"""
)
|
| 727 |
+
|
| 728 |
+
# Expression fragment: does this input have a defined forward grad (tangent)?
FW_DERIVATIVE_CHECK_TEMPLATE = CodeTemplate(
    """\
isFwGradDefined(${req_inp})\
"""
)
# Guards foreach forward-AD formulas against mismatched list lengths.
FW_DERIVATIVE_SIZE_CHECK_TEMPLATE = CodeTemplate(
    """\
TORCH_CHECK(
    self.size() == ${inp_name}.size(),
    "Tensor lists must have the same number of tensors, got ",
    self.size(),
    " and ",
    ${inp_name}.size());
"""
)

# Expression fragment: does any tensor in this list have a defined forward grad?
FW_DERIVATIVE_TENSORLIST_CHECK_TEMPLATE = CodeTemplate(
    """\
isFwGradDefinedTensorList(${req_inp})\
"""
)

# Unpacks an input's tangent (_t), materializing zeros when the tangent is
# undefined but the primal is defined.
FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE = CodeTemplate(
    """\
auto ${inp_name}_t_raw = toNonOptFwGrad(${inp});
auto ${inp_name}_tensor = toNonOptTensor(${inp});
auto ${inp_name}_t = (${inp_name}_t_raw.defined() || !${inp_name}_tensor.defined())
  ? ${inp_name}_t_raw : at::${zeros_fn}(${inp_name}_tensor.sym_sizes(), ${inp_name}_tensor.options());
"""
)

# Unpacks an input's primal (_p) for use in forward-AD formulas.
FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE = CodeTemplate(
    """\
auto ${inp_name}_p = toNonOptPrimal(${inp});
"""
)

# Installs the computed forward grad on a single Tensor output.
FW_DERIVATIVE_SETTER_TENSOR = CodeTemplate(
    """\
if (${out_arg}_new_fw_grad_opt.has_value() && ${out_arg}_new_fw_grad_opt.value().defined() && ${out_arg}.defined()) {
  // The hardcoded 0 here will need to be updated once we support multiple levels.
  ${out_arg}._set_fw_grad(${out_arg}_new_fw_grad_opt.value(), /* level */ 0, /* is_inplace_op */ ${is_inplace});
}
"""
)

# Foreach variant: install one forward grad per output list element.
FW_DERIVATIVE_SETTER_TENSOR_FOREACH = CodeTemplate(
    """\
for (const auto& i : c10::irange(${out_arg}_new_fw_grad_opts.size())) {
  auto& ${out_arg}_new_fw_grad_opt = ${out_arg}_new_fw_grad_opts[i];
  if (${out_arg}_new_fw_grad_opt.has_value() && ${out_arg}_new_fw_grad_opt.value().defined() && ${out_arg}[i].defined()) {
    // The hardcoded 0 here will need to be updated once we support multiple levels.
    ${out_arg}[i]._set_fw_grad(${out_arg}_new_fw_grad_opt.value(), /* level */ 0, /* is_inplace_op */ ${is_inplace});
  }
}
"""
)

# Multi-output ops: the formula yields a tuple; pick element ${idx} for this output.
FW_DERIVATIVE_SETTER_MULTI_OUTPUT = CodeTemplate(
    """\
if (${all_res}_new_fw_grad_opt.has_value() && std::get<${idx}>(${all_res}_new_fw_grad_opt.value()).defined()
    && ${out_arg}.defined()) {
  ${out_arg}._set_fw_grad(std::get<${idx}>(${all_res}_new_fw_grad_opt.value()), /* level */ 0, /* is_inplace_op */ false);
}
"""
)

# TensorList output: the formula yields a list of tangents, one per element.
FW_DERIVATIVE_SETTER_TENSOR_LIST = CodeTemplate(
    """\
if (${out_arg}_new_fw_grad_opt.has_value()) {
  auto ${out_arg}_new_fw_grad = ${out_arg}_new_fw_grad_opt.value();
  TORCH_INTERNAL_ASSERT(${out_arg}.size() == ${out_arg}_new_fw_grad.size());
  for (const auto i : c10::irange(${out_arg}.size())) {
    if (${out_arg}_new_fw_grad[i].defined() && ${out_arg}[i].defined()) {
      // The hardcoded 0 here will need to be updated once we support multiple levels.
      ${out_arg}[i]._set_fw_grad(${out_arg}_new_fw_grad[i], /* level */ 0, /* is_inplace_op */ ${is_inplace});
    }
  }
}
"""
)

# Main forward-AD block: evaluate ${formula} when any required input has a tangent.
FW_DERIVATIVE_TEMPLATE = CodeTemplate(
    """\
${fw_grad_opt_definition}
if (${requires_fw_grad}) {
    ${unpacked_arguments}
    ${out_arg}_new_fw_grad_opt = ${formula};
}
"""
)

# Foreach main forward-AD block: evaluate the formula element-wise.
FW_DERIVATIVE_FOREACH_TEMPLATE = CodeTemplate(
    """\
${fw_grad_opt_definition}
for (const auto& i : c10::irange(${vector_of_optional_tensor}.size())) {
  if (${any_has_forward_grad_for_current_index}) {
      ${unpacked_arguments}
      ${vector_of_optional_tensor}[i] = ${formula};
  }
}
"""
)

# Raise at runtime if forward AD is attempted on an op without a formula.
FW_DERIVATIVE_FORBID_TEMPLATE = CodeTemplate(
    """\
TORCH_CHECK_NOT_IMPLEMENTED(!(${cond}), "Trying to use forward AD with ${name} that does not support it ${msg}");
"""
)

# List variant of the forbid check: test each element of ${arg}.
FW_DERIVATIVE_FORBID_LIST_TEMPLATE = CodeTemplate(
    """\
for (const auto& _t: ${arg}) {
    TORCH_CHECK_NOT_IMPLEMENTED(!(${cond}), "Trying to use forward AD with ${name} that does not support it ${msg}");
}
"""
)
|
| 845 |
+
|
| 846 |
+
|
| 847 |
+
def gen_variable_type(
    out: str,
    native_yaml_path: str,
    tags_yaml_path: str,
    fns_with_diff_infos: list[NativeFunctionWithDifferentiabilityInfo],
    template_path: str,
    used_keys: set[str],
) -> None:
    """VariableType.h and VariableType.cpp body

    This is the at::Type subclass for differentiable tensors. The
    implementation of each function dispatches to the base tensor type to
    compute the output. The grad_fn is attached to differentiable functions.

    Args:
        out: output directory for the generated files.
        native_yaml_path: path to native_functions.yaml (unused here but kept
            for interface compatibility with the caller).
        tags_yaml_path: path to tags.yaml (likewise kept for compatibility).
        fns_with_diff_infos: native functions paired with differentiability info.
        template_path: directory containing the VariableType templates.
        used_keys: dispatch keys that appear in derivatives.yaml ("Default"
            stands for the plain Autograd key).
    """
    fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
    fm.write(
        "VariableType.h",
        lambda: {
            "generated_comment": "@"
            + f"generated from {fm.template_dir_for_comments()}/VariableType.h"
        },
    )

    # helper that generates a TORCH_LIBRARY_IMPL macro for each
    # dispatch key that appears in derivatives.yaml
    def wrapper_registrations(used_keys: set[str]) -> str:
        library_impl_macro_list: list[str] = []
        for key in sorted(used_keys):
            # "Default" registrations go to the plain Autograd key.
            dispatch_key = "Autograd" if key == "Default" else key
            library_impl_macro = (
                f"TORCH_LIBRARY_IMPL(aten, {dispatch_key}, m) "
                + "{\n"
                + "${"
                + f"wrapper_registrations_{key}"
                + "}\n}"
            )
            library_impl_macro_list.append(library_impl_macro)
        return "\n\n".join(library_impl_macro_list)

    # Generate a new template from VariableType.cpp which replaces ${wrapper_registrations}
    # with per key TORCH_LIBRARY_IMPL macros for each key that appears in derivatives.yaml
    fm1 = FileManager(
        install_dir=out + "/templates", template_dir=template_path, dry_run=False
    )
    fm1.write(
        "VariableType.cpp",
        lambda: {
            "type_derived_method_definitions": "\n\n".join(
                "${" + f"type_derived_method_definitions_{key}" + "}"
                for key in sorted(used_keys)
            ),
            "wrapper_registrations": wrapper_registrations(used_keys),
        },
    )

    # Generate final VariableType_*.cpp files from the generated template
    fm2 = FileManager(install_dir=out, template_dir=out + "/templates", dry_run=False)

    # Sets are unordered, so sorting the keys before inserting is pointless;
    # a comprehension replaces the old set(list + list) construction.
    sharded_keys = {
        f"{prefix}_{key}"
        for prefix in ("type_derived_method_definitions", "wrapper_registrations")
        for key in used_keys
    }
    # NOTE: see Note [Sharded File] at the top of the VariableType.cpp
    # template regarding sharding of the generated files.
    fm2.write_sharded(
        "VariableType.cpp",
        [fn for fn in fns_with_diff_infos if use_derived(fn)],
        key_fn=lambda fn: cpp.name(fn.func.func),
        base_env={
            "generated_comment": "@"
            + f"generated from {fm.template_dir_for_comments()}/VariableType.cpp",
        },
        env_callable=gen_variable_type_func,
        num_shards=5,
        sharded_keys=sharded_keys,
    )
|
| 927 |
+
|
| 928 |
+
|
| 929 |
+
@with_native_function_and
def gen_wrapper_registration(f: NativeFunction, key: str = "Default") -> str:
    """Render the m.impl(...) registration line for one native function.

    Fills the WRAPPER_REGISTRATION template with the op's overload name and
    the VariableType wrapper symbol for the given dispatch key.
    """
    wrapper_name = type_wrapper_name(f, key)
    return WRAPPER_REGISTRATION.substitute(
        unqual_operator_name_with_overload=f.func.name,
        type_wrapper_name=wrapper_name,
        class_type="VariableType",
    )
|
| 936 |
+
|
| 937 |
+
|
| 938 |
+
def gen_variable_type_func(
    fn: NativeFunctionWithDifferentiabilityInfo,
) -> dict[str, list[str]]:
    """Produce the per-function codegen environment for the sharded
    VariableType.cpp template.

    Returns a dict mapping `type_derived_method_definitions_<key>` /
    `wrapper_registrations_<key>` to single-element lists of generated C++.
    Functions with no derivative info (and no reason for codegen to handle
    them) get the generic AutogradNotImplemented boxed kernel instead.
    """
    f = fn.func
    result: dict[str, list[str]] = {}
    with native_function_manager(f):
        name = cpp.name(f.func)
        formals = gen_formals(f)

        if (
            fn.info is None
            and str(f.func.name.name) not in RESET_GRAD_ACCUMULATOR
            and get_base_name(f) not in DONT_REQUIRE_DERIVATIVE
            and len(gen_differentiable_outputs(fn)) > 0
            and cpp.name(f.func) not in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE
            and type_wrapper_name(f) not in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT
            and type_wrapper_name(f) not in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT
        ):
            # NOTE: [ Registering AutogradNotImplemented boxed kernel ]
            #
            # When there is no derivatives.yaml entry, we register a generic boxed
            # NotImplemented kernel to set grad_fn to be NotImplemented, so that forward
            # proceeds as usual but an error is properly produced on backward.
            # TODO: it would be nice to not have these special cases
            #
            # There are several cases where still let codegen handle it:
            # 1) ops that need to reset grad accumulator (we let codegen handle this case
            #    because) the list is (currently) only accessible in Python.
            # 2) User explicitly specifies DONT_REQUIRE_DERIVATIVE. This basically makes
            #    autograd a fallthrough with NDEBUG checks. This can be useful for when all
            #    outputs are integral.
            # 3) When there are no differentiable outputs. This is similar to (2).
            # 4) There are certain ops where we skip certain NDEBUG checks. this is similar
            #    to (1).
            type_definition = ""
            wrapper_registration = AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION.substitute(
                unqual_operator_name_with_overload=f.func.name
            )
            result["type_derived_method_definitions_Default"] = [type_definition]
            result["wrapper_registrations_Default"] = [wrapper_registration]
        else:
            # The no-info and per-key paths previously duplicated identical
            # METHOD_DEFINITION emission; unify them by treating the no-info
            # case as a single "Default" key.
            keys = fn.info.keys() if fn.info else ["Default"]
            for key in keys:
                type_definition = METHOD_DEFINITION.substitute(
                    return_type=cpp.returns_type(
                        f.func.returns, symint=True
                    ).cpp_type(),
                    type_wrapper_name=type_wrapper_name(f, key),
                    type_definition_body=emit_body(fn, key),
                    formals=formals,
                )
                wrapper_registration = gen_wrapper_registration(f, key)
                result[f"type_derived_method_definitions_{key}"] = [type_definition]
                result[f"wrapper_registrations_{key}"] = [wrapper_registration]
        # See Note [Manual Backend kernels]
        assert (name in MANUAL_BACKEND) == f.manual_kernel_registration
        # If you want to register a kernel to Autograd, you must make the op abstract.
        # In other words, this op must have dispatch section in native_functions.yaml.
        if name in MANUAL_AUTOGRAD_AND_TRACER or (
            fn.info and any(info.has_derivatives for info in fn.info.values())
        ):
            msg = (
                f"There's a formula for {name}(or its functional variant) in derivatives.yaml. "
                f"It's required to add a dispatch section for it with explicit supported backends e.g CPU/CUDA "
                f"or CompositeExplicitAutograd in native_functions.yaml. Please see "
                f"https://github.com/pytorch/pytorch/tree/master/aten/src/ATen/native#choosing-the-right-dispatch-keyword "
                f"for instructions to choose the right dispatch keyword."
            )
            assert f.is_abstract, msg

    return result
|
| 1022 |
+
|
| 1023 |
+
|
| 1024 |
+
# (base_name, overload_name) pairs of foreach ops that intentionally lack an
# entry in derivatives.yaml; emit_body asserts membership when info is None.
_foreach_ops_without_differentiability_info = {
    # No reference backward available due to the lack of `{maximum, minimum}(tensor, scalar)`.
    ("_foreach_maximum", "Scalar"),
    ("_foreach_maximum", "ScalarList"),
    ("_foreach_minimum", "Scalar"),
    ("_foreach_minimum", "ScalarList"),
    # No reference backward available as addcdiv/addcmul don't support Tensor as scaling factor.
    ("_foreach_addcdiv", "Tensor"),
    ("_foreach_addcmul", "Tensor"),
    ("_foreach_copy", ""),
}

# Foreach ops whose argument count legitimately differs from their non-foreach
# reference op, so the arity cross-check in emit_body is skipped for them.
_foreach_ops_with_different_arity = {
    # These ops lack `alpha` of scaling factor to applied to the right hand side argument.
    ("_foreach_add", "Scalar"),
    ("_foreach_add", "ScalarList"),
    ("_foreach_sub", "Scalar"),
    ("_foreach_sub", "ScalarList"),
}
|
| 1043 |
+
|
| 1044 |
+
|
| 1045 |
+
@with_native_function_with_differentiability_info_and_key
|
| 1046 |
+
def emit_body(
|
| 1047 |
+
fn: NativeFunctionWithDifferentiabilityInfo, key: str = "Default"
|
| 1048 |
+
) -> list[str]:
|
| 1049 |
+
assert dispatch_strategy(fn) == "use_derived"
|
| 1050 |
+
f = fn.func
|
| 1051 |
+
info = fn.info[key] if fn.info else None
|
| 1052 |
+
fw_derivatives = fn.fw_derivatives.get(key, []) if fn.fw_derivatives else []
|
| 1053 |
+
|
| 1054 |
+
name = cpp.name(f.func)
|
| 1055 |
+
inplace = f.func.kind() == SchemaKind.inplace
|
| 1056 |
+
is_out_fn = f.func.kind() == SchemaKind.out
|
| 1057 |
+
returns_void = len(f.func.returns) == 0
|
| 1058 |
+
base_name = get_base_name(f)
|
| 1059 |
+
view_info = get_view_info(f)
|
| 1060 |
+
|
| 1061 |
+
is_foreach = name.startswith("_foreach")
|
| 1062 |
+
is_inplace_foreach = is_foreach and inplace
|
| 1063 |
+
if is_inplace_foreach:
|
| 1064 |
+
inplace_foreacharg2refarg: dict[Argument, Argument] = {}
|
| 1065 |
+
refargname2inplace_foreacharg: dict[str, Argument] = {}
|
| 1066 |
+
base_name_and_overload_name = (f.func.name.name.base, f.func.name.overload_name)
|
| 1067 |
+
if info is None:
|
| 1068 |
+
assert (
|
| 1069 |
+
base_name_and_overload_name
|
| 1070 |
+
in _foreach_ops_without_differentiability_info
|
| 1071 |
+
), f"{'.'.join(base_name_and_overload_name)} should have a differentiability info"
|
| 1072 |
+
else:
|
| 1073 |
+
assert (
|
| 1074 |
+
len(f.func.arguments.flat_non_out)
|
| 1075 |
+
== len(info.func.func.arguments.flat_non_out)
|
| 1076 |
+
) or (base_name_and_overload_name in _foreach_ops_with_different_arity), (
|
| 1077 |
+
f"{'.'.join(base_name_and_overload_name)} has {len(f.func.arguments.flat_non_out)} args "
|
| 1078 |
+
f"but the reference has {len(info.func.func.arguments.flat_non_out)}"
|
| 1079 |
+
)
|
| 1080 |
+
for foreach_arg, ref_arg in zip(
|
| 1081 |
+
f.func.arguments.flat_non_out, info.func.func.arguments.flat_non_out
|
| 1082 |
+
):
|
| 1083 |
+
foreach_arg_type = foreach_arg.type
|
| 1084 |
+
if isinstance(foreach_arg_type, ListType):
|
| 1085 |
+
foreach_arg_type = foreach_arg_type.elem
|
| 1086 |
+
assert foreach_arg_type == ref_arg.type
|
| 1087 |
+
inplace_foreacharg2refarg[foreach_arg] = ref_arg
|
| 1088 |
+
refargname2inplace_foreacharg[ref_arg.name] = foreach_arg
|
| 1089 |
+
|
| 1090 |
+
def gen_differentiable_input(
|
| 1091 |
+
arg: Argument | SelfArgument | TensorOptionsArguments,
|
| 1092 |
+
) -> DifferentiableInput | None:
|
| 1093 |
+
if isinstance(arg, TensorOptionsArguments):
|
| 1094 |
+
return None
|
| 1095 |
+
a: Argument = arg.argument if isinstance(arg, SelfArgument) else arg
|
| 1096 |
+
|
| 1097 |
+
# TODO: `cpp_type` is only to keep it byte-for-byte compatible with the old codegen, should remove.
|
| 1098 |
+
# NB: This is not a clone of cpp.argument() - TensorOptionsArguments / faithful / binds are
|
| 1099 |
+
# not handled properly as they are irrelevant for this codegen.
|
| 1100 |
+
cpp_type = cpp.argument_type(a, binds=a.name, symint=True).cpp_type()
|
| 1101 |
+
|
| 1102 |
+
if not is_differentiable(a.name, a.type, info):
|
| 1103 |
+
return None
|
| 1104 |
+
return DifferentiableInput(
|
| 1105 |
+
name=a.name,
|
| 1106 |
+
type=a.type,
|
| 1107 |
+
cpp_type=cpp_type,
|
| 1108 |
+
)
|
| 1109 |
+
|
| 1110 |
+
@with_native_function
|
| 1111 |
+
def gen_differentiable_inputs(f: NativeFunction) -> list[DifferentiableInput]:
|
| 1112 |
+
arguments = list(f.func.arguments.non_out)
|
| 1113 |
+
if is_inplace_foreach and info is not None:
|
| 1114 |
+
for i, arg in enumerate(f.func.arguments.flat_non_out):
|
| 1115 |
+
if arg in inplace_foreacharg2refarg:
|
| 1116 |
+
# note(crcrpar): From what I understand, what matters is only the name.
|
| 1117 |
+
# Thus originally I only replace argument only when the names are different.
|
| 1118 |
+
# TODO(crcrpar): Make it simpler.
|
| 1119 |
+
mapped_arg = inplace_foreacharg2refarg[arg]
|
| 1120 |
+
arguments[i] = Argument(
|
| 1121 |
+
mapped_arg.name,
|
| 1122 |
+
mapped_arg.type,
|
| 1123 |
+
mapped_arg.default,
|
| 1124 |
+
mapped_arg.annotation,
|
| 1125 |
+
)
|
| 1126 |
+
return list(mapMaybe(gen_differentiable_input, arguments))
|
| 1127 |
+
|
| 1128 |
+
def find_args_with_derivatives(
|
| 1129 |
+
differentiable_inputs: list[DifferentiableInput],
|
| 1130 |
+
) -> list[DifferentiableInput]:
|
| 1131 |
+
"""Find arguments that have derivative definitions"""
|
| 1132 |
+
if info is None or not info.has_derivatives:
|
| 1133 |
+
return differentiable_inputs
|
| 1134 |
+
names = {name for d in info.derivatives for name in d.var_names}
|
| 1135 |
+
differentiable = [arg for arg in differentiable_inputs if arg.name in names]
|
| 1136 |
+
if len(differentiable) != len(names):
|
| 1137 |
+
missing = names - {arg.name for arg in differentiable}
|
| 1138 |
+
raise RuntimeError(
|
| 1139 |
+
f"Missing arguments for derivatives: {missing} in {info.name}"
|
| 1140 |
+
)
|
| 1141 |
+
return differentiable
|
| 1142 |
+
|
| 1143 |
+
differentiable_inputs = gen_differentiable_inputs(f)
|
| 1144 |
+
args_with_derivatives = find_args_with_derivatives(differentiable_inputs)
|
| 1145 |
+
differentiable_outputs = gen_differentiable_outputs(fn, key)
|
| 1146 |
+
|
| 1147 |
+
undifferentiable = (base_name in DONT_REQUIRE_DERIVATIVE) or (
|
| 1148 |
+
name in DONT_REQUIRE_DERIVATIVE
|
| 1149 |
+
)
|
| 1150 |
+
|
| 1151 |
+
requires_derivative = (
|
| 1152 |
+
(not undifferentiable)
|
| 1153 |
+
and (len(differentiable_inputs) > 0)
|
| 1154 |
+
and (
|
| 1155 |
+
(len(differentiable_outputs) > 0)
|
| 1156 |
+
# note(crcrpar): In-place foreach functions are a void function.
|
| 1157 |
+
or is_inplace_foreach
|
| 1158 |
+
)
|
| 1159 |
+
)
|
| 1160 |
+
|
| 1161 |
+
if (
|
| 1162 |
+
info is not None
|
| 1163 |
+
and info.has_derivatives
|
| 1164 |
+
and not requires_derivative
|
| 1165 |
+
# out= ops are allowed to have zero returns which cause requires_derivative to be False
|
| 1166 |
+
# we shouldn't error out though (out= ops for autograd just redispatch)
|
| 1167 |
+
and len(f.func.returns) > 0
|
| 1168 |
+
):
|
| 1169 |
+
raise RuntimeError(
|
| 1170 |
+
f"ERROR: derivative ignored for {name} -- specified an autograd function without derivative"
|
| 1171 |
+
)
|
| 1172 |
+
|
| 1173 |
+
# note(crcrpar): In-place foreach functions do not support forward AD
|
| 1174 |
+
if requires_derivative and len(fw_derivatives) > 0 and not is_inplace_foreach:
|
| 1175 |
+
assert sum(len(derivative.var_names) for derivative in fw_derivatives) == len(
|
| 1176 |
+
differentiable_outputs
|
| 1177 |
+
), (
|
| 1178 |
+
"Expected the number of forward derivatives implemented to match the "
|
| 1179 |
+
"number of differentiable outputs. NB: This only applies when at least "
|
| 1180 |
+
"one forward derivative is implemented. Not implementing any forward "
|
| 1181 |
+
"derivatives is also okay, and we would require inputs to the op to "
|
| 1182 |
+
"not have associated tangents in that case."
|
| 1183 |
+
)
|
| 1184 |
+
|
| 1185 |
+
try_jit_decomposition = (
|
| 1186 |
+
requires_derivative
|
| 1187 |
+
and len(fw_derivatives) == 0
|
| 1188 |
+
and (not modifies_arguments(f))
|
| 1189 |
+
and (not returns_void)
|
| 1190 |
+
)
|
| 1191 |
+
|
| 1192 |
+
def emit_save_inputs() -> list[str]:
|
| 1193 |
+
setup: list[str] = []
|
| 1194 |
+
if info is None or not info.has_derivatives:
|
| 1195 |
+
return setup
|
| 1196 |
+
|
| 1197 |
+
has_tensorlist_arg = any(
|
| 1198 |
+
is_tensor_list_type(arg.type) for arg in args_with_derivatives
|
| 1199 |
+
)
|
| 1200 |
+
|
| 1201 |
+
# We don't want to save tensors if we know that they will never be used
|
| 1202 |
+
# when computing the derivative, so we add guards to those statements
|
| 1203 |
+
def guard_for(arg: SavedAttribute) -> str | None:
|
| 1204 |
+
assert info is not None
|
| 1205 |
+
|
| 1206 |
+
# It's hard to determine the edge offset if we have TensorLists
|
| 1207 |
+
# NOTE(crcrpar): in-place foreach functions' arguments include tensorlist
|
| 1208 |
+
# but their derivatives don't use it, so let them bypass this check.
|
| 1209 |
+
if has_tensorlist_arg and (not is_inplace_foreach):
|
| 1210 |
+
return None
|
| 1211 |
+
|
| 1212 |
+
# Empirical evaluation of the cases where we insert those guards in
|
| 1213 |
+
# backward show that they are somewhat useless. E.g. there's no need
|
| 1214 |
+
# to guard on some values captured from forward, because they had to
|
| 1215 |
+
# require_grad if the backward function even gets executed. I don't
|
| 1216 |
+
# have any good ideas for detecting those cases, so I simply disabled the
|
| 1217 |
+
# checks.
|
| 1218 |
+
if "backward" in info.name:
|
| 1219 |
+
return None
|
| 1220 |
+
|
| 1221 |
+
# If there's a single derivative we could compute, we already have
|
| 1222 |
+
# a requires_grad check that is sufficient
|
| 1223 |
+
if len(args_with_derivatives) <= 1:
|
| 1224 |
+
return None
|
| 1225 |
+
|
| 1226 |
+
# We really only care about trimming down the amount of tensors we save
|
| 1227 |
+
if arg.nctype.type != BaseCType(tensorT):
|
| 1228 |
+
return None
|
| 1229 |
+
|
| 1230 |
+
# We want to emit simple guards, so we only allow that if checking one
|
| 1231 |
+
# input is enough to determine whether we need that value
|
| 1232 |
+
used_in = [d for d in info.derivatives if arg in d.saved_inputs]
|
| 1233 |
+
assert len(used_in) > 0
|
| 1234 |
+
if len(used_in) != 1:
|
| 1235 |
+
return None
|
| 1236 |
+
derivative = used_in[0]
|
| 1237 |
+
|
| 1238 |
+
# Case with multioutput formulas
|
| 1239 |
+
# TODO: process all derivative formulas!!!
|
| 1240 |
+
if len(derivative.var_names) != 1:
|
| 1241 |
+
wrap_opt_if_start = derivative.formula.find(
|
| 1242 |
+
f"wrap_opt_if({arg.nctype.name}"
|
| 1243 |
+
)
|
| 1244 |
+
if wrap_opt_if_start == -1:
|
| 1245 |
+
return None
|
| 1246 |
+
|
| 1247 |
+
wrap_opt_if_match = re.match(
|
| 1248 |
+
rf"wrap_opt_if\({arg.nctype.name},(.*?)\)",
|
| 1249 |
+
derivative.formula[wrap_opt_if_start:],
|
| 1250 |
+
)
|
| 1251 |
+
assert wrap_opt_if_match is not None
|
| 1252 |
+
|
| 1253 |
+
# Condition is between 'wrap_opt_if(var_name,' and ')'.
|
| 1254 |
+
condition_slice = slice(len(rf"wrap_opt_if\({arg.nctype.name},"), -1)
|
| 1255 |
+
wrap_opt_if_condition = wrap_opt_if_match.group(0)[
|
| 1256 |
+
condition_slice
|
| 1257 |
+
].strip()
|
| 1258 |
+
# replace 'grad_input_mask[num]' with 'grad_fn->should_compute_output(num)'
|
| 1259 |
+
wrap_opt_if_condition = re.sub(
|
| 1260 |
+
r"grad_input_mask\[(\d+)\]",
|
| 1261 |
+
r"grad_fn->should_compute_output(\1)",
|
| 1262 |
+
wrap_opt_if_condition,
|
| 1263 |
+
)
|
| 1264 |
+
return f"{wrap_opt_if_condition}"
|
| 1265 |
+
|
| 1266 |
+
# Figure out the offset of the edge that uses this variable
|
| 1267 |
+
derivative_var_name = derivative.var_names[0]
|
| 1268 |
+
for edge_off, a in enumerate(args_with_derivatives):
|
| 1269 |
+
if a.name == derivative_var_name:
|
| 1270 |
+
break
|
| 1271 |
+
else:
|
| 1272 |
+
raise AssertionError
|
| 1273 |
+
return f"grad_fn->should_compute_output({edge_off})"
|
| 1274 |
+
|
| 1275 |
+
if is_inplace_foreach:
|
| 1276 |
+
save_input_stmts = save_variables(info.all_saved_inputs, False, guard_for)
|
| 1277 |
+
if save_input_stmts:
|
| 1278 |
+
setup.append(
|
| 1279 |
+
LOOP_OVER_VECTOR_OF_GRAD_FNS.substitute(
|
| 1280 |
+
preamble="", statements=save_input_stmts
|
| 1281 |
+
)
|
| 1282 |
+
)
|
| 1283 |
+
else:
|
| 1284 |
+
setup.extend(save_variables(info.all_saved_inputs, False, guard_for))
|
| 1285 |
+
for arg in args_with_derivatives:
|
| 1286 |
+
if is_tensor_list_type(arg.type):
|
| 1287 |
+
setup.append(f"grad_fn->{arg.name}_size_ = {arg.name}.size();")
|
| 1288 |
+
return setup
|
| 1289 |
+
|
| 1290 |
+
def setup_derivative(differentiable_inputs: list[DifferentiableInput]) -> list[str]:
|
| 1291 |
+
body: list[str] = []
|
| 1292 |
+
if is_out_fn:
|
| 1293 |
+
# For out functions, ensure that no input or output requires grad
|
| 1294 |
+
body.append(DECLARE_GRAD_FN.substitute(op="Node"))
|
| 1295 |
+
body.append(
|
| 1296 |
+
SETUP_NONE_REQUIRES_GRAD.substitute(
|
| 1297 |
+
base_name=base_name,
|
| 1298 |
+
args_to_check=[arg.name for arg in differentiable_inputs],
|
| 1299 |
+
)
|
| 1300 |
+
)
|
| 1301 |
+
body.append(
|
| 1302 |
+
SETUP_NONE_REQUIRES_GRAD.substitute(
|
| 1303 |
+
base_name=base_name,
|
| 1304 |
+
args_to_check=[arg.name for arg in differentiable_outputs],
|
| 1305 |
+
)
|
| 1306 |
+
)
|
| 1307 |
+
return body
|
| 1308 |
+
|
| 1309 |
+
op = info.op if info is not None and info.has_derivatives else "NotImplemented"
|
| 1310 |
+
setup = []
|
| 1311 |
+
if not is_inplace_foreach:
|
| 1312 |
+
setup.extend(
|
| 1313 |
+
ASSIGN_GRAD_FN.substitute(
|
| 1314 |
+
op=op,
|
| 1315 |
+
op_ctor=""
|
| 1316 |
+
if info is not None and info.has_derivatives
|
| 1317 |
+
else f'"{cpp.name(f.func)}"',
|
| 1318 |
+
args_with_derivatives=[arg.name for arg in args_with_derivatives],
|
| 1319 |
+
).split("\n")
|
| 1320 |
+
)
|
| 1321 |
+
else:
|
| 1322 |
+
# note(crcrpar): Assuming in-place foreach function's self_arg is always TensorList.
|
| 1323 |
+
list_like_arg = "self"
|
| 1324 |
+
args = [arg.name for arg in args_with_derivatives]
|
| 1325 |
+
for i, arg in enumerate(args):
|
| 1326 |
+
if is_inplace_foreach and info is not None:
|
| 1327 |
+
if arg in refargname2inplace_foreacharg:
|
| 1328 |
+
foreach_arg = refargname2inplace_foreacharg[arg]
|
| 1329 |
+
args[i] = foreach_arg.name + (
|
| 1330 |
+
"[i]" if isinstance(foreach_arg.type, ListType) else ""
|
| 1331 |
+
)
|
| 1332 |
+
else:
|
| 1333 |
+
if arg == list_like_arg:
|
| 1334 |
+
args[i] = arg + "[i]"
|
| 1335 |
+
setup.extend(
|
| 1336 |
+
ASSIGN_VECTOR_OF_GRAD_FN.substitute(
|
| 1337 |
+
op=op,
|
| 1338 |
+
op_ctor=""
|
| 1339 |
+
if info is not None and info.has_derivatives
|
| 1340 |
+
else f'"{cpp.name(f.func)}"',
|
| 1341 |
+
args_with_derivatives=args,
|
| 1342 |
+
irange=f"{list_like_arg}.size()",
|
| 1343 |
+
).split("\n")
|
| 1344 |
+
)
|
| 1345 |
+
setup.extend(emit_save_inputs())
|
| 1346 |
+
|
| 1347 |
+
body.extend(
|
| 1348 |
+
emit_check_no_requires_grad(differentiable_inputs, args_with_derivatives)
|
| 1349 |
+
)
|
| 1350 |
+
declare_grad_fn_template = (
|
| 1351 |
+
DECLARE_GRAD_FN if not is_inplace_foreach else DECLARE_VECTOR_OF_GRAD_FN
|
| 1352 |
+
)
|
| 1353 |
+
body.append(declare_grad_fn_template.substitute(op=op))
|
| 1354 |
+
body.append(SETUP_DERIVATIVE.substitute(setup=setup))
|
| 1355 |
+
return body
|
| 1356 |
+
|
| 1357 |
+
def emit_check_if_in_complex_autograd_allowlist() -> list[str]:
|
| 1358 |
+
body: list[str] = []
|
| 1359 |
+
if base_name in GRADIENT_IMPLEMENTED_FOR_COMPLEX:
|
| 1360 |
+
return body
|
| 1361 |
+
for arg in differentiable_outputs:
|
| 1362 |
+
name = arg.name
|
| 1363 |
+
# TODO: should be `arg.type.is_tensor_like()`?
|
| 1364 |
+
if arg.cpp_type == "at::Tensor" or arg.cpp_type in TENSOR_LIST_LIKE_CTYPES:
|
| 1365 |
+
body.append(f'throw_error_for_complex_autograd({name}, "{base_name}");')
|
| 1366 |
+
return body
|
| 1367 |
+
|
| 1368 |
+
def emit_check_no_requires_grad(
|
| 1369 |
+
tensor_args: list[DifferentiableInput],
|
| 1370 |
+
args_with_derivatives: list[DifferentiableInput],
|
| 1371 |
+
) -> list[str]:
|
| 1372 |
+
"""Checks that arguments without derivatives don't require grad"""
|
| 1373 |
+
body: list[str] = []
|
| 1374 |
+
for arg in tensor_args:
|
| 1375 |
+
if arg in args_with_derivatives:
|
| 1376 |
+
continue
|
| 1377 |
+
arg_name = arg.name
|
| 1378 |
+
if info and arg_name in info.non_differentiable_arg_names:
|
| 1379 |
+
continue
|
| 1380 |
+
if arg_name == "output":
|
| 1381 |
+
# Double-backwards definitions sometimes take in 'input' and
|
| 1382 |
+
# 'output', but only define the derivative for input.
|
| 1383 |
+
continue
|
| 1384 |
+
body.append(f'check_no_requires_grad({arg_name}, "{arg_name}", "{name}");')
|
| 1385 |
+
return body
|
| 1386 |
+
|
| 1387 |
+
def emit_original_self_definition() -> list[str]:
|
| 1388 |
+
body: list[str] = []
|
| 1389 |
+
if inplace:
|
| 1390 |
+
if is_inplace_foreach:
|
| 1391 |
+
body.append(
|
| 1392 |
+
"std::vector<::std::optional<at::Tensor>> original_selfs(self.size());"
|
| 1393 |
+
)
|
| 1394 |
+
else:
|
| 1395 |
+
body.append("::std::optional<at::Tensor> original_self;")
|
| 1396 |
+
|
| 1397 |
+
all_forward_grad_cond = []
|
| 1398 |
+
for derivative in fw_derivatives:
|
| 1399 |
+
if derivative.required_original_self_value:
|
| 1400 |
+
all_forward_grad_cond.append(
|
| 1401 |
+
get_any_has_forward_grad_name(derivative.var_names)
|
| 1402 |
+
)
|
| 1403 |
+
|
| 1404 |
+
if all_forward_grad_cond:
|
| 1405 |
+
if not is_inplace_foreach:
|
| 1406 |
+
body.append(f'if ({" || ".join(all_forward_grad_cond)}) {{')
|
| 1407 |
+
body.append(" original_self = self.clone();")
|
| 1408 |
+
body.append("}")
|
| 1409 |
+
else:
|
| 1410 |
+
current_all_forward_grad_cond = [
|
| 1411 |
+
f"{cond}[i]" for cond in all_forward_grad_cond
|
| 1412 |
+
]
|
| 1413 |
+
body.append("for (const auto& i : c10::irange(self.size())) {")
|
| 1414 |
+
body.append(
|
| 1415 |
+
f" if ({' || '.join(current_all_forward_grad_cond)}) {{"
|
| 1416 |
+
)
|
| 1417 |
+
body.append(" original_selfs[i] = self[i].clone();")
|
| 1418 |
+
body.append(" }")
|
| 1419 |
+
body.append("}")
|
| 1420 |
+
|
| 1421 |
+
return body
|
| 1422 |
+
|
| 1423 |
+
def save_variables(
|
| 1424 |
+
saved_variables: Sequence[SavedAttribute],
|
| 1425 |
+
is_output: bool,
|
| 1426 |
+
guard_for: Callable[[SavedAttribute], str | None] = lambda name: None,
|
| 1427 |
+
) -> Sequence[str]:
|
| 1428 |
+
# assign the saved variables to the generated grad_fn
|
| 1429 |
+
stmts: list[str] = []
|
| 1430 |
+
for arg in sorted(saved_variables, key=lambda sa: str(sa.nctype.name)):
|
| 1431 |
+
name = (
|
| 1432 |
+
arg.nctype.name.name
|
| 1433 |
+
if isinstance(arg.nctype.name, SpecialArgName)
|
| 1434 |
+
else arg.nctype.name
|
| 1435 |
+
)
|
| 1436 |
+
foreacharg: Argument | None = None
|
| 1437 |
+
is_foreacharg_list_type: bool = False
|
| 1438 |
+
type = arg.nctype.type
|
| 1439 |
+
expr = arg.expr
|
| 1440 |
+
stmts_prepend = None
|
| 1441 |
+
if is_inplace_foreach and info is not None:
|
| 1442 |
+
# todo(crcrpar): See if we can add some check e.g. `assert foreacharg is not None`.
|
| 1443 |
+
# for now the example assert would fail.
|
| 1444 |
+
name_to_query = name.split("_scalar_type")[0]
|
| 1445 |
+
if name_to_query in refargname2inplace_foreacharg:
|
| 1446 |
+
foreacharg = refargname2inplace_foreacharg[name_to_query]
|
| 1447 |
+
is_foreacharg_list_type = isinstance(foreacharg.type, ListType)
|
| 1448 |
+
if foreacharg is not None:
|
| 1449 |
+
name_in_expr = (
|
| 1450 |
+
f"{foreacharg.name}{'[i]' if is_foreacharg_list_type else ''}"
|
| 1451 |
+
)
|
| 1452 |
+
src_name = name
|
| 1453 |
+
if "_scalar_type" in src_name:
|
| 1454 |
+
split_src_name = src_name.split("_scalar_type")
|
| 1455 |
+
assert len(split_src_name) == 2
|
| 1456 |
+
src_name = split_src_name[0]
|
| 1457 |
+
expr = expr.replace(src_name, name_in_expr)
|
| 1458 |
+
if (
|
| 1459 |
+
type == BaseCType(tensorT)
|
| 1460 |
+
or type == OptionalCType(BaseCType(tensorT))
|
| 1461 |
+
or type == MutRefCType(OptionalCType(BaseCType(tensorT)))
|
| 1462 |
+
or (is_output and type == BaseCType(scalarT))
|
| 1463 |
+
):
|
| 1464 |
+
# note(crcrpar): Here `expr` is generated from scratch, `arg.expr` is ignored.
|
| 1465 |
+
var = name
|
| 1466 |
+
name += "_"
|
| 1467 |
+
if var == "self" and inplace:
|
| 1468 |
+
original_self_var = (
|
| 1469 |
+
"original_self"
|
| 1470 |
+
if not is_inplace_foreach
|
| 1471 |
+
else "original_selfs[i]"
|
| 1472 |
+
)
|
| 1473 |
+
self_var = var if not is_inplace_foreach else var + "[i]"
|
| 1474 |
+
stmts_prepend = f"if (!{original_self_var}.has_value()) {original_self_var} = {self_var}.clone()"
|
| 1475 |
+
var = f"{original_self_var}.value()"
|
| 1476 |
+
assert not is_output
|
| 1477 |
+
if inplace and is_output:
|
| 1478 |
+
assert name == "result_"
|
| 1479 |
+
var = (
|
| 1480 |
+
"self[i]"
|
| 1481 |
+
if is_inplace_foreach or is_foreacharg_list_type
|
| 1482 |
+
else "self"
|
| 1483 |
+
)
|
| 1484 |
+
is_inplace_view = f"{var}.is_view()"
|
| 1485 |
+
expr = f"SavedVariable({var}, {str(is_output).lower()}, {is_inplace_view})"
|
| 1486 |
+
else:
|
| 1487 |
+
expr = f"SavedVariable({var}, {str(is_output).lower()})"
|
| 1488 |
+
if foreacharg is not None and "original_selfs" not in expr:
|
| 1489 |
+
expr = expr.replace(src_name, name_in_expr)
|
| 1490 |
+
elif (
|
| 1491 |
+
type == BaseCType(tensorListT)
|
| 1492 |
+
or type == ListCType(OptionalCType(BaseCType(tensorT)))
|
| 1493 |
+
or type == BaseCType(iTensorListRefT)
|
| 1494 |
+
or type == VectorCType(BaseCType(tensorT))
|
| 1495 |
+
):
|
| 1496 |
+
# See Note [nuanced return type of out-of-place foreach functions]
|
| 1497 |
+
if type == VectorCType(BaseCType(tensorT)):
|
| 1498 |
+
assert is_foreach and is_output
|
| 1499 |
+
expr = f"make_saved_variable_list({name}, {str(is_foreach and is_output).lower()})"
|
| 1500 |
+
name += "_"
|
| 1501 |
+
elif type == BaseCType(intArrayRefT):
|
| 1502 |
+
expr = expr + ".vec()"
|
| 1503 |
+
elif type == BaseCType(symIntArrayRefT):
|
| 1504 |
+
expr = expr + ".vec()"
|
| 1505 |
+
elif type == BaseCType(stringT):
|
| 1506 |
+
expr = f"std::string({expr})"
|
| 1507 |
+
elif type == OptionalCType(BaseCType(stringT)):
|
| 1508 |
+
expr = f"{expr}.has_value() ? ::std::optional<std::string>(std::string({expr}.value())) : ::std::nullopt"
|
| 1509 |
+
elif type == ArrayRefCType(
|
| 1510 |
+
elem=BaseCType(type=BaseCppType(ns="at", name="Scalar"))
|
| 1511 |
+
):
|
| 1512 |
+
expr = expr + ".vec()"
|
| 1513 |
+
|
| 1514 |
+
guard = guard_for(arg)
|
| 1515 |
+
if guard is None:
|
| 1516 |
+
if stmts_prepend:
|
| 1517 |
+
stmts.append(f"{stmts_prepend};")
|
| 1518 |
+
stmts.append(f"grad_fn->{name} = {expr};")
|
| 1519 |
+
else:
|
| 1520 |
+
stmts.append(f"if ({guard}) {{")
|
| 1521 |
+
if stmts_prepend:
|
| 1522 |
+
stmts.append(f" {stmts_prepend};")
|
| 1523 |
+
stmts.append(f" grad_fn->{name} = {expr};")
|
| 1524 |
+
stmts.append("}")
|
| 1525 |
+
return stmts
|
| 1526 |
+
|
| 1527 |
+
# Generates a Dispatcher::redispatch() call into the dispatcher. We do this mainly for performance reasons:
|
| 1528 |
+
# - Pre-compute the full DispatchKeySet. This saves the dispatcher from having to read from TLS.
|
| 1529 |
+
# - redispatch() avoids a redundant call to RecordFunction, which was already called right before
|
| 1530 |
+
# we entered this autograd kernel.
|
| 1531 |
+
def emit_dispatch_call(
|
| 1532 |
+
f: NativeFunction, input_base: str, unpacked_args: Sequence[str]
|
| 1533 |
+
) -> str:
|
| 1534 |
+
"""Dispatch call via function in a namespace or method on Tensor."""
|
| 1535 |
+
dispatcher_sig = DispatcherSignature.from_schema(f.func)
|
| 1536 |
+
dispatcher_exprs = dispatcher_sig.exprs()
|
| 1537 |
+
|
| 1538 |
+
# code-generated autograd kernels plumb and recompute dispatch keys directly through the kernel for performance.
|
| 1539 |
+
# Ops also always have a function variant of the redispatch API.
|
| 1540 |
+
# See Note [Plumbing Keys Through The Dispatcher] for details.
|
| 1541 |
+
dispatch_key_set = "ks & c10::after_autograd_keyset"
|
| 1542 |
+
call = CALL_REDISPATCH.substitute(
|
| 1543 |
+
api_name=cpp.name(
|
| 1544 |
+
f.func,
|
| 1545 |
+
faithful_name_for_out_overloads=True,
|
| 1546 |
+
symint_overload=f.func.has_symint(),
|
| 1547 |
+
),
|
| 1548 |
+
unpacked_args=[dispatch_key_set] + list(unpacked_args),
|
| 1549 |
+
)
|
| 1550 |
+
return call
|
| 1551 |
+
|
| 1552 |
+
def wrap_output(
|
| 1553 |
+
f: NativeFunction, unpacked_bindings: list[Binding], var: str
|
| 1554 |
+
) -> str:
|
| 1555 |
+
call = ""
|
| 1556 |
+
rhs_value: str | None = None
|
| 1557 |
+
if not any(r.type.is_tensor_like() for r in f.func.returns):
|
| 1558 |
+
rhs_value = var
|
| 1559 |
+
else:
|
| 1560 |
+
rhs_value = f"std::move({var})"
|
| 1561 |
+
assert rhs_value is not None
|
| 1562 |
+
call += ASSIGN_RETURN_VALUE.substitute(
|
| 1563 |
+
return_values=tie_return_values(f), rhs_value=rhs_value
|
| 1564 |
+
)
|
| 1565 |
+
return call
|
| 1566 |
+
|
| 1567 |
+
def check_tensorimpl_and_storage(
|
| 1568 |
+
call: str, unpacked_bindings: list[Binding]
|
| 1569 |
+
) -> str:
|
| 1570 |
+
# See NOTE [ TensorImpl and Storage Pointer Sanity Checks ]
|
| 1571 |
+
stmts_before_call: list[str] = []
|
| 1572 |
+
stmts_after_call: list[str] = []
|
| 1573 |
+
|
| 1574 |
+
if cpp.name(f.func) in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE:
|
| 1575 |
+
return call
|
| 1576 |
+
|
| 1577 |
+
# Check properties of inputs (enforce (1))
|
| 1578 |
+
for unpacked_binding in unpacked_bindings:
|
| 1579 |
+
arg = unpacked_binding.name
|
| 1580 |
+
noref_cpp_type = unpacked_binding.nctype.type.remove_const_ref()
|
| 1581 |
+
if noref_cpp_type == BaseCType(tensorListT) or noref_cpp_type == BaseCType(
|
| 1582 |
+
iTensorListRefT
|
| 1583 |
+
):
|
| 1584 |
+
stmts_before_call += [
|
| 1585 |
+
SAVE_TENSORLIST_STORAGE.substitute(tensorlist_name=arg),
|
| 1586 |
+
SAVE_TENSORLIST_IMPL.substitute(tensorlist_name=arg),
|
| 1587 |
+
]
|
| 1588 |
+
stmts_after_call += [
|
| 1589 |
+
ENFORCE_SAME_TENSORLIST_STORAGE.substitute(tensorlist_name=arg),
|
| 1590 |
+
ENFORCE_SAME_TENSORLIST_IMPL.substitute(tensorlist_name=arg),
|
| 1591 |
+
]
|
| 1592 |
+
elif noref_cpp_type == ListCType(OptionalCType(BaseCType(tensorT))):
|
| 1593 |
+
stmts_before_call += [
|
| 1594 |
+
SAVE_OPTIONALTENSORLIST_STORAGE.substitute(tensorlist_name=arg),
|
| 1595 |
+
SAVE_OPTIONALTENSORLIST_IMPL.substitute(tensorlist_name=arg),
|
| 1596 |
+
]
|
| 1597 |
+
stmts_after_call += [
|
| 1598 |
+
ENFORCE_SAME_OPTIONALTENSORLIST_STORAGE.substitute(
|
| 1599 |
+
tensorlist_name=arg
|
| 1600 |
+
),
|
| 1601 |
+
ENFORCE_SAME_OPTIONALTENSORLIST_IMPL.substitute(
|
| 1602 |
+
tensorlist_name=arg
|
| 1603 |
+
),
|
| 1604 |
+
]
|
| 1605 |
+
elif noref_cpp_type == BaseCType(tensorT):
|
| 1606 |
+
stmts_before_call += [
|
| 1607 |
+
SAVE_TENSOR_STORAGE.substitute(tensor_name=arg),
|
| 1608 |
+
SAVE_TENSOR_IMPL.substitute(tensor_name=arg),
|
| 1609 |
+
]
|
| 1610 |
+
stmts_after_call += [
|
| 1611 |
+
ENFORCE_SAME_TENSOR_STORAGE.substitute(
|
| 1612 |
+
tensor_name=arg, out_tensor_name=arg
|
| 1613 |
+
),
|
| 1614 |
+
ENFORCE_SAME_TENSOR_IMPL.substitute(tensor_name=arg),
|
| 1615 |
+
]
|
| 1616 |
+
|
| 1617 |
+
assert (stmts_before_call and stmts_after_call) or (
|
| 1618 |
+
not stmts_before_call and not stmts_after_call
|
| 1619 |
+
)
|
| 1620 |
+
|
| 1621 |
+
# Check properties of outputs (enforce (2), (3))
|
| 1622 |
+
if f.func.kind() not in (SchemaKind.inplace, SchemaKind.out):
|
| 1623 |
+
base_name = f.func.name.name.base # TODO: should be str(f.func.name.name)?
|
| 1624 |
+
aliased_arg_name = ALL_VIEW_FUNCTIONS.get(base_name, None)
|
| 1625 |
+
if aliased_arg_name is not None:
|
| 1626 |
+
aliased_arg_name = unpacked_name(aliased_arg_name)
|
| 1627 |
+
for i, (ret, ret_name) in enumerate(
|
| 1628 |
+
zip(f.func.returns, cpp.return_names(f))
|
| 1629 |
+
):
|
| 1630 |
+
noref_cpp_type = cpp.return_type(ret, symint=True).remove_const_ref()
|
| 1631 |
+
if noref_cpp_type == BaseCType(tensorT):
|
| 1632 |
+
if aliased_arg_name is not None:
|
| 1633 |
+
assert (
|
| 1634 |
+
i == 0
|
| 1635 |
+
), "Expect non-CompositeImplicitAutograd view function {base} to return single output"
|
| 1636 |
+
stmts_after_call += [
|
| 1637 |
+
ENFORCE_SAME_TENSOR_STORAGE.substitute(
|
| 1638 |
+
tensor_name=aliased_arg_name, out_tensor_name=ret_name
|
| 1639 |
+
)
|
| 1640 |
+
]
|
| 1641 |
+
else:
|
| 1642 |
+
if (
|
| 1643 |
+
type_wrapper_name(f)
|
| 1644 |
+
not in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT
|
| 1645 |
+
):
|
| 1646 |
+
stmts_after_call += [
|
| 1647 |
+
ENFORCE_TENSOR_STORAGE_USE_COUNT_EQUALS_ONE.substitute(
|
| 1648 |
+
tensor_name=ret_name, fn_name=type_wrapper_name(f)
|
| 1649 |
+
)
|
| 1650 |
+
]
|
| 1651 |
+
|
| 1652 |
+
if type_wrapper_name(f) not in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT:
|
| 1653 |
+
stmts_after_call += [
|
| 1654 |
+
ENFORCE_TENSOR_IMPL_USE_COUNT_LT_OR_EQ_ONE.substitute(
|
| 1655 |
+
tensor_name=ret_name, fn_name=type_wrapper_name(f)
|
| 1656 |
+
)
|
| 1657 |
+
]
|
| 1658 |
+
|
| 1659 |
+
# Currently we don't have any functions that return the following types, but
|
| 1660 |
+
# we should update the checks once we do
|
| 1661 |
+
elif noref_cpp_type == ListCType(OptionalCType(BaseCType(tensorT))):
|
| 1662 |
+
raise AssertionError(
|
| 1663 |
+
f"Please add use_count checks for {noref_cpp_type}"
|
| 1664 |
+
)
|
| 1665 |
+
elif noref_cpp_type == BaseCType(tensorListT):
|
| 1666 |
+
raise AssertionError(
|
| 1667 |
+
f"Please add use_count checks for {noref_cpp_type}"
|
| 1668 |
+
)
|
| 1669 |
+
|
| 1670 |
+
if stmts_before_call and stmts_after_call:
|
| 1671 |
+
call = (
|
| 1672 |
+
RUN_ONLY_IN_DEBUG_MODE.substitute(statements=stmts_before_call)
|
| 1673 |
+
+ call
|
| 1674 |
+
+ RUN_ONLY_IN_DEBUG_MODE.substitute(statements=stmts_after_call)
|
| 1675 |
+
)
|
| 1676 |
+
return call
|
| 1677 |
+
|
| 1678 |
+
def emit_call(
|
| 1679 |
+
f: NativeFunction, unpacked_bindings: list[Binding], try_jit_decomposition: bool
|
| 1680 |
+
) -> str:
|
| 1681 |
+
# We only care about adding `at::AutoDispatchBelowAutograd` guard for non-variable dispatch
|
| 1682 |
+
# (which corresponds to 'use_derived' strategy). The purpose of this guard is to make sure
|
| 1683 |
+
# the baseType operations still dispatch to non-Variable type, even if the arguments passed
|
| 1684 |
+
# in are now Variables.
|
| 1685 |
+
# See NOTE [ Treating Variables as non-Variables in type dispatch ] for details.
|
| 1686 |
+
unpacked_args = [b.name for b in unpacked_bindings]
|
| 1687 |
+
base_type_call = emit_dispatch_call(f, "self_", unpacked_args)
|
| 1688 |
+
|
| 1689 |
+
if get_view_info(f) is not None or modifies_arguments(f):
|
| 1690 |
+
guard = "at::AutoDispatchBelowAutograd guard;"
|
| 1691 |
+
else:
|
| 1692 |
+
guard = "at::AutoDispatchBelowADInplaceOrView guard;"
|
| 1693 |
+
|
| 1694 |
+
any_has_forward_grad = (
|
| 1695 |
+
get_any_has_fw_grad_cond(derivative=None)
|
| 1696 |
+
if requires_derivative
|
| 1697 |
+
else "false"
|
| 1698 |
+
)
|
| 1699 |
+
return_types = ", ".join(
|
| 1700 |
+
[cpp.return_type(a, symint=True).cpp_type() for a in f.func.returns]
|
| 1701 |
+
)
|
| 1702 |
+
if len(f.func.returns) > 1:
|
| 1703 |
+
return_types = f"std::tuple<{return_types}>"
|
| 1704 |
+
|
| 1705 |
+
arg_names = [
|
| 1706 |
+
a.name
|
| 1707 |
+
for a in cpp.arguments(
|
| 1708 |
+
f.func.arguments,
|
| 1709 |
+
faithful=True,
|
| 1710 |
+
symint=True,
|
| 1711 |
+
method=False,
|
| 1712 |
+
cpp_no_default_args=set(),
|
| 1713 |
+
)
|
| 1714 |
+
]
|
| 1715 |
+
|
| 1716 |
+
if not modifies_arguments(f) and not returns_void:
|
| 1717 |
+
if try_jit_decomposition:
|
| 1718 |
+
call = DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES_JVP_DECOMP.substitute(
|
| 1719 |
+
base_type_call=base_type_call,
|
| 1720 |
+
tmp_var=TMP_VAR,
|
| 1721 |
+
guard=guard,
|
| 1722 |
+
any_has_forward_grad=any_has_forward_grad,
|
| 1723 |
+
op_name=cpp.name(f.func),
|
| 1724 |
+
op_overload=f.func.name.overload_name,
|
| 1725 |
+
return_types=return_types,
|
| 1726 |
+
arg_names=arg_names,
|
| 1727 |
+
)
|
| 1728 |
+
else:
|
| 1729 |
+
call = DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES.substitute(
|
| 1730 |
+
base_type_call=base_type_call,
|
| 1731 |
+
tmp_var=TMP_VAR,
|
| 1732 |
+
guard=guard,
|
| 1733 |
+
)
|
| 1734 |
+
|
| 1735 |
+
call += wrap_output(f, unpacked_bindings, TMP_VAR)
|
| 1736 |
+
else:
|
| 1737 |
+
assert not try_jit_decomposition
|
| 1738 |
+
call = DISPATCH_TO_NON_VAR_TYPE_WITHOUT_RETURN_VALUES.substitute(
|
| 1739 |
+
base_type_call=base_type_call, guard=guard
|
| 1740 |
+
)
|
| 1741 |
+
call = check_tensorimpl_and_storage(call, unpacked_bindings)
|
| 1742 |
+
return call
|
| 1743 |
+
|
| 1744 |
+
def emit_history() -> str:
|
| 1745 |
+
fn = "rebase" if modifies_arguments(f) and view_info is None else "set"
|
| 1746 |
+
output_names = [r.name for r in differentiable_outputs]
|
| 1747 |
+
# TODO: flatten allocates a std::vector, which could be expensive
|
| 1748 |
+
outs = CodeTemplate("flatten_tensor_args( ${outs} )").substitute(
|
| 1749 |
+
outs=output_names if not is_inplace_foreach else "self"
|
| 1750 |
+
)
|
| 1751 |
+
if not is_inplace_foreach:
|
| 1752 |
+
return SET_HISTORY.substitute(fn=fn, differentiable_outputs=outs)
|
| 1753 |
+
else:
|
| 1754 |
+
return LOOP_OVER_VECTOR_OF_GRAD_FNS.substitute(
|
| 1755 |
+
preamble=(
|
| 1756 |
+
f"auto differentiable_outputs = {outs};\n"
|
| 1757 |
+
f"TORCH_INTERNAL_ASSERT(differentiable_outputs.size() == grad_fns.size());"
|
| 1758 |
+
),
|
| 1759 |
+
statements=f"{fn}_history(differentiable_outputs[i], grad_fns[i]);",
|
| 1760 |
+
)
|
| 1761 |
+
|
| 1762 |
+
def emit_save_outputs() -> str:
|
| 1763 |
+
if is_out_fn:
|
| 1764 |
+
# out functions don't currently support differentiation
|
| 1765 |
+
return ""
|
| 1766 |
+
if info is not None and info.has_derivatives:
|
| 1767 |
+
stmts = save_variables(info.all_saved_outputs, True)
|
| 1768 |
+
if len(stmts) == 0:
|
| 1769 |
+
return ""
|
| 1770 |
+
if not is_inplace_foreach:
|
| 1771 |
+
return CONDITIONAL.substitute(cond="grad_fn", statements=stmts)
|
| 1772 |
+
else:
|
| 1773 |
+
return LOOP_OVER_VECTOR_OF_GRAD_FNS.substitute(
|
| 1774 |
+
preamble="", statements=stmts
|
| 1775 |
+
)
|
| 1776 |
+
return ""
|
| 1777 |
+
|
| 1778 |
+
def emit_any_requires_grad() -> list[str]:
|
| 1779 |
+
extra_condition = ""
|
| 1780 |
+
if info and info.output_differentiability_conditions:
|
| 1781 |
+
assert len(info.output_differentiability_conditions) == 1
|
| 1782 |
+
extra_condition = f"_any_requires_grad &= ({info.output_differentiability_conditions[0]});"
|
| 1783 |
+
names_of_args_with_derivatives = [arg.name for arg in args_with_derivatives]
|
| 1784 |
+
if is_inplace_foreach and info is not None:
|
| 1785 |
+
for i, arg in enumerate(names_of_args_with_derivatives):
|
| 1786 |
+
for f_arg, r_arg in inplace_foreacharg2refarg.items():
|
| 1787 |
+
if arg == r_arg.name:
|
| 1788 |
+
names_of_args_with_derivatives[i] = f_arg.name
|
| 1789 |
+
return [
|
| 1790 |
+
SETUP_ANY_REQUIRES_GRAD.substitute(
|
| 1791 |
+
args_with_derivatives=names_of_args_with_derivatives,
|
| 1792 |
+
extra_differentiability_conditions=extra_condition,
|
| 1793 |
+
)
|
| 1794 |
+
]
|
| 1795 |
+
|
| 1796 |
+
def get_any_has_forward_grad_name(var_names: tuple[str, ...]) -> str:
|
| 1797 |
+
if len(var_names) == 1:
|
| 1798 |
+
return f"_any_has_forward_grad_{var_names[0]}"
|
| 1799 |
+
else:
|
| 1800 |
+
return f'_any_has_forward_grad_{"_".join(var_names)}'
|
| 1801 |
+
|
| 1802 |
+
def emit_any_has_forward_grad() -> list[str]:
|
| 1803 |
+
content: list[str] = []
|
| 1804 |
+
if not is_foreach:
|
| 1805 |
+
for derivative in fw_derivatives:
|
| 1806 |
+
requires_fw_grad = get_any_has_fw_grad_cond(derivative=derivative)
|
| 1807 |
+
if info and info.output_differentiability_conditions:
|
| 1808 |
+
assert len(info.output_differentiability_conditions) == 1
|
| 1809 |
+
requires_fw_grad = f"({info.output_differentiability_conditions[0]}) && {requires_fw_grad}"
|
| 1810 |
+
content.append(
|
| 1811 |
+
f"[[maybe_unused]] auto {get_any_has_forward_grad_name(derivative.var_names)} = {requires_fw_grad};"
|
| 1812 |
+
)
|
| 1813 |
+
else:
|
| 1814 |
+
for derivative in fw_derivatives:
|
| 1815 |
+
bool_vector_name = get_any_has_forward_grad_name(derivative.var_names)
|
| 1816 |
+
cur_derivative_conditions = []
|
| 1817 |
+
for inp in differentiable_inputs:
|
| 1818 |
+
if derivative.required_inputs_fw_grad is None:
|
| 1819 |
+
continue
|
| 1820 |
+
if inp.name not in derivative.required_inputs_fw_grad:
|
| 1821 |
+
continue
|
| 1822 |
+
inp_name = (
|
| 1823 |
+
inp.name
|
| 1824 |
+
if not inplace
|
| 1825 |
+
else refargname2inplace_foreacharg[inp.name].name
|
| 1826 |
+
)
|
| 1827 |
+
inp_type = (
|
| 1828 |
+
inp.type
|
| 1829 |
+
if not inplace
|
| 1830 |
+
else refargname2inplace_foreacharg[inp.name].type
|
| 1831 |
+
)
|
| 1832 |
+
is_list_type = is_tensor_list_type(inp_type)
|
| 1833 |
+
if is_list_type:
|
| 1834 |
+
if inp_name != "self":
|
| 1835 |
+
content.append(
|
| 1836 |
+
FW_DERIVATIVE_SIZE_CHECK_TEMPLATE.substitute(
|
| 1837 |
+
inp_name=inp_name
|
| 1838 |
+
)
|
| 1839 |
+
)
|
| 1840 |
+
cur_derivative_conditions.append(
|
| 1841 |
+
FW_DERIVATIVE_CHECK_TEMPLATE.substitute(
|
| 1842 |
+
req_inp=inp_name + "[i]"
|
| 1843 |
+
)
|
| 1844 |
+
)
|
| 1845 |
+
else:
|
| 1846 |
+
cur_derivative_conditions.append(
|
| 1847 |
+
FW_DERIVATIVE_CHECK_TEMPLATE.substitute(req_inp=inp_name)
|
| 1848 |
+
)
|
| 1849 |
+
|
| 1850 |
+
content.append(f"std::vector<bool> {bool_vector_name}(self.size());")
|
| 1851 |
+
content.append("for (const auto& i : c10::irange(self.size())) {")
|
| 1852 |
+
content.append(
|
| 1853 |
+
f" {bool_vector_name}[i] = {' || '.join(cur_derivative_conditions)};"
|
| 1854 |
+
)
|
| 1855 |
+
content.append("}")
|
| 1856 |
+
return content
|
| 1857 |
+
|
| 1858 |
+
def emit_check_inplace() -> list[str]:
|
| 1859 |
+
if not inplace:
|
| 1860 |
+
return []
|
| 1861 |
+
return [
|
| 1862 |
+
f"check_inplace({arg.name}, _any_requires_grad);"
|
| 1863 |
+
for arg in differentiable_outputs
|
| 1864 |
+
]
|
| 1865 |
+
|
| 1866 |
+
def emit_fw_derivatives() -> list[str]:
|
| 1867 |
+
content: list[str] = []
|
| 1868 |
+
fw_grad_setters: list[str] = []
|
| 1869 |
+
for derivative in fw_derivatives:
|
| 1870 |
+
res = derivative.var_names
|
| 1871 |
+
if f.func.name.name.inplace:
|
| 1872 |
+
assert (
|
| 1873 |
+
len(res) == 1
|
| 1874 |
+
), "Expected number of outputs to be 1 if function is inplace"
|
| 1875 |
+
# TODO update this when inplace namings are unified
|
| 1876 |
+
res = ("self",)
|
| 1877 |
+
|
| 1878 |
+
assert derivative.required_inputs_fw_grad is not None
|
| 1879 |
+
|
| 1880 |
+
unpacked_arguments = ""
|
| 1881 |
+
for inp in differentiable_inputs:
|
| 1882 |
+
inp_name = inp.name
|
| 1883 |
+
is_input_tensorlist = is_foreach and is_tensor_list_type(
|
| 1884 |
+
inp.type
|
| 1885 |
+
if not inplace
|
| 1886 |
+
else refargname2inplace_foreacharg[inp.name].type
|
| 1887 |
+
)
|
| 1888 |
+
input_suffix = "[i]" if is_input_tensorlist else ""
|
| 1889 |
+
if is_inplace_foreach:
|
| 1890 |
+
if inp.name in refargname2inplace_foreacharg:
|
| 1891 |
+
inp_name = refargname2inplace_foreacharg[inp.name].name
|
| 1892 |
+
zeros_fn = (
|
| 1893 |
+
"zeros_symint"
|
| 1894 |
+
if inplace and inp.name == "self"
|
| 1895 |
+
else "_efficientzerotensor_symint"
|
| 1896 |
+
)
|
| 1897 |
+
if inp.name in derivative.required_inputs_fw_grad:
|
| 1898 |
+
unpacked_arguments += (
|
| 1899 |
+
FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE.substitute(
|
| 1900 |
+
inp_name=inp.name,
|
| 1901 |
+
inp=inp_name + input_suffix,
|
| 1902 |
+
zeros_fn=zeros_fn,
|
| 1903 |
+
)
|
| 1904 |
+
)
|
| 1905 |
+
if inp.name in (derivative.required_inputs_primal or []):
|
| 1906 |
+
unpacked_arguments += (
|
| 1907 |
+
FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE.substitute(
|
| 1908 |
+
inp_name=inp.name,
|
| 1909 |
+
inp=inp_name + input_suffix,
|
| 1910 |
+
)
|
| 1911 |
+
)
|
| 1912 |
+
if derivative.required_original_self_value:
|
| 1913 |
+
input_suffix = "s[i]" if is_inplace_foreach else ""
|
| 1914 |
+
unpacked_arguments += FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE.substitute(
|
| 1915 |
+
inp_name="original_self",
|
| 1916 |
+
inp="original_self" + input_suffix,
|
| 1917 |
+
zeros_fn=zeros_fn,
|
| 1918 |
+
)
|
| 1919 |
+
unpacked_arguments += FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE.substitute(
|
| 1920 |
+
inp_name="original_self",
|
| 1921 |
+
inp="original_self" + input_suffix,
|
| 1922 |
+
)
|
| 1923 |
+
elif inplace and derivative.is_reusing_outplace_formula:
|
| 1924 |
+
# The gradient wasn't already cloned, do it if grad mode is enabled
|
| 1925 |
+
unpacked_arguments += (
|
| 1926 |
+
"self_t = GradMode::is_enabled() ? self_t.clone() : self_t;"
|
| 1927 |
+
)
|
| 1928 |
+
|
| 1929 |
+
if inplace:
|
| 1930 |
+
is_inplace_str = "true"
|
| 1931 |
+
else:
|
| 1932 |
+
is_inplace_str = "false"
|
| 1933 |
+
|
| 1934 |
+
requires_fw_grad = get_any_has_forward_grad_name(derivative.var_names)
|
| 1935 |
+
|
| 1936 |
+
if all(
|
| 1937 |
+
(isinstance(var_type, BaseType) and var_type.is_tensor_like())
|
| 1938 |
+
for var_type in derivative.var_types
|
| 1939 |
+
):
|
| 1940 |
+
# Is there a way to get from BaseType to BaseCType
|
| 1941 |
+
if len(derivative.var_types) == 1:
|
| 1942 |
+
opt_res_grad_type = OptionalCType(BaseCType(tensorT)).cpp_type()
|
| 1943 |
+
if not is_foreach:
|
| 1944 |
+
fw_grad_setters.append(
|
| 1945 |
+
FW_DERIVATIVE_SETTER_TENSOR.substitute(
|
| 1946 |
+
out_arg=res[0], is_inplace=is_inplace_str
|
| 1947 |
+
)
|
| 1948 |
+
)
|
| 1949 |
+
else:
|
| 1950 |
+
assert res[0] == ("result" if not inplace else "self")
|
| 1951 |
+
fw_grad_setters.append(
|
| 1952 |
+
FW_DERIVATIVE_SETTER_TENSOR_FOREACH.substitute(
|
| 1953 |
+
out_arg=res[0], is_inplace=is_inplace_str
|
| 1954 |
+
)
|
| 1955 |
+
)
|
| 1956 |
+
requires_fw_grad += f" && ({derivative.var_names[0]}.defined())"
|
| 1957 |
+
else:
|
| 1958 |
+
tuple_type = TupleCType(
|
| 1959 |
+
[BaseCType(tensorT)] * len(derivative.var_types)
|
| 1960 |
+
)
|
| 1961 |
+
opt_res_grad_type = OptionalCType(tuple_type).cpp_type()
|
| 1962 |
+
for idx, single_res in enumerate(res):
|
| 1963 |
+
fw_grad_setters.append(
|
| 1964 |
+
FW_DERIVATIVE_SETTER_MULTI_OUTPUT.substitute(
|
| 1965 |
+
idx=idx, all_res="_".join(res), out_arg=single_res
|
| 1966 |
+
)
|
| 1967 |
+
)
|
| 1968 |
+
elif (
|
| 1969 |
+
isinstance(derivative.var_types[0], ListType)
|
| 1970 |
+
and derivative.var_types[0].is_tensor_like()
|
| 1971 |
+
):
|
| 1972 |
+
assert (
|
| 1973 |
+
len(derivative.var_types) == 1
|
| 1974 |
+
), "Expected number of outputs to be 1 if function returns ListType"
|
| 1975 |
+
if not is_foreach:
|
| 1976 |
+
opt_res_grad_type = OptionalCType(
|
| 1977 |
+
VectorCType(BaseCType(tensorT))
|
| 1978 |
+
).cpp_type()
|
| 1979 |
+
fw_grad_setters.append(
|
| 1980 |
+
FW_DERIVATIVE_SETTER_TENSOR_LIST.substitute(
|
| 1981 |
+
out_arg=res[0], is_inplace=is_inplace_str
|
| 1982 |
+
)
|
| 1983 |
+
)
|
| 1984 |
+
else:
|
| 1985 |
+
# TODO(crcrpar): Should this (= the foreach specific logic) be refactored somehow?
|
| 1986 |
+
# Only out-place foreach functions that have entries in `tools/autograd/derivatives.yaml`
|
| 1987 |
+
# can reach here.
|
| 1988 |
+
opt_res_grad_type = OptionalCType(BaseCType(tensorT)).cpp_type()
|
| 1989 |
+
fw_grad_setters.append(
|
| 1990 |
+
FW_DERIVATIVE_SETTER_TENSOR_FOREACH.substitute(
|
| 1991 |
+
out_arg=res[0], is_inplace=is_inplace_str
|
| 1992 |
+
)
|
| 1993 |
+
)
|
| 1994 |
+
else:
|
| 1995 |
+
raise RuntimeError("Unsupported output type for forward derivative")
|
| 1996 |
+
|
| 1997 |
+
if not is_foreach:
|
| 1998 |
+
fw_grad_opt_definition = f"{opt_res_grad_type} {'_'.join(res)}_new_fw_grad_opt = ::std::nullopt;"
|
| 1999 |
+
# View ops create fw_grad that already is a view of the base's fw_grad so just use that
|
| 2000 |
+
content.append(
|
| 2001 |
+
FW_DERIVATIVE_TEMPLATE.substitute(
|
| 2002 |
+
fw_grad_opt_definition=fw_grad_opt_definition,
|
| 2003 |
+
requires_fw_grad=requires_fw_grad,
|
| 2004 |
+
formula=derivative.formula,
|
| 2005 |
+
out_arg="_".join(res),
|
| 2006 |
+
unpacked_arguments=unpacked_arguments,
|
| 2007 |
+
)
|
| 2008 |
+
)
|
| 2009 |
+
else:
|
| 2010 |
+
# note(crcrpar): Assuming `self` is TensorList.
|
| 2011 |
+
fw_grad_opt_definition = (
|
| 2012 |
+
f"std::vector<{opt_res_grad_type}> {'_'.join(res)}_new_fw_grad_opts"
|
| 2013 |
+
"(self.size(), ::std::nullopt);"
|
| 2014 |
+
)
|
| 2015 |
+
foreach_forward_grad_formula = derivative.formula
|
| 2016 |
+
_foreach_arg: Argument | DifferentiableInput
|
| 2017 |
+
if inplace:
|
| 2018 |
+
for _foreach_arg, _ref_arg in inplace_foreacharg2refarg.items():
|
| 2019 |
+
# note(crcrpar): Massage only Scalar and ArrayRef<Scalar> here.
|
| 2020 |
+
if not (
|
| 2021 |
+
is_tensor_type(_foreach_arg.type)
|
| 2022 |
+
or is_tensor_list_type(_foreach_arg.type)
|
| 2023 |
+
):
|
| 2024 |
+
pattern = _foreach_arg.name
|
| 2025 |
+
if isinstance(_foreach_arg.type, ListType):
|
| 2026 |
+
pattern += "[i]"
|
| 2027 |
+
foreach_forward_grad_formula = (
|
| 2028 |
+
foreach_forward_grad_formula.replace(
|
| 2029 |
+
_ref_arg.name, pattern
|
| 2030 |
+
)
|
| 2031 |
+
)
|
| 2032 |
+
else:
|
| 2033 |
+
if (
|
| 2034 |
+
"result" in foreach_forward_grad_formula
|
| 2035 |
+
and "result[i]" not in foreach_forward_grad_formula
|
| 2036 |
+
):
|
| 2037 |
+
foreach_forward_grad_formula = (
|
| 2038 |
+
foreach_forward_grad_formula.replace("result", "result[i]")
|
| 2039 |
+
)
|
| 2040 |
+
|
| 2041 |
+
content.append(
|
| 2042 |
+
FW_DERIVATIVE_FOREACH_TEMPLATE.substitute(
|
| 2043 |
+
fw_grad_opt_definition=fw_grad_opt_definition,
|
| 2044 |
+
vector_of_optional_tensor=f"{'_'.join(res)}_new_fw_grad_opts",
|
| 2045 |
+
any_has_forward_grad_for_current_index=" || ".join(
|
| 2046 |
+
get_any_has_forward_grad_name(derivative.var_names) + "[i]"
|
| 2047 |
+
for derivative in fw_derivatives
|
| 2048 |
+
),
|
| 2049 |
+
formula=foreach_forward_grad_formula,
|
| 2050 |
+
unpacked_arguments=unpacked_arguments,
|
| 2051 |
+
)
|
| 2052 |
+
)
|
| 2053 |
+
|
| 2054 |
+
# Set all the grads at the end to avoid: https://github.com/pytorch/pytorch/issues/67367
|
| 2055 |
+
content.append("\n".join(fw_grad_setters))
|
| 2056 |
+
return content
|
| 2057 |
+
|
| 2058 |
+
def get_any_has_fw_grad_cond(derivative: ForwardDerivative | None) -> str:
|
| 2059 |
+
#
|
| 2060 |
+
# Produces a condition string (e.g, "isFwGradDefined(grad_output) || isFwGradDefined(output)")
|
| 2061 |
+
#
|
| 2062 |
+
if derivative is None:
|
| 2063 |
+
# (1) If a derivative is NOT provided, cond will check fw_grad of ALL differentiable inputs
|
| 2064 |
+
# - Used in the out_fn case when we want to forbid fw derivatives
|
| 2065 |
+
# - Used in the case where the fw_derivative is not defined, but we want
|
| 2066 |
+
# To check if there is a decomposition registered for jvp
|
| 2067 |
+
to_check: list[str] = []
|
| 2068 |
+
for inp in list(
|
| 2069 |
+
mapMaybe(
|
| 2070 |
+
gen_differentiable_input,
|
| 2071 |
+
f.func.arguments.non_out + list(f.func.arguments.out), # type: ignore[operator]
|
| 2072 |
+
)
|
| 2073 |
+
):
|
| 2074 |
+
if is_tensor_type(inp.type):
|
| 2075 |
+
to_check.append(
|
| 2076 |
+
FW_DERIVATIVE_CHECK_TEMPLATE.substitute(req_inp=inp.name)
|
| 2077 |
+
)
|
| 2078 |
+
elif is_tensor_list_type(inp.type):
|
| 2079 |
+
to_check.append(
|
| 2080 |
+
FW_DERIVATIVE_TENSORLIST_CHECK_TEMPLATE.substitute(
|
| 2081 |
+
req_inp=inp.name
|
| 2082 |
+
)
|
| 2083 |
+
)
|
| 2084 |
+
else:
|
| 2085 |
+
raise RuntimeError(
|
| 2086 |
+
f'Unsupported input type for "{name}" when forbidding forward AD usage.'
|
| 2087 |
+
)
|
| 2088 |
+
return f'({" || ".join(to_check)})'
|
| 2089 |
+
else:
|
| 2090 |
+
# (2) If derivative is provided, use that information to determine which inputs
|
| 2091 |
+
# to check fw_grad for
|
| 2092 |
+
assert derivative.required_inputs_fw_grad is not None
|
| 2093 |
+
|
| 2094 |
+
if len(derivative.required_inputs_fw_grad) == 0:
|
| 2095 |
+
# Handle functions like stack
|
| 2096 |
+
# For these, we don't unpack anything and always call the user function
|
| 2097 |
+
if not (
|
| 2098 |
+
len(differentiable_inputs) == 1
|
| 2099 |
+
and is_tensor_list_type(differentiable_inputs[0].type)
|
| 2100 |
+
):
|
| 2101 |
+
raise RuntimeError(
|
| 2102 |
+
f'No differentiable input to "{name}" is a differentiable Tensor (as the provided '
|
| 2103 |
+
"forward AD formula does not use any input tangent) even though a forward gradient "
|
| 2104 |
+
"formula has been defined for it. This case should only happen for function that "
|
| 2105 |
+
"take a single TensorList as input. All other cases are not supported right now."
|
| 2106 |
+
)
|
| 2107 |
+
any_has_fw_grad = "true"
|
| 2108 |
+
else:
|
| 2109 |
+
any_has_fw_grad = " || ".join(
|
| 2110 |
+
[
|
| 2111 |
+
(
|
| 2112 |
+
FW_DERIVATIVE_TENSORLIST_CHECK_TEMPLATE
|
| 2113 |
+
if is_tensor_list_type(inp.type)
|
| 2114 |
+
else FW_DERIVATIVE_CHECK_TEMPLATE
|
| 2115 |
+
).substitute(req_inp=inp.name)
|
| 2116 |
+
for inp in differentiable_inputs
|
| 2117 |
+
if inp.name in derivative.required_inputs_fw_grad
|
| 2118 |
+
]
|
| 2119 |
+
)
|
| 2120 |
+
any_has_fw_grad = f"({any_has_fw_grad})"
|
| 2121 |
+
|
| 2122 |
+
return any_has_fw_grad
|
| 2123 |
+
|
| 2124 |
+
def emit_forbid_fw_derivatives(is_out_fn: bool = False) -> str:
|
| 2125 |
+
if is_out_fn:
|
| 2126 |
+
msg = "because it is an out= function"
|
| 2127 |
+
else:
|
| 2128 |
+
msg = (
|
| 2129 |
+
"because it has not been implemented yet.\\nPlease file an issue "
|
| 2130 |
+
"to PyTorch at https://github.com/pytorch/pytorch/issues/new?template=feature-request.yml "
|
| 2131 |
+
"so that we can prioritize its implementation."
|
| 2132 |
+
)
|
| 2133 |
+
cond = get_any_has_fw_grad_cond(derivative=None)
|
| 2134 |
+
return (
|
| 2135 |
+
FW_DERIVATIVE_FORBID_TEMPLATE.substitute(cond=cond, name=name, msg=msg)
|
| 2136 |
+
if cond != ""
|
| 2137 |
+
else ""
|
| 2138 |
+
)
|
| 2139 |
+
|
| 2140 |
+
body: list[str] = []
|
| 2141 |
+
unpack_args_stats, unpacked_bindings = unpack_args(f)
|
| 2142 |
+
|
| 2143 |
+
body.extend(unpack_args_stats)
|
| 2144 |
+
if requires_derivative:
|
| 2145 |
+
body.extend(emit_any_requires_grad())
|
| 2146 |
+
body.extend(emit_any_has_forward_grad())
|
| 2147 |
+
body.extend(emit_check_inplace())
|
| 2148 |
+
body.extend(emit_original_self_definition())
|
| 2149 |
+
body.extend(setup_derivative(differentiable_inputs))
|
| 2150 |
+
|
| 2151 |
+
body.append(emit_call(f, unpacked_bindings, try_jit_decomposition))
|
| 2152 |
+
if requires_derivative:
|
| 2153 |
+
# set_flags has to appear after version_counter, because rebase_history
|
| 2154 |
+
# requires that the counter is incremented before it is called
|
| 2155 |
+
body.append(emit_history())
|
| 2156 |
+
body.extend(emit_check_if_in_complex_autograd_allowlist())
|
| 2157 |
+
|
| 2158 |
+
if is_out_fn:
|
| 2159 |
+
body.append(emit_forbid_fw_derivatives(is_out_fn=True))
|
| 2160 |
+
else:
|
| 2161 |
+
if requires_derivative and not try_jit_decomposition:
|
| 2162 |
+
if len(fw_derivatives) > 0:
|
| 2163 |
+
body.extend(emit_fw_derivatives())
|
| 2164 |
+
else:
|
| 2165 |
+
body.append(emit_forbid_fw_derivatives())
|
| 2166 |
+
|
| 2167 |
+
if requires_derivative:
|
| 2168 |
+
# Save only after the forward AD has been set up
|
| 2169 |
+
body.append(emit_save_outputs())
|
| 2170 |
+
|
| 2171 |
+
if str(f.func.name.name) in RESET_GRAD_ACCUMULATOR:
|
| 2172 |
+
# `inplace` implies that there is exactly one output named `self`,
|
| 2173 |
+
# so we can keep the generated code easy. If you need to
|
| 2174 |
+
# `reset_grad_accumulator` in an operator that's not `inplace`, you can
|
| 2175 |
+
# remove this assert but the code generation will get more elaborate
|
| 2176 |
+
assert inplace
|
| 2177 |
+
body.append("reset_grad_accumulator(self);")
|
| 2178 |
+
if not returns_void:
|
| 2179 |
+
body.append(f"return {get_return_value(f)};")
|
| 2180 |
+
return body
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/ADInplaceOrViewType.cpp
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
|
| 2 |
+
#include "torch/csrc/autograd/VariableTypeUtils.h"
|
| 3 |
+
#include "torch/csrc/autograd/generated/ViewFuncs.h"
|
| 4 |
+
|
| 5 |
+
#include <torch/library.h>
|
| 6 |
+
#include <ATen/FunctionalInverses.h>
|
| 7 |
+
#include <ATen/FunctionalTensorWrapper.h>
|
| 8 |
+
|
| 9 |
+
// ${generated_comment}
|
| 10 |
+
|
| 11 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 12 |
+
#include <ATen/Operators.h>
|
| 13 |
+
#else
|
| 14 |
+
$ops_headers
|
| 15 |
+
#endif
|
| 16 |
+
|
| 17 |
+
using namespace at;
|
| 18 |
+
using torch::autograd::CreationMeta;
|
| 19 |
+
using torch::autograd::as_view;
|
| 20 |
+
using torch::autograd::increment_version;
|
| 21 |
+
|
| 22 |
+
namespace torch {
|
| 23 |
+
|
| 24 |
+
namespace ADInplaceOrView {
|
| 25 |
+
|
| 26 |
+
namespace {
|
| 27 |
+
${inplace_or_view_method_definitions}
|
| 28 |
+
} // namespace
|
| 29 |
+
} // namespace ADInplaceOrView
|
| 30 |
+
|
| 31 |
+
namespace {
|
| 32 |
+
|
| 33 |
+
TORCH_LIBRARY_IMPL(aten, ADInplaceOrView, m) {
|
| 34 |
+
${inplace_or_view_wrapper_registrations};
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
} // namespace
|
| 38 |
+
} // namespace torch
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.cpp
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "torch/csrc/autograd/FunctionsManual.h"
|
| 2 |
+
#include "torch/csrc/dynamo/compiled_autograd.h"
|
| 3 |
+
|
| 4 |
+
// ${generated_comment}
|
| 5 |
+
|
| 6 |
+
// The manual function definitions that used to be here are now in torch/csrc/autograd/FunctionsManual.cpp
|
| 7 |
+
// This speeds up re-compilation and allow to share these implementations so that they can be
|
| 8 |
+
// used for forward mode AD formulas as well.
|
| 9 |
+
|
| 10 |
+
using namespace torch::autograd::generated::details;
|
| 11 |
+
using at::Tensor;
|
| 12 |
+
using at::Scalar;
|
| 13 |
+
using at::IntArrayRef;
|
| 14 |
+
using at::TensorList;
|
| 15 |
+
|
| 16 |
+
namespace torch::autograd::generated {
|
| 17 |
+
|
| 18 |
+
${autograd_function_definitions}
|
| 19 |
+
|
| 20 |
+
} // namespace torch::autograd::generated
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.h
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// ${generated_comment}
|
| 4 |
+
|
| 5 |
+
#include <ATen/ATen.h>
|
| 6 |
+
#include <ATen/core/functional.h>
|
| 7 |
+
#include <ATen/TensorGeometry.h>
|
| 8 |
+
|
| 9 |
+
#include "torch/csrc/autograd/function.h"
|
| 10 |
+
#include "torch/csrc/autograd/variable.h"
|
| 11 |
+
#include "torch/csrc/autograd/saved_variable.h"
|
| 12 |
+
#include <torch/csrc/Export.h>
|
| 13 |
+
|
| 14 |
+
#include <c10/core/SymIntArrayRef.h>
|
| 15 |
+
|
| 16 |
+
namespace torch { namespace autograd { namespace generated {
|
| 17 |
+
|
| 18 |
+
using at::Scalar;
|
| 19 |
+
using at::Tensor;
|
| 20 |
+
using at::IntArrayRef;
|
| 21 |
+
using at::ArrayRef;
|
| 22 |
+
using at::Type;
|
| 23 |
+
using at::TensorGeometry;
|
| 24 |
+
using at::ScalarType;
|
| 25 |
+
using std::optional;
|
| 26 |
+
using c10::fmap;
|
| 27 |
+
|
| 28 |
+
inline std::vector<Tensor> unpack_list(at::ArrayRef<SavedVariable> xs, std::shared_ptr<Node> saved_for = nullptr) {
|
| 29 |
+
// NB: we must explicitly do the conversion in the lambda, otherwise template
|
| 30 |
+
// deduction will give a Tensor of Variable which is not convertible
|
| 31 |
+
return fmap(xs, [&saved_for](const SavedVariable& x) {
|
| 32 |
+
// TODO(crcrpar): Use `std::move(saved_for)` to avoid incrementing refcount, which would need refactoring.
|
| 33 |
+
return static_cast<Tensor>(x.unpack(saved_for));
|
| 34 |
+
});
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
inline c10::List<std::optional<Tensor>> unpack_opt_list(at::ArrayRef<SavedVariable> xs, std::shared_ptr<Node> saved_for = nullptr) {
|
| 38 |
+
torch::List<std::optional<Tensor>> result;
|
| 39 |
+
result.reserve(xs.size());
|
| 40 |
+
for (const SavedVariable& v : xs) {
|
| 41 |
+
auto var = v.unpack(saved_for);
|
| 42 |
+
result.push_back(var.defined() ? std::optional<Tensor>(var) : ::std::nullopt);
|
| 43 |
+
}
|
| 44 |
+
return result;
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
using torch::autograd::TypeAndSize;
|
| 48 |
+
|
| 49 |
+
${autograd_function_declarations}
|
| 50 |
+
|
| 51 |
+
}}} // namespace torch::autograd::generated
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/TraceType.cpp
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
|
| 2 |
+
#include "torch/csrc/jit/frontend/tracer.h"
|
| 3 |
+
|
| 4 |
+
#include <torch/library.h>
|
| 5 |
+
|
| 6 |
+
#include "torch/csrc/autograd/function.h"
|
| 7 |
+
|
| 8 |
+
#include "ATen/quantized/Quantizer.h"
|
| 9 |
+
|
| 10 |
+
// ${generated_comment}
|
| 11 |
+
|
| 12 |
+
// See the `Tracer` section in `torch/csrc/jit/OVERVIEW.md`.
|
| 13 |
+
// NOTE See [Sharded File] comment in VariableType
|
| 14 |
+
|
| 15 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 16 |
+
#include <ATen/Operators.h>
|
| 17 |
+
#else
|
| 18 |
+
$ops_headers
|
| 19 |
+
#endif
|
| 20 |
+
|
| 21 |
+
using namespace at;
|
| 22 |
+
|
| 23 |
+
namespace torch {
|
| 24 |
+
|
| 25 |
+
namespace TraceType {
|
| 26 |
+
|
| 27 |
+
namespace {
|
| 28 |
+
${trace_method_definitions}
|
| 29 |
+
} // namespace
|
| 30 |
+
} // namespace TraceType
|
| 31 |
+
|
| 32 |
+
namespace {
|
| 33 |
+
|
| 34 |
+
TORCH_LIBRARY_IMPL(aten, Tracer, m) {
|
| 35 |
+
${trace_wrapper_registrations};
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
} // namespace
|
| 39 |
+
|
| 40 |
+
} // namespace torch
|
minigpt2/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/VariableType.h
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// ${generated_comment}
|
| 4 |
+
|
| 5 |
+
#include <ATen/core/Tensor.h>
|
| 6 |
+
#include <ATen/Context.h>
|
| 7 |
+
|
| 8 |
+
#include <c10/util/intrusive_ptr.h>
|
| 9 |
+
|
| 10 |
+
#include <torch/csrc/Export.h>
|
| 11 |
+
#include <torch/csrc/autograd/autograd_not_implemented_fallback.h>
|
| 12 |
+
|
| 13 |
+
#include <cstdint> // for size_t
|
| 14 |
+
#include <functional> // for function
|
| 15 |
+
#include <memory> // for unique_ptr
|
| 16 |
+
#include <string>
|
| 17 |
+
#include <vector>
|
| 18 |
+
|
| 19 |
+
namespace at {
|
| 20 |
+
struct Quantizer;
|
| 21 |
+
};
|
| 22 |
+
|
| 23 |
+
namespace torch { namespace autograd {
|
| 24 |
+
|
| 25 |
+
using Variable = at::Tensor;
|
| 26 |
+
using at::Context;
|
| 27 |
+
using at::Device;
|
| 28 |
+
using at::Dimname;
|
| 29 |
+
using at::DimnameList;
|
| 30 |
+
using at::Generator;
|
| 31 |
+
using at::IntArrayRef;
|
| 32 |
+
using at::MemoryFormat;
|
| 33 |
+
using at::QScheme;
|
| 34 |
+
using at::Scalar;
|
| 35 |
+
using at::ScalarType;
|
| 36 |
+
using at::Storage;
|
| 37 |
+
using at::Tensor;
|
| 38 |
+
using at::TensorList;
|
| 39 |
+
using at::TensorOptions;
|
| 40 |
+
using at::Quantizer;
|
| 41 |
+
// This is temporary typedef to enable Quantizer in aten native function API
|
| 42 |
+
// we'll remove them when we are actually exposing Quantizer class
|
| 43 |
+
// to frontend
|
| 44 |
+
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
|
| 45 |
+
using std::optional;
|
| 46 |
+
|
| 47 |
+
namespace VariableType {
|
| 48 |
+
TORCH_API std::vector<at::DeprecatedTypeProperties*> allCUDATypes();
|
| 49 |
+
TORCH_API std::vector<at::DeprecatedTypeProperties*> allXPUTypes();
|
| 50 |
+
TORCH_API std::vector<at::DeprecatedTypeProperties*> allCPUTypes();
|
| 51 |
+
TORCH_API std::vector<at::DeprecatedTypeProperties*> allPrivateUser1Types();
|
| 52 |
+
|
| 53 |
+
at::Tensor & unpack(Tensor & t, const char * name, int pos);
|
| 54 |
+
const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
|
| 55 |
+
at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
|
| 56 |
+
std::vector<at::Tensor> unpack(const at::ITensorListRef& tl, const char *name, int pos);
|
| 57 |
+
};
|
| 58 |
+
|
| 59 |
+
}} // namespace torch::autograd
|