Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- infer_4_33_0/lib/python3.10/site-packages/fifty/utilities/models/4096_2.h5 +3 -0
- janus/lib/python3.10/site-packages/torch/_export/__init__.py +317 -0
- janus/lib/python3.10/site-packages/torch/_export/converter.py +1584 -0
- janus/lib/python3.10/site-packages/torch/_export/db/__init__.py +5 -0
- janus/lib/python3.10/site-packages/torch/_export/db/case.py +174 -0
- janus/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py +22 -0
- janus/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py +17 -0
- janus/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py +26 -0
- janus/lib/python3.10/site-packages/torch/_export/db/logging.py +47 -0
- janus/lib/python3.10/site-packages/torch/_export/error.py +56 -0
- janus/lib/python3.10/site-packages/torch/_export/non_strict_utils.py +523 -0
- janus/lib/python3.10/site-packages/torch/_export/pass_base.py +441 -0
- janus/lib/python3.10/site-packages/torch/_export/pass_infra/__init__.py +0 -0
- janus/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/node_metadata.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/proxy_value.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/pass_infra/proxy_value.py +42 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__init__.py +1 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/_node_metadata_hook.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/constant_folding.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constants_pass.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_autocast_with_hop_pass.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_quantized_ops_with_standard_ops_pass.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_set_grad_with_hop_pass.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_with_hop_pass_util.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/_node_metadata_hook.py +80 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py +227 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/collect_tracepoints_pass.py +102 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/constant_folding.py +299 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/functionalize_side_effectful_ops_pass.py +94 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/lift_constants_pass.py +318 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py +27 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/replace_autocast_with_hop_pass.py +179 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/replace_quantized_ops_with_standard_ops_pass.py +673 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/replace_set_grad_with_hop_pass.py +110 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py +65 -0
- janus/lib/python3.10/site-packages/torch/_export/passes/replace_with_hop_pass_util.py +178 -0
- janus/lib/python3.10/site-packages/torch/_export/tools.py +146 -0
- janus/lib/python3.10/site-packages/torch/_export/utils.py +893 -0
- janus/lib/python3.10/site-packages/torch/_export/verifier.py +456 -0
- janus/lib/python3.10/site-packages/torch/_export/wrappers.py +121 -0
- janus/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_gpu_trace.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc +0 -0
infer_4_33_0/lib/python3.10/site-packages/fifty/utilities/models/4096_2.h5
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e7f9ae009b40fbd2181f7788f963a693da5c64d443d879e752ee48bd82f5d48e
|
| 3 |
+
size 4815072
|
janus/lib/python3.10/site-packages/torch/_export/__init__.py
ADDED
|
@@ -0,0 +1,317 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import copy
|
| 3 |
+
import dataclasses
|
| 4 |
+
import functools
|
| 5 |
+
import io
|
| 6 |
+
import json
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
import sys
|
| 11 |
+
import types
|
| 12 |
+
import warnings
|
| 13 |
+
import weakref
|
| 14 |
+
import zipfile
|
| 15 |
+
from collections import OrderedDict
|
| 16 |
+
from contextlib import contextmanager
|
| 17 |
+
from functools import lru_cache
|
| 18 |
+
|
| 19 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 20 |
+
from unittest.mock import patch
|
| 21 |
+
|
| 22 |
+
import torch
|
| 23 |
+
import torch.fx
|
| 24 |
+
import torch.utils._pytree as pytree
|
| 25 |
+
|
| 26 |
+
from torch._dispatch.python import enable_python_dispatcher
|
| 27 |
+
from torch._utils_internal import log_export_usage
|
| 28 |
+
from torch.export._tree_utils import reorder_kwargs
|
| 29 |
+
from torch.export.graph_signature import (
|
| 30 |
+
ArgumentSpec,
|
| 31 |
+
ConstantArgument,
|
| 32 |
+
ExportGraphSignature,
|
| 33 |
+
InputKind,
|
| 34 |
+
InputSpec,
|
| 35 |
+
OutputKind,
|
| 36 |
+
OutputSpec,
|
| 37 |
+
SymIntArgument,
|
| 38 |
+
TensorArgument,
|
| 39 |
+
)
|
| 40 |
+
from torch.fx import traceback as fx_traceback
|
| 41 |
+
from torch.fx._compatibility import compatibility
|
| 42 |
+
from torch.fx.experimental.proxy_tensor import make_fx
|
| 43 |
+
from torch._subclasses.fake_tensor import unset_fake_temporarily
|
| 44 |
+
from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo
|
| 45 |
+
|
| 46 |
+
from .wrappers import _wrap_submodules
|
| 47 |
+
|
| 48 |
+
log = logging.getLogger(__name__)
|
| 49 |
+
|
| 50 |
+
@dataclasses.dataclass
class ExportDynamoConfig:
    """
    Manage Export-specific configurations of Dynamo.
    """

    # Whether Dynamo is allowed to trace RNN modules during export.
    allow_rnn: bool = True
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# We only want to print this once to avoid flooding logs in workflows where capture_pre_autograd_graph
|
| 59 |
+
# is called multiple times.
|
| 60 |
+
@lru_cache
def capture_pre_autograd_graph_warning():
    """Log the deprecation banner for ``capture_pre_autograd_graph()``.

    Decorated with ``lru_cache`` so the banner is emitted only once per
    process, even when the deprecated API is called many times.
    """
    from torch._inductor import config

    banner = (
        "+============================+",
        "| !!! WARNING !!! |",
        "+============================+",
        "capture_pre_autograd_graph() is deprecated and doesn't provide any function guarantee moving forward.",
        "Please switch to use torch.export.export_for_training instead.",
    )
    for message in banner:
        log.warning(message)
    if config.is_fbcode():
        log.warning("Unless the unittest is in the blocklist, capture_pre_autograd_graph() will fallback to torch.export.export_for_training.")  # noqa: B950
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
@compatibility(is_backward_compatible=False)
def capture_pre_autograd_graph(
    f: torch.nn.Module,
    args: Tuple[Any],
    kwargs: Optional[Dict[str, Any]] = None,
    dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any]]] = None,
) -> torch.nn.Module:
    """
    A helper function that is intended to trace a module before any pre-autograd
    decomposition is run. The produced module will be "non-functional" and
    composed of aten operators. Later this API will be deleted in favor of more general
    torch.export API.

    Args:
      f: nn.Module to be traced

      args: example positional inputs.

      kwargs: optional example keyword inputs.

      dynamic_shapes: Should either be:
         1) a dict from argument names of ``f`` to their dynamic shape specifications,
         2) a tuple that specifies dynamic shape specifications for each input in original order.
         If you are specifying dynamism on keyword args, you will need to pass them in the order that
         is defined in the original function signature.

         The dynamic shape of a tensor argument can be specified as either
         (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
         not required to include static dimension indices in this dict, but when they are,
         they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
         where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
         are denoted by None. Arguments that are dicts or tuples / lists of tensors are
         recursively specified by using mappings or sequences of contained specifications.

    Returns:
        An nn.Module containing the traced method.

    """
    # Imports are deferred to call time so importing torch._export does not
    # pull in the full export/tracing machinery.
    from torch.export._trace import _extract_fake_inputs, DEFAULT_EXPORT_DYNAMO_CONFIG, _ignore_backend_decomps
    from torch._utils_internal import capture_pre_autograd_graph_using_training_ir
    from torch._export.non_strict_utils import make_constraints
    from torch._subclasses.functional_tensor import FunctionalTensor
    from torch.export._unlift import _create_stateful_graph_module
    from torch.export.dynamic_shapes import _combine_args

    # Deprecation banner; lru_cache on the helper keeps it to one emission.
    capture_pre_autograd_graph_warning()

    if sys.platform == "win32":
        raise RuntimeError("capture_pre_autograd_graph not yet supported on Windows")

    assert isinstance(f, torch.nn.Module), "Expected an nn.Module instance."

    if kwargs is None:
        kwargs = {}

    if capture_pre_autograd_graph_using_training_ir():
        # Internal kill switch: delegate to the modern training-IR export path.
        @lru_cache
        def print_export_warning():
            log.warning("Using torch.export.export_for_training(...,strict=True)")
        print_export_warning()
        module = torch.export.export_for_training(f, args, kwargs, dynamic_shapes=dynamic_shapes, strict=True).module()
    else:
        log_export_usage(event="export.private_api", flags={"capture_pre_autograd_graph"})

        # Do not decompose dropout for exported models, because in eval mode the dropout
        # op disappears from the graph, which makes it difficult to switch to train mode.
        # See https://github.com/pytorch/pytorch/pull/115258#issuecomment-1900755832.
        decomp_table = {
            op: op.decompose
            for op in FunctionalTensor.maybe_aliasing_or_mutating_ops
            if op != torch.ops.aten.dropout.default
        }
        # Trace with Dynamo in symbolic mode, pre-dispatch, down to an aten graph.
        with torch._dynamo.config.patch(dataclasses.asdict(DEFAULT_EXPORT_DYNAMO_CONFIG)), _ignore_backend_decomps():
            m = torch._dynamo.export(
                f,
                dynamic_shapes=dynamic_shapes,
                assume_static_by_default=True,
                tracing_mode="symbolic",
                decomposition_table=decomp_table,
                pre_dispatch=True,
                aten_graph=True,
                _log_export_usage=False,
            )(
                *args,
                **kwargs,
            )[0]

        _, _, fake_mode = _extract_fake_inputs(m, args, kwargs)

        # Keep only ranges for unbacked symbols (names like "i0"/"f3"); these
        # become the graph's inline constraints.
        m.meta["inline_constraints"] = {
            k: v
            for k, v in fake_mode.shape_env.var_to_range.items()
            if re.match(r"^[if]\d+$", str(k))
        }

        if isinstance(f, torch.nn.Module):
            from torch.export._trace import _restore_state_dict
            # Re-attach the original module's state_dict names onto the traced graph.
            _restore_state_dict(f, m)

        flat_args, _ = pytree.tree_flatten((args, kwargs or {}))
        combined_args = _combine_args(f, args, kwargs)
        range_constraints = make_constraints(
            fake_mode,
            m,
            combined_args,
            dynamic_shapes,
            0,
        )

        module = _create_stateful_graph_module(
            m,
            range_constraints=range_constraints,
        )

    error_message = \
        """
        Calling train() or eval() is not supported for exported models.
        Alternatively, you may override these methods to do custom user behavior as follows:

            def _my_train(self, mode: bool = True):
                ...

            def _my_eval(self):
                ...

            model.train = types.MethodType(_my_train, model)
            model.eval = types.MethodType(_my_eval, model)
        """

    # train()/eval() would silently do the wrong thing on an exported graph,
    # so both are stubbed out to raise with instructions instead.
    def _train(self, mode: bool = True):
        raise NotImplementedError(error_message)

    def _eval(self, mode: bool = True):
        raise NotImplementedError(error_message)

    module.train = types.MethodType(_train, module)  # type: ignore[method-assign]
    module.eval = types.MethodType(_eval, module)  # type: ignore[method-assign]

    # Remove Proxy because they cannot be deepcopied or pickled.
    if hasattr(module, "_buffers"):
        torch._export.utils.remove_proxy_from_state_dict(
            module._buffers, in_place=True
        )
    return module
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def aot_compile(
    f: Callable,
    args: Tuple[Any],
    kwargs: Optional[Dict[str, Any]] = None,
    *,
    dynamic_shapes: Optional[Dict[str, Any]] = None,
    options: Optional[Dict[str, Any]] = None,
    remove_runtime_assertions: bool = False,
    disable_constraint_solver: bool = False,
    same_signature: bool = True,
) -> str:
    """
    Note: this function is not stable yet

    Traces either an nn.Module's forward function or just a callable with PyTorch
    operations inside, generates executable cpp code from the program, and returns
    the path to the generated shared library

    Args:
        f: the `nn.Module` or callable to trace.

        args: example positional inputs.

        kwargs: optional example keyword inputs.

        dynamic_shapes: Should either be:
            1) a dict from argument names of ``f`` to their dynamic shape specifications,
            2) a tuple that specifies dynamic shape specifications for each input in original order.
            If you are specifying dynamism on keyword args, you will need to pass them in the order that
            is defined in the original function signature.

            The dynamic shape of a tensor argument can be specified as either
            (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
            not required to include static dimension indices in this dict, but when they are,
            they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
            where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
            are denoted by None. Arguments that are dicts or tuples / lists of tensors are
            recursively specified by using mappings or sequences of contained specifications.

        options: A dictionary of options to control inductor

        remove_runtime_assertions: accepted for backward compatibility but
            currently unused by this function.

        disable_constraint_solver: Whether the dim constraint solver must be disabled.

        same_signature: forwarded to ``_export_to_torch_ir`` on the non-predispatch
            path; controls whether the exported graph keeps the original signature.

    Returns:
        Path to the generated shared library
    """
    # Deferred imports keep module import light. (The previously imported
    # `select_decomp_table` was unused and has been removed.)
    from torch.export._trace import _export_to_torch_ir
    from torch._inductor import config

    if config.is_predispatch:
        gm = torch.export._trace._export(
            f, args, kwargs, dynamic_shapes, pre_dispatch=True
        ).module()
    else:
        # We want to export to Torch IR here to utilize the pre_grad passes in
        # inductor, which run on Torch IR.
        gm = _export_to_torch_ir(
            f,
            args,
            kwargs,
            dynamic_shapes,
            disable_constraint_solver=disable_constraint_solver,
            same_signature=same_signature,
            # Disabling this flag, because instead we can rely on the mapping
            # dynamo_flat_name_to_original_fqn which is coming from Dynamo.
            restore_fqn=False,
        )

    # Gradients are never needed for ahead-of-time compilation.
    with torch.no_grad():
        so_path = torch._inductor.aot_compile(gm, args, kwargs, options=options)  # type: ignore[arg-type]

    return so_path
|
| 290 |
+
|
| 291 |
+
def aot_load(so_path: str, device: str) -> Callable:
|
| 292 |
+
"""
|
| 293 |
+
Loads a shared library generated by aot_compile and returns a callable
|
| 294 |
+
|
| 295 |
+
Args:
|
| 296 |
+
so_path: Path to the shared library
|
| 297 |
+
|
| 298 |
+
Returns:
|
| 299 |
+
A callable
|
| 300 |
+
"""
|
| 301 |
+
if device == "cpu":
|
| 302 |
+
runner = torch._C._aoti.AOTIModelContainerRunnerCpu(so_path, 1) # type: ignore[call-arg]
|
| 303 |
+
elif device == "cuda" or device.startswith("cuda:"):
|
| 304 |
+
runner = torch._C._aoti.AOTIModelContainerRunnerCuda(so_path, 1, device) # type: ignore[assignment, call-arg]
|
| 305 |
+
else:
|
| 306 |
+
raise RuntimeError("Unsupported device " + device)
|
| 307 |
+
|
| 308 |
+
def optimized(*args, **kwargs):
|
| 309 |
+
call_spec = runner.get_call_spec() # type: ignore[attr-defined]
|
| 310 |
+
in_spec = pytree.treespec_loads(call_spec[0])
|
| 311 |
+
out_spec = pytree.treespec_loads(call_spec[1])
|
| 312 |
+
flat_inputs = pytree.tree_flatten((args, reorder_kwargs(kwargs, in_spec)))[0]
|
| 313 |
+
flat_inputs = [x for x in flat_inputs if isinstance(x, torch.Tensor)]
|
| 314 |
+
flat_outputs = runner.run(flat_inputs) # type: ignore[attr-defined]
|
| 315 |
+
return pytree.tree_unflatten(flat_outputs, out_spec)
|
| 316 |
+
|
| 317 |
+
return optimized
|
janus/lib/python3.10/site-packages/torch/_export/converter.py
ADDED
|
@@ -0,0 +1,1584 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import builtins
|
| 3 |
+
import logging
|
| 4 |
+
import operator
|
| 5 |
+
import typing
|
| 6 |
+
import warnings
|
| 7 |
+
from contextlib import contextmanager
|
| 8 |
+
from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
import torch.export._trace
|
| 12 |
+
from torch import _C
|
| 13 |
+
from torch._export.passes.replace_quantized_ops_with_standard_ops_pass import (
|
| 14 |
+
replace_quantized_ops_with_standard_ops,
|
| 15 |
+
)
|
| 16 |
+
from torch.export.exported_program import ExportedProgram
|
| 17 |
+
from torch.export.graph_signature import (
|
| 18 |
+
ConstantArgument,
|
| 19 |
+
CustomObjArgument,
|
| 20 |
+
InputKind,
|
| 21 |
+
InputSpec,
|
| 22 |
+
OutputKind,
|
| 23 |
+
OutputSpec,
|
| 24 |
+
TensorArgument,
|
| 25 |
+
)
|
| 26 |
+
from torch.fx import subgraph_rewriter
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
log = logging.getLogger(__name__)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _get_param_count_list(method_graph, args_params):
|
| 33 |
+
param_count_list = []
|
| 34 |
+
for input_, arg_params_ in zip(method_graph.inputs(), args_params):
|
| 35 |
+
if "PackedParams" in str(input_.type()):
|
| 36 |
+
in_vars, _ = torch.jit._flatten(arg_params_)
|
| 37 |
+
param_count_list.append(len(in_vars))
|
| 38 |
+
else:
|
| 39 |
+
param_count_list.append(arg_params_ is not None)
|
| 40 |
+
|
| 41 |
+
return param_count_list
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _trace_and_get_graph_from_model(model, args):
|
| 45 |
+
# A basic sanity check: make sure the state_dict keys are the same
|
| 46 |
+
# before and after running the model. Fail fast!
|
| 47 |
+
orig_state_dict_keys = torch.jit._unique_state_dict(model).keys()
|
| 48 |
+
|
| 49 |
+
# Disable Autocast cache because it replaces kernel's weight and bias
|
| 50 |
+
# by (undesired) constants.
|
| 51 |
+
# No perf impact for when there are reused weights since https://github.com/pytorch/pytorch/pull/85665
|
| 52 |
+
prev_autocast_cache_enabled = torch.is_autocast_cache_enabled()
|
| 53 |
+
torch.set_autocast_cache_enabled(False)
|
| 54 |
+
trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph(
|
| 55 |
+
model,
|
| 56 |
+
args,
|
| 57 |
+
strict=False,
|
| 58 |
+
_force_outplace=False,
|
| 59 |
+
_return_inputs_states=True,
|
| 60 |
+
)
|
| 61 |
+
torch.set_autocast_cache_enabled(prev_autocast_cache_enabled)
|
| 62 |
+
|
| 63 |
+
if orig_state_dict_keys != torch.jit._unique_state_dict(model).keys():
|
| 64 |
+
raise RuntimeError(
|
| 65 |
+
"state_dict changed after running the tracer; "
|
| 66 |
+
"something weird is happening in your model!"
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
return trace_graph, torch_out
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _create_jit_graph(
    model: Union[torch.nn.Module, torch.jit.ScriptFunction], args: Sequence[Any]
) -> Tuple[torch.Graph, List["_C.IValue"], Any, Optional[torch.ScriptModule]]:
    """Produce a TorchScript graph for ``model`` plus its parameters.

    Returns ``(graph, params, torch_out, module)``:
    - ScriptModule: freeze the module (keeping parameters), list its
      parameters, propagate input shapes; ``module`` is the frozen module.
    - ScriptFunction: shape propagation only; no params, ``module`` is None.
    - plain nn.Module: trace it; params come from the state_dict,
      ``torch_out`` is the traced output, ``module`` is None.
    """
    if isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule)):
        flattened_args = tuple(torch.jit._flatten(tuple(args))[0])
        torch_out = None

        if isinstance(model, torch.jit.ScriptModule):
            try:
                graph = model.forward.graph  # type: ignore[attr-defined]
            except AttributeError as e:
                raise RuntimeError("'forward' method must be a script method") from e
            _C._jit_pass_onnx_function_substitution(graph)
            # Freeze to inline submodule attributes while preserving the
            # parameters so they can be listed below.
            freezed_module = _C._freeze_module(
                typing.cast(_C.ScriptModule, model._c), preserveParameters=True
            )
            module, params = _C._jit_onnx_list_model_parameters(freezed_module)
            method_graph = module._get_method("forward").graph
            args_params = tuple(args) + tuple(params)
            param_count_list = _get_param_count_list(method_graph, args_params)
            in_vars, _ = torch.jit._flatten(args_params)
            graph = _C._propagate_and_assign_input_shapes(
                method_graph, tuple(in_vars), param_count_list, False, False
            )
            return graph, params, torch_out, module

        # torch.jit.ScriptFunction
        params = []
        graph = model.graph
        _C._jit_pass_onnx_function_substitution(graph)
        param_count_list = _get_param_count_list(graph, args)
        graph = _C._propagate_and_assign_input_shapes(
            graph, flattened_args, param_count_list, False, False
        )
        return graph, params, torch_out, None

    # Plain nn.Module path: trace, then rename the trailing graph inputs
    # (which correspond to state_dict entries) to their state_dict keys so
    # attributes can later be identified by name.
    graph, torch_out = _trace_and_get_graph_from_model(model, args)
    _C._jit_pass_onnx_lint(graph)
    state_dict = torch.jit._unique_state_dict(model)
    params = list(state_dict.values())
    graph_inputs = list(graph.inputs())
    # User inputs come first; the remaining inputs are parameters/buffers.
    user_input_num = len(graph_inputs) - len(state_dict)
    param_names = list(state_dict.keys())
    for i, inp in enumerate(graph_inputs):
        if i >= user_input_num:
            inp.setDebugName(param_names[i - user_input_num])
    _C._jit_pass_onnx_function_substitution(graph)
    return graph, params, torch_out, None
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def list_add(a, b):
    """Combine two values with ``+`` (used as an FX call_function target)."""
    combined = a + b
    return combined
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def list_append(container, element):
    """Return a NEW list: ``container`` with ``element`` appended.

    Does not mutate ``container`` (used as an FX call_function target).
    """
    return [*container, element]
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def execute_subgraph_from_prim_loop(
    subgraph, iter_idx, len_loop_local_arguments, *args, **kwargs
):
    """
    subgraph: GraphModule from sub-block.
    iter_idx: The index of interation.
    len_loop_local_arguments: The number of loop local arguments in args.
    """
    # ``args`` packs two kinds of values. The leading entries are
    # loop-carried (loop-local) variables — the TS graph passes them as
    # block inputs because the loop body updates them. The remaining
    # entries are values captured from the enclosing graph; usually
    # read-only, except when the body performs in-place updates on them.
    split_at = len_loop_local_arguments
    loop_locals = args[:split_at]
    captured_globals = args[split_at:]
    # The sub-graph's placeholder order is: captured values first, then the
    # iteration index, then the loop-carried values.
    return subgraph(*captured_globals, iter_idx, *loop_locals, **kwargs)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def inplace_optimize_sym_size_div(gm: torch.fx.GraphModule):
    """Rewrite ``Int(scalar_tensor(sym_size) / scale)`` chains in ``gm``
    (in place) into the direct symbolic division ``sym_size // scale``."""

    def pattern(im, dim, scale):
        # Shape of the matched chain: symbolic size -> wrapped in a 0-d
        # tensor -> truncating scalar division -> converted back to int.
        sym_size_int = torch.ops.aten.sym_size.int(im, dim)
        scalar_tensor = torch.ops.aten.scalar_tensor(sym_size_int)
        div_scalar_mode = torch.ops.aten.div.Scalar_mode(
            scalar_tensor, scale, rounding_mode="trunc"
        )
        int_tensor = torch.ops.aten.Int.Tensor(div_scalar_mode)
        return int_tensor

    def replacement(im, dim, scale):
        # Same value computed symbolically, without a tensor round-trip.
        # (trunc vs floor division — sizes are presumably non-negative, so
        # the results agree; TODO confirm for negative scales.)
        sym_size_int = torch.ops.aten.sym_size.int(im, dim)
        return sym_size_int // scale

    # NOTE(review): the returned match list is unused; the call mutates gm.
    replaced_patterns = subgraph_rewriter.replace_pattern(gm, pattern, replacement)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def is_valid_for_codegen(name):
    """Return whether ``name`` can be used as a generated identifier.

    Only a leading digit disqualifies a name; an empty name is an error.
    """
    if not name:
        raise RuntimeError("Empty argument name for codegen")
    return not name[0].isdigit()
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def normalize_name(name: str, prefix: str = "rename") -> str:
    """Make ``name`` safe for FX codegen.

    Dots become underscores; a name starting with a digit gets
    ``prefix_`` prepended. Raises RuntimeError for an empty name.
    """
    candidate = name.replace(".", "_")
    # Inlined validity check (only a leading digit disqualifies a name).
    if not candidate:
        raise RuntimeError("Empty argument name for codegen")
    if candidate[0].isdigit():
        return f"{prefix}_{candidate}"
    return candidate
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
def ir_name_to_func_name(name: str) -> str:
    """prim::If -> convert_prim_If"""
    return "convert_" + name.replace("::", "_")
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def get_node_as_placeholder_or_get_attr(fx_graph, name, is_top_level_graph):
    """Materialize ``name`` in ``fx_graph``.

    Top-level graphs read attributes directly (get_attr); sub-block graphs
    receive them as lifted inputs (placeholder), matching torch.cond's
    requirement that sub-graphs take attributes as inputs.
    """
    make_node = fx_graph.get_attr if is_top_level_graph else fx_graph.placeholder
    return make_node(name)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
# Mapping from torch dtype to the integer code of the corresponding
# c10::ScalarType (ScalarType.h). prim::dtype yields these integers.
# NOTE(review): code 14 is intentionally skipped here (bfloat16 maps to
# 15) — confirm against ScalarType.h which entry 14 denotes.
_TORCH_DTYPE_TO_ENUM = {
    torch.uint8: 0,
    torch.int8: 1,
    torch.int16: 2,
    torch.int32: 3,
    torch.int64: 4,
    torch.float16: 5,
    torch.float32: 6,
    torch.float64: 7,
    torch.complex32: 8,
    torch.complex64: 9,
    torch.complex128: 10,
    torch.bool: 11,
    torch.qint8: 12,
    torch.quint8: 13,
    torch.bfloat16: 15,
}

# Inverse map: ScalarType integer code -> torch dtype.
_TORCH_ENUM_TO_DTYPE = {value: key for key, value in _TORCH_DTYPE_TO_ENUM.items()}
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def get_dtype_as_int(tensor):
    """
    prim::dtype has the signature "Tensor a) -> int", where it gets the dtype of
    the tensor and returns the integer corresponding to this dtype based on the
    enum in ScalarType.h
    """
    dtype = tensor.dtype
    # None is a safe sentinel: all enum codes are ints (including 0).
    code = _TORCH_DTYPE_TO_ENUM.get(dtype)
    if code is None:
        raise RuntimeError(f"Unsupported dtype {dtype}")
    return code
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
# Those operators will be automatically populated to a instance method
|
| 228 |
+
# of TS2FXGraphConverter with name convert_<namespace>_<opname>().
|
| 229 |
+
# Please check __init__ for method population implementations.
|
| 230 |
+
# Those operators will be automatically populated to a instance method
# of TS2FXGraphConverter with name convert_<namespace>_<opname>().
# Please check __init__ for method population implementations.
# Values are plain Python callables invoked directly on converted inputs.
kind_to_standard_operators = {
    "prim::max": builtins.max,
    "prim::min": builtins.min,
    "prim::TupleIndex": operator.getitem,
    "aten::__is__": operator.is_,
    "aten::__isnot__": operator.is_not,
    "aten::__not__": operator.not_,
    "aten::__contains__": operator.contains,
    "prim::dtype": get_dtype_as_int,
    "aten::len": len,
    # Mapping from specialized op to its symbolic counterpart.
    # They currently do not have any other overrides.
    "aten::numel": torch.ops.aten.sym_numel,
    "aten::size": torch.ops.aten.sym_size,
    "aten::storage_offset": torch.ops.aten.sym_storage_offset,
    "aten::stride": torch.ops.aten.sym_stride,
}
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def get_ir_value_parent_name_and_attr_name(node):
    """Unpack a prim::GetAttr node.

    Returns ``(output value name, input/owner value name, attribute name)``.
    """
    parent_name = node.input().debugName()
    value_name = node.output().debugName()
    attribute = node.s("name")
    return value_name, parent_name, attribute
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def construct_fqn(ir, ref_map, name_map):
    """Rebuild a dotted FQN by walking GetAttr links from ``ir`` to the root.

    ``ref_map`` maps each value to its owner value; ``name_map`` gives the
    attribute name of each link. The names are collected leaf-to-root and
    joined root-first.
    """
    parts = []
    current = ir
    while current in ref_map:
        parts.append(name_map[current])
        current = ref_map[current]
    parts.reverse()
    return ".".join(parts)
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def get_block_to_lifted_attrs(graph: torch._C.Graph) -> Dict[torch._C.Block, Set[str]]:
    """
    Perform two passes to get a mapping of blocks to a set of FQNs of its lifted attributes.
    When a graph has control flow, the graph will be divided into multiple blocks. We want to convert
    each block to a graph which will be passed into torch.cond. A restriction for torch.cond is that model
    parameters/buffers are expected to be lifted as inputs to the subgraphs. Before converting the model,
    we will run this pass which will:
        1. Figure out which params/buffers are used within blocks through tracing the GetAttr calls.
        2. Process the graph bottom up to find the lifted attributes of each block by taking the union
        of the attributes used in the current block, and the lifted attributes of all its child blocks.

    Returns:
        A mapping of blocks to a set of FQNs of its lifted attributes.
    """

    # A map from a block to its expected to be lifted arguments.
    blocks_to_lifted_attrs: Dict[torch._C.Block, Set[str]] = {}

    # Reference map stores the input (i.e., src) and output (i.e., dest) IR of a
    # GetAttr node. By traversing this reference map, we can figure out the
    # full IR aliasing pass and figure out the FQN of an attribute.
    # E.g., %2 = GetAttr(linear)[%1] --> node_to_parent_map["%2"] = "%1"
    node_to_parent_map: Dict[str, str] = {}

    # Used for reconstructing the FQN of an attribute based on the reference map.
    # In nutshell, for each GetAttr call, GetAttr(input IR, attribute name) -> output IR
    # This name map stores which attribute name is called for a src IR --> dest IR action.
    # E.g., %2 = GetAttr(linear)[%1] --> node_to_attr_name["%2"] = "linear"
    node_to_attr_name: Dict[str, str] = {}

    def _dfs_get_attr_dependency(entry):
        """
        First DFS path to construct reference map and name map.
        """
        for node in entry.nodes():
            if node.kind() == "prim::GetAttr":
                (
                    irv_name,
                    irv_parent_name,
                    attr_name,
                ) = get_ir_value_parent_name_and_attr_name(node)
                node_to_parent_map[irv_name] = irv_parent_name
                node_to_attr_name[irv_name] = attr_name
            # Recurse into control-flow sub-blocks as well.
            for block in node.blocks():
                _dfs_get_attr_dependency(block)

    def _map_blocks_to_lifted_attrs(entry):
        """
        Walk the graph in a bottom-up fashion to build the expected to be
        lifted arguments for each block.
        """
        arguments: Set[str] = set()
        for node in entry.nodes():
            for block in node.blocks():
                # Recursively build.
                arguments = arguments.union(_map_blocks_to_lifted_attrs(block))
            if node.kind() == "prim::GetAttr":
                irv_name = node.output().debugName()
                # Skip for intermediate GetAttr, which will anyway not result a FQN.
                # E.g., node_to_parent_name: {"%3": "%2", "%2": "%1"}
                #       node_to_attr_name: {"%3": "weight", "%2": "linear", "%1": "self"}
                # There is only one FQN %3-->%2-->%1: self.linear.weight
                # %2-->%1 is not a FQN: self.linear
                if irv_name not in set(node_to_parent_map.values()):
                    arguments.add(
                        construct_fqn(irv_name, node_to_parent_map, node_to_attr_name)
                    )
        if not isinstance(entry, torch._C.Graph):  # Skip the top level.
            blocks_to_lifted_attrs[entry] = arguments
        return arguments

    _dfs_get_attr_dependency(graph)
    _map_blocks_to_lifted_attrs(graph)

    return blocks_to_lifted_attrs
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
def get_attribute_fqn_from_ts_node(
    name_to_attribute_fqn: Dict[str, str], node: torch._C.Node
) -> str:
    """Resolve the dotted FQN that a prim::GetAttr / prim::SetAttr touches.

    The node's (first) input names the owner object, which must already be
    registered in ``name_to_attribute_fqn``; the node's "name" attribute is
    the attribute being accessed.

    Raises:
        ValueError: if the owner value is not known.
        RuntimeError: for any other node kind.
    """
    kind = node.kind()
    if kind == "prim::SetAttr":
        # SetAttr has two inputs (owner, value); the owner comes first.
        input_name = next(node.inputs()).debugName()
    elif kind == "prim::GetAttr":
        input_name = node.input().debugName()
    else:
        raise RuntimeError(
            f"Unexpected node kind when getting attribute fqn. node: {node} "
        )

    if input_name not in name_to_attribute_fqn:
        raise ValueError(f"Attribute {input_name} not found")
    root_attr_name = name_to_attribute_fqn[input_name]

    attr_name = node.s("name")
    # An empty root means the owner is the module itself.
    return f"{root_attr_name}.{attr_name}" if root_attr_name else attr_name
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
def get_op_overload(node: torch._C.Node):
    """Resolve the ``torch.ops`` OpOverload matching a TS node's schema."""
    schema_str = node.schema()
    assert schema_str != "(no schema)", f"got empty schema for {node}"
    schema: torch._C.FunctionSchema = torch._C.parse_schema(schema_str)
    ns, op_name = str(schema.name).split("::")
    override = schema.overload_name

    try:
        packet = getattr(getattr(torch.ops, ns), op_name)
        # An empty overload name denotes the default overload.
        op_overload = getattr(packet, override) if override else packet.default
    except Exception as e:
        raise RuntimeError(
            f"Unable to find operator {node.kind()} with schema {node.schema()}"
        ) from e

    return op_overload
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
class TS2FXGraphConverter:
|
| 388 |
+
    def __init__(
        self,
        ts_graph: Union[torch._C.Graph, torch._C.Block],
        name_to_param: Dict[str, torch.Tensor],
        name_to_buffer: Dict[str, torch.Tensor],
        blocks_to_lifted_attrs: Dict[torch._C.Block, Set[str]],
        name_to_non_tensor_attribute: Dict[str, Any],
        name_to_constant: Dict[str, Any],
    ):
        """Stateful converter from ONE TorchScript graph/block to FX.

        Each instance converts exactly one graph or sub-block; nested
        control-flow blocks are handled by fresh converter instances that
        share the parameter/buffer/constant tables.
        """
        self.ts_graph = ts_graph
        # Shared (not copied) across nested converters.
        self.name_to_param = name_to_param
        self.name_to_buffer = name_to_buffer

        self.fx_graph: torch.fx.Graph = torch.fx.Graph()
        self.input_specs: List[InputSpec] = []
        self.output_specs: List[OutputSpec] = []

        # TS debug name -> produced FX value(s). Values may be a single
        # node, a list, or a dict of nodes depending on the producing op.
        self.name_to_node: Dict[
            str, Union[torch.fx.Node, List[torch.fx.Node], Dict[Any, torch.fx.Node]]
        ] = {}
        self.name_to_constant: Dict[str, Any] = name_to_constant

        # Mapping from torchscript node output name to attribute fully qualified name
        self.name_to_attribute_fqn: Dict[str, str] = {}

        # Mapping from fully qualified name to real values or a fx graph node
        # During convert, this represents the current value of a non-tensor attribute
        # One use case is:
        # def forward(self, x):
        #      c1 = self.count
        #      self.count += 1
        #      c2 = self.count
        #      return x + c1 + c2
        self.name_to_non_tensor_attribute_node: Dict[str, Any] = {}

        # Mapping from fully qualified name to initial real values inputs
        # We separate it from self.name_to_non_tensor_attribute_node since
        # we need initial real value input when we construct fx.GraphModule
        self.name_to_non_tensor_attribute: Dict[str, Any] = name_to_non_tensor_attribute

        # Converted sub-block GraphModules, keyed by their generated names.
        self.subgraphs: Dict[str, torch.fx.GraphModule] = {}

        self.blocks_to_lifted_attrs = blocks_to_lifted_attrs

        # Populate methods for the standard operators.
        for k in kind_to_standard_operators.keys():
            handler_func_name = ir_name_to_func_name(k)
            # Create an indirect function call:
            # convert_<namespace>_<opname> --> lambda node: _convert_standard_operator(node)
            # (The lambda deliberately ignores which key produced it; all
            # standard operators share one dispatcher.)
            setattr(
                self,
                handler_func_name,
                lambda node: self._convert_standard_operators(node),
            )

        # This stores a list of return results that do not appear in the original TS
        # graph's outputs. The reason we maintain this is because some operations in the sub-block
        # might have inplace updates to the variable defined in the parent fx graph. After
        # the execution of that sub-block, the variable defined in the parent fx graph also
        # needs to be updated.
        self.name_update_from_subblock_to_parent: Set[str] = set()
|
| 449 |
+
|
| 450 |
+
def _is_get_attr_node(self, fqn):
|
| 451 |
+
return (
|
| 452 |
+
fqn in self.name_to_buffer
|
| 453 |
+
or fqn in self.name_to_param
|
| 454 |
+
or (
|
| 455 |
+
fqn in self.name_to_constant
|
| 456 |
+
and isinstance(self.name_to_constant[fqn], torch.ScriptObject)
|
| 457 |
+
)
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
    def _convert_block_to_subgraph(self, node: torch._C.Node, arguments: List[str]):
        """Convert every sub-block of ``node`` into its own fx.GraphModule.

        Each name in ``arguments`` becomes a placeholder of the sub-graph
        (torch.cond-style argument lifting). Returns a pair of parallel
        lists: the get_attr fx nodes referencing the registered sub-graphs,
        and the converter used for each sub-block.
        """
        subgraph_nodes, subgraph_converters = [], []
        for block in node.blocks():
            subgraph_converter = TS2FXGraphConverter(
                block,
                self.name_to_param,
                self.name_to_buffer,
                self.blocks_to_lifted_attrs,
                {},
                self.name_to_constant,
            )
            # Share attribute-FQN resolution state with the parent scope.
            subgraph_converter.name_to_attribute_fqn = self.name_to_attribute_fqn

            for block_arg in arguments:
                normalized_block_arg_name = normalize_name(block_arg)
                placeholder_node = subgraph_converter.fx_graph.placeholder(
                    normalized_block_arg_name
                )
                # Keyed by the ORIGINAL TS name so sub-block nodes resolve it.
                subgraph_converter.name_to_node[block_arg] = placeholder_node

            subgraph = subgraph_converter.convert()
            subgraph_name = self.add_subgraph(subgraph)
            subgraph_nodes.append(self.fx_graph.get_attr(subgraph_name))
            subgraph_converters.append(subgraph_converter)
        return subgraph_nodes, subgraph_converters
|
| 485 |
+
|
| 486 |
+
def _identify_inputs_as_arguments(self, entry):
|
| 487 |
+
"""
|
| 488 |
+
Identify inputs from the innermost sub-block. This is needed
|
| 489 |
+
for nested sub-blocks when the input is hidden in the nested sub-block.
|
| 490 |
+
E.g., example IR of input is hidden in the nested sub-block.
|
| 491 |
+
Graph[x.1]
|
| 492 |
+
%1 = ...
|
| 493 |
+
Block[]
|
| 494 |
+
Block[x.1]
|
| 495 |
+
%2 = x.1 ...
|
| 496 |
+
"""
|
| 497 |
+
arguments: Set[str] = set()
|
| 498 |
+
for block in entry.blocks():
|
| 499 |
+
for block_node in block.nodes():
|
| 500 |
+
for block_node_in in block_node.inputs():
|
| 501 |
+
if (
|
| 502 |
+
block_node_in.debugName() in self.name_to_node
|
| 503 |
+
and block_node_in.debugName() not in self.name_to_attribute_fqn
|
| 504 |
+
):
|
| 505 |
+
arguments.add(block_node_in.debugName())
|
| 506 |
+
arguments = arguments.union(
|
| 507 |
+
self._identify_inputs_as_arguments(block_node)
|
| 508 |
+
)
|
| 509 |
+
return arguments
|
| 510 |
+
|
| 511 |
+
def is_top_level_graph(self):
|
| 512 |
+
return isinstance(self.ts_graph, torch._C.Graph)
|
| 513 |
+
|
| 514 |
+
def add_subgraph(self, subgraph) -> str:
|
| 515 |
+
name = f"subgraph_{len(self.subgraphs)}"
|
| 516 |
+
self.subgraphs[name] = subgraph
|
| 517 |
+
return name
|
| 518 |
+
|
| 519 |
+
def get_args_kwargs(self, node: torch._C.Node, schema):
|
| 520 |
+
args = []
|
| 521 |
+
kwargs = {}
|
| 522 |
+
for input, schema_arg in zip(node.inputs(), schema.arguments):
|
| 523 |
+
if schema_arg.kwarg_only:
|
| 524 |
+
kwargs[schema_arg.name] = self.get_fx_value_by_ir_value(input)
|
| 525 |
+
else:
|
| 526 |
+
args.append(self.get_fx_value_by_ir_value(input))
|
| 527 |
+
|
| 528 |
+
return tuple(args), kwargs
|
| 529 |
+
|
| 530 |
+
def get_fx_value_by_ir_value(self, value: torch._C.Value):
|
| 531 |
+
value_name = value.debugName()
|
| 532 |
+
|
| 533 |
+
if value_name in self.name_to_node:
|
| 534 |
+
input_node = self.name_to_node[value_name]
|
| 535 |
+
return input_node
|
| 536 |
+
elif value_name in self.name_to_constant:
|
| 537 |
+
if isinstance(self.name_to_constant[value_name], torch.ScriptObject):
|
| 538 |
+
return self.fx_graph.get_attr(value_name)
|
| 539 |
+
return self.name_to_constant[value_name]
|
| 540 |
+
else:
|
| 541 |
+
raise ValueError(f"Input {value_name} not found")
|
| 542 |
+
|
| 543 |
+
def get_fx_value_by_fqn(self, name):
|
| 544 |
+
if name in self.name_to_node:
|
| 545 |
+
fx_node = self.name_to_node[name]
|
| 546 |
+
elif name in self.name_to_constant:
|
| 547 |
+
fx_node = self.name_to_constant[name]
|
| 548 |
+
elif name in self.name_to_non_tensor_attribute_node:
|
| 549 |
+
fx_node = self.name_to_non_tensor_attribute_node[name]
|
| 550 |
+
elif name in self.name_to_non_tensor_attribute:
|
| 551 |
+
fx_node = self.name_to_non_tensor_attribute[name]
|
| 552 |
+
else:
|
| 553 |
+
raise ValueError(f"Attribute {name} not found")
|
| 554 |
+
return fx_node
|
| 555 |
+
|
| 556 |
+
    def convert(self) -> torch.fx.GraphModule:
        """Run the full conversion: inputs, each TS node, then outputs, and
        wrap the resulting fx.Graph in a GraphModule whose root carries every
        attribute the graph might get_attr."""
        self.convert_graph_inputs()

        for node in self.ts_graph.nodes():
            self.convert_node(node)

        self.convert_graph_outputs()

        # Pass parameter and buffer to the root for lookup.
        gm = torch.fx.GraphModule(
            {
                **self.subgraphs,
                **self.name_to_param,
                **self.name_to_buffer,
                **self.name_to_non_tensor_attribute,
                **self.name_to_constant,
            },
            self.fx_graph,
        )

        # Fold the scalar_tensor/div/Int round-trip into symbolic division.
        inplace_optimize_sym_size_div(gm)

        gm.graph.lint()

        return gm
|
| 581 |
+
|
| 582 |
+
    def convert_graph_inputs(self):
        """Create an fx node and an InputSpec for every TS graph input.

        Parameters, buffers and ScriptObject constants become get_attr nodes
        at the top level (placeholders inside sub-blocks, where attributes
        arrive lifted as inputs); anything else becomes a user-input
        placeholder. ScriptObject-typed inputs not tracked anywhere are
        skipped entirely.
        """
        for graph_input in self.ts_graph.inputs():
            name = graph_input.debugName()

            if name in self.name_to_param:
                normalized_name = normalize_name(name)
                self.input_specs.append(
                    InputSpec(
                        InputKind.PARAMETER,
                        arg=TensorArgument(name=normalized_name),
                        target=name,
                    )
                )
                fx_node = get_node_as_placeholder_or_get_attr(
                    self.fx_graph, name, self.is_top_level_graph()
                )
            elif name in self.name_to_buffer:
                normalized_name = normalize_name(name)
                self.input_specs.append(
                    InputSpec(
                        InputKind.BUFFER,
                        arg=TensorArgument(name=normalized_name),
                        target=name,
                        persistent=True,
                    )
                )
                fx_node = get_node_as_placeholder_or_get_attr(
                    self.fx_graph, name, self.is_top_level_graph()
                )
            elif name in self.name_to_constant:
                assert isinstance(
                    self.name_to_constant[name], torch.ScriptObject
                ), "Input conversion only handles ScriptObject"
                normalized_name = normalize_name(name)
                self.input_specs.append(
                    InputSpec(
                        InputKind.CUSTOM_OBJ,
                        arg=CustomObjArgument(
                            name=normalized_name, class_fqn=normalized_name
                        ),
                        target=name,
                        persistent=False,
                    )
                )
                fx_node = get_node_as_placeholder_or_get_attr(
                    self.fx_graph, name, self.is_top_level_graph()
                )
            elif isinstance(graph_input.type(), torch.ClassType):
                # Directly skip inputs that are ScriptObject but not used in the graph.
                continue
            else:
                normalized_name = normalize_name(name, prefix="input")
                self.input_specs.append(
                    InputSpec(
                        InputKind.USER_INPUT,
                        arg=TensorArgument(name=normalized_name),
                        target=name,
                    )
                )
                fx_node = self.fx_graph.placeholder(normalized_name)

            # Register under the ORIGINAL TS name so later nodes resolve it.
            self.name_to_node[name] = fx_node
|
| 644 |
+
|
| 645 |
+
def convert_aten_Float(self, node: torch._C.Node):
|
| 646 |
+
def to_float_tensor(t):
|
| 647 |
+
return t.to(dtype=torch.float).item()
|
| 648 |
+
|
| 649 |
+
inp_list = [
|
| 650 |
+
self.get_fx_value_by_ir_value(inp) for inp in node.inputs()
|
| 651 |
+
] # noqa: C416
|
| 652 |
+
fx_node = self.fx_graph.call_function(
|
| 653 |
+
to_float_tensor,
|
| 654 |
+
tuple(inp_list),
|
| 655 |
+
)
|
| 656 |
+
self.name_to_node[node.output().debugName()] = fx_node
|
| 657 |
+
|
| 658 |
+
    def convert_aten_tensor(self, node: torch._C.Node):
        """aten::tensor creates a constant tensor ad-hoc --> GetAttr"""
        args, kwargs = self.get_args_kwargs(node, torch.ops.aten.tensor.default._schema)

        for k in kwargs:
            if k == "requires_grad":
                kwargs[k] = bool(kwargs[k])  # 0 -> False, 1 -> True

        # All-int data can use torch.tensor directly; otherwise fall back
        # to torch._refs.tensor (presumably for traced/symbolic inputs —
        # TODO confirm the reason the reference implementation is needed).
        to_tensor = (
            torch.tensor
            if all(isinstance(a, int) for a in args)
            else torch._refs.tensor
        )

        def target(*args, **kwargs):
            # dtype arrives as a ScalarType integer code; translate it back
            # into a torch dtype before constructing the tensor.
            if "dtype" in kwargs and kwargs["dtype"] is not None:
                kwargs["dtype"] = _TORCH_ENUM_TO_DTYPE[kwargs["dtype"]]
            return to_tensor(*args, **kwargs)

        output_name = node.output().debugName()
        fx_node = self.fx_graph.call_function(target, args, kwargs)
        self.name_to_node[output_name] = fx_node
|
| 685 |
+
|
| 686 |
+
    def convert_aten_append(self, node: torch._C.Node):
        # special handle python list append: "aten::append.t(t[](a!) self, t(c -> *) el) -> t[](a!)"

        # In-place append to the list! This is somewhat unusual, as we are
        # in-place mutating the list. It makes the converter
        # "non-functional": the result depends on the order in which nodes
        # are converted. In that sense the converter becomes a stateful
        # interpreter.
        warnings.warn(
            "Converting aten::append.t, which is a inplace mutation of the list. "
            "This makes the converter non-functional: the result depends on the order of the append nodes being converter!"
        )

        args = tuple(self.get_fx_value_by_ir_value(inp) for inp in node.inputs())
        fx_node = self.fx_graph.call_function(list_append, args)
        self.name_to_node[node.output().debugName()] = fx_node

        # Re-point the list's TS name at the appended result, emulating the
        # in-place mutation of arg[0] (the python list).
        self.name_to_node[node.inputsAt(0).debugName()] = fx_node

        # Variables that need to be updated to parent module: a placeholder
        # here means the list came from the enclosing graph.
        if not self.is_top_level_graph() and args[0].op == "placeholder":
            self.name_update_from_subblock_to_parent.add(node.inputsAt(0).debugName())
|
| 707 |
+
|
| 708 |
+
    def convert_prim_Constant(self, node: torch._C.Node):
        """prim::Constant: record the constant's Python value (or, for
        tensor constants, an aliased get_attr node) keyed by output name."""
        name = node.output().debugName()

        value: Any = None
        if node.hasAttribute("value"):
            # kindOf tells us which typed accessor to use.
            constant_kind = node.kindOf("value")
            if constant_kind == "i":  # int
                value = node.i("value")
            elif constant_kind == "f":  # float
                value = node.f("value")
            elif constant_kind == "s":  # string
                value = node.s("value")
            elif constant_kind == "t":  # tensor: stored as a module attribute
                alias_name = (
                    f"lifted_tensor_{name}"  # Follow naming convention from EP tracing.
                )
                fx_node = self.fx_graph.get_attr(alias_name)
                self.name_to_node[name] = fx_node
                # Rebind so the constant is stored under the alias below.
                name, value = alias_name, node.t("value")
            elif constant_kind == "ival":  # generic IValue
                value = node.ival("value")
            else:
                raise ValueError(f"Unsupported constant type: {node.kindOf('value')}")
        else:
            # A value-less prim::Constant denotes None.
            value = None

        self.name_to_constant[name] = value
|
| 735 |
+
|
| 736 |
+
def convert_prim_CallMethod(self, node: torch._C.Node):
|
| 737 |
+
inp_list = [
|
| 738 |
+
self.get_fx_value_by_ir_value(inp) for inp in node.inputs()
|
| 739 |
+
] # noqa: C416
|
| 740 |
+
fx_node = self.fx_graph.call_method(
|
| 741 |
+
node.s("name"),
|
| 742 |
+
tuple(inp_list),
|
| 743 |
+
)
|
| 744 |
+
self.name_to_node[node.output().debugName()] = fx_node
|
| 745 |
+
|
| 746 |
+
def convert_prim_device(self, node: torch._C.Node):
|
| 747 |
+
input_type = node.input().type()
|
| 748 |
+
if input_type.isSubtypeOf(torch._C.TensorType.get()):
|
| 749 |
+
device = input_type.device() # type: ignore[attr-defined]
|
| 750 |
+
output_name = node.output().debugName()
|
| 751 |
+
self.name_to_constant[output_name] = device
|
| 752 |
+
else:
|
| 753 |
+
raise ValueError(f"Unsupported JitType ({input_type}) when get device")
|
| 754 |
+
|
| 755 |
+
    def convert_prim_GetAttr(self, node: torch._C.Node):
        """Convert prim::GetAttr by resolving the attribute's fully qualified name.

        Tensor attributes become ``get_attr`` FX nodes; non-tensor attributes
        are resolved to their tracked Python values.
        """
        # Build fully qualified name.
        attr_fqn = get_attribute_fqn_from_ts_node(self.name_to_attribute_fqn, node)
        output_name = node.output().debugName()
        self.name_to_attribute_fqn[output_name] = attr_fqn

        if self.is_top_level_graph():
            if self._is_get_attr_node(attr_fqn):
                # We insert a get_attr node due to two reasons.
                # First, ts graph does not lift tensor constants as input nodes. So
                # tensor constants may be ignored by in convert_graph_inputs().
                # Second, attr_fqn may have been written to via SetAttr. Two
                # GetAttr may give different values.
                self.name_to_node[output_name] = self.fx_graph.get_attr(attr_fqn)
            else:
                # Non-tensor attribute: cache the current value on first read so
                # later reads observe any SetAttr updates recorded in
                # name_to_non_tensor_attribute_node.
                if attr_fqn not in self.name_to_non_tensor_attribute_node:
                    self.name_to_non_tensor_attribute_node[
                        attr_fqn
                    ] = self.name_to_non_tensor_attribute[attr_fqn]
                self.name_to_node[output_name] = self.name_to_non_tensor_attribute_node[
                    attr_fqn
                ]
        else:
            # Special support for if blocks which do not allow SetAttr TorchScript
            # node and get_attr FX Graph Node.
            if self._is_get_attr_node(attr_fqn):
                self.name_to_node[output_name] = self.name_to_node[attr_fqn]
|
| 782 |
+
|
| 783 |
+
def convert_prim_SetAttr(self, node: torch._C.Node):
|
| 784 |
+
attr_fqn = get_attribute_fqn_from_ts_node(self.name_to_attribute_fqn, node)
|
| 785 |
+
attr_value = tuple(node.inputs())[1]
|
| 786 |
+
ts_graph_tensor_input = self.get_fx_value_by_ir_value(attr_value)
|
| 787 |
+
if self._is_get_attr_node(attr_fqn):
|
| 788 |
+
fx_attr_node = self.fx_graph.get_attr(attr_fqn)
|
| 789 |
+
self.fx_graph.call_function(
|
| 790 |
+
torch.Tensor.copy_, (fx_attr_node, ts_graph_tensor_input)
|
| 791 |
+
)
|
| 792 |
+
else:
|
| 793 |
+
self.name_to_non_tensor_attribute_node[attr_fqn] = ts_graph_tensor_input
|
| 794 |
+
|
| 795 |
+
def convert_call_function_op(self, node: torch._C.Node):
|
| 796 |
+
target = get_op_overload(node)
|
| 797 |
+
|
| 798 |
+
args, kwargs = self.get_args_kwargs(node, target._schema)
|
| 799 |
+
|
| 800 |
+
fx_node = self.fx_graph.call_function(target, args, kwargs)
|
| 801 |
+
|
| 802 |
+
# TODO: covnert sourceRange() into stack_trace
|
| 803 |
+
# fx_node.meta["stack_trace"] = node.sourceRange()
|
| 804 |
+
|
| 805 |
+
if node.outputsSize() == 1:
|
| 806 |
+
output_name = node.output().debugName()
|
| 807 |
+
self.name_to_node[output_name] = fx_node
|
| 808 |
+
else:
|
| 809 |
+
for i, outp in enumerate(node.outputs()):
|
| 810 |
+
output_name = outp.debugName()
|
| 811 |
+
next_fx_node = self.fx_graph.call_function(
|
| 812 |
+
operator.getitem, (fx_node, i)
|
| 813 |
+
)
|
| 814 |
+
self.name_to_node[output_name] = next_fx_node
|
| 815 |
+
|
| 816 |
+
    def convert_prim_TupleConstruct(self, node: torch._C.Node):
        """Convert prim::TupleConstruct via the shared iterator path."""
        self._convert_prim_iterator(node)
|
| 818 |
+
|
| 819 |
+
    def convert_prim_ListConstruct(self, node: torch._C.Node):
        """Convert prim::ListConstruct via the shared iterator path."""
        self._convert_prim_iterator(node)
|
| 821 |
+
|
| 822 |
+
def _convert_prim_iterator(self, node: torch._C.Node):
|
| 823 |
+
output_list = []
|
| 824 |
+
for inp in node.inputs():
|
| 825 |
+
output_list.append(self.get_fx_value_by_ir_value(inp))
|
| 826 |
+
|
| 827 |
+
output_name = node.output().debugName()
|
| 828 |
+
self.name_to_node[output_name] = output_list
|
| 829 |
+
|
| 830 |
+
    def convert_prim_DictConstruct(self, node: torch._C.Node):
        """Convert prim::DictConstruct into a plain Python dict of FX values.

        Inputs are consumed as interleaved key/value pairs; an odd input count
        is treated as a violated invariant.
        """
        output_dict = {}
        k, v = None, None
        for i, inp in enumerate(node.inputs()):
            # We assume key value are stored in pair in the DictConstruct.
            # The first element is the key and the following is the value.
            if i % 2 == 0:
                k = self.get_fx_value_by_ir_value(inp)
            else:
                v = self.get_fx_value_by_ir_value(inp)
                assert (
                    k is not None and v is not None
                ), "DictConstruct has an empty key value pair."
                output_dict[k] = v
                # Reset so the trailing check below can detect an unpaired key.
                k, v = None, None

        assert (
            k is None and v is None
        ), "DictConstruct has an odd number of elements (violating our assumption)."

        output_name = node.output().debugName()
        self.name_to_node[output_name] = output_dict
|
| 852 |
+
|
| 853 |
+
    def convert_prim_ListUnpack(self, node: torch._C.Node):
        """Convert prim::ListUnpack via the shared unpack path."""
        self._convert_prim_unpack_iterator(node)
|
| 855 |
+
|
| 856 |
+
    def convert_prim_TupleUnpack(self, node: torch._C.Node):
        """Convert prim::TupleUnpack via the shared unpack path."""
        self._convert_prim_unpack_iterator(node)
|
| 858 |
+
|
| 859 |
+
def _convert_prim_unpack_iterator(self, node: torch._C.Node):
|
| 860 |
+
# Single input and multiple outputs for unpacking.
|
| 861 |
+
for i, outp in enumerate(node.outputs()):
|
| 862 |
+
outp_name = outp.debugName()
|
| 863 |
+
inp = self.get_fx_value_by_ir_value(node.input())
|
| 864 |
+
fx_node = self.fx_graph.call_function(operator.getitem, (inp, i))
|
| 865 |
+
self.name_to_node[outp_name] = fx_node
|
| 866 |
+
|
| 867 |
+
def convert_aten_Int(self, node: torch._C.Node):
|
| 868 |
+
# converts aten::Int as aten._to_copy + aten::_local_scalar_dense
|
| 869 |
+
target = torch.ops.aten._to_copy.default
|
| 870 |
+
args = tuple(self.get_fx_value_by_ir_value(input) for input in node.inputs())
|
| 871 |
+
to_copy_node = self.fx_graph.call_function(target, args, {"dtype": torch.int32})
|
| 872 |
+
|
| 873 |
+
fx_node = self.fx_graph.call_function(
|
| 874 |
+
torch.ops.aten._local_scalar_dense.default, (to_copy_node,)
|
| 875 |
+
)
|
| 876 |
+
|
| 877 |
+
# TODO: covnert sourceRange() into stack_trace
|
| 878 |
+
# fx_node.meta["stack_trace"] = node.sourceRange()
|
| 879 |
+
|
| 880 |
+
output_name = node.output().debugName()
|
| 881 |
+
self.name_to_node[output_name] = fx_node
|
| 882 |
+
|
| 883 |
+
def convert_prim_NumToTensor(self, node: torch._C.Node):
|
| 884 |
+
# Converts prim::NumToTensor as aten.scalar_tensor.
|
| 885 |
+
# prim::NumToTensor IRs are currently triggered by:
|
| 886 |
+
# .size() https://github.com/pytorch/pytorch/blob/main/torch/csrc/jit/frontend/tracer.cpp#L950
|
| 887 |
+
# .numel() https://github.com/pytorch/pytorch/blob/main/torch/csrc/jit/frontend/tracer.cpp#L971
|
| 888 |
+
# For both of those APIs, torch.jit.trace implicitly sets the output tensor type
|
| 889 |
+
# to be LongTensor.
|
| 890 |
+
target = torch.ops.aten.scalar_tensor
|
| 891 |
+
args = tuple(self.get_fx_value_by_ir_value(input) for input in node.inputs())
|
| 892 |
+
|
| 893 |
+
fx_node = self.fx_graph.call_function(target, args, {"dtype": torch.long})
|
| 894 |
+
output_name = node.output().debugName()
|
| 895 |
+
self.name_to_node[output_name] = fx_node
|
| 896 |
+
|
| 897 |
+
def convert_prim_CreateObject(self, node: torch._C.Node):
|
| 898 |
+
output_name = node.output().debugName()
|
| 899 |
+
self.name_to_attribute_fqn[output_name] = ""
|
| 900 |
+
|
| 901 |
+
def convert_aten__convolution(self, node: torch._C.Node):
|
| 902 |
+
# converts aten::_convolution as aten.convolution, since aten::_convolution
|
| 903 |
+
# doesn't have a meta function
|
| 904 |
+
target = torch.ops.aten.convolution.default
|
| 905 |
+
args, kwargs = self.get_args_kwargs(node, target._schema)
|
| 906 |
+
|
| 907 |
+
fx_node = self.fx_graph.call_function(target, args, kwargs)
|
| 908 |
+
|
| 909 |
+
output_name = node.output().debugName()
|
| 910 |
+
self.name_to_node[output_name] = fx_node
|
| 911 |
+
|
| 912 |
+
    def convert_aten_div(self, node: torch._C.Node):
        """Convert aten::div, rewriting the Tensor_mode overload when the
        divisor is a one-element tensor constant.
        """
        target = get_op_overload(node)
        schema = target._schema

        args, kwargs = self.get_args_kwargs(node, schema)

        # converts aten::div.Tensor_mode(x, tensor_constant)
        # as aten.div.Scalar_mode(x, tensor_constant.item())
        if schema.overload_name == "Tensor_mode":
            # NOTE(review): assumes args[1] is an fx.Node (has .name) — verify
            # against get_args_kwargs for literal/constant divisors.
            arg1_name = args[1].name
            if arg1_name in self.name_to_constant and isinstance(
                self.name_to_constant[arg1_name], torch.Tensor
            ):
                tensor_constant = self.name_to_constant[arg1_name]
                if tensor_constant.numel() == 1:
                    # Replace the tensor divisor with its scalar value.
                    updated_args = list(args)
                    updated_args[1] = self.name_to_constant[arg1_name].item()

                    fx_node = self.fx_graph.call_function(
                        torch.ops.aten.div.Scalar_mode,
                        tuple(updated_args),
                        kwargs,
                    )

                    # TODO: covnert sourceRange() into stack_trace
                    # fx_node.meta["stack_trace"] = node.sourceRange()

                    output_name = node.output().debugName()
                    self.name_to_node[output_name] = fx_node
                    return

        # Fall back to the generic op conversion.
        self.convert_call_function_op(node)
|
| 944 |
+
|
| 945 |
+
def convert_aten___getitem__(self, node: torch._C.Node):
|
| 946 |
+
input_container, index = tuple(
|
| 947 |
+
self.get_fx_value_by_ir_value(input) for input in node.inputs()
|
| 948 |
+
)
|
| 949 |
+
fx_node = self.fx_graph.call_function(
|
| 950 |
+
operator.getitem, (input_container, index)
|
| 951 |
+
)
|
| 952 |
+
output_name = node.output().debugName()
|
| 953 |
+
self.name_to_node[output_name] = fx_node
|
| 954 |
+
|
| 955 |
+
    def convert_aten_to(self, node: torch._C.Node):
        """Convert aten::to, forcing a copy when the result is later mutated."""
        target = get_op_overload(node)
        args, kwargs = self.get_args_kwargs(node, target._schema)

        # special handle aten.to.dtype and aten.to.prim_dtype followed by inplace_mutation_op
        # coz aten.to + inplace_mutation_op pattern would trigger
        # "cannot mutate tensors with frozen storage" functionalization error.
        # To work around the issue, we override the copy to be True, so that the output
        # is for sure not an alias of input
        if target == torch.ops.aten.to.dtype or target == torch.ops.aten.to.prim_dtype:
            # Check whether any consumer of this node's output mutates it.
            user_nodes = [use.user for use in node.output().uses()]
            user_targets = [
                get_op_overload(user_node)
                for user_node in user_nodes
                if user_node.schema() != "(no schema)"
            ]
            has_mutable_target = any(
                target._schema.is_mutable for target in user_targets
            )

            if has_mutable_target:
                # args[3] is the `copy` flag in both handled overloads.
                assert len(args) >= 4
                new_args = list(args)
                new_args[3] = True  # copy, override to True
                fx_node = self.fx_graph.call_function(
                    torch.ops.aten.to.dtype, tuple(new_args)
                )
                # temp hack to work around the issue https://github.com/pytorch/pytorch/issues/131679
                # When this issue is fixed, the clone node would be no longer needed
                clone_node = self.fx_graph.call_function(
                    torch.ops.aten.clone.default, (fx_node,)
                )
                output_name = node.output().debugName()
                self.name_to_node[output_name] = clone_node
                return

        # Default path: generic op conversion.
        self.convert_call_function_op(node)
|
| 992 |
+
|
| 993 |
+
def convert_aten_add(self, node: torch._C.Node):
|
| 994 |
+
if node.schema() == "(no schema)":
|
| 995 |
+
if isinstance(node.inputsAt(0).type(), torch.ListType) and isinstance(
|
| 996 |
+
node.inputsAt(1).type(), torch.ListType
|
| 997 |
+
):
|
| 998 |
+
target = torch.ops.aten.add.t
|
| 999 |
+
else:
|
| 1000 |
+
raise RuntimeError(f"unable to determind the target for {node}")
|
| 1001 |
+
else:
|
| 1002 |
+
target = get_op_overload(node)
|
| 1003 |
+
|
| 1004 |
+
if target == torch.ops.aten.add.t:
|
| 1005 |
+
# special handle python list/tuple add: "aten::add.t(t[] a, t[] b) -> t[]" for
|
| 1006 |
+
# RuntimeError: aten::add() Expected a value of type 'List[t]' for argument 'a' but instead found type 'immutable_list'.
|
| 1007 |
+
args, kwargs = self.get_args_kwargs(node, target._schema)
|
| 1008 |
+
output_name = node.output().debugName()
|
| 1009 |
+
self.name_to_node[output_name] = self.fx_graph.call_function(list_add, args)
|
| 1010 |
+
else:
|
| 1011 |
+
self.convert_call_function_op(node)
|
| 1012 |
+
|
| 1013 |
+
def _check_prim_loop_support(self, node):
|
| 1014 |
+
inputs = list(node.inputs())
|
| 1015 |
+
|
| 1016 |
+
# TODO: (1/N) stage.
|
| 1017 |
+
if inputs[0].debugName() not in self.name_to_constant:
|
| 1018 |
+
raise RuntimeError(
|
| 1019 |
+
"prim::Loop currently cannot run with dynamic value of number of iterations."
|
| 1020 |
+
)
|
| 1021 |
+
|
| 1022 |
+
# Make sure the condition is not updated in the subblock.
|
| 1023 |
+
subblock = next(node.blocks())
|
| 1024 |
+
condition_output_name = next(subblock.outputs()).debugName()
|
| 1025 |
+
for node in subblock.nodes():
|
| 1026 |
+
if (
|
| 1027 |
+
node.outputsSize() == 1
|
| 1028 |
+
and node.output().debugName() == condition_output_name
|
| 1029 |
+
):
|
| 1030 |
+
raise RuntimeError(
|
| 1031 |
+
"prim::Loop currently cannot run with dynamic value of condition."
|
| 1032 |
+
)
|
| 1033 |
+
if node.outputsSize() >= 2:
|
| 1034 |
+
for outp in node.outputs():
|
| 1035 |
+
if outp.debugName() == condition_output_name:
|
| 1036 |
+
raise RuntimeError(
|
| 1037 |
+
"prim::Loop currently cannot run with dynamic value of condition."
|
| 1038 |
+
)
|
| 1039 |
+
|
| 1040 |
+
    def convert_prim_Loop(self, node: torch._C.Node):
        """Convert prim::Loop by fully unrolling it.

        The trip count must be a compile-time constant (enforced by
        ``_check_prim_loop_support``). The body block is converted once to a
        subgraph, which is then invoked ``num_iterations`` times via
        ``execute_subgraph_from_prim_loop``, threading loop-carried values
        through ``fx_block_args``.
        """
        inputs = list(node.inputs())
        self._check_prim_loop_support(node)

        # inputs[0] is the max trip count; it resolves to a constant here.
        num_iterations = self.get_fx_value_by_ir_value(inputs[0])

        # Find inputs.
        # inputs[1] is the initial condition; inputs[2:] are loop-carried values.
        loop_local_arguments = [inp.debugName() for inp in inputs[2:]]

        global_arguments = self._identify_inputs_as_arguments(node)

        # Lift parameters as inputs.
        for block in node.blocks():
            global_arguments = global_arguments.union(
                self.blocks_to_lifted_attrs[block]
            )

        global_arguments = list(global_arguments)

        subgraph_nodes, subgraph_converters = self._convert_block_to_subgraph(
            node, global_arguments
        )

        # prim::Loop has exactly one body block.
        assert len(subgraph_nodes) == 1
        subgraph_converter = subgraph_converters[0]
        if not self.is_top_level_graph():
            # Propagate in-place name updates upward so nested loops surface
            # their mutations to the enclosing graph.
            self.name_update_from_subblock_to_parent = (
                self.name_update_from_subblock_to_parent.union(
                    subgraph_converter.name_update_from_subblock_to_parent
                )
            )

        fx_block_args = [
            self.get_fx_value_by_fqn(name)
            for name in loop_local_arguments + global_arguments
        ]
        for iter_idx in range(num_iterations):
            loop_node = self.fx_graph.call_function(
                execute_subgraph_from_prim_loop,
                # Check execute_node function for the expected arguments order.
                (
                    subgraph_nodes[0],
                    iter_idx,
                    len(loop_local_arguments),
                    *fx_block_args,
                ),
                {},
            )

            # Update the value of loop local variables.
            if node.outputsSize() >= 1:
                for i, outp in enumerate(node.outputs()):
                    output_name = outp.debugName()
                    self.name_to_node[output_name] = self.fx_graph.call_function(
                        operator.getitem,
                        (
                            loop_node,
                            i + 1,
                        ),  # + 1 because the 0th element is the condition.
                    )
                    fx_block_args[i] = self.name_to_node[output_name]

            # Update the value of global variables, whose values are modified inplace.
            for i, name in enumerate(
                subgraph_converter.name_update_from_subblock_to_parent
            ):
                self.name_to_node[name] = self.fx_graph.call_function(
                    operator.getitem,
                    (
                        loop_node,
                        i + node.outputsSize() + 1,
                    ),  # + 1 because the 0th element is the condition.
                )
                global_argument_index = global_arguments.index(name)
                fx_block_args[
                    i + node.outputsSize() + global_argument_index
                ] = self.name_to_node[name]
|
| 1117 |
+
|
| 1118 |
+
def _check_set_attr_in_if_block(self, if_node: torch._C.Node):
|
| 1119 |
+
for block in if_node.blocks():
|
| 1120 |
+
for node in block.nodes():
|
| 1121 |
+
if node.kind() == "prim::SetAttr":
|
| 1122 |
+
raise RuntimeError(
|
| 1123 |
+
"During converting prim::If to torch.cond, found prim::SetAttr op"
|
| 1124 |
+
" which is not supported yet. Please file an issue if you come "
|
| 1125 |
+
"across this error."
|
| 1126 |
+
)
|
| 1127 |
+
|
| 1128 |
+
    def convert_prim_If(self, node: torch._C.Node):
        """Convert prim::If into a ``torch.cond`` call.

        Both branches are converted to subgraphs sharing one argument list;
        multi-output conditionals are split with ``operator.getitem``.
        """
        self._check_set_attr_in_if_block(node)

        # prim::If takes exactly one input: the predicate.
        inputs = list(node.inputs())
        assert len(inputs) == 1
        predicate = self.get_fx_value_by_ir_value(inputs[0])

        # Find inputs.
        arguments = self._identify_inputs_as_arguments(node)

        # Lift parameters as inputs.
        for block in node.blocks():
            arguments = arguments.union(self.blocks_to_lifted_attrs[block])

        arguments = list(arguments)
        subgraph_nodes, _ = self._convert_block_to_subgraph(node, arguments)

        # prim::If always has a true and a false branch.
        assert len(subgraph_nodes) == 2

        fx_block_args = [self.get_fx_value_by_fqn(name) for name in arguments]

        args = (
            predicate,
            subgraph_nodes[0],
            subgraph_nodes[1],
            tuple(fx_block_args),
        )

        cond_node = self.fx_graph.call_function(torch.cond, args, {})

        # prim::If can also have zero output.
        if node.outputsSize() == 1:
            output_name = node.output().debugName()
            self.name_to_node[output_name] = cond_node
        elif node.outputsSize() > 1:
            for i, output in enumerate(node.outputs()):
                output_name = output.debugName()
                getitem = self.fx_graph.call_function(operator.getitem, (cond_node, i))
                self.name_to_node[output_name] = getitem
|
| 1167 |
+
|
| 1168 |
+
    def convert_aten_Bool(self, node: torch._C.Node):
        """Convert aten::Bool as a no-op (output aliases the input)."""
        self._convert_as_noop(node)
|
| 1170 |
+
|
| 1171 |
+
    def convert_prim_Enter(self, node: torch._C.Node):
        """Treat prim::Enter as a no-op.

        export generally treats prim::Enter as noop.
        The only context manager export supports is aten::enable_grad.
        Unfortunately, TorchScript does not support aten::enable_grad yet.
        TODO: support aten::enable_grad in both TorchScript and Converter.
        """
        return
|
| 1177 |
+
|
| 1178 |
+
    def convert_prim_Exit(self, node: torch._C.Node):
        """Treat prim::Exit as a no-op (export discards context-manager exits)."""
        return
|
| 1181 |
+
|
| 1182 |
+
def _convert_as_noop(self, node: torch._C.Node):
|
| 1183 |
+
# Converts the node as a no-op by mapping its output node as arg[0]
|
| 1184 |
+
|
| 1185 |
+
target = get_op_overload(node)
|
| 1186 |
+
schema = target._schema
|
| 1187 |
+
|
| 1188 |
+
args, kwargs = self.get_args_kwargs(node, schema)
|
| 1189 |
+
|
| 1190 |
+
output_name = node.output().debugName()
|
| 1191 |
+
self.name_to_node[output_name] = args[0]
|
| 1192 |
+
|
| 1193 |
+
def convert_profiler__record_function_exit(self, node: torch._C.Node):
|
| 1194 |
+
# _record_function_exit has side effect so we keep it in fx.graph
|
| 1195 |
+
# currently, _record_function_enter_new and _record_function_exit are
|
| 1196 |
+
# discarded during `retrace_as_exported_program`.
|
| 1197 |
+
target = torch.ops.profiler._record_function_exit
|
| 1198 |
+
args = tuple(self.get_fx_value_by_ir_value(input) for input in node.inputs())
|
| 1199 |
+
self.fx_graph.call_function(target, args)
|
| 1200 |
+
|
| 1201 |
+
def convert_prim_tolist(self, node: torch._C.Node):
|
| 1202 |
+
# prim::tolist cannot be supported by `_convert_standard_operators`
|
| 1203 |
+
# since it requires call_method instead of call_function.
|
| 1204 |
+
target = "tolist"
|
| 1205 |
+
args = (self.get_fx_value_by_ir_value(next(node.inputs())),)
|
| 1206 |
+
fx_node = self.fx_graph.call_method(target, args)
|
| 1207 |
+
output_name = node.output().debugName()
|
| 1208 |
+
self.name_to_node[output_name] = fx_node
|
| 1209 |
+
|
| 1210 |
+
def convert_prim_Uninitialized(self, node: torch._C.Node):
|
| 1211 |
+
# `prim::Uninitialized` is inserted by the compiler when it can prove
|
| 1212 |
+
# the value will never be used. It can be introduced by exceptions,
|
| 1213 |
+
# breaks, continues, and returns.
|
| 1214 |
+
# So we add a dummy constant to the graph.
|
| 1215 |
+
output_name = node.output().debugName()
|
| 1216 |
+
self.name_to_constant[output_name] = torch.Tensor()
|
| 1217 |
+
|
| 1218 |
+
def _convert_standard_operators(self, node: torch._C.Node):
|
| 1219 |
+
target = kind_to_standard_operators[node.kind()]
|
| 1220 |
+
args = tuple(self.get_fx_value_by_ir_value(input) for input in node.inputs())
|
| 1221 |
+
fx_node = self.fx_graph.call_function(target, args)
|
| 1222 |
+
output_name = node.output().debugName()
|
| 1223 |
+
self.name_to_node[output_name] = fx_node
|
| 1224 |
+
|
| 1225 |
+
def convert_node(self, node: torch._C.Node):
|
| 1226 |
+
node_kind = node.kind()
|
| 1227 |
+
|
| 1228 |
+
# Get handler based on namespace and operator name.
|
| 1229 |
+
# Provide a default node handler as well in case we don't find
|
| 1230 |
+
# matching converter for that.
|
| 1231 |
+
handler_func_name = ir_name_to_func_name(node_kind)
|
| 1232 |
+
handler_func = getattr(self, handler_func_name, self.convert_call_function_op)
|
| 1233 |
+
|
| 1234 |
+
# str calls print function implemented in CPP. To avoid repeating
|
| 1235 |
+
# the entire logic here, we simply keep first line from node string (getting rid
|
| 1236 |
+
# of sub-blocks IR prints).
|
| 1237 |
+
node_str = "".join(str(node).split("\n")[:1])
|
| 1238 |
+
log.debug("[%s] converts [%s]", handler_func.__name__, node_str)
|
| 1239 |
+
try:
|
| 1240 |
+
handler_func(node)
|
| 1241 |
+
except Exception as e:
|
| 1242 |
+
raise RuntimeError(f"TS2EPConverter failed for node {node_kind}") from e
|
| 1243 |
+
|
| 1244 |
+
def convert_graph_outputs(self):
|
| 1245 |
+
args = []
|
| 1246 |
+
outp_name_list = [outp.debugName() for outp in self.ts_graph.outputs()] + list(
|
| 1247 |
+
self.name_update_from_subblock_to_parent
|
| 1248 |
+
)
|
| 1249 |
+
for output_name in outp_name_list:
|
| 1250 |
+
if output_name in self.name_to_node:
|
| 1251 |
+
fx_node = self.name_to_node[output_name]
|
| 1252 |
+
# TODO: Revisit this later after HigherOrderOp design changes.
|
| 1253 |
+
# Currently, we cannot directly return input as output.
|
| 1254 |
+
if (
|
| 1255 |
+
not self.is_top_level_graph()
|
| 1256 |
+
and isinstance(fx_node, torch.fx.Node)
|
| 1257 |
+
and fx_node.op == "placeholder"
|
| 1258 |
+
):
|
| 1259 |
+
fx_node = self.fx_graph.call_function(torch.clone, (fx_node,))
|
| 1260 |
+
args.append(fx_node)
|
| 1261 |
+
self.output_specs.append(
|
| 1262 |
+
OutputSpec(
|
| 1263 |
+
OutputKind.USER_OUTPUT,
|
| 1264 |
+
arg=TensorArgument(name=output_name),
|
| 1265 |
+
target=output_name,
|
| 1266 |
+
)
|
| 1267 |
+
)
|
| 1268 |
+
elif output_name in self.name_to_constant:
|
| 1269 |
+
args.append(self.name_to_constant[output_name])
|
| 1270 |
+
self.output_specs.append(
|
| 1271 |
+
OutputSpec(
|
| 1272 |
+
OutputKind.USER_OUTPUT,
|
| 1273 |
+
arg=ConstantArgument(
|
| 1274 |
+
name=output_name, value=self.name_to_constant[output_name]
|
| 1275 |
+
),
|
| 1276 |
+
target=output_name,
|
| 1277 |
+
)
|
| 1278 |
+
)
|
| 1279 |
+
else:
|
| 1280 |
+
raise ValueError(f"Output {output_name} not found")
|
| 1281 |
+
|
| 1282 |
+
if len(args) == 0:
|
| 1283 |
+
# Sub-block of prim::If can have zero output.
|
| 1284 |
+
self.fx_graph.output([])
|
| 1285 |
+
elif len(args) == 1:
|
| 1286 |
+
self.fx_graph.output(
|
| 1287 |
+
args[0]
|
| 1288 |
+
) # Get rid of an extra list wrapped around final output.
|
| 1289 |
+
elif len(args) > 1:
|
| 1290 |
+
self.fx_graph.output(
|
| 1291 |
+
args
|
| 1292 |
+
) # For prim::Loop and prim::If with multiple outputs.
|
| 1293 |
+
else:
|
| 1294 |
+
# Sub-block of prim::Loop can have multiple outputs.
|
| 1295 |
+
self.fx_graph.output(args)
|
| 1296 |
+
|
| 1297 |
+
|
| 1298 |
+
class ExplainTS2FXGraphConverter(TS2FXGraphConverter):
    """
    Run TS2FXGraphConverter in an explain mode. It collects all failed operators conversions
    and provide that information to users. In order to collect all failed conversions, it
    also mocks some internal attributes (e.g., name_to_node).
    """

    class _DictMock(dict):
        """Dict that returns a fixed mock value for missing keys.

        This lets conversion continue past unsupported nodes so that *all*
        failures can be collected instead of stopping at the first one.
        """

        def __init__(self, dict_data, mock_value):
            super().__init__(dict_data)
            # Fallback returned for any key that was never inserted.
            self.mock_value = mock_value

        def __getitem__(self, key):
            # If the original dictionary has the key, return its value.
            # Otherwise, return the mock value.
            if not super().__contains__(key):
                return self.mock_value
            return super().__getitem__(key)

        def __contains__(self, key):
            # Pretend every key exists so lookups never short-circuit.
            return True

    def __init__(
        self,
        ts_graph: Union[torch._C.Graph, torch._C.Block],
        name_to_param: Dict[str, torch.Tensor],
        name_to_buffer: Dict[str, torch.Tensor],
        blocks_to_lifted_attrs: Dict[torch._C.Block, Set[str]],
        name_to_non_tensor_attribute: Dict[str, Any],
        name_to_constant: Dict[str, Any],
    ):
        super().__init__(
            ts_graph,
            name_to_param,
            name_to_buffer,
            blocks_to_lifted_attrs,
            name_to_non_tensor_attribute,
            name_to_constant,
        )

        # Data to keep track of unsupported nodes.
        self.unsupported_node_list: List[torch._C.Node] = []

        # Add mock to needed attributes.
        self.name_to_node = ExplainTS2FXGraphConverter._DictMock(
            self.name_to_node,
            # Dummy node.
            torch.fx.Node(
                None,  # type: ignore[arg-type]
                "mock",
                "call_function",
                lambda: None,
                (),
                {},
            ),
        )

    def explain(self):
        """Run the full conversion pipeline, collecting (not raising) failures."""
        self.convert_graph_inputs()
        for node in self.ts_graph.nodes():
            self.convert_node(node)
        self.convert_graph_outputs()

    def convert_node(self, node):
        """Convert one node, recording failures instead of propagating them."""
        try:
            super().convert_node(node)
        except Exception:
            # The exception detail is intentionally dropped (the binding was
            # previously unused); the failing node itself is recorded and
            # rendered by TS2EPConverter.explain().
            self.unsupported_node_list.append(node)
|
| 1366 |
+
|
| 1367 |
+
|
| 1368 |
+
@contextmanager
def disable_logging(log):
    """Temporarily silence *log*, restoring its previous state on exit."""
    previous_state = log.disabled
    log.disabled = True
    try:
        yield
    finally:
        log.disabled = previous_state
|
| 1376 |
+
|
| 1377 |
+
|
| 1378 |
+
class TS2EPConverter:
|
| 1379 |
+
# TorchScript model to ExportedProgram converter
|
| 1380 |
+
    def __init__(
        self,
        ts_model: Union[torch.jit.ScriptModule, torch.jit.ScriptFunction],
        sample_args: Tuple[Any, ...],
        sample_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """Build the JIT graph for *ts_model* and classify its state_dict
        entries into parameters vs. buffers.
        """
        self.ts_model = ts_model
        self.ts_graph, self.params, _, _ = _create_jit_graph(ts_model, sample_args)

        self.sample_args = sample_args
        self.sample_kwargs = sample_kwargs

        self.name_to_param: Dict[str, torch.Tensor] = {}
        self.name_to_buffer: Dict[str, torch.Tensor] = {}
        # ScriptFunctions carry no parameters.
        param_list = (
            list(self.ts_model.parameters())
            if not isinstance(self.ts_model, torch._C.ScriptFunction)
            else []
        )
        if not isinstance(self.ts_model, torch._C.ScriptFunction):
            for k, tensor in self.ts_model.state_dict().items():  # type: ignore[union-attr]
                # Check if tensor belongs to any parameter.
                # NOTE(review): the match is by value equality, so a buffer
                # that happens to equal a parameter would be misclassified as
                # a parameter — confirm acceptable for converted models.
                if any(
                    (tensor == param).all()
                    for param in param_list
                    if tensor.shape == param.shape
                ):
                    self.name_to_param[k] = tensor
                else:
                    self.name_to_buffer[k] = tensor

        self.name_to_non_tensor_attributes: Dict[str, Any] = {}
        self.name_to_constant: Dict[str, Any] = {}

        self.lift_get_attr()
|
| 1415 |
+
|
| 1416 |
+
    def convert(self) -> ExportedProgram:
        """Run the full TorchScript-to-ExportedProgram conversion pipeline.

        Steps: convert the TS graph to an FX GraphModule, rewrite quantized
        ops, retrace the GraphModule into an ExportedProgram, then reconcile
        its state_dict with the original model's.
        """
        log.info(
            """
TS2EPConverter logging starts from here.

INFO: (TORCH_LOGS="export" <cmd>)
    * Log TorchScript IR.

DEBUG: (TORCH_LOGS="+export" <cmd>), additionally
    * Log conversion IR by IR in a format of [<conversion handler name>] converts [<IR>].
        """
        )
        log.info("TorchScript graph\n\n%s\n", self.ts_graph)

        blocks_to_lifted_attrs = get_block_to_lifted_attrs(self.ts_graph)

        graph_converter = TS2FXGraphConverter(
            self.ts_graph,
            self.name_to_param,
            self.name_to_buffer,
            blocks_to_lifted_attrs,
            self.name_to_non_tensor_attributes,
            self.name_to_constant,
        )
        gm = graph_converter.convert()

        # Post-proccessing step to deal with quantized operators.
        replace_quantized_ops_with_standard_ops(gm)
        log.info("GraphModule: %s", gm.print_readable(print_output=False))

        ep = self.retrace_as_exported_program(
            gm,
            graph_converter.name_to_constant,
        )
        log.info("%s", ep)

        # Post-processing step to ensure ExportedProgram has the same state_dict as
        # the original TorchScript model. Throw warnings for additionally populated
        # state_dict entries.
        if not isinstance(self.ts_model, torch._C.ScriptFunction):
            for k, tensor in self.ts_model.state_dict().items():  # type: ignore[union-attr]
                if k not in ep.state_dict:
                    warnings.warn(
                        f"Manually populate {k} into state_dict ExportedProgram, but it is never used by the ExportedProgram."
                    )
                    ep.state_dict[k] = tensor

        return ep
|
| 1464 |
+
|
| 1465 |
+
@disable_logging(log)
|
| 1466 |
+
def explain(self, print_output=True):
|
| 1467 |
+
blocks_to_lifted_attrs = get_block_to_lifted_attrs(self.ts_graph)
|
| 1468 |
+
|
| 1469 |
+
graph_converter = ExplainTS2FXGraphConverter(
|
| 1470 |
+
self.ts_graph,
|
| 1471 |
+
self.name_to_param,
|
| 1472 |
+
self.name_to_buffer,
|
| 1473 |
+
blocks_to_lifted_attrs,
|
| 1474 |
+
self.name_to_non_tensor_attributes,
|
| 1475 |
+
self.name_to_constant,
|
| 1476 |
+
)
|
| 1477 |
+
graph_converter.explain()
|
| 1478 |
+
if len(graph_converter.unsupported_node_list) > 0:
|
| 1479 |
+
explain_str = "Unsupported nodes are found in the following list:"
|
| 1480 |
+
for i, n in enumerate(graph_converter.unsupported_node_list):
|
| 1481 |
+
node_str = "".join(str(n).split("\n")[:1])
|
| 1482 |
+
explain_str += f"\n\n {i}. {n.kind()} [{node_str}]"
|
| 1483 |
+
else:
|
| 1484 |
+
explain_str = "Success!"
|
| 1485 |
+
if print_output:
|
| 1486 |
+
print(explain_str)
|
| 1487 |
+
return explain_str
|
| 1488 |
+
|
| 1489 |
+
    def retrace_as_exported_program(
        self,
        gm: torch.fx.GraphModule,
        name_to_constant: Dict[str, Any],
    ):
        """Re-export the converted GraphModule as an ExportedProgram.

        Runs non-strict, pre-dispatch export over ``gm`` with the stored sample
        args, then patches up the ExportedProgram's constant/buffer bookkeeping,
        since retracing cannot tell lifted constants apart from real buffers.
        """
        # TODO: adjust input orders to match GraphSignature convention
        ep = torch.export._trace._export(
            gm,
            self.sample_args,
            strict=False,
            pre_dispatch=True,
        )

        # Post-processing to make sure the ExportedProgram states are correct.
        # Because during conversion, we set tensor constants as GetAttr,
        # retracing cannot recognize them as tensor constants but instead
        # treat them as buffers. We need to set them again here.
        ep._constants.update(
            {
                k: v
                for k, v in name_to_constant.items()
                if isinstance(v, (torch.Tensor, torch.ScriptObject))
            }
        )
        # Constants must not also appear in the state_dict.
        for k in name_to_constant:
            ep.state_dict.pop(k, None)

        for i, spec in enumerate(ep.graph_signature.input_specs):
            # Mark as constant tensors for erroneously traced buffers.
            if spec.kind == InputKind.BUFFER and spec.target in name_to_constant:
                assert isinstance(
                    name_to_constant[spec.target], torch.Tensor
                ), f"{type(name_to_constant[spec.target])} has been erroneously marked as buffer"
                spec.kind = InputKind.CONSTANT_TENSOR
        # Re-check invariants after mutating the signature in place.
        ep.verifier().check(ep)

        return ep
|
| 1526 |
+
|
| 1527 |
+
    def lift_get_attr(self):
        """Walk the TS graph and lift attribute values onto this converter.

        This function lifts multiple data types:

        1. Tensor constant attributes (e.g., self.data = torch.tensor([2,3]))
           to buffers. Currently, when there are tensor constants, export
           would error and ask users to register tensor constants as buffers.
           Since it is hard to manually do so for TorchScript models
           (e.g., source code is missing), this function automatically
           lifts tensor constants to be buffers.

        2. ScriptObject to constant. It will then be converted to getattr
           in the fx graph.

        This function should happen in TS2EPConverter instead of
        TS2FXGraphConverter since it gets attributes from self.ts_model
        which is not accessible in TS2FXGraphConverter. It is similar to where
        we collect self.name_to_param and self.name_to_buffer.
        """
        # Maps a TS value's debug name to the dotted attribute path it holds.
        name_to_attribute_fqn: Dict[str, str] = {}

        def get_attr(fqn: str):
            # Resolve a dotted attribute path starting from the TS model root.
            name = fqn.split(".")
            v = self.ts_model
            for n in name:
                v = getattr(v, n)
            return v

        def get_fqn(node: torch._C.Node):
            # Build a fully-qualified name for a GetAttr node from its input chain.
            attr_name = node.s("name")
            input_name = node.input().debugName()
            root_attr_name = name_to_attribute_fqn[input_name]
            attr_fqn = f"{root_attr_name}.{attr_name}" if root_attr_name else attr_name
            return attr_fqn

        def _dfs_get_attr(block):
            # Depth-first walk over blocks, classifying each GetAttr target.
            for node in block.nodes():
                if node.kind() == "prim::CreateObject":
                    output_name = node.output().debugName()
                    name_to_attribute_fqn[output_name] = ""

                if node.kind() == "prim::GetAttr":
                    attr_fqn = get_fqn(node)
                    value = get_attr(attr_fqn)
                    output_name = node.output().debugName()
                    name_to_attribute_fqn[output_name] = attr_fqn
                    if isinstance(value, torch.Tensor):
                        if attr_fqn not in self.name_to_buffer:
                            # Lift tensor constants to be a buffer
                            self.name_to_buffer[attr_fqn] = value
                    elif isinstance(value, torch.ScriptObject):
                        if attr_fqn not in self.name_to_constant:
                            self.name_to_constant[attr_fqn] = value
                    else:
                        self.name_to_non_tensor_attributes[attr_fqn] = value

                for subblock in node.blocks():
                    _dfs_get_attr(subblock)

        _dfs_get_attr(self.ts_graph)
|
janus/lib/python3.10/site-packages/torch/_export/db/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
janus/lib/python3.10/site-packages/torch/_export/db/case.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import inspect
|
| 3 |
+
import re
|
| 4 |
+
import string
|
| 5 |
+
from dataclasses import dataclass, field
|
| 6 |
+
from enum import Enum
|
| 7 |
+
from typing import Any, Dict, List, Optional, Set, Tuple
|
| 8 |
+
from types import ModuleType
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
# Registry of allowed example tags as a two-level hierarchy ("torch" and
# "python" namespaces). `_validate_tag` walks this mapping to validate
# dotted tag strings such as "python.data-structure".
_TAGS: Dict[str, Dict[str, Any]] = {
    "torch": {
        "cond": {},
        "dynamic-shape": {},
        "escape-hatch": {},
        "map": {},
        "dynamic-value": {},
        "operator": {},
        "mutation": {},
    },
    "python": {
        "assert": {},
        "builtin": {},
        "closure": {},
        "context-manager": {},
        "control-flow": {},
        "data-structure": {},
        "standard-library": {},
        "object-model": {},
    },
}
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class SupportLevel(Enum):
    """
    Indicates at what stage the feature
    used in the example is handled in export.
    """

    SUPPORTED = 1
    NOT_SUPPORTED_YET = 0


# Positional example inputs are stored as a plain tuple.
ArgsType = Tuple[Any, ...]
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def check_inputs_type(args, kwargs):
    """Validate that example inputs are a tuple plus a str-keyed dict.

    Raises ValueError describing the first offending value otherwise.
    """
    if not isinstance(args, tuple):
        raise ValueError(
            f"Expecting args type to be a tuple, got: {type(args)}"
        )
    if not isinstance(kwargs, dict):
        raise ValueError(
            f"Expecting kwargs type to be a dict, got: {type(kwargs)}"
        )
    bad_keys = [k for k in kwargs if not isinstance(k, str)]
    if bad_keys:
        raise ValueError(
            f"Expecting kwargs keys to be a string, got: {type(bad_keys[0])}"
        )
|
| 62 |
+
|
| 63 |
+
def _validate_tag(tag: str):
|
| 64 |
+
parts = tag.split(".")
|
| 65 |
+
t = _TAGS
|
| 66 |
+
for part in parts:
|
| 67 |
+
assert set(part) <= set(
|
| 68 |
+
string.ascii_lowercase + "-"
|
| 69 |
+
), f"Tag contains invalid characters: {part}"
|
| 70 |
+
if part in t:
|
| 71 |
+
t = t[part]
|
| 72 |
+
else:
|
| 73 |
+
raise ValueError(f"Tag {tag} is not found in registered tags.")
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
@dataclass(frozen=True)
class ExportCase:
    """A single exportdb example: a model plus example inputs and metadata."""

    example_args: ArgsType
    description: str  # A description of the use case.
    model: torch.nn.Module
    name: str
    example_kwargs: Dict[str, Any] = field(default_factory=dict)
    extra_args: Optional[ArgsType] = None  # For testing graph generalization.
    # Tags associated with the use case. (e.g dynamic-shape, escape-hatch)
    tags: Set[str] = field(default_factory=set)
    support_level: SupportLevel = SupportLevel.SUPPORTED
    dynamic_shapes: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        # Validate inputs, tags, and description eagerly at construction time.
        check_inputs_type(self.example_args, self.example_kwargs)
        if self.extra_args is not None:
            check_inputs_type(self.extra_args, {})

        for tag in self.tags:
            _validate_tag(tag)

        if not isinstance(self.description, str) or len(self.description) == 0:
            raise ValueError(f'Invalid description: "{self.description}"')
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
# Registered example cases, keyed by case name.
_EXAMPLE_CASES: Dict[str, ExportCase] = {}
# Modules that have already registered a case via `export_case`.
_MODULES: Set[ModuleType] = set()
# Cases whose names collided during registration, keyed by case name.
_EXAMPLE_CONFLICT_CASES: Dict[str, List[ExportCase]] = {}
# Rewrite variants registered against a parent case, keyed by parent name.
_EXAMPLE_REWRITE_CASES: Dict[str, List[ExportCase]] = {}
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def register_db_case(case: ExportCase) -> None:
    """
    Registers a user provided ExportCase into example bank.

    Name collisions are recorded in `_EXAMPLE_CONFLICT_CASES` instead of
    overwriting the existing entry.
    """
    name = case.name
    if name not in _EXAMPLE_CASES:
        _EXAMPLE_CASES[name] = case
        return

    # Collision: keep the original case and append the newcomer.
    conflicts = _EXAMPLE_CONFLICT_CASES.setdefault(name, [_EXAMPLE_CASES[name]])
    conflicts.append(case)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def to_snake_case(name):
    """Convert a CamelCase identifier to snake_case."""
    # Insert underscores before capitalized words, then between a
    # lowercase/digit and the uppercase letter that follows it.
    first_pass = re.compile(r"(.)([A-Z][a-z]+)").sub(r"\1_\2", name)
    second_pass = re.compile(r"([a-z0-9])([A-Z])").sub(r"\1_\2", first_pass)
    return second_pass.lower()
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def _make_export_case(m, name, configs):
|
| 126 |
+
if not isinstance(m, torch.nn.Module):
|
| 127 |
+
raise TypeError("Export case class should be a torch.nn.Module.")
|
| 128 |
+
|
| 129 |
+
if "description" not in configs:
|
| 130 |
+
# Fallback to docstring if description is missing.
|
| 131 |
+
assert (
|
| 132 |
+
m.__doc__ is not None
|
| 133 |
+
), f"Could not find description or docstring for export case: {m}"
|
| 134 |
+
configs = {**configs, "description": m.__doc__}
|
| 135 |
+
return ExportCase(**{**configs, "model": m, "name": name})
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def export_case(**kwargs):
    """
    Decorator for registering a user provided case into example bank.

    The case name is derived from the defining module's file name; each
    example file may use this decorator at most once.
    """

    def wrapper(m):
        module = inspect.getmodule(m)
        if module in _MODULES:
            raise RuntimeError("export_case should only be used once per example file.")

        assert module is not None
        _MODULES.add(module)
        case_name = module.__name__.split(".")[-1]
        case = _make_export_case(m, case_name, kwargs)
        register_db_case(case)
        return case

    return wrapper
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def export_rewrite_case(**kwargs):
    """Decorator registering a rewrite variant of an existing parent case.

    Requires a `parent=ExportCase` keyword; the variant inherits the parent's
    example args and is stored under the parent's name.
    """

    def wrapper(m):
        configs = dict(kwargs)
        parent = configs.pop("parent")
        assert isinstance(parent, ExportCase)

        variants = _EXAMPLE_REWRITE_CASES.setdefault(parent.name, [])
        configs["example_args"] = parent.example_args
        case = _make_export_case(m, to_snake_case(m.__name__), configs)
        variants.append(case)
        return case

    return wrapper
|
janus/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
|
| 4 |
+
class ClassMethod(torch.nn.Module):
    """
    Class methods are inlined during tracing.
    """

    @classmethod
    def method(cls, x):
        return x + 1

    def __init__(self) -> None:
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)

    def forward(self, x):
        out = self.linear(x)
        # Invoke the classmethod through three equivalent call forms; all
        # resolve to the same function and are inlined by tracing.
        return self.method(out) * self.__class__.method(out) * type(self).method(out)
|
| 20 |
+
|
| 21 |
+
# Example input matching the Linear(4, 2) layer's expected feature size.
example_args = (torch.randn(3, 4),)
model = ClassMethod()
|
janus/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
|
| 4 |
+
class Dictionary(torch.nn.Module):
    """
    Dictionary structures are inlined and flattened along tracing.
    """

    def forward(self, x, y):
        squared = {"x2": x * x}
        product = y * squared["x2"]
        return {"y": product}
|
| 14 |
+
|
| 15 |
+
example_args = (torch.randn(3, 2), torch.tensor(4))
# Tagged as a Python data-structure example in the export db.
tags = {"python.data-structure"}
model = Dictionary()
|
janus/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
from torch._export.db.case import SupportLevel
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class ModelAttrMutation(torch.nn.Module):
    """
    Attribute mutation is not supported.
    """

    def __init__(self) -> None:
        super().__init__()
        self.attr_list = [torch.randn(3, 2), torch.randn(3, 2)]

    def recreate_list(self):
        # Fresh tensors each call; storing them from forward() is the
        # unsupported attribute mutation this example demonstrates.
        return [torch.zeros(3, 2), torch.zeros(3, 2)]

    def forward(self, x):
        self.attr_list = self.recreate_list()
        first = self.attr_list[0]
        return x.sum() + first.sum()
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
example_args = (torch.randn(3, 2),)
tags = {"python.object-model"}
# Attribute mutation inside forward() is not yet supported by export.
support_level = SupportLevel.NOT_SUPPORTED_YET
model = ModelAttrMutation()
|
janus/lib/python3.10/site-packages/torch/_export/db/logging.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def exportdb_error_message(case_name: str):
    """Return a user-facing message pointing at exportdb docs for *case_name*.

    Falls back to a generic message (and logs the unregistered name) when the
    case is not found in exportdb.
    """
    from .examples import all_examples
    from torch._utils_internal import log_export_usage

    ALL_EXAMPLES = all_examples()
    # Detect whether case_name is really registered in exportdb.
    if case_name in ALL_EXAMPLES:
        # Anchors on the docs page use dashes instead of underscores.
        url_case_name = case_name.replace("_", "-")
        return f"See {case_name} in exportdb for unsupported case. \
            https://pytorch.org/docs/main/generated/exportdb/index.html#{url_case_name}"
    else:
        log_export_usage(
            event="export.error.casenotregistered",
            message=case_name,
        )
        return f"{case_name} is unsupported."
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_class_if_classified_error(e):
|
| 23 |
+
"""
|
| 24 |
+
Returns a string case name if the export error e is classified.
|
| 25 |
+
Returns None otherwise.
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
from torch._dynamo.exc import TorchRuntimeError, Unsupported, UserError
|
| 29 |
+
|
| 30 |
+
ALWAYS_CLASSIFIED = "always_classified"
|
| 31 |
+
DEFAULT_CLASS_SIGIL = "case_name"
|
| 32 |
+
|
| 33 |
+
# add error types that should be classified, along with any attribute name
|
| 34 |
+
# whose presence acts like a sigil to further distinguish which errors of
|
| 35 |
+
# that type should be classified. If the attribute name is None, then the
|
| 36 |
+
# error type is always classified.
|
| 37 |
+
_ALLOW_LIST = {
|
| 38 |
+
Unsupported: DEFAULT_CLASS_SIGIL,
|
| 39 |
+
UserError: DEFAULT_CLASS_SIGIL,
|
| 40 |
+
TorchRuntimeError: None,
|
| 41 |
+
}
|
| 42 |
+
if type(e) in _ALLOW_LIST:
|
| 43 |
+
attr_name = _ALLOW_LIST[type(e)]
|
| 44 |
+
if attr_name is None:
|
| 45 |
+
return ALWAYS_CLASSIFIED
|
| 46 |
+
return getattr(e, attr_name, None)
|
| 47 |
+
return None
|
janus/lib/python3.10/site-packages/torch/_export/error.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from enum import Enum
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class ExportErrorType(Enum):
    """Coarse classification of user-facing export (EXIR) errors."""

    # User providing invalid inputs to either tracer, or other public facing APIs
    INVALID_INPUT_TYPE = 1

    # User returning values from their models that we don't support.
    INVALID_OUTPUT_TYPE = 2

    # Generated IR does not conform to Export IR Specification.
    VIOLATION_OF_SPEC = 3

    # User's code contains types and functionalities we don't support.
    NOT_SUPPORTED = 4

    # User's code didn't provide necessary details for us to successfully trace and export.
    # For example, we use a lot of decorators and ask users to annotate their model.
    MISSING_PROPERTY = 5

    # User is using an API without proper initialization step.
    UNINITIALIZED = 6
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def internal_assert(pred: bool, assert_msg: str) -> None:
    """
    This is exir's custom assert method. It internally just throws InternalError.
    Note that the sole purpose is to throw our own error while maintaining similar syntax
    as python assert.
    """

    if pred:
        return
    raise InternalError(assert_msg)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class InternalError(Exception):
    """
    Raised when an internal invariance is violated in EXIR stack.
    Should hint users to report a bug to dev and expose the original
    error message.
    """

    def __init__(self, message: str) -> None:
        super().__init__(message)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class ExportError(Exception):
|
| 48 |
+
"""
|
| 49 |
+
This type of exception is raised for errors that are directly caused by the user
|
| 50 |
+
code. In general, user errors happen during model authoring, tracing, using our public
|
| 51 |
+
facing APIs, and writing graph passes.
|
| 52 |
+
"""
|
| 53 |
+
|
| 54 |
+
def __init__(self, error_code: ExportErrorType, message: str) -> None:
|
| 55 |
+
prefix = f"[{error_code}]: "
|
| 56 |
+
super().__init__(prefix + message)
|
janus/lib/python3.10/site-packages/torch/_export/non_strict_utils.py
ADDED
|
@@ -0,0 +1,523 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import contextlib
|
| 3 |
+
import inspect
|
| 4 |
+
import logging
|
| 5 |
+
from collections import defaultdict
|
| 6 |
+
from typing import Any, Callable, Dict, List, Tuple, TYPE_CHECKING, Union
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.utils._pytree as pytree
|
| 10 |
+
from torch._dynamo.source import (
|
| 11 |
+
AttrSource,
|
| 12 |
+
GetItemSource,
|
| 13 |
+
LocalSource,
|
| 14 |
+
TensorProperty,
|
| 15 |
+
TensorPropertySource,
|
| 16 |
+
)
|
| 17 |
+
from torch._dynamo.variables.builder import TrackedFake
|
| 18 |
+
from torch._export.passes.add_runtime_assertions_for_constraints_pass import InputDim
|
| 19 |
+
from torch._export.passes.lift_constants_pass import ConstantAttrMap
|
| 20 |
+
from torch._guards import Source
|
| 21 |
+
from torch._library.fake_class_registry import FakeScriptObject
|
| 22 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 23 |
+
from torch.export import Constraint
|
| 24 |
+
from torch.export.dynamic_shapes import (
|
| 25 |
+
_check_dynamic_shapes,
|
| 26 |
+
_combine_args,
|
| 27 |
+
_DimHint,
|
| 28 |
+
_process_dynamic_shapes,
|
| 29 |
+
_transform_shapes_for_default_dynamic,
|
| 30 |
+
_tree_map_with_path,
|
| 31 |
+
)
|
| 32 |
+
from torch.export.graph_signature import CustomObjArgument
|
| 33 |
+
from torch.fx.experimental import _config as config
|
| 34 |
+
from torch.fx.experimental.symbolic_shapes import (
|
| 35 |
+
_find_user_code_frame,
|
| 36 |
+
_suggest_fixes_for_data_dependent_error_non_strict,
|
| 37 |
+
ConstraintViolationError,
|
| 38 |
+
DimDynamic,
|
| 39 |
+
EqualityConstraint,
|
| 40 |
+
GuardOnDataDependentSymNode,
|
| 41 |
+
ShapeEnv,
|
| 42 |
+
StatelessSymbolicContext,
|
| 43 |
+
ValueRanges,
|
| 44 |
+
)
|
| 45 |
+
from torch.utils._pytree import (
|
| 46 |
+
GetAttrKey,
|
| 47 |
+
KeyPath,
|
| 48 |
+
MappingKey,
|
| 49 |
+
SequenceKey,
|
| 50 |
+
tree_map_with_path,
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
if TYPE_CHECKING:
|
| 55 |
+
from sympy import Symbol
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
log = logging.getLogger(__name__)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def key_path_to_source(kp: KeyPath) -> Source:
|
| 62 |
+
"""
|
| 63 |
+
Given a key path, return the source for the key path.
|
| 64 |
+
"""
|
| 65 |
+
source: Source = LocalSource("args")
|
| 66 |
+
for k in kp:
|
| 67 |
+
if isinstance(k, SequenceKey):
|
| 68 |
+
source = GetItemSource(source, k.idx)
|
| 69 |
+
elif isinstance(k, MappingKey):
|
| 70 |
+
source = GetItemSource(source, k.key)
|
| 71 |
+
elif isinstance(k, GetAttrKey):
|
| 72 |
+
source = AttrSource(source, k.name)
|
| 73 |
+
else:
|
| 74 |
+
raise ValueError(f"Unknown KeyEntry {k}")
|
| 75 |
+
|
| 76 |
+
return source
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def _is_constant_argument(t):
|
| 80 |
+
return t is None or isinstance(t, (int, float, bool, str))
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def fakify(
    mode: FakeTensorMode,
    kp: KeyPath,
    t: Any,
    t_constraints: Dict[int, Dict[int, Constraint]],
    sources: Dict[Tuple[int, int], List[Source]],
):
    """Convert one flattened input into a fake tensor under *mode*.

    Constants and ScriptObjects pass through unchanged; tensors are fakified
    with fully dynamic sizes, with any user constraints for this tensor's
    dims recorded into *sources* and the shape env's debug-name map.
    Raises ValueError for unsupported input types.
    """
    source = key_path_to_source(kp)
    if _is_constant_argument(t) or isinstance(t, torch.ScriptObject):
        return t

    if not isinstance(t, torch.Tensor):
        raise ValueError(f"Unsupported input type {type(t)}")
    n_dims = len(t.shape)
    # Start with every dimension dynamic and unconstrained.
    symbolic_context = StatelessSymbolicContext(
        dynamic_sizes=[DimDynamic.DYNAMIC] * n_dims,
        constraint_sizes=[None] * n_dims,
    )
    t_id = id(t)
    assert mode.shape_env is not None
    if t_id in t_constraints:
        for i, constraint in t_constraints[t_id].items():
            symbolic_context.constraint_sizes[i] = constraint.constraint_range
            src = TensorPropertySource(base=source, prop=TensorProperty.SIZE, idx=i)
            sources[(t_id, i)].append(src)
            mode.shape_env.source_name_to_debug_name[src.name()] = constraint.name  # type: ignore[assignment]
    fake = mode.from_tensor(t, source=source, symbolic_context=symbolic_context)
    # Track the fake so produce_guards can later emit guards for it.
    mode.shape_env.tracked_fakes.append(TrackedFake(fake, source, symbolic_context))  # type: ignore[union-attr]
    return fake
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def make_fake_inputs(
    nn_module,
    args,
    kwargs,
    dynamic_shapes,
    _is_torch_jit_trace=False,
    allow_complex_guards_as_runtime_asserts=False,
):
    """
    Given an nn module, example inputs, and constraints, return a new fake mode,
    fake inputs created in that mode whose dynamic shape dimensions are constrained
    by the given ranges, and sources for pairs of dynamic shape dimensions that are
    constrained to be equal.
    """
    # TODO(avik): refactor Dynamo to avoid duplication of the following code
    # between non-strict and strict.
    # Specifically, here (non-strict) we do the following pre-tracing steps:
    # - Fakify inputs.
    # - Process input shape equalities.
    # In strict, these steps are spread across multiple files:
    # - output_graph.py fakifies inputs.
    # - [post-tracing] guards.py processes input shape equalities.

    combined_args = _combine_args(nn_module, args, kwargs)
    _check_dynamic_shapes(combined_args, dynamic_shapes)
    transformed_dynamic_shapes = _transform_shapes_for_default_dynamic(
        combined_args, dynamic_shapes
    )
    constraints = _process_dynamic_shapes(combined_args, transformed_dynamic_shapes)
    # Index constraints by tensor id and dimension for O(1) lookup in fakify.
    t_constraints: Dict[int, Dict[int, Constraint]] = defaultdict(dict)
    for constraint in constraints:
        t_constraints[constraint.t_id][constraint.dim] = constraint

    context = torch._guards.TracingContext.try_get()
    if context is not None:
        # This occurs when we are exporting within dynamo. There already exists
        # a toplevel TracingContext with a fake mode, so we do not want to
        # create another fake mode.
        fake_mode = context.fake_mode
    elif not _is_torch_jit_trace:
        # Attach code-location metadata so shape-env diagnostics can point
        # at the user's forward method.
        code = nn_module.forward.__code__
        co_fields = {
            "co_name": code.co_name,
            "co_filename": code.co_filename,
            "co_firstlineno": code.co_firstlineno,
        }
        fake_mode = FakeTensorMode(
            shape_env=ShapeEnv(
                tracked_fakes=[],
                co_fields=co_fields,
                prefer_deferred_runtime_asserts_over_guards=True,
                allow_complex_guards_as_runtime_asserts=allow_complex_guards_as_runtime_asserts,
            ),
            allow_non_fake_inputs=True,
            export=True,
        )
    else:
        fake_mode = FakeTensorMode(
            shape_env=ShapeEnv(
                tracked_fakes=[],
                prefer_deferred_runtime_asserts_over_guards=True,
                allow_complex_guards_as_runtime_asserts=allow_complex_guards_as_runtime_asserts,
            ),
            allow_non_fake_inputs=True,
        )
    if fake_mode.shape_env is None or fake_mode.shape_env.tracked_fakes is None:
        raise ValueError(
            "Detected fake_mode does not have a shape_env with tracked fakes. "
            "If you constructed the module under a FakeTensorMode, "
            "please initialize it like: FakeTensorMode(shape_env=ShapeEnv(tracked_fakes=[]))"
        )

    with fake_mode:
        # FIXME(ycao) ScriptMethod doesn't have signature, I am using an empty one to unblock
        if not _is_torch_jit_trace:
            original_signature = inspect.signature(nn_module.forward)
        else:
            original_signature = None
        # Collects, per (tensor id, dim), the Sources created while fakifying.
        sources: Dict[Tuple[int, int], List[Source]] = defaultdict(list)
        fake_args, fake_kwargs = tree_map_with_path(
            lambda kp, val: fakify(fake_mode, kp, val, t_constraints, sources),
            (args, kwargs),
        )

        names: Dict[str, Tuple[int, int]] = {}
        source_pairs: List[Tuple[Source, Source]] = []
        derived_equalities: List[Tuple[Source, Union[Source, Symbol], Callable]] = []
        phantom_symbols: Dict[str, Symbol] = {}
        for constraint in constraints:
            torch.export.dynamic_shapes._process_equalities(
                constraint,
                lambda t_id, dim: sources[(t_id, dim)],
                fake_mode.shape_env,
                names,
                source_pairs,
                derived_equalities,
                phantom_symbols,
            )

        equalities_inputs = EqualityConstraint(
            source_pairs=source_pairs,
            derived_equalities=derived_equalities,
            phantom_symbols=list(phantom_symbols.values()),
            warn_only=False,
        )
        return (
            fake_mode,
            fake_args,
            fake_kwargs,
            equalities_inputs,
            original_signature,
            transformed_dynamic_shapes,
        )
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def _flatten_dynamic_shapes(
    combined_args: Dict[str, Any],
    dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any]],
) -> List[Any]:
    """Flatten a (possibly nested) dynamic-shapes spec into a flat list.

    Walks ``dynamic_shapes`` in lockstep with ``combined_args`` and records
    the per-leaf shape spec for each input, in traversal order.
    """
    collected: List[Any] = []

    def _collect(path, tensor, shape):
        # Only the shape spec is recorded; path/tensor are supplied by the
        # tree walk but unused here.
        collected.append(shape)

    _tree_map_with_path(_collect, combined_args, dynamic_shapes)
    return collected
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def produce_guards_and_solve_constraints(
    fake_mode: FakeTensorMode,
    gm: torch.fx.GraphModule,
    dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None],
    equalities_inputs: EqualityConstraint,
    original_signature: inspect.Signature,
    _is_torch_jit_trace=False,
):
    """
    Given a fake mode, sources pairs corresponding to equal dynamic shape dimensions,
    and a graph module, produce guards on the fake mode's shape env (raising constraint
    violations if any), solve (to suggest simplifications or fixes).
    Dynamo already performs this, so this is for non-strict mode.

    Additional inputs:
        equalities_inputs: the equality constraints to use for guards
        original_signature: the signature of the forward method

    Raises:
        ConstraintViolationError: if guards fail or if dims the user marked
        dynamic were forced to specialize; the message includes suggested fixes.
    """
    shape_env = fake_mode.shape_env
    # Both must have been populated during input fakification (the ShapeEnv
    # needs to have been constructed with tracked_fakes=[]).
    assert shape_env is not None
    assert shape_env.tracked_fakes is not None

    placeholders = [tf.fake for tf in shape_env.tracked_fakes]
    sources = [tf.source for tf in shape_env.tracked_fakes]
    input_contexts = [tf.symbolic_context for tf in shape_env.tracked_fakes]
    constraint_violation_error = None
    try:
        shape_env.produce_guards(
            placeholders,
            sources,
            input_contexts=input_contexts,
            equalities_inputs=equalities_inputs,
            ignore_static=False,
        )
    except ConstraintViolationError as e:
        # Defer raising: we still try to solve the dim constraints below so
        # the final error message can include suggested fixes.
        constraint_violation_error = e

    shape_env.frozen = True
    dim_constraints = shape_env.dim_constraints
    if dim_constraints is None:
        # Expected when shape_env.produce_guards throws an early constraint violation error.
        # There is nothing to solve for in this case.
        # TODO(avik): Maybe record the constraint violation error instead and replay later?
        assert constraint_violation_error
        raise constraint_violation_error
    dim_constraints.solve()
    forced_specializations = dim_constraints.forced_specializations()
    if not _is_torch_jit_trace:
        msg = dim_constraints.prettify_results(
            original_signature,
            dynamic_shapes,
            constraint_violation_error,
            forced_specializations,
        )
    else:
        # FIXME(ycao): This is a hack to get around missing signature from ScriptMethod
        msg = "dummy constraint violation message"
    if constraint_violation_error:
        # Append the prettified suggestions to the original error message.
        constraint_violation_error.args = (constraint_violation_error.args[0] + msg,)
    elif forced_specializations:
        # No guard failure, but some user-marked-dynamic dims were forced
        # static; surface that as a constraint violation as well.
        constraint_violation_error = ConstraintViolationError(msg)
    if constraint_violation_error:
        raise constraint_violation_error
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
def make_constraints(
    fake_mode: FakeTensorMode,
    gm: torch.fx.GraphModule,
    combined_args: Dict[str, Any],
    dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None],
    num_lifted_inputs: int,
):
    """
    Given a fake mode's shape env and user-specified dynamic shapes,
    return the resulting range constraints and equality constraints.

    Additional args:
        num_lifted_inputs: the number of non-user-input placeholder nodes in the graph
        (used only to enumerate the user-input nodes)
    """

    shape_env = fake_mode.shape_env
    assert shape_env is not None
    # Data-dependent (inline) constraints recorded during tracing seed the result.
    inline_constraints = gm.meta.get("inline_constraints", [])
    range_constraints = {
        symbol: inline_constraints[symbol] for symbol in inline_constraints
    }
    if not dynamic_shapes:
        # No input dims were marked dynamic; only inline constraints apply.
        return range_constraints

    # get individual dynamic shapes spec for each input
    if not isinstance(dynamic_shapes, dict):
        assert isinstance(dynamic_shapes, (tuple, list))
        # Match the container type of the spec so the lockstep tree walk lines up.
        combined_args = type(dynamic_shapes)(combined_args.values())  # type: ignore[assignment, misc]
    flat_dynamic_shapes = _flatten_dynamic_shapes(combined_args, dynamic_shapes)

    # check number of shapes vs. number of inputs
    num_placeholders = [node.op == "placeholder" for node in gm.graph.nodes].count(True)
    assert len(flat_dynamic_shapes) == num_placeholders - num_lifted_inputs

    input_dims = defaultdict(list)
    free_symbols = set()
    for input_index, node in enumerate(gm.graph.nodes):
        if input_index < num_lifted_inputs or node.op != "placeholder":
            # Skip lifted params/buffers and non-placeholder nodes.
            continue
        if _is_constant_argument(node.meta["val"]) or isinstance(
            node.meta["val"], CustomObjArgument
        ):
            # Constants and custom objects carry no symbolic shape information.
            continue
        shape_spec = flat_dynamic_shapes[input_index - num_lifted_inputs]
        for i, d in enumerate(node.meta["val"].shape):
            if isinstance(d, torch.SymInt) and not d.node.expr.is_number:
                # Look up the range constraint for the symbol corresponding to this shape dimension
                # and store it indexed by the symbolic expression corresponding to it.
                # NOTE(avik): Use node._expr instead of node.expr for the lookup here because
                # we want the symbol, not its replacement, which could be an expression. Maybe
                # there's a better way to do this, e.g., by (re)computing value ranges for expressions?
                dim = shape_spec[i] if shape_spec else None
                if dim is None or isinstance(dim, _DimHint):
                    range_constraints[d.node.expr] = shape_env.var_to_range[
                        d.node._expr
                    ]
                else:
                    # Explicit Dim spec: use the user-declared min/max bounds.
                    range_constraints[d.node.expr] = ValueRanges(
                        lower=dim.min, upper=dim.max
                    )
                input_dims[d.node.expr].append(InputDim(input_name=node.name, dim=i))
                free_symbols.update(d.node.expr.free_symbols)

    for symbol in free_symbols:
        if symbol not in range_constraints:
            # Placeholders can have symbolic shapes that are derived expressions.
            # The above code will record direct range constraints for them
            # so that we can do runtime assertions. In addition, for serde checks
            # we want to record range constraints for their root symbols.
            range_constraints[symbol] = shape_env.var_to_range[symbol]

    return range_constraints
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
def _gather_constant_attrs(m: torch.nn.Module) -> ConstantAttrMap:
    """Search the module hierarchy, gathering up all tensor and ScriptObject constants.

    Returns a dictionary mapping hash(value) to the name of the constant. We
    have to abuse `hash` here unfortunately, see: [ScriptObject hash].
    """
    constants = ConstantAttrMap()
    # Buffers and parameters are tracked state, not constants; collect them
    # once up front so they can be filtered out below.
    buffers_parameters = set(m.buffers())
    buffers_parameters.update(m.parameters())

    def visit(module: torch.nn.Module, prefix_atoms: List[str]) -> None:
        for name, value in module.__dict__.items():
            if not isinstance(
                value,
                (
                    torch.Tensor,
                    torch.ScriptObject,
                    FakeScriptObject,
                ),
            ):
                continue
            if value in buffers_parameters:
                # filter out buffers and parameters, leaving only constants
                continue
            constants.add(value, ".".join(prefix_atoms + [name]))
        # Recurse in declaration order so fqns are recorded depth-first,
        # matching the module hierarchy.
        for name, child in module.named_children():
            visit(child, prefix_atoms + [name])

    visit(m, [])
    return constants
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
@contextlib.contextmanager
def _fakify_script_objects(
    mod: torch.nn.Module,
    args: Tuple[Any],
    kwargs: Dict[Any, Any],
    fake_mode: torch._subclasses.fake_tensor.FakeTensorMode,
):
    # This context manager is used to fakify script objects into FakeScriptObject.
    # Inputs:
    #   mod: the module to be exported, it (and its recursive submodules)'s script object attrs haven't been fakified.
    #   args, kwargs: the args and kwargs inputs for mod, script object inputs haven't been fakified.
    #   fake_mode: the fake mode to be used for fakifying script objects. It's the same mode that fakify input tensors.
    #
    # Returns:
    #   mod: the patched module, its (and its recursive submodules) script object attrs have been fakified.
    #   fake_args, fake_kwargs: new fakified args and kwargs.
    #       Script object inputs have been fakified. Don't touch the tensors.
    #   fake_constant_attrs: a new map from FakeScriptObject to the fqn of the original script object.
    #   fake_to_real: a mapping between FakeScriptObject and the original script object in order to un-do the patching.

    constant_attrs: ConstantAttrMap = _gather_constant_attrs(mod)
    # Sanity checks: fakification must not have happened already.
    assert not any(
        isinstance(obj, FakeScriptObject) for obj in constant_attrs.values()
    ), "Mod shouldn't contain any FakeScriptObject."
    assert not pytree.tree_any(
        lambda obj: isinstance(obj, FakeScriptObject), (args, kwargs)
    ), "args and kwargs shouldn't contain any FakeScriptObject."

    # fqn -> original ScriptObject; used in the finally block to restore mod.
    patched_attr = {}
    fake_constant_attrs = ConstantAttrMap()
    fake_to_real = {}

    def _maybe_fakify_obj(obj):
        # Convert a ScriptObject to its FakeScriptObject twin, remembering the
        # original so the mapping can be reversed later.
        fake_obj = torch._library.fake_class_registry.maybe_to_fake_obj(fake_mode, obj)
        fake_to_real[fake_obj] = obj
        return fake_obj

    def _leaf_mod_and_attr(
        mod: torch.nn.Module, attr_fqn: str
    ) -> Tuple[torch.nn.Module, str]:
        # Resolve a dotted fqn to (owning submodule, final attribute name).
        *prefix_attr, last_attr = attr_fqn.split(".")
        cur_mod = mod
        for attr in prefix_attr:
            cur_mod = getattr(cur_mod, attr)
        return cur_mod, last_attr

    try:
        for obj, fqns in constant_attrs.items():
            if isinstance(obj, torch.ScriptObject):
                fake_script_obj = _maybe_fakify_obj(obj)
                for fqn in fqns:
                    cur_mod, attr = _leaf_mod_and_attr(mod, fqn)
                    assert obj is getattr(cur_mod, attr)
                    # Patch the module attribute in place with the fake twin.
                    setattr(cur_mod, attr, fake_script_obj)
                    fake_constant_attrs.add(fake_script_obj, fqn)
                    patched_attr[fqn] = obj
            else:
                # Non-script constants (e.g. tensors) are carried over as-is.
                for fqn in fqns:
                    fake_constant_attrs.add(obj, fqn)

        fake_args, fake_kwargs = pytree.tree_map_only(
            torch.ScriptObject, _maybe_fakify_obj, (args, kwargs)
        )
        yield (mod, fake_args, fake_kwargs, fake_constant_attrs, fake_to_real)
    finally:
        # Undo the in-place patching even if the body raised.
        for fqn, orig_obj in patched_attr.items():
            cur_mod, attr = _leaf_mod_and_attr(mod, fqn)
            setattr(cur_mod, attr, orig_obj)
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
class _NonStrictTorchFunctionHandler(torch.overrides.TorchFunctionMode):
    """
    1. Handles data-dependent errors raised by torch function calls in non-strict.

    Any data-dependent error is due to some condition on unbacked symints
    that cannot be resolved. A mechanical way of fixing the error is to use
    a torch._check() call to assert either that condition or its negation.
    The handler suggests these options as code and points to the location
    of the torch function call that raised the error as part of the error
    message shown to the user, who can then simply select and copy-paste
    a suggested fix at that location.

    NOTE: Not all data-dependent errors are raised by torch function calls.
    In particular, conditions on unbacked symints can appear outside such
    calls, and as such are not handled here.

    2. Handles line-of-code logging for each torch function call in non-strict.

    Usage: TORCHEXPORT_EXTENDED_DEBUG_CURRENT_LOC=1 TORCH_LOGS="+export" ...
    """

    def _log_call_location(self, func):
        # Line-of-code logging, gated behind +export DEBUG logs and the
        # extended_debug_current_loc config flag.
        if not (log.isEnabledFor(logging.DEBUG) and config.extended_debug_current_loc):
            return
        frame = _find_user_code_frame()
        if frame is None:
            return
        log.debug(
            "%s called at %s:%s in %s",
            func.__qualname__,
            frame.f_code.co_filename,
            frame.f_lineno,
            frame.f_code.co_name,
        )

    def __torch_function__(self, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}
        self._log_call_location(func)
        try:
            return func(*args, **kwargs)
        except GuardOnDataDependentSymNode as e:
            # Attach suggested torch._check() fixes to the error before
            # letting it propagate.
            _suggest_fixes_for_data_dependent_error_non_strict(e)
            raise
|
janus/lib/python3.10/site-packages/torch/_export/pass_base.py
ADDED
|
@@ -0,0 +1,441 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import operator
|
| 3 |
+
import traceback
|
| 4 |
+
import typing
|
| 5 |
+
from contextlib import nullcontext
|
| 6 |
+
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from functorch.experimental.control_flow import _unstack_pytree
|
| 10 |
+
from torch import fx
|
| 11 |
+
from torch._dispatch.python import enable_python_dispatcher
|
| 12 |
+
from torch._export.pass_infra.node_metadata import NodeMetadata
|
| 13 |
+
from torch._export.pass_infra.proxy_value import ProxyValue
|
| 14 |
+
from torch._subclasses import FakeTensor, UnsupportedFakeTensorException
|
| 15 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 16 |
+
from torch.fx import traceback as fx_traceback
|
| 17 |
+
from torch.fx.experimental.proxy_tensor import PythonKeyTracer
|
| 18 |
+
from torch.fx.graph import CodeGen
|
| 19 |
+
from torch.fx.passes.infra.pass_base import PassBase, PassResult
|
| 20 |
+
from torch.fx.passes.shape_prop import _extract_tensor_metadata, TensorMetadata
|
| 21 |
+
from torch.utils import _pytree as pytree
|
| 22 |
+
from torch.fx.experimental.symbolic_shapes import PropagateUnbackedSymInts, compute_unbacked_bindings
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
__all__ = ["_ExportPassBaseDeprecatedDoNotUse"]


# Loose type aliases used throughout this file for interpreter arguments,
# runtime values, callables, and pass functions.
Argument = Any
Value = Any
Fn = Callable[..., Any]
PassType = Callable[[torch.fx.GraphModule], Optional[PassResult]]


# torch.sym_* free functions that may appear as call_function targets; they
# are routed through the callback's call_sym handler rather than
# call_operator (see ExportInterpreter.call_function).
_TORCH_SYM_OPS: Set[Callable] = {
    torch.sym_int,
    torch.sym_float,
    torch.sym_ite,
    torch.sym_max,
    torch.sym_min,
    torch.sym_not,
    torch.sym_sqrt,
}
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class ExportPassBaseError(RuntimeError):
    """Raised by the export pass infrastructure in this file for unsupported
    operations or targets."""
    pass
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class _ExportPassBaseDeprecatedDoNotUse(PassBase):
|
| 50 |
+
"""
|
| 51 |
+
Interpreter-based pass class to help users maintain the IR spec while writing
|
| 52 |
+
transformations.
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
@staticmethod
def _create_dummy_node_metadata():
    # Synthesize metadata for a node created by a pass (there is no original
    # node to copy from); use the current single-frame stack as the trace.
    stack_trace = "".join(traceback.format_stack(limit=1))
    return NodeMetadata({"stack_trace": stack_trace})
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class ExportTracer(PythonKeyTracer):
    """Tracer that accumulates the new graph produced by an export pass.

    Nodes are created through the owning callback's `_fx` method rather
    than by tracing Python code, so `trace()` is unsupported.
    """

    def __init__(self, callback: "_ExportPassBaseDeprecatedDoNotUse", codegen: CodeGen) -> None:
        super().__init__()
        self.callback = callback
        self.root = torch.nn.Module()
        self.graph = torch.fx.Graph()
        self.graph.set_codegen(codegen)
        self.tensor_attrs: Dict[str, torch.Tensor] = {}  # type: ignore[assignment]
        self.fake_tensor_mode: Optional[FakeTensorMode] = None
        # Submodules registered via create_arg, keyed module -> generated name.
        self.submodules: Dict[torch.nn.Module, str] = {}

    def trace(self) -> None:  # type: ignore[override]
        raise ExportPassBaseError("ExportTracer doesn't support trace().")

    def create_arg(self, a: Argument) -> torch.fx.Node:
        if isinstance(a, torch.nn.Module):
            # Register the submodule under a generated name so the new graph
            # can reference it.
            if a not in self.submodules:
                name_submodule = f"submodule_{len(self.submodules)}"
                self.root.add_module(name_submodule, a)
                self.submodules[a] = name_submodule
        elif isinstance(a, FakeTensor):
            # Only FakeTensors backed by a real constant can be embedded in
            # the graph (as get_attr).
            if not hasattr(a, "constant") or a.constant is None:
                raise ExportPassBaseError(f"Cannot add {a} to graph.")
            a = a.constant
        node = super().create_arg(a)
        if (
            isinstance(a, torch.Tensor)
            and isinstance(node, torch.fx.Node)
            and node.op == "get_attr"
        ):
            self.set_metadata(node, a)
            # Notify the callback that a constant attribute node was created.
            self.callback.on_attr(ProxyValue(a, node))
        return node

    def set_metadata(
        self, node: torch.fx.Node, value: Argument,
    ) -> None:
        # propagate the fake tensor or sym nodes
        def make_val(
            x: Argument,
        ) -> Union[FakeTensor, torch.SymInt, torch.SymFloat, torch.SymBool, int, float, bool, str, None]:
            if isinstance(x, FakeTensor):
                return x
            elif isinstance(x, torch.Tensor):
                if x.is_quantized:
                    # TODO (tmanlaibaatar) properly support Quantized FakeTensor
                    x = torch.dequantize(x)

                try:
                    assert self.fake_tensor_mode is not None
                    # TODO we should allocate static shapes
                    # for param/buffer values
                    if isinstance(x, torch.nn.Parameter):
                        fake_tensor = self.fake_tensor_mode.from_tensor(
                            x, static_shapes=True
                        )
                    else:
                        fake_tensor = self.fake_tensor_mode.from_tensor(x)
                except UnsupportedFakeTensorException:
                    # TODO: This is just a workaround to get over the
                    # x.as_subclass error
                    print(
                        "Fakeifying a Tensor subclass is not supported \
                    right now. Instead a TensorMetadata is used."
                    )
                    fake_tensor = None
                return fake_tensor
            elif isinstance(x, (torch.SymInt, torch.SymFloat, torch.SymBool, int, float, bool, str)):
                return x
            else:
                return None

        node.meta["val"] = pytree.tree_map(make_val, value)

        # Set the tensor_metadata for values that do not have a corresponding FakeTensor
        def make_tensor_meta(x: Argument) -> Optional[TensorMetadata]:
            if not isinstance(x, FakeTensor) and isinstance(x, torch.Tensor):
                if x.is_quantized:
                    # TODO (tmanlaibaatar) properly support Quantized FakeTensor
                    x = torch.dequantize(x)

                try:
                    assert self.fake_tensor_mode is not None
                    # Probe: if the tensor can be fakified, no metadata fallback
                    # is needed.
                    _ = self.fake_tensor_mode.from_tensor(x)
                    tensor_meta = None
                except UnsupportedFakeTensorException:
                    # TODO: This is just a workaround to get over the
                    # x.as_subclass error
                    tensor_meta = _extract_tensor_metadata(x)
                return tensor_meta
            else:
                return None

        node.meta["tensor_meta"] = pytree.tree_map(make_tensor_meta, value)
|
| 154 |
+
|
| 155 |
+
class ExportInterpreter(fx.Interpreter):
    """Interpreter that walks the input graph and forwards each node to the
    matching handler on the owning callback (placeholder / call_operator /
    call_sym / call_cond / call_map / ...), which re-emits it into the new
    graph."""

    def __init__(self, callback: "_ExportPassBaseDeprecatedDoNotUse", gm: fx.GraphModule) -> None:
        super().__init__(gm)
        self.callback = callback
        # Node currently being executed; updated in run_node so handlers can
        # read its metadata.
        self.node: torch.fx.Node = next(iter(gm.graph.nodes))

    def placeholder(
        self,
        target: str,  # type: ignore[override]
        args: Tuple[Argument, ...],
        kwargs: Dict[str, Argument],
    ) -> ProxyValue:
        arg = super().placeholder(target, args, kwargs)
        return self.callback.placeholder(target, arg, NodeMetadata(self.node.meta))

    def output(
        self,
        target: torch.fx.node.Target,
        args: Tuple[Argument, ...],
        kwargs: Dict[str, Argument],
    ) -> ProxyValue:
        return self.callback.output(args[0], NodeMetadata(self.node.meta)).data

    def call_function(
        self,
        target: torch.fx.node.Target,
        args: Tuple[Argument, ...],
        kwargs: Dict[str, Argument],
    ) -> ProxyValue:
        # Dispatch to a specialized callback handler based on the target kind.
        meta = NodeMetadata(self.node.meta)

        if target == operator.getitem:
            value, key = args
            return self.callback.call_getitem(value, key, meta)
        elif getattr(target, "__module__", None) in {"_operator", "math"}:
            # Symbolic arithmetic: _operator / math functions applied to symints.
            assert callable(target)
            return self.callback.call_sym(target, args, meta)
        elif target in _TORCH_SYM_OPS:
            assert callable(target)
            return self.callback.call_sym(target, args, meta)
        elif isinstance(target, (torch._ops.OpOverload, torch._ops.OpOverloadPacket)):
            return self.callback.call_operator(
                target,
                args,
                kwargs,
                meta,
            )
        elif target == torch.ops.higher_order.cond:
            pred, true_fn, false_fn, inputs = args
            return self.callback.call_cond(pred, true_fn, false_fn, inputs, meta)
        elif target == torch.ops.higher_order.map_impl:
            f, mapped_args, operands = args  # type: ignore[assignment]
            return self.callback.call_map(f, mapped_args, operands, meta)
        # For other unregistered HigherOrderOps, just interpret them blindly
        elif isinstance(target, torch._ops.HigherOrderOperator):
            return self.callback._fx(
                "call_function",
                target,
                args,
                kwargs,
                meta,
            )
        else:
            raise ExportPassBaseError(f"Unsupported target type: {target}")

    def get_attr(
        self, target: str, args: Tuple[Argument, ...], kwargs: Dict[str, Argument]  # type: ignore[override]
    ) -> Argument:
        return super().get_attr(target, args, kwargs)

    def call_module(
        self,
        target: torch.fx.node.Target,
        args: Tuple[Argument, ...],
        kwargs: Dict[str, Argument],
    ) -> None:
        raise ExportPassBaseError("call_module is not supported.")

    def call_method(
        self, target: str, args: Tuple[Argument, ...], kwargs: Dict[str, Argument]  # type: ignore[override]
    ) -> None:
        raise ExportPassBaseError("call_method is not supported.")

    def run_node(self, n: torch.fx.Node) -> Argument:
        # Track the current node and record a debug string for error reporting
        # before delegating to the base interpreter.
        self.node = n
        self.callback.node_debug_str = n.format_node()
        return super().run_node(n)
|
| 242 |
+
|
| 243 |
+
def __init__(self) -> None:
    # Marker checked by call() to verify subclasses invoked our __init__.
    self._initialized = True
    self.fake_tensor_mode: Optional[FakeTensorMode] = None
    self.node_debug_str: typing.Optional[str] = None
    # Tracer accumulating the output graph; swapped per-submodule in
    # call_submodule().
    self.tracer = self.ExportTracer(self, CodeGen())
    # Interpreter used by _fx() to execute targets on concrete data; seeded
    # with an empty graph module.
    self.interpreter = PropagateUnbackedSymInts(
        torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph())
    )
|
| 251 |
+
|
| 252 |
+
def _fx(
    self,
    kind: str,
    target: torch.fx.node.Target,
    args: Tuple[Argument, ...],
    kwargs: Dict[str, Argument],
    meta: NodeMetadata,
) -> ProxyValue:
    """Execute `target` on concrete data and emit a matching node into the
    new graph, returning both packaged as a ProxyValue."""
    # Unwrap ProxyValues to their underlying data for real execution...
    args_data, kwargs_data = pytree.tree_map_only(
        ProxyValue, lambda x: x.data, (args, kwargs)
    )
    res_data = getattr(self.interpreter, kind)(target, args_data, kwargs_data)
    # ...and to their proxies for graph construction.
    args_proxy, kwargs_proxy = pytree.tree_map_only(
        ProxyValue, lambda x: x.proxy, (args, kwargs)
    )

    name = None
    if isinstance(target, torch._ops.OpOverload):
        # Name the node after the overload packet (e.g. "add", not "add.Tensor").
        name = self.tracer.graph._target_to_str(target.overloadpacket.__name__)

    res_proxy = self.tracer.create_proxy(kind, target, args_proxy, kwargs_proxy, name=name)
    res_proxy.node.meta.update(meta.data)
    if self.fake_tensor_mode and (shape_env := self.fake_tensor_mode.shape_env):
        # Record any unbacked symbols bound by this node's result.
        if symbol_to_path := compute_unbacked_bindings(shape_env, res_data):
            res_proxy.node.meta["unbacked_bindings"] = symbol_to_path
    self.tracer.set_metadata(res_proxy.node, res_data)
    return ProxyValue(res_data, res_proxy)
|
| 279 |
+
|
| 280 |
+
def inputs(self, graph_module: torch.fx.GraphModule) -> List[Argument]:
    """Construct example inputs, one per placeholder, for `graph_module`.

    Prefers the graph-level "args" metadata; otherwise reconstructs a value
    per placeholder from its "val" / "tensor_meta" node metadata.
    """
    # TODO(angelayi): Update this with what we decide to do for metadata in
    # the exported graph module
    if (args := graph_module.meta.get("args", None)) is not None:
        return list(args)

    def extract_input(node: torch.fx.Node) -> Optional[FakeTensor]:
        if "val" in node.meta:
            fake = node.meta["val"]
            if hasattr(fake, "constant") and fake.constant is not None:
                # Prefer the concrete constant backing the fake tensor.
                return fake.constant
            return fake
        elif tensor_meta := node.meta.get("tensor_meta"):
            assert self.fake_tensor_mode is not None
            # Rebuild a fake tensor from recorded metadata alone.
            return FakeTensor(
                self.fake_tensor_mode,
                torch.empty(
                    tensor_meta.shape,
                    dtype=tensor_meta.dtype,
                    device="meta",
                    requires_grad=tensor_meta.requires_grad,
                    memory_format=tensor_meta.memory_format,
                ),
                torch.device("cpu"),
            )
        elif len(node.users) == 0:
            # Unused placeholder: no value is needed.
            return None
        raise ExportPassBaseError(
            f"Cannot construct an input for graph module: {graph_module}.",
        )

    return [
        extract_input(node)
        for node in graph_module.graph.nodes
        if node.op == "placeholder"
    ]
|
| 316 |
+
|
| 317 |
+
def on_attr(self, attr: ProxyValue) -> None:
    # Hook invoked when the tracer materializes a constant tensor as a
    # get_attr node (see ExportTracer.create_arg). Subclasses may override;
    # the default is a no-op.
    pass
|
| 319 |
+
|
| 320 |
+
def placeholder(self, name: str, arg: Argument, meta: NodeMetadata) -> ProxyValue:
    """Re-emit a placeholder into the new graph and attach its metadata."""
    proxy = self.tracer.create_proxy("placeholder", name, (), {})
    proxy.node.meta = meta.data
    self.tracer.set_metadata(proxy.node, arg)
    return ProxyValue(arg, proxy)
|
| 325 |
+
|
| 326 |
+
def call_operator(
    self,
    op,
    args: Tuple[Argument, ...],
    kwargs: Dict[str, Argument],
    meta: NodeMetadata,
) -> ProxyValue:
    """Handle an ATen operator call; the default re-emits it unchanged.
    Subclasses override this to transform operator nodes."""
    return self._fx("call_function", op, args, kwargs, meta)
|
| 334 |
+
|
| 335 |
+
def call_sym(
    self,
    target: Fn,
    args: Tuple[Argument, ...],
    meta: NodeMetadata,
) -> ProxyValue:
    """Handle a symbolic-shape computation (_operator / math / torch.sym_*
    targets); the default re-emits it with no kwargs."""
    return self._fx("call_function", target, args, {}, meta)
|
| 342 |
+
|
| 343 |
+
def call_cond(
    self,
    pred: ProxyValue,
    true_fn: torch.fx.GraphModule,
    false_fn: torch.fx.GraphModule,
    inputs: List[Argument],
    meta: NodeMetadata,
) -> ProxyValue:
    """Recursively run the pass over both branch submodules, then re-emit
    the cond call with the transformed branch graph modules."""
    true_branch = self.call_submodule(true_fn, tuple(inputs))
    false_branch = self.call_submodule(false_fn, tuple(inputs))
    assert true_branch is not None
    assert false_branch is not None
    return self._fx(
        "call_function",
        torch.ops.higher_order.cond,
        (pred, true_branch.graph_module, false_branch.graph_module, list(inputs)),
        {},
        meta,
    )
|
| 362 |
+
|
| 363 |
+
def call_map(
    self,
    f: torch.fx.GraphModule,
    mapped_args: List[ProxyValue],
    operands: List[ProxyValue],
    meta: NodeMetadata,
) -> ProxyValue:
    """Recursively run the pass over the mapped function `f`, then re-emit
    the map_impl call with the transformed graph module."""
    # Peel off a single slice of the mapped args so `f` can be traced once.
    xs = _unstack_pytree([arg.data for arg in mapped_args])[0]
    f_branch = self.call_submodule(f, tuple(xs + [arg.data for arg in operands]))
    assert f_branch is not None
    return self._fx(
        "call_function",
        torch.ops.higher_order.map_impl,
        (f_branch.graph_module, mapped_args, operands),
        {},
        meta,
    )
|
| 380 |
+
|
| 381 |
+
def call_getitem(
    self, value: ProxyValue, key: int, meta: NodeMetadata
) -> ProxyValue:
    """Emit an ``operator.getitem`` node indexing *value* at *key*."""
    getitem_args = (value, key)
    return self._fx("call_function", operator.getitem, getitem_args, {}, meta)
|
| 385 |
+
|
| 386 |
+
def output(self, results: List[Argument], meta: NodeMetadata) -> ProxyValue:
    """Emit the graph's terminating output node wrapping *results*."""
    wrapped = (results,)
    return self._fx("output", "output", wrapped, {}, meta)
|
| 388 |
+
|
| 389 |
+
def call_submodule(
    self, graph_module: fx.GraphModule, inputs: Tuple[Argument, ...]
) -> PassResult:
    """Re-trace ``graph_module`` through this pass's interpreter and return
    the rewritten module as a PassResult.

    Tracer and interpreter are swapped out and restored around the run so
    nested submodule calls (e.g. cond/map branches) each get a fresh graph.
    """
    # Fresh tracer for this submodule; reuse the graph's codegen so the
    # calling convention (pytree flattening) is preserved.
    prev_tracer, self.tracer = self.tracer, self.ExportTracer(
        self, graph_module.graph._codegen
    )
    self.tracer.fake_tensor_mode = prev_tracer.fake_tensor_mode
    interpreter = self.ExportInterpreter(self, graph_module)
    # Park a dummy interpreter on self while this submodule runs.
    prev_interpreter, self.interpreter = self.interpreter, torch.fx.Interpreter(  # type: ignore[assignment]
        torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph())
    )
    # Interpreter.run wants raw data, not ProxyValue wrappers.
    inputs_data = pytree.tree_map_only(ProxyValue, lambda x: x.data, inputs)
    with fx_traceback.preserve_node_meta():
        interpreter.run(*inputs_data)

    new_graph_module = torch.fx.GraphModule(self.tracer.root, self.tracer.graph)

    # Restore outer state before returning.
    self.tracer = prev_tracer
    self.interpreter = prev_interpreter
    return PassResult(
        new_graph_module,
        True,
    )
|
| 412 |
+
|
| 413 |
+
def call(self, graph_module: fx.GraphModule) -> PassResult:
    """Pass entry point: re-trace ``graph_module`` under an appropriate
    FakeTensorMode and return the rewritten module."""
    if not getattr(self, "_initialized", False):
        raise ExportPassBaseError(
            "ExportPass is not initialized with __init__().",
        )

    inputs = self.inputs(graph_module)

    # All fake-tensor inputs must share one FakeTensorMode; reuse it if found.
    fake_tensor_mode = None
    for i in inputs:
        if isinstance(i, FakeTensor):
            assert (
                fake_tensor_mode is None or fake_tensor_mode is i.fake_mode
            ), "Multiple fake tensor mode detected."
            fake_tensor_mode = i.fake_mode
    if fake_tensor_mode is None:
        # No fake inputs: fabricate a tolerant mode for the tracer and run
        # without entering any mode / dispatcher context.
        self.tracer.fake_tensor_mode = FakeTensorMode(allow_non_fake_inputs=True)
        fake_tensor_mode = nullcontext()  # type: ignore[assignment]
        dispatcher_mode = nullcontext()  # type: ignore[assignment]
    else:
        # NOTE(review): this mutates the caller-owned fake mode's flag and
        # never restores it — confirm that is intended.
        fake_tensor_mode.allow_non_fake_inputs = True
        self.tracer.fake_tensor_mode = fake_tensor_mode
        dispatcher_mode = enable_python_dispatcher()  # type: ignore[assignment]
    self.fake_tensor_mode = self.tracer.fake_tensor_mode

    with fake_tensor_mode, dispatcher_mode:  # type: ignore[assignment, union-attr]
        result = self.call_submodule(graph_module, tuple(inputs))

    return result
|
janus/lib/python3.10/site-packages/torch/_export/pass_infra/__init__.py
ADDED
|
File without changes
|
janus/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (175 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/node_metadata.cpython-310.pyc
ADDED
|
Binary file (1.47 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/proxy_value.cpython-310.pyc
ADDED
|
Binary file (1.73 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/pass_infra/proxy_value.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
# pyre-strict
|
| 3 |
+
from typing import Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ProxyValue:
    """Pairs a concrete value (often a fake tensor) with the ``torch.fx``
    proxy or node that produced it while a pass re-traces a graph."""

    # pyre-ignore
    def __init__(self, data, proxy: Union[torch.fx.Proxy, torch.fx.Node]):
        # pyre-ignore
        self.data = data
        self.proxy_or_node = proxy

    @property
    def node(self) -> torch.fx.Node:
        """The underlying fx Node, unwrapping a Proxy when necessary."""
        stored = self.proxy_or_node
        if isinstance(stored, torch.fx.Proxy):
            return stored.node
        assert isinstance(stored, torch.fx.Node)
        return stored

    @property
    def proxy(self) -> torch.fx.Proxy:
        """The fx Proxy; raises RuntimeError if only a bare Node was stored."""
        stored = self.proxy_or_node
        if isinstance(stored, torch.fx.Proxy):
            return stored
        raise RuntimeError(
            f"ProxyValue doesn't have attached Proxy object. Node: {stored.format_node()}"
        )

    def to_tensor(self) -> torch.Tensor:
        """Return the concrete value, asserting it is a tensor."""
        assert isinstance(self.data, torch.Tensor)
        return self.data

    def is_tensor(self) -> bool:
        """Whether the concrete value is a tensor."""
        return isinstance(self.data, torch.Tensor)

    # pyre-ignore
    def __iter__(self):
        # Delegate iteration to the concrete value (e.g. tuple of outputs).
        yield from self.data

    def __bool__(self) -> bool:
        # Truthiness mirrors the concrete value.
        return bool(self.data)
|
janus/lib/python3.10/site-packages/torch/_export/passes/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .replace_view_ops_with_view_copy_ops_pass import ReplaceViewOpsWithViewCopyOpsPass
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (269 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/_node_metadata_hook.cpython-310.pyc
ADDED
|
Binary file (2.43 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc
ADDED
|
Binary file (6.3 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc
ADDED
|
Binary file (2.75 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/constant_folding.cpython-310.pyc
ADDED
|
Binary file (7.19 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc
ADDED
|
Binary file (3.39 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constants_pass.cpython-310.pyc
ADDED
|
Binary file (8.29 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc
ADDED
|
Binary file (1.06 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_autocast_with_hop_pass.cpython-310.pyc
ADDED
|
Binary file (5.59 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_quantized_ops_with_standard_ops_pass.cpython-310.pyc
ADDED
|
Binary file (15.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_set_grad_with_hop_pass.cpython-310.pyc
ADDED
|
Binary file (3.74 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc
ADDED
|
Binary file (2.29 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_with_hop_pass_util.cpython-310.pyc
ADDED
|
Binary file (4.32 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/passes/_node_metadata_hook.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import contextlib
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch.fx.graph_module import GraphModule
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
_EMPTY_NN_MODULE_STACK_KEY = "_empty_nn_module_stack_from_metadata_hook"
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _node_metadata_hook(node: torch.fx.Node, stack_trace: str) -> None:
    """
    Hook for adding the appropriate metadata to nodes that are created during a
    pass using graph.create_node. An example of how to use it:

    ```
    with _set_node_metadata_hook(gm,
        functools.partial(_node_metadata_hook, stack_trace="file")
    ):
        pass(gm)
    ```

    This hook should not work for all generic cases -- specifically it assumes
    that nodes being added are only call_function nodes, and copies over the
    first argument node's nn_module_stack.
    """
    assert node.op == "call_function" and callable(node.target)

    # Metadata is borrowed from the first Node argument; at least one Node
    # argument is required.
    arg_meta = [arg.meta for arg in node.args if isinstance(arg, torch.fx.Node)]
    assert len(arg_meta) >= 1
    arg_meta = arg_meta[0]

    if (
        isinstance(node.target, torch._ops.OpOverload)
        and len(node.target._schema.returns) == 0
    ):
        # Ops with no returns (e.g. assertion ops) carry no value metadata.
        node.meta["val"] = None
    else:
        # Recompute "val" by running the target on the arguments' recorded
        # fake values. NOTE(review): assumes every Node argument already has
        # a "val" entry — confirm for the passes that install this hook.
        fake_args = [
            arg.meta["val"] if isinstance(arg, torch.fx.Node) else arg
            for arg in node.args
        ]
        fake_res = node.target(*fake_args)
        node.meta["val"] = fake_res

    node.meta["stack_trace"] = stack_trace
    # Fall back to a sentinel stack entry when the source arg has none.
    node.meta["nn_module_stack"] = arg_meta.get(
        "nn_module_stack",
        {
            _EMPTY_NN_MODULE_STACK_KEY: (
                _EMPTY_NN_MODULE_STACK_KEY,
                _EMPTY_NN_MODULE_STACK_KEY,
            )
        },
    )
    node.meta["torch_fn"] = (
        f"{node.target.__name__}_0",
        f"{node.target.__class__.__name__}.{node.target.__name__}",
    )
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@contextlib.contextmanager
|
| 63 |
+
def _set_node_metadata_hook(gm: torch.fx.GraphModule, f):
|
| 64 |
+
"""
|
| 65 |
+
Takes a callable which will be called after we create a new node. The
|
| 66 |
+
callable takes the newly created node as input and returns None.
|
| 67 |
+
"""
|
| 68 |
+
assert callable(f), "node_metadata_hook must be a callable."
|
| 69 |
+
|
| 70 |
+
# Add the hook to all submodules
|
| 71 |
+
for m in gm.modules():
|
| 72 |
+
if isinstance(m, GraphModule):
|
| 73 |
+
m._register_create_node_hook(f)
|
| 74 |
+
try:
|
| 75 |
+
yield
|
| 76 |
+
finally:
|
| 77 |
+
# Restore hook for all submodules
|
| 78 |
+
for m in gm.modules():
|
| 79 |
+
if isinstance(m, GraphModule):
|
| 80 |
+
m._unregister_create_node_hook(f)
|
janus/lib/python3.10/site-packages/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py
ADDED
|
@@ -0,0 +1,227 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import math
|
| 3 |
+
import operator
|
| 4 |
+
import traceback
|
| 5 |
+
from functools import partial
|
| 6 |
+
from typing import Callable, Dict, List, NamedTuple, Set
|
| 7 |
+
|
| 8 |
+
import sympy
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
import torch.fx
|
| 12 |
+
from torch.utils._sympy.value_ranges import ValueRanges
|
| 13 |
+
from torch.utils._sympy.numbers import int_oo
|
| 14 |
+
from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
|
| 15 |
+
from torch.fx.passes.infra.pass_base import PassBase, PassResult
|
| 16 |
+
|
| 17 |
+
__all__ = ["InputDim"]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class InputDim(NamedTuple):
    """Identifies one dimension of one graph input."""

    # Name of the placeholder node the dimension belongs to.
    input_name: str
    # Index of the dimension within that input's shape.
    dim: int
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _convert_to_int(val):
    """Lower a simple sympy bound to a concrete Python number.

    ``oo``/``int_oo`` map to ``math.inf`` (and their negations to
    ``-math.inf``); sympy Integers become plain ints; anything else is
    rejected with a RuntimeError.
    """
    positive_infinities = (sympy.oo, int_oo)
    negative_infinities = (-sympy.oo, -int_oo)
    if val in positive_infinities:
        return math.inf
    if val in negative_infinities:
        return -math.inf
    if isinstance(val, sympy.Integer):
        return int(val)
    raise RuntimeError(
        "Export constraints cannot be non-integer expressions"
    )
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def _convert_range_to_int(range: ValueRanges):
    """Collapse a ValueRanges pair into concrete ``(min, max)`` numbers."""
    assert isinstance(range, ValueRanges)
    lower_bound = _convert_to_int(range.lower)
    upper_bound = _convert_to_int(range.upper)
    return lower_bound, upper_bound
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class _AddRuntimeAssertionsForInlineConstraintsPass(PassBase):
    """Insert runtime assertions enforcing inline range constraints on
    unbacked symbols (e.g. values produced by ``.item()``).

    For every call_function node whose ``val`` metadata is a symbolic scalar
    (or a tensor whose shape contains one) with an entry in
    ``range_constraints``, emits ``aten._assert_async.msg`` checks that the
    value stays within its declared [min, max] — unless an equivalent
    assertion already exists in the graph.
    """

    def __init__(
        self,
        range_constraints: Dict[sympy.Symbol, ValueRanges],
    ):
        super().__init__()
        # Symbol -> allowed value range, supplied by export.
        self.range_constraints: Dict[sympy.Symbol, ValueRanges] = range_constraints
        # Symbols already asserted, to avoid duplicate checks.
        self._asserts_generated_unbacked_symbols: Set[sympy.Symbol] = set()
        # Number of comparison nodes inserted; zero means the pass was a no-op.
        self.counter = 0

    def _assert_range_constraint(self, node, lower, upper, assert_msg):
        # Emit only the finite-bound checks; an infinite bound needs no guard.
        last_node = node
        if lower > -math.inf:
            last_node = self._insert_assert_async(last_node, operator.ge, node, lower, assert_msg)

        if upper < math.inf:
            last_node = self._insert_assert_async(last_node, operator.le, node, upper, assert_msg)

    def _insert_assert_async(self, last_node, op, lower, upper, assert_msg):
        """
        Inserts assert_async call_function nodes in the graph. This function is
        called **during** the interpreter-based pass.

        NOTE(review): despite the names, ``lower``/``upper`` are really the two
        operands of ``op`` — callers pass ``(node, bound)`` — so ``lhs``/``rhs``
        would be clearer names.
        """
        self.counter += 1
        graph = last_node.graph
        with graph.inserting_after(last_node):
            cmp = graph.call_function(op, (lower, upper), {})
            with graph.inserting_after(cmp):
                # Comparison result must be a tensor for _assert_async.msg.
                cmp_tensor = graph.call_function(torch.ops.aten.scalar_tensor.default, (cmp,), {})
                with graph.inserting_after(cmp_tensor):
                    assert_async = graph.call_function(
                        torch.ops.aten._assert_async.msg,
                        (cmp_tensor, assert_msg),
                        {},
                    )
        return assert_async

    def call(self, graph_module) -> PassResult:
        # Symbols already guarded by assertions present in the input graph
        # are skipped below.
        self.existing_inline_assertions = _get_existing_inline_assertions(
            graph_module, self.range_constraints
        )

        for module in graph_module.modules():
            if not isinstance(module, torch.fx.GraphModule):
                continue
            for node in module.graph.nodes:
                if node.op != "call_function":
                    continue
                if "val" not in node.meta:
                    continue

                val = node.meta["val"]
                # In general, we may have to deal the case such as: ret[1].shape[0].
                # We need first find out what symbols require assertion, then we need to follow the path
                # from ret to the symbol, construct the proxies along the way and construct the messages
                # piece-wise at the same time.
                #
                # We use post-order traversal to collect all the proxies callbacks needed, construct
                # the error message callbacks, and at the top-level traversal tree we execute all the callbacks.
                # We need the callbacks because, in order to call the function to create a proxy for shape[0], we
                # need the proxy for shape, which further requires the proxy for ret[1], etc.

                def add_assertions(val):
                    call_backs: List[Callable] = []
                    messages: List[str] = []
                    if isinstance(val, (torch.SymInt, torch.SymFloat, torch.SymBool)):
                        symbol = val.node.expr
                        if symbol in self.existing_inline_assertions:
                            return call_backs, messages
                        if isinstance(symbol, sympy.Symbol) and free_unbacked_symbols(symbol):
                            if symbol in self._asserts_generated_unbacked_symbols:
                                return call_backs, messages
                            # We only care about unbacked symints for these inline
                            # constraints, which are prefixed with 'u'
                            constraint = self.range_constraints[symbol]
                            min_val, max_val = _convert_range_to_int(constraint)
                            assert_msg = f" is outside of inline constraint [{min_val}, {max_val}]."
                            call_backs.append(
                                partial(self._assert_range_constraint, lower=min_val, upper=max_val)
                            )
                            messages.append(assert_msg)
                            self._asserts_generated_unbacked_symbols.add(symbol)

                    elif isinstance(val, torch.Tensor):
                        for i, sym in enumerate(val.shape):
                            cbs, msgs = add_assertions(sym)
                            for cb, msg in zip(cbs, msgs):
                                # NOTE(review): ``sym_size_cb`` closes over the
                                # loop variable ``cb`` late-bound; with more
                                # than one symbolic dim per tensor, all
                                # callbacks would see the last ``cb`` when
                                # executed — confirm whether this can occur.
                                def sym_size_cb(node, assert_msg, dim):
                                    with node.graph.inserting_after(node):
                                        dim_node = module.graph.call_function(
                                            torch.ops.aten.sym_size.int,
                                            (node, dim),
                                            {},
                                        )
                                    cb(node=dim_node, assert_msg=assert_msg)
                                call_backs.append(partial(sym_size_cb, dim=i))
                                messages.append(f".shape[{i}]" + msg)
                    return call_backs, messages

                callbacks, messages = add_assertions(val)
                for cb, msg in zip(callbacks, messages):
                    cb(node=node, assert_msg=f"{node}" + msg)

            module.recompile()

        # Sometimes this pass would return a wrong graph where we have mismatched
        # node names in signature. Before we fix it, let's just skip it.
        if self.counter == 0 and type(self) is _AddRuntimeAssertionsForInlineConstraintsPass:
            return PassResult(graph_module, False)

        # Populate the stack trace with dummy vals to respect IR
        for node in graph_module.graph.nodes:
            if not node.meta.get("stack_trace", None) and node.op not in ["placeholder", "output"]:
                node.meta["stack_trace"] = "".join(traceback.format_stack(limit=1))
        return PassResult(graph_module, True)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def _get_existing_inline_assertions(
    graph_module: torch.fx.GraphModule,
    range_constraints: Dict[sympy.Symbol, ValueRanges],
) -> Dict[sympy.Symbol, ValueRanges]:
    """Scan ``graph_module`` for existing ``aten._assert_scalar`` checks and
    return, per symbol, the range those checks already pin it to.

    Raises RuntimeError if an asserted symbol has no entry in
    ``range_constraints``.
    """
    existing_inline_assertions: Dict[sympy.Symbol, ValueRanges] = {}

    for module in graph_module.modules():
        if not isinstance(module, torch.fx.GraphModule):
            continue

        # Find all the existing inline assertions. They will look something like:
        # %_local_scalar_dense = call_function[target=torch.ops.aten._local_scalar_dense.default](args = (%arg1_1,), kwargs = {})
        # %ge = call_function[target=operator.ge](args = (%_local_scalar_dense, 0), kwargs = {})
        # %_assert_scalar = call_function[target=torch.ops.aten._assert_scalar.default](args = (%scalar_tensor, "..."), kwargs = {})
        for node in module.graph.nodes:
            if node.target != torch.ops.aten._assert_scalar.default:
                continue

            # Only simple two-operand le/ge comparisons are recognized.
            compare_arg = node.args[0]
            if not (
                isinstance(compare_arg, torch.fx.Node) and
                compare_arg.op == "call_function" and
                compare_arg.target in (operator.le, operator.ge) and
                len(compare_arg.args) == 2
            ):
                continue

            compare_op = compare_arg.target
            lhs, rhs = compare_arg.args

            def maybe_get_symint(x):
                # Unwrap a node carrying a SymInt "val" into its sympy expr;
                # anything else passes through unchanged.
                if (
                    isinstance(x, torch.fx.Node) and
                    "val" in x.meta and
                    isinstance(x.meta["val"], torch.SymInt)
                ):
                    return x.meta["val"].node.expr
                return x

            lhs = maybe_get_symint(lhs)
            rhs = maybe_get_symint(rhs)

            # Normalize `a >= b` into `b <= a` so only `<=` is reasoned about.
            if compare_op == operator.ge:
                lhs, rhs = rhs, lhs

            if isinstance(lhs, sympy.Symbol) and isinstance(rhs, int):
                symint = lhs
                scalar = rhs
            elif isinstance(rhs, sympy.Symbol) and isinstance(lhs, int):
                symint = rhs
                scalar = lhs
            else:
                continue

            if symint not in range_constraints:
                raise RuntimeError(f"Unable to find symint {symint} in {range_constraints}")

            previous_range = existing_inline_assertions.get(symint, ValueRanges(-math.inf, math.inf))

            # symint <= scalar gives an upper bound; scalar <= symint a lower one.
            if symint is lhs:
                bounds = ValueRanges(-math.inf, scalar)
            else:
                bounds = ValueRanges(scalar, math.inf)
            # Intersect with whatever earlier assertions already established.
            existing_inline_assertions[symint] = previous_range & bounds

    return existing_inline_assertions
|
janus/lib/python3.10/site-packages/torch/_export/passes/collect_tracepoints_pass.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import operator
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch.export.exported_program import ConstantArgument, TensorArgument
|
| 6 |
+
from torch.fx.passes.infra.pass_base import PassBase, PassResult
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
__all__ = ["CollectTracepointsPass"]
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class CollectTracepointsPass(PassBase):
    """
    Collects the module-call signatures recorded by
    ``torch.ops.higher_order._export_tracepoint`` nodes into ``self.specs``,
    rewires each tracepoint's ``getitem`` users back to the original values
    (updating name references in ``self.sig``), erases the tracepoint nodes,
    and trims ``nn_module_stack`` metadata around tracepoint boundaries.
    """

    def __init__(self, specs, sig) -> None:
        super().__init__()
        # Mapping from module path -> spec object with mutable
        # `inputs`/`outputs` lists (presumably ModuleCallSignature — verify
        # against the caller).
        self.specs = specs
        # Graph signature whose name references are updated on rewiring.
        self.sig = sig

    def call(self, gm):
        def get_arg_spec(arg):
            # Serialize one tracepoint argument for the call signature.
            if isinstance(arg, torch.fx.Node):
                if isinstance(arg.meta.get("val"), torch.Tensor):
                    return TensorArgument(name=arg.name)
                else:
                    raise AssertionError(
                        "Symint input is not implemented yet for submodule call signature."
                    )
            else:
                return ConstantArgument(name="", value=arg)

        for module in gm.modules():
            if not isinstance(module, torch.fx.GraphModule):
                continue
            # Forward sweep: after a "module_call_outputs" marker, pop the
            # deepest nn_module_stack entry from nodes still carrying the
            # same stack (they logically sit after the submodule region).
            nn_module_stack = None
            for node in module.graph.nodes:
                if node.op != "call_function":
                    continue
                if node.target == torch.ops.higher_order._export_tracepoint:
                    kind = node.kwargs["kind"]
                    if kind == "module_call_outputs":
                        nn_module_stack = node.meta["nn_module_stack"]
                    elif kind == "module_call_inputs":
                        nn_module_stack = None
                    else:
                        raise AssertionError(f"Unknown tracepoint kind: {kind}")
                elif node.meta["nn_module_stack"] == nn_module_stack:
                    node.meta["nn_module_stack"].popitem()
                else:
                    nn_module_stack = None
            # Backward sweep: symmetric trimming before "module_call_inputs".
            nn_module_stack = None
            for node in reversed(module.graph.nodes):
                if node.op != "call_function":
                    continue
                if node.target == torch.ops.higher_order._export_tracepoint:
                    kind = node.kwargs["kind"]
                    if kind == "module_call_inputs":
                        nn_module_stack = node.meta["nn_module_stack"]
                    elif kind == "module_call_outputs":
                        nn_module_stack = None
                    else:
                        raise AssertionError(f"Unknown tracepoint kind: {kind}")
                elif node.meta["nn_module_stack"] == nn_module_stack:
                    node.meta["nn_module_stack"].popitem()
                else:
                    nn_module_stack = None
        for module in gm.modules():
            if not isinstance(module, torch.fx.GraphModule):
                continue
            for node in module.graph.nodes:
                if node.op != "call_function":
                    continue
                if node.target == torch.ops.higher_order._export_tracepoint:
                    for i, arg in enumerate(node.args):
                        kind = node.kwargs["kind"]
                        if kind == "module_call_inputs":
                            self.specs[node.kwargs["path"]].inputs.append(
                                get_arg_spec(arg)
                            )
                        elif kind == "module_call_outputs":
                            self.specs[node.kwargs["path"]].outputs.append(
                                get_arg_spec(arg)
                            )
                        else:
                            raise AssertionError(f"Unknown tracepoint kind: {kind}")
                        if isinstance(arg, torch.fx.Node):
                            # Redirect the getitem extracting position i back
                            # to the original value.
                            for user in node.users:
                                assert user.op == "call_function"
                                assert user.target == operator.getitem
                                assert isinstance(user.args[1], int)
                                if user.args[1] == i:
                                    user.replace_all_uses_with(arg)
                                    self.sig.replace_all_uses(user.name, arg.name)
                                    break
                    # Erase now-dead getitem users, then the tracepoint itself.
                    # NOTE(review): erases via gm.graph while iterating
                    # module.graph — only safe if tracepoints occur in the
                    # root graph; confirm.
                    users = list(node.users)
                    for user in users:
                        assert len(user.users) == 0
                        gm.graph.erase_node(user)
                    gm.graph.erase_node(node)
        return PassResult(gm, True)
|
janus/lib/python3.10/site-packages/torch/_export/passes/constant_folding.py
ADDED
|
@@ -0,0 +1,299 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import collections
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
from typing import Any, Callable, Dict, Optional
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.utils._pytree as pytree
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
aten = torch.ops.aten
|
| 11 |
+
|
| 12 |
+
# We would like to split modules into two subgraphs for runtime weight updates to work correctly.
|
| 13 |
+
# The use case and more information could be found at:
|
| 14 |
+
# https://docs.google.com/document/d/1inZC-8KarJ6gKB7G9egmYLx1V_dKX_apxon0w4zPC0Q/edit?usp=sharing
|
| 15 |
+
META_TAG = "MODULE_TYPE"
|
| 16 |
+
MODULE_TAG = "_MAIN_MODULE"
|
| 17 |
+
CONST_MODULE_TAG = "_CONST_MODULE"
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def replace_node_with_constant(gm, node, constant, name=None):
    """Replace *node* in ``gm``'s graph with a ``get_attr`` node reading a
    newly registered buffer holding *constant*.

    When *name* is omitted, a fresh ``_frozen_param<i>`` attribute name is
    allocated; the counter is remembered on the module so later calls start
    where this one left off.
    """
    graph = gm.graph

    if name:
        qualname = name
    else:
        # Allocate the next unused _frozen_param<i> slot.
        if not hasattr(gm, "_frozen_param_count"):
            gm._frozen_param_count = 0
        index = gm._frozen_param_count
        qualname = f"_frozen_param{index}"
        while hasattr(gm, qualname):
            index += 1
            qualname = f"_frozen_param{index}"
        gm._frozen_param_count = index + 1

    with graph.inserting_before(node):
        getattr_node = graph.create_node("get_attr", qualname, (), {})
        node.replace_all_uses_with(getattr_node)
        getattr_node.meta.update(node.meta)
        graph.erase_node(node)

    # needed to suppress `does not reference an nn.Module, nn.Parameter, or buffer` warning
    gm.register_buffer(qualname, constant)
    setattr(gm, qualname, constant)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class ConstantFolder(torch.fx.Interpreter):
|
| 50 |
+
def __init__(
    self,
    gm,
    skip_constructors=False,
):
    """Interpreter that evaluates constant-only subgraphs of ``gm``.

    ``skip_constructors`` — when True, tensor-constructor ops are not folded
    (consulted by subclass logic; not read in this chunk).
    """
    super().__init__(gm)
    # Nodes determined to be constant -> their computed replacement value.
    self.node_replacements: Dict[torch.fx.Node, Any] = {}
    # Per-node count of uses that were replaced.
    self.replaced_uses: Dict[torch.fx.Node, int] = collections.Counter()
    # Sentinel marking values that cannot be constant-folded.
    self.unknown_value = object()
    self.skip_constructors: bool = skip_constructors

    # overwrite this to deallocate env values if their only remaining use
    # is the output
    self.user_to_last_uses = self.node_to_last_non_output_use()
|
| 64 |
+
|
| 65 |
+
def is_impure(self, node: torch.fx.node.Node):
    """Return True for dequantization patterns that must NOT be folded, so
    the quantized weight stays in the graph for later fusion."""
    if (
        node.target == torch.ops.prims.convert_element_type.default
        and node.args[0].op == "get_attr"  # type: ignore[union-attr]
        and node.args[0].meta["val"].dtype == torch.int8  # type: ignore[union-attr]
        and node.args[1] == torch.bfloat16
    ):
        # For int8_weight -> dq -> bf16_weight
        return True
    if node.target in [
        torch.ops.quantized_decomposed.dequantize_per_channel.default,
        torch.ops.quantized_decomposed.dequantize_per_tensor.default,
        torch.ops.quantized_decomposed.dequantize_per_tensor.tensor,
    ]:
        # For the pattern fp32_weight -> q -> dq
        # We only folding fp32_weight -> q
        # int8_weight and leave dq in graph to be fused
        return True
    return False
|
| 84 |
+
|
| 85 |
+
def node_to_last_non_output_use(self):
|
| 86 |
+
last_non_output_use = collections.defaultdict(list)
|
| 87 |
+
seen_uses = set()
|
| 88 |
+
output_node = next(iter(reversed(self.module.graph.nodes)))
|
| 89 |
+
|
| 90 |
+
for node in reversed(self.module.graph.nodes):
|
| 91 |
+
if node.target == "output":
|
| 92 |
+
continue
|
| 93 |
+
|
| 94 |
+
def add_use(inp):
|
| 95 |
+
if inp in seen_uses:
|
| 96 |
+
return
|
| 97 |
+
|
| 98 |
+
seen_uses.add(inp)
|
| 99 |
+
last_non_output_use[node].append(inp)
|
| 100 |
+
|
| 101 |
+
# In-place is fine since we don't mutate
|
| 102 |
+
pytree.tree_map_only_(torch.fx.Node, add_use, (node.args, node.kwargs))
|
| 103 |
+
|
| 104 |
+
# if this node is only used in output, we want to gc it right away
|
| 105 |
+
if len(node.users) == 1 and output_node in node.users:
|
| 106 |
+
last_non_output_use[node].append(node)
|
| 107 |
+
|
| 108 |
+
return last_non_output_use
|
| 109 |
+
|
| 110 |
+
def run_node(self, node):
|
| 111 |
+
if node.target == "output":
|
| 112 |
+
# because we remove nodes from env on last non output use,
|
| 113 |
+
# re-define them now or we'll get error in interpreter
|
| 114 |
+
def set_env(arg):
|
| 115 |
+
self.env[arg] = self.unknown_value
|
| 116 |
+
|
| 117 |
+
# In-place is fine since we don't mutate
|
| 118 |
+
pytree.tree_map_only_(torch.fx.Node, set_env, node.args)
|
| 119 |
+
return super().run_node(node)
|
| 120 |
+
|
| 121 |
+
args, kwargs = self.fetch_args_kwargs_from_env(node)
|
| 122 |
+
flattened_inputs = pytree.arg_tree_leaves(*args, **kwargs)
|
| 123 |
+
|
| 124 |
+
# We need to do this weird thing because in cases where flattened_inputs
|
| 125 |
+
# contains a ScriptObject, equality checking results in a type error if
|
| 126 |
+
# the types are different.
|
| 127 |
+
if any(
|
| 128 |
+
type(self.unknown_value) == type(input_) and self.unknown_value == input_
|
| 129 |
+
for input_ in flattened_inputs
|
| 130 |
+
):
|
| 131 |
+
return self.unknown_value
|
| 132 |
+
|
| 133 |
+
# TODO - fix errors with this
|
| 134 |
+
if (
|
| 135 |
+
node.op == "call_function"
|
| 136 |
+
and node.target == aten._efficientzerotensor.default
|
| 137 |
+
):
|
| 138 |
+
return self.unknown_value
|
| 139 |
+
|
| 140 |
+
# TODO - constant folding triton kernel returns the inputs -- fix this
|
| 141 |
+
if (
|
| 142 |
+
node.op == "call_function"
|
| 143 |
+
and node.name == "triton_kernel_wrapper_functional_proxy"
|
| 144 |
+
):
|
| 145 |
+
return self.unknown_value
|
| 146 |
+
|
| 147 |
+
# skip constructors, since inductor generates optimal code for them already
|
| 148 |
+
# and turning into tensor would result in an additional global memory read
|
| 149 |
+
# TODO - more complicated strategy
|
| 150 |
+
if (
|
| 151 |
+
self.skip_constructors
|
| 152 |
+
and node.op != "get_attr"
|
| 153 |
+
and not any(isinstance(e, torch.Tensor) for e in flattened_inputs)
|
| 154 |
+
):
|
| 155 |
+
return self.unknown_value
|
| 156 |
+
|
| 157 |
+
# All mutations should either be removed or on inputs which we did not make constant
|
| 158 |
+
if (
|
| 159 |
+
isinstance(node.target, torch._ops.OpOverload)
|
| 160 |
+
and torch.Tag.nondeterministic_seeded in node.target.tags
|
| 161 |
+
):
|
| 162 |
+
return self.unknown_value
|
| 163 |
+
|
| 164 |
+
out = super().run_node(node)
|
| 165 |
+
|
| 166 |
+
if node.op != "get_attr" and isinstance(out, torch.Tensor):
|
| 167 |
+
if out.device.type == "meta":
|
| 168 |
+
return out
|
| 169 |
+
|
| 170 |
+
if not self.insertable_tensor_check(out):
|
| 171 |
+
return out
|
| 172 |
+
|
| 173 |
+
if self.is_impure(node):
|
| 174 |
+
return self.unknown_value
|
| 175 |
+
|
| 176 |
+
self.add_node_replacement(node, out)
|
| 177 |
+
|
| 178 |
+
flattened_node_inps = pytree.arg_tree_leaves(*node.args, **node.kwargs)
|
| 179 |
+
|
| 180 |
+
for n in flattened_node_inps:
|
| 181 |
+
if not isinstance(n, torch.fx.Node):
|
| 182 |
+
continue
|
| 183 |
+
|
| 184 |
+
self.replaced_uses[n] += 1
|
| 185 |
+
|
| 186 |
+
for to_delete in self.user_to_last_uses.get(node, []):
|
| 187 |
+
if self.replaced_uses[to_delete] == len(to_delete.users):
|
| 188 |
+
self.node_replacements.pop(to_delete, None)
|
| 189 |
+
|
| 190 |
+
return out
|
| 191 |
+
|
| 192 |
+
def insertable_tensor_check(self, tensor: torch.Tensor) -> bool:
|
| 193 |
+
return True
|
| 194 |
+
|
| 195 |
+
def add_node_replacement(self, node: torch.fx.Node, tensor: torch.Tensor) -> None:
|
| 196 |
+
self.node_replacements[node] = tensor
|
| 197 |
+
|
| 198 |
+
def run(self):
|
| 199 |
+
env = {}
|
| 200 |
+
for n in self.module.graph.find_nodes(op="placeholder"):
|
| 201 |
+
env[n] = self.unknown_value
|
| 202 |
+
return super().run(initial_env=env)
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def constant_fold(gm, constraint_fn: Optional[Callable[[torch.fx.Node], bool]] = None):
|
| 206 |
+
with torch.utils._python_dispatch._disable_current_modes():
|
| 207 |
+
cf = ConstantFolder(gm, skip_constructors=True)
|
| 208 |
+
cf.run()
|
| 209 |
+
|
| 210 |
+
for node, constant in cf.node_replacements.items():
|
| 211 |
+
if constraint_fn is not None and not constraint_fn(node):
|
| 212 |
+
continue
|
| 213 |
+
replace_node_with_constant(gm, node, constant)
|
| 214 |
+
|
| 215 |
+
erased_params = []
|
| 216 |
+
# Get all attr users by looking up the graph instead from node.users, because in this case
|
| 217 |
+
# _tensor_constant0 and _tensor_constant0_1 are actually refereing to the same tensor.
|
| 218 |
+
|
| 219 |
+
# opcode name target args kwargs
|
| 220 |
+
# ------------- ------------------- ---------------- --------------------------- --------
|
| 221 |
+
# placeholder arg0_1 arg0 () {}
|
| 222 |
+
# get_attr _tensor_constant0 state () {}
|
| 223 |
+
# call_function add aten.add.Tensor (arg0_1, _tensor_constant0) {}
|
| 224 |
+
# get_attr _tensor_constant0_1 state () {}
|
| 225 |
+
# call_function add_ aten.add_.Tensor (_tensor_constant0_1, 1) {}
|
| 226 |
+
# output output output ([add],) {}
|
| 227 |
+
|
| 228 |
+
get_attr_node_users = defaultdict(list)
|
| 229 |
+
for node in gm.graph.nodes:
|
| 230 |
+
if node.op == "get_attr":
|
| 231 |
+
get_attr_node_users[node.target].extend(node.users.keys())
|
| 232 |
+
for node in gm.graph.find_nodes(op="get_attr"):
|
| 233 |
+
if node.op == "get_attr" and len(get_attr_node_users[node.target]) == 0:
|
| 234 |
+
if hasattr(gm, node.target):
|
| 235 |
+
delattr(gm, node.target)
|
| 236 |
+
erased_params.append(node)
|
| 237 |
+
for node in erased_params:
|
| 238 |
+
gm.graph.erase_node(node)
|
| 239 |
+
|
| 240 |
+
gm.graph.eliminate_dead_code()
|
| 241 |
+
gm.graph.lint()
|
| 242 |
+
gm.recompile()
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def constant_graph_tag(gm: torch.fx.GraphModule):
|
| 246 |
+
with torch.utils._python_dispatch._disable_current_modes():
|
| 247 |
+
cf = ConstantFolder(gm, skip_constructors=True)
|
| 248 |
+
cf.run()
|
| 249 |
+
|
| 250 |
+
for node in gm.graph.nodes:
|
| 251 |
+
if (
|
| 252 |
+
node.op == "get_attr"
|
| 253 |
+
or node in cf.node_replacements
|
| 254 |
+
or node in cf.replaced_uses
|
| 255 |
+
):
|
| 256 |
+
node.meta[META_TAG] = CONST_MODULE_TAG
|
| 257 |
+
else:
|
| 258 |
+
node.meta[META_TAG] = MODULE_TAG
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
def run_and_get_constant_graph(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
|
| 262 |
+
"""
|
| 263 |
+
Construct a GraphModule which corresponds to the part which could be
|
| 264 |
+
constant folded in provided gm.
|
| 265 |
+
"""
|
| 266 |
+
|
| 267 |
+
constant_graph_tag(gm)
|
| 268 |
+
# We rewrite the tags, if it's a constant being directly consumed, without
|
| 269 |
+
# any folding opportunity, we keep it in main gm.
|
| 270 |
+
for node in gm.graph.find_nodes(op="get_attr"):
|
| 271 |
+
used_to_fold = False
|
| 272 |
+
for u in node.users:
|
| 273 |
+
if u.meta[META_TAG] == CONST_MODULE_TAG:
|
| 274 |
+
used_to_fold = True
|
| 275 |
+
break
|
| 276 |
+
if not used_to_fold:
|
| 277 |
+
node.meta[META_TAG] = MODULE_TAG
|
| 278 |
+
|
| 279 |
+
new_graph = torch.fx.Graph()
|
| 280 |
+
|
| 281 |
+
node_remapping: Dict[torch.fx.Node, torch.fx.Node] = {}
|
| 282 |
+
output_nodes = []
|
| 283 |
+
for node in gm.graph.nodes:
|
| 284 |
+
if node.meta[META_TAG] == MODULE_TAG:
|
| 285 |
+
continue
|
| 286 |
+
|
| 287 |
+
new_node = new_graph.node_copy(node, lambda x: node_remapping[x])
|
| 288 |
+
node_remapping[node] = new_node
|
| 289 |
+
|
| 290 |
+
for user in node.users:
|
| 291 |
+
if user.meta[META_TAG] == MODULE_TAG:
|
| 292 |
+
output_nodes.append(new_node)
|
| 293 |
+
break
|
| 294 |
+
|
| 295 |
+
new_graph.output(tuple(output_nodes))
|
| 296 |
+
new_graph.lint()
|
| 297 |
+
new_gm = torch.fx.GraphModule(gm, new_graph)
|
| 298 |
+
|
| 299 |
+
return new_gm
|
janus/lib/python3.10/site-packages/torch/_export/passes/functionalize_side_effectful_ops_pass.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
from typing import Dict, Optional, Tuple, List
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse, PassResult, Argument
|
| 6 |
+
from torch._export.pass_infra.node_metadata import NodeMetadata
|
| 7 |
+
from torch._export.pass_infra.proxy_value import ProxyValue
|
| 8 |
+
from torch._ops import OpOverload
|
| 9 |
+
|
| 10 |
+
aten = torch.ops.aten
|
| 11 |
+
|
| 12 |
+
_NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS: Dict[OpOverload, OpOverload] = {
|
| 13 |
+
aten.sym_constrain_range.default: aten._functional_sym_constrain_range,
|
| 14 |
+
aten._assert_async.msg: aten._functional_assert_async.msg,
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class _FunctionalizeSideEffectfulOpsPass(_ExportPassBaseDeprecatedDoNotUse):
|
| 19 |
+
"""
|
| 20 |
+
Functionalize ops with side effect in graph module by replacing the op with
|
| 21 |
+
functional version of it. A new dependency token (`dep_token`) will be
|
| 22 |
+
created and propagated through functional ops to output.
|
| 23 |
+
For example:
|
| 24 |
+
```
|
| 25 |
+
def f(x):
|
| 26 |
+
sym_constrain_range(x.shape[0], min=1, max=3)
|
| 27 |
+
return x.add(3)
|
| 28 |
+
```
|
| 29 |
+
Will be transformed to:
|
| 30 |
+
```
|
| 31 |
+
def f(x):
|
| 32 |
+
dep_token0 = _make_dep_token()
|
| 33 |
+
dep_token1 = _functional_sym_constrain_range(
|
| 34 |
+
x.shape[0], min=1, max=3, dep_token=dep_token0
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
return x.add(3), dep_token1
|
| 38 |
+
```
|
| 39 |
+
"""
|
| 40 |
+
|
| 41 |
+
def __init__(self) -> None:
|
| 42 |
+
super().__init__()
|
| 43 |
+
self._dep_token: Optional[ProxyValue] = None
|
| 44 |
+
self._next_dep_token_index: Optional[int] = None
|
| 45 |
+
|
| 46 |
+
def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
|
| 47 |
+
# Early return if no non-functional assertions.
|
| 48 |
+
if not any(
|
| 49 |
+
n.target in _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS
|
| 50 |
+
for n in graph_module.graph.nodes
|
| 51 |
+
):
|
| 52 |
+
return PassResult(graph_module=graph_module, modified=False)
|
| 53 |
+
|
| 54 |
+
gm = copy.deepcopy(graph_module)
|
| 55 |
+
self._dep_token = None
|
| 56 |
+
self._next_dep_token_index = None
|
| 57 |
+
return super().call(gm)
|
| 58 |
+
|
| 59 |
+
def call_operator(
|
| 60 |
+
self,
|
| 61 |
+
op: OpOverload,
|
| 62 |
+
args: Tuple[Argument, ...],
|
| 63 |
+
kwargs: Dict[str, Argument],
|
| 64 |
+
meta: NodeMetadata,
|
| 65 |
+
) -> ProxyValue:
|
| 66 |
+
if op not in _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS:
|
| 67 |
+
return super().call_operator(op, args, kwargs, meta)
|
| 68 |
+
|
| 69 |
+
if self._dep_token is None:
|
| 70 |
+
self._dep_token = super().call_operator(
|
| 71 |
+
aten._make_dep_token,
|
| 72 |
+
args=(),
|
| 73 |
+
kwargs={},
|
| 74 |
+
meta=self._create_dummy_node_metadata(),
|
| 75 |
+
)
|
| 76 |
+
self._dep_token.node.name = "dep_token0"
|
| 77 |
+
self._next_dep_token_index = 1
|
| 78 |
+
|
| 79 |
+
self._dep_token = super().call_operator(
|
| 80 |
+
_NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS[op],
|
| 81 |
+
args=args,
|
| 82 |
+
kwargs={**kwargs, "dep_token": self._dep_token},
|
| 83 |
+
meta=meta,
|
| 84 |
+
)
|
| 85 |
+
assert self._next_dep_token_index is not None
|
| 86 |
+
self._dep_token.node.name = f"dep_token{self._next_dep_token_index}"
|
| 87 |
+
self._next_dep_token_index += 1
|
| 88 |
+
|
| 89 |
+
return self._dep_token
|
| 90 |
+
|
| 91 |
+
def output(self, results: List[Argument], meta: NodeMetadata) -> ProxyValue:
|
| 92 |
+
assert self._dep_token is not None
|
| 93 |
+
|
| 94 |
+
return super().output(results=(*results, self._dep_token), meta=meta) # type: ignore[arg-type]
|
janus/lib/python3.10/site-packages/torch/_export/passes/lift_constants_pass.py
ADDED
|
@@ -0,0 +1,318 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import collections
|
| 3 |
+
import warnings
|
| 4 |
+
from typing import Any, Dict, List, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch._export.verifier import SpecViolationError
|
| 8 |
+
from torch._guards import detect_fake_mode
|
| 9 |
+
from torch._library.fake_class_registry import FakeScriptObject
|
| 10 |
+
from torch._subclasses.fake_tensor import unset_fake_temporarily
|
| 11 |
+
from torch.export.exported_program import (
|
| 12 |
+
ArgumentSpec,
|
| 13 |
+
CustomObjArgument,
|
| 14 |
+
ExportGraphSignature,
|
| 15 |
+
InputKind,
|
| 16 |
+
InputSpec,
|
| 17 |
+
TensorArgument,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class ConstantAttrMap(collections.abc.MutableMapping):
|
| 22 |
+
"""A mapping class that understands how to use module constants (tensors,
|
| 23 |
+
ScriptObjects, FakeScriptObjects) as keys. We store tensors and FakeScriptObjects normally,
|
| 24 |
+
but ScriptObjects are stored by hash, because different torch.ScriptObjects can point to
|
| 25 |
+
the same underlying value (but we guarantee that they will `hash()` to the same value
|
| 26 |
+
if that's the case).
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
def __init__(self) -> None:
|
| 30 |
+
# Underlying dict that we use to implement this mapping.
|
| 31 |
+
self._constant_attrs: Dict[
|
| 32 |
+
Union[int, torch.Tensor, FakeScriptObject], List[Any]
|
| 33 |
+
] = {}
|
| 34 |
+
# Map from the hash(ScriptObject) to the ScriptObject itself. Used for
|
| 35 |
+
# APIs like `__iter__` that should look like they're returning the
|
| 36 |
+
# original ScriptObjects.
|
| 37 |
+
self._script_object_map: Dict[int, torch.ScriptObject] = {}
|
| 38 |
+
|
| 39 |
+
def __getitem__(
|
| 40 |
+
self, key: Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]
|
| 41 |
+
) -> Any:
|
| 42 |
+
real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
|
| 43 |
+
assert isinstance(real_key, (int, torch.Tensor, FakeScriptObject))
|
| 44 |
+
return self._constant_attrs[real_key]
|
| 45 |
+
|
| 46 |
+
def __setitem__(self, key: Union[torch.Tensor, torch.ScriptObject], value):
|
| 47 |
+
# we shouldn't actually call this, should go to add() instead to handle aliasing
|
| 48 |
+
raise NotImplementedError(
|
| 49 |
+
"""Directly setting values for ConstantAttrMap is not supported, please use add(key, value) instead.
|
| 50 |
+
The same key can be mapped to multiple values, for handling constant aliasing."""
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
def add(
|
| 54 |
+
self, key: Union[torch.Tensor, torch.ScriptObject, FakeScriptObject], value: Any
|
| 55 |
+
) -> None:
|
| 56 |
+
if isinstance(key, torch.ScriptObject):
|
| 57 |
+
if hash(key) not in self._constant_attrs:
|
| 58 |
+
self._constant_attrs[hash(key)] = []
|
| 59 |
+
self._constant_attrs[hash(key)].append(value)
|
| 60 |
+
self._script_object_map[hash(key)] = key
|
| 61 |
+
elif isinstance(key, (torch.Tensor, FakeScriptObject)):
|
| 62 |
+
if key not in self._constant_attrs:
|
| 63 |
+
self._constant_attrs[key] = []
|
| 64 |
+
self._constant_attrs[key].append(value)
|
| 65 |
+
else:
|
| 66 |
+
raise TypeError(
|
| 67 |
+
f"Expected key to be a tensor or ScriptObject, got {type(key)}"
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
def __delitem__(self, key):
|
| 71 |
+
real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
|
| 72 |
+
|
| 73 |
+
del self._constant_attrs[real_key]
|
| 74 |
+
|
| 75 |
+
def __iter__(self):
|
| 76 |
+
for key in self._constant_attrs:
|
| 77 |
+
if isinstance(key, int):
|
| 78 |
+
yield self._script_object_map[key]
|
| 79 |
+
else:
|
| 80 |
+
yield key
|
| 81 |
+
|
| 82 |
+
def __len__(self):
|
| 83 |
+
return len(self._constant_attrs)
|
| 84 |
+
|
| 85 |
+
def __contains__(self, key: object) -> bool:
|
| 86 |
+
real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
|
| 87 |
+
return real_key in self._constant_attrs
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def get_constant_fqn(node: torch.fx.Node, constant_name: str) -> str:
|
| 91 |
+
# The FQN of the constant tensor in the state dict should
|
| 92 |
+
# correspond to the module where the constant tensor was
|
| 93 |
+
# originally used.
|
| 94 |
+
if len(node.meta["nn_module_stack"]) == 0:
|
| 95 |
+
return constant_name
|
| 96 |
+
parent_fqn = list(node.meta["nn_module_stack"].values())[-1][0]
|
| 97 |
+
if len(parent_fqn) > 0:
|
| 98 |
+
return f"{parent_fqn}.{constant_name}"
|
| 99 |
+
else:
|
| 100 |
+
return constant_name
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _get_first_fqn(
|
| 104 |
+
const_attrs: ConstantAttrMap,
|
| 105 |
+
key: Union[torch.Tensor, torch.ScriptObject, FakeScriptObject],
|
| 106 |
+
) -> Any:
|
| 107 |
+
fqns = const_attrs.get(key)
|
| 108 |
+
return fqns[0] if fqns else None
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def lift_constants_pass(
|
| 112 |
+
gm: torch.fx.GraphModule,
|
| 113 |
+
graph_signature: ExportGraphSignature,
|
| 114 |
+
constant_attrs: ConstantAttrMap,
|
| 115 |
+
) -> Dict[str, Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]]:
|
| 116 |
+
"""
|
| 117 |
+
Takes a graph module, graph signature, and modifies them implace to lift any
|
| 118 |
+
constants (tensors or custom classes) as inputs to the graph. Returns a
|
| 119 |
+
dictionary of names to constants.
|
| 120 |
+
|
| 121 |
+
Arguments:
|
| 122 |
+
gm (torch.fx.GraphModule): The graph module containing the graph and constants to lift.
|
| 123 |
+
graph_signature (ExportGraphSignature): This graph signature will be
|
| 124 |
+
mutated to add additional CONSTANT_TENSOR and CUSTOM_OBJ inputs.
|
| 125 |
+
constant_attrs (ConstantAttr): A mapping from a constant value to its
|
| 126 |
+
fully-qualified path in `gm`. This is used to maintain consistent
|
| 127 |
+
location of constants between the original module and the exported
|
| 128 |
+
version.
|
| 129 |
+
|
| 130 |
+
Returns:
|
| 131 |
+
A dictionary of fqn => constant value.
|
| 132 |
+
"""
|
| 133 |
+
all_constants: Dict[
|
| 134 |
+
str, Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]
|
| 135 |
+
] = {}
|
| 136 |
+
|
| 137 |
+
inputs = graph_signature.input_specs
|
| 138 |
+
num_custom_obj = sum(
|
| 139 |
+
input_specs.kind == InputKind.CUSTOM_OBJ for input_specs in inputs
|
| 140 |
+
)
|
| 141 |
+
num_tensor_constants = sum(
|
| 142 |
+
input_specs.kind == InputKind.CONSTANT_TENSOR for input_specs in inputs
|
| 143 |
+
)
|
| 144 |
+
|
| 145 |
+
fake_mode = detect_fake_mode(
|
| 146 |
+
tuple(node.meta["val"] for node in gm.graph.nodes if node.op == "placeholder")
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
first_user_input_loc, first_user_input = 0, None
|
| 150 |
+
for node in gm.graph.nodes:
|
| 151 |
+
if node.op == "placeholder" and node.name in graph_signature.user_inputs:
|
| 152 |
+
first_user_input = node
|
| 153 |
+
break
|
| 154 |
+
first_user_input_loc += 1
|
| 155 |
+
|
| 156 |
+
lifted_objs = ConstantAttrMap()
|
| 157 |
+
for node in gm.graph.nodes:
|
| 158 |
+
if node.op == "get_attr":
|
| 159 |
+
constant_val = getattr(gm, node.target)
|
| 160 |
+
if constant_val in lifted_objs:
|
| 161 |
+
# We already lifted this constant elsewhere. Just rewrite uses
|
| 162 |
+
# of this get_attr to point to the already-existing placeholder
|
| 163 |
+
# node.
|
| 164 |
+
const_placeholder_node = _get_first_fqn(lifted_objs, constant_val)
|
| 165 |
+
node.replace_all_uses_with(const_placeholder_node)
|
| 166 |
+
gm.graph.erase_node(node)
|
| 167 |
+
continue
|
| 168 |
+
|
| 169 |
+
# For ScriptObject, Tensor and FakeScriptObject constants:
|
| 170 |
+
# First check if the constant was an attribute on some module by
|
| 171 |
+
# consulting `constant_attrs` map. If it is, use the fqn that keeps
|
| 172 |
+
# its location consistent with the eager module.
|
| 173 |
+
#
|
| 174 |
+
# If it's not in the `constant_attrs` map, that means it's an inline
|
| 175 |
+
# constant (e.g. x + torch.tensor(0)), and thus did not have a
|
| 176 |
+
# specific location in the eager module. In that case, just generate
|
| 177 |
+
# some name and attach it to the module in which it was used.
|
| 178 |
+
if isinstance(constant_val, (torch.ScriptObject, FakeScriptObject)):
|
| 179 |
+
constant_kind = InputKind.CUSTOM_OBJ
|
| 180 |
+
constant_fqn = _get_first_fqn(constant_attrs, constant_val)
|
| 181 |
+
if constant_fqn is not None:
|
| 182 |
+
constant_name = constant_fqn.replace(".", "_")
|
| 183 |
+
else:
|
| 184 |
+
constant_name = f"lifted_custom_{num_custom_obj}"
|
| 185 |
+
constant_fqn = get_constant_fqn(node, constant_name)
|
| 186 |
+
num_custom_obj += 1
|
| 187 |
+
elif isinstance(constant_val, torch.Tensor):
|
| 188 |
+
# Remove the parameterness of constant_val
|
| 189 |
+
if isinstance(constant_val, torch.nn.Parameter):
|
| 190 |
+
warnings.warn(
|
| 191 |
+
f"{node.target} created when tracing {node.meta['stack_trace']} is a parameter. But"
|
| 192 |
+
f"it's not registered with register_parameter(). export will treat it as a constant tensor"
|
| 193 |
+
)
|
| 194 |
+
# We get the real data out of the parameter by disabling the surrounding fake mode.
|
| 195 |
+
with unset_fake_temporarily():
|
| 196 |
+
constant_val = constant_val.data
|
| 197 |
+
constant_kind = InputKind.CONSTANT_TENSOR
|
| 198 |
+
constant_fqn = _get_first_fqn(constant_attrs, constant_val)
|
| 199 |
+
if constant_fqn is not None:
|
| 200 |
+
constant_name = constant_fqn.replace(".", "_")
|
| 201 |
+
else:
|
| 202 |
+
constant_name = f"lifted_tensor_{num_tensor_constants}"
|
| 203 |
+
constant_fqn = get_constant_fqn(node, constant_name)
|
| 204 |
+
num_tensor_constants += 1
|
| 205 |
+
elif isinstance(constant_val, torch.fx.GraphModule):
|
| 206 |
+
continue
|
| 207 |
+
elif "LoweredBackendModule" in type(constant_val).__name__:
|
| 208 |
+
continue
|
| 209 |
+
else:
|
| 210 |
+
raise SpecViolationError(
|
| 211 |
+
f"getattr node {node} referencing unsupported type {type(constant_val)}"
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
with gm.graph.inserting_before(first_user_input):
|
| 215 |
+
# Insert the constant node before the first user input
|
| 216 |
+
const_placeholder_node = gm.graph.placeholder(constant_name)
|
| 217 |
+
# match target name with its node name in case there is name collision
|
| 218 |
+
# and suffix is added to node name in fx
|
| 219 |
+
const_placeholder_node.target = const_placeholder_node.name
|
| 220 |
+
|
| 221 |
+
for k, v in node.meta.items():
|
| 222 |
+
const_placeholder_node.meta[k] = v
|
| 223 |
+
|
| 224 |
+
# Once the FQN has been used, remove nn_module_stack, stack_trace
|
| 225 |
+
const_placeholder_node.meta.pop("nn_module_stack")
|
| 226 |
+
const_placeholder_node.meta.pop("stack_trace", None)
|
| 227 |
+
|
| 228 |
+
input_spec_arg: ArgumentSpec
|
| 229 |
+
if isinstance(constant_val, torch.Tensor):
|
| 230 |
+
if fake_mode is not None:
|
| 231 |
+
const_placeholder_node.meta["val"] = fake_mode.from_tensor(
|
| 232 |
+
constant_val, static_shapes=True
|
| 233 |
+
)
|
| 234 |
+
const_placeholder_node.meta["val"].constant = constant_val
|
| 235 |
+
else:
|
| 236 |
+
const_placeholder_node.meta["val"] = constant_val
|
| 237 |
+
input_spec_arg = TensorArgument(name=const_placeholder_node.name)
|
| 238 |
+
elif isinstance(constant_val, torch._C.ScriptObject):
|
| 239 |
+
class_fqn = constant_val._type().qualified_name() # type: ignore[attr-defined]
|
| 240 |
+
const_placeholder_node.meta["val"] = CustomObjArgument(
|
| 241 |
+
constant_fqn, class_fqn
|
| 242 |
+
)
|
| 243 |
+
input_spec_arg = CustomObjArgument(
|
| 244 |
+
name=const_placeholder_node.name, class_fqn=class_fqn
|
| 245 |
+
)
|
| 246 |
+
elif isinstance(constant_val, FakeScriptObject):
|
| 247 |
+
class_fqn = constant_val.script_class_name
|
| 248 |
+
const_placeholder_node.meta["val"] = CustomObjArgument(
|
| 249 |
+
constant_fqn, class_fqn, constant_val
|
| 250 |
+
)
|
| 251 |
+
input_spec_arg = CustomObjArgument(
|
| 252 |
+
name=const_placeholder_node.name,
|
| 253 |
+
class_fqn=class_fqn,
|
| 254 |
+
fake_val=constant_val,
|
| 255 |
+
)
|
| 256 |
+
else:
|
| 257 |
+
raise SpecViolationError(
|
| 258 |
+
f"tried to lift unsupported type {type(constant_val)} from node {node.format_node()}"
|
| 259 |
+
)
|
| 260 |
+
|
| 261 |
+
lifted_objs.add(constant_val, const_placeholder_node)
|
| 262 |
+
node.replace_all_uses_with(const_placeholder_node)
|
| 263 |
+
gm.graph.erase_node(node)
|
| 264 |
+
|
| 265 |
+
# Add the constant as a buffer to the graph signature
|
| 266 |
+
graph_signature.input_specs.insert(
|
| 267 |
+
first_user_input_loc,
|
| 268 |
+
InputSpec(
|
| 269 |
+
kind=constant_kind,
|
| 270 |
+
arg=input_spec_arg,
|
| 271 |
+
target=constant_fqn,
|
| 272 |
+
),
|
| 273 |
+
)
|
| 274 |
+
if constant_val in constant_attrs:
|
| 275 |
+
for fqn in constant_attrs[constant_val]:
|
| 276 |
+
all_constants[fqn] = constant_val
|
| 277 |
+
else:
|
| 278 |
+
all_constants[constant_fqn] = constant_val
|
| 279 |
+
first_user_input_loc += 1
|
| 280 |
+
|
| 281 |
+
return all_constants
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def rewrite_script_object_meta(
|
| 285 |
+
gm: torch.fx.GraphModule,
|
| 286 |
+
) -> Dict[str, Union[torch.Tensor, torch.ScriptObject, FakeScriptObject],]:
|
| 287 |
+
"""When tracing, we produce a graph with FakeScriptObject in the
|
| 288 |
+
meta["val"].
|
| 289 |
+
|
| 290 |
+
For now, we rewrie meta["val"] to be a placeholder CustomObjArgument
|
| 291 |
+
"""
|
| 292 |
+
constants: Dict[
|
| 293 |
+
str,
|
| 294 |
+
Union[
|
| 295 |
+
torch.Tensor,
|
| 296 |
+
torch.ScriptObject,
|
| 297 |
+
FakeScriptObject,
|
| 298 |
+
],
|
| 299 |
+
] = {}
|
| 300 |
+
for node in gm.graph.nodes:
|
| 301 |
+
if "val" not in node.meta:
|
| 302 |
+
continue
|
| 303 |
+
|
| 304 |
+
old_meta = node.meta["val"]
|
| 305 |
+
|
| 306 |
+
if isinstance(old_meta, torch.ScriptObject):
|
| 307 |
+
class_fqn = old_meta._type().qualified_name() # type: ignore[attr-defined]
|
| 308 |
+
new_meta = CustomObjArgument(node.name, class_fqn)
|
| 309 |
+
constants[node.name] = old_meta
|
| 310 |
+
node.meta["val"] = new_meta
|
| 311 |
+
|
| 312 |
+
elif isinstance(old_meta, FakeScriptObject):
|
| 313 |
+
class_fqn = old_meta.script_class_name # type: ignore[attr-defined]
|
| 314 |
+
new_meta = CustomObjArgument(node.name, class_fqn, old_meta)
|
| 315 |
+
constants[node.name] = old_meta
|
| 316 |
+
node.meta["val"] = new_meta
|
| 317 |
+
|
| 318 |
+
return constants
|
janus/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
from torch.fx.passes.infra.pass_base import PassBase, PassResult
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class _RemoveRuntimeAssertionsPass(PassBase):
|
| 7 |
+
"""
|
| 8 |
+
Remove runtime assertions inserted by the
|
| 9 |
+
_AddRuntimeAssertionsForInlineConstraintsPass.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
def call(self, graph_module) -> PassResult:
|
| 13 |
+
modified = False
|
| 14 |
+
for module in graph_module.modules():
|
| 15 |
+
if not isinstance(module, torch.fx.GraphModule):
|
| 16 |
+
continue
|
| 17 |
+
for node in module.graph.nodes:
|
| 18 |
+
if node.target == torch.ops.aten._assert_async.msg:
|
| 19 |
+
assert_async_node = node
|
| 20 |
+
if len(assert_async_node.users) > 0:
|
| 21 |
+
continue
|
| 22 |
+
module.graph.erase_node(assert_async_node)
|
| 23 |
+
# the upstream scalar_tensor <- {le, ge} <- sym_size
|
| 24 |
+
# linear chain of nodes of nodes is removed by the
|
| 25 |
+
# downstream dead code elimination
|
| 26 |
+
modified = True
|
| 27 |
+
return PassResult(graph_module, modified)
|
janus/lib/python3.10/site-packages/torch/_export/passes/replace_autocast_with_hop_pass.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch._higher_order_ops.wrap import wrap_with_autocast
|
| 6 |
+
|
| 7 |
+
from ..utils import node_inline_, nodes_filter, nodes_first, sequential_split
|
| 8 |
+
from .replace_with_hop_pass_util import (
|
| 9 |
+
_replace_with_hop_helper,
|
| 10 |
+
_replace_with_hop_pass_helper,
|
| 11 |
+
_sequential_split_and_maybe_inline_subgraphs_helper,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _is_autocast_node(node: torch.fx.Node):
|
| 16 |
+
return (
|
| 17 |
+
node
|
| 18 |
+
and node.op == "call_function"
|
| 19 |
+
and node.target
|
| 20 |
+
in [
|
| 21 |
+
torch.amp.autocast_mode._enter_autocast,
|
| 22 |
+
torch.amp.autocast_mode._exit_autocast,
|
| 23 |
+
]
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _is_enter_autocast_node(node: torch.fx.Node):
|
| 28 |
+
return (
|
| 29 |
+
node
|
| 30 |
+
and node.op == "call_function"
|
| 31 |
+
and node.target == torch.amp.autocast_mode._enter_autocast
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _is_exit_autocast_node(node: torch.fx.Node):
|
| 36 |
+
return (
|
| 37 |
+
node
|
| 38 |
+
and node.op == "call_function"
|
| 39 |
+
and node.target == torch.amp.autocast_mode._exit_autocast
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def _is_autocast_sub_mod(node: torch.fx.Node):
    """
    Check if the first non-placeholder node is `torch.amp.autocast_mode._enter_autocast`.
    """
    if node.op != "call_module":
        return False
    assert isinstance(node.target, str)
    submodule = getattr(node.graph.owning_module, node.target)
    lead = nodes_first(submodule.graph.nodes, lambda n: n.op != "placeholder")
    if not lead or lead.op != "call_function":
        return False
    # TODO: check if current auto-cast type is the same as the args of
    # _enter_autocast. If so, return False, i.e. do not create a submodule.
    return lead.target == torch.amp.autocast_mode._enter_autocast
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def _check_valid_autocast_block(enter_autocast_node, exit_autocast_node):
    """Sanity-check that the two nodes form a matching autocast enter/exit pair."""
    assert _is_enter_autocast_node(enter_autocast_node)
    assert _is_exit_autocast_node(exit_autocast_node)
    # The exit call must refer back to this exact enter node as its argument.
    assert exit_autocast_node.args[0] == enter_autocast_node
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def _replace_with_hop(node: torch.fx.Node):
    """Replace an autocast `call_module` node with a `wrap_with_autocast` HOP.

    Validates the submodule's outermost enter/exit autocast pair, rewrites the
    call site via `_replace_with_hop_helper`, then erases the now-redundant
    enter/exit nodes from the subgraph.
    """
    assert node.op == "call_module"
    graph: torch.fx.Graph = node.graph
    gm: torch.fx.GraphModule = graph.owning_module
    assert isinstance(node.target, str)
    sub_gm = getattr(gm, node.target)
    sub_graph = sub_gm.graph
    autocast_nodes = nodes_filter(sub_graph.nodes, _is_autocast_node)
    if len(autocast_nodes) > 0:
        assert len(autocast_nodes) > 1  # need at least an enter node and an exit node
        # First/last autocast nodes are taken as the outermost enter/exit pair
        # of this submodule; _check_valid_autocast_block asserts they match.
        enter_autocast_node = autocast_nodes[0]
        exit_autocast_node = autocast_nodes[-1]
        _check_valid_autocast_block(enter_autocast_node, exit_autocast_node)

        _replace_with_hop_helper(
            node, enter_autocast_node, _is_autocast_node, wrap_with_autocast
        )
        # Erase exit before enter: the exit node holds a use of the enter node
        # (exit_autocast_node.args[0] == enter_autocast_node).
        sub_graph.erase_node(exit_autocast_node)
        sub_graph.erase_node(enter_autocast_node)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def _split_autocast(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """
    split_autocast creates a new graph module that splits the input graph module into multiple submodules
    based on the `_enter_autocast` and `_exit_autocast` nodes. It doesn't mutate the input graph module.

    Nodes between the **outer-most** `_enter_autocast` and `_exit_autocast(_enter_autocast)` are splitted
    into a submodule. Nested autocast regions are not splitted.
    `_enter_autocast` and `_exit_autocast(_enter_autocast)` nodes are in the submodule as well.

    Below is an example of splitting. A, B, C, D, E are blocks of non-autocast nodes in the original graph
    module. Nodes marked with the same number are grouped into the same submodule.
        A               # 0
        enter_autocast  # 1
        B               # 1
        exit_autocast   # 1
        C               # 2
        enter_autocast  # 3
        D               # 3
        exit_autocast   # 3
        E               # 4
    """
    # Stack of currently-open _enter_autocast nodes; non-empty means we are
    # inside an autocast region (possibly nested).
    enter_autocast_node_stack: List[torch.fx.Node] = []
    # Set when the outermost _exit_autocast has just been seen, so the *next*
    # node starts a fresh submodule.
    first_node_after_outer_most_exit: bool = False

    def node_call_back(node: torch.fx.Node):
        # Returns True when *node* should begin a new submodule.
        nonlocal enter_autocast_node_stack, first_node_after_outer_most_exit
        if first_node_after_outer_most_exit or (
            len(enter_autocast_node_stack) == 0 and _is_enter_autocast_node(node)
        ):
            # Either the node right after an outermost exit, or an outermost
            # enter: both open a new submodule.
            assert len(enter_autocast_node_stack) == 0
            first_node_after_outer_most_exit = False
            if _is_enter_autocast_node(node):
                enter_autocast_node_stack.append(node)
            return True
        if _is_enter_autocast_node(node):
            # Nested enter: track it but do not split.
            enter_autocast_node_stack.append(node)
        if _is_exit_autocast_node(node):
            assert len(enter_autocast_node_stack) > 0
            last_enter_autocast_node = enter_autocast_node_stack.pop()
            # Exits must pair up LIFO with their enter nodes.
            assert node.args[0] == last_enter_autocast_node
            if len(enter_autocast_node_stack) == 0:
                # next node should be in the next submodule since
                # autocast block ends
                first_node_after_outer_most_exit = True
        return False

    return sequential_split(gm, node_call_back)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def _sequential_split_and_maybe_inline_subgraphs(
    gm: torch.fx.GraphModule, graph_signature
):
    """
    Helper function for replace_autocast_with_hop_pass().
    Split the graph module into multiple subgraphs based on the autocast nodes.
    For each subgraph, decides whether to construct a HOO subgraph, or inline the calls
    back into the parent graph module.
    Nodes between `_enter_autocast` and `_exit_autocast(_enter_autocast)` are considered
    as a subgraph.
    """
    if not any(_is_autocast_node(n) for n in gm.graph.nodes):
        # No autocast enter/exit calls anywhere: nothing to rewrite.
        return gm, graph_signature

    # split_autocast returns a new graph module that could have different output
    # arg names; the graph signature is fixed up inside
    # `_sequential_split_and_maybe_inline_subgraphs_helper`.
    split_gm = _split_autocast(gm)

    def _inline_or_replace(node: torch.fx.Node):
        if _is_autocast_sub_mod(node):
            _replace_with_hop(node)
            return
        assert node.op == "call_module"
        assert isinstance(node.target, str)
        node_inline_(node)

    return _sequential_split_and_maybe_inline_subgraphs_helper(
        split_gm, graph_signature, _inline_or_replace
    )
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def replace_autocast_with_hop_pass(gm: torch.fx.GraphModule, graph_signature):
    """Replace autocast regions in *gm* with higher-order-op subgraphs.

    Splits gm into sub-graph-modules using
    `_sequential_split_and_maybe_inline_subgraphs`, and then recursively calls
    itself on each of the submodules (via the shared helper).
    """
    return _replace_with_hop_pass_helper(
        gm, graph_signature, _sequential_split_and_maybe_inline_subgraphs
    )
|
janus/lib/python3.10/site-packages/torch/_export/passes/replace_quantized_ops_with_standard_ops_pass.py
ADDED
|
@@ -0,0 +1,673 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import logging
|
| 3 |
+
import operator
|
| 4 |
+
from typing import List, Optional, Tuple, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.export._trace
|
| 8 |
+
from torch._ops import OpOverload
|
| 9 |
+
from torch.ao.quantization.fx._decomposed import (
|
| 10 |
+
dequantize_per_channel,
|
| 11 |
+
dequantize_per_tensor,
|
| 12 |
+
quantize_per_tensor,
|
| 13 |
+
)
|
| 14 |
+
from torch.ao.quantization.utils import calculate_qmin_qmax
|
| 15 |
+
from torch.fx.graph_module import _assign_attr
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
log = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
# Those values will need to be carried over multiple operators.
|
| 21 |
+
_INPUT_Q_DTYPE: Optional[Union[torch.dtype, torch.fx.Node]] = None
|
| 22 |
+
_SCALE: Optional[Union[float, torch.fx.Node]] = None
|
| 23 |
+
_ZERO_POINT: Optional[Union[float, torch.fx.Node]] = None
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def int_to_valid_dtype(val: int) -> torch.dtype:
|
| 27 |
+
from torch._export.converter import _TORCH_ENUM_TO_DTYPE # No circular import.
|
| 28 |
+
|
| 29 |
+
if isinstance(val, torch.dtype):
|
| 30 |
+
return val
|
| 31 |
+
dtype = _TORCH_ENUM_TO_DTYPE[val]
|
| 32 |
+
if dtype == torch.quint8:
|
| 33 |
+
return torch.uint8
|
| 34 |
+
elif dtype == torch.qint8:
|
| 35 |
+
return torch.int8
|
| 36 |
+
return dtype
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def fx_enum_to_dtype(gm: torch.fx.GraphModule, val: int) -> torch.fx.Node:
    """Emit a node that converts a dtype enum value to a valid torch.dtype at runtime."""
    conversion_args = (val,)
    return gm.graph.call_function(int_to_valid_dtype, conversion_args)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def insert_quantized_node(
    gm: torch.fx.GraphModule,
    val_node: torch.fx.Node,
    scale_node: Union[float, torch.fx.Node],
    zero_point_node: Union[float, torch.fx.Node],
    qmin_node: Union[float, int, torch.fx.Node],
    qmax_node: Union[float, int, torch.fx.Node],
    dtype_node: Union[torch.dtype, torch.fx.Node],
    qscheme: Optional[torch.qscheme],
) -> torch.fx.Node:
    """Append a `quantize_per_tensor` call_function node to gm's graph.

    NOTE(review): `qscheme` is accepted but unused here — per-tensor
    quantization is always emitted.
    """
    quantize_args = (
        val_node,
        scale_node,
        zero_point_node,
        qmin_node,
        qmax_node,
        dtype_node,
    )
    return gm.graph.call_function(quantize_per_tensor, quantize_args)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def get_dequantized(
    val: torch.Tensor,
    scale: Union[float, torch.Tensor],
    zero_point: Union[float, torch.Tensor],
    qmin: Union[float, int],
    qmax: Union[float, int],
    dtype: torch.dtype,
    axis: Optional[int],
    qscheme: Optional[torch.qscheme],
) -> torch.Tensor:
    """Eagerly dequantize *val* according to *qscheme*.

    Raises:
        RuntimeError: for schemes other than per-tensor / per-channel affine.
    """
    if qscheme is torch.per_tensor_affine:
        return dequantize_per_tensor(val, scale, zero_point, qmin, qmax, dtype)
    if qscheme is torch.per_channel_affine:
        return dequantize_per_channel(val, scale, zero_point, axis, qmin, qmax, dtype)
    raise RuntimeError(f"Unsupported dequantization scheme: {qscheme}")
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def insert_dequantized_node(
    gm: torch.fx.GraphModule,
    val_node: torch.fx.Node,
    scale_node: Union[float, torch.fx.Node],
    zero_point_node: Union[float, torch.fx.Node],
    qmin_node: Union[float, int, torch.fx.Node],
    qmax_node: Union[float, int, torch.fx.Node],
    dtype_node: Union[torch.dtype, torch.fx.Node],
    axis_node: Optional[Union[int, torch.fx.Node]],
    qscheme: Optional[torch.qscheme],
) -> torch.fx.Node:
    """Append a dequantize call_function node matching *qscheme* to gm's graph.

    Raises:
        RuntimeError: for schemes other than per-tensor / per-channel affine.
    """
    if qscheme is torch.per_tensor_affine:
        per_tensor_args = (
            val_node,
            scale_node,
            zero_point_node,
            qmin_node,
            qmax_node,
            dtype_node,
        )
        return gm.graph.call_function(dequantize_per_tensor, per_tensor_args)
    if qscheme is torch.per_channel_affine:
        # Per-channel additionally threads the channel axis through.
        per_channel_args = (
            val_node,
            scale_node,
            zero_point_node,
            axis_node,
            qmin_node,
            qmax_node,
            dtype_node,
        )
        return gm.graph.call_function(dequantize_per_channel, per_channel_args)
    raise RuntimeError(f"Unsupported dequantization scheme: {qscheme}")
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def get_qmin_qmax(dtype: torch.dtype) -> Tuple[Union[int, float], Union[int, float]]:
    """Return the full representable (qmin, qmax) range for *dtype*."""
    # No customized range, no reduced range: use the dtype's full span.
    has_customized_qrange = False
    reduce_range = False
    return calculate_qmin_qmax(None, None, has_customized_qrange, dtype, reduce_range)  # type: ignore[arg-type]
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def insert_qmin_qmax_node(
    gm: torch.fx.GraphModule, dtype_node: Union[torch.dtype, torch.fx.Node]
) -> Tuple[torch.fx.Node, torch.fx.Node]:
    """Emit nodes computing (qmin, qmax) for a dtype resolved at runtime."""
    graph = gm.graph
    # calculate_qmin_qmax yields a (qmin, qmax) pair; unpack it with getitem nodes.
    pair_node = graph.call_function(
        calculate_qmin_qmax, (None, None, False, dtype_node, False)
    )
    qmin = graph.call_function(operator.getitem, (pair_node, 0))
    qmax = graph.call_function(operator.getitem, (pair_node, 1))
    return qmin, qmax
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def get_script_object(
    gm: torch.nn.Module, node: torch.fx.Node
) -> torch._C.ScriptObject:
    """Resolve a `get_attr` node to the ScriptObject it references on *gm*."""
    assert isinstance(node, torch.fx.Node)
    assert node.op == "get_attr"
    attr_name = node.target
    assert isinstance(attr_name, str)

    # Walk dotted attribute paths ("sub.mod.param") down from the root module.
    obj = gm
    for part in attr_name.split("."):
        obj = getattr(obj, part)
    assert isinstance(obj, torch._C.ScriptObject)
    return obj
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def insert_weight_and_bias_get_attr_node_from_get_attr_to_scriptobject(
    gm: torch.fx.GraphModule,
    param_node: torch.fx.Node,
) -> Tuple[torch.fx.Node, Optional[torch.fx.Node]]:
    """Directly inline tensor from a get_attr fx node."""
    packed = get_script_object(gm, param_node)
    # The packed-param ScriptObject carries (weight, bias).
    w_qtensor, b_qtensor = packed.unpack()  # type: ignore[attr-defined]
    prefix = f"dequantized_{param_node.target}"
    return insert_weight_and_bias_get_attr_node(
        gm, w_qtensor, b_qtensor, f"{prefix}_w", f"{prefix}_b"
    )
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def insert_weight_and_bias_get_attr_node_from_get_attr_to_qtensor(
    gm: torch.fx.GraphModule,
    get_attr_to_weight_node: torch.fx.Node,
    get_attr_to_bias_node: Optional[torch.fx.Node],
) -> Tuple[torch.fx.Node, Optional[torch.fx.Node]]:
    """Inline the weight (and optional bias) qtensors referenced by get_attr nodes."""
    assert isinstance(get_attr_to_weight_node.target, str)
    w_qtensor = getattr(gm, get_attr_to_weight_node.target)
    w_attr_name = f"dequantized_{get_attr_to_weight_node.target}_w"

    if get_attr_to_bias_node is None:
        # No bias node: propagate None with an empty attribute name.
        b_qtensor, b_attr_name = None, ""
    else:
        assert isinstance(get_attr_to_bias_node.target, str)
        b_qtensor = getattr(gm, get_attr_to_bias_node.target)
        b_attr_name = f"dequantized_{get_attr_to_bias_node.target}_b"

    return insert_weight_and_bias_get_attr_node(
        gm, w_qtensor, b_qtensor, w_attr_name, b_attr_name
    )
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def insert_weight_and_bias_get_attr_node(
    gm: torch.fx.GraphModule,
    w_qtensor: torch.Tensor,
    b_qtensor: Optional[torch.Tensor],
    w_attr_name: str,
    b_attr_name: str,
) -> Tuple[torch.fx.Node, Optional[torch.fx.Node]]:
    """Materialize dequantized weight/bias tensors as attrs plus get_attr nodes."""

    def _register(tensor: torch.Tensor, attr_name: str) -> torch.fx.Node:
        # Attach the tensor to the module, then reference it from the graph.
        _assign_attr(tensor, gm, attr_name)
        return gm.graph.get_attr(attr_name)

    w_node = _register(get_tensor_from_qtensor(w_qtensor), w_attr_name)
    if b_qtensor is None:
        return w_node, None
    # Bias is stored unquantized, so skip the dequantization step.
    b_node = _register(get_tensor_from_qtensor(b_qtensor, dequant=False), b_attr_name)
    return w_node, b_node
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def get_tensor_from_qtensor(
    qtensor: torch.Tensor, dequant: bool = True
) -> torch.Tensor:
    """Extract a plain tensor from a (possibly) quantized tensor.

    When *dequant* is True the integer representation is rescaled using the
    qtensor's scale/zero_point (per-tensor or per-channel); otherwise the raw
    values are returned unchanged (used for bias, which is stored unquantized).
    """
    # Manual conversion because qint8 is not used anymore.
    if qtensor.dtype in [torch.qint8, torch.quint8]:
        tensor = qtensor.int_repr()
    else:
        tensor = qtensor

    # Weights need dequantization with scaling and zero_point adjustment, but
    # bias does not need that.
    if dequant:
        qscheme = qtensor.qscheme()
        if qscheme == torch.per_channel_affine:
            scale, zero_point, axis = (
                qtensor.q_per_channel_scales(),
                qtensor.q_per_channel_zero_points(),
                qtensor.q_per_channel_axis(),
            )
        else:
            # NOTE(review): any non-per-channel scheme falls through to the
            # per-tensor accessors; get_dequantized rejects schemes other than
            # per_tensor_affine with a RuntimeError.
            scale, zero_point, axis = (
                qtensor.q_scale(),  # type: ignore[assignment]
                qtensor.q_zero_point(),  # type: ignore[assignment]
                None,
            )
        dtype = tensor.dtype
        qmin, qmax = get_qmin_qmax(dtype)
        return get_dequantized(
            tensor, scale, zero_point, qmin, qmax, dtype, axis, qscheme
        )
    return tensor
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def insert_fused_activation_node(
    gm: torch.fx.GraphModule, opname: str, fx_node: torch.fx.Node
) -> torch.fx.Node:
    """Append the activation fused into *opname* (currently only ReLU) after *fx_node*.

    Non-fused opnames return *fx_node* unchanged.
    """
    fused_relu_ops = ("conv1d_relu", "conv2d_relu", "linear_relu", "add_relu", "mul_relu")
    if opname in fused_relu_ops:
        return gm.graph.call_function(torch.ops.aten.relu, (fx_node,))
    return fx_node
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def _conv1d_op_with_squeeze(
|
| 269 |
+
inp: torch.Tensor,
|
| 270 |
+
weight: torch.Tensor,
|
| 271 |
+
bias: Optional[torch.Tensor],
|
| 272 |
+
stride: List[int],
|
| 273 |
+
padding: List[int],
|
| 274 |
+
dilation: List[int],
|
| 275 |
+
groups: int,
|
| 276 |
+
) -> torch.Tensor:
|
| 277 |
+
# In quantized version, conv1d is emulated using conv2d with squeeze and unsqueeze
|
| 278 |
+
# operations before and after the conv2d operation to match the dimension of weights.
|
| 279 |
+
# Reference: https://github.com/pytorch/pytorch/blob/eca0cb0fbe84bb0a34fa94afe261bceecd52c436/aten/src/ATen/native/quantized/cpu/qconv.cpp#L1827 # noqa: B950
|
| 280 |
+
s_inp = torch.ops.aten.unsqueeze(inp, 2)
|
| 281 |
+
conv1d_res = torch.ops.aten.conv2d(
|
| 282 |
+
s_inp,
|
| 283 |
+
weight,
|
| 284 |
+
bias,
|
| 285 |
+
stride,
|
| 286 |
+
padding,
|
| 287 |
+
dilation,
|
| 288 |
+
groups,
|
| 289 |
+
)
|
| 290 |
+
uns_conv1d_res = torch.ops.aten.squeeze(conv1d_res, 2)
|
| 291 |
+
return uns_conv1d_res
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def _transform_conv_with_packedparam(gm: torch.fx.GraphModule, node: torch.fx.Node):
    """Conv specific transformation function.

    Rewrites a quantized conv node into an aten conv call, inlining the packed
    weight/bias.  Returns (new_op_node, scale_node, zero_point_node) so the
    caller can re-quantize the output.
    """
    assert isinstance(node.target, torch._ops.OpOverload)
    opname = node.target._opname
    # Output quantization params live at fixed positions in the quantized op.
    scale_node, zero_point_node = node.args[2], node.args[3]

    # conv1d variants are emulated via conv2d plus squeeze/unsqueeze.
    op_f = (
        torch.ops.aten.conv2d
        if opname in ["conv2d", "conv2d_relu"]
        else _conv1d_op_with_squeeze
    )

    inp_node, param_node = node.args[0], node.args[1]
    assert isinstance(inp_node, torch.fx.Node)
    assert isinstance(param_node, torch.fx.Node)

    if param_node.op == "call_function":
        # Using Conv2dPrepackParam from conv_prepack.
        # We directly skip the packing call and inline weights and bias.
        w_node, b_node = param_node.args[0], param_node.args[1]
        assert isinstance(w_node, torch.fx.Node)
        assert b_node is None or isinstance(b_node, torch.fx.Node)
        (
            param_0,
            param_1,
        ) = insert_weight_and_bias_get_attr_node_from_get_attr_to_qtensor(
            gm, w_node, b_node
        )
        # Remaining prepack args (stride/padding/dilation/groups) pass through.
        op_res_node = gm.graph.call_function(
            op_f, (inp_node, param_0, param_1, *param_node.args[2:])
        )
    else:
        # Using ConvPrepackedParam.
        param = get_script_object(gm, param_node)
        (
            param_0,
            param_1,
        ) = insert_weight_and_bias_get_attr_node_from_get_attr_to_scriptobject(
            gm, param_node
        )  # type: ignore[assignment]
        # Conv geometry comes from the packed-param ScriptObject's accessors.
        op_res_node = gm.graph.call_function(
            op_f,
            (
                inp_node,
                param_0,
                param_1,
                param.stride(),  # type: ignore[attr-defined]
                param.padding(),  # type: ignore[attr-defined]
                param.dilation(),  # type: ignore[attr-defined]
                param.groups(),  # type: ignore[attr-defined]
            ),
        )
    return op_res_node, scale_node, zero_point_node
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def _transform_linear_with_packedparam(gm: torch.fx.GraphModule, node: torch.fx.Node):
    """Linear specific transformation function.

    Rewrites a quantized linear node into `aten.linear`, inlining the packed
    weight/bias.  Returns (new_op_node, scale_node, zero_point_node) so the
    caller can re-quantize the output.
    """
    # Output quantization params live at fixed positions in the quantized op.
    scale_node, zero_point_node = node.args[2], node.args[3]

    inp_node, param_node = node.args[0], node.args[1]
    assert isinstance(inp_node, torch.fx.Node)
    assert isinstance(param_node, torch.fx.Node)

    if param_node.op == "call_function":
        # Using LinearPrepackParam from linear_prepack.
        # We directly skip the packing call and inline weights and bias.
        w_node, b_node = param_node.args[0], param_node.args[1]
        assert isinstance(w_node, torch.fx.Node)
        assert b_node is None or isinstance(b_node, torch.fx.Node)
        (
            param_0,
            param_1,
        ) = insert_weight_and_bias_get_attr_node_from_get_attr_to_qtensor(
            gm, w_node, b_node
        )
        # Remaining prepack args pass through unchanged.
        op_res_node = gm.graph.call_function(
            torch.ops.aten.linear, (inp_node, param_0, param_1, *param_node.args[2:])
        )
    else:
        # Using LinearPackedParams.
        (
            param_0,
            param_1,
        ) = insert_weight_and_bias_get_attr_node_from_get_attr_to_scriptobject(
            gm, param_node
        )  # type: ignore[assignment]
        op_res_node = gm.graph.call_function(
            torch.ops.aten.linear, (inp_node, param_0, param_1)
        )
    return op_res_node, scale_node, zero_point_node
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def _transform_op_where_last_two_arguments_are_scale_and_zero_point(
|
| 387 |
+
gm: torch.fx.GraphModule, node: torch.fx.Node
|
| 388 |
+
):
|
| 389 |
+
"""
|
| 390 |
+
This transformation function can be used for function where the last two
|
| 391 |
+
parameters are scale and zero point. Additionally, the function's parameters
|
| 392 |
+
do not need any unpacking.
|
| 393 |
+
"""
|
| 394 |
+
to_standard_op = {
|
| 395 |
+
"mul": torch.ops.aten.mul,
|
| 396 |
+
"mul_relu": torch.ops.aten.mul,
|
| 397 |
+
"add": torch.ops.aten.add,
|
| 398 |
+
"add_relu": torch.ops.aten.add,
|
| 399 |
+
"softmax": torch.ops.aten.softmax,
|
| 400 |
+
"cat": torch.ops.aten.cat,
|
| 401 |
+
"hardswish": torch.ops.aten.hardswish,
|
| 402 |
+
}
|
| 403 |
+
|
| 404 |
+
assert isinstance(node.target, torch._ops.OpOverload)
|
| 405 |
+
opname, args = node.target._opname, node.args
|
| 406 |
+
scale_node, zero_point_node = args[-2], args[-1]
|
| 407 |
+
op_res_node = gm.graph.call_function(to_standard_op[opname], tuple(args[:-2]))
|
| 408 |
+
return op_res_node, scale_node, zero_point_node
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
def _transform_scalar_arithmetic(gm: torch.fx.GraphModule, node: torch.fx.Node):
    """Transform scalar overload for basic arithmetic."""
    scalar_op_for = {
        "mul": torch.ops.aten.mul.Scalar,
        "add": torch.ops.aten.add.Scalar,
    }
    assert isinstance(node.target, torch._ops.OpOverload)
    replacement = gm.graph.call_function(scalar_op_for[node.target._opname], node.args)
    # Scalar ops reuse the quantization params carried over from earlier ops
    # via the module-level _SCALE / _ZERO_POINT state.
    return replacement, _SCALE, _ZERO_POINT
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
def _transform_prepacked_op(gm: torch.fx.GraphModule, node: torch.fx.Node):
    """
    Transformation for functions under prepacked namespace, where they share
    the same handling logic that [...]OpContext contains all parameters.

    Returns the replacement aten call node (no scale/zero_point: prepacked ops
    are float ops).

    Raises:
        RuntimeError: for opnames other than conv2d_clamp_run / linear_clamp_run.
    """
    assert isinstance(node.target, torch._ops.OpOverload)
    opname, args = node.target._opname, node.args
    op_f = None
    if opname == "conv2d_clamp_run":
        op_f = torch.ops.aten.conv2d
    elif opname == "linear_clamp_run":
        op_f = torch.ops.aten.linear
    else:
        raise RuntimeError(f"Invalid operator {opname}")

    assert isinstance(args[1], torch.fx.Node)
    so = get_script_object(gm, args[1])

    # Rebuild the standard op's argument list: input first, then (weight, bias)
    # unpacked from the OpContext script object.
    func_args = []
    func_args += [args[0]]
    func_args += so.unpack()[:2]  # type: ignore[attr-defined]
    if opname == "conv2d_clamp_run":
        # conv2d additionally needs the geometry args stored in the context.
        func_args += torch.ops.prepacked.unpack_prepacked_sizes_conv2d(so)[2:]

    op_res_node = gm.graph.call_function(op_f, tuple(func_args))
    return op_res_node
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
def _transform_batch_norm(gm: torch.fx.GraphModule, node: torch.fx.Node):
|
| 452 |
+
args = node.args
|
| 453 |
+
scale_node, zero_point_node = args[-2], args[-1]
|
| 454 |
+
op_res_node = gm.graph.call_function(
|
| 455 |
+
torch.ops.aten.native_batch_norm, (*args[:-3], False, 0.1, args[-3])
|
| 456 |
+
)
|
| 457 |
+
op_res_node = gm.graph.call_function(operator.getitem, (op_res_node, 0))
|
| 458 |
+
return op_res_node, scale_node, zero_point_node
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
def fx_transform_quantized_op_to_standard_op(
    gm: torch.fx.GraphModule, node: torch.fx.Node
) -> torch.fx.Node:
    """
    Transform a single ``quantized::*`` node into its standard-op equivalent,
    re-quantize the result, and return the final dequantized node.

    The per-op transform returns the standard-op output node plus the output
    scale/zero-point nodes; a fused activation (if encoded in the op name) is
    appended, then a quantize/dequantize pair is inserted so downstream
    consumers keep receiving dequantized values.

    Side effects: updates the module-level ``_SCALE``/``_ZERO_POINT`` caches
    (used by ops that don't carry their own scale/zero-point, e.g. mul.Scalar)
    and reads ``_INPUT_Q_DTYPE``, which must have been set by a preceding
    ``aten.quantize_per_tensor``.

    Raises:
        RuntimeError: if the op/overload combination has no registered transform.
    """
    global _SCALE, _ZERO_POINT, _INPUT_Q_DTYPE

    assert isinstance(node.target, torch._ops.OpOverload)
    opname, overload = node.target._opname, node.target._overloadname

    key = f"{opname}.{overload}"
    opname_to_transform_f = {
        "conv1d.new": _transform_conv_with_packedparam,
        "conv1d_relu.new": _transform_conv_with_packedparam,
        "conv1d.default": _transform_conv_with_packedparam,
        "conv1d_relu.default": _transform_conv_with_packedparam,
        "conv2d.new": _transform_conv_with_packedparam,
        "conv2d_relu.new": _transform_conv_with_packedparam,
        "conv2d.default": _transform_conv_with_packedparam,
        "conv2d_relu.default": _transform_conv_with_packedparam,
        "linear.default": _transform_linear_with_packedparam,
        "linear_relu.default": _transform_linear_with_packedparam,
        "add.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point,
        "add_relu.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point,
        "mul.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point,
        "mul_relu.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point,
        "softmax.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point,
        "cat.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point,
        "hardswish.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point,
        "batch_norm2d.default": _transform_batch_norm,
        "mul.Scalar": _transform_scalar_arithmetic,
        "add.Scalar": _transform_scalar_arithmetic,
    }

    if key not in opname_to_transform_f:
        raise RuntimeError(f"Unsupported quantized op during transformation: {key}")

    op_res_node, scale_node, zero_point_node = opname_to_transform_f[key](gm, node)

    # Add fused activation layer.
    op_res_node = insert_fused_activation_node(gm, opname, op_res_node)
    _SCALE, _ZERO_POINT = scale_node, zero_point_node

    assert _INPUT_Q_DTYPE is not None
    qmin_node, qmax_node = insert_qmin_qmax_node(gm, _INPUT_Q_DTYPE)
    q_fx_node = insert_quantized_node(
        gm,
        op_res_node,
        scale_node,
        zero_point_node,
        qmin_node,
        qmax_node,
        _INPUT_Q_DTYPE,
        torch.per_tensor_affine,
    )
    dq_fx_node = insert_dequantized_node(
        gm,
        q_fx_node,
        scale_node,
        zero_point_node,
        qmin_node,
        qmax_node,
        _INPUT_Q_DTYPE,
        None,
        torch.per_tensor_affine,
    )
    return dq_fx_node
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
def replace_quantized_ops_with_standard_ops(gm: torch.fx.GraphModule):
    """
    Replace legacy quantized ops (aten.quantize_per_tensor, quantized.conv) with
    PT2 ops (quantize_decomposed.quantize_per_tensor, aten.conv).

    Before: x || -> aten.q || -> quantized.conv2d || -> quantized.linear || -> aten.dq || -> y

    After: x || -> qd.q -> qd.dq || -> aten.conv2d -> qd.q -> qd.dq || aten.linear -> qd.q -> qd.dq || -> y

    (qd == quantized_decomposed library, q = quantize, dq = dequantize)
                   ^
                   |
        getattr(w), getattr(b) from Conv2dParamPrepack

    During each iteration, the transformation spits out the transformed operator, its quantized output,
    and its dequantized value together. We did this because dequantization need to use the
    scale and zero point parameters from the quantization to recover the approximate original value. After each
    iteration, the new dequantization node will be used as the input to the next node (e.g., dq2 -> linear).

    For operators like conv2d and linear, their weights and bias are packed in a quantized format in the ScriptObject.
    During the transformation, we unpack those objects, get their dequantized tensor, populate those
    as attributes to the module, and use getattr to access them.

    One exception in the transformation is conv_prepack and linear_prepack. Those calls pack
    weight and bias constant tensors into ScriptObject, which are then used by subsequent conv2d or linear calls.
    During transformation, we directly skip transforming conv_prepack or linear_prepack. We check whether ScriptObject to the
    quantized::conv2d or linear is from conv_prepack or linear_prepack. If it is, we then inline those parameters
    to the operator by converting them to a getattr fx.node.

    For prepacked::conv2d_clamp_run and prepacked::linear_clamp_run, we directly convert them to aten.conv2d and aten.linear
    without the need of doing de/quantization.

    Three global variables defined are _INPUT_Q_DTYPE, _SCALE, _ZERO_POINT. _INPUT_Q_DTYPE determines the de/quantization
    data type, which is the same across the entire program, but it only shows up in the very first quantization
    call. _SCALE and _ZERO_POINT are used only when operators do not have those specified. E.g., mul.Scalar.
    """

    global _INPUT_Q_DTYPE

    # Set once we see any quantized/prepacked op; gates the final cleanup below.
    quantized = False

    # Tracks the most recent dequantized output so a later aten.dequantize can
    # be redirected to it (its original quantized input no longer exists).
    last_quantized_node = None
    for node in gm.graph.nodes:
        if isinstance(node.target, OpOverload):
            with gm.graph.inserting_before(node):
                namespace, opname = node.target.namespace, node.target._opname
                # quantized::* ops (except the prepack constructors, which are
                # skipped here and inlined at their use sites instead).
                if namespace == "quantized" and opname not in [
                    "conv_prepack",
                    "linear_prepack",
                ]:
                    quantized = True
                    fx_node = fx_transform_quantized_op_to_standard_op(gm, node)
                    node.replace_all_uses_with(fx_node)
                    last_quantized_node = fx_node
                elif namespace == "prepacked":
                    quantized = True
                    fx_node = _transform_prepacked_op(gm, node)
                    node.replace_all_uses_with(fx_node)
                    last_quantized_node = fx_node
                elif namespace == "aten" and opname == "quantize_per_tensor":
                    # Entry point of a quantized region: remember the dtype and
                    # emit the PT2 quantize/dequantize pair.
                    inp_node, scale_node, zero_point_node, dtype_node = node.args
                    dtype_node = fx_enum_to_dtype(gm, dtype_node)
                    _INPUT_Q_DTYPE = dtype_node
                    qmin_node, qmax_node = insert_qmin_qmax_node(gm, dtype_node)
                    q_fx_node = insert_quantized_node(
                        gm,
                        inp_node,
                        scale_node,
                        zero_point_node,
                        qmin_node,
                        qmax_node,
                        dtype_node,
                        torch.per_tensor_affine,
                    )
                    dq_fx_node = insert_dequantized_node(
                        gm,
                        q_fx_node,
                        scale_node,
                        zero_point_node,
                        qmin_node,
                        qmax_node,
                        dtype_node,
                        None,
                        torch.per_tensor_affine,
                    )
                    node.replace_all_uses_with(dq_fx_node)
                    last_quantized_node = dq_fx_node
                elif namespace == "aten" and opname == "dequantize":
                    # The matching quantized producer has already been rewritten;
                    # reuse its dequantized output.
                    assert last_quantized_node is not None
                    node.replace_all_uses_with(last_quantized_node)
                else:
                    last_quantized_node = node

    # Post-processing again to remove legacy ScriptObjects and quantizated tensors
    # stored as attributes or in the buffer. This is used to clean up the GraphModule
    # to not trigger tracing errors like missing __obj_flatten__ functions.
    def _clean_attr(mod: torch.nn.Module):
        # Strip ScriptObject attributes and quantized-dtype buffers from every
        # submodule; collected first to avoid mutating dicts mid-iteration.
        for submod in mod.modules():
            attr_names_to_clean = set()
            for k, v in submod.__dict__.items():
                if isinstance(v, torch.ScriptObject):
                    attr_names_to_clean.add(k)
                if k == "_buffers":
                    buffer_name_to_clean = set()
                    for b_name, b_value in v.items():
                        if isinstance(b_value, torch.Tensor) and b_value.dtype in [
                            torch.qint8,
                            torch.quint8,
                        ]:
                            buffer_name_to_clean.add(b_name)
                    for b_name in buffer_name_to_clean:
                        v.pop(b_name, None)
            for attr_name in attr_names_to_clean:
                delattr(submod, attr_name)

    if quantized:
        """
        TODO: SetAttr + quantized ops will result incorrect program. This flag is used to temporarily
        bypass test cases.

        The deadcode elimination pass is needed to remove legacy quantized ops. Otherwise, retracing
        will throw errors. However, the current way of SetAttr does inplace update to attributes, so
        this pass regard them as dead code and remove them. Below is an example of GraphModule before
        and after the dead code elimination pass.

        class GraphModule(torch.nn.Module):
            def forward(self, x_1):
                # No stacktrace found for following nodes
                data = self.data;  data = None
                data_1 = self.data
                add_tensor = torch.ops.aten.add.Tensor(data_1, x_1, alpha = 1);  data_1 = None
                data_2 = self.data
                copy_ = torch_Tensor_copy_(data_2, add_tensor);  data_2 = add_tensor = copy_ = None
                data_3 = self.data
                add_tensor_1 = torch.ops.aten.add.Tensor(x_1, data_3, alpha = 1);  x_1 = data_3 = None
                return add_tensor_1

        class GraphModule(torch.nn.Module):
            def forward(self, x_1):
                # No stacktrace found for following nodes
                data_3 = self.data
                add_tensor_1 = torch.ops.aten.add.Tensor(x_1, data_3, alpha = 1);  x_1 = data_3 = None
                return add_tensor_1
        """
        gm.graph.eliminate_dead_code()
        _clean_attr(gm)
|
janus/lib/python3.10/site-packages/torch/_export/passes/replace_set_grad_with_hop_pass.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch._higher_order_ops.wrap import wrap_with_set_grad_enabled
|
| 5 |
+
|
| 6 |
+
from ..utils import node_inline_, nodes_filter, nodes_first, nodes_map, sequential_split
|
| 7 |
+
from .replace_with_hop_pass_util import (
|
| 8 |
+
_replace_with_hop_helper,
|
| 9 |
+
_replace_with_hop_pass_helper,
|
| 10 |
+
_sequential_split_and_maybe_inline_subgraphs_helper,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _is_set_grad_enabled_node(node: torch.fx.Node):
|
| 15 |
+
return (
|
| 16 |
+
node
|
| 17 |
+
and node.op == "call_function"
|
| 18 |
+
and node.target == torch._C._set_grad_enabled
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def _is_set_grad_enabled_sub_mod(node: torch.fx.Node, omit_if_same_with_ambient=False):
    """
    Check whether *node* is a call_module whose submodule begins with a
    ``_set_grad_enabled`` call.

    With ``omit_if_same_with_ambient=True``, a submodule whose grad-mode
    argument matches the current ambient ``torch.is_grad_enabled()`` is
    reported as False (no rewrite needed).
    """
    if node.op != "call_module":
        return False
    assert isinstance(node.target, str)
    submodule = getattr(node.graph.owning_module, node.target)
    first_non_ph = nodes_first(
        submodule.graph.nodes, lambda n: n.op != "placeholder"
    )
    starts_with_set_grad = (
        first_non_ph
        and first_non_ph.op == "call_function"
        and first_non_ph.target == torch._C._set_grad_enabled
    )
    if not starts_with_set_grad:
        return False
    if omit_if_same_with_ambient:
        return first_non_ph.args[0] != torch.is_grad_enabled()
    return True
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _replace_with_hop(node: torch.fx.Node):
    """
    Rewrite a call_module *node* whose submodule contains a
    ``_set_grad_enabled`` call into a ``wrap_with_set_grad_enabled``
    higher-order op. No-op if the submodule has no such call.
    """
    assert node.op == "call_module"
    graph: torch.fx.Graph = node.graph
    gm: torch.fx.GraphModule = graph.owning_module
    assert isinstance(node.target, str)
    sub_gm = getattr(gm, node.target)
    sub_graph = sub_gm.graph
    set_grad_nodes = nodes_filter(sub_graph.nodes, _is_set_grad_enabled_node)
    if len(set_grad_nodes) > 0:
        # This pass expects at most one set_grad call per split submodule.
        assert len(set_grad_nodes) == 1
        set_grad_node = set_grad_nodes[0]
        _replace_with_hop_helper(
            node, set_grad_node, _is_set_grad_enabled_node, wrap_with_set_grad_enabled
        )
        # The grad-mode switch is now carried by the HOP itself, so the raw
        # set_grad call inside the subgraph is redundant.
        sub_graph.erase_node(set_grad_node)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _remove_set_grad_and_inline(node: torch.fx.Node):
    """
    Drop any ``_set_grad_enabled`` calls from *node*'s submodule, then inline
    the submodule's body back into the parent graph.
    """
    assert node.op == "call_module"
    owning_graph: torch.fx.Graph = node.graph
    root_gm: torch.fx.GraphModule = owning_graph.owning_module
    assert isinstance(node.target, str)
    sub_gm = getattr(root_gm, node.target)
    sub_graph = sub_gm.graph

    def _erase_if_set_grad(n):
        # Remove grad-mode switches; leave every other node untouched.
        if _is_set_grad_enabled_node(n):
            return sub_graph.erase_node(n)
        return n

    nodes_map(sub_graph.nodes, _erase_if_set_grad)
    node_inline_(node)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _sequential_split_and_maybe_inline_subgraphs(
    gm: torch.fx.GraphModule, graph_signature
):
    """
    Helper function for replace_set_grad_with_hop_pass().

    Splits the graph module into subgraphs at set_grad_enabled nodes, then per
    subgraph either builds a higher-order-op wrapper or inlines the calls back
    into the parent graph module.
    """
    # Fast path: no set_grad_enabled anywhere, nothing to rewrite.
    if not any(_is_set_grad_enabled_node(n) for n in gm.graph.nodes):
        return gm, graph_signature

    # sequential_split returns a new graph module that could have different output
    # args names. We need to fix the graph signature.
    split_gm = sequential_split(gm, _is_set_grad_enabled_node)

    def _maybe_inline_or_replace_with_hop(node: torch.fx.Node):
        if _is_set_grad_enabled_sub_mod(node, omit_if_same_with_ambient=True):
            _replace_with_hop(node)
        else:
            _remove_set_grad_and_inline(node)

    return _sequential_split_and_maybe_inline_subgraphs_helper(
        split_gm, graph_signature, _maybe_inline_or_replace_with_hop
    )
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def replace_set_grad_with_hop_pass(gm: torch.fx.GraphModule, graph_signature):
    """
    Split gm into sub-graph-modules using `sequential_split_and_maybe_inline_subgraphs`, and
    then recursively call itself on each of the submodules.

    Returns the rewritten graph module and its (possibly updated) signature.
    """
    # Delegate to the shared helper, specialized with this module's
    # set_grad-based splitting strategy.
    return _replace_with_hop_pass_helper(
        gm,
        graph_signature,
        _sequential_split_and_maybe_inline_subgraphs,
    )
|
janus/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from typing import Dict, Optional
|
| 3 |
+
import torch
|
| 4 |
+
from torch._ops import OpOverload, HigherOrderOperator
|
| 5 |
+
from torch._export.error import InternalError
|
| 6 |
+
from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
__all__ = ["ReplaceViewOpsWithViewCopyOpsPass"]


# Non-functional ops with a hand-picked functional replacement; these are not
# discoverable through the generic "<view_op>_copy" naming scheme used below.
_NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS: Dict[OpOverload, OpOverload] = {
    torch.ops.aten._unsafe_view.default: torch.ops.aten.view_copy.default,
}
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def is_view_op(schema: torch._C.FunctionSchema) -> bool:
    """
    Return True if *schema* describes a view op: its first argument aliases
    the output (``alias_info`` is present) without being written to.
    """
    # Pythonic emptiness check instead of `len(...) == 0`.
    if not schema.arguments:
        return False
    alias_info = schema.arguments[0].alias_info
    # A view reads through the alias; an in-place mutation (is_write) is not a view.
    return (alias_info is not None) and (not alias_info.is_write)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def get_view_copy_of_view_op(schema: torch._C.FunctionSchema) -> Optional[OpOverload]:
    """
    Resolve the functional ``*_copy`` overload for an aten view op's schema.

    Returns None when *schema* is not an aten view op. Raises InternalError
    when it is a view op but no matching ``*_copy`` overload exists.
    """
    if not (is_view_op(schema) and schema.name.startswith("aten::")):
        return None

    base_name = schema.name.split("::")[1]
    overload = schema.overload_name if schema.overload_name != "" else "default"
    copy_name = base_name + "_copy"

    if not hasattr(torch.ops.aten, copy_name):
        raise InternalError(f"{schema.name} is missing a view_copy variant")

    copy_packet = getattr(torch.ops.aten, copy_name)
    if not hasattr(copy_packet, overload):
        raise InternalError(f"{schema.name} is missing a view_copy variant")

    return getattr(copy_packet, overload)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class ReplaceViewOpsWithViewCopyOpsPass(_ExportPassBaseDeprecatedDoNotUse):
    """
    Our backend expects pure functional operators. For efficiency
    purposes, we keep view ops around while functionalizing the exported
    program. This pass replaces view ops with view copy ops for backends that
    need AOT memory planning.
    """
    def call_operator(self, op, args, kwargs, meta):
        # Explicitly-mapped non-functional ops take precedence over the
        # generic name-based lookup.
        if op in _NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS:
            return super().call_operator(
                (_NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS[op]), args, kwargs, meta
            )

        # Higher-order operators have no FunctionSchema to inspect; pass through.
        if isinstance(op, HigherOrderOperator):
            return super().call_operator(op, args, kwargs, meta)

        # View ops with a *_copy counterpart are swapped for the functional form.
        if view_copy_op := get_view_copy_of_view_op(op._schema):
            return super().call_operator(view_copy_op, args, kwargs, meta)

        # Anything else is left untouched.
        return super().call_operator(op, args, kwargs, meta)
|
janus/lib/python3.10/site-packages/torch/_export/passes/replace_with_hop_pass_util.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
|
| 3 |
+
import contextlib
|
| 4 |
+
import copy
|
| 5 |
+
import operator
|
| 6 |
+
from typing import Callable
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from torch._ops import HigherOrderOperator
|
| 10 |
+
|
| 11 |
+
from ..utils import node_replace_, nodes_map
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _replace_with_hop_helper(
    node: torch.fx.Node,
    enter_block_node: torch.fx.Node,
    node_filter: Callable,
    wrap_hoo: HigherOrderOperator,
):
    """
    Replace a call_module *node* with a call to the higher-order op *wrap_hoo*,
    passing *enter_block_node*'s args plus the submodule and the original call
    args. Metadata (nn_module_stack, torch_fn, val) is propagated from
    *enter_block_node* and the submodule's output(s).
    """
    graph: torch.fx.Graph = node.graph
    gm: torch.fx.GraphModule = graph.owning_module
    assert isinstance(node.target, str)
    sub_gm = getattr(gm, node.target)

    def set_hoo_node_meta(call_func_node):
        # NOTE: closes over `output_args`, which is assigned below before this
        # helper is ever invoked.
        call_func_node.meta["nn_module_stack"] = copy.copy(
            enter_block_node.meta.get("nn_module_stack", {})
        )
        call_func_node.meta["torch_fn"] = (
            f"{wrap_hoo.__name__}",
            f"{wrap_hoo.__class__.__name__}.{wrap_hoo.__name__}",
        )
        if isinstance(output_args, (tuple, list)):
            call_func_node.meta["val"] = tuple(arg.meta["val"] for arg in output_args)
        elif isinstance(output_args, torch.fx.Node):
            call_func_node.meta["val"] = (output_args.meta["val"],)

    with graph.inserting_before(node):
        get_attr_node = graph.get_attr(node.target)
        get_attr_node.meta["nn_module_stack"] = copy.copy(
            enter_block_node.meta.get("nn_module_stack", {})
        )
        # Last node of the subgraph is normally its output node.
        output_node = next(iter(reversed(sub_gm.graph.nodes)), None)
        # Split_module pass intentially doesn't add output node
        # if the graph doesn't return anything.
        # TODO (tmanlaibaatar) Figure out if this is right behaviour
        # for split_module
        if isinstance(output_node, torch.fx.Node) and output_node.op != "output":
            output_node = None
        if output_node is not None:
            assert len(output_node.args) == 1
            output_args = output_node.args[0]
            enter_block_node_args = enter_block_node.args
            if isinstance(output_args, (tuple, list)):
                # Multi-output case: the HOP returns a tuple; existing getitem
                # users of `node` keep indexing into it.
                call_func_node = graph.call_function(
                    wrap_hoo,
                    (*enter_block_node_args, get_attr_node, *node.args),
                    {},
                )
                # Create the metadata
                set_hoo_node_meta(call_func_node)
                node_replace_(node, call_func_node)

                # Rename the name of getitem nodes to the actual name of its contents
                # for passing verifier and better readability, also propagate metadata
                for get_item_node in call_func_node.users.keys():
                    idx: int = get_item_node.args[1]  # type: ignore[assignment]
                    output_node = output_args[idx]
                    get_item_node._rename(output_node.name)
                    get_item_node.meta = output_node.meta

            elif isinstance(output_args, torch.fx.Node):
                # Single-output case: normalize to a one-element tuple output
                # and index it with an explicit getitem.
                call_func_node = graph.create_node(
                    "call_function",
                    wrap_hoo,
                    (*enter_block_node_args, get_attr_node, *node.args),
                    {},
                    output_args.name,
                )
                # Modify the subgraph to output a singleton list.
                output_node.args = ((output_args,),)
                # Add in an extra `getitem(wrap_hoo, 0)` node to the toplevel graph.
                get_item_node = graph.create_node(
                    "call_function",
                    operator.getitem,
                    (call_func_node, 0),
                    {},
                )
                # Create the metadata
                get_item_node.meta = output_args.meta
                set_hoo_node_meta(call_func_node)
                node_replace_(node, get_item_node)
            else:
                raise NotImplementedError(
                    f"repalce_with_hop_pass doesnt' support output type {type(output_args)}"
                )
        else:
            # TODO (shangdiy): remove this line, since the export graph can be non-functional
            node.graph.erase_node(node)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def _sequential_split_and_maybe_inline_subgraphs_helper(
    new_gm: torch.fx.GraphModule,
    graph_signature,
    maybe_inline_or_replace_with_hop: Callable[[torch.fx.Node], None],
):
    """
    Helper function for replacing graph nodes with higher order nodes.
    For each subgraph in `new_gm`, decides whether to construct a HOO subgraph, or inline the calls
    back into the parent graph module, depending on `maybe_inline_or_replace_with_hop`.
    """
    # new_gm is a new graph module that could have different output args names.
    # We need to fix the graph signature.
    replace_ctx = contextlib.nullcontext()
    new_signature = None
    if graph_signature is not None:
        # Cannot deep copy a real ScriptObject, which is referenced
        # in the FakeScriptObject. Copy should be good enough to guard
        # against accidental mutation to original graph_signature.
        new_signature = copy.copy(graph_signature)
        new_gm_out_node = next(reversed(new_gm.graph.find_nodes(op="output")))
        assert new_gm_out_node.op == "output" and len(new_gm_out_node.args[0]) == len(
            new_signature.output_specs
        )
        # Re-sync the signature's output names with the (possibly renamed)
        # output nodes of the split graph.
        for arg_node, out_spec in zip(
            new_gm_out_node.args[0], new_signature.output_specs
        ):
            if arg_node is None:
                assert out_spec.arg.value is None
            elif (
                isinstance(arg_node, torch.fx.Node)
                and out_spec.arg.name != arg_node.name
            ):
                out_spec.arg.name = arg_node.name

        # Keep the signature updated while nodes are replaced below.
        replace_ctx = new_gm._set_replace_hook(new_signature.get_replace_hook())  # type: ignore[assignment]

    with replace_ctx:
        # Snapshot the node list: the callback mutates the graph while we iterate.
        nodes_map(
            list(new_gm.graph.nodes),
            lambda node: (
                maybe_inline_or_replace_with_hop(node)
                if node.op == "call_module"
                else node
            ),
        )
    new_gm.recompile()
    return new_gm, new_signature
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def _replace_with_hop_pass_helper(
|
| 152 |
+
gm: torch.fx.GraphModule,
|
| 153 |
+
graph_signature,
|
| 154 |
+
sequential_split_and_maybe_inline_subgraphs: Callable,
|
| 155 |
+
):
|
| 156 |
+
"""
|
| 157 |
+
Split gm into sub-graph-modules using `sequential_split_and_maybe_inline_subgraphs`, and
|
| 158 |
+
then recursively call itself on each of the submodules.
|
| 159 |
+
"""
|
| 160 |
+
new_gm, new_signature = sequential_split_and_maybe_inline_subgraphs(
|
| 161 |
+
gm, graph_signature
|
| 162 |
+
)
|
| 163 |
+
# recursively call
|
| 164 |
+
for node in new_gm.graph.nodes:
|
| 165 |
+
if node.op == "get_attr":
|
| 166 |
+
subgm = getattr(new_gm, node.target)
|
| 167 |
+
if not isinstance(subgm, torch.fx.GraphModule):
|
| 168 |
+
continue
|
| 169 |
+
new_subgm, _ = _replace_with_hop_pass_helper(
|
| 170 |
+
subgm,
|
| 171 |
+
None,
|
| 172 |
+
sequential_split_and_maybe_inline_subgraphs,
|
| 173 |
+
)
|
| 174 |
+
setattr(new_gm, node.target, new_subgm)
|
| 175 |
+
|
| 176 |
+
new_gm.recompile()
|
| 177 |
+
new_gm.graph.lint()
|
| 178 |
+
return new_gm, new_signature
|
janus/lib/python3.10/site-packages/torch/_export/tools.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import logging
|
| 3 |
+
import warnings
|
| 4 |
+
from typing import Any, Dict, Iterable, Optional, Tuple
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.export
|
| 8 |
+
import torch.export._trace
|
| 9 |
+
from torch._utils_internal import log_export_usage
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Module-scoped logger, named after the module path so output can be filtered.
log = logging.getLogger(__name__)

# Public API of this module.
__all__ = ["report_exportability"]
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _generate_inputs_for_submodules(
|
| 18 |
+
model: torch.nn.Module,
|
| 19 |
+
target_submodules: Iterable[str],
|
| 20 |
+
args: Tuple[Any, ...],
|
| 21 |
+
kwargs: Optional[Dict[str, Any]] = None,
|
| 22 |
+
) -> Dict[str, Tuple[Any, Any]]:
|
| 23 |
+
"""
|
| 24 |
+
Generate inputs for targeting submdoules in the given model. Note that if two submodules refer to the same obj, this
|
| 25 |
+
function doesn't work.
|
| 26 |
+
|
| 27 |
+
Args:
|
| 28 |
+
model: root model.
|
| 29 |
+
inputs: inputs to the root model.
|
| 30 |
+
target_submodules: submodules that we want to generate inputs for.
|
| 31 |
+
|
| 32 |
+
Returns:
|
| 33 |
+
A dict that maps from submodule name to its inputs.
|
| 34 |
+
"""
|
| 35 |
+
kwargs = kwargs or {}
|
| 36 |
+
|
| 37 |
+
handles = []
|
| 38 |
+
results = {}
|
| 39 |
+
submodule_to_names = {mod: name for name, mod in model.named_modules()}
|
| 40 |
+
|
| 41 |
+
def pre_forward(module, module_args, module_kwargs):
|
| 42 |
+
results[submodule_to_names[module]] = (module_args, module_kwargs)
|
| 43 |
+
|
| 44 |
+
try:
|
| 45 |
+
for name, mod in model.named_modules():
|
| 46 |
+
if name in target_submodules:
|
| 47 |
+
handles.append(
|
| 48 |
+
mod.register_forward_pre_hook(pre_forward, with_kwargs=True)
|
| 49 |
+
)
|
| 50 |
+
model(*args, **kwargs)
|
| 51 |
+
except Exception as e:
|
| 52 |
+
warnings.warn(
|
| 53 |
+
f"Failed to generate submodule inputs because of the following error:\n{e}"
|
| 54 |
+
)
|
| 55 |
+
finally:
|
| 56 |
+
for h in handles:
|
| 57 |
+
h.remove()
|
| 58 |
+
return results
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def report_exportability(
    mod: torch.nn.Module,
    args: Tuple[Any, ...],
    kwargs: Optional[Dict[str, Any]] = None,
    *,
    strict: bool = True,
    pre_dispatch: bool = False,
) -> Dict[str, Optional[Exception]]:
    """
    Report exportability issues for a module in one-shot.

    Args:
        mod: root module.
        args: args to the root module.
        kwargs: kwargs to the root module.
    Returns:
        A dict that maps from submodule name to the exception that was raised when trying to export it.
        `None` means the module is exportable without issue.
    Sample output:
        {
            '': UnsupportedOperatorException(func=<OpOverload(op='testlib.op_missing_meta', overload='default')>),
            'submod_1': UnsupportedOperatorException(func=<OpOverload(op='testlib.op_missing_meta', overload='default')>),
            'submod_2': None
        }
    """

    log_export_usage(event="export.report_exportability")

    kwargs = kwargs or {}

    # Capture the runtime inputs of every named submodule in a single forward.
    all_submod_names = [name for name, _ in mod.named_modules() if name != ""]
    submod_inputs = _generate_inputs_for_submodules(mod, all_submod_names, args, kwargs)

    # Each module *type* is exported at most once to avoid redundant work.
    tried_module_types = set()
    report: Dict[str, Optional[Exception]] = {}

    def try_export(module, module_name, args, kwargs):
        nonlocal submod_inputs, report, strict, pre_dispatch, tried_module_types

        if type(module) in tried_module_types:
            return
        tried_module_types.add(type(module))

        # args/kwargs are both None when no inputs were captured for this
        # submodule; in that case we skip exporting it and only recurse.
        if args is not None or kwargs is not None:
            try:
                torch.export._trace._export(
                    module,
                    args,
                    kwargs,
                    strict=strict,
                    pre_dispatch=pre_dispatch,
                )
                report[module_name] = None
                log.info("Successfully exported `%s`", module_name)
                return
            except Exception as e:
                short_msg = repr(e).split("\n")[0]
                log.warning(
                    "Failed exporting `%s` with exception: %s", module_name, short_msg
                )
                report[module_name] = e

        # On failure (or missing inputs), descend into children to localize
        # the problem.
        for name, submod in module.named_children():
            sub_module_name = name if module_name == "" else f"{module_name}.{name}"

            submod_args, submod_kwargs = submod_inputs.get(
                sub_module_name, (None, None)
            )

            try_export(submod, sub_module_name, submod_args, submod_kwargs)

        return

    try_export(mod, "", args, kwargs)

    unique_issues = set()
    for exception in report.values():
        if exception is not None:
            # NOTE(review): this splits on a literal backslash-n (the escaped
            # newline inside repr output), while the split above uses a real
            # "\n" — confirm whether both were intended to behave the same.
            key = repr(exception).split("\\n")[0]
            unique_issues.add(key)

    log.warning("Found %d export issues:", len(unique_issues))
    for issue in unique_issues:
        log.warning(issue)

    return report
|
janus/lib/python3.10/site-packages/torch/_export/utils.py
ADDED
|
@@ -0,0 +1,893 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import ast
|
| 3 |
+
import dataclasses
|
| 4 |
+
import inspect
|
| 5 |
+
import math
|
| 6 |
+
import operator
|
| 7 |
+
import re
|
| 8 |
+
from inspect import Parameter
|
| 9 |
+
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, TYPE_CHECKING
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
from torch._guards import detect_fake_mode
|
| 13 |
+
from torch._subclasses.fake_tensor import FakeTensor
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from torch._export.passes.lift_constants_pass import ConstantAttrMap
|
| 18 |
+
from torch.export import ExportedProgram
|
| 19 |
+
from torch.export.graph_signature import ExportGraphSignature
|
| 20 |
+
|
| 21 |
+
from torch.export.graph_signature import InputKind, OutputKind
|
| 22 |
+
from torch.utils._pytree import (
|
| 23 |
+
_register_pytree_node,
|
| 24 |
+
Context,
|
| 25 |
+
FlattenFunc,
|
| 26 |
+
FromDumpableContextFn,
|
| 27 |
+
GetAttrKey,
|
| 28 |
+
KeyPath,
|
| 29 |
+
keystr,
|
| 30 |
+
MappingKey,
|
| 31 |
+
SequenceKey,
|
| 32 |
+
ToDumpableContextFn,
|
| 33 |
+
tree_flatten_with_path,
|
| 34 |
+
UnflattenFunc,
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
# Name prefix applied to placeholder nodes, keyed by the kind of input the
# placeholder lifts. USER_INPUT placeholders keep their original name.
placeholder_prefixes = {
    InputKind.USER_INPUT: "",
    InputKind.PARAMETER: "p_",
    InputKind.BUFFER: "b_",
    InputKind.CONSTANT_TENSOR: "c_",
    InputKind.CUSTOM_OBJ: "obj_",
    InputKind.TOKEN: "token",
}
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def _collect_and_set_constant_attrs(
    graph_signature, constants, mod
) -> "ConstantAttrMap":
    """Attach each entry of ``constants`` onto ``mod`` as a plain attribute and
    record it in a ConstantAttrMap, skipping names that ``graph_signature``
    marks as non-persistent buffers. Returns the populated ConstantAttrMap."""
    # the exported module will store constants & non-persistent buffers such that
    # retracing treats them as persistent buffers, so we inform the constants lifting pass
    # and overwrite the new graph signature using the previous program. This is intended to only be used
    # in run_decompositions where we still have access to original EP.
    from torch._export.passes.lift_constants_pass import ConstantAttrMap

    constant_attrs = ConstantAttrMap()
    # Dotted names of buffers recorded as non-persistent in the signature;
    # those entries are skipped below.
    non_persistent_buffers = {
        spec.target
        for spec in graph_signature.input_specs
        if spec.kind == InputKind.BUFFER and not spec.persistent
    }
    for name, value in constants.items():
        if name in non_persistent_buffers:
            continue
        # recursive getattr
        _mod = mod
        *atoms, attr = name.split(".")
        for atom in atoms:
            _mod = getattr(_mod, atom)
        # remove as buffer, reassign as constant/non-persistent buffer
        _mod._buffers.pop(attr, None)
        setattr(_mod, attr, value)
        constant_attrs.add(value, name)
    return constant_attrs
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def _overwrite_signature_for_non_persistent_buffers(
|
| 79 |
+
old_sig: "ExportGraphSignature", new_sig: "ExportGraphSignature"
|
| 80 |
+
):
|
| 81 |
+
# overwrite signature for non-persistent buffers
|
| 82 |
+
non_persistent_buffers = {
|
| 83 |
+
spec.target
|
| 84 |
+
for spec in old_sig.input_specs
|
| 85 |
+
if spec.kind == InputKind.BUFFER and not spec.persistent
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
for spec in new_sig.input_specs:
|
| 89 |
+
if spec.kind == InputKind.BUFFER and spec.target in non_persistent_buffers:
|
| 90 |
+
spec.persistent = False
|
| 91 |
+
return new_sig
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _collect_param_buffer_metadata(mod: torch.fx.GraphModule) -> Dict[str, Any]:
    """
    Param/buffer metadata needs to be saved before lowering to aten IR
    because aten IR lifts them, as a result, automatic preservation doesn't work.
    This is intended to be called on the strict mode tracing right before lowering to
    aten IR OR run_decomposition pass.

    Returns a dict mapping a dotted param/buffer name to the node ``meta`` dict
    of the node that referenced it.
    """
    params_buffers_to_node_meta = {}

    def _getattr(model: torch.fx.GraphModule, attr_name: str):
        # Resolve a dotted attribute path, asserting every intermediate exists.
        *prefix, field = attr_name.split(".")
        t = model
        for item in prefix:
            t = getattr(t, item, None)  # type: ignore[assignment]
            assert t is not None

        return getattr(t, field)

    for node in mod.graph.nodes:
        target = node.target
        meta = node.meta
        # call_module: every param/buffer under the called submodule inherits
        # this node's meta (keyed by "<target>.<name>").
        if node.op == "call_module":
            submodule = _getattr(mod, target)
            if isinstance(submodule, torch.nn.Module):
                for name, _ in submodule.named_parameters(
                    recurse=True, remove_duplicate=False
                ):
                    params_buffers_to_node_meta[target + "." + name] = meta

                for name, _ in submodule.named_buffers(
                    recurse=True, remove_duplicate=False
                ):
                    params_buffers_to_node_meta[target + "." + name] = meta

        # get_attr on anything but a nested GraphModule records the attr's meta.
        if node.op == "get_attr":
            submodule = _getattr(mod, target)
            if not isinstance(submodule, torch.fx.GraphModule):
                params_buffers_to_node_meta[target] = meta

        # If the call_function uses param as input, we also need to update params' meta
        # with this call_function node's meta.
        # This is basically the same flow as torch.fx.traceback.preserve_meta()
        if node.op == "call_function" and not isinstance(
            node.target, torch._ops.HigherOrderOperator
        ):
            for arg in node._input_nodes:
                if arg.op == "get_attr":
                    for entry in torch.fx.proxy._COPY_META_FIELDS:
                        if entry in meta:
                            params_buffers_to_node_meta[arg.target][entry] = meta[entry]

    return params_buffers_to_node_meta
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def _populate_param_buffer_metadata_to_new_gm(
    params_buffers_to_node_meta: Dict[str, Any],
    gm: torch.fx.GraphModule,
    new_sig: "ExportGraphSignature",
) -> None:
    """
    Given that we collected param'buffer metadata before, we put them back in
    newly traced graph module

    Mutates both ``params_buffers_to_node_meta`` (drops two keys from each
    saved meta dict) and the placeholder nodes of ``gm``.
    """
    # Don't copy over nn_module_stack, stack_trace metadata for params/buffers nodes
    for metadata in params_buffers_to_node_meta.values():
        metadata.pop("nn_module_stack", None)
        metadata.pop("stack_trace", None)

    # A placeholder that lifts a param/buffer receives that param/buffer's
    # previously saved meta entries.
    for node in gm.graph.nodes:
        if node.op == "placeholder":
            if node.target in new_sig.inputs_to_parameters:
                param_name = new_sig.inputs_to_parameters[node.target]
                if param_name in params_buffers_to_node_meta:
                    for k, v in params_buffers_to_node_meta[param_name].items():
                        node.meta[k] = v
            if node.target in new_sig.inputs_to_buffers:
                buffer_name = new_sig.inputs_to_buffers[node.target]
                if buffer_name in params_buffers_to_node_meta:
                    for k, v in params_buffers_to_node_meta[buffer_name].items():
                        node.meta[k] = v
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def _get_shape_env_from_gm(gm: torch.fx.GraphModule):
    """Best-effort lookup of the ShapeEnv backing ``gm``.

    Prefers the shape env of the fake mode detected from the graph; otherwise
    falls back to the shape env of the first SymInt found in node "val"
    metadata. Implicitly returns None when neither is available.
    """
    vals = [
        node.meta["val"]
        for node in gm.graph.nodes
        if node.meta.get("val", None) is not None
    ]

    fake_mode = _detect_fake_mode_from_gm(gm)
    if fake_mode is not None:
        return fake_mode.shape_env
    for v in vals:
        if isinstance(v, torch.SymInt):
            return v.node.shape_env
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def _rename_without_collisions(
|
| 192 |
+
name_map: Dict[str, str],
|
| 193 |
+
orig_name: str,
|
| 194 |
+
name: str,
|
| 195 |
+
is_placeholder: bool = False,
|
| 196 |
+
):
|
| 197 |
+
"""
|
| 198 |
+
Renames nodes to avoid name collisions, with suffixing.
|
| 199 |
+
name_map: map from original name to new name
|
| 200 |
+
orig_name: mapping key
|
| 201 |
+
name: candidate name (potentially suffixed, e.g. mul_2)
|
| 202 |
+
is_placeholder: if the node is a placeholder, avoid detecting suffix
|
| 203 |
+
"""
|
| 204 |
+
if name in name_map.values():
|
| 205 |
+
# non-placeholder nodes may be suffixed with the count
|
| 206 |
+
# instead of adding another suffix, we will try to increment it
|
| 207 |
+
match = re.match(r"(.*)_(\d+)", name)
|
| 208 |
+
if match and not is_placeholder:
|
| 209 |
+
name, n = match.group(1), int(match.group(2))
|
| 210 |
+
else:
|
| 211 |
+
n = 0
|
| 212 |
+
while (dup_name := f"{name}_{n + 1}") in name_map.values():
|
| 213 |
+
n += 1
|
| 214 |
+
name_map[orig_name] = dup_name
|
| 215 |
+
else:
|
| 216 |
+
name_map[orig_name] = name
|
| 217 |
+
return name_map[orig_name]
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def _check_input_constraints_for_graph(
    input_placeholders: List[torch.fx.Node], flat_args_with_path, range_constraints
):
    """Validate runtime inputs against the placeholders' recorded shapes.

    Checks arity, tensor-ness, rank, per-dimension equality (unifying symbolic
    dims), and range constraints; raises RuntimeError with a human-readable
    keypath on the first violation.
    """
    def get_keystr(key_path: KeyPath) -> str:
        """For a given index into the flat_args, return a human readable string
        describing how to access it, e.g. "*args["foo"][0].bar"
        """
        # Prefix the keypath with "*args" or "**kwargs" to make it clearer where
        # the arguments come from. Ultimately we ought to serialize the
        # original arg names for the best error message here.
        args_kwargs_key_path = key_path[0]
        assert isinstance(args_kwargs_key_path, SequenceKey)
        if args_kwargs_key_path.idx == 0:
            return f"*args{keystr(key_path[1:])}"
        else:
            kwarg_key = key_path[1]
            assert isinstance(kwarg_key, MappingKey)
            name = str(kwarg_key)[1:-1]  # get rid of the enclosed []
            return f"{name}{keystr(key_path[2:])}"

    # Deferred imports: sympy and these passes are only needed at check time.
    import sympy

    from torch._export.passes.add_runtime_assertions_for_constraints_pass import (
        _convert_range_to_int,
    )
    from torch.utils._sympy.solve import try_solve

    if len(flat_args_with_path) != len(input_placeholders):
        raise RuntimeError(
            "Unexpected number of inputs "
            f"(expected {len(input_placeholders)}, got {len(flat_args_with_path)})"
        )
    # NOTE: export already guarantees that the same symbol is used in metadata
    # for all InputDims related by equality constraints, so we can just unify
    # symbols with given input dimension values to check equality constraints.
    unification_map: Dict[sympy.Symbol, Any] = {}
    for (key_path, arg), node in zip(flat_args_with_path, input_placeholders):
        node_val = node.meta.get("val")
        if isinstance(node_val, FakeTensor):
            if not isinstance(arg, torch.Tensor):
                raise RuntimeError(
                    f"Expected input at {get_keystr(key_path)} to be a tensor, but got {type(arg)}",
                )

            if len(node_val.shape) != len(arg.shape):
                raise RuntimeError(
                    f"Unexpected number of dimensions in input at {get_keystr(key_path)}.shape "
                    f"(expected {node_val.shape}, got {arg.shape})"
                )

            for j, (arg_dim, node_dim) in enumerate(zip(arg.shape, node_val.shape)):
                # TODO(avik): Assert the following property in the IR verifier:
                # node_dim is either an int or a SymInt containing an int or a unary sympy.Expr
                if (
                    isinstance(node_dim, torch.SymInt)
                    and len(node_dim.node.expr.free_symbols) == 1
                ):
                    symbol = next(iter(node_dim.node.expr.free_symbols))
                    if symbol in unification_map:
                        # Symbol already bound: substitute and compare.
                        existing_dim = node_dim.node.expr.subs(unification_map)
                        if arg_dim != existing_dim:
                            raise RuntimeError(
                                f"Expected input at {get_keystr(key_path)}.shape[{j}] to be equal to "
                                f"{existing_dim}, but got {arg_dim}",
                            )
                    else:
                        if (
                            isinstance(arg_dim, torch.SymInt)
                            and not arg_dim.node.expr.is_number
                        ):
                            # This can happen when, say, arg is a fake tensor.
                            # We do not run checks on symbolic shapes of fake inputs as
                            # such checks can affect the shape env.
                            pass
                        else:
                            if isinstance(node_dim.node.expr, sympy.Symbol):
                                # Short cut for try_solve below. Also useful in cases where
                                # sympy.Eq(node_dim.node.expr, arg_dim) would evaluate to False
                                # purely because symbol is constrained to be size-like,
                                # e.g., when node_dim.node.expr = symbol and arg_dim = 0.
                                unification_map[symbol] = int(arg_dim)
                            else:
                                solution = try_solve(
                                    sympy.Eq(node_dim.node.expr, arg_dim), symbol
                                )
                                if solution is None:
                                    raise RuntimeError(  # noqa: B904
                                        f"Expected input {node.name}.shape[{j}] = {arg_dim} to be "
                                        f"of the form {node_dim.node.expr}, where {symbol} is an integer"
                                    )
                                else:
                                    unification_map[symbol] = int(solution[1])

                            if node_dim.node.expr in range_constraints:
                                min_val, max_val = _convert_range_to_int(
                                    range_constraints[node_dim.node.expr]
                                )
                                # NOTE: we allow dimensions to be 0/1 at runtime
                                if min_val > 2:
                                    if arg_dim < min_val:
                                        raise RuntimeError(
                                            f"Expected input at {get_keystr(key_path)}.shape[{j}] to be >= "
                                            f"{min_val}, but got {arg_dim}",
                                        )
                                if max_val < math.inf:
                                    if arg_dim > max_val:
                                        raise RuntimeError(
                                            f"Expected input at {get_keystr(key_path)}.shape[{j}] to be <= "
                                            f"{max_val}, but got {arg_dim}",
                                        )
                else:
                    if arg_dim != node_dim:
                        if (
                            isinstance(node_dim, torch.SymInt)
                            and not node_dim.node.expr.is_number
                        ):
                            # this means we deferred a guard from export analysis to runtime, let this pass
                            # we'll add a runtime assert checking equality to this replacement expression
                            continue
                        raise RuntimeError(
                            f"Expected input at {get_keystr(key_path)}.shape[{j}] to be equal to "
                            f"{node_dim}, but got {arg_dim}",
                        )
        elif isinstance(node_val, (int, float, str)):
            # Non-tensor leaf inputs must match the recorded value exactly
            # (including type: 1 vs 1.0 are different).
            if type(arg) != type(node_val) or arg != node_val:
                raise RuntimeError(
                    f"Expected input at {get_keystr(key_path)} to be equal to {node_val}, but got {arg}",
                )
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def register_dataclass_as_pytree_node(
    cls: Type[Any],
    flatten_fn: Optional[FlattenFunc] = None,
    unflatten_fn: Optional[UnflattenFunc] = None,
    *,
    serialized_type_name: Optional[str] = None,
    to_dumpable_context: Optional[ToDumpableContextFn] = None,
    from_dumpable_context: Optional[FromDumpableContextFn] = None,
    return_none_fields: bool = False,
) -> None:
    """Register a dataclass ``cls`` as a pytree node.

    By default, flattening yields the non-None field values (field names go in
    the context); pass ``return_none_fields=True`` to also emit None fields as
    leaves. Custom flatten/unflatten functions may be supplied. The two
    dumpable-context converters must be supplied together or not at all.
    """
    assert dataclasses.is_dataclass(
        cls
    ), f"Only dataclasses can be registered with this function: {cls}"

    def default_flatten_fn(obj: Any) -> Tuple[List[Any], Context]:
        # Context is [names of flattened fields, names of skipped None fields].
        flattened = []
        flat_names = []
        none_names = []
        for f in dataclasses.fields(obj):
            name, val = f.name, getattr(obj, f.name)
            if val is not None or return_none_fields:
                flattened.append(val)
                flat_names.append(name)
            else:
                none_names.append(name)
        return flattened, [flat_names, none_names]

    def default_unflatten_fn(values: Iterable[Any], context: Context) -> Any:
        flat_names, none_names = context
        # Fields skipped during flattening are restored as None.
        return cls(**dict(zip(flat_names, values)), **dict.fromkeys(none_names))

    def default_flatten_fn_with_keys(obj: Any) -> Tuple[List[Any], Context]:
        # NOTE(review): this returns only flat_names as the context, whereas
        # default_flatten_fn returns [flat_names, none_names] — confirm the
        # keyed-flatten path is expected to use this narrower context.
        flattened, (flat_names, none_names) = flatten_fn(obj)  # type: ignore[misc]
        return [(MappingKey(k), v) for k, v in zip(flat_names, flattened)], flat_names

    flatten_fn = flatten_fn if flatten_fn is not None else default_flatten_fn
    unflatten_fn = unflatten_fn if unflatten_fn is not None else default_unflatten_fn

    if (to_dumpable_context is None) ^ (from_dumpable_context is None):
        raise ValueError(
            f"Both to_dumpable_context and from_dumpable_context for {cls} must "
            "be None or registered."
        )

    _register_pytree_node(
        cls,
        flatten_fn,
        unflatten_fn,
        serialized_type_name=serialized_type_name,
        flatten_with_keys_fn=default_flatten_fn_with_keys,
        to_dumpable_context=to_dumpable_context,
        from_dumpable_context=from_dumpable_context,
    )
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def is_param(program: "ExportedProgram", node: torch.fx.Node) -> bool:
    """Return True iff ``node`` is an input lifted from a parameter of ``program``."""
    sig = program.graph_signature
    return node.name in sig.inputs_to_parameters
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def get_param(
    program: "ExportedProgram",
    node: torch.fx.Node,
) -> Optional[torch.nn.Parameter]:
    """
    Returns the parameter associated with the given node in the exported program,
    or None if the node does not correspond to a parameter.
    """
    mapping = program.graph_signature.inputs_to_parameters
    if node.name in mapping:
        return program.state_dict[mapping[node.name]]
    return None
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
def is_buffer(program: "ExportedProgram", node: torch.fx.Node) -> bool:
    """Return True iff ``node`` is an input lifted from a buffer of ``program``."""
    sig = program.graph_signature
    return node.name in sig.inputs_to_buffers
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
def get_buffer(
    program: "ExportedProgram",
    node: torch.fx.Node,
) -> Optional[torch.Tensor]:
    """
    Returns the buffer associated with the given node in the exported program,
    or None if the node does not correspond to a buffer.

    Non-persistent buffers live in ``program.constants``; persistent ones live
    in ``program.state_dict``.
    """
    sig = program.graph_signature
    if node.name not in sig.inputs_to_buffers:
        return None
    buffer_name = sig.inputs_to_buffers[node.name]
    if buffer_name in sig.non_persistent_buffers:
        return program.constants[buffer_name]
    return program.state_dict[buffer_name]
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
def is_lifted_tensor_constant(
    program: "ExportedProgram",
    node: torch.fx.Node,
) -> bool:
    """Return True iff ``node`` is an input lifted from a tensor constant of ``program``."""
    sig = program.graph_signature
    return node.name in sig.inputs_to_lifted_tensor_constants
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
def get_lifted_tensor_constant(
    program: "ExportedProgram",
    node: torch.fx.Node,
) -> Optional[torch.Tensor]:
    """
    Returns the lifted tensor constant associated with the given node in the
    exported program, or None if the node is not a lifted tensor constant.
    """
    mapping = program.graph_signature.inputs_to_lifted_tensor_constants
    if node.name not in mapping:
        return None
    return program.constants[mapping[node.name]]
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
def sequential_split(gm: torch.fx.GraphModule, node_call_back) -> torch.fx.GraphModule:
|
| 486 |
+
"""
|
| 487 |
+
sequential_split creates a new graph module that splits the input graph module into multiple submodules
|
| 488 |
+
based on the node_call_back. It doesn't mutate the input graph module. The node_call_back should return
|
| 489 |
+
True if the node is a delimiter. Delimiter will be the first node in the next submodule.
|
| 490 |
+
"""
|
| 491 |
+
from torch.fx.passes.split_module import split_module
|
| 492 |
+
|
| 493 |
+
split_map = {}
|
| 494 |
+
split_id = 0
|
| 495 |
+
for node in gm.graph.nodes:
|
| 496 |
+
if node_call_back(node):
|
| 497 |
+
split_id += 1
|
| 498 |
+
split_map[node] = split_id
|
| 499 |
+
|
| 500 |
+
new_gm = split_module(
|
| 501 |
+
gm,
|
| 502 |
+
gm,
|
| 503 |
+
lambda node: split_map[node],
|
| 504 |
+
keep_original_order=True,
|
| 505 |
+
keep_original_node_name=True,
|
| 506 |
+
)
|
| 507 |
+
# Keep the codegen from original graph module to preserve e.g. pytree info.
|
| 508 |
+
new_gm.graph._codegen = gm.graph._codegen
|
| 509 |
+
new_gm.recompile()
|
| 510 |
+
return new_gm
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
def nodes_filter(nodes: List[torch.fx.Node], node_call_back) -> List[torch.fx.Node]:
    """Return, as a new list, the nodes for which ``node_call_back`` is truthy."""
    return list(filter(node_call_back, nodes))
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
def nodes_first(
    nodes: List[torch.fx.Node], node_call_back=None
) -> Optional[torch.fx.Node]:
    """
    Returns the first node that matches the node_call_back. If no node matches, returns None.
    When node_call_back is None, returns the first node in the node list.

    Unlike filtering the whole list first, this stops at the first match, so it
    no longer does O(n) work (and allocation) when an early element matches.
    """
    if node_call_back is None:
        return nodes[0] if nodes else None
    return next((node for node in nodes if node_call_back(node)), None)
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
def nodes_count(nodes: List[torch.fx.Node], node_call_back) -> int:
    """Return how many nodes satisfy ``node_call_back``."""
    return sum(1 for node in nodes if node_call_back(node))
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
def nodes_map(nodes: List[torch.fx.Node], node_call_back) -> List[torch.fx.Node]:
    """
    Invoke ``node_call_back`` on each element of ``nodes`` in order (for its
    side effects; return values are ignored) and hand back the same list.
    """
    for current in nodes:
        node_call_back(current)
    return nodes
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
def node_replace_(old_node: torch.fx.Node, new_node: torch.fx.Node) -> None:
    """
    Replace all uses of old_node with new_node, then erase old_node from its graph.

    The trailing underscore follows the in-place naming convention: the graph
    is mutated directly.
    """
    old_node.replace_all_uses_with(new_node)
    # Clear remaining user bookkeeping before erasing — presumably so
    # erase_node's "node still has users" check passes; confirm against
    # torch.fx.Graph.erase_node semantics.
    old_node.users.clear()
    old_node.graph.erase_node(old_node)
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
def node_inline_(call_mod_node: torch.fx.Node) -> None:
    """
    Inline the submodule of the given node into the parent module.
    Note: we only support the case where submodule takes tensors inputs.

    NOTE(review): annotated ``-> None`` but actually returns the (mutated)
    parent GraphModule at the end — callers appear to rely only on the
    in-place mutation.
    """
    assert call_mod_node.op == "call_module"
    gm = call_mod_node.graph.owning_module

    assert isinstance(call_mod_node.target, str)
    sub_gm = getattr(gm, call_mod_node.target)

    # Partition the submodule's nodes: placeholders, body, and the output node.
    phs = (node for node in sub_gm.graph.nodes if node.op == "placeholder")
    body = (
        node for node in sub_gm.graph.nodes if node.op not in ("placeholder", "output")
    )
    output = [node for node in sub_gm.graph.nodes if node.op == "output"]

    # Wire each submodule placeholder to the corresponding call-site argument.
    for ph, arg in zip(phs, call_mod_node.args):
        assert isinstance(arg, torch.fx.Node)
        node_replace_(ph, arg)

    # Copy the submodule body into the parent graph just before the call site.
    with gm.graph.inserting_before(call_mod_node):
        for node in body:
            new_node = gm.graph.node_copy(node)
            node_replace_(node, new_node)

    if len(output) > 0:
        assert len(output) == 1 and len(output[0].args) == 1
        new_output = output[0].args[0]

        if isinstance(new_output, torch.fx.Node):
            # Clear the users of the output node and set
            # the users to be the users of original call_module node.
            new_output.users.clear()
            node_replace_(call_mod_node, new_output)
        elif isinstance(new_output, (list, tuple)):
            # Pop subgraph output node from users.
            for node in new_output:
                node.users.pop(output[0])

            # Inline the get_item calls for the output node.
            get_item_users = nodes_filter(
                list(call_mod_node.users.keys()),
                lambda node: node.op == "call_function"
                and node.target == operator.getitem,
            )
            # get_item_node.args[1] is the idx referring to new_output[idx]
            nodes_map(
                get_item_users,
                lambda get_item_node: node_replace_(
                    get_item_node,
                    new_output[get_item_node.args[1]],
                ),
            )
            call_mod_node.graph.erase_node(call_mod_node)
        else:
            raise NotImplementedError(
                f"Unsupported output type {type(new_output)}. Expect it to be a Node or a list/tuple of Nodes."
            )
    else:
        # Submodule produced nothing; just drop the call site.
        call_mod_node.graph.erase_node(call_mod_node)

    # The inlined submodule is now unused; drop it and refresh generated code.
    gm.delete_all_unused_submodules()
    gm.recompile()
    return gm
|
| 620 |
+
|
| 621 |
+
|
| 622 |
+
def _get_torch_jit_trace_forward_signature(mod: torch.nn.Module):
|
| 623 |
+
"""
|
| 624 |
+
Get source code and parse argument names using AST. The function returns
|
| 625 |
+
a signature of the forward() function.
|
| 626 |
+
|
| 627 |
+
# TODO: Directly provide inspect.signature compatible TS-d module.
|
| 628 |
+
"""
|
| 629 |
+
ast_mod = ast.parse(mod.code)
|
| 630 |
+
ast_func_def: ast.FunctionDef = ast_mod.body[0] # type: ignore[assignment]
|
| 631 |
+
|
| 632 |
+
# FIXME(jiashenc): TorchScript should only allow positional or keywords arguments.
|
| 633 |
+
arg_type_map = {"args": Parameter.POSITIONAL_OR_KEYWORD}
|
| 634 |
+
|
| 635 |
+
# Traverse all argument types in AST tree and create associated parameters.
|
| 636 |
+
param_list = []
|
| 637 |
+
for arg_type, param_type in arg_type_map.items():
|
| 638 |
+
arg_name_list = [a.arg for a in getattr(ast_func_def.args, arg_type)]
|
| 639 |
+
for arg_name in arg_name_list:
|
| 640 |
+
if arg_name == "self":
|
| 641 |
+
continue # Skip self argument.
|
| 642 |
+
param_list.append(inspect.Parameter(arg_name, param_type))
|
| 643 |
+
|
| 644 |
+
return inspect.Signature(parameters=param_list)
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
def _bind_signature_to_inputs(mod, fake_args, fake_kwargs):
|
| 648 |
+
if isinstance(mod, (torch.jit.ScriptModule, torch.jit.TracedModule)):
|
| 649 |
+
sig = _get_torch_jit_trace_forward_signature(mod)
|
| 650 |
+
|
| 651 |
+
# Sanity check for placeholder names coming from TorchScript.
|
| 652 |
+
assert len(sig.parameters) == len(fake_args) + len(fake_kwargs), (
|
| 653 |
+
"Arguments other than POSITIONAL_OR_KEYWORD kinds in forward() "
|
| 654 |
+
"are not supported in _get_torch_jit_trace_forward_signature"
|
| 655 |
+
)
|
| 656 |
+
else:
|
| 657 |
+
sig = inspect.signature(mod.forward)
|
| 658 |
+
|
| 659 |
+
return sig.bind(*fake_args, **fake_kwargs).arguments
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
def _name_hoo_subgraph_placeholders(gm: torch.fx.GraphModule) -> None:
    """
    Propagate placeholder names from the top-level graph into HigherOrderOp subgraphs,
    and handle collisions with non-placeholders by count suffixing.
    Different HOO subgraph types have different input schemas, so we first enumerate them
    and gather the top-level named placeholder nodes.
    """
    # gather all HOO subgraphs and their top-level named placeholder nodes
    subgraph_ph_tuples: List[Tuple[torch.fx.GraphModule, List[torch.fx.Node]]] = []
    for node in gm.graph.nodes:
        if node.op == "call_function" and isinstance(
            node.target, torch._ops.HigherOrderOperator
        ):
            # HOO subgraphs have varying input schemas, so we enumerate them there
            if node.target._name == "cond":
                # cond(pred, true_gm, false_gm, args): both branches share args.
                _, true_graph, false_graph, cond_args = node._args
                subgraph_ph_tuples.append((getattr(gm, true_graph.target), cond_args))
                subgraph_ph_tuples.append((getattr(gm, false_graph.target), cond_args))
            elif node.target._name == "wrap_with_set_grad_enabled":
                # args[0] is the grad-enabled flag; args[1] the subgraph; rest are inputs.
                subgraph, phs = node._args[1], node._args[2:]
                subgraph_ph_tuples.append((getattr(gm, subgraph.target), phs))
            elif node.target._name == "map_impl":
                # map(body_gm, mapped_array, extra_args): body sees both.
                body_graph, array, args = node._args
                subgraph_ph_tuples.append(
                    (getattr(gm, body_graph.target), array + args)
                )

    # propagate names
    for subgraph, hoo_phs in subgraph_ph_tuples:
        name_map: Dict[str, str] = {}
        # The first len(hoo_phs) nodes of an fx graph are its placeholders.
        for i, node in enumerate(subgraph.graph.nodes):
            if i < len(hoo_phs):  # placeholder, retain name
                name_map[node.name] = hoo_phs[i].name
                node.name = node.target = hoo_phs[i].name
            else:  # non-placeholder, check for collisions
                node.name = _rename_without_collisions(name_map, node.name, node.name)

        # recurse and recompile
        _name_hoo_subgraph_placeholders(subgraph)
        subgraph.recompile()
|
| 702 |
+
|
| 703 |
+
|
| 704 |
+
def placeholder_naming_pass(
    gm: torch.fx.GraphModule,
    export_graph_signature: "ExportGraphSignature",
    mod: torch.nn.Module,
    fake_args,
    fake_kwargs,
    fake_params_buffers,
    constants: Dict[str, Any],
) -> None:
    """
    This pass is run at the end of _export_non_strict() to assign better placeholder node names:
        - User inputs:
            These follow the signature of mod.forward(), e.g. forward(x, y) produces nodes x, y.
            For nested inputs from dictionaries, lists, tuples, or dataclasses,
            the names are a concatenation of the path to the tensor.
                e.g. x = {
                    'a': torch.randn(),
                    'b': [torch.randn(), torch.randn()]
                }
            produces nodes x_a, x_b_0, x_b_1.
        - Parameters/buffers/constants/custom objects:
            These follow the FQN of the object, prefixed by "p", "b", "c", "obj" respectively.
                e.g. self.bar.l0.weight produces "p_bar_l0_weight".
        - Effect tokens:
            These are named token, token_1, ...

    Mutates gm, export_graph_signature, and constants in place; returns None.
    """

    def _strip_name(x):
        # Drop dynamo/tracing "self" prefixes, then sanitize to identifier chars.
        if x.startswith("L__self___"):
            x = x[len("L__self___") :]
        elif x.startswith("self_"):
            x = x[len("self_") :]
        x = re.sub(r"[^a-zA-Z0-9]", "_", x)
        return x

    def _extract_pytree_key(x):
        # Render a single pytree path component as a name fragment.
        if isinstance(x, MappingKey):
            x = re.sub(r"[^a-zA-Z0-9]", "_", str(x.key))
            return x
        elif isinstance(x, SequenceKey):
            return str(x.idx)
        elif isinstance(x, GetAttrKey):
            return x.name
        else:
            raise RuntimeError(f"Pytree key of type {type(x)} not handled for {x}")

    name_map: Dict[str, str] = {}

    # map user input names with mod.forward() signature
    combined_args = _bind_signature_to_inputs(mod, fake_args, fake_kwargs)

    flat_args_with_path, _ = tree_flatten_with_path(combined_args)
    user_input_names = [
        spec.arg.name
        for spec in export_graph_signature.input_specs
        if spec.kind == InputKind.USER_INPUT
    ]

    # use pytree path to name nested user inputs
    for (arg_path, arg), user_input_name in zip(flat_args_with_path, user_input_names):
        if user_input_name:
            _rename_without_collisions(
                name_map,
                user_input_name,
                placeholder_prefixes[InputKind.USER_INPUT]
                + "_".join(_extract_pytree_key(x).lower() for x in arg_path),
                is_placeholder=True,
            )

    # use graph signature input specs to map param/buffer/constant names
    # name effect tokens as token, token_1, ... (these aren't visible to user)
    for spec in export_graph_signature.input_specs:
        if spec.kind == InputKind.USER_INPUT:
            continue
        if spec.kind == InputKind.TOKEN:
            base_name = ""
        else:
            base_name = _strip_name(spec.target).lower()
        base_name = re.sub(r"[^a-zA-Z0-9]", "_", base_name)

        _rename_without_collisions(
            name_map,
            spec.arg.name,
            placeholder_prefixes[spec.kind] + base_name,
            is_placeholder=True,
        )

    # handle naming collisions with call_function/get_attr inputs.
    # here, we want to prioritize user input names over call_function names
    # e.g. not have forward(self, mul): lead to a placeholder node called mul_13,
    # so we increment the suffix of call_function nodes as needed
    for node in gm.graph.nodes:
        if node.op == "placeholder":
            continue
        _rename_without_collisions(name_map, node.name, node.name)

    # assign new node names
    for node in gm.graph.nodes:
        if node.op == "placeholder":
            assert node.name in name_map
            node.name = node.target = name_map[node.name]
        elif node.name in name_map:
            node.name = name_map[node.name]

    # propagate names to higher order op subgraphs
    _name_hoo_subgraph_placeholders(gm)

    # re-generate graph module code
    gm.recompile()

    # modify graph signature (input specs, output specs, user input mutations)
    for spec in export_graph_signature.input_specs:
        assert spec.arg.name in name_map
        spec.arg.name = name_map[spec.arg.name]
        if (  # handle targets for custom objects
            spec.kind == InputKind.CUSTOM_OBJ and spec.target in name_map
        ):
            spec.target = name_map[spec.target][4:]  # strip obj_ prefix

    for spec in export_graph_signature.output_specs:
        if spec.arg.name in name_map:
            spec.arg.name = name_map[spec.arg.name]
        if spec.kind == OutputKind.USER_INPUT_MUTATION and spec.target in name_map:
            spec.target = name_map[spec.target]

    # rename keys in constants dict for custom objects
    for name in list(constants.keys()):
        constant = constants[name]
        if name in name_map and not isinstance(
            constant, torch.Tensor
        ):  # rename custom objects with generic names
            new_name = name_map[name]
            if (
                new_name != name
                and re.match(r"arg(\d+)_1", name)
                and new_name != placeholder_prefixes[InputKind.CUSTOM_OBJ] + name
            ):
                constants[new_name] = constant
                del constants[name]
|
| 843 |
+
|
| 844 |
+
|
| 845 |
+
def remove_proxy_from_state_dict(state_dict: Dict, in_place: bool) -> Dict:
    """
    If `in_place` is false, return a new copy of `state_dict` with "proxy" removed from `v.__dict__`.
    `v` is the values in the dictionary.
    If `in_place` is true, modify `state_dict` in place.
    """
    if in_place:
        for key in state_dict:
            if hasattr(state_dict[key], "proxy"):
                delattr(state_dict[key], "proxy")
        return state_dict

    # Out-of-place: proxied values are replaced with detached clones (the
    # clone carries no python attributes); everything else is carried over.
    return {
        key: (value.clone().detach() if hasattr(value, "proxy") else value)
        for key, value in state_dict.items()
    }
|
| 864 |
+
|
| 865 |
+
|
| 866 |
+
def _detect_fake_mode_from_gm(
|
| 867 |
+
gm: torch.fx.GraphModule,
|
| 868 |
+
) -> torch._subclasses.fake_tensor.FakeTensorMode:
|
| 869 |
+
"""
|
| 870 |
+
For a given graph module, we look at the "val" of placeholder nodes to find the fake inputs.
|
| 871 |
+
Additionally, if gm doesn't have placeholders, we further look at the "example_value" or "val" of other nodes.
|
| 872 |
+
If no fake mode is found, we return None for fake_mode.
|
| 873 |
+
"""
|
| 874 |
+
|
| 875 |
+
fake_inps: List[torch.Tensor] = []
|
| 876 |
+
fake_vals: List[torch.Tensor] = []
|
| 877 |
+
for node in gm.graph.nodes:
|
| 878 |
+
if node.op == "placeholder" and "val" in node.meta:
|
| 879 |
+
fake_val = node.meta["val"]
|
| 880 |
+
if fake_val is not None and isinstance(fake_val, torch.Tensor):
|
| 881 |
+
fake_inps.append(fake_val)
|
| 882 |
+
elif len(fake_inps) == 0 and (
|
| 883 |
+
"example_value" in node.meta or "val" in node.meta
|
| 884 |
+
):
|
| 885 |
+
fake_val = None
|
| 886 |
+
if "example_value" in node.meta:
|
| 887 |
+
fake_val = node.meta["example_value"]
|
| 888 |
+
elif "val" in node.meta:
|
| 889 |
+
fake_val = node.meta["val"]
|
| 890 |
+
if fake_val is not None and isinstance(fake_val, torch.Tensor):
|
| 891 |
+
fake_vals.append(fake_val)
|
| 892 |
+
|
| 893 |
+
return detect_fake_mode(fake_inps + fake_vals)
|
janus/lib/python3.10/site-packages/torch/_export/verifier.py
ADDED
|
@@ -0,0 +1,456 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import inspect
|
| 3 |
+
import math
|
| 4 |
+
import operator
|
| 5 |
+
from collections.abc import Iterable
|
| 6 |
+
from typing import Any, Dict, final, List, Tuple, Type, TYPE_CHECKING
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from torch._ops import HigherOrderOperator, OpOverload
|
| 10 |
+
from torch._subclasses.fake_tensor import FakeTensor
|
| 11 |
+
from torch.export.graph_signature import (
|
| 12 |
+
CustomObjArgument,
|
| 13 |
+
InputKind,
|
| 14 |
+
SymIntArgument,
|
| 15 |
+
TensorArgument,
|
| 16 |
+
TokenArgument,
|
| 17 |
+
)
|
| 18 |
+
from torch.fx import GraphModule
|
| 19 |
+
|
| 20 |
+
if TYPE_CHECKING:
|
| 21 |
+
from torch.export.exported_program import ExportedProgram
|
| 22 |
+
|
| 23 |
+
class SpecViolationError(Exception):
    """Raised when a graph or exported program violates its dialect's spec."""
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def is_functional(op: OpOverload) -> bool:
    """An overload is functional iff its schema declares no mutation."""
    mutates = op._schema.is_mutable
    return not mutates
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _check_has_fake_tensor(node: torch.fx.Node) -> None:
    # TODO(angelayi): remove this in favor of _check_val
    # Backwards-compat alias: delegates entirely to _check_val.
    return _check_val(node)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def _check_val(node: torch.fx.Node) -> None:
|
| 37 |
+
from torch.fx.experimental.symbolic_shapes import SymBool, SymFloat, SymInt
|
| 38 |
+
|
| 39 |
+
def _check_correct_val(val):
|
| 40 |
+
if val is None:
|
| 41 |
+
return True
|
| 42 |
+
elif isinstance(val, (int, bool, str, float)):
|
| 43 |
+
return True
|
| 44 |
+
elif isinstance(val, (torch.memory_format, torch.dtype, torch.device, torch.layout)):
|
| 45 |
+
return True
|
| 46 |
+
elif isinstance(val, (FakeTensor, torch.Tensor)): # TODO(zhxchen17) Remove Tensor.
|
| 47 |
+
return True
|
| 48 |
+
elif isinstance(val, (SymInt, SymFloat, SymBool)):
|
| 49 |
+
return True
|
| 50 |
+
elif isinstance(val, CustomObjArgument):
|
| 51 |
+
return True
|
| 52 |
+
elif isinstance(val, Iterable):
|
| 53 |
+
return all(_check_correct_val(x) for x in val)
|
| 54 |
+
return False
|
| 55 |
+
|
| 56 |
+
def _no_returns(op):
|
| 57 |
+
if not isinstance(op, OpOverload):
|
| 58 |
+
return False
|
| 59 |
+
return len(op._schema.returns) == 0
|
| 60 |
+
|
| 61 |
+
if "val" not in node.meta:
|
| 62 |
+
if node.op == "call_function" and _no_returns(node.target):
|
| 63 |
+
return
|
| 64 |
+
raise SpecViolationError(f"Node.meta {node.name} is missing val field.")
|
| 65 |
+
|
| 66 |
+
val = node.meta["val"]
|
| 67 |
+
if not _check_correct_val(val):
|
| 68 |
+
raise SpecViolationError(f"Node.meta {node.name} has invalid val field {val}")
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def _check_torch_fn(node: torch.fx.Node) -> None:
|
| 72 |
+
torch_fn = node.meta.get("torch_fn")
|
| 73 |
+
if torch_fn is None:
|
| 74 |
+
raise SpecViolationError(f"Unable to find torch_fn metadata for node {node.name}")
|
| 75 |
+
if (
|
| 76 |
+
not isinstance(torch_fn, tuple) and
|
| 77 |
+
isinstance(torch_fn[0], str) and
|
| 78 |
+
isinstance(torch_fn[1], str)
|
| 79 |
+
):
|
| 80 |
+
raise SpecViolationError(f"Node.meta {node.name} has invalid torch_fn field {torch_fn}")
|
| 81 |
+
|
| 82 |
+
class _VerifierMeta(type):
    # Registry mapping dialect name -> Verifier subclass, populated as each
    # class is defined.
    _registry: Dict[str, Type['Verifier']] = {}

    def __new__(metacls, name, bases, attrs):
        if bases:
            # Subclasses of Verifier: may not override the @final check
            # machinery, and must declare a non-ATEN dialect of their own.
            if "check" in attrs or "_check_graph_module" in attrs:
                raise SyntaxError("Overriding method check is not allowed.")
            assert "dialect" in attrs and attrs["dialect"] != "ATEN"
        else:
            # The base Verifier class itself: must define the check machinery
            # and claim the "ATEN" dialect.
            assert "check" in attrs
            assert "_check_graph_module" in attrs
            assert attrs["dialect"] == "ATEN"

        assert isinstance(attrs["dialect"], str)
        ret = type.__new__(metacls, name, bases, attrs)
        # Register (or re-register) the class under its dialect name.
        metacls._registry[attrs["dialect"]] = ret  # type: ignore[assignment]
        return ret
|
| 99 |
+
|
| 100 |
+
def getattr_recursive(obj: Any, target: str) -> Any:
    """
    Resolve a dotted attribute path (e.g. "sub.mod.weight") starting at ``obj``.

    Raises:
        RuntimeError: if any intermediate attribute along the path is missing.
    """
    target_atoms = target.split('.')
    current = obj
    for depth, atom in enumerate(target_atoms):
        if not hasattr(current, atom):
            raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:depth])}")
        current = getattr(current, atom)
    return current
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class Verifier(metaclass=_VerifierMeta):
    """Base verifier for the core ATEN dialect.

    Subclasses customize behavior via the ``allowed_*`` hooks,
    ``check_valid_op`` and ``check_additional``; ``check`` and
    ``_check_graph_module`` are final (enforced by _VerifierMeta).
    """

    dialect = "ATEN"

    def allowed_builtin_ops(self) -> List:
        # Python builtins permitted as call_function targets (mostly symbolic
        # scalar arithmetic emitted during tracing).
        return [
            operator.getitem,
            operator.add,
            operator.mul,
            operator.sub,
            operator.truediv,
            operator.ge,
            operator.le,
            operator.gt,
            operator.lt,
            operator.eq,
            operator.ne,
            operator.floordiv,
            operator.mod,
            operator.and_,
            operator.or_,
            operator.not_,
            operator.pow,
            operator.neg,
            operator.abs,
            math.ceil,
            math.floor,
            math.trunc,
        ]

    def allowed_op_types(self) -> Tuple[Type[Any], ...]:
        # Operator classes permitted as call_function targets.
        return (OpOverload, HigherOrderOperator)

    def allowed_getattr_types(self) -> Tuple[Type[Any], ...]:
        # Types permitted as get_attr results.
        return (torch.fx.GraphModule,)

    def check_valid_op(self, op):
        # Hook for subclasses: extra per-op validation.
        pass

    def check_additional(self, gm: GraphModule) -> None:
        """
        Additional checks that are specific to some dialects.
        """

    @final
    def check(self, ep: "ExportedProgram") -> None:
        # Full program check: graph module, module call graph, then signature.
        self._check_graph_module(ep.graph_module)
        _verify_exported_program_module_call_graph(ep)
        _verify_exported_program_signature(ep)

    @final
    def _check_graph_module(self, gm: torch.fx.GraphModule) -> None:
        def _allowed_getattr_types() -> Tuple[Type[Any], ...]:
            # Guard against a subclass sneaking `object` in (would allow anything).
            ret = self.allowed_getattr_types()
            assert not any(t is object for t in ret)
            return ret

        def _check_valid_op(op) -> None:
            def _allowed_builtin_ops() -> List:
                ret = self.allowed_builtin_ops()
                assert all(inspect.isbuiltin(op) for op in ret)
                return ret

            def _allowed_op_types() -> Tuple[Type[Any], ...]:
                ret = self.allowed_op_types()
                assert not any(t is object for t in ret)
                return ret

            # TODO Remove this allowlist.
            _allowed_torch_functions = (
                torch.autograd.grad_mode.set_grad_enabled,
                torch.sym_int,
                torch.sym_float,
                torch.sym_ite,
                torch.sym_max,
                torch.sym_min,
                torch.sym_not,
                torch.sym_sqrt,
                # TODO (tmanlaibaatar)
                # Predispatch export is able to contain autograd ops.
                # These will be modeled as HOO later
                torch._C._set_grad_enabled,
            )

            if not isinstance(op, _allowed_op_types()):
                if op not in _allowed_builtin_ops() and op not in _allowed_torch_functions:
                    raise SpecViolationError(
                        f"Operator '{op}' is not an allowed operator type: {_allowed_op_types()}\n"
                        f"Valid builtin ops: {_allowed_builtin_ops()}"
                        f"Valid torch functions: {_allowed_torch_functions}"
                    )

            if isinstance(op, OpOverload):
                # All ops functional
                # TODO (tmanlaibaatar) more proper way is needed here
                if self.dialect != "TRAINING" and not is_functional(op):
                    raise SpecViolationError(
                        f"operator '{op}' is not functional"
                    )
            self.check_valid_op(op)

        # Validate every nested GraphModule, not just the root.
        for mod in gm.modules():
            if not isinstance(mod, torch.fx.GraphModule):
                continue

            mod.graph.lint()
            for node in mod.graph.nodes:
                # TODO(T140410192): should have fake tensor for all dialects
                if node.op in {"call_module", "call_method"}:
                    raise SpecViolationError(
                        f"call_module is not valid: got a class '{node.target}' ",
                    )

                elif node.op == "call_function":
                    _check_val(node)

                    _check_valid_op(node.target)

                elif node.op == "get_attr":
                    if not isinstance(node.target, str):
                        raise SpecViolationError(
                            f"Expected get_attr target to be string, but got {type(node.target)}"
                        )

                    attr = getattr_recursive(mod, node.target)
                    if isinstance(attr, torch.nn.Module):
                        def _is_type(name, ty):
                            return isinstance(getattr(attr, name, None), ty)
                        # Delegate-lowered modules are detected by name (class
                        # lives outside core) and accepted only with the
                        # expected field shapes.
                        if type(attr).__name__ == "LoweredBackendModule":
                            if _is_type("backend_id", str) \
                                    and _is_type("processed_bytes", bytes) \
                                    and _is_type("compile_specs", list) \
                                    and hasattr(attr, "original_module"):
                                continue
                            else:
                                backend_id = getattr(attr, "backend_id", None)
                                processed_bytes = getattr(attr, "processed_bytes", None)
                                compile_specs = getattr(attr, "compile_specs", None)
                                raise SpecViolationError(
                                    f"Invalid get_attr type {type(attr)}. \n"
                                    f"LoweredBackendModule fields: "
                                    f"backend_id(str) : {type(backend_id)}, "
                                    f"processed_bytes(bytes) : {type(processed_bytes)}, "
                                    f"compile_specs(list) : {type(compile_specs)}"
                                )

                    if not isinstance(attr, _allowed_getattr_types()):
                        raise SpecViolationError(
                            f"Invalid get_attr type {type(attr)}. \n"
                            f"Valid get_attr types: {_allowed_getattr_types()}"
                        )


                elif node.op == "placeholder":
                    _check_val(node)
                # TODO(zhxchen17)
                # elif node.op == "output":
                #     _check_flattened_outputs()

        self.check_additional(gm)
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
class TrainingIRVerifier(Verifier):
|
| 272 |
+
dialect = "TRAINING"
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def _verify_exported_program_module_call_graph(exported_program) -> None:
|
| 276 |
+
module_call_graph = exported_program.module_call_graph
|
| 277 |
+
nodes = {
|
| 278 |
+
node.name for node in exported_program.graph.nodes
|
| 279 |
+
}
|
| 280 |
+
for entry in module_call_graph:
|
| 281 |
+
if entry.signature is not None:
|
| 282 |
+
for arg in entry.signature.inputs:
|
| 283 |
+
if arg.name and arg.name not in nodes:
|
| 284 |
+
raise SpecViolationError(
|
| 285 |
+
f"Input {arg.name} does not exist in the graph."
|
| 286 |
+
)
|
| 287 |
+
for arg in entry.signature.outputs:
|
| 288 |
+
if arg.name and arg.name not in nodes:
|
| 289 |
+
raise SpecViolationError(
|
| 290 |
+
f"Output {arg.name} does not exist in the graph."
|
| 291 |
+
)
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def _verify_exported_program_signature(exported_program) -> None:
    """Verify that the graph signature is consistent with the actual graph.

    Checks, in order:
      * the number of placeholder nodes matches the number of input specs,
        and tensor/symint spec names match their placeholder nodes;
      * each non-user input spec (parameter / buffer / constant tensor /
        custom object / token) is well-formed and its target exists in the
        right container (``state_dict`` or ``constants``);
      * the output node's args line up with the output specs: after any
        output tokens come mutated buffers / mutated user inputs, then the
        user outputs, in signature order.

    Raises:
        SpecViolationError: on any mismatch between graph and signature.
    """
    gs = exported_program.graph_signature

    # Check every node in the signature exists in the graph
    input_node_names = [node.name for node in exported_program.graph.nodes if node.op == "placeholder"]

    if len(input_node_names) != len(gs.input_specs):
        raise SpecViolationError(
            f"Number of graph inputs ({len(input_node_names)}) "
            f"does not match number of inputs in the graph signature ({len(gs.input_specs)})"
        )

    for input_spec, node in zip(gs.input_specs, input_node_names):
        # Tensor/SymInt specs carry the placeholder's name; it must match the
        # node at the same position.
        if isinstance(input_spec.arg, (TensorArgument, SymIntArgument)):
            if input_spec.arg.name != node:
                raise SpecViolationError(
                    f"Input spec name {input_spec.arg.name} does not match node name {node}"
                )

        if input_spec.kind == InputKind.USER_INPUT:
            continue

        elif input_spec.kind == InputKind.PARAMETER:
            if not isinstance(input_spec.arg, TensorArgument):
                raise SpecViolationError(
                    f"Parameter {input_spec.name} is not a tensor argument. Found {input_spec.arg} instead."
                )
            if input_spec.target is None:
                raise SpecViolationError(
                    f"InputSpec for {input_spec.name} has no target."
                )

            param = input_spec.target
            if param not in exported_program.state_dict:
                raise SpecViolationError(
                    f"Parameter {param} is not in the state dict."
                )

            if not isinstance(exported_program.state_dict[param], torch.nn.Parameter):
                raise SpecViolationError(
                    f"State dict entry for parameter {param} is not an instance of torch.nn.Parameter."
                )

        elif input_spec.kind == InputKind.BUFFER:
            if not isinstance(input_spec.arg, TensorArgument):
                raise SpecViolationError(
                    f"Buffer {input_spec.name} is not a tensor argument. Found {input_spec.arg} instead."
                )
            if input_spec.target is None:
                raise SpecViolationError(
                    f"InputSpec for {input_spec.name} has no target."
                )

            buffer = input_spec.target
            if input_spec.persistent is None:
                raise SpecViolationError(
                    f"Buffer {buffer} is missing a persistence flag"
                )

            # Persistent buffers must live in the state dict; non-persistent
            # ones must not.
            if input_spec.persistent is True and buffer not in exported_program.state_dict:
                raise SpecViolationError(
                    f"Buffer {buffer} is not in the state dict."
                )

            if input_spec.persistent is False and buffer in exported_program.state_dict:
                raise SpecViolationError(
                    f"Non-persistent buffer {buffer} is in the state dict, it should not be."
                )
        elif input_spec.kind == InputKind.CONSTANT_TENSOR:
            if not isinstance(input_spec.arg, TensorArgument):
                raise SpecViolationError(
                    f"Constant tensor {input_spec.name} is not a tensor argument. Found {input_spec.arg} instead."
                )
            if input_spec.target is None:
                raise SpecViolationError(
                    f"InputSpec for {input_spec.name} has no target."
                )

            tensor_const = input_spec.target
            if tensor_const not in exported_program.constants:
                raise SpecViolationError(
                    f"Constant tensor {tensor_const} is not in the constants dictionary."
                )
        elif input_spec.kind == InputKind.CUSTOM_OBJ:
            if not isinstance(input_spec.arg, CustomObjArgument):
                raise SpecViolationError(
                    f"Custom object {input_spec.name} is not a custom object argument. Found {input_spec.arg} instead."
                )
            if input_spec.target is None:
                raise SpecViolationError(
                    f"InputSpec for {input_spec.name} has no target."
                )

            custom_obj = input_spec.target
            if custom_obj not in exported_program.constants:
                raise SpecViolationError(
                    f"Custom object {custom_obj} is not in the constants dictionary."
                )
        elif input_spec.kind == InputKind.TOKEN:
            if not isinstance(input_spec.arg, TokenArgument):
                # Fixed copy-paste bug: this message previously claimed
                # "Constant tensor ... is not a tensor argument".
                raise SpecViolationError(
                    f"Token {input_spec.name} is not a token argument. Found {input_spec.arg} instead."
                )
        else:
            raise SpecViolationError(
                f"Unknown InputKind {input_spec.kind}."
            )

    # Check outputs
    output_node = list(exported_program.graph.nodes)[-1]
    assert output_node.op == "output"
    output_nodes = [
        arg.name if isinstance(arg, torch.fx.Node) else arg
        for arg in output_node.args[0]
    ]

    if len(output_nodes) != len(gs.output_specs):
        raise SpecViolationError(
            f"Number of output nodes {len(output_nodes)} is different "
            "than the number of outputs specified by the graph signature: \n"
            f"Number of mutated buffers: {len(gs.buffers_to_mutate)}. \n"
            f"Number of user outputs: {len(gs.user_outputs)}. \n"
        )

    # Output layout: [tokens][mutated buffers / mutated user inputs][user outputs]...
    num_tokens = len(gs.output_tokens)
    end = len(gs.buffers_to_mutate) + len(gs.user_inputs_to_mutate) + num_tokens
    mutate_nodes: List[str] = output_nodes[num_tokens:end]
    user_output_nodes = output_nodes[end:end + len(gs.user_outputs)]

    for mutation_node in mutate_nodes:
        if mutation_node in gs.buffers_to_mutate:
            if gs.buffers_to_mutate[mutation_node] not in gs.buffers:
                raise SpecViolationError(
                    f"Buffer output {mutation_node} does not point to a buffer that exists. \n"
                    f"Dict of buffers that are mutated, in order: {gs.buffers_to_mutate} \n"
                    f"Buffer nodes available: {gs.buffers} \n"
                )
        elif mutation_node in gs.user_inputs_to_mutate:
            if gs.user_inputs_to_mutate[mutation_node] not in gs.user_inputs:
                raise SpecViolationError(
                    f"User input output {mutation_node} does not point to a user input that exists. \n"
                    f"Dict of user inputs that are mutated, in order: {gs.user_inputs_to_mutate} \n"
                    f"User input nodes available: {gs.user_inputs} \n")
        else:
            raise SpecViolationError(
                f"Mutation node {mutation_node} is neither a buffer nor a user input. "
                f"Buffers to mutate: {gs.buffers_to_mutate}, User inputs to mutate: {gs.user_inputs_to_mutate}"
            )

    # User outputs must appear in exactly the order recorded by the signature.
    for user_output_node, user_output_name in zip(user_output_nodes, gs.user_outputs):
        if user_output_node != user_output_name:
            raise SpecViolationError(
                f"User output {user_output_node} is not in the correct "
                "order or is not found in the "
                f"exported program's user_output list: {gs.user_outputs}. "
            )
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def load_verifier(dialect: str) -> Type[Verifier]:
    """Return the Verifier subclass registered for *dialect*.

    The "ATEN" and empty dialects fall back to the base ``Verifier`` when no
    class was registered; any other unregistered dialect raises ``KeyError``.
    """
    if dialect in ("ATEN", ""):
        return _VerifierMeta._registry.get(dialect, Verifier)
    return _VerifierMeta._registry[dialect]
|
janus/lib/python3.10/site-packages/torch/_export/wrappers.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from contextlib import contextmanager
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch._custom_ops
|
| 6 |
+
from torch._C import DispatchKey
|
| 7 |
+
from torch._higher_order_ops.strict_mode import strict_mode
|
| 8 |
+
from torch._higher_order_ops.utils import autograd_not_implemented
|
| 9 |
+
from torch._ops import HigherOrderOperator
|
| 10 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 11 |
+
from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree
|
| 12 |
+
from torch.utils import _pytree as pytree
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class ExportTracepoint(HigherOrderOperator):
    """Higher-order operator used as an identity marker in exported graphs.

    Instances record module-call boundaries (see ``_wrap_submodule`` below,
    which calls it with ``kind=...`` / ``path=...`` kwargs); the op itself
    passes its inputs through unchanged under every dispatch key registered
    in this file.
    """

    def __init__(self):
        # Registers the op under the fixed name "_export_tracepoint".
        super().__init__("_export_tracepoint")

    def __call__(self, *args, **kwargs):
        # Plain delegation; dispatch behavior comes from the py_impl
        # registrations below.
        return super().__call__(*args, **kwargs)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# Module-level singleton instance of the tracepoint op; all dispatch-key
# implementations below are registered on this object.
_export_tracepoint = ExportTracepoint()
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@_export_tracepoint.py_impl(ProxyTorchDispatchMode)
def export_tracepoint_dispatch_mode(mode, *args, **kwargs):
    """Proxy-tracing rule: emit a call_function node, return inputs unchanged.

    Unwraps the tracer proxies from both args and kwargs, records a
    ``_export_tracepoint`` node in the traced graph, and associates the
    resulting proxy with the original (identity) outputs.
    """
    proxy_args, proxy_kwargs = pytree.tree_map(
        mode.tracer.unwrap_proxy, (args, kwargs)
    )
    tracepoint_proxy = mode.tracer.create_proxy(
        "call_function", _export_tracepoint, proxy_args, proxy_kwargs
    )
    return track_tensor_tree(args, tracepoint_proxy, constant=None, tracer=mode.tracer)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@_export_tracepoint.py_impl(FakeTensorMode)
def export_tracepoint_fake_tensor_mode(mode, *args, **kwargs):
    """Fake-tensor rule: the tracepoint is the identity on its inputs."""
    with mode:
        return args
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@_export_tracepoint.py_functionalize_impl
def export_tracepoint_functional(ctx, *args, **kwargs):
    """Functionalization rule: unwrap tensors, redispatch, re-wrap the result."""
    plain_args = ctx.unwrap_tensors(args)
    plain_kwargs = ctx.unwrap_tensors(kwargs)

    with ctx.redispatch_to_next():
        result = _export_tracepoint(*plain_args, **plain_kwargs)
        return ctx.wrap_tensors(result)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# No autograd formula is defined for the tracepoint; with deferred_error=True
# the failure is raised only if a backward pass actually flows through it.
_export_tracepoint.py_impl(DispatchKey.Autograd)(
    autograd_not_implemented(_export_tracepoint, deferred_error=True)
)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@_export_tracepoint.py_impl(DispatchKey.CPU)
def export_tracepoint_cpu(*args, **kwargs):
    """Eager CPU execution: a no-op that returns its inputs untouched."""
    return args
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def _wrap_submodule(mod, path, module_call_specs):
|
| 62 |
+
assert isinstance(mod, torch.nn.Module)
|
| 63 |
+
assert path != ""
|
| 64 |
+
submodule = mod
|
| 65 |
+
for name in path.split("."):
|
| 66 |
+
if not hasattr(submodule, name):
|
| 67 |
+
raise RuntimeError(f"Couldn't find submodule at path {path}")
|
| 68 |
+
submodule = getattr(submodule, name)
|
| 69 |
+
|
| 70 |
+
def update_module_call_signatures(path, in_spec, out_spec):
|
| 71 |
+
if path in module_call_specs:
|
| 72 |
+
assert module_call_specs[path]["in_spec"] == in_spec
|
| 73 |
+
assert module_call_specs[path]["out_spec"] == out_spec
|
| 74 |
+
module_call_specs[path] = {"in_spec": in_spec, "out_spec": out_spec}
|
| 75 |
+
|
| 76 |
+
def check_flattened(flat_args):
|
| 77 |
+
for a in flat_args:
|
| 78 |
+
if not (isinstance(a, (torch.Tensor, str, int, float, bool)) or a is None):
|
| 79 |
+
raise AssertionError(
|
| 80 |
+
f"Only Tensors or scalars are supported as pytree flattened inputs, got: {a}"
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
def pre_hook(module, args, kwargs):
|
| 84 |
+
flat_args, in_spec = pytree.tree_flatten((args, kwargs))
|
| 85 |
+
check_flattened(flat_args)
|
| 86 |
+
flat_args = _export_tracepoint(*flat_args, kind="module_call_inputs", path=path)
|
| 87 |
+
args, kwargs = pytree.tree_unflatten(flat_args, in_spec)
|
| 88 |
+
return args, kwargs
|
| 89 |
+
|
| 90 |
+
def post_hook(module, args, kwargs, res):
|
| 91 |
+
_, in_spec = pytree.tree_flatten((args, kwargs))
|
| 92 |
+
flat_res, out_spec = pytree.tree_flatten(res)
|
| 93 |
+
check_flattened(flat_res)
|
| 94 |
+
flat_res = _export_tracepoint(*flat_res, kind="module_call_outputs", path=path)
|
| 95 |
+
update_module_call_signatures(path, in_spec, out_spec)
|
| 96 |
+
return pytree.tree_unflatten(flat_res, out_spec)
|
| 97 |
+
|
| 98 |
+
pre_handle = submodule.register_forward_pre_hook(pre_hook, with_kwargs=True)
|
| 99 |
+
post_handle = submodule.register_forward_hook(post_hook, with_kwargs=True)
|
| 100 |
+
return pre_handle, post_handle
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
@contextmanager
|
| 104 |
+
def _wrap_submodules(f, preserve_signature, module_call_signatures):
|
| 105 |
+
handles = []
|
| 106 |
+
|
| 107 |
+
try:
|
| 108 |
+
for path in preserve_signature:
|
| 109 |
+
handles.extend(_wrap_submodule(f, path, module_call_signatures))
|
| 110 |
+
yield
|
| 111 |
+
finally:
|
| 112 |
+
for handle in handles:
|
| 113 |
+
handle.remove()
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def _mark_strict_experimental(cls):
|
| 117 |
+
def call(self, *args):
|
| 118 |
+
return strict_mode(self, args)
|
| 119 |
+
|
| 120 |
+
cls.__call__ = call
|
| 121 |
+
return cls
|
janus/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (47.7 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_gpu_trace.cpython-310.pyc
ADDED
|
Binary file (2.73 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc
ADDED
|
Binary file (20.8 kB). View file
|
|
|