Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- llava_next/lib/python3.10/site-packages/torch/_dispatch/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/torch/_dispatch/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_dispatch/__pycache__/python.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_dispatch/python.py +178 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/dependencies.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_operators.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/closure.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/computation.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/config.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/debug.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/device_context.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/extract_compiled_graph.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/ir_cache.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/metrics.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/ts_backend.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/debug.py +21 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/ir_cache.py +13 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/tensor_factory_functions.py +48 -0
- llava_next/lib/python3.10/site-packages/torch/_lazy/ts_backend.py +6 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__init__.py +89 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__init__.pyi +11 -0
- llava_next/lib/python3.10/site-packages/torch/fx/_compatibility.py +34 -0
- llava_next/lib/python3.10/site-packages/torch/fx/_pytree.py +42 -0
- llava_next/lib/python3.10/site-packages/torch/fx/_symbolic_trace.py +1159 -0
- llava_next/lib/python3.10/site-packages/torch/fx/annotate.py +21 -0
- llava_next/lib/python3.10/site-packages/torch/fx/config.py +6 -0
- llava_next/lib/python3.10/site-packages/torch/fx/graph.py +1570 -0
- llava_next/lib/python3.10/site-packages/torch/fx/graph_module.py +799 -0
- llava_next/lib/python3.10/site-packages/torch/fx/immutable_collections.py +52 -0
- llava_next/lib/python3.10/site-packages/torch/fx/interpreter.py +505 -0
- llava_next/lib/python3.10/site-packages/torch/fx/node.py +656 -0
- llava_next/lib/python3.10/site-packages/torch/fx/operator_schemas.py +427 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__init__.py +11 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/graph_manipulation.py +110 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/net_min_base.py +618 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/reinplace.py +674 -0
- llava_next/lib/python3.10/site-packages/torch/fx/proxy.py +559 -0
- llava_next/lib/python3.10/site-packages/torch/fx/subgraph_rewriter.py +339 -0
- llava_next/lib/python3.10/site-packages/torch/fx/tensor_type.py +104 -0
- llava_next/lib/python3.10/site-packages/torch/fx/traceback.py +100 -0
- llava_next/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc +3 -0
- llava_next/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc +3 -0
- vlmpy310/lib/python3.10/site-packages/pyglet/input/__pycache__/controller_db.cpython-310.pyc +3 -0
- vlmpy310/lib/python3.10/site-packages/skimage/filters/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -1189,3 +1189,6 @@ vlmpy310/lib/python3.10/site-packages/pyglet/gl/__pycache__/gl_compat.cpython-31
|
|
| 1189 |
vlmpy310/lib/python3.10/site-packages/pyglet/libs/win32/__pycache__/constants.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1190 |
llava_next/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 filter=lfs diff=lfs merge=lfs -text
|
| 1191 |
vlmpy310/lib/python3.10/site-packages/pyglet/gl/__pycache__/gl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 1189 |
vlmpy310/lib/python3.10/site-packages/pyglet/libs/win32/__pycache__/constants.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1190 |
llava_next/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 filter=lfs diff=lfs merge=lfs -text
|
| 1191 |
vlmpy310/lib/python3.10/site-packages/pyglet/gl/__pycache__/gl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1192 |
+
llava_next/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1193 |
+
llava_next/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1194 |
+
vlmpy310/lib/python3.10/site-packages/pyglet/input/__pycache__/controller_db.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
llava_next/lib/python3.10/site-packages/torch/_dispatch/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/torch/_dispatch/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (171 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_dispatch/__pycache__/python.cpython-310.pyc
ADDED
|
Binary file (6.66 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_dispatch/python.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
import unittest.mock
|
| 3 |
+
from contextlib import contextmanager
|
| 4 |
+
from typing import Iterator
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch._C
|
| 8 |
+
import torch._ops
|
| 9 |
+
import torch.utils._python_dispatch
|
| 10 |
+
import torch.utils._pytree as pytree
|
| 11 |
+
|
| 12 |
+
__all__ = ["enable_python_dispatcher", "no_python_dispatcher", "enable_pre_dispatch"]

# Thin aliases over the C++ dispatcher guard objects.  Each is used as a
# context manager to toggle the corresponding thread-local dispatcher state.
no_python_dispatcher = torch._C._DisablePythonDispatcher
enable_python_dispatcher = torch._C._EnablePythonDispatcher
enable_pre_dispatch = torch._C._EnablePreDispatch

# When True, functionalized ops are cross-checked against a reference run
# (see make_crossref_functionalize).  Flipped to True via unittest.mock.patch
# inside enable_crossref_functionalize; never set directly.
CROSSREF_FUNCTIONALIZE = False
|
| 21 |
+
def all_py_loaded_overloads() -> Iterator[torch._ops.OpOverload]:
    """Yield every OpOverload object that has been materialized from Python.

    Warning: the set of overloads reported here is very subtle.  It is
    precisely the set of torch.ops functions that have actually been accessed
    from Python (e.g., torch.ops.aten.blah was called at some point).  This
    is DIFFERENT from the set of registered operators, which is in general a
    larger set containing every operator registered via C++ static
    initializers or Python operator registration.  Nothing on torch.ops is
    eagerly populated here; the traversal is lazy.

    In other words, this is good for walking everything that has an
    OpOverload object allocated in Python.  We use it for cache invalidation,
    but don't rely on it being complete.

    Even a full report of C++-registered overloads would not be complete
    either: a later lazy library load can trigger more registrations.
    """
    for namespace_name in torch.ops:
        namespace = getattr(torch.ops, namespace_name)
        for packet_name in namespace:
            packet = getattr(namespace, packet_name)
            for overload_name in packet:
                yield getattr(packet, overload_name)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@contextmanager
def suspend_functionalization():
    """Temporarily disable functionalization in this thread.

    Captures whether the Functionalize dispatch key is currently included in
    TLS (and the reapply-views flag), disables functionalization if it was
    on, and restores the prior state on exit — even if the body raises.
    """
    was_functionalizing = torch._C._dispatch_tls_is_dispatch_key_included(
        torch._C.DispatchKey.Functionalize
    )
    reapply_views = torch._C._functionalization_reapply_views_tls()
    if was_functionalizing:
        torch._disable_functionalization()
    try:
        yield
    finally:
        if was_functionalizing:
            torch._enable_functionalization(reapply_views=reapply_views)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def check_tensor_metadata_matches(nv, rv, desc):
    """Assert two tensors agree on size, dtype, and significant strides.

    Args:
        nv: tensor from the reference (e.g. functionalized/fake) run.
        rv: tensor from the real run.
        desc: zero-argument callable producing a description of the call;
            evaluated lazily so formatting only happens on a failure.
    """
    assert callable(desc)
    assert nv.size() == rv.size(), f"{desc()}: sizes {nv.size()} != {rv.size()}"
    assert nv.dtype == rv.dtype, f"{desc()}: dtype {nv.dtype} != {rv.dtype}"
    same_strides, idx = torch._prims_common.check_significant_strides(
        nv, rv, only_cuda=False
    )
    assert (
        same_strides
    ), f"{desc()}: strides {nv.stride()} != {rv.stride()} (mismatch at index {idx})"


def check_metadata_matches(n, r, desc):
    """Flatten two pytrees of outputs and compare tensor metadata pairwise.

    Leaves whose real-run value is not a Tensor are skipped.  Raises
    AssertionError (via check_tensor_metadata_matches) on any mismatch.
    """
    assert callable(desc)
    n_vals, n_spec = pytree.tree_flatten(n)
    r_vals, r_spec = pytree.tree_flatten(r)
    # TODO: test the specs match; empirically sometimes we have a tuple
    # on one side and a list on the other
    assert len(n_vals) == len(r_vals), f"{len(n_vals)} != {len(r_vals)}"
    # enumerate instead of zip(range(len(...)), ...) — same pairing, idiomatic.
    for i, (nv, rv) in enumerate(zip(n_vals, r_vals)):
        if not isinstance(rv, torch.Tensor):
            continue
        check_tensor_metadata_matches(nv, rv, lambda: f"{desc()} output {i}")
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class Lit:
    """A string wrapper whose repr() is the raw string, with no quoting.

    Used when pretty-printing call arguments so generated expressions such
    as "torch.empty_strided(...)" render as code, not as quoted literals.
    """

    def __init__(self, s):
        self.s = s

    def __repr__(self):
        return self.s
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def _fmt(a: object) -> object:
|
| 96 |
+
if isinstance(a, torch.Tensor):
|
| 97 |
+
return Lit(
|
| 98 |
+
f"torch.empty_strided({tuple(a.size())}, {a.stride()}, dtype={a.dtype})"
|
| 99 |
+
)
|
| 100 |
+
else:
|
| 101 |
+
return a
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def make_crossref_functionalize(op, final_key):
    """Build a handler for ``op`` that cross-checks functionalization.

    The returned handler runs ``op`` twice: once on fake tensors under
    functionalization-suspended fake mode, and once for real through the
    dispatch key ``final_key``; it then asserts (check_metadata_matches)
    that output tensor metadata agrees and returns the real result.

    For aten.lift_fresh.default, ``final_key`` itself is returned unchanged
    (no cross-checking).
    """
    from torch._subclasses.fake_tensor import FakeTensorMode

    # This case is pretty weird, suppress it for now
    if op == torch.ops.aten.lift_fresh.default:
        return final_key

    def handler(*args, **kwargs):
        fake_mode = FakeTensorMode()

        # Map an input to a fake tensor, unwrapping functional tensors first
        # so the fake run sees the underlying data tensor.
        def fakeify_defun(t):
            if isinstance(t, torch.Tensor):
                if torch._is_functional_tensor(t):
                    r = torch._from_functional_tensor(t)
                    # NB: This assumes that the inner tensor sizes/strides match
                    # the outer tensor sizes/strides. This doesn't necessarily have to
                    # be the case, see discussion at
                    # https://github.com/pytorch/pytorch/pull/87610/files/401ddeda1d769bedc88a12de332c7357b60e51a4#r1007264456
                    assert t.size() == r.size()
                    assert t.stride() == r.stride()
                else:
                    r = t
                # TODO: suppress guards
                return fake_mode.from_tensor(r)
            return t

        # Detach tensors so the copies kept for error reporting don't hold
        # autograd graphs alive.
        def maybe_detach(t):
            if isinstance(t, torch.Tensor):
                return t.detach()
            else:
                return t

        # TODO: This probably does the wrong thing if you're running other
        # substantive modes with the normal op outside here
        with torch.utils._python_dispatch._disable_current_modes(), suspend_functionalization():
            f_args, f_kwargs = pytree.tree_map(fakeify_defun, (args, kwargs))
            orig_f_args, orig_f_kwargs = pytree.tree_map(
                maybe_detach, (f_args, f_kwargs)
            )
            with fake_mode:
                f_r = op(*f_args, **f_kwargs)
        r = op._op_dk(final_key, *args, **kwargs)

        # Lazily format the original call for assertion messages; only
        # evaluated when a metadata mismatch is actually detected.
        def desc():
            fmt_args = ", ".join(
                itertools.chain(
                    (repr(pytree.tree_map(_fmt, a)) for a in orig_f_args),
                    (
                        f"{k}={pytree.tree_map(_fmt, v)}"
                        for k, v in orig_f_kwargs.items()
                    ),
                )
            )
            return f"{op}({fmt_args})"

        check_metadata_matches(f_r, r, desc)
        return r

    return handler
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
# NB: enabling this is slow, don't do it in a hot loop. This is purely
# for debugging purposes.
@contextmanager
def enable_crossref_functionalize():
    """Cross-check functionalized ops against real execution for the duration.

    Invalidates the cached Functionalize-key dispatch entry on every
    Python-loaded overload (both on entry and again on exit, so stale
    cross-ref handlers don't linger), enables the Python dispatcher, and
    patches CROSSREF_FUNCTIONALIZE to True inside the block.
    """
    for op in all_py_loaded_overloads():
        op._uncache_dispatch(torch._C.DispatchKey.Functionalize)
    try:
        with enable_python_dispatcher(), unittest.mock.patch(
            "torch._dispatch.python.CROSSREF_FUNCTIONALIZE", True
        ):
            yield
    finally:
        for op in all_py_loaded_overloads():
            op._uncache_dispatch(torch._C.DispatchKey.Functionalize)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc
ADDED
|
Binary file (6.14 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/dependencies.cpython-310.pyc
ADDED
|
Binary file (13.7 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc
ADDED
|
Binary file (2.71 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc
ADDED
|
Binary file (2.61 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_operators.cpython-310.pyc
ADDED
|
Binary file (1.22 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.36 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/closure.cpython-310.pyc
ADDED
|
Binary file (5.28 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/computation.cpython-310.pyc
ADDED
|
Binary file (1.22 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/config.cpython-310.pyc
ADDED
|
Binary file (806 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/debug.cpython-310.pyc
ADDED
|
Binary file (934 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/device_context.cpython-310.pyc
ADDED
|
Binary file (1.02 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/extract_compiled_graph.cpython-310.pyc
ADDED
|
Binary file (7.21 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/ir_cache.cpython-310.pyc
ADDED
|
Binary file (635 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/metrics.cpython-310.pyc
ADDED
|
Binary file (974 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc
ADDED
|
Binary file (720 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_lazy/__pycache__/ts_backend.cpython-310.pyc
ADDED
|
Binary file (397 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_lazy/debug.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch._C._lazy
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def render_ir_graph(tensors):
    """Return the LTC IR graph for ``tensors`` as Graphviz "dot" text.

    The returned text can be processed by tools like ``dot`` to render the
    graph as pdf, png, etc.
    """
    dot_source = torch._C._lazy._get_tensors_dot(tensors)
    return dot_source
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def dump_ir(tensors, ir_format):
    """Return a dump of ``tensors`` in the requested format.

    Valid formats are:
      - "text": the LTC IR
      - "backend": the active backend's IR

    Raises:
        RuntimeError: if ``ir_format`` is not one of the formats above.
    """
    # (Docstring fixed: "Valid format are" -> "Valid formats are",
    # "activate backend" -> "active backend".)
    if ir_format == "text":
        return torch._C._lazy._get_tensors_text(tensors)
    elif ir_format == "backend":
        return torch._C._lazy._get_tensors_backend(tensors)
    else:
        raise RuntimeError(f"Unrecognized IR format: {ir_format}")
|
llava_next/lib/python3.10/site-packages/torch/_lazy/ir_cache.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch._C._lazy
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def dump(dot_file_name: str):
    """Write the TrieCache to ``dot_file_name`` in Graphviz dot format."""
    return torch._C._lazy._dump_ir_cache(dot_file_name)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def reset():
    """Clear the TrieCache.

    Needed in testing so IR nodes are not reused between different tests.
    """
    return torch._C._lazy._clear_ir_cache()
|
llava_next/lib/python3.10/site-packages/torch/_lazy/tensor_factory_functions.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
tensor_factory_functions defines the list of torch functions that create tensors.
|
| 5 |
+
The list is grabbed by searching thru native_functions.yaml by the following
|
| 6 |
+
regular expression:
|
| 7 |
+
|
| 8 |
+
cat native_functions.yaml | grep 'func:' | grep -v "Tensor.*->" | grep "[-]>.*Tensor"
|
| 9 |
+
|
| 10 |
+
It's possible that new tensor factory functions are added making this list stale.
|
| 11 |
+
Use at your own risk or regenerate the list.
|
| 12 |
+
"""
|
| 13 |
+
tensor_factory_functions = (
|
| 14 |
+
torch._cudnn_init_dropout_state,
|
| 15 |
+
torch.arange,
|
| 16 |
+
torch.bartlett_window,
|
| 17 |
+
torch.blackman_window,
|
| 18 |
+
torch._empty_affine_quantized,
|
| 19 |
+
torch.empty_strided,
|
| 20 |
+
torch.eye,
|
| 21 |
+
torch.full,
|
| 22 |
+
torch.from_file,
|
| 23 |
+
torch.hann_window,
|
| 24 |
+
torch.hamming_window,
|
| 25 |
+
torch.kaiser_window,
|
| 26 |
+
torch.linspace,
|
| 27 |
+
torch.logspace,
|
| 28 |
+
torch.ones,
|
| 29 |
+
torch.scalar_tensor,
|
| 30 |
+
torch.rand,
|
| 31 |
+
torch.randint,
|
| 32 |
+
torch.randn,
|
| 33 |
+
torch.randperm,
|
| 34 |
+
torch.range,
|
| 35 |
+
torch._efficientzerotensor,
|
| 36 |
+
torch.zeros,
|
| 37 |
+
torch.tril_indices,
|
| 38 |
+
torch.triu_indices,
|
| 39 |
+
# Note: the following functions match the regular expression search above but
|
| 40 |
+
# they are not available in the torch module. Comment out.
|
| 41 |
+
# torch._sparse_coo_tensor_with_dims,
|
| 42 |
+
# torch.fft_fftfreq,
|
| 43 |
+
# torch.fft_rfftfreq,
|
| 44 |
+
) + (
|
| 45 |
+
# torch.tensor is special since it's not in native_functions.yaml
|
| 46 |
+
# add it separately
|
| 47 |
+
torch.tensor,
|
| 48 |
+
)
|
llava_next/lib/python3.10/site-packages/torch/_lazy/ts_backend.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch._C._lazy_ts_backend
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def init():
    """Bring up the lazy-tensor TorchScript backend."""
    torch._C._lazy_ts_backend._init()
|
llava_next/lib/python3.10/site-packages/torch/fx/__init__.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r'''
|
| 2 |
+
FX is a toolkit for developers to use to transform ``nn.Module``
|
| 3 |
+
instances. FX consists of three main components: a **symbolic tracer,**
|
| 4 |
+
an **intermediate representation**, and **Python code generation**. A
|
| 5 |
+
demonstration of these components in action:
|
| 6 |
+
|
| 7 |
+
::
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
# Simple module for demonstration
|
| 11 |
+
class MyModule(torch.nn.Module):
|
| 12 |
+
def __init__(self):
|
| 13 |
+
super().__init__()
|
| 14 |
+
self.param = torch.nn.Parameter(torch.rand(3, 4))
|
| 15 |
+
self.linear = torch.nn.Linear(4, 5)
|
| 16 |
+
|
| 17 |
+
def forward(self, x):
|
| 18 |
+
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
|
| 19 |
+
|
| 20 |
+
module = MyModule()
|
| 21 |
+
|
| 22 |
+
from torch.fx import symbolic_trace
|
| 23 |
+
# Symbolic tracing frontend - captures the semantics of the module
|
| 24 |
+
symbolic_traced : torch.fx.GraphModule = symbolic_trace(module)
|
| 25 |
+
|
| 26 |
+
# High-level intermediate representation (IR) - Graph representation
|
| 27 |
+
print(symbolic_traced.graph)
|
| 28 |
+
"""
|
| 29 |
+
graph():
|
| 30 |
+
%x : [num_users=1] = placeholder[target=x]
|
| 31 |
+
%param : [num_users=1] = get_attr[target=param]
|
| 32 |
+
%add : [num_users=1] = call_function[target=operator.add](args = (%x, %param), kwargs = {})
|
| 33 |
+
%linear : [num_users=1] = call_module[target=linear](args = (%add,), kwargs = {})
|
| 34 |
+
%clamp : [num_users=1] = call_method[target=clamp](args = (%linear,), kwargs = {min: 0.0, max: 1.0})
|
| 35 |
+
return clamp
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
# Code generation - valid Python code
|
| 39 |
+
print(symbolic_traced.code)
|
| 40 |
+
"""
|
| 41 |
+
def forward(self, x):
|
| 42 |
+
param = self.param
|
| 43 |
+
add = x + param; x = param = None
|
| 44 |
+
linear = self.linear(add); add = None
|
| 45 |
+
clamp = linear.clamp(min = 0.0, max = 1.0); linear = None
|
| 46 |
+
return clamp
|
| 47 |
+
"""
|
| 48 |
+
|
| 49 |
+
The **symbolic tracer** performs "symbolic execution" of the Python
|
| 50 |
+
code. It feeds fake values, called Proxies, through the code. Operations
|
| 51 |
+
on these Proxies are recorded. More information about symbolic tracing
|
| 52 |
+
can be found in the :func:`symbolic_trace` and :class:`Tracer`
|
| 53 |
+
documentation.
|
| 54 |
+
|
| 55 |
+
The **intermediate representation** is the container for the operations
|
| 56 |
+
that were recorded during symbolic tracing. It consists of a list of
|
| 57 |
+
Nodes that represent function inputs, callsites (to functions, methods,
|
| 58 |
+
or :class:`torch.nn.Module` instances), and return values. More information
|
| 59 |
+
about the IR can be found in the documentation for :class:`Graph`. The
|
| 60 |
+
IR is the format on which transformations are applied.
|
| 61 |
+
|
| 62 |
+
**Python code generation** is what makes FX a Python-to-Python (or
|
| 63 |
+
Module-to-Module) transformation toolkit. For each Graph IR, we can
|
| 64 |
+
create valid Python code matching the Graph's semantics. This
|
| 65 |
+
functionality is wrapped up in :class:`GraphModule`, which is a
|
| 66 |
+
:class:`torch.nn.Module` instance that holds a :class:`Graph` as well as a
|
| 67 |
+
``forward`` method generated from the Graph.
|
| 68 |
+
|
| 69 |
+
Taken together, this pipeline of components (symbolic tracing ->
|
| 70 |
+
intermediate representation -> transforms -> Python code generation)
|
| 71 |
+
constitutes the Python-to-Python transformation pipeline of FX. In
|
| 72 |
+
addition, these components can be used separately. For example,
|
| 73 |
+
symbolic tracing can be used in isolation to capture a form of
|
| 74 |
+
the code for analysis (and not transformation) purposes. Code
|
| 75 |
+
generation can be used for programmatically generating models, for
|
| 76 |
+
example from a config file. There are many uses for FX!
|
| 77 |
+
|
| 78 |
+
Several example transformations can be found at the
|
| 79 |
+
`examples <https://github.com/pytorch/examples/tree/master/fx>`__
|
| 80 |
+
repository.
|
| 81 |
+
'''
|
| 82 |
+
|
| 83 |
+
from .graph_module import GraphModule
|
| 84 |
+
from ._symbolic_trace import symbolic_trace, Tracer, wrap, PH, ProxyableClassMeta
|
| 85 |
+
from .graph import Graph, CodeGen
|
| 86 |
+
from .node import Node, map_arg, has_side_effect
|
| 87 |
+
from .proxy import Proxy
|
| 88 |
+
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
|
| 89 |
+
from .subgraph_rewriter import replace_pattern
|
llava_next/lib/python3.10/site-packages/torch/fx/__init__.pyi
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ._symbolic_trace import (
|
| 2 |
+
symbolic_trace as symbolic_trace,
|
| 3 |
+
Tracer as Tracer,
|
| 4 |
+
wrap as wrap,
|
| 5 |
+
)
|
| 6 |
+
from .graph import Graph as Graph
|
| 7 |
+
from .graph_module import GraphModule as GraphModule
|
| 8 |
+
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
|
| 9 |
+
from .node import has_side_effect as has_side_effect, map_arg as map_arg, Node as Node
|
| 10 |
+
from .proxy import Proxy as Proxy
|
| 11 |
+
from .subgraph_rewriter import replace_pattern as replace_pattern
|
llava_next/lib/python3.10/site-packages/torch/fx/_compatibility.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict
|
| 2 |
+
import textwrap
|
| 3 |
+
|
| 4 |
+
_BACK_COMPAT_OBJECTS : Dict[Any, None] = {}
|
| 5 |
+
_MARKED_WITH_COMPATIBILITY : Dict[Any, None] = {}
|
| 6 |
+
|
| 7 |
+
def compatibility(is_backward_compatible : bool):
|
| 8 |
+
if is_backward_compatible:
|
| 9 |
+
|
| 10 |
+
def mark_back_compat(fn):
|
| 11 |
+
docstring = textwrap.dedent(getattr(fn, '__doc__', None) or '')
|
| 12 |
+
docstring += """
|
| 13 |
+
.. note::
|
| 14 |
+
Backwards-compatibility for this API is guaranteed.
|
| 15 |
+
"""
|
| 16 |
+
fn.__doc__ = docstring
|
| 17 |
+
_BACK_COMPAT_OBJECTS.setdefault(fn)
|
| 18 |
+
_MARKED_WITH_COMPATIBILITY.setdefault(fn)
|
| 19 |
+
return fn
|
| 20 |
+
|
| 21 |
+
return mark_back_compat
|
| 22 |
+
else:
|
| 23 |
+
|
| 24 |
+
def mark_not_back_compat(fn):
|
| 25 |
+
docstring = textwrap.dedent(getattr(fn, '__doc__', None) or '')
|
| 26 |
+
docstring += """
|
| 27 |
+
.. warning::
|
| 28 |
+
This API is experimental and is *NOT* backward-compatible.
|
| 29 |
+
"""
|
| 30 |
+
fn.__doc__ = docstring
|
| 31 |
+
_MARKED_WITH_COMPATIBILITY.setdefault(fn)
|
| 32 |
+
return fn
|
| 33 |
+
|
| 34 |
+
return mark_not_back_compat
|
llava_next/lib/python3.10/site-packages/torch/fx/_pytree.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Callable, Any, Tuple, List, Dict, Type, NamedTuple
|
| 2 |
+
from torch.utils._pytree import PyTree, TreeSpec, LeafSpec
|
| 3 |
+
from collections import namedtuple
|
| 4 |
+
|
| 5 |
+
# Signature of a flatten function that is driven by a TreeSpec.
FlattenFuncSpec = Callable[[PyTree, TreeSpec], List]

# Registry mapping a container type to its spec-driven flatten function.
SUPPORTED_NODES: Dict[Type[Any], Any] = {}


def register_pytree_flatten_spec(typ: Any, flatten_fn_spec: FlattenFuncSpec) -> None:
    """Register ``flatten_fn_spec`` as the spec-driven flattener for ``typ``."""
    SUPPORTED_NODES[typ] = flatten_fn_spec
|
| 10 |
+
|
| 11 |
+
def tree_flatten_spec(pytree: PyTree, spec: TreeSpec) -> List[Any]:
    """Flatten ``pytree`` into a list of leaves, guided by ``spec``.

    Unlike plain tree_flatten, the structure is dictated by ``spec``: each
    non-leaf level is flattened by the function registered for ``spec.type``
    in SUPPORTED_NODES, then recursion continues into spec.children_specs.

    Raises:
        RuntimeError: if a non-leaf spec's type has no registered flattener.
    """
    if isinstance(spec, LeafSpec):
        return [pytree]
    if spec.type not in SUPPORTED_NODES:
        raise RuntimeError(
            f"{type(pytree)} does not have a flatten_fn_spec associated with it. Please register one with "
            "torch.fx._pytree.register_pytree_flatten_spec. If you have serialized your model, make "
            "sure that any custom pytrees have been registered before loading it.")
    children = SUPPORTED_NODES[spec.type](pytree, spec)
    flat: List[Any] = []
    for child, child_spec in zip(children, spec.children_specs):
        flat.extend(tree_flatten_spec(child, child_spec))
    return flat
|
| 26 |
+
|
| 27 |
+
def _dict_flatten_spec(d: Dict[Any, Any], spec: TreeSpec) -> List[Any]:
    # Values ordered by the key order recorded in the spec, not the dict's own.
    return [d[k] for k in spec.context]

def _list_flatten_spec(d: List[Any], spec: TreeSpec) -> List[Any]:
    return [d[i] for i in range(len(spec.children_specs))]

def _tuple_flatten_spec(d: Tuple[Any], spec: TreeSpec) -> List[Any]:
    return [d[i] for i in range(len(spec.children_specs))]

def _namedtuple_flatten_spec(d: NamedTuple, spec: TreeSpec) -> List[Any]:
    return [d[i] for i in range(len(spec.children_specs))]

register_pytree_flatten_spec(dict, _dict_flatten_spec)
register_pytree_flatten_spec(list, _list_flatten_spec)
register_pytree_flatten_spec(tuple, _tuple_flatten_spec)
# Fix: namedtuple was registered with _tuple_flatten_spec, leaving
# _namedtuple_flatten_spec dead code.  Both flatten positionally, so this
# is behaviorally identical; it just wires up the intended helper.
register_pytree_flatten_spec(namedtuple, _namedtuple_flatten_spec)
|
llava_next/lib/python3.10/site-packages/torch/fx/_symbolic_trace.py
ADDED
|
@@ -0,0 +1,1159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import builtins
|
| 2 |
+
import copy
|
| 3 |
+
import functools
|
| 4 |
+
import inspect
|
| 5 |
+
import math
|
| 6 |
+
import os
|
| 7 |
+
import warnings
|
| 8 |
+
import collections
|
| 9 |
+
from itertools import chain
|
| 10 |
+
from types import CodeType, FunctionType, ModuleType
|
| 11 |
+
from typing import (
|
| 12 |
+
Any,
|
| 13 |
+
Callable,
|
| 14 |
+
Dict,
|
| 15 |
+
List,
|
| 16 |
+
NamedTuple,
|
| 17 |
+
Optional,
|
| 18 |
+
Set,
|
| 19 |
+
Tuple,
|
| 20 |
+
Type,
|
| 21 |
+
Union,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
import torch
|
| 25 |
+
import torch.utils._pytree as pytree
|
| 26 |
+
from torch._C import ScriptObject # type: ignore[attr-defined]
|
| 27 |
+
|
| 28 |
+
from ._compatibility import compatibility
|
| 29 |
+
from .graph import _PyTreeCodeGen, _PyTreeInfo, Graph
|
| 30 |
+
from .graph_module import GraphModule
|
| 31 |
+
from .node import Argument, base_types, map_aggregate
|
| 32 |
+
from .proxy import ParameterProxy, Proxy, TracerBase, Scope, ScopeContextManager
|
| 33 |
+
|
| 34 |
+
# Combined code-object flag mask marking functions that take *args or **kwargs.
HAS_VARSTUFF = inspect.CO_VARARGS | inspect.CO_VARKEYWORDS

# Stash the pristine nn.Module entry points at import time. These must live
# at global scope so that patched/nested calls during tracing can always
# reach the original implementations.
_orig_module_call: Callable = torch.nn.Module.__call__
_orig_module_getattr: Callable = torch.nn.Module.__getattr__
|
| 39 |
+
|
| 40 |
+
# Classes created via ProxyableClassMeta; a dict is used purely for its
# ordered-set semantics (values are always None).
_proxyable_classes: Dict[Type, None] = {}

# Global flag reported by is_fx_tracing(); toggled elsewhere while a
# symbolic trace is in progress.
_is_fx_tracing_flag = False


def is_fx_tracing():
    """Return whether an FX symbolic trace is currently in progress."""
    return _is_fx_tracing_flag
|
| 47 |
+
|
| 48 |
+
@compatibility(is_backward_compatible=True)
class ProxyableClassMeta(type):
    """
    Metaclass that makes construction of its instances symbolically
    traceable by ``torch.fx``.

    Declaring a class with ``metaclass=ProxyableClassMeta`` records it in
    ``_proxyable_classes``. When such a class is instantiated with at least
    one ``Proxy`` argument (i.e. during an active trace), the constructor
    call is emitted as a ``call_function`` node in the traced graph instead
    of running ``__init__`` eagerly; with only concrete arguments the class
    is constructed normally. For example, a ``TensorPair`` class defined
    with this metaclass can be built inside a function passed to
    ``torch.fx.symbolic_trace`` and the construction will appear in the
    generated code.
    """

    def __init__(cls, name, bases, attrs):
        # Record every class created with this metaclass.
        _proxyable_classes.setdefault(cls)
        super().__init__(name, bases, attrs)

    def __call__(cls, *args, **kwargs):
        instance = cls.__new__(cls)  # type: ignore[call-overload]

        proxies: List[Proxy] = []

        def record_if_proxy(a):
            if isinstance(a, Proxy):
                proxies.append(a)

        map_aggregate(args, record_if_proxy)
        map_aggregate(kwargs, record_if_proxy)

        if proxies:
            # Symbolic construction: emit a node that builds the class at
            # runtime rather than constructing it now.
            tracer = proxies[0].tracer
            return tracer.create_proxy("call_function", cls, args, kwargs)
        # Concrete construction: initialize and return the real instance.
        cls.__init__(instance, *args, **kwargs)  # type: ignore[misc]
        return instance
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def _patch_function(fn: FunctionType, nargs: int) -> FunctionType:
    """Return a copy of *fn* whose code object takes *nargs* plain
    positional arguments and no ``*args``/``**kwargs``.

    This lets the tracer insert placeholder nodes for what were originally
    varargs: Python will bind them as ordinary locals instead of trying to
    pack/unpack them.
    """
    co = fn.__code__
    # Strip the CO_VARARGS / CO_VARKEYWORDS bits.
    flags = co.co_flags & ~HAS_VARSTUFF

    # CodeType's constructor signature grew new positional fields over the
    # years, so assemble the argument list conditionally based on which
    # attributes this interpreter's code objects expose.
    fields: List[Any] = [nargs, 0]
    if hasattr(co, "co_posonlyargcount"):
        # Python 3.8+ has (argcount, posonlyargcount, kwonlyargcount, ...);
        # older versions have only (argcount, kwonlyargcount, ...).
        fields.append(0)
    fields += [
        co.co_nlocals,
        co.co_stacksize,
        flags,
        co.co_code,
        co.co_consts,
        co.co_names,
        co.co_varnames,
        co.co_filename,
        co.co_name,
    ]
    if hasattr(co, "co_qualname"):
        # Python 3.11+ adds co_qualname after co_name.
        fields.append(co.co_qualname)  # type: ignore[attr-defined]
    fields.append(co.co_firstlineno)
    fields.append(co.co_lnotab)
    if hasattr(co, "co_exceptiontable"):
        # Python 3.11+ adds the exception table before the free/cell vars.
        fields.append(co.co_exceptiontable)  # type: ignore[attr-defined]
    fields += [co.co_freevars, co.co_cellvars]

    new_code = CodeType(*fields)  # type: ignore[arg-type]
    return FunctionType(
        new_code, fn.__globals__, fn.__name__, fn.__defaults__, fn.__closure__
    )
|
| 185 |
+
|
| 186 |
+
# we need to insert placeholder nodes for *args and **kwargs
|
| 187 |
+
# we can't call this function normally, otherwise it would try to unpack them
|
| 188 |
+
# instead, let's make python think that args and kwargs are normal variables
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
@compatibility(is_backward_compatible=False)
class PHBase:
    """Base class for objects marking an input placeholder in ``concrete_args``."""

    def __repr__(self):
        return "PH"


# Singleton placeholder instance, used when no per-placeholder metadata is
# needed.
PH = PHBase()
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
@compatibility(is_backward_compatible=False)
class PHWithMeta(PHBase):
    """Placeholder for ``concrete_args`` that carries a user-supplied key."""

    def __init__(self, ph_key: Optional[str] = None):
        super().__init__()

        # Provide a key for the user to identify the resulting placeholder
        # node during analysis.
        self.ph_key = ph_key
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
@compatibility(is_backward_compatible=True)
|
| 217 |
+
class Tracer(TracerBase):
|
| 218 |
+
# Reference: https://github.com/pytorch/pytorch/issues/54354
|
| 219 |
+
# The first line of this docstring overrides the one Sphinx generates for the
|
| 220 |
+
# documentation. We need it so that Sphinx doesn't leak `math`s path from the
|
| 221 |
+
# build environment (e.g. `<module 'math' from '/leaked/path').
|
| 222 |
+
|
| 223 |
+
"""Tracer(autowrap_modules=(math,), autowrap_functions=())
|
| 224 |
+
|
| 225 |
+
``Tracer`` is the class that implements the symbolic tracing functionality
|
| 226 |
+
of ``torch.fx.symbolic_trace``. A call to ``symbolic_trace(m)`` is equivalent
|
| 227 |
+
to ``Tracer().trace(m)``.
|
| 228 |
+
|
| 229 |
+
Tracer can be subclassed to override various behaviors of the tracing
|
| 230 |
+
process. The different behaviors that can be overridden are described
|
| 231 |
+
in the docstrings of the methods on this class.
|
| 232 |
+
"""
|
| 233 |
+
|
| 234 |
+
# Not checking BC on this API because the default value for `autowrap_modules`
|
| 235 |
+
# includes the local filepath to the `math` module, which would jitter
|
| 236 |
+
# across machines.
|
| 237 |
+
@compatibility(is_backward_compatible=True)
|
| 238 |
+
def __init__(
|
| 239 |
+
self,
|
| 240 |
+
autowrap_modules: Tuple[ModuleType] = (math,),
|
| 241 |
+
autowrap_functions: Tuple[Callable, ...] = (),
|
| 242 |
+
param_shapes_constant: bool = False,
|
| 243 |
+
) -> None:
|
| 244 |
+
# This method's signature is overridden by the first line of this class'
|
| 245 |
+
# docstring. If this method's signature is modified, the signature that
|
| 246 |
+
# overrides it also should be modified accordingly.
|
| 247 |
+
|
| 248 |
+
"""
|
| 249 |
+
Construct a Tracer object.
|
| 250 |
+
|
| 251 |
+
Args:
|
| 252 |
+
|
| 253 |
+
autowrap_modules (Tuple[ModuleType]): defaults to `(math, )`,
|
| 254 |
+
Python modules whose functions should be wrapped automatically
|
| 255 |
+
without needing to use fx.wrap(). Backward-compatibility for
|
| 256 |
+
this parameter is guaranteed.
|
| 257 |
+
|
| 258 |
+
autowrap_functions (Tuple[Callable, ...]): defaults to `()`,
|
| 259 |
+
Python functions that should be wrapped automatically without
|
| 260 |
+
needing to use fx.wrap(). Backward compatibility for this
|
| 261 |
+
parameter is guaranteed.
|
| 262 |
+
|
| 263 |
+
param_shapes_constant (bool): When this flag is set, calls to shape,
|
| 264 |
+
size and a few other shape like attributes of a module's parameter
|
| 265 |
+
will be evaluated directly, rather than returning a new Proxy value
|
| 266 |
+
for an attribute access. Backward compatibility for this parameter
|
| 267 |
+
is guaranteed.
|
| 268 |
+
"""
|
| 269 |
+
|
| 270 |
+
super().__init__()
|
| 271 |
+
|
| 272 |
+
# Functions we will eagerly wrap when we see them while tracing
|
| 273 |
+
# this captures both `math.sqrt()` and `from math import sqrt` automatically
|
| 274 |
+
self._autowrap_function_ids: Set[int] = {
|
| 275 |
+
id(value)
|
| 276 |
+
for name, value in chain(*[m.__dict__.items() for m in autowrap_modules])
|
| 277 |
+
if not name.startswith("_") and callable(value)
|
| 278 |
+
}
|
| 279 |
+
self._autowrap_function_ids.update({id(f) for f in autowrap_functions})
|
| 280 |
+
|
| 281 |
+
# Python modules to apply autowrap to at the start, in addition to
|
| 282 |
+
# modules we see while tracing
|
| 283 |
+
self._autowrap_search: List[ModuleType] = list(autowrap_modules)
|
| 284 |
+
self.param_shapes_constant = param_shapes_constant
|
| 285 |
+
|
| 286 |
+
self.submodule_paths: Optional[Dict[torch.nn.Module, str]] = None
|
| 287 |
+
self.root_module_name: str = ""
|
| 288 |
+
# Maps the containing module's name to the operator name
|
| 289 |
+
self.scope = Scope("", None)
|
| 290 |
+
# Records the module call stack
|
| 291 |
+
self.module_stack = collections.OrderedDict()
|
| 292 |
+
# Mapping of node name to module scope
|
| 293 |
+
self.node_name_to_scope: Dict[str, Tuple[str, type]] = {}
|
| 294 |
+
|
| 295 |
+
@compatibility(is_backward_compatible=True)
|
| 296 |
+
def create_arg(self, a: Any) -> "Argument":
|
| 297 |
+
"""
|
| 298 |
+
A method to specify the behavior of tracing when preparing values to
|
| 299 |
+
be used as arguments to nodes in the ``Graph``.
|
| 300 |
+
|
| 301 |
+
By default, the behavior includes:
|
| 302 |
+
|
| 303 |
+
#. Iterate through collection types (e.g. tuple, list, dict) and recursively
|
| 304 |
+
call ``create_args`` on the elements.
|
| 305 |
+
#. Given a Proxy object, return a reference to the underlying IR ``Node``
|
| 306 |
+
#. Given a non-Proxy Tensor object, emit IR for various cases:
|
| 307 |
+
|
| 308 |
+
* For a Parameter, emit a ``get_attr`` node referring to that Parameter
|
| 309 |
+
* For a non-Parameter Tensor, store the Tensor away in a special
|
| 310 |
+
attribute referring to that attribute.
|
| 311 |
+
|
| 312 |
+
This method can be overridden to support more types.
|
| 313 |
+
|
| 314 |
+
Args:
|
| 315 |
+
|
| 316 |
+
a (Any): The value to be emitted as an ``Argument`` in the ``Graph``.
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
Returns:
|
| 320 |
+
|
| 321 |
+
The value ``a`` converted into the appropriate ``Argument``
|
| 322 |
+
"""
|
| 323 |
+
# The base tracer is used to construct Graphs when there is no associated
|
| 324 |
+
# module hierarchy, so it can never create parameter references.
|
| 325 |
+
# The default tracer adds the ability to refer to parameters when
|
| 326 |
+
# tracing modules.
|
| 327 |
+
if isinstance(a, torch.nn.Parameter):
|
| 328 |
+
for n, p in self.root.named_parameters():
|
| 329 |
+
if a is p:
|
| 330 |
+
return self.create_node("get_attr", n, (), {})
|
| 331 |
+
raise NameError("parameter is not a member of this module")
|
| 332 |
+
elif isinstance(a, torch.Tensor):
|
| 333 |
+
for n_, p_ in self.root.named_buffers():
|
| 334 |
+
if a is p_:
|
| 335 |
+
return self.create_node("get_attr", n_, (), {})
|
| 336 |
+
elif isinstance(a, torch.nn.Module):
|
| 337 |
+
for n_, p_ in self.root.named_modules():
|
| 338 |
+
if a is p_:
|
| 339 |
+
return self.create_node("get_attr", n_, (), {})
|
| 340 |
+
# For NamedTuple instances that appear literally as args, we emit
|
| 341 |
+
# a node to construct the NamedTuple and use that Node as the argument.
|
| 342 |
+
if isinstance(a, tuple) and hasattr(a, "_fields"):
|
| 343 |
+
args = tuple(self.create_arg(elem) for elem in a)
|
| 344 |
+
return self.create_node("call_function", a.__class__, args, {})
|
| 345 |
+
|
| 346 |
+
# Tensors do not have a reliable string repr() from which they can be
|
| 347 |
+
# constructed (and we probably don't want to rely on that, either), so
|
| 348 |
+
# for any constant Tensor values we encounter, first search for if they
|
| 349 |
+
# are an attribute of some module in the module hierarchy. If so, emit
|
| 350 |
+
# a get_attr to retrieve that tensor. Otherwise, we'll store away the
|
| 351 |
+
# tensor value into a special attribute on the Module s.t. we can
|
| 352 |
+
# retrieve it with a get_attr.
|
| 353 |
+
if isinstance(a, (torch.Tensor, ScriptObject)):
|
| 354 |
+
qualname: Optional[str] = self.tensor_attrs.get(a)
|
| 355 |
+
|
| 356 |
+
# Tensor was not found in the Module hierarchy, stow it away in a
|
| 357 |
+
# special attribute and set the qualname to refer to that
|
| 358 |
+
if not qualname:
|
| 359 |
+
i = 0
|
| 360 |
+
while True:
|
| 361 |
+
qualname = f"_tensor_constant{i}"
|
| 362 |
+
if not hasattr(self.root, qualname):
|
| 363 |
+
break
|
| 364 |
+
i += 1
|
| 365 |
+
self.tensor_attrs[a] = qualname
|
| 366 |
+
setattr(self.root, qualname, a)
|
| 367 |
+
|
| 368 |
+
return self.create_node("get_attr", qualname, (), {})
|
| 369 |
+
|
| 370 |
+
if type(a) in _proxyable_classes:
|
| 371 |
+
# This is an instance of a proxyable class for which we did not
|
| 372 |
+
# witness its construction. Intern this as a constant attribute
|
| 373 |
+
|
| 374 |
+
# TODO: binary search
|
| 375 |
+
i = 0
|
| 376 |
+
while True:
|
| 377 |
+
qualname = f"_{a.__class__.__name__}_constant_{i}"
|
| 378 |
+
if not hasattr(self.root, qualname):
|
| 379 |
+
break
|
| 380 |
+
i += 1
|
| 381 |
+
setattr(self.root, qualname, a)
|
| 382 |
+
|
| 383 |
+
return self.create_node("get_attr", qualname, (), {})
|
| 384 |
+
|
| 385 |
+
return super().create_arg(a)
|
| 386 |
+
|
| 387 |
+
@compatibility(is_backward_compatible=True)
|
| 388 |
+
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
|
| 389 |
+
"""
|
| 390 |
+
A method to specify whether a given ``nn.Module`` is a "leaf" module.
|
| 391 |
+
|
| 392 |
+
Leaf modules are the atomic units that appear in
|
| 393 |
+
the IR, referenced by ``call_module`` calls. By default,
|
| 394 |
+
Modules in the PyTorch standard library namespace (torch.nn)
|
| 395 |
+
are leaf modules. All other modules are traced through and
|
| 396 |
+
their constituent ops are recorded, unless specified otherwise
|
| 397 |
+
via this parameter.
|
| 398 |
+
|
| 399 |
+
Args:
|
| 400 |
+
|
| 401 |
+
m (Module): The module being queried about
|
| 402 |
+
module_qualified_name (str): The path to root of this module. For example,
|
| 403 |
+
if you have a module hierarchy where submodule ``foo`` contains
|
| 404 |
+
submodule ``bar``, which contains submodule ``baz``, that module will
|
| 405 |
+
appear with the qualified name ``foo.bar.baz`` here.
|
| 406 |
+
"""
|
| 407 |
+
return (
|
| 408 |
+
(m.__module__.startswith("torch.nn") or m.__module__.startswith("torch.ao.nn"))
|
| 409 |
+
and not isinstance(m, torch.nn.Sequential)
|
| 410 |
+
)
|
| 411 |
+
|
| 412 |
+
@compatibility(is_backward_compatible=True)
|
| 413 |
+
def path_of_module(self, mod: torch.nn.Module) -> str:
|
| 414 |
+
"""
|
| 415 |
+
Helper method to find the qualified name of ``mod`` in the Module hierarchy
|
| 416 |
+
of ``root``. For example, if ``root`` has a submodule named ``foo``, which has
|
| 417 |
+
a submodule named ``bar``, passing ``bar`` into this function will return
|
| 418 |
+
the string "foo.bar".
|
| 419 |
+
|
| 420 |
+
Args:
|
| 421 |
+
|
| 422 |
+
mod (str): The ``Module`` to retrieve the qualified name for.
|
| 423 |
+
"""
|
| 424 |
+
# Prefer the O(1) algorithm
|
| 425 |
+
if self.submodule_paths:
|
| 426 |
+
path = self.submodule_paths.get(mod)
|
| 427 |
+
if path is None:
|
| 428 |
+
raise NameError("module is not installed as a submodule")
|
| 429 |
+
assert isinstance(path, str)
|
| 430 |
+
return path
|
| 431 |
+
# O(N^2) fallback in the case that we didn't store the submodule
|
| 432 |
+
# paths.
|
| 433 |
+
else:
|
| 434 |
+
for n, p in self.root.named_modules():
|
| 435 |
+
if mod is p:
|
| 436 |
+
return n
|
| 437 |
+
raise NameError("module is not installed as a submodule")
|
| 438 |
+
|
| 439 |
+
@compatibility(is_backward_compatible=True)
|
| 440 |
+
def call_module(
|
| 441 |
+
self,
|
| 442 |
+
m: torch.nn.Module,
|
| 443 |
+
forward: Callable[..., Any],
|
| 444 |
+
args: Tuple[Any, ...],
|
| 445 |
+
kwargs: Dict[str, Any],
|
| 446 |
+
) -> Any:
|
| 447 |
+
"""
|
| 448 |
+
Method that specifies the behavior of this ``Tracer`` when it encounters
|
| 449 |
+
a call to an ``nn.Module`` instance.
|
| 450 |
+
|
| 451 |
+
By default, the behavior is to check if the called module is a leaf module
|
| 452 |
+
via ``is_leaf_module``. If it is, emit a ``call_module`` node referring to
|
| 453 |
+
``m`` in the ``Graph``. Otherwise, call the ``Module`` normally, tracing through
|
| 454 |
+
the operations in its ``forward`` function.
|
| 455 |
+
|
| 456 |
+
This method can be overridden to--for example--create nested traced
|
| 457 |
+
GraphModules, or any other behavior you would want while tracing across
|
| 458 |
+
``Module`` boundaries.
|
| 459 |
+
|
| 460 |
+
Args:
|
| 461 |
+
|
| 462 |
+
m (Module): The module for which a call is being emitted
|
| 463 |
+
forward (Callable): The forward() method of the ``Module`` to be invoked
|
| 464 |
+
args (Tuple): args of the module callsite
|
| 465 |
+
kwargs (Dict): kwargs of the module callsite
|
| 466 |
+
|
| 467 |
+
Return:
|
| 468 |
+
|
| 469 |
+
The return value from the Module call. In the case that a ``call_module``
|
| 470 |
+
node was emitted, this is a ``Proxy`` value. Otherwise, it is whatever
|
| 471 |
+
value was returned from the ``Module`` invocation.
|
| 472 |
+
"""
|
| 473 |
+
module_qualified_name = self.path_of_module(m)
|
| 474 |
+
with ScopeContextManager(self.scope, Scope(module_qualified_name, type(m))) as _scope:
|
| 475 |
+
# module_stack is an ordered dict so writing then deleting the
|
| 476 |
+
# entry is equivalent to push/pop on a list
|
| 477 |
+
self.module_stack[_scope.module_path] = _scope.module_type
|
| 478 |
+
if not self.is_leaf_module(m, module_qualified_name):
|
| 479 |
+
ret_val = forward(*args, **kwargs)
|
| 480 |
+
else:
|
| 481 |
+
ret_val = self.create_proxy("call_module", module_qualified_name, args, kwargs)
|
| 482 |
+
key, _ = self.module_stack.popitem(last=True)
|
| 483 |
+
assert key == _scope.module_path, f" Unexpected key {key}"
|
| 484 |
+
|
| 485 |
+
return ret_val
|
| 486 |
+
|
| 487 |
+
@compatibility(is_backward_compatible=False)
|
| 488 |
+
def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]):
|
| 489 |
+
"""
|
| 490 |
+
Method that specifies the behavior of this ``Tracer`` when we call getattr
|
| 491 |
+
on a call to an ``nn.Module`` instance.
|
| 492 |
+
|
| 493 |
+
By default, the behavior is to return a proxy value for the attribute. It
|
| 494 |
+
also stores the proxy value in the ``parameter_proxy_cache``, so that future
|
| 495 |
+
calls will reuse the proxy rather than creating a new one.
|
| 496 |
+
|
| 497 |
+
This method can be overridden to --for example-- not return proxies when
|
| 498 |
+
querying parameters.
|
| 499 |
+
|
| 500 |
+
Args:
|
| 501 |
+
|
| 502 |
+
attr (str): The name of the attribute being queried
|
| 503 |
+
attr_val (Any): The value of the attribute
|
| 504 |
+
parameter_proxy_cache (Dict[str, Any]): A cache of attr names to proxies
|
| 505 |
+
|
| 506 |
+
Return:
|
| 507 |
+
|
| 508 |
+
The return value from the getattr call.
|
| 509 |
+
"""
|
| 510 |
+
def maybe_get_proxy_for_attr(
|
| 511 |
+
attr_val, collection_to_search, parameter_proxy_cache
|
| 512 |
+
):
|
| 513 |
+
for n, p in collection_to_search:
|
| 514 |
+
if attr_val is p:
|
| 515 |
+
if n not in parameter_proxy_cache:
|
| 516 |
+
kwargs = {}
|
| 517 |
+
if (
|
| 518 |
+
"proxy_factory_fn"
|
| 519 |
+
in inspect.signature(self.create_proxy).parameters
|
| 520 |
+
):
|
| 521 |
+
kwargs["proxy_factory_fn"] = (
|
| 522 |
+
None
|
| 523 |
+
if not self.param_shapes_constant
|
| 524 |
+
else lambda node: ParameterProxy(
|
| 525 |
+
self, node, n, attr_val
|
| 526 |
+
)
|
| 527 |
+
)
|
| 528 |
+
val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type]
|
| 529 |
+
parameter_proxy_cache[n] = val_proxy
|
| 530 |
+
return parameter_proxy_cache[n]
|
| 531 |
+
return None
|
| 532 |
+
|
| 533 |
+
if isinstance(attr_val, torch.nn.Parameter):
|
| 534 |
+
maybe_parameter_proxy = maybe_get_proxy_for_attr(
|
| 535 |
+
attr_val, self.root.named_parameters(), parameter_proxy_cache
|
| 536 |
+
)
|
| 537 |
+
if maybe_parameter_proxy is not None:
|
| 538 |
+
return maybe_parameter_proxy
|
| 539 |
+
|
| 540 |
+
if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor):
|
| 541 |
+
maybe_buffer_proxy = maybe_get_proxy_for_attr(
|
| 542 |
+
attr_val, self.root.named_buffers(), parameter_proxy_cache
|
| 543 |
+
)
|
| 544 |
+
if maybe_buffer_proxy is not None:
|
| 545 |
+
return maybe_buffer_proxy
|
| 546 |
+
|
| 547 |
+
return attr_val
|
| 548 |
+
|
| 549 |
+
    # This method will be refactored
    @compatibility(is_backward_compatible=False)
    def create_args_for_root(self, root_fn, is_module, concrete_args=None):
        """
        Create ``placeholder`` nodes corresponding to the signature of the ``root``
        Module. This method introspects root's signature and emits those
        nodes accordingly, also supporting ``*args`` and ``**kwargs``.

        Args:
            root_fn (Callable): function whose signature is introspected.
            is_module (bool): True when ``root_fn`` is an ``nn.Module`` method; the
                leading ``self`` parameter is then bound to ``self.root`` instead of
                getting a placeholder.
            concrete_args (Optional[Dict[str, Any]]): values to specialize on.
                Entries here do not become plain placeholders; they are expanded
                via pytree and guarded with runtime assertions where possible.

        Returns:
            A ``(fn, args)`` pair: the (possibly wrapped/patched) function to call,
            and the list of arguments (``self.root`` plus Proxies / concrete values).
        """
        # In some cases, a function or method has been decorated with a wrapper
        # defined via ``functools.wraps``. In this case, the outer code object
        # will likely not contain the actual parameters we care about, so unwrap
        # the function to get to the innermost callable.
        fn_for_analysis = inspect.unwrap(root_fn)
        co = fn_for_analysis.__code__
        total_args = co.co_argcount + co.co_kwonlyargcount
        orig_args = list(co.co_varnames)
        names_iter = iter(co.co_varnames)
        args: List[Any] = []
        skip_arg_idx = 0
        if is_module:
            if total_args == 0:
                raise RuntimeError(
                    "``self`` argument cannot be part of *args expansion!"
                )
            skip_arg_idx = 1
            next(names_iter)  # skip self
            args.append(self.root)

        sig = inspect.signature(fn_for_analysis)

        def proxy_placeholder(name: str):
            # Emit a placeholder Proxy for parameter ``name``, or expand a
            # concrete (specialized) argument in its place.
            if concrete_args is not None and name in concrete_args:
                # Counter so each leaf of a pytree-expanded argument gets a
                # unique placeholder name (``name_1``, ``name_2``, ...).
                cnt = 0

                def replace_ph(x):
                    nonlocal cnt
                    cnt += 1
                    param = sig.parameters[name]
                    default = (
                        ()
                        if param.default is inspect.Parameter.empty
                        else (param.default,)
                    )
                    out = self.create_proxy(
                        "placeholder", f"{name}_{str(cnt)}", default, {}
                    )
                    if isinstance(x, PHBase):
                        def transfer_attrs(fr, to):
                            # Copy non-callable, non-dunder attributes not already
                            # present on the destination node.
                            for attr_name in dir(fr):
                                attr_val = getattr(fr, attr_name)
                                if (
                                    not callable(attr_val)
                                    and not attr_name.startswith("__")
                                    and not hasattr(to, attr_name)
                                ):
                                    setattr(to, attr_name, attr_val)

                        if x != PH:
                            # Transfer attrs in the case where you're using a placeholder other
                            # than the singleton PH (PH has no attributes to transfer).
                            # Proxies were created out of the placeholders.
                            # Transfer any metadata (put on the placeholders in the form of
                            # attributes set by the user) from the placeholder to the
                            # underlying nodes (the proxy is unwrapped by the user, but
                            # the metadata should hold).
                            transfer_attrs(fr=x, to=out.node)

                        return out
                    # Union[int, bool] == bool in Python <= 3.6
                    if (
                        type(x) == bool
                        or type(x) in base_types
                        and type(x) != torch.Tensor
                    ):
                        torch._assert(
                            out == x,
                            f"{name} has been specialized to have value {x} but got another value",
                        )
                    elif type(x) == type(None):
                        # NOTE: this local ``args`` is just the argument tuple for the
                        # _assert_is_none call; it shadows the outer ``args`` list.
                        args = (
                            out,
                            f"{name} has been specialized to have value None but got another value",
                        )
                        self.create_proxy("call_function", _assert_is_none, args, {})
                    else:
                        warnings.warn(
                            f"Was not able to add assertion to guarantee correct input {name} to "
                            f"specialized function. It is up to the user to make sure that your inputs match the "
                            f"inputs you specialized the function with."
                        )

                    return x

                return pytree.tree_map(replace_ph, concrete_args[name])
            if name[0] == "*":
                # *args / **kwargs entries never have a signature default.
                default = ()
            else:
                param = sig.parameters[name]
                default = () if param.default is inspect.Parameter.empty else (param.default,)  # type: ignore[assignment]
            return self.create_proxy(
                "placeholder",
                name,
                default,
                {},
                type_expr=fn_for_analysis.__annotations__.get(name, None)
            )

        arg_names = [next(names_iter) for idx in range(skip_arg_idx, total_args)]
        if isinstance(concrete_args, tuple):
            # Positional concrete_args: pair them up with parameter names.
            if len(arg_names) != len(concrete_args):
                raise RuntimeError(
                    f"Tracing expected {len(arg_names)} arguments but got {len(concrete_args)} concrete arguments"
                )
            concrete_args = dict(zip(arg_names, concrete_args))
        args.extend(proxy_placeholder(names) for names in arg_names)

        if co.co_kwonlyargcount > 0 or co.co_flags & HAS_VARSTUFF:
            # TODO: type annotations for *args and **kwargs
            if co.co_flags & inspect.CO_VARARGS:
                args.append(proxy_placeholder("*" + next(names_iter)))
            if co.co_flags & inspect.CO_VARKEYWORDS:
                args.append(proxy_placeholder("**" + next(names_iter)))
            root_fn = _patch_function(root_fn, len(args))

        flat_args, in_spec = pytree.tree_flatten(tuple(args))
        if any(not isinstance(i, pytree.LeafSpec) for i in in_spec.children_specs):
            # In the case that we have pytree-flattened inputs in
            # `concrete_args`, generate a flattening wrapper around the
            # original root function and return that.
            self.graph._codegen = _PyTreeCodeGen(
                _PyTreeInfo(orig_args[:total_args], in_spec, None)
            )

            def flatten_fn(*args):
                # Unflatten the incoming flat args, call the original function,
                # then flatten the output and record its spec on the codegen.
                tree_args = pytree.tree_unflatten(list(args), in_spec)
                tree_out = root_fn(*tree_args)
                out_args, out_spec = pytree.tree_flatten(tree_out)
                assert isinstance(self.graph._codegen, _PyTreeCodeGen)
                self.graph._codegen.pytree_info = (
                    self.graph._codegen.pytree_info._replace(out_spec=out_spec)
                )
                return out_args

            return flatten_fn, flat_args
        return root_fn, args
|
| 694 |
+
|
| 695 |
+
    @compatibility(is_backward_compatible=True)
    def trace(
        self,
        root: Union[torch.nn.Module, Callable[..., Any]],
        concrete_args: Optional[Dict[str, Any]] = None,
    ) -> Graph:
        """
        Trace ``root`` and return the corresponding FX ``Graph`` representation. ``root``
        can either be an ``nn.Module`` instance or a Python callable.

        Note that after this call, ``self.root`` may be different from the ``root`` passed
        in here. For example, when a free function is passed to ``trace()``, we will
        create an ``nn.Module`` instance to use as the root and add embedded constants
        to.


        Args:

            root (Union[Module, Callable]): Either a ``Module`` or a function to be
                traced through. Backwards-compatibility for this parameter is
                guaranteed.
            concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
                not be treated as Proxies. This parameter is experimental and
                its backwards-compatibility is *NOT* guaranteed.

        Returns:

            A ``Graph`` representing the semantics of the passed-in ``root``.
        """
        global _is_fx_tracing_flag
        # Save/restore the global tracing flag so nested or failed traces don't
        # leave it set.
        old_is_fx_tracing_flag = _is_fx_tracing_flag
        _is_fx_tracing_flag = True
        try:
            if isinstance(root, torch.nn.Module):
                self.root = root

                assert hasattr(
                    type(root), self.traced_func_name
                ), f"traced_func_name={self.traced_func_name} doesn't exist in {type(root).__name__}"

                # Trace the *unbound* function from the class; ``self`` is bound
                # later via create_args_for_root.
                fn = getattr(type(root), self.traced_func_name)
                self.root_module_name = root._get_name()
                self.submodule_paths = {mod: name for name, mod in root.named_modules()}
            else:
                # Free function: synthesize an empty module to serve as the root.
                self.root = torch.nn.Module()
                fn = root

            tracer_cls: Optional[Type[Tracer]] = getattr(self, "__class__", None)
            self.graph = Graph(tracer_cls=tracer_cls)
            if hasattr(fn, '__code__'):
                # Record code-object provenance on the graph for debugging.
                code = fn.__code__
                self.graph._co_fields = {
                    'co_name': code.co_name,
                    'co_filename': code.co_filename,
                    'co_firstlineno': code.co_firstlineno,
                }

            # When we encounter a Tensor value that's not a parameter, we look if it
            # is some other attribute on the model. Construct a dict mapping Tensor
            # values to the qualified name here for efficiency. This is used downstream
            # in create_arg
            self.tensor_attrs: Dict[Union[torch.Tensor, ScriptObject], str] = {}

            def collect_tensor_attrs(m: torch.nn.Module, prefix_atoms: List[str]):
                # Walk the module tree, recording qualified names of plain
                # Tensor / ScriptObject attributes (non-parameters).
                for k, v in m.__dict__.items():
                    if isinstance(v, (torch.Tensor, ScriptObject)):
                        self.tensor_attrs[v] = ".".join(prefix_atoms + [k])
                for k, v in m.named_children():
                    collect_tensor_attrs(v, prefix_atoms + [k])

            collect_tensor_attrs(self.root, [])

            assert isinstance(fn, FunctionType)

            fn_globals = fn.__globals__  # run before it gets patched
            fn, args = self.create_args_for_root(
                fn, isinstance(root, torch.nn.Module), concrete_args
            )

            parameter_proxy_cache: Dict[
                str, Proxy
            ] = {}  # Reduce number of get_attr calls

            # Method dispatch on parameters is not recorded unless it's directly used.
            # Thus, we need to insert a proxy when __getattr__ requests a parameter.
            @functools.wraps(_orig_module_getattr)
            def module_getattr_wrapper(mod, attr):
                attr_val = _orig_module_getattr(mod, attr)
                return self.getattr(attr, attr_val, parameter_proxy_cache)

            @functools.wraps(_orig_module_call)
            def module_call_wrapper(mod, *args, **kwargs):
                def forward(*args, **kwargs):
                    return _orig_module_call(mod, *args, **kwargs)

                _autowrap_check(
                    patcher,
                    getattr(getattr(mod, "forward", mod), "__globals__", {}),
                    self._autowrap_function_ids,
                )
                return self.call_module(mod, forward, args, kwargs)

            with _Patcher() as patcher:
                # allow duplicate patches to support the case of nested calls
                patcher.patch_method(
                    torch.nn.Module,
                    "__getattr__",
                    module_getattr_wrapper,
                    deduplicate=False,
                )
                patcher.patch_method(
                    torch.nn.Module, "__call__", module_call_wrapper, deduplicate=False
                )
                _patch_wrapped_functions(patcher)
                _autowrap_check(patcher, fn_globals, self._autowrap_function_ids)
                for module in self._autowrap_search:
                    _autowrap_check(
                        patcher, module.__dict__, self._autowrap_function_ids
                    )
                # Actually run the function on the placeholder Proxies; the
                # returned value becomes the graph's output node.
                self.create_node(
                    "output",
                    "output",
                    (self.create_arg(fn(*args)),),
                    {},
                    type_expr=fn.__annotations__.get("return", None),
                )

            self.submodule_paths = None
        finally:
            _is_fx_tracing_flag = old_is_fx_tracing_flag
        return self.graph
|
| 826 |
+
|
| 827 |
+
def __deepcopy__(self, memo):
|
| 828 |
+
# _autowrap_search contains modules, which cannot be deepcopied.
|
| 829 |
+
new_tracer = Tracer.__new__(Tracer)
|
| 830 |
+
|
| 831 |
+
for k, v in self.__dict__.items():
|
| 832 |
+
if k in {'_autowrap_search'}:
|
| 833 |
+
new_obj = copy.copy(v)
|
| 834 |
+
else:
|
| 835 |
+
new_obj = copy.deepcopy(v, memo)
|
| 836 |
+
|
| 837 |
+
new_tracer.__dict__[k] = new_obj
|
| 838 |
+
|
| 839 |
+
return new_tracer
|
| 840 |
+
|
| 841 |
+
|
| 842 |
+
# Dictionary of (id(globals dict), function name) => globals_dict to patch for
# the purposes of the wrap() API.
# We key by the globals dict id and function name to ensure we're wrapping a given
# function only once.
_wrapped_fns_to_patch: Dict[Tuple[int, str], dict] = {}

# List of methods on classes to wrap (class type, function name)
# this currently only works for Tensor.* methods that aren't traced properly
_wrapped_methods_to_patch: List[Tuple[type, str]] = []

# Opt-in: patching Tensor.__getitem__ is gated behind an environment variable.
if os.environ.get("FX_PATCH_GETITEM") == "1":
    # This change is needed to trace models like PositionalEmbedding from BERT:
    # https://github.com/pytorch/benchmark/blob/master/torchbenchmark/models/BERT_pytorch/bert_pytorch/model/embedding/position.py
    # but causes issues in quantization documented here:
    # https://github.com/pytorch/pytorch/issues/50710
    # once that is fixed we can make this the default behavior.
    _wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
|
| 859 |
+
|
| 860 |
+
|
| 861 |
+
def _find_proxy(*objects_to_search):
    """
    Recursively search a data structure for a Proxy() and return it,
    return None if not found.
    """
    # Single-slot container instead of a nonlocal rebinding; if several
    # proxies are present, the last one visited wins (same as before).
    found: List[Any] = [None]

    def record_if_proxy(candidate):
        if isinstance(candidate, Proxy):
            found[0] = candidate

    map_aggregate(objects_to_search, record_if_proxy)
    return found[0]
|
| 875 |
+
|
| 876 |
+
|
| 877 |
+
def _create_wrapped_func(orig_fn):
    @functools.wraps(orig_fn)
    def wrapped(*args, **kwargs):
        """
        Given an closed-over ``orig_function`` to invoke, search the args and kwargs for
        a Proxy object. If there is one, emit a ``call_function`` node to preserve the
        call to this leaf function directly. Otherwise, just return the results of
        this function call, as this function is not being traced.
        """
        maybe_proxy = _find_proxy(args, kwargs)
        if maybe_proxy is None:
            # Not tracing: execute the real function.
            return orig_fn(*args, **kwargs)
        out_proxy = maybe_proxy.tracer.create_proxy(
            "call_function", orig_fn, args, kwargs
        )
        out_proxy.node.meta["is_wrapped"] = True
        return out_proxy

    return wrapped
|
| 896 |
+
|
| 897 |
+
|
| 898 |
+
def _create_wrapped_method(cls, name):
    orig_fn = getattr(cls, name)

    @functools.wraps(orig_fn)
    def wrapped(*args, **kwargs):
        """
        Search the args and kwargs for a Proxy object. If there is one,
        emit a ``call_method`` node to preserve the call to this method
        directly. Otherwise, just return the results of this function
        call, as this function is not being traced.
        """
        maybe_proxy = _find_proxy(args, kwargs)
        if maybe_proxy is None:
            return orig_fn(*args, **kwargs)
        return maybe_proxy.tracer.create_proxy("call_method", name, args, kwargs)

    return wrapped
|
| 915 |
+
|
| 916 |
+
|
| 917 |
+
class _PatchedFn(NamedTuple):
    """Record of a single patch made by ``_Patcher``; subclasses know how to undo it."""
    frame_dict: Any  # namespace (module globals dict, or a class) that was patched
    fn_name: str  # name of the patched entry in that namespace
    orig_fn: Any  # original value kept for restoration (None when nothing existed)

    def revert(self):
        # Subclasses implement the specific undo operation.
        raise NotImplementedError()
|
| 924 |
+
|
| 925 |
+
|
| 926 |
+
class _PatchedFnSetItem(_PatchedFn):
    # Undoes a patch that replaced an existing dict entry (e.g. a module global).
    def revert(self):
        self.frame_dict[self.fn_name] = self.orig_fn
|
| 929 |
+
|
| 930 |
+
|
| 931 |
+
class _PatchedFnDel(_PatchedFn):
    # Undoes a patch that *added* a dict entry (the name previously resolved
    # through builtins), so reverting deletes it.
    def revert(self):
        del self.frame_dict[self.fn_name]
|
| 934 |
+
|
| 935 |
+
|
| 936 |
+
class _PatchedFnSetAttr(_PatchedFn):
    # Undoes a patch applied with setattr on a class/object.
    def revert(self):
        setattr(self.frame_dict, self.fn_name, self.orig_fn)
|
| 939 |
+
|
| 940 |
+
|
| 941 |
+
class _Patcher:
    """
    Context manager that temporarily replaces entries in module-globals dicts
    and attributes on classes, restoring everything on exit.
    """

    def __init__(self):
        super().__init__()
        # Patches applied so far, in application order; reverted LIFO on exit.
        self.patches_made: List[_PatchedFn] = []
        # ids of objects already seen by visit_once.
        self.visited: Set[int] = set()

    def patch(
        self,
        frame_dict: Dict[str, Any],
        name: str,
        new_fn: Callable,
        deduplicate: bool = True,
    ):
        """
        Replace frame_dict[name] with new_fn until we exit the context manager.
        """
        # Marker consulted below (and by other _Patcher instances) to avoid
        # double-patching when deduplicate is True.
        new_fn.__fx_already_patched = deduplicate  # type: ignore[attr-defined]
        if name not in frame_dict and hasattr(builtins, name):
            # Name resolves through builtins; record a deletion so revert
            # removes our new entry from frame_dict entirely.
            self.patches_made.append(_PatchedFnDel(frame_dict, name, None))
        elif getattr(frame_dict[name], "__fx_already_patched", False):
            return  # already patched, no need to do it again
        else:
            self.patches_made.append(
                _PatchedFnSetItem(frame_dict, name, frame_dict[name])
            )
        frame_dict[name] = new_fn

    def patch_method(
        self, cls: type, name: str, new_fn: Callable, deduplicate: bool = True
    ):
        """
        Replace object_or_dict.name with new_fn until we exit the context manager.
        """
        new_fn.__fx_already_patched = deduplicate  # type: ignore[attr-defined]
        orig_fn = getattr(cls, name)
        if getattr(orig_fn, "__fx_already_patched", False):
            return  # already patched, no need to do it again
        self.patches_made.append(_PatchedFnSetAttr(cls, name, orig_fn))
        setattr(cls, name, new_fn)

    def visit_once(self, thing: Any):
        """Return True on the first call to with thing, otherwise false"""
        idx = id(thing)
        if idx in self.visited:
            return False
        self.visited.add(idx)
        return True

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Undo all the changes made via self.patch() and self.patch_method()
        """
        while self.patches_made:
            # unpatch in reverse order to handle duplicates correctly
            self.patches_made.pop().revert()
        self.visited.clear()
|
| 1000 |
+
|
| 1001 |
+
|
| 1002 |
+
def _patch_wrapped_functions(patcher: _Patcher):
    """
    Go through ``_wrapped_fn_patch_table`` and, for each frame object, wrap
    the listed global functions in the `_create_wrapped_func` wrapper.
    """
    for (_, fn_name), frame_dict in _wrapped_fns_to_patch.items():
        if fn_name not in frame_dict and hasattr(builtins, fn_name):
            # The name resolves through builtins rather than module globals.
            orig_fn = getattr(builtins, fn_name)
        else:
            orig_fn = frame_dict[fn_name]
        patcher.patch(frame_dict, fn_name, _create_wrapped_func(orig_fn))

    # Also wrap the registered (class, method-name) pairs.
    for klass, method_name in _wrapped_methods_to_patch:
        patcher.patch_method(klass, method_name, _create_wrapped_method(klass, method_name))
|
| 1016 |
+
|
| 1017 |
+
|
| 1018 |
+
def _autowrap_check(
    patcher: _Patcher, frame_dict: Dict[str, Any], function_ids: Set[int]
):
    """
    Some methods, like `math.sqrt` are common enough we want to automatically wrap them as we see them.
    This method searches a scope for them and patches them if found.
    """
    # Each scope is scanned at most once per patcher.
    if not patcher.visit_once(frame_dict):
        return
    for name, value in frame_dict.items():
        should_wrap = (
            not name.startswith("_")
            and callable(value)
            and id(value) in function_ids
        )
        if should_wrap:
            patcher.patch(frame_dict, name, _create_wrapped_func(value))
|
| 1033 |
+
|
| 1034 |
+
|
| 1035 |
+
@compatibility(is_backward_compatible=True)
def wrap(fn_or_name: Union[str, Callable]):
    """
    Register ``fn_or_name`` as a "leaf function" at module-level scope. A leaf
    function is preserved as a CallFunction node in the FX trace instead of
    being traced through::

        # foo/bar/baz.py
        def my_custom_function(x, y):
            return x * x + y * y

        torch.fx.wrap('my_custom_function')

        def fn_to_be_traced(x, y):
            # When symbolic tracing, the below call to my_custom_function will be inserted into
            # the graph rather than tracing it.
            return my_custom_function(x, y)

    It can equivalently be used as a decorator (``@torch.fx.wrap``) on the
    function definition itself. Leaf functions are analogous to "leaf modules":
    they are left as calls in the FX trace rather than traced through.

    Args:

        fn_or_name (Union[str, Callable]): The function or name of the global
            function to insert into the graph when it's called
    """
    # Resolve the registration name from whichever form was supplied.
    if callable(fn_or_name):
        fn_name = fn_or_name.__name__
    elif isinstance(fn_or_name, str):
        fn_name = fn_or_name
    else:
        raise RuntimeError(
            "Unsupported type for global function! Must be either a callable or "
            "string name"
        )

    this_frame = inspect.currentframe()
    assert this_frame is not None
    caller_frame = this_frame.f_back
    assert caller_frame is not None
    if caller_frame.f_code.co_name != "<module>":
        raise NotImplementedError("wrap must be called at the top level of a module")

    # consider implementing Callable version of this via _autowrap_function_ids / _autowrap_search
    # semantics would be slightly different, but would add support `from x import wrapped_function`
    _wrapped_fns_to_patch[(id(caller_frame.f_globals), fn_name)] = caller_frame.f_globals
    return fn_or_name
|
| 1095 |
+
|
| 1096 |
+
|
| 1097 |
+
@compatibility(is_backward_compatible=True)
def symbolic_trace(
    root: Union[torch.nn.Module, Callable[..., Any]],
    concrete_args: Optional[Dict[str, Any]] = None,
) -> GraphModule:
    """
    Symbolic tracing API.

    Given an ``nn.Module`` or function instance ``root``, return a
    ``GraphModule`` constructed by recording operations seen while tracing
    through ``root``.

    ``concrete_args`` lets you partially specialize your function, whether to
    remove control flow or data structures. For example::

        def f(a, b):
            if b == True:
                return a
            else:
                return a*2

    FX typically cannot trace through this due to the control flow, but
    specializing on ``b`` makes it traceable::

        f = fx.symbolic_trace(f, concrete_args={'b': False})
        assert f(3, False) == 6

    Note that although you can still pass in different values of ``b``, they
    will be ignored.

    ``concrete_args`` can also eliminate data-structure handling via pytree
    flattening; pass ``fx.PH`` for values that shouldn't be specialized::

        def f(x):
            out = 0
            for v in x.values():
                out += v
            return out
        f = fx.symbolic_trace(f, concrete_args={'x': {'a': fx.PH, 'b': fx.PH, 'c': fx.PH}})
        assert f({'a': 1, 'b': 2, 'c': 4}) == 7


    Args:
        root (Union[torch.nn.Module, Callable]): Module or function to be traced and converted
            into a Graph representation.
        concrete_args (Optional[Dict[str, any]]): Inputs to be partially specialized

    Returns:
        GraphModule: a Module created from the recorded operations from ``root``.
    """
    tracer = Tracer()
    traced_graph = tracer.trace(root, concrete_args)

    # The GraphModule takes its name from the module class (or the function).
    if isinstance(root, torch.nn.Module):
        gm_name = root.__class__.__name__
    else:
        gm_name = root.__name__
    return GraphModule(tracer.root, traced_graph, gm_name)
|
| 1155 |
+
|
| 1156 |
+
|
| 1157 |
+
@wrap
|
| 1158 |
+
def _assert_is_none(value, msg):
|
| 1159 |
+
assert value is None, msg
|
llava_next/lib/python3.10/site-packages/torch/fx/annotate.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.fx.proxy import Proxy
|
| 2 |
+
from ._compatibility import compatibility
|
| 3 |
+
|
| 4 |
+
@compatibility(is_backward_compatible=False)
def annotate(val, type):
    """
    Record ``type`` on ``val``'s node when ``val`` is an ``fx.Proxy`` (i.e. we
    are tracing); otherwise return ``val`` unchanged.
    """
    # val could be either a regular value (not tracing)
    # or fx.Proxy (tracing)
    if not isinstance(val, Proxy):
        return val

    if val.node.type:
        # A node may only be annotated once.
        raise RuntimeError(f"Tried to annotate a value that already had a type on it!"
                           f" Existing type is {val.node.type} "
                           f"and new type is {type}. "
                           f"This could happen if you tried to annotate a function parameter "
                           f"value (in which case you should use the type slot "
                           f"on the function signature) or you called "
                           f"annotate on the same value twice")
    val.node.type = type
    return val
|
llava_next/lib/python3.10/site-packages/torch/fx/config.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Whether to disable showing progress on compilation passes
# Need to add a new config otherwise will get a circular import if dynamo config is imported here
disable_progress = True

# If True this also shows the node names in each pass, for small models this is great but larger models it's quite noisy
verbose_progress = False
|
llava_next/lib/python3.10/site-packages/torch/fx/graph.py
ADDED
|
@@ -0,0 +1,1570 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict
|
| 2 |
+
from .node import Node, Argument, Target, map_arg, _type_repr, _get_qualified_name
|
| 3 |
+
import torch.utils._pytree as pytree
|
| 4 |
+
from . import _pytree as fx_pytree
|
| 5 |
+
from ._compatibility import compatibility
|
| 6 |
+
|
| 7 |
+
import contextlib
|
| 8 |
+
from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type
|
| 9 |
+
from dataclasses import dataclass
|
| 10 |
+
from contextlib import contextmanager
|
| 11 |
+
import copy
|
| 12 |
+
import torch
|
| 13 |
+
import keyword
|
| 14 |
+
import re
|
| 15 |
+
import builtins
|
| 16 |
+
import math
|
| 17 |
+
import warnings
|
| 18 |
+
import inspect
|
| 19 |
+
|
| 20 |
+
__all__ = ["PythonCode", "CodeGen", "Graph"]
|
| 21 |
+
|
| 22 |
+
if TYPE_CHECKING:
|
| 23 |
+
from .graph_module import GraphModule # noqa: F401
|
| 24 |
+
from ._symbolic_trace import Tracer # noqa: F401
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# Mapping of builtins to their `typing` equivalent.
# Used by `type_repr` (in `CodeGen._gen_python_code`) so that generated
# annotations reference the subscriptable `typing` aliases (e.g. `List[int]`)
# rather than the bare builtin classes.
_origin_type_map = {
    list: List,
    dict: Dict,
    set: Set,
    frozenset: FrozenSet,
    tuple: Tuple,
}
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# Signature for functions that transform the body (`list[str]`) of the
|
| 38 |
+
# generated code
|
| 39 |
+
TransformCodeFunc = Callable[[List[str]], List[str]]
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class _CustomBuiltin(NamedTuple):
    """Additional objs that we add to every graph's globals.

    The repr() for some standard library objects is not valid Python code without
    an import. For common objects of this sort, we bundle them in the globals of
    every FX graph.

    Entries are registered via `_register_custom_builtin` below and consumed
    when pre-filling a graph's globals table in `CodeGen._gen_python_code`.
    """
    # How to import this object from the standard library.
    import_str: str
    # The actual object, produced from that import string.
    obj: Any
|
| 53 |
+
|
| 54 |
+
_custom_builtins: Dict[str, _CustomBuiltin] = {}
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _register_custom_builtin(name: str, import_str: str, obj: Any):
    """Register *obj* under *name* as a global available to generated FX code.

    `import_str` is the import statement that makes `name` resolvable when the
    generated code is treated as standalone source (see `_CustomBuiltin`).
    """
    _custom_builtins[name] = _CustomBuiltin(import_str, obj)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# Register objects whose repr() alone is not importable Python, so generated
# FX code can reference them by these short names (see `_CustomBuiltin`).
_register_custom_builtin('inf', 'from math import inf', math.inf)
_register_custom_builtin('nan', 'from math import nan', math.nan)
_register_custom_builtin('NoneType', 'NoneType = type(None)', type(None))
_register_custom_builtin('torch', 'import torch', torch)
_register_custom_builtin('device', 'from torch import device', torch.device)
_register_custom_builtin('fx_pytree', 'import torch.fx._pytree as fx_pytree', fx_pytree)
_register_custom_builtin('pytree', 'import torch.utils._pytree as pytree', pytree)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def _is_magic(x: str) -> bool:
    """Return True if *x* is a dunder name like ``__init__``."""
    has_dunder_prefix = x.startswith('__')
    has_dunder_suffix = x.endswith('__')
    return has_dunder_prefix and has_dunder_suffix
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def _snake_case(s: str) -> str:
    """
    Transforms the given string ``s`` to a Python-style variable name

    Examples:
        ``mod.snake_case`` -> ``mod.snake_case``
        ``mod.pascalCase``-> ``mod.pascal_case``
        ``mod.ALL_CAPS`` -> ``mod.all_caps``
    """
    pieces = []
    previous_was_lower = False
    for ch in s:
        # Insert a separator only at a lower->upper transition; runs of
        # capitals (e.g. "ALL_CAPS") are simply lowercased.
        if previous_was_lower and ch.isupper():
            pieces.append('_')
        pieces.append(ch.lower())
        previous_was_lower = ch.islower()
    return ''.join(pieces)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def _is_from_torch(obj: Any) -> bool:
    """Return True if *obj* appears to originate from the ``torch`` package.

    First consults ``obj.__module__``; if absent, falls back to probing a
    couple of well-known torch namespaces by ``obj.__name__``.
    """
    module_name = getattr(obj, '__module__', None)
    if module_name is not None:
        top_level = module_name.partition('.')[0]
        if top_level != 'torch':
            return False
        # Dynamo/inductor internals are deliberately not treated as "torch".
        return not (
            module_name.startswith("torch._dynamo.")
            or module_name.startswith("torch._inductor.")
        )

    name = getattr(obj, '__name__', None)
    # exclude torch because torch.torch.torch.torch works. idk mang
    if name is not None and name != 'torch':
        return any(
            getattr(candidate, name, None) is obj
            for candidate in (torch, torch.nn.functional)
        )

    return False
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class _Namespace:
    """A context for associating names uniquely with objects.

    The following invariants are enforced:
    - Each object gets a single name.
    - Each name is unique within a given namespace.
    - Names generated do not shadow builtins, unless the object is indeed that builtin.
    """
    def __init__(self):
        # Object -> its assigned unique name.
        self._obj_to_name: Dict[Any, str] = {}
        # Names handed out with no object yet (see `associate_name_with_obj`).
        self._unassociated_names = set()
        # All names ever produced by this namespace.
        self._used_names: Set[str] = set()
        # Highest numeric suffix handed out per base name, for fast uniquing.
        self._base_count: Dict[str, int] = defaultdict(int)

        # Characters not legal in a Python identifier.
        self._illegal_char_regex = re.compile('[^0-9a-zA-Z_]+')
        # Splits "foo_3" into base "foo" and suffix 3.
        self._name_suffix_regex = re.compile(r"(.*)_(\d+)$")

    def create_name(self, candidate: str, obj: Optional[Any]) -> str:
        """Create a unique name.

        Arguments:
            candidate: used as the basis for the unique name, relevant to the user.
            obj: If not None, an object that will be associated with the unique name.
        """
        # An object that already has a name keeps it (invariant 1).
        if obj is not None and obj in self._obj_to_name:
            return self._obj_to_name[obj]

        # delete all characters that are illegal in a Python identifier
        candidate = self._illegal_char_regex.sub('_', candidate)

        if not candidate:
            candidate = '_unnamed'

        # Identifiers may not start with a digit.
        if candidate[0].isdigit():
            candidate = f'_{candidate}'

        # Separate an existing numeric suffix so "foo_1" and "foo" share the
        # same base counter.
        match = self._name_suffix_regex.match(candidate)
        if match is None:
            base = candidate
            num = None
        else:
            base, num_str = match.group(1, 2)
            num = int(num_str)

        candidate = base if num is None else f'{base}_{num}'
        # No (or zero) explicit suffix: start from the last suffix we used
        # for this base so the while-loop below rarely iterates.
        if not num:
            num = self._base_count[base]

        # Bump the suffix until the name is both unused and legal
        # (not a keyword, not shadowing a builtin -- see `_is_illegal_name`).
        while candidate in self._used_names or self._is_illegal_name(candidate, obj):
            num += 1
            candidate = f'{base}_{num}'

        self._used_names.add(candidate)
        self._base_count[base] = num
        if obj is None:
            self._unassociated_names.add(candidate)
        else:
            self._obj_to_name[obj] = candidate
        return candidate

    def associate_name_with_obj(self, name: str, obj: Any):
        """Associate a unique name with an object.

        Neither `name` nor `obj` should be associated already.
        """
        assert obj not in self._obj_to_name
        assert name in self._unassociated_names
        self._obj_to_name[obj] = name
        self._unassociated_names.remove(name)

    def _is_illegal_name(self, name: str, obj: Any) -> bool:
        """Return True if *name* may not be used for *obj* in generated code."""
        # 1. keywords are never allowed as names.
        if name in keyword.kwlist:
            return True

        # 2. Can't shadow a builtin name, unless you *are* that builtin.
        if name in builtins.__dict__:
            return obj is not builtins.__dict__[name]

        # 3. Can't shadow our custom builtins either
        if name in _custom_builtins:
            return obj is not _custom_builtins[name].obj

        return False
|
| 197 |
+
|
| 198 |
+
# Short dtype names used when printing node metadata in verbose mode
# (see `emit_node` inside `CodeGen._gen_python_code`).
dtype_abbrs = {
    torch.bfloat16: 'bf16',
    torch.float64: 'f64',
    torch.float32: 'f32',
    torch.float16: 'f16',
    torch.float8_e4m3fn: 'f8e4m3fn',
    torch.float8_e5m2: 'f8e5m2',
    torch.complex32: 'c32',
    torch.complex64: 'c64',
    torch.complex128: 'c128',
    torch.int8: 'i8',
    torch.int16: 'i16',
    torch.int32: 'i32',
    torch.int64: 'i64',
    torch.bool: 'b8',
    torch.uint8: 'u8',
}
|
| 215 |
+
|
| 216 |
+
@compatibility(is_backward_compatible=True)
@dataclass
class PythonCode:
    """
    Represents all the information necessary to exec or save a graph as Python code.
    """
    # Python source code for the forward function definition.
    src: str
    # Values in global scope during execution of `src`.
    globals: Dict[str, Any]
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def _format_target(base: str, target: str) -> str:
    """Render a dotted attribute path rooted at *base* as Python source.

    Path components that are not valid identifiers (e.g. the "0" in
    "layers.0") are emitted as ``getattr(..., "0")`` calls instead of
    attribute syntax.
    """
    result = base
    for component in target.split('.'):
        if component.isidentifier():
            result = f'{result}.{component}'
        else:
            result = f'getattr({result}, "{component}")'
    return result
|
| 237 |
+
|
| 238 |
+
class _InsertPoint:
    """Context-manager helper that temporarily swaps a Graph's insertion function.

    The swap happens eagerly at construction time; leaving the ``with``
    block restores the previously-installed insertion function.
    """
    def __init__(self, graph, new_insert):
        self.graph = graph
        # Capture the active insertion function before installing the new one.
        self.orig_insert = graph._insert
        graph._insert = new_insert

    def __enter__(self):
        # Nothing to do here: the swap already happened in __init__.
        pass

    def __exit__(self, type, value, tb):
        # Restore whatever insertion function was active beforehand.
        self.graph._insert = self.orig_insert
|
| 248 |
+
|
| 249 |
+
class _node_list:
    """A lazily-iterated view over a Graph's circular doubly-linked node list.

    ``direction`` selects which link ('_next' or '_prev') is followed, so the
    same class serves both forward and reverse iteration. Nodes flagged as
    erased are skipped during iteration.
    """
    def __init__(self, graph: 'Graph', direction: str = '_next'):
        assert direction in ['_next', '_prev']
        self.graph = graph
        self.direction = direction

    def __len__(self):
        # The Graph tracks its own length; the view just reports it.
        return self.graph._len

    def __iter__(self):
        sentinel = self.graph._root
        step = self.direction
        node = getattr(sentinel, step)
        # Walk the ring until we come back around to the sentinel root.
        while node is not sentinel:
            if not node._erased:
                yield node
            node = getattr(node, step)

    def __reversed__(self):
        flipped = '_prev' if self.direction == '_next' else '_next'
        return _node_list(self.graph, flipped)
|
| 268 |
+
|
| 269 |
+
class _PyTreeInfo(NamedTuple):
    """
    Contains extra info stored when we're using Pytrees
    """
    # Argument names of the original (pre-flattening) function signature.
    orig_args: List[str]
    # TreeSpec describing how the inputs were flattened.
    in_spec: pytree.TreeSpec
    # TreeSpec for the outputs; may be None, in which case outputs pass
    # through unchanged (see `_PyTreeCodeGen.process_outputs`).
    out_spec: Optional[pytree.TreeSpec]
|
| 276 |
+
|
| 277 |
+
@compatibility(is_backward_compatible=False)
class CodeGen:
    """Controls how a Graph is lowered to Python source.

    Subclasses customize the generated ``forward()`` by overriding the hook
    methods (`gen_fn_def`, `generate_output`, `process_inputs`,
    `process_outputs`, `additional_globals`); `_gen_python_code` drives the
    actual emission.
    """
    def __init__(self):
        # Optional hook that rewrites the generated body lines (list of
        # strings) before they are joined into source.
        self._body_transformer: Optional[TransformCodeFunc] = None

    def gen_fn_def(self, free_vars: List[str], maybe_return_annotation: str) -> str:
        """
        Given the free variables and a return annotation, generates the beginning of the FX function.
        By default, `gen_fn_def(['a', 'b'], '') == 'def forward(a, b):'`
        """
        # If the original function didn't have self as its first argument, we
        # would have added it.
        if len(free_vars) == 0 or free_vars[0] != 'self':
            free_vars.insert(0, 'self')
        return f"def forward({', '.join(free_vars)}){maybe_return_annotation}:"

    def generate_output(self, output_args: Argument) -> str:
        """
        Given the output arguments, generates the return statement of the FX function.
        Note: The returned statement should not be indented.
        """
        return f'return {repr(output_args)}'

    def process_inputs(self, *args: Any) -> Any:
        """
        Transforms the inputs so that the graph can take them as arguments, as
        non-default codegen may result in the inputs to the function being
        different from the inputs to the graph.

        If the graph was directly runnable, this invariant should hold true
        `f.graph.process_outputs(f.graph(*f.graph.process_inputs(*inputs))) == f(*inputs)`
        """
        return args

    def process_outputs(self, outputs: Any) -> Any:
        """
        Transforms the outputs of the graph to be identical to the codegen.

        See ``process_inputs`` for more details.
        """
        return outputs

    def additional_globals(self) -> List[Tuple[str, Any]]:
        """
        If your codegen uses extra global values, add tuples of (identifier,reference to the value) here.
        For example, return ['List', typing.List] if you need ``List`` in the global context.
        """
        return []

    def _gen_python_code(self, nodes, root_module: str, namespace: _Namespace, *, verbose: bool = False) -> PythonCode:
        """Emit Python source for *nodes*, returning a `PythonCode`.

        Arguments:
            nodes: iterable of `Node`s in execution order.
            root_module: name used as the base for module/attribute targets
                (e.g. ``'self'``).
            namespace: `_Namespace` used to unique global names.
            verbose: if True, annotate each line with dtype/shape info from
                node metadata and stacktrace summaries.
        """
        free_vars: List[str] = []
        body: List[str] = []
        globals_: Dict[str, Any] = {}
        # Used as an ordered set of wrapped-function names (values unused).
        wrapped_fns: Dict[str, None] = {}

        # Wrap string in list to pass by reference
        maybe_return_annotation : List[str] = ['']

        def add_global(name_hint: str, obj: Any):
            """Add an obj to be tracked as a global.

            We call this for names that reference objects external to the
            Graph, like functions or types.

            Returns: the global name that should be used to reference 'obj' in generated source.
            """
            if _is_from_torch(obj) and obj != torch.device:  # to support registering torch.device
                # HACK: workaround for how torch custom ops are registered. We
                # can't import them like normal modules so they must retain their
                # fully qualified name.
                return _get_qualified_name(obj)

            # normalize the name hint to get a proper identifier
            global_name = namespace.create_name(name_hint, obj)

            if global_name in globals_:
                assert globals_[global_name] is obj
                return global_name
            globals_[global_name] = obj
            return global_name

        # Pre-fill the globals table with registered builtins.
        for name, (_, obj) in _custom_builtins.items():
            add_global(name, obj)

        def type_repr(o : Any):
            """Render type *o* as source text, registering it as a global."""
            if o == ():
                # Empty tuple is used for empty tuple type annotation Tuple[()]
                return '()'

            typename = _type_repr(o)

            if hasattr(o, '__origin__'):
                # This is a generic type, e.g. typing.List[torch.Tensor]
                origin_type = _origin_type_map.get(o.__origin__, o.__origin__)
                origin_typename = add_global(_type_repr(origin_type), origin_type)

                if hasattr(o, '__args__'):
                    # Assign global names for each of the inner type variables.
                    args = [type_repr(arg) for arg in o.__args__]

                    if len(args) == 0:
                        # Bare type, such as `typing.Tuple` with no subscript
                        # This code-path used in Python < 3.9
                        return origin_typename

                    return f'{origin_typename}[{",".join(args)}]'
                else:
                    # Bare type, such as `typing.Tuple` with no subscript
                    # This code-path used in Python 3.9+
                    return origin_typename

            # Common case: this is a regular module name like 'foo.bar.baz'
            return add_global(typename, o)

        def _format_args(args: Tuple[Argument, ...], kwargs: Dict[str, Argument]) -> str:
            """Render positional and keyword arguments as a call-site string."""
            def _get_repr(arg):
                # Handle NamedTuples (if it has `_fields`) via add_global.
                if isinstance(arg, tuple) and hasattr(arg, '_fields'):
                    qualified_name = _get_qualified_name(type(arg))
                    global_name = add_global(qualified_name, type(arg))
                    return f"{global_name}{repr(tuple(arg))}"
                elif isinstance(arg, torch._ops.OpOverload):
                    qualified_name = _get_qualified_name(arg)
                    global_name = add_global(qualified_name, arg)
                    return f"{global_name}"
                return repr(arg)
            args_s = ', '.join(_get_repr(a) for a in args)
            kwargs_s = ', '.join(f'{k} = {_get_repr(v)}' for k, v in kwargs.items())
            if args_s and kwargs_s:
                return f'{args_s}, {kwargs_s}'
            return args_s or kwargs_s

        # Run through reverse nodes and record the first instance of a use
        # of a given node. This represents the *last* use of the node in the
        # execution order of the program, which we will use to free unused
        # values
        node_to_last_use : Dict[Node, Node] = {}
        user_to_last_uses : Dict[Node, List[Node]] = {}

        def register_last_uses(n : Node, user : Node):
            # Because we visit nodes in reverse, the first user we see is the
            # last user in execution order.
            if n not in node_to_last_use:
                node_to_last_use[n] = user
                user_to_last_uses.setdefault(user, []).append(n)

        for node in reversed(nodes):
            map_arg(node.args, lambda n: register_last_uses(n, node))
            map_arg(node.kwargs, lambda n: register_last_uses(n, node))

        def delete_unused_values(user : Node):
            """
            Delete values after their last use. This ensures that values that are
            not used in the remainder of the code are freed and the memory usage
            of the code is optimal.
            """
            if user.op == 'placeholder':
                return
            if user.op == 'output':
                body.append('\n')
                return
            nodes_to_delete = user_to_last_uses.get(user, [])
            if len(nodes_to_delete):
                # Emit e.g. "; a = b = None" on the same line as the user.
                to_delete_str = ' = '.join([repr(n) for n in nodes_to_delete] + ['None'])
                body.append(f'; {to_delete_str}\n')
            else:
                body.append('\n')

        # Tracks the last stacktrace emitted so duplicates are suppressed.
        prev_stacktrace = None

        def append_stacktrace_summary(node : Node):
            """
            Append a summary of the stacktrace to the generated code. This is
            useful for debugging.
            """
            nonlocal prev_stacktrace
            pattern = re.compile(r"^File \"(.+)\", line (\d+), in (.+)$")

            if node.op not in {'placeholder', 'output'}:
                if node.stack_trace:
                    if node.stack_trace != prev_stacktrace:
                        prev_stacktrace = node.stack_trace

                        lines = node.stack_trace.strip().split('\n')
                        # stacktrace should have innermost frame last, so we
                        # iterate backwards to find the first line that starts
                        # with 'File '
                        summary_str = ""
                        for idx in range(len(lines) - 2, -1, -1):
                            line = lines[idx].strip()
                            matches = pattern.match(line)
                            if matches:
                                file = matches.group(1)
                                lineno = matches.group(2)
                                # next line should be the code
                                code = lines[idx + 1].strip()
                                summary_str = f'File: {file}:{lineno}, code: {code}'
                                break
                        body.append(f'\n# {summary_str}\n')
                elif prev_stacktrace != "":
                    prev_stacktrace = ""
                    body.append('\n# No stacktrace found for following nodes\n')

        def stringify_shape(shape : torch.Size) -> str:
            # e.g. torch.Size([2, 3]) -> "[2, 3]"
            return f"[{', '.join(str(x) for x in shape)}]"

        def emit_node(node : Node):
            """Append the source line(s) for a single node (no trailing newline)."""
            maybe_type_annotation = '' if node.type is None else f' : {type_repr(node.type)}'

            if verbose:
                # override annotation with more detailed information
                from torch._subclasses.fake_tensor import FakeTensor
                from torch.fx.experimental.proxy_tensor import py_sym_types
                from torch.fx.passes.shape_prop import TensorMetadata

                meta_val = node.meta.get('val', node.meta.get('tensor_meta', None))

                if isinstance(meta_val, FakeTensor):
                    maybe_type_annotation = f': {dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}'
                elif isinstance(meta_val, py_sym_types):
                    maybe_type_annotation = f': Sym({meta_val})'
                elif isinstance(meta_val, TensorMetadata):
                    maybe_type_annotation = f': {dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}'

            if node.op == 'placeholder':
                assert isinstance(node.target, str)
                maybe_default_arg = '' if not node.args else f' = {repr(node.args[0])}'
                free_vars.append(f'{node.target}{maybe_type_annotation}{maybe_default_arg}')
                # '*' appears in varargs placeholder targets; the bare name is
                # what is actually bound inside the function body.
                raw_name = node.target.replace('*', '')
                if raw_name != repr(node):
                    body.append(f'{repr(node)} = {raw_name}\n')
                return
            elif node.op == 'call_method':
                assert isinstance(node.target, str)
                body.append(
                    f'{repr(node)}{maybe_type_annotation} = {_format_target(repr(node.args[0]), node.target)}'
                    f'({_format_args(node.args[1:], node.kwargs)})')
                return
            elif node.op == 'call_function':
                assert callable(node.target)
                # pretty print operators
                # NOTE(review): `magic_methods`/`inplace_methods` are not
                # defined in this chunk -- presumably defined elsewhere in
                # this module; verify.
                if getattr(node.target, "__module__", "") == '_operator' and node.target.__name__ in magic_methods:
                    assert isinstance(node.args, tuple)
                    body.append(f'{repr(node)}{maybe_type_annotation} = '
                                f'{magic_methods[node.target.__name__].format(*(repr(a) for a in node.args))}')
                    return

                # pretty print inplace operators; required for jit.script to work properly
                # not currently supported in normal FX graphs, but generated by torchdynamo
                if getattr(node.target, "__module__", "") == '_operator' and node.target.__name__ in inplace_methods:
                    body.append(f'{inplace_methods[node.target.__name__].format(*(repr(a) for a in node.args))}; '
                                f'{repr(node)}{maybe_type_annotation} = {repr(node.args[0])}')
                    return

                qualified_name = _get_qualified_name(node.target)
                global_name = add_global(qualified_name, node.target)
                # special case for getattr: node.args could be 2-argument or 3-argument
                # 2-argument: attribute access; 3-argument: fall through to attrib function call with default value
                if global_name == 'getattr' and \
                        isinstance(node.args, tuple) and \
                        isinstance(node.args[1], str) and \
                        node.args[1].isidentifier() and \
                        len(node.args) == 2:
                    body.append(f'{repr(node)}{maybe_type_annotation} = {_format_target(repr(node.args[0]), node.args[1])}')
                    return
                body.append(f'{repr(node)}{maybe_type_annotation} = {global_name}({_format_args(node.args, node.kwargs)})')
                if node.meta.get('is_wrapped', False):
                    wrapped_fns.setdefault(global_name)
                return
            elif node.op == 'call_module':
                assert isinstance(node.target, str)
                body.append(f'{repr(node)}{maybe_type_annotation} = '
                            f'{_format_target(root_module, node.target)}({_format_args(node.args, node.kwargs)})')
                return
            elif node.op == 'get_attr':
                assert isinstance(node.target, str)
                body.append(f'{repr(node)}{maybe_type_annotation} = {_format_target(root_module, node.target)}')
                return
            elif node.op == 'output':
                if node.type is not None:
                    maybe_return_annotation[0] = f" -> {type_repr(node.type)}"
                body.append(self.generate_output(node.args[0]))
                return
            raise NotImplementedError(f'node: {node.op} {node.target}')

        for node in nodes:
            # NOTE: emit_node does not emit a string with newline. It depends
            # on delete_unused_values to append one
            if verbose:
                append_stacktrace_summary(node)
            emit_node(node)
            delete_unused_values(node)

        if len(body) == 0:
            # If the Graph has no non-placeholder nodes, no lines for the body
            # have been emitted. To continue to have valid Python code, emit a
            # single pass statement
            body.append('pass\n')



        if len(wrapped_fns) > 0:
            wrap_name = add_global('wrap', torch.fx.wrap)
            wrap_stmts = '\n'.join([f'{wrap_name}("{name}")' for name in wrapped_fns])
        else:
            wrap_stmts = ''

        if self._body_transformer:
            body = self._body_transformer(body)

        for name, value in self.additional_globals():
            add_global(name, value)

        prologue = self.gen_fn_def(free_vars, maybe_return_annotation[0])

        code = ''.join(body).lstrip('\n')
        # NOTE(review): the indent string below reads as a single space in
        # this copy -- upstream uses four spaces; the rendering may have
        # collapsed whitespace. Verify against the canonical source.
        code = '\n'.join(' ' + line for line in code.split('\n'))
        fn_code = f"""
{wrap_stmts}

{prologue}
{code}"""
        return PythonCode(fn_code, globals_)
|
| 599 |
+
|
| 600 |
+
|
| 601 |
+
# Ideally, we'd like to refactor all of the pytree logic into this codegen
|
| 602 |
+
# class. Unfortunately, there are 3 areas we currently need extra logic in FX.
|
| 603 |
+
# 1. In the initial symbolic trace, the pytree logic is tied up with `concrete_args`.
|
| 604 |
+
# 2. In the FX graph, we need to access 2 attributes - in_spec and out_spec.
|
| 605 |
+
# Since we can't access .graph within the FX forward, we need to copy the attribute to the module.
|
| 606 |
+
# 3. We currently can't register the pytree imports with `add_global` - not sure why.
|
| 607 |
+
class _PyTreeCodeGen(CodeGen):
|
| 608 |
+
def __init__(self, pytree_info: _PyTreeInfo):
|
| 609 |
+
super().__init__()
|
| 610 |
+
self.pytree_info: _PyTreeInfo = pytree_info
|
| 611 |
+
|
| 612 |
+
def process_inputs(self, *inputs: Any) -> Any:
|
| 613 |
+
flat_args, _ = pytree.tree_flatten(inputs)
|
| 614 |
+
return flat_args
|
| 615 |
+
|
| 616 |
+
def process_outputs(self, out: Any) -> Any:
|
| 617 |
+
if self.pytree_info is None or self.pytree_info.out_spec is None:
|
| 618 |
+
return out
|
| 619 |
+
if not isinstance(out, (list, tuple)):
|
| 620 |
+
out = [out]
|
| 621 |
+
assert(self.pytree_info.out_spec is not None)
|
| 622 |
+
return pytree.tree_unflatten(out, self.pytree_info.out_spec)
|
| 623 |
+
|
| 624 |
+
def gen_fn_def(self, free_vars, maybe_return_annotation):
|
| 625 |
+
# Given a user function/model:
|
| 626 |
+
# myargs = [myargs0, myargs1]
|
| 627 |
+
# mykwargs = {'mykwargs0': ..., 'mykwargs1': ...}
|
| 628 |
+
# def forward(self, mypos, *myargs, mykey=None, **mykwargs):
|
| 629 |
+
#
|
| 630 |
+
# The generated code flattens all keywords into positional arguments for `forward()`
|
| 631 |
+
# e.g forward(self, mypos, myargs0, myargs1, mykey, mykwargs0, mykwargs1):
|
| 632 |
+
#
|
| 633 |
+
# Within `forward`, `tree_flatten_spec``still parses args and kwargs separately
|
| 634 |
+
# e.g. tree_flatten_spec(([mypos, myargs0, myargs1],
|
| 635 |
+
# {'mykey':mykey, 'mykwargs0':mykwargs0, 'mykwargs1':mykwargs1}),
|
| 636 |
+
# self._in_spec)
|
| 637 |
+
#
|
| 638 |
+
# If the user function/model does not have keywords, the dict is suppressed from tree_flatten_spec
|
| 639 |
+
# e.g. tree_flatten_spec([mypos, myargs0, myargs1]), self._in_spec)
|
| 640 |
+
if self.pytree_info is None:
|
| 641 |
+
return super().gen_fn_def(free_vars, maybe_return_annotation)
|
| 642 |
+
|
| 643 |
+
fn_args = self.pytree_info.orig_args
|
| 644 |
+
has_orig_self = (fn_args[0] == 'self') if len(fn_args) > 0 else False
|
| 645 |
+
if has_orig_self:
|
| 646 |
+
free_vars.insert(0, 'self')
|
| 647 |
+
fn_definition = super().gen_fn_def(fn_args[:], maybe_return_annotation)
|
| 648 |
+
|
| 649 |
+
if len(free_vars) > 0: # pytree has placeholders in it
|
| 650 |
+
# when kwargs is present, in_spec is tuple(args, kwargs)
|
| 651 |
+
has_args_kwargs_tuple = self.pytree_info.in_spec.type == tuple and \
|
| 652 |
+
len(self.pytree_info.in_spec.children_specs) == 2 and \
|
| 653 |
+
self.pytree_info.in_spec.children_specs[0].type == tuple and \
|
| 654 |
+
self.pytree_info.in_spec.children_specs[1].type == dict
|
| 655 |
+
fn_kwargs = '{}'
|
| 656 |
+
fn_signature = f"[{', '.join(fn_args)}], self._in_spec"
|
| 657 |
+
if has_args_kwargs_tuple:
|
| 658 |
+
count_args = len(self.pytree_info.in_spec.children_specs[0].children_specs)
|
| 659 |
+
fn_args = self.pytree_info.orig_args[:count_args]
|
| 660 |
+
fn_kwargs = '{' + ', '.join(f"'{k}':{v}" for k, v in zip(
|
| 661 |
+
self.pytree_info.in_spec.children_specs[1].context,
|
| 662 |
+
self.pytree_info.orig_args[count_args:])) + '}'
|
| 663 |
+
fn_signature = f"([{', '.join(fn_args)}], {fn_kwargs}), self._in_spec"
|
| 664 |
+
|
| 665 |
+
fn_definition += f"""
|
| 666 |
+
{', '.join(free_vars)}, = fx_pytree.tree_flatten_spec({fn_signature})"""
|
| 667 |
+
return fn_definition
|
| 668 |
+
|
| 669 |
+
def generate_output(self, output_args):
    # Counterpart of gen_fn_def for the return side: when a pytree output
    # spec was recorded, the flat graph outputs are re-assembled into the
    # user's original container structure before being returned.
    if self.pytree_info and self.pytree_info.out_spec:
        # Emit code that unflattens the flat outputs with the stored spec.
        return f'return pytree.tree_unflatten({repr(output_args)}, self._out_spec)'
    else:
        # No pytree output spec -- fall back to the plain codegen behavior.
        return super().generate_output(output_args)
|
| 674 |
+
|
| 675 |
+
@compatibility(is_backward_compatible=True)
|
| 676 |
+
class Graph:
|
| 677 |
+
"""
|
| 678 |
+
``Graph`` is the main data structure used in the FX Intermediate Representation.
|
| 679 |
+
It consists of a series of ``Node`` s, each representing callsites (or other
|
| 680 |
+
syntactic constructs). The list of ``Node`` s, taken together, constitute a
|
| 681 |
+
valid Python function.
|
| 682 |
+
|
| 683 |
+
For example, the following code
|
| 684 |
+
|
| 685 |
+
.. code-block:: python
|
| 686 |
+
|
| 687 |
+
import torch
|
| 688 |
+
import torch.fx
|
| 689 |
+
|
| 690 |
+
class MyModule(torch.nn.Module):
|
| 691 |
+
def __init__(self):
|
| 692 |
+
super().__init__()
|
| 693 |
+
self.param = torch.nn.Parameter(torch.rand(3, 4))
|
| 694 |
+
self.linear = torch.nn.Linear(4, 5)
|
| 695 |
+
|
| 696 |
+
def forward(self, x):
|
| 697 |
+
return torch.topk(torch.sum(self.linear(x + self.linear.weight).relu(), dim=-1), 3)
|
| 698 |
+
|
| 699 |
+
m = MyModule()
|
| 700 |
+
gm = torch.fx.symbolic_trace(m)
|
| 701 |
+
|
| 702 |
+
Will produce the following Graph::
|
| 703 |
+
|
| 704 |
+
print(gm.graph)
|
| 705 |
+
|
| 706 |
+
.. code-block:: text
|
| 707 |
+
|
| 708 |
+
graph(x):
|
| 709 |
+
%linear_weight : [num_users=1] = self.linear.weight
|
| 710 |
+
%add_1 : [num_users=1] = call_function[target=operator.add](args = (%x, %linear_weight), kwargs = {})
|
| 711 |
+
%linear_1 : [num_users=1] = call_module[target=linear](args = (%add_1,), kwargs = {})
|
| 712 |
+
%relu_1 : [num_users=1] = call_method[target=relu](args = (%linear_1,), kwargs = {})
|
| 713 |
+
%sum_1 : [num_users=1] = call_function[target=torch.sum](args = (%relu_1,), kwargs = {dim: -1})
|
| 714 |
+
%topk_1 : [num_users=1] = call_function[target=torch.topk](args = (%sum_1, 3), kwargs = {})
|
| 715 |
+
return topk_1
|
| 716 |
+
|
| 717 |
+
For the semantics of operations represented in the ``Graph``, please see :class:`Node`.
|
| 718 |
+
"""
|
| 719 |
+
|
| 720 |
+
@compatibility(is_backward_compatible=True)
def __init__(self, owning_module: Optional["GraphModule"] = None, tracer_cls: Optional[Type["Tracer"]] = None,
             tracer_extras: Optional[Dict[str, Any]] = None):
    """
    Construct an empty Graph.

    Args:

        owning_module (Optional[GraphModule]): The module this graph belongs to,
            if any. Used e.g. by ``get_attr``/``call_module`` to validate targets.

        tracer_cls (Optional[Type[Tracer]]): The Tracer class that produced this
            graph, retained for retracing/copying.

        tracer_extras (Optional[Dict[str, Any]]): Extra tracer configuration,
            retained alongside ``tracer_cls``.
    """
    # Sentinel node anchoring the doubly-linked list of nodes; it is not a
    # real graph node and never appears when iterating ``self.nodes``.
    self._root : Node = Node(self, '', 'root', '', (), {})
    self._used_names : Dict[str, int] = {}  # base name -> number
    # Default insert point: prepending to the root sentinel places new
    # nodes at the end of the node list.
    self._insert = self._root.prepend
    # Count of live (non-erased) nodes; kept in sync by create_node/erase_node.
    self._len = 0
    # Namespace that guarantees unique node names within this graph.
    self._graph_namespace = _Namespace()
    self._owning_module = owning_module
    self._tracer_cls = tracer_cls
    self._tracer_extras = tracer_extras
    # Strategy object responsible for turning this Graph into Python source.
    self._codegen = CodeGen()
    self._co_fields : Dict[str, Any] = {}
|
| 736 |
+
|
| 737 |
+
@property
def owning_module(self):
    """Return the ``GraphModule`` that owns this graph, or ``None``."""
    return self._owning_module

@owning_module.setter
def owning_module(self, mod: Optional["GraphModule"]):
    # Plain assignment; target validation in get_attr/call_module only
    # happens when this is non-None.
    self._owning_module = mod
|
| 744 |
+
|
| 745 |
+
@property
def nodes(self) -> _node_list:
    """
    Get the list of Nodes that constitute this Graph.

    Note that this ``Node`` list representation is a doubly-linked list. Mutations
    during iteration (e.g. delete a Node, add a Node) are safe.

    Returns:

        A doubly-linked list of Nodes. Note that ``reversed`` can be called on
        this list to switch iteration order.
    """
    # _node_list is a live view over the linked list rooted at self._root;
    # it reflects subsequent graph mutations rather than snapshotting.
    return _node_list(self)
|
| 759 |
+
|
| 760 |
+
@compatibility(is_backward_compatible=True)
def graph_copy(self, g : 'Graph', val_map : Dict[Node, Node], return_output_node=False) -> 'Optional[Argument]':
    """
    Copy all nodes from a given graph into ``self``.

    Args:

        g (Graph): The source graph from which to copy Nodes.

        val_map (Dict[Node, Node]): a dictionary that will be populated with a mapping
            from nodes in ``g`` to nodes in ``self``. Note that ``val_map`` can be passed
            in with values in it already to override copying of certain values.

    Returns:

        The value in ``self`` that is now equivalent to the output value in ``g``,
        if ``g`` had an ``output`` node. ``None`` otherwise.
    """
    for node in g.nodes:
        # Pre-seeded entries in val_map act as overrides: those nodes are
        # not copied and their mapped values are used by later nodes.
        if node in val_map:
            continue
        if node.op == 'output':
            # Do not copy the output node itself; translate its argument
            # through val_map and stop (output is the last node).
            rv = map_arg(node.args[0], lambda n: val_map[n])
            return rv if not return_output_node else (rv, node)
        # Topological order of g guarantees all argument nodes are
        # already present in val_map when we copy this node.
        val_map[node] = self.node_copy(node, lambda n : val_map[n])
    return None
|
| 786 |
+
|
| 787 |
+
def __deepcopy__(self, memo=None) -> 'Graph':
    """
    Explicitly implement __deepcopy__ to prevent excessive recursion depth
    from the default implementation. This uses graph_copy to copy the nodes
    in an iterative way, rather than recursive. It also populates the
    memoization table to prevent unnecessary copies (e.g. references to
    nodes or other parts of the Graph from a custom GraphModule
    implementation).
    """
    memo = memo if memo else {}
    g = Graph(tracer_cls=self._tracer_cls)
    # Reuse the deepcopy memo table as the node value map so any later
    # deepcopy of objects referencing our nodes finds the copies.
    output_vals = g.graph_copy(self, val_map=memo, return_output_node=True)
    g._codegen = copy.deepcopy(self._codegen)
    # graph_copy returns a (mapped_output, original_output_node) tuple
    # because return_output_node=True; a well-formed graph has an output.
    assert isinstance(output_vals, tuple)
    output_val, old_output_node = output_vals
    new_output_node = g.output(output_val, type_expr=getattr(old_output_node, 'type', None))
    # Shallow-copy meta, matching node_copy's treatment of node.meta.
    new_output_node.meta = copy.copy(old_output_node.meta)
    return g
|
| 804 |
+
|
| 805 |
+
@compatibility(is_backward_compatible=True)
def create_node(self, op: str, target: 'Target',
                args: Optional[Tuple['Argument', ...]] = None,
                kwargs: Optional[Dict[str, 'Argument']] = None,
                name: Optional[str] = None,
                type_expr: Optional[Any] = None) -> Node:
    """
    Create a ``Node`` and add it to the ``Graph`` at the current insert-point.
    Note that the current insert-point can be set via :meth:`Graph.inserting_before`
    and :meth:`Graph.inserting_after`.

    Args:
        op (str): the opcode for this Node. One of 'call_function', 'call_method', 'get_attr',
            'call_module', 'placeholder', or 'output'. The semantics of these opcodes are
            described in the ``Graph`` docstring.

        args (Optional[Tuple[Argument, ...]]): is a tuple of arguments to this node.

        kwargs (Optional[Dict[str, Argument]]): the kwargs of this Node

        name (Optional[str]): an optional string name for the ``Node``.
            This will influence the name of the value assigned to in the
            Python generated code.

        type_expr (Optional[Any]): an optional type annotation representing the
            Python type the output of this node will have.

    Returns:

        The newly-created and inserted node.
    """
    assert op in ('call_function', 'call_method', 'get_attr', 'call_module', 'placeholder', 'output')
    # Normalize None to empty containers so Node always sees a tuple/dict.
    args = () if args is None else args
    kwargs = {} if kwargs is None else kwargs
    assert isinstance(args, tuple), "args must be a tuple"
    assert isinstance(kwargs, dict), "kwargs must be a dict"

    # Derive a candidate name from the target when none was given, then
    # let the graph namespace uniquify it.
    candidate = name if name is not None else self._target_to_str(target)
    name = self._graph_namespace.create_name(candidate, None)
    n = Node(self, name, op, target, args, kwargs, type_expr)

    # Bind the final name to the node so later name lookups resolve to it.
    self._graph_namespace.associate_name_with_obj(name, n)

    # Insert at the current insert point (set by inserting_before/after)
    # and keep the live-node count in sync.
    self._insert(n)
    self._len += 1
    return n
|
| 851 |
+
|
| 852 |
+
@compatibility(is_backward_compatible=False)
def process_inputs(self, *args):
    """
    Processes args so that they can be passed to the FX graph.

    Delegates to the active codegen strategy (e.g. flattening pytree
    structures into the graph's flat placeholder list).
    """
    return self._codegen.process_inputs(*args)

@compatibility(is_backward_compatible=False)
def process_outputs(self, out):
    """
    Processes the FX graph's raw output so that it can be returned to the
    caller, delegating to the active codegen strategy.
    """
    return self._codegen.process_outputs(out)
|
| 862 |
+
|
| 863 |
+
|
| 864 |
+
@compatibility(is_backward_compatible=True)
def erase_node(self, to_erase : Node) -> None:
    """
    Erases a ``Node`` from the ``Graph``. Throws an exception if
    there are still users of that node in the ``Graph``.

    Args:

        to_erase (Node): The ``Node`` to erase from the ``Graph``.

    Raises:

        RuntimeError: if ``to_erase`` still has users in the graph.
    """
    if len(to_erase.users) > 0:
        raise RuntimeError(f'Tried to erase Node {to_erase} but it still had {len(to_erase.users)} '
                           f'users in the graph: {to_erase.users}!')
    # Erasing twice is a no-op with a warning rather than an error.
    if to_erase._erased:
        warnings.warn(f"erase_node({to_erase}) on an already erased node")
        return

    to_erase._remove_from_list()
    to_erase._erased = True  # iterators may retain handles to erased nodes
    self._len -= 1

    # Null out this Node's argument nodes so that the Nodes referred to
    # can update their ``users`` accordingly
    new_args = map_arg(to_erase.args, lambda n: None)
    assert isinstance(new_args, tuple)
    to_erase.args = new_args
    new_kwargs = map_arg(to_erase.kwargs, lambda n: None)
    assert isinstance(new_kwargs, dict)
    to_erase.kwargs = new_kwargs
|
| 893 |
+
|
| 894 |
+
@compatibility(is_backward_compatible=True)
def inserting_before(self, n: Optional[Node] = None):
    """Set the point at which create_node and companion methods will insert into the graph.
    When used within a 'with' statement, this will temporary set the insert point and
    then restore it when the with statement exits::

        with g.inserting_before(n):
            ...  # inserting before node n
        ...  # insert point restored to what it was previously
        g.inserting_before(n)  # set the insert point permanently

    Args:

        n (Optional[Node]): The node before which to insert. If None this will insert before
            the beginning of the entire graph.

    Returns:
        A resource manager that will restore the insert point on ``__exit__``.
    """
    # "Before the whole graph" is expressed as "after the root sentinel".
    if n is None:
        return self.inserting_after(self._root)
    assert n.graph == self, "Node to insert before is not in graph."
    return _InsertPoint(self, n.prepend)

@compatibility(is_backward_compatible=True)
def inserting_after(self, n: Optional[Node] = None):
    """Set the point at which create_node and companion methods will insert into the graph.
    When used within a 'with' statement, this will temporary set the insert point and
    then restore it when the with statement exits::

        with g.inserting_after(n):
            ...  # inserting after node n
        ...  # insert point restored to what it was previously
        g.inserting_after(n)  # set the insert point permanently

    Args:

        n (Optional[Node]): The node after which to insert. If None this will insert after
            the beginning of the entire graph.

    Returns:
        A resource manager that will restore the insert point on ``__exit__``.
    """
    # "After the beginning of the graph" is expressed as "before the root
    # sentinel" (the list is circular, so this is the end of the list).
    if n is None:
        return self.inserting_before(self._root)
    assert n.graph == self, "Node to insert after is not in graph."
    return _InsertPoint(self, n.append)
|
| 941 |
+
|
| 942 |
+
@compatibility(is_backward_compatible=True)
def placeholder(self, name: str, type_expr: Optional[Any] = None,
                default_value : Any = inspect.Signature.empty) -> Node:
    """
    Insert a ``placeholder`` node into the Graph. A ``placeholder`` represents
    a function input.

    Args:

        name (str): A name for the input value. This corresponds to the name
            of the positional argument to the function this ``Graph`` represents.

        type_expr (Optional[Any]): an optional type annotation representing the
            Python type the output of this node will have. This is needed in some
            cases for proper code generation (e.g. when the function is used
            subsequently in TorchScript compilation).

        default_value (Any): The default value this function argument should take
            on. NOTE: to allow for `None` as a default value, `inspect.Signature.empty`
            should be passed as this argument to specify that the parameter does _not_
            have a default value.

    Returns:

        The newly-created and inserted ``placeholder`` node.

    .. note::
        The same insertion point and type expression rules apply for this method
        as ``Graph.create_node``.
    """
    # A placeholder's default value (if any) is stored as its sole arg;
    # inspect.Signature.empty is the sentinel for "no default".
    args = () if default_value is inspect.Signature.empty else (default_value,)
    return self.create_node('placeholder', name, args=args, type_expr=type_expr)
|
| 970 |
+
|
| 971 |
+
@compatibility(is_backward_compatible=True)
def get_attr(self, qualified_name: str, type_expr: Optional[Any] = None) -> Node:
    """
    Insert a ``get_attr`` node into the Graph. A ``get_attr`` ``Node`` represents the
    fetch of an attribute from the ``Module`` hierarchy.

    Args:

        qualified_name (str): the fully-qualified name of the attribute to be retrieved.
            For example, if the traced Module has a submodule named ``foo``, which has a
            submodule named ``bar``, which has an attribute named ``baz``, the qualified
            name ``foo.bar.baz`` should be passed as ``qualified_name``.

        type_expr (Optional[Any]): an optional type annotation representing the
            Python type the output of this node will have.


    Returns:

        The newly-created and inserted ``get_attr`` node.

    .. note::
        The same insertion point and type expression rules apply for this method
        as ``Graph.create_node``.
    """
    def _get_attr_reference_exists(mod: torch.nn.Module, qualified_name: str) -> bool:
        # True iff `qualified_name` resolves, under `mod`, to a submodule,
        # Parameter, or registered buffer -- the targets 'get_attr' can
        # legitimately reference.
        module_path, _, name = qualified_name.rpartition(".")

        try:
            submod: torch.nn.Module = mod.get_submodule(module_path)
        except AttributeError:
            warnings.warn(f"Failed to fetch module {module_path}!")
            return False

        if not hasattr(submod, name):
            return False

        res = getattr(submod, name)

        # Plain (non-module/parameter/buffer) attributes do not count as
        # valid get_attr references.
        if (not isinstance(res, torch.nn.Module)
                and not isinstance(res, torch.nn.Parameter)
                and name not in submod._buffers):
            return False

        return True

    # Only a warning, not an error: the attribute may legitimately be
    # added to the owning module after this node is created.
    if (self.owning_module and
            not _get_attr_reference_exists(self.owning_module, qualified_name)):
        warnings.warn("Attempted to insert a get_attr Node with no "
                      "underlying reference in the owning "
                      "GraphModule! Call "
                      "GraphModule.add_submodule to add the "
                      "necessary submodule, "
                      "GraphModule.add_parameter to add the "
                      "necessary Parameter, or "
                      "nn.Module.register_buffer to add the "
                      "necessary buffer", stacklevel=2)
    return self.create_node('get_attr', qualified_name, type_expr=type_expr)
|
| 1029 |
+
|
| 1030 |
+
@compatibility(is_backward_compatible=True)
def call_module(self,
                module_name: str,
                args: Optional[Tuple['Argument', ...]] = None,
                kwargs: Optional[Dict[str, 'Argument']] = None,
                type_expr: Optional[Any] = None) -> Node:
    """
    Insert a ``call_module`` ``Node`` into the ``Graph``. A ``call_module`` node
    represents a call to the forward() function of a ``Module`` in the ``Module``
    hierarchy.

    Args:

        module_name (str): The qualified name of the ``Module`` in the ``Module``
            hierarchy to be called. For example, if the traced ``Module`` has a
            submodule named ``foo``, which has a submodule named ``bar``, the
            qualified name ``foo.bar`` should be passed as ``module_name`` to
            call that module.

        args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
            to the called method. Note that this should *not* include a ``self`` argument.

        kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
            to the called method

        type_expr (Optional[Any]): an optional type annotation representing the
            Python type the output of this node will have.

    Returns:

        The newly-created and inserted ``call_module`` node.

    .. note::
        The same insertion point and type expression rules apply for this method
        as :meth:`Graph.create_node`.
    """
    # NOTE(review): nn.Module.get_submodule raises AttributeError for a
    # missing submodule rather than returning None, so this `is None`
    # check likely never triggers the warning (a missing module would
    # raise here instead) -- confirm against the owning module's API
    # before relying on the warning.
    if (self.owning_module and
            self.owning_module.get_submodule(module_name) is None):
        warnings.warn("Attempted to insert a call_module Node with "
                      "no underlying reference in the owning "
                      "GraphModule! Call "
                      "GraphModule.add_submodule to add the "
                      "necessary submodule")
    return self.create_node('call_module', module_name, args, kwargs, type_expr=type_expr)
|
| 1074 |
+
|
| 1075 |
+
@compatibility(is_backward_compatible=True)
def call_method(self,
                method_name: str,
                args: Optional[Tuple['Argument', ...]] = None,
                kwargs: Optional[Dict[str, 'Argument']] = None,
                type_expr: Optional[Any] = None) -> Node:
    """
    Insert a ``call_method`` ``Node`` into the ``Graph``. A ``call_method`` node
    represents a call to a given method on the 0th element of ``args``.

    Args:

        method_name (str): The name of the method to apply to the self argument.
            For example, if args[0] is a ``Node`` representing a ``Tensor``,
            then to call ``relu()`` on that ``Tensor``, pass ``relu`` to ``method_name``.

        args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
            to the called method. Note that this *should* include a ``self`` argument.

        kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
            to the called method

        type_expr (Optional[Any]): an optional type annotation representing the
            Python type the output of this node will have.

    Returns:

        The newly created and inserted ``call_method`` node.

    .. note::
        The same insertion point and type expression rules apply for this method
        as :meth:`Graph.create_node`.
    """
    # Thin convenience wrapper over create_node with op='call_method'.
    return self.create_node('call_method', method_name, args, kwargs, type_expr=type_expr)
|
| 1109 |
+
|
| 1110 |
+
@compatibility(is_backward_compatible=True)
def call_function(self,
                  the_function: Callable[..., Any],
                  args: Optional[Tuple['Argument', ...]] = None,
                  kwargs: Optional[Dict[str, 'Argument']] = None,
                  type_expr: Optional[Any] = None) -> Node:
    """
    Insert a ``call_function`` ``Node`` into the ``Graph``. A ``call_function`` node
    represents a call to a Python callable, specified by ``the_function``.

    Args:

        the_function (Callable[..., Any]): The function to be called. Can be any PyTorch
            operator, Python function, or member of the ``builtins`` or ``operator``
            namespaces.

        args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
            to the called function.

        kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
            to the called function

        type_expr (Optional[Any]): an optional type annotation representing the
            Python type the output of this node will have.

    Returns:

        The newly created and inserted ``call_function`` node.

    .. note::
        The same insertion point and type expression rules apply for this method
        as :meth:`Graph.create_node`.
    """
    # Thin convenience wrapper over create_node with op='call_function'.
    return self.create_node('call_function', the_function, args, kwargs, type_expr=type_expr)
|
| 1144 |
+
|
| 1145 |
+
@compatibility(is_backward_compatible=True)
def node_copy(self, node: Node, arg_transform: Callable[[Node], 'Argument'] = lambda x: x) -> Node:
    """
    Copy a node from one graph into another. ``arg_transform`` needs to transform arguments from
    the graph of node to the graph of self. Example::

        # Copying all the nodes in `g` into `new_graph`
        g : torch.fx.Graph = ...
        new_graph = torch.fx.graph()
        value_remap = {}
        for node in g.nodes:
            value_remap[node] = new_graph.node_copy(node, lambda n : value_remap[n])

    Args:

        node (Node): The node to copy into ``self``.

        arg_transform (Callable[[Node], Argument]): A function that transforms
            ``Node`` arguments in node's ``args`` and ``kwargs`` into the
            equivalent argument in ``self``. In the simplest case, this should
            retrieve a value out of a table mapping Nodes in the original
            graph to ``self``.
    """
    # Translate every Node reference in args/kwargs into this graph;
    # non-Node arguments pass through map_arg unchanged.
    args = map_arg(node.args, arg_transform)
    kwargs = map_arg(node.kwargs, arg_transform)
    assert isinstance(args, tuple)
    assert isinstance(kwargs, dict)
    result_node = self.create_node(node.op, node.target, args, kwargs, node.name, node.type)
    # Shallow copy: meta values are shared between original and copy.
    result_node.meta = copy.copy(node.meta)
    return result_node
|
| 1175 |
+
|
| 1176 |
+
@compatibility(is_backward_compatible=True)
def output(self, result: 'Argument', type_expr: Optional[Any] = None):
    """
    Insert an ``output`` ``Node`` into the ``Graph``. An ``output`` node represents
    a ``return`` statement in Python code. ``result`` is the value that should
    be returned.

    Args:

        result (Argument): The value to be returned.

        type_expr (Optional[Any]): an optional type annotation representing the
            Python type the output of this node will have.

    .. note::

        The same insertion point and type expression rules apply for this method
        as ``Graph.create_node``.
    """
    # The returned value is stored as the single positional arg of the node.
    return self.create_node(op='output', target='output', args=(result,), type_expr=type_expr)
|
| 1196 |
+
|
| 1197 |
+
def _target_to_str(self, target : Target) -> str:
    """Derive a base identifier string from a node target.

    Callables contribute their ``__name__``; string targets are used
    directly, with dunder names unwrapped (e.g. ``__add__`` -> ``add``).
    The result is normalized to snake_case.
    """
    if callable(target):
        base = target.__name__
    else:
        assert isinstance(target, str)
        # Strip the leading/trailing double underscores from magic names.
        base = target[2:-2] if _is_magic(target) else target
    return _snake_case(base)
|
| 1207 |
+
|
| 1208 |
+
@compatibility(is_backward_compatible=True)
def python_code(self, root_module: str, *, verbose: bool = False) -> PythonCode:
    """
    Turn this ``Graph`` into valid Python code.

    Args:

        root_module (str): The name of the root module on which to look-up
            qualified name targets. This is usually 'self'.

        verbose (bool): Keyword-only. Forwarded to the codegen; enables more
            verbose generated output.

    Returns:

        A PythonCode object, consisting of two fields:
            src: the Python source code representing the object
            globals: a dictionary of global names in `src` -> the objects that they reference.
    """
    # NOTE: [Graph Namespaces]
    #
    # There are two types of symbols in generated Python source code:
    # locals and globals.
    # Locals are locally defined by the output of a node in the Graph.
    # Globals are references to external objects, like functions or types.
    #
    # When generating Python code, we need to make sure to name things
    # appropriately. In particular:
    # - All names should be unique, to avoid weird shadowing bugs.
    # - These names need to be consistent, e.g. a object should always be
    #   referenced by the same name.
    #
    # To do this, we create a new namespace just for this source. All names
    # that get printed must come from this namespace.
    #
    # Why can't we re-use node.name? Because it was generated within the
    # namespace `self._graph_namespace`. In order to provide uniqueness
    # over both locals (node.name) *and* globals, we create a completely
    # new namespace to put all identifiers in.
    namespace = _Namespace()

    # Override Node's repr to generate a valid name within our namespace.
    # Since repr() is designed to produce a valid Python expression, it
    # makes sense to re-use it. This way, it's easy to print something like
    # Tuple[Node, Node] by simply calling repr() on it. Node's __repr__ is
    # implemented cooperatively to allow this.
    def node_repr(n: Node):
        return namespace.create_name(n.name, n)

    @contextmanager
    def override_node_repr(graph: Graph):
        # Temporarily swap in node_repr for every node's _repr_fn; the
        # originals are saved so the graph is left untouched on exit, even
        # if code generation raises.
        orig_repr_fns = {}
        for node in graph.nodes:
            orig_repr_fns[node] = node._repr_fn
            node._repr_fn = node_repr
        try:
            yield None
        finally:
            # restore the original repr functions
            for node in graph.nodes:
                node._repr_fn = orig_repr_fns[node]

    with override_node_repr(self):
        return self._python_code(root_module, namespace, verbose=verbose)
|
| 1269 |
+
|
| 1270 |
+
def _python_code(self, root_module: str, namespace: _Namespace, *, verbose: bool = False) -> PythonCode:
    """Generate Python source via the active codegen strategy.

    Internal helper for :meth:`python_code`; assumes node reprs have
    already been redirected into ``namespace``.
    """
    return self._codegen._gen_python_code(self.nodes, root_module, namespace, verbose=verbose)
|
| 1272 |
+
|
| 1273 |
+
|
| 1274 |
+
def __str__(self) -> str:
    """
    Return a human-readable (not machine-readable) string representation
    of this Graph
    """
    # ``format_node`` fills this list with placeholder (argument) names
    # as a side effect while we walk the nodes.
    placeholder_names : List[str] = []
    # One-element list so ``format_node`` can modify the closed-over value.
    maybe_return_typename : List[str] = ['']

    formatted = [n.format_node(placeholder_names) for n in self.nodes]
    header = f"graph({', '.join(placeholder_names)}){maybe_return_typename[0]}:"
    # Nodes that format to an empty/None string (e.g. placeholders, which
    # only contribute to the header) are omitted from the body.
    body = ''.join(f'\n    {line}' for line in formatted if line)
    return header + body
|
| 1291 |
+
|
| 1292 |
+
@compatibility(is_backward_compatible=True)
def print_tabular(self):
    """
    Prints the intermediate representation of the graph in tabular
    format. Note that this API requires the ``tabulate`` module to be
    installed.

    Raises:
        ImportError: if ``tabulate`` is not installed (after printing an
            actionable message).
    """
    # Import lazily so the optional dependency is only required when this
    # method is actually used.
    try:
        from tabulate import tabulate
    except ImportError:
        print("`print_tabular` relies on the library `tabulate`, "
              "which could not be found on this machine. Run `pip "
              "install tabulate` to install the library.")
        raise

    node_specs = [[n.op, n.name, n.target, n.args, n.kwargs]
                  for n in self.nodes]
    print(tabulate(node_specs,
          headers=['opcode', 'name', 'target', 'args', 'kwargs']))
|
| 1311 |
+
|
| 1312 |
+
@compatibility(is_backward_compatible=True)
def lint(self):
    """
    Runs various checks on this Graph to make sure it is well-formed. In
    particular:
    - Checks Nodes have correct ownership (owned by this graph)
    - Checks Nodes appear in topological order
    - If this Graph has an owning GraphModule, checks that targets
      exist in that GraphModule

    Raises:
        RuntimeError / ValueError: on the first violation found.
    """

    # Check topo order
    def check_arg(arg : Node, n : Optional[Node] = None) -> None:
        # Validate one Node-valued argument: it must belong to this graph
        # and must have been defined (seen) before its use.
        context_str = f' of Node \'{n}\' ' if n else ' '
        if arg.graph is not self:
            raise RuntimeError(f'Argument \'{arg}\'{context_str}does not belong to this Graph, '
                               f'but was used as an argument! If you are copying nodes from another graph, make '
                               f'sure to use ``arg_transform`` on node_copy() to remap values\n{self}')
        if arg not in seen_values:
            raise RuntimeError(f'Argument \'{arg}\'{context_str}was used before it has been '
                               f'defined! Please check that Nodes in the graph are topologically ordered\n{self}')

    seen_names : Set[str] = set()
    seen_values : Set[Node] = set()
    for node in self.nodes:
        if node.op not in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output']:
            raise RuntimeError(f'Node {node} had unknown opcode {node.op}!')
        if node.graph is not self:
            raise RuntimeError(f'Node \'{node}\' does not belong to this Graph!')
        # map_arg visits only Node-valued entries of args/kwargs.
        map_arg(node.args, lambda arg: check_arg(arg, node))
        map_arg(node.kwargs, lambda arg: check_arg(arg, node))
        seen_values.add(node)

        if node.name in seen_names:
            raise RuntimeError(f'Node redefined name {node.name}!')
        seen_names.add(node.name)

    # Check targets are legit
    if self.owning_module:
        for node in self.nodes:
            if node.op == 'call_function':
                if not callable(node.target):
                    raise ValueError(f'Node {node} target {node.target} has type {torch.typename(node.target)} but '
                                     'a Callable is expected')
            else:
                if not isinstance(node.target, str):
                    raise ValueError(f'Node {node} target {node.target} has type {torch.typename(node.target)} but '
                                     'a str is expected')
                if node.op in ['get_attr', 'call_module']:
                    # Walk the dotted target path attribute-by-attribute
                    # from the owning module, validating each step.
                    target_atoms = node.target.split('.')
                    m_itr = self.owning_module
                    for i, atom in enumerate(target_atoms):
                        new_m_itr = getattr(m_itr, atom, None)
                        seen_qualname = '.'.join(target_atoms[:i])
                        if new_m_itr is None:
                            raise RuntimeError(f'Node {node} target {node.target} references nonexistent attribute '
                                               f'{atom} of {seen_qualname}')
                        if (node.op == "call_module"
                                and not isinstance(new_m_itr, torch.nn.Module)):
                            raise RuntimeError(f'Node {node} target {node.target} {atom} of {seen_qualname} does '
                                               'not reference an nn.Module')
                        elif (node.op == "get_attr"
                                and not isinstance(new_m_itr, torch.nn.Module)
                                and not isinstance(new_m_itr, torch.nn.Parameter)
                                and atom not in m_itr._buffers):
                            # Soft check: unusual but not necessarily wrong.
                            warnings.warn(f'Node {node} target {node.target} {atom} of {seen_qualname} does '
                                          'not reference an nn.Module, nn.Parameter, or buffer, which is '
                                          'what \'get_attr\' Nodes typically target')
                        else:
                            m_itr = new_m_itr
|
| 1383 |
+
@compatibility(is_backward_compatible=True)
|
| 1384 |
+
def eliminate_dead_code(self):
|
| 1385 |
+
"""
|
| 1386 |
+
Remove all dead code from the graph, based on each node's number of
|
| 1387 |
+
users, and whether the nodes have any side effects. The graph must be
|
| 1388 |
+
topologically sorted before calling.
|
| 1389 |
+
|
| 1390 |
+
Returns:
|
| 1391 |
+
bool: Whether the graph was changed as a result of the pass.
|
| 1392 |
+
|
| 1393 |
+
Example:
|
| 1394 |
+
|
| 1395 |
+
Before dead code is eliminated, `a` from `a = x + 1` below has no users
|
| 1396 |
+
and thus can be eliminated from the graph without having an effect.
|
| 1397 |
+
|
| 1398 |
+
.. code-block:: python
|
| 1399 |
+
|
| 1400 |
+
def forward(self, x):
|
| 1401 |
+
a = x + 1
|
| 1402 |
+
return x + self.attr_1
|
| 1403 |
+
|
| 1404 |
+
After dead code is eliminated, `a = x + 1` has been removed, and the rest
|
| 1405 |
+
of `forward` remains.
|
| 1406 |
+
|
| 1407 |
+
.. code-block:: python
|
| 1408 |
+
|
| 1409 |
+
def forward(self, x):
|
| 1410 |
+
return x + self.attr_1
|
| 1411 |
+
|
| 1412 |
+
.. warning::
|
| 1413 |
+
|
| 1414 |
+
Dead code elimination has some heuristics to avoid removing
|
| 1415 |
+
side-effectful nodes (see Node.is_impure) but in general coverage
|
| 1416 |
+
is very bad, so you should assume that this method is not sound
|
| 1417 |
+
to call unless you know that your FX graph consists entirely
|
| 1418 |
+
of functional operations.
|
| 1419 |
+
"""
|
| 1420 |
+
# Lint the graph first to make sure its topologically sorted, otherwise
|
| 1421 |
+
# DCE below will not behave as expected.
|
| 1422 |
+
self.lint()
|
| 1423 |
+
|
| 1424 |
+
# Reverse iterate so that when we remove a node, any nodes used as an
|
| 1425 |
+
# input to that node have an updated user count that no longer reflects
|
| 1426 |
+
# the removed node.
|
| 1427 |
+
changed = False
|
| 1428 |
+
for node in reversed(self.nodes):
|
| 1429 |
+
if not node.is_impure() and len(node.users) == 0:
|
| 1430 |
+
self.erase_node(node)
|
| 1431 |
+
changed = True
|
| 1432 |
+
|
| 1433 |
+
return changed
|
| 1434 |
+
|
| 1435 |
+
@compatibility(is_backward_compatible=False)
|
| 1436 |
+
def set_codegen(self, codegen: CodeGen):
|
| 1437 |
+
self._codegen = codegen
|
| 1438 |
+
|
| 1439 |
+
@compatibility(is_backward_compatible=False)
|
| 1440 |
+
def on_generate_code(
|
| 1441 |
+
self,
|
| 1442 |
+
make_transformer: Callable[[Optional[TransformCodeFunc]], TransformCodeFunc]
|
| 1443 |
+
):
|
| 1444 |
+
"""Register a transformer function when python code is generated
|
| 1445 |
+
|
| 1446 |
+
Args:
|
| 1447 |
+
make_transformer (Callable[[Optional[TransformCodeFunc]], TransformCodeFunc]):
|
| 1448 |
+
a function that returns a code transformer to be registered.
|
| 1449 |
+
This function is called by `on_generate_code` to obtain the
|
| 1450 |
+
code transformer.
|
| 1451 |
+
|
| 1452 |
+
This function is also given as its input the currently
|
| 1453 |
+
registered code transformer (or None if nothing is registered),
|
| 1454 |
+
in case it is not desirable to overwrite it. This is useful to
|
| 1455 |
+
chain code transformers together.
|
| 1456 |
+
|
| 1457 |
+
Returns:
|
| 1458 |
+
a context manager that when used in a `with` statement, to automatically
|
| 1459 |
+
restore the previously registered code transformer.
|
| 1460 |
+
|
| 1461 |
+
Example:
|
| 1462 |
+
|
| 1463 |
+
.. code-block:: python
|
| 1464 |
+
|
| 1465 |
+
|
| 1466 |
+
gm: fx.GraphModule = ...
|
| 1467 |
+
|
| 1468 |
+
# This is a code transformer we want to register. This code
|
| 1469 |
+
# transformer prepends a pdb import and trace statement at the very
|
| 1470 |
+
# beginning of the generated torch.fx code to allow for manual
|
| 1471 |
+
# debugging with the PDB library.
|
| 1472 |
+
def insert_pdb(body):
|
| 1473 |
+
return ["import pdb; pdb.set_trace()\\n", *body]
|
| 1474 |
+
|
| 1475 |
+
# Registers `insert_pdb`, and overwrites the current registered
|
| 1476 |
+
# code transformer (given by `_` to the lambda):
|
| 1477 |
+
gm.graph.on_generate_code(
|
| 1478 |
+
lambda _: insert_pdb
|
| 1479 |
+
)
|
| 1480 |
+
|
| 1481 |
+
# Or alternatively, registers a code transformer which first
|
| 1482 |
+
# runs `body` through existing registered transformer, then
|
| 1483 |
+
# through `insert_pdb`:
|
| 1484 |
+
gm.graph.on_generate_code(
|
| 1485 |
+
lambda current_trans: (
|
| 1486 |
+
lambda body: insert_pdb(
|
| 1487 |
+
current_trans(body) if current_trans
|
| 1488 |
+
else body
|
| 1489 |
+
)
|
| 1490 |
+
)
|
| 1491 |
+
)
|
| 1492 |
+
|
| 1493 |
+
gm.recompile()
|
| 1494 |
+
gm(*inputs) # drops into pdb
|
| 1495 |
+
|
| 1496 |
+
|
| 1497 |
+
This function can also be used as a context manager, with the benefit to
|
| 1498 |
+
automatically restores the previously registered code transformer:
|
| 1499 |
+
|
| 1500 |
+
.. code-block:: python
|
| 1501 |
+
|
| 1502 |
+
# ... continue from previous example
|
| 1503 |
+
|
| 1504 |
+
with gm.graph.on_generate_code(lambda _: insert_pdb):
|
| 1505 |
+
# do more stuff with `gm`...
|
| 1506 |
+
gm.recompile()
|
| 1507 |
+
gm(*inputs) # drops into pdb
|
| 1508 |
+
|
| 1509 |
+
# now previous code transformer is restored (but `gm`'s code with pdb
|
| 1510 |
+
# remains - that means you can run `gm` with pdb here too, until you
|
| 1511 |
+
# run next `recompile()`).
|
| 1512 |
+
"""
|
| 1513 |
+
on_gen_code_old = self._codegen._body_transformer
|
| 1514 |
+
self._codegen._body_transformer = make_transformer(on_gen_code_old)
|
| 1515 |
+
|
| 1516 |
+
@contextlib.contextmanager
|
| 1517 |
+
def on_generate_code_context_manager():
|
| 1518 |
+
try:
|
| 1519 |
+
yield
|
| 1520 |
+
finally:
|
| 1521 |
+
self._codegen._body_transformer = on_gen_code_old
|
| 1522 |
+
|
| 1523 |
+
return on_generate_code_context_manager()
|
| 1524 |
+
|
| 1525 |
+
|
| 1526 |
+
# Binary operators that also have a reflected (r-prefixed) variant; each value
# is a format template producing the corresponding Python expression.
reflectable_magic_methods = {
    'add': '{} + {}',
    'sub': '{} - {}',
    'mul': '{} * {}',
    'floordiv': '{} // {}',
    'truediv': '{} / {}',
    'div': '{} / {}',
    'mod': '{} % {}',
    'pow': '{} ** {}',
    'lshift': '{} << {}',
    'rshift': '{} >> {}',
    'and_': '{} & {}',
    'or_': '{} | {}',
    'xor': '{} ^ {}',
    'getitem': '{}[{}]',
    'matmul': '{} @ {}',
}

# Full set of magic methods: comparisons and unary operators, plus every
# reflectable binary operator above.
magic_methods = {
    'eq': '{} == {}',
    'ne': '{} != {}',
    'lt': '{} < {}',
    'gt': '{} > {}',
    'le': '{} <= {}',
    'ge': '{} >= {}',
    'pos': '+{}',
    'neg': '-{}',
    'invert': '~{}',
    **reflectable_magic_methods,
}

# In-place (augmented assignment) operators.
inplace_methods = {
    'iadd': '{} += {}',
    'iand': '{} &= {}',
    'ifloordiv': '{} //= {}',
    'ilshift': '{} <<= {}',
    'imod': '{} %= {}',
    'imul': '{} *= {}',
    'imatmul': '{} @= {}',
    'ior': '{} |= {}',
    'ipow': '{} **= {}',
    'irshift': '{} >>= {}',
    'isub': '{} -= {}',
    'itruediv': '{} /= {}',
    'ixor': '{} ^= {}',
    'setitem': '{}[{}] = {}',
}
|
llava_next/lib/python3.10/site-packages/torch/fx/graph_module.py
ADDED
|
@@ -0,0 +1,799 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.overrides
|
| 4 |
+
from torch.nn.modules.module import _addindent
|
| 5 |
+
from torch.package import PackageImporter, PackageExporter
|
| 6 |
+
import linecache
|
| 7 |
+
from typing import Type, Dict, List, Any, Union, Optional, Set
|
| 8 |
+
from .graph import Graph, _PyTreeCodeGen, _is_from_torch, _custom_builtins, PythonCode
|
| 9 |
+
from ._compatibility import compatibility
|
| 10 |
+
from torch.package import Importer, sys_importer
|
| 11 |
+
import copy
|
| 12 |
+
import itertools
|
| 13 |
+
import sys
|
| 14 |
+
import traceback
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
import os
|
| 17 |
+
import warnings
|
| 18 |
+
|
| 19 |
+
__all__ = ["reduce_graph_module", "reduce_package_graph_module", "reduce_deploy_graph_module", "GraphModule"]
|
| 20 |
+
|
| 21 |
+
_USER_PRESERVED_ATTRIBUTES_KEY = "_user_preserved_attributes"
|
| 22 |
+
|
| 23 |
+
# Normal exec loses the source code, however we can work with
|
| 24 |
+
# the linecache module to recover it.
|
| 25 |
+
# Using _exec_with_source will add it to our local cache
|
| 26 |
+
# and then tools like TorchScript will be able to get source info.
|
| 27 |
+
class _EvalCacheLoader:
|
| 28 |
+
def __init__(self):
|
| 29 |
+
self.eval_cache = {}
|
| 30 |
+
self.next_id = 0
|
| 31 |
+
|
| 32 |
+
def cache(self, src: str, globals: Dict[str, Any], co_fields=None):
|
| 33 |
+
"""Store the source in a private cache, and add a lazy entry in linecache
|
| 34 |
+
that allows the source to be retrieved by 'filename'.
|
| 35 |
+
|
| 36 |
+
Args:
|
| 37 |
+
src (str): The module source to cache
|
| 38 |
+
globals (dict): The module globals
|
| 39 |
+
|
| 40 |
+
Returns:
|
| 41 |
+
str: The cache key (and dummy filename) generated for src.
|
| 42 |
+
"""
|
| 43 |
+
|
| 44 |
+
key = self._get_key()
|
| 45 |
+
if co_fields:
|
| 46 |
+
key += f" from {co_fields['co_filename']}:{co_fields['co_firstlineno']} in {co_fields['co_name']}"
|
| 47 |
+
self.eval_cache[key] = src
|
| 48 |
+
|
| 49 |
+
# Don't mutate globals so that this loader is only used
|
| 50 |
+
# to populate linecache, and doesn't interact with other modules
|
| 51 |
+
# that might check `__loader__`
|
| 52 |
+
globals_copy = globals.copy()
|
| 53 |
+
globals_copy['__file__'] = key
|
| 54 |
+
globals_copy['__name__'] = key
|
| 55 |
+
globals_copy['__loader__'] = self
|
| 56 |
+
linecache.lazycache(key, globals_copy)
|
| 57 |
+
|
| 58 |
+
return key
|
| 59 |
+
|
| 60 |
+
# Part of the loader protocol (PEP 302)
|
| 61 |
+
# linecache will use this method when trying to find source code
|
| 62 |
+
def get_source(self, module_name) -> Optional[str]:
|
| 63 |
+
if module_name in self.eval_cache:
|
| 64 |
+
return self.eval_cache[module_name]
|
| 65 |
+
return None
|
| 66 |
+
|
| 67 |
+
def _get_key(self):
|
| 68 |
+
key = f'<eval_with_key>.{self.next_id}'
|
| 69 |
+
self.next_id += 1
|
| 70 |
+
return key
|
| 71 |
+
|
| 72 |
+
_loader = _EvalCacheLoader()
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def _exec_with_source(src: str, globals: Dict[str, Any], co_fields=None):
    # Exec `src` in `globals`, first registering the source with the eval
    # cache so linecache (and hence TorchScript, tracebacks, etc.) can
    # recover it by the synthetic filename.
    filename = _loader.cache(src, globals, co_fields)
    exec(compile(src, filename, 'exec'), globals)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _forward_from_src(src: str, globals: Dict[str, Any], co_fields=None):
    # Compile `src` and return the `forward` function it defines.
    # Work on a copy so the caller's namespace is never mutated.
    scope = dict(globals)
    _exec_with_source(src, scope, co_fields)
    # pop() both retrieves `forward` and removes it from the scratch scope.
    return scope.pop('forward')
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def _format_import_statement(name: str, obj: Any, importer: Importer) -> str:
    # Produce the single import line that makes `name` (bound to `obj`)
    # available in generated code.
    builtin = _custom_builtins.get(name)
    if builtin is not None:
        return builtin.import_str
    if _is_from_torch(name):
        # Anything reachable under the torch namespace just needs torch.
        return 'import torch'
    owning_module, attr = importer.get_name(obj)
    return f'from {owning_module} import {attr} as {name}'
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def _format_import_block(globals: Dict[str, Any], importer: Importer):
    # Build the block of import statements covering every name in `globals`.
    # A set removes duplicates (e.g. many torch objects all map to
    # 'import torch').
    statements: Set[str] = {
        _format_import_statement(name, obj, importer)
        for name, obj in globals.items()
    }
    return '\n'.join(statements)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
@compatibility(is_backward_compatible=True)
def reduce_graph_module(body: Dict[Any, Any], import_block: str) -> torch.nn.Module:
    # Unpickle helper: rebuild a GraphModule from its serialized __dict__.
    # BC: the attribute was renamed `code` -> `_code` when `code` became a
    # documented property, so accept either spelling.
    src = body.get('_code') or body['code']
    forward = _forward_from_src(import_block + src, {})
    return _deserialize_graph_module(forward, body)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
@compatibility(is_backward_compatible=True)
def reduce_package_graph_module(
    importer: PackageImporter, body: Dict[Any, Any], generated_module_name: str
) -> torch.nn.Module:
    # Unpickle helper used by torch.package: the generated module containing
    # `forward` lives inside the package, so import it via the PackageImporter.
    generated = importer.import_module(generated_module_name)
    return _deserialize_graph_module(generated.forward, body)
|
| 120 |
+
|
| 121 |
+
@compatibility(is_backward_compatible=True)
def reduce_deploy_graph_module(
    importer: PackageImporter, body: Dict[Any, Any], import_block: str
) -> torch.nn.Module:
    # Unpickle helper for deploy/package interpreters: execute the stored
    # forward source under the importer's patched builtins.
    ns: Dict[str, Any] = {"__builtins__": importer.patched_builtins}
    src = body.get('_code')
    assert src is not None
    forward = _forward_from_src(import_block + src, ns)
    return _deserialize_graph_module(forward, body)
|
| 131 |
+
|
| 132 |
+
# We create a dummy class here because symbolic_trace pulls the forward()
|
| 133 |
+
# function off of the class, rather than the instance. This class is used
|
| 134 |
+
# in _deserialize_graph_module() below.
|
| 135 |
+
class _CodeOnlyModule(torch.nn.Module):
|
| 136 |
+
def __init__(self, body):
|
| 137 |
+
super().__init__()
|
| 138 |
+
self.__dict__ = body
|
| 139 |
+
|
| 140 |
+
def _deserialize_graph_module(forward, body: Dict[Any, Any]) -> torch.nn.Module:
    """
    Deserialize a GraphModule from `body`, the dictionary of the original
    module, by re-tracing the given `forward`. The graph itself is deleted
    before serialization so that changes to the in-memory graph format never
    end up serialized.
    """
    # Patch `forward` onto the dummy class so tracing picks it up.
    _CodeOnlyModule.forward = forward

    tracer_cls = body.get('_tracer_cls')
    if tracer_cls is None:
        from ._symbolic_trace import Tracer
        tracer_cls = Tracer

    graphmodule_cls_name = body.get('_graphmodule_cls_name', 'GraphModule')

    # mypy cannot handle a dynamic base class (python/mypy#5865), so launder
    # the tracer class through Any.
    base_tracer: Any = tracer_cls

    class KeepModules(base_tracer):
        # Submodules were leaves in the original GraphModule's trace, so never
        # recurse into them here either.
        def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool:
            return True

    com = _CodeOnlyModule(body)

    tracer_extras = body.get('_tracer_extras', {})
    graph = KeepModules().trace(com, **tracer_extras)

    # Record the real Tracer class on the reconstructed Graph rather than the
    # private local KeepModules subclass (which is not picklable).
    graph._tracer_cls = tracer_cls
    gm = GraphModule(com, graph, class_name=graphmodule_cls_name)

    # The GraphModule constructor only keeps attributes the graph references;
    # restore anything else that was present in `body` so the result matches
    # the originally packaged module as closely as possible.
    for name, value in body.items():
        if not hasattr(gm, name):
            setattr(gm, name, value)
    return gm
|
| 186 |
+
|
| 187 |
+
# copy an attribute value with qualified name 'target' from 'from_module' to 'to_module'
|
| 188 |
+
# This installs empty Modules where none exist yet if they are subpaths of target
|
| 189 |
+
def _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str):
|
| 190 |
+
*prefix, field = target.split('.')
|
| 191 |
+
for item in prefix:
|
| 192 |
+
f = getattr(from_module, item)
|
| 193 |
+
t = getattr(to_module, item, None)
|
| 194 |
+
if f is t:
|
| 195 |
+
# we have already installed one of its parents
|
| 196 |
+
# (e.g. target = root.linear.weight, but we have already installed root.linear)
|
| 197 |
+
# once we install a parent, we no longer need to copy the children
|
| 198 |
+
# since all the needed properties will already be present
|
| 199 |
+
return
|
| 200 |
+
|
| 201 |
+
if t is None:
|
| 202 |
+
t = torch.nn.Module()
|
| 203 |
+
setattr(to_module, item, t)
|
| 204 |
+
from_module, to_module = f, t
|
| 205 |
+
|
| 206 |
+
orig = getattr(from_module, field)
|
| 207 |
+
# If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
|
| 208 |
+
# So, we register it as a named buffer in the target module.
|
| 209 |
+
if isinstance(orig, torch.Tensor) and not isinstance(orig, torch.nn.Parameter):
|
| 210 |
+
to_module.register_buffer(field, orig)
|
| 211 |
+
else:
|
| 212 |
+
setattr(to_module, field, orig)
|
| 213 |
+
|
| 214 |
+
# Assign attribute 'from_obj' to the qualified name 'target' on 'to_module
|
| 215 |
+
# This installs empty Modules where none exist yet if they are subpaths of target
|
| 216 |
+
def _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str):
|
| 217 |
+
*prefix, field = target.split('.')
|
| 218 |
+
for item in prefix:
|
| 219 |
+
t = getattr(to_module, item, None)
|
| 220 |
+
|
| 221 |
+
if t is None:
|
| 222 |
+
t = torch.nn.Module()
|
| 223 |
+
setattr(to_module, item, t)
|
| 224 |
+
to_module = t
|
| 225 |
+
|
| 226 |
+
# If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
|
| 227 |
+
# So, we register it as a named buffer in the target module.
|
| 228 |
+
if isinstance(from_obj, torch.Tensor) and not isinstance(from_obj, torch.nn.Parameter):
|
| 229 |
+
to_module.register_buffer(field, from_obj)
|
| 230 |
+
else:
|
| 231 |
+
setattr(to_module, field, from_obj)
|
| 232 |
+
|
| 233 |
+
class _WrappedCall:
|
| 234 |
+
def __init__(self, cls, cls_call):
|
| 235 |
+
self.cls = cls
|
| 236 |
+
self.cls_call = cls_call
|
| 237 |
+
|
| 238 |
+
# Previously, if an error occurred when valid
|
| 239 |
+
# symbolically-traced code was run with an invalid input, the
|
| 240 |
+
# user would see the source of the error as coming from
|
| 241 |
+
# `File "<eval_with_key_N">`, where N is some number. We use
|
| 242 |
+
# this function to generate a more informative error message. We
|
| 243 |
+
# return the traceback itself, a message explaining that the
|
| 244 |
+
# error occurred in a traced Module's generated forward
|
| 245 |
+
# function, and five lines of context surrounding the faulty
|
| 246 |
+
# line
|
| 247 |
+
@staticmethod
|
| 248 |
+
def _generate_error_message(frame_summary: traceback.FrameSummary) -> str:
|
| 249 |
+
# auxiliary variables (for readability)
|
| 250 |
+
err_lineno = frame_summary.lineno
|
| 251 |
+
assert err_lineno is not None
|
| 252 |
+
line = frame_summary.line
|
| 253 |
+
assert line is not None
|
| 254 |
+
err_line_len = len(line)
|
| 255 |
+
all_src_lines = linecache.getlines(frame_summary.filename)
|
| 256 |
+
|
| 257 |
+
# constituent substrings of the error message
|
| 258 |
+
tb_repr = traceback.format_exc()
|
| 259 |
+
custom_msg = ("Call using an FX-traced Module, "
|
| 260 |
+
f"line {err_lineno} of the traced Module's "
|
| 261 |
+
"generated forward function:")
|
| 262 |
+
before_err = "".join(all_src_lines[err_lineno - 2 : err_lineno])
|
| 263 |
+
marker = "~" * err_line_len + "~~~ <--- HERE"
|
| 264 |
+
err_and_after_err = "\n".join(all_src_lines[err_lineno : err_lineno + 2])
|
| 265 |
+
|
| 266 |
+
# joined message
|
| 267 |
+
return "\n".join([tb_repr, custom_msg, before_err, marker, err_and_after_err])
|
| 268 |
+
|
| 269 |
+
def __call__(self, obj, *args, **kwargs):
|
| 270 |
+
try:
|
| 271 |
+
if self.cls_call is not None:
|
| 272 |
+
return self.cls_call(obj, *args, **kwargs)
|
| 273 |
+
else:
|
| 274 |
+
return super(self.cls, obj).__call__(*args, **kwargs) # type: ignore[misc]
|
| 275 |
+
except Exception as e:
|
| 276 |
+
assert e.__traceback__
|
| 277 |
+
topmost_framesummary: traceback.FrameSummary = \
|
| 278 |
+
traceback.StackSummary.extract(traceback.walk_tb(e.__traceback__))[-1] # type: ignore[arg-type]
|
| 279 |
+
if "eval_with_key" in topmost_framesummary.filename:
|
| 280 |
+
print(_WrappedCall._generate_error_message(topmost_framesummary),
|
| 281 |
+
file=sys.stderr)
|
| 282 |
+
raise e.with_traceback(None)
|
| 283 |
+
else:
|
| 284 |
+
raise e
|
| 285 |
+
|
| 286 |
+
@compatibility(is_backward_compatible=True)
|
| 287 |
+
class GraphModule(torch.nn.Module):
|
| 288 |
+
"""
|
| 289 |
+
GraphModule is an nn.Module generated from an fx.Graph. Graphmodule has a
|
| 290 |
+
``graph`` attribute, as well as ``code`` and ``forward`` attributes generated
|
| 291 |
+
from that ``graph``.
|
| 292 |
+
|
| 293 |
+
.. warning::
|
| 294 |
+
|
| 295 |
+
When ``graph`` is reassigned, ``code`` and ``forward`` will be automatically
|
| 296 |
+
regenerated. However, if you edit the contents of the ``graph`` without reassigning
|
| 297 |
+
the ``graph`` attribute itself, you must call ``recompile()`` to update the generated
|
| 298 |
+
code.
|
| 299 |
+
"""
|
| 300 |
+
def __new__(cls: 'Type[GraphModule]', *args, **kwargs):
|
| 301 |
+
# each instance of a graph module needs its own forward method
|
| 302 |
+
# so create a new singleton class for each instance.
|
| 303 |
+
# it is a subclass of the user-defined class, the only difference
|
| 304 |
+
# is an extra layer to install the forward method
|
| 305 |
+
|
| 306 |
+
# address issue described at https://github.com/pytorch/pytorch/issues/63883
|
| 307 |
+
# in other words, traverse class hierarchy to fix the redundant class definition problem
|
| 308 |
+
for t in cls.__mro__:
|
| 309 |
+
c = t.__qualname__.split('.')[-1]
|
| 310 |
+
if c != 'GraphModuleImpl':
|
| 311 |
+
cls = t
|
| 312 |
+
break
|
| 313 |
+
|
| 314 |
+
class GraphModuleImpl(cls): # type: ignore[misc, valid-type]
|
| 315 |
+
pass
|
| 316 |
+
return super().__new__(GraphModuleImpl)
|
| 317 |
+
|
| 318 |
+
@compatibility(is_backward_compatible=True)
|
| 319 |
+
def __init__(self,
|
| 320 |
+
root: Union[torch.nn.Module, Dict[str, Any]],
|
| 321 |
+
graph: Graph,
|
| 322 |
+
class_name: str = 'GraphModule'):
|
| 323 |
+
"""
|
| 324 |
+
Construct a GraphModule.
|
| 325 |
+
|
| 326 |
+
Args:
|
| 327 |
+
|
| 328 |
+
root (Union[torch.nn.Module, Dict[str, Any]):
|
| 329 |
+
``root`` can either be an nn.Module instance or a Dict mapping strings to any attribute type.
|
| 330 |
+
In the case that ``root`` is a Module, any references to Module-based objects (via qualified
|
| 331 |
+
name) in the Graph's Nodes' ``target`` field will be copied over from the respective place
|
| 332 |
+
within ``root``'s Module hierarchy into the GraphModule's module hierarchy.
|
| 333 |
+
In the case that ``root`` is a dict, the qualified name found in a Node's ``target`` will be
|
| 334 |
+
looked up directly in the dict's keys. The object mapped to by the Dict will be copied
|
| 335 |
+
over into the appropriate place within the GraphModule's module hierarchy.
|
| 336 |
+
|
| 337 |
+
graph (Graph): ``graph`` contains the nodes this GraphModule should use for code generation
|
| 338 |
+
|
| 339 |
+
class_name (str): ``name`` denotes the name of this GraphModule for debugging purposes. If it's unset, all
|
| 340 |
+
error messages will report as originating from ``GraphModule``. It may be helpful to set this
|
| 341 |
+
to ``root``'s original name or a name that makes sense within the context of your transform.
|
| 342 |
+
"""
|
| 343 |
+
super().__init__()
|
| 344 |
+
self.__class__.__name__ = class_name
|
| 345 |
+
if isinstance(root, torch.nn.Module):
|
| 346 |
+
if hasattr(root, 'training'):
|
| 347 |
+
self.training = root.training
|
| 348 |
+
|
| 349 |
+
# When we pickle/unpickle graph module, we don't want to drop any module or attributes.
|
| 350 |
+
if isinstance(root, _CodeOnlyModule):
|
| 351 |
+
for k, _ in root.named_children():
|
| 352 |
+
_copy_attr(root, self, k)
|
| 353 |
+
|
| 354 |
+
for k, _ in root.named_buffers():
|
| 355 |
+
_copy_attr(root, self, k)
|
| 356 |
+
|
| 357 |
+
for k, _ in root.named_parameters():
|
| 358 |
+
_copy_attr(root, self, k)
|
| 359 |
+
|
| 360 |
+
for node in graph.nodes:
|
| 361 |
+
if node.op in ['get_attr', 'call_module']:
|
| 362 |
+
assert isinstance(node.target, str)
|
| 363 |
+
_copy_attr(root, self, node.target)
|
| 364 |
+
elif isinstance(root, dict):
|
| 365 |
+
targets_to_copy = []
|
| 366 |
+
for node in graph.nodes:
|
| 367 |
+
if node.op in ['get_attr', 'call_module']:
|
| 368 |
+
assert isinstance(node.target, str)
|
| 369 |
+
if node.target not in root:
|
| 370 |
+
raise RuntimeError('Node ' + str(node) + ' referenced target ' + node.target +
|
| 371 |
+
' but that target was not provided in ``root``!')
|
| 372 |
+
targets_to_copy.append(node.target)
|
| 373 |
+
# Sort targets in ascending order of the # of atoms.
|
| 374 |
+
# This will ensure that less deeply nested attributes are assigned
|
| 375 |
+
# before more deeply nested attributes. For example, foo.bar
|
| 376 |
+
# will be assigned before foo.bar.baz. Otherwise, we might assign
|
| 377 |
+
# the user-provided ``foo.bar`` and wipe out the previously-assigned
|
| 378 |
+
# ``foo.bar.baz``
|
| 379 |
+
targets_to_copy.sort(key=lambda t: t.count('.'))
|
| 380 |
+
for target_to_copy in targets_to_copy:
|
| 381 |
+
_assign_attr(root[target_to_copy], self, target_to_copy)
|
| 382 |
+
else:
|
| 383 |
+
raise RuntimeError('Unsupported type ' + str(root) + ' passed for root!')
|
| 384 |
+
|
| 385 |
+
self.graph = graph
|
| 386 |
+
|
| 387 |
+
# Store the Tracer class responsible for creating a Graph separately as part of the
|
| 388 |
+
# GraphModule state, except when the Tracer is defined in a local namespace.
|
| 389 |
+
# Locally defined Tracers are not pickleable. This is needed because torch.package will
|
| 390 |
+
# serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer
|
| 391 |
+
# to re-create the Graph during deserialization.
|
| 392 |
+
self._tracer_cls = None
|
| 393 |
+
if self.graph._tracer_cls and '<locals>' not in self.graph._tracer_cls.__qualname__:
|
| 394 |
+
self._tracer_cls = self.graph._tracer_cls
|
| 395 |
+
|
| 396 |
+
self._tracer_extras = {}
|
| 397 |
+
if self.graph._tracer_extras:
|
| 398 |
+
self._tracer_extras = self.graph._tracer_extras
|
| 399 |
+
|
| 400 |
+
# Dictionary to store metadata
|
| 401 |
+
self.meta : Dict[str, Any] = {}
|
| 402 |
+
|
| 403 |
+
# TorchScript breaks trying to compile the graph setter because of the
|
| 404 |
+
# continued string literal. Issue here: https://github.com/pytorch/pytorch/issues/44842
|
| 405 |
+
#
|
| 406 |
+
# Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway
|
| 407 |
+
__jit_unused_properties__ = ['graph']
|
| 408 |
+
|
| 409 |
+
@property
|
| 410 |
+
def graph(self) -> Graph:
|
| 411 |
+
"""
|
| 412 |
+
Return the ``Graph`` underlying this ``GraphModule``
|
| 413 |
+
"""
|
| 414 |
+
return self._graph
|
| 415 |
+
|
| 416 |
+
@graph.setter
|
| 417 |
+
def graph(self, g : Graph) -> None:
|
| 418 |
+
"""
|
| 419 |
+
Set the underlying ``Graph`` for this ``GraphModule``. This will internally
|
| 420 |
+
recompile the ``GraphModule`` so that the generated ``forward()`` function
|
| 421 |
+
corresponds to ``g``
|
| 422 |
+
"""
|
| 423 |
+
assert isinstance(g, Graph), f'Expected a Graph instance, but got {type(g)}'
|
| 424 |
+
self._graph = g
|
| 425 |
+
g.owning_module = self
|
| 426 |
+
self.recompile()
|
| 427 |
+
|
| 428 |
+
@compatibility(is_backward_compatible=False)
|
| 429 |
+
def to_folder(self, folder: Union[str, os.PathLike], module_name : str = "FxModule"):
|
| 430 |
+
"""Dumps out module to ``folder`` with ``module_name`` so that it can be
|
| 431 |
+
imported with ``from <folder> import <module_name>``
|
| 432 |
+
|
| 433 |
+
Args:
|
| 434 |
+
|
| 435 |
+
folder (Union[str, os.PathLike]): The folder to write the code out to
|
| 436 |
+
|
| 437 |
+
module_name (str): Top-level name to use for the ``Module`` while
|
| 438 |
+
writing out the code
|
| 439 |
+
"""
|
| 440 |
+
folder = Path(folder)
|
| 441 |
+
Path(folder).mkdir(exist_ok=True)
|
| 442 |
+
torch.save(self.state_dict(), folder / 'state_dict.pt')
|
| 443 |
+
tab = " " * 4
|
| 444 |
+
custom_builtins = '\n'.join([v.import_str for v in _custom_builtins.values()])
|
| 445 |
+
model_str = f"""
|
| 446 |
+
import torch
|
| 447 |
+
{custom_builtins}
|
| 448 |
+
|
| 449 |
+
from torch.nn import *
|
| 450 |
+
class {module_name}(torch.nn.Module):
|
| 451 |
+
def __init__(self):
|
| 452 |
+
super().__init__()
|
| 453 |
+
"""
|
| 454 |
+
|
| 455 |
+
def _gen_model_repr(module_name: str, module: torch.nn.Module) -> Optional[str]:
|
| 456 |
+
safe_reprs = [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]
|
| 457 |
+
if type(module) in safe_reprs:
|
| 458 |
+
return f"{module.__repr__()}"
|
| 459 |
+
else:
|
| 460 |
+
return None
|
| 461 |
+
|
| 462 |
+
blobified_modules = []
|
| 463 |
+
for module_name, module in self.named_children():
|
| 464 |
+
module_str = _gen_model_repr(module_name, module)
|
| 465 |
+
if module_str is None:
|
| 466 |
+
module_file = folder / f'{module_name}.pt'
|
| 467 |
+
torch.save(module, module_file)
|
| 468 |
+
blobified_modules.append(module_name)
|
| 469 |
+
module_repr = module.__repr__().replace('\r', ' ').replace('\n', ' ')
|
| 470 |
+
module_str = f"torch.load(r'{module_file}') # {module_repr}"
|
| 471 |
+
model_str += f"{tab*2}self.{module_name} = {module_str}\n"
|
| 472 |
+
|
| 473 |
+
for buffer_name, buffer in self._buffers.items():
|
| 474 |
+
if buffer is None:
|
| 475 |
+
continue
|
| 476 |
+
model_str += f"{tab*2}self.register_buffer('{buffer_name}', torch.empty({list(buffer.shape)}, dtype={buffer.dtype}))\n"
|
| 477 |
+
|
| 478 |
+
for param_name, param in self._parameters.items():
|
| 479 |
+
if param is None:
|
| 480 |
+
continue
|
| 481 |
+
model_str += f"{tab*2}self.{param_name} = torch.nn.Parameter(torch.empty({list(param.shape)}, dtype={param.dtype}))\n"
|
| 482 |
+
|
| 483 |
+
model_str += f"{tab*2}self.load_state_dict(torch.load(r'{folder}/state_dict.pt'))\n"
|
| 484 |
+
model_str += f"{_addindent(self.code, 4)}\n"
|
| 485 |
+
|
| 486 |
+
module_file = folder / 'module.py'
|
| 487 |
+
module_file.write_text(model_str)
|
| 488 |
+
|
| 489 |
+
init_file = folder / '__init__.py'
|
| 490 |
+
init_file.write_text('from .module import *')
|
| 491 |
+
|
| 492 |
+
if len(blobified_modules) > 0:
|
| 493 |
+
warnings.warn("Was not able to save the following children modules as reprs -"
|
| 494 |
+
f"saved as pickled files instead: {blobified_modules}")
|
| 495 |
+
|
| 496 |
+
@compatibility(is_backward_compatible=True)
|
| 497 |
+
def add_submodule(self, target: str, m: torch.nn.Module) -> bool:
|
| 498 |
+
"""
|
| 499 |
+
Adds the given submodule to ``self``.
|
| 500 |
+
|
| 501 |
+
This installs empty Modules where none exist yet if they are
|
| 502 |
+
subpaths of ``target``.
|
| 503 |
+
|
| 504 |
+
Args:
|
| 505 |
+
target: The fully-qualified string name of the new submodule
|
| 506 |
+
(See example in ``nn.Module.get_submodule`` for how to
|
| 507 |
+
specify a fully-qualified string.)
|
| 508 |
+
m: The submodule itself; the actual object we want to
|
| 509 |
+
install in the current Module
|
| 510 |
+
|
| 511 |
+
Return:
|
| 512 |
+
bool: Whether or not the submodule could be inserted. For
|
| 513 |
+
this method to return True, each object in the chain
|
| 514 |
+
denoted by ``target`` must either a) not exist yet,
|
| 515 |
+
or b) reference an ``nn.Module`` (not a parameter or
|
| 516 |
+
other attribute)
|
| 517 |
+
"""
|
| 518 |
+
*prefix, field = target.split('.')
|
| 519 |
+
mod: torch.nn.Module = self
|
| 520 |
+
|
| 521 |
+
for item in prefix:
|
| 522 |
+
|
| 523 |
+
submod = getattr(mod, item, None)
|
| 524 |
+
|
| 525 |
+
if submod is None:
|
| 526 |
+
submod = torch.nn.Module()
|
| 527 |
+
setattr(mod, item, submod)
|
| 528 |
+
|
| 529 |
+
if not isinstance(submod, torch.nn.Module):
|
| 530 |
+
return False
|
| 531 |
+
|
| 532 |
+
mod = submod
|
| 533 |
+
|
| 534 |
+
mod.add_module(field, m)
|
| 535 |
+
return True
|
| 536 |
+
|
| 537 |
+
@compatibility(is_backward_compatible=True)
|
| 538 |
+
def delete_submodule(self, target: str) -> bool:
|
| 539 |
+
"""
|
| 540 |
+
Deletes the given submodule from ``self``.
|
| 541 |
+
|
| 542 |
+
The module will not be deleted if ``target`` is not a valid
|
| 543 |
+
target.
|
| 544 |
+
|
| 545 |
+
Args:
|
| 546 |
+
target: The fully-qualified string name of the new submodule
|
| 547 |
+
(See example in ``nn.Module.get_submodule`` for how to
|
| 548 |
+
specify a fully-qualified string.)
|
| 549 |
+
|
| 550 |
+
Returns:
|
| 551 |
+
bool: Whether or not the target string referenced a
|
| 552 |
+
submodule we want to delete. A return value of ``False``
|
| 553 |
+
means that the ``target`` was not a valid reference to
|
| 554 |
+
a submodule.
|
| 555 |
+
"""
|
| 556 |
+
atoms = target.split(".")
|
| 557 |
+
path, target_submod = atoms[:-1], atoms[-1]
|
| 558 |
+
mod: torch.nn.Module = self
|
| 559 |
+
|
| 560 |
+
# Get the parent module
|
| 561 |
+
for item in path:
|
| 562 |
+
|
| 563 |
+
if not hasattr(mod, item):
|
| 564 |
+
return False
|
| 565 |
+
|
| 566 |
+
mod = getattr(mod, item)
|
| 567 |
+
|
| 568 |
+
if not isinstance(mod, torch.nn.Module):
|
| 569 |
+
return False
|
| 570 |
+
|
| 571 |
+
if not hasattr(mod, target_submod):
|
| 572 |
+
return False
|
| 573 |
+
|
| 574 |
+
if not isinstance(getattr(mod, target_submod), torch.nn.Module):
|
| 575 |
+
return False
|
| 576 |
+
|
| 577 |
+
delattr(mod, target_submod)
|
| 578 |
+
return True
|
| 579 |
+
|
| 580 |
+
@compatibility(is_backward_compatible=True)
|
| 581 |
+
def delete_all_unused_submodules(self) -> None:
|
| 582 |
+
"""
|
| 583 |
+
Deletes all unused submodules from ``self``.
|
| 584 |
+
|
| 585 |
+
A Module is considered "used" if any one of the following is
|
| 586 |
+
true:
|
| 587 |
+
1. It has children that are used
|
| 588 |
+
2. Its forward is called directly via a ``call_module`` node
|
| 589 |
+
3. It has a non-Module attribute that is used from a
|
| 590 |
+
``get_attr`` node
|
| 591 |
+
|
| 592 |
+
This method can be called to clean up an ``nn.Module`` without
|
| 593 |
+
manually calling ``delete_submodule`` on each unused submodule.
|
| 594 |
+
"""
|
| 595 |
+
used: List[str] = []
|
| 596 |
+
|
| 597 |
+
for node in self.graph.nodes:
|
| 598 |
+
|
| 599 |
+
if node.op == "call_module" or node.op == "get_attr":
|
| 600 |
+
|
| 601 |
+
# A list of strings representing the different parts
|
| 602 |
+
# of the path. For example, `foo.bar.baz` gives us
|
| 603 |
+
# ["foo", "bar", "baz"]
|
| 604 |
+
fullpath = node.target.split(".")
|
| 605 |
+
|
| 606 |
+
# If we're looking at multiple parts of a path, join
|
| 607 |
+
# join them with a dot. Otherwise, return that single
|
| 608 |
+
# element without doing anything to it.
|
| 609 |
+
def join_fn(x: str, y: str) -> str:
|
| 610 |
+
return '.'.join([x, y] if y else [x])
|
| 611 |
+
|
| 612 |
+
# Progressively collect all the names of intermediate
|
| 613 |
+
# modules. For example, if we have the target
|
| 614 |
+
# `foo.bar.baz`, we'll add `foo`, `foo.bar`, and
|
| 615 |
+
# `foo.bar.baz` to the list.
|
| 616 |
+
for path in itertools.accumulate(fullpath, join_fn):
|
| 617 |
+
used.append(path)
|
| 618 |
+
|
| 619 |
+
# For a `call_module` node, also register all recursive submodules
|
| 620 |
+
# as used
|
| 621 |
+
if node.op == "call_module":
|
| 622 |
+
try:
|
| 623 |
+
submod = self.get_submodule(node.target)
|
| 624 |
+
|
| 625 |
+
for submod_name, _ in submod.named_modules():
|
| 626 |
+
if submod_name != '':
|
| 627 |
+
used.append('.'.join([node.target, submod_name]))
|
| 628 |
+
except AttributeError:
|
| 629 |
+
# Node referenced nonexistent submodule, don't need to
|
| 630 |
+
# worry about GCing anything
|
| 631 |
+
pass
|
| 632 |
+
|
| 633 |
+
to_delete = [name for name, _ in self.named_modules()
|
| 634 |
+
if name not in used]
|
| 635 |
+
|
| 636 |
+
for name in to_delete:
|
| 637 |
+
self.delete_submodule(name)
|
| 638 |
+
|
| 639 |
+
@property
|
| 640 |
+
def code(self) -> str:
|
| 641 |
+
"""
|
| 642 |
+
Return the Python code generated from the ``Graph`` underlying this
|
| 643 |
+
``GraphModule``.
|
| 644 |
+
"""
|
| 645 |
+
if not hasattr(self, '_code'):
|
| 646 |
+
raise RuntimeError('Code has not been generated! Please report a bug to PyTorch')
|
| 647 |
+
return self._code
|
| 648 |
+
|
| 649 |
+
@compatibility(is_backward_compatible=True)
|
| 650 |
+
def recompile(self) -> PythonCode:
|
| 651 |
+
"""
|
| 652 |
+
Recompile this GraphModule from its ``graph`` attribute. This should be
|
| 653 |
+
called after editing the contained ``graph``, otherwise the generated
|
| 654 |
+
code of this ``GraphModule`` will be out of date.
|
| 655 |
+
"""
|
| 656 |
+
if isinstance(self._graph._codegen, _PyTreeCodeGen):
|
| 657 |
+
self._in_spec = self._graph._codegen.pytree_info.in_spec
|
| 658 |
+
self._out_spec = self._graph._codegen.pytree_info.out_spec
|
| 659 |
+
python_code = self._graph.python_code(root_module='self')
|
| 660 |
+
self._code = python_code.src
|
| 661 |
+
|
| 662 |
+
cls = type(self)
|
| 663 |
+
co_fields = self._graph._co_fields if hasattr(self._graph, '_co_fields') else {}
|
| 664 |
+
cls.forward = _forward_from_src(self._code, python_code.globals, co_fields)
|
| 665 |
+
|
| 666 |
+
# Determine whether this class explicitly defines a __call__ implementation
|
| 667 |
+
# to wrap. If it does, save it in order to have wrapped_call invoke it.
|
| 668 |
+
# If it does not, wrapped_call can use a dynamic call to super() instead.
|
| 669 |
+
# In most cases, super().__call__ should be torch.nn.Module.__call__.
|
| 670 |
+
# We do not want to hold a reference to Module.__call__ here; doing so will
|
| 671 |
+
# bypass patching of torch.nn.Module.__call__ done while symbolic tracing.
|
| 672 |
+
cls_call = cls.__call__ if "__call__" in vars(cls) else None
|
| 673 |
+
|
| 674 |
+
if '_wrapped_call' not in vars(cls):
|
| 675 |
+
cls._wrapped_call = _WrappedCall(cls, cls_call) # type: ignore[attr-defined]
|
| 676 |
+
|
| 677 |
+
def call_wrapped(self, *args, **kwargs):
|
| 678 |
+
return self._wrapped_call(self, *args, **kwargs)
|
| 679 |
+
|
| 680 |
+
cls.__call__ = call_wrapped
|
| 681 |
+
|
| 682 |
+
return python_code
|
| 683 |
+
|
| 684 |
+
# Passing Tracer as argument allows subclasses extending fx.GraphModule
|
| 685 |
+
# define their own Tracer (extending fx.Tracer).
|
| 686 |
+
def __reduce_deploy__(self, importer: Importer):
|
| 687 |
+
dict_without_graph = self.__dict__.copy()
|
| 688 |
+
dict_without_graph['_graphmodule_cls_name'] = self.__class__.__name__
|
| 689 |
+
del dict_without_graph['_graph']
|
| 690 |
+
|
| 691 |
+
python_code = self.recompile()
|
| 692 |
+
import_block = _format_import_block(python_code.globals, importer)
|
| 693 |
+
return (reduce_deploy_graph_module, (dict_without_graph, import_block))
|
| 694 |
+
|
| 695 |
+
def __reduce_package__(self, exporter: PackageExporter):
|
| 696 |
+
dict_without_graph = self.__dict__.copy()
|
| 697 |
+
dict_without_graph['_graphmodule_cls_name'] = self.__class__.__name__
|
| 698 |
+
del dict_without_graph['_graph']
|
| 699 |
+
|
| 700 |
+
generated_module_name = f'fx-generated._{exporter.get_unique_id()}'
|
| 701 |
+
python_code = self.recompile()
|
| 702 |
+
import_block = _format_import_block(python_code.globals, exporter.importer)
|
| 703 |
+
module_code = import_block + self.code
|
| 704 |
+
exporter.save_source_string(generated_module_name, module_code)
|
| 705 |
+
return (reduce_package_graph_module, (dict_without_graph, generated_module_name))
|
| 706 |
+
|
| 707 |
+
def __reduce__(self):
|
| 708 |
+
"""
|
| 709 |
+
Serialization of GraphModule. We serialize only the generated code, not
|
| 710 |
+
the underlying ``Graph``. This is because ``Graph`` does not have on-disk
|
| 711 |
+
backward-compatibility guarantees, whereas Python source code does.
|
| 712 |
+
On the deserialization side, we symbolically trace through the generated
|
| 713 |
+
code to regenerate the underlying ``Graph``
|
| 714 |
+
"""
|
| 715 |
+
dict_without_graph = self.__dict__.copy()
|
| 716 |
+
python_code = self.recompile()
|
| 717 |
+
import_block = _format_import_block(python_code.globals, sys_importer)
|
| 718 |
+
del dict_without_graph['_graph']
|
| 719 |
+
return (reduce_graph_module, (dict_without_graph, import_block))
|
| 720 |
+
|
| 721 |
+
# because __reduce__ is defined for serialization,
|
| 722 |
+
# we need to define deepcopy otherwise it will call __reduce__
|
| 723 |
+
# and cause symbolic tracing to occur every time we try to copy the object
|
| 724 |
+
def __deepcopy__(self, memo):
|
| 725 |
+
res = type(self).__new__(type(self))
|
| 726 |
+
memo[id(self)] = res
|
| 727 |
+
fake_mod = torch.nn.Module()
|
| 728 |
+
fake_mod.__dict__ = copy.deepcopy(self.__dict__, memo)
|
| 729 |
+
GraphModule.__init__(res, fake_mod, fake_mod.__dict__['_graph'])
|
| 730 |
+
# hooks are lost during `GraphModule.__init__`, so we need to copy over
|
| 731 |
+
# them explicitly, note right now we are only copying state_dict related
|
| 732 |
+
# hooks, to reduce bc-related issues, we can copy forward/backward related
|
| 733 |
+
# hooks in the future as well if needed
|
| 734 |
+
extra_preserved_attrs = [
|
| 735 |
+
"_state_dict_hooks",
|
| 736 |
+
"_load_state_dict_pre_hooks",
|
| 737 |
+
"_load_state_dict_post_hooks"
|
| 738 |
+
]
|
| 739 |
+
for attr in extra_preserved_attrs:
|
| 740 |
+
if attr in self.__dict__:
|
| 741 |
+
setattr(res, attr, copy.deepcopy(self.__dict__[attr], memo))
|
| 742 |
+
res.meta = copy.deepcopy(getattr(self, 'meta', {}), memo)
|
| 743 |
+
if _USER_PRESERVED_ATTRIBUTES_KEY in res.meta:
|
| 744 |
+
for attr_name, attr in res.meta[_USER_PRESERVED_ATTRIBUTES_KEY].items():
|
| 745 |
+
setattr(res, attr_name, attr)
|
| 746 |
+
return res
|
| 747 |
+
|
| 748 |
+
def __copy__(self):
|
| 749 |
+
res = GraphModule(self, self.graph)
|
| 750 |
+
res.meta = getattr(self, 'meta', {})
|
| 751 |
+
return res
|
| 752 |
+
|
| 753 |
+
@compatibility(is_backward_compatible=False)
|
| 754 |
+
def print_readable(self, print_output=True):
|
| 755 |
+
"""
|
| 756 |
+
Return the Python code generated for current GraphModule and its children GraphModules
|
| 757 |
+
"""
|
| 758 |
+
verbose_python_code = self._graph.python_code(root_module='self', verbose=True)
|
| 759 |
+
module_code = verbose_python_code.src
|
| 760 |
+
module_code = module_code.lstrip('\n')
|
| 761 |
+
module_code = f"class {self._get_name()}(torch.nn.Module):\n" + module_code
|
| 762 |
+
module_code = _addindent(module_code, 4)
|
| 763 |
+
|
| 764 |
+
submodule_code_list = [""]
|
| 765 |
+
for submodule in self.children():
|
| 766 |
+
if isinstance(submodule, GraphModule):
|
| 767 |
+
submodule_code_list.append(submodule.print_readable(print_output=False))
|
| 768 |
+
submodule_code = "\n".join(submodule_code_list)
|
| 769 |
+
submodule_code = _addindent(submodule_code, 4)
|
| 770 |
+
|
| 771 |
+
output = module_code + submodule_code
|
| 772 |
+
if print_output:
|
| 773 |
+
print(module_code + submodule_code)
|
| 774 |
+
return output
|
| 775 |
+
|
| 776 |
+
def __str__(self) -> str:
|
| 777 |
+
orig_str = super().__str__()
|
| 778 |
+
print_readable_reminder = "# To see more debug info, please use `graph_module.print_readable()`"
|
| 779 |
+
return '\n'.join([orig_str, self._code, print_readable_reminder])
|
| 780 |
+
|
| 781 |
+
def _replicate_for_data_parallel(self):
|
| 782 |
+
new_gm = self.__copy__()
|
| 783 |
+
new_gm._is_replica = True
|
| 784 |
+
return new_gm
|
| 785 |
+
|
| 786 |
+
# workarounds for issues in __torch_function__
|
| 787 |
+
|
| 788 |
+
# WAR for __torch_function__ not handling tensor lists,
|
| 789 |
+
# fix is in https://github.com/pytorch/pytorch/pull/34725
|
| 790 |
+
# orig_cat = torch.cat
|
| 791 |
+
# def patched_cat(*args, **kwargs):
|
| 792 |
+
# tensors = args[0]
|
| 793 |
+
# for t in tensors:
|
| 794 |
+
# if isinstance(t, Proxy):
|
| 795 |
+
# return t.__torch_function__(patched_cat, (), args, kwargs)
|
| 796 |
+
# return orig_cat(*args, **kwargs)
|
| 797 |
+
# patched_cat.__module__ = 'torch'
|
| 798 |
+
# patched_cat.__name__ = 'cat'
|
| 799 |
+
# torch.cat = patched_cat
|
llava_next/lib/python3.10/site-packages/torch/fx/immutable_collections.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, Tuple, List
|
| 2 |
+
|
| 3 |
+
from ._compatibility import compatibility
|
| 4 |
+
from torch.utils._pytree import Context, _register_pytree_node
|
| 5 |
+
|
| 6 |
+
__all__ = ["immutable_list", "immutable_dict"]
|
| 7 |
+
|
| 8 |
+
_help_mutation = """\
|
| 9 |
+
If you are attempting to modify the kwargs or args of a torch.fx.Node object,
|
| 10 |
+
instead create a new copy of it and assign the copy to the node:
|
| 11 |
+
new_args = ... # copy and mutate args
|
| 12 |
+
node.args = new_args
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
def _no_mutation(self, *args, **kwargs):
|
| 16 |
+
raise NotImplementedError(f"'{type(self).__name__}' object does not support mutation. {_help_mutation}")
|
| 17 |
+
|
| 18 |
+
def _create_immutable_container(base, mutable_functions):
|
| 19 |
+
container = type('immutable_' + base.__name__, (base,), {})
|
| 20 |
+
for attr in mutable_functions:
|
| 21 |
+
setattr(container, attr, _no_mutation)
|
| 22 |
+
return container
|
| 23 |
+
|
| 24 |
+
immutable_list = _create_immutable_container(list,
|
| 25 |
+
['__delitem__', '__iadd__', '__imul__', '__setitem__', 'append',
|
| 26 |
+
'clear', 'extend', 'insert', 'pop', 'remove'])
|
| 27 |
+
immutable_list.__reduce__ = lambda self: (immutable_list, (tuple(iter(self)),))
|
| 28 |
+
|
| 29 |
+
compatibility(is_backward_compatible=True)(immutable_list)
|
| 30 |
+
|
| 31 |
+
immutable_dict = _create_immutable_container(dict, ['__delitem__', '__setitem__', 'clear', 'pop', 'popitem', 'update'])
|
| 32 |
+
immutable_dict.__reduce__ = lambda self: (immutable_dict, (iter(self.items()),))
|
| 33 |
+
compatibility(is_backward_compatible=True)(immutable_dict)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# Register immutable collections for PyTree operations
|
| 37 |
+
|
| 38 |
+
def _immutable_dict_flatten(d: Dict[Any, Any]) -> Tuple[List[Any], Context]:
|
| 39 |
+
return list(d.values()), list(d.keys())
|
| 40 |
+
|
| 41 |
+
def _immutable_dict_unflatten(values: List[Any], context: Context) -> Dict[Any, Any]:
|
| 42 |
+
return immutable_dict(dict(zip(context, values)))
|
| 43 |
+
|
| 44 |
+
def _immutable_list_flatten(d: List[Any]) -> Tuple[List[Any], Context]:
|
| 45 |
+
return d, None
|
| 46 |
+
|
| 47 |
+
def _immutable_list_unflatten(values: List[Any], context: Context) -> List[Any]:
|
| 48 |
+
return immutable_list(values)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
_register_pytree_node(immutable_dict, _immutable_dict_flatten, _immutable_dict_unflatten)
|
| 52 |
+
_register_pytree_node(immutable_list, _immutable_list_flatten, _immutable_list_unflatten)
|
llava_next/lib/python3.10/site-packages/torch/fx/interpreter.py
ADDED
|
@@ -0,0 +1,505 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .graph_module import GraphModule
|
| 2 |
+
from .graph import Graph
|
| 3 |
+
from .node import Argument, Node, Target, map_arg, map_aggregate
|
| 4 |
+
from .proxy import Proxy
|
| 5 |
+
from ._symbolic_trace import Tracer
|
| 6 |
+
from ._compatibility import compatibility
|
| 7 |
+
from . import config
|
| 8 |
+
import torch.fx.traceback as fx_traceback
|
| 9 |
+
import torch
|
| 10 |
+
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
|
| 11 |
+
import inspect
|
| 12 |
+
from contextlib import contextmanager
|
| 13 |
+
from torch.hub import tqdm
|
| 14 |
+
|
| 15 |
+
__all__ = ['Interpreter', 'Transformer']
|
| 16 |
+
|
| 17 |
+
@compatibility(is_backward_compatible=True)
|
| 18 |
+
class Interpreter:
|
| 19 |
+
"""
|
| 20 |
+
An Interpreter executes an FX graph Node-by-Node. This pattern
|
| 21 |
+
can be useful for many things, including writing code
|
| 22 |
+
transformations as well as analysis passes.
|
| 23 |
+
|
| 24 |
+
Methods in the Interpreter class can be overridden to customize
|
| 25 |
+
the behavior of execution. The map of overrideable methods
|
| 26 |
+
in terms of call hierarchy::
|
| 27 |
+
|
| 28 |
+
run()
|
| 29 |
+
+-- run_node
|
| 30 |
+
+-- placeholder()
|
| 31 |
+
+-- get_attr()
|
| 32 |
+
+-- call_function()
|
| 33 |
+
+-- call_method()
|
| 34 |
+
+-- call_module()
|
| 35 |
+
+-- output()
|
| 36 |
+
|
| 37 |
+
Example:
|
| 38 |
+
|
| 39 |
+
Suppose we want to swap all instances of ``torch.neg`` with
|
| 40 |
+
``torch.sigmoid`` and vice versa (including their ``Tensor``
|
| 41 |
+
method equivalents). We could subclass Interpreter like so::
|
| 42 |
+
|
| 43 |
+
class NegSigmSwapInterpreter(Interpreter):
|
| 44 |
+
def call_function(self, target : Target,
|
| 45 |
+
args : Tuple, kwargs : Dict) -> Any:
|
| 46 |
+
if target == torch.sigmoid:
|
| 47 |
+
return torch.neg(*args, **kwargs)
|
| 48 |
+
return super().call_function(n)
|
| 49 |
+
|
| 50 |
+
def call_method(self, target : Target,
|
| 51 |
+
args : Tuple, kwargs : Dict) -> Any:
|
| 52 |
+
if target == 'neg':
|
| 53 |
+
call_self, *args_tail = args
|
| 54 |
+
return call_self.sigmoid(*args_tail, **kwargs)
|
| 55 |
+
return super().call_method(n)
|
| 56 |
+
|
| 57 |
+
def fn(x):
|
| 58 |
+
return torch.sigmoid(x).neg()
|
| 59 |
+
|
| 60 |
+
gm = torch.fx.symbolic_trace(fn)
|
| 61 |
+
input = torch.randn(3, 4)
|
| 62 |
+
result = NegSigmSwapInterpreter(gm).run(input)
|
| 63 |
+
torch.testing.assert_close(result, torch.neg(input).sigmoid())
|
| 64 |
+
|
| 65 |
+
Args:
|
| 66 |
+
module (GraphModule): The module to be executed
|
| 67 |
+
garbage_collect_values (bool): Whether to delete values after their last
|
| 68 |
+
use within the Module's execution. This ensures optimal memory usage during
|
| 69 |
+
execution. This can be disabled to, for example, examine all of the intermediate
|
| 70 |
+
values in the execution by looking at the ``Interpreter.env`` attribute.
|
| 71 |
+
"""
|
| 72 |
+
@compatibility(is_backward_compatible=True)
|
| 73 |
+
def __init__(self, module : GraphModule, garbage_collect_values : bool = True):
|
| 74 |
+
assert isinstance(module, GraphModule)
|
| 75 |
+
self.module = module
|
| 76 |
+
self.submodules = dict(self.module.named_modules())
|
| 77 |
+
self.env : Dict[Node, Any] = {}
|
| 78 |
+
self.name = "Interpreter"
|
| 79 |
+
self.garbage_collect_values = garbage_collect_values
|
| 80 |
+
self.extra_traceback = True
|
| 81 |
+
|
| 82 |
+
if self.garbage_collect_values:
|
| 83 |
+
# Run through reverse nodes and record the first instance of a use
|
| 84 |
+
# of a given node. This represents the *last* use of the node in the
|
| 85 |
+
# execution order of the program, which we will use to free unused
|
| 86 |
+
# values
|
| 87 |
+
node_to_last_use : Dict[Node, Node] = {}
|
| 88 |
+
self.user_to_last_uses : Dict[Node, List[Node]] = {}
|
| 89 |
+
|
| 90 |
+
def register_last_uses(n : Node, user : Node):
|
| 91 |
+
if n not in node_to_last_use:
|
| 92 |
+
node_to_last_use[n] = user
|
| 93 |
+
self.user_to_last_uses.setdefault(user, []).append(n)
|
| 94 |
+
|
| 95 |
+
for node in reversed(self.module.graph.nodes):
|
| 96 |
+
map_arg(node.args, lambda n: register_last_uses(n, node))
|
| 97 |
+
map_arg(node.kwargs, lambda n: register_last_uses(n, node))
|
| 98 |
+
|
| 99 |
+
@compatibility(is_backward_compatible=True)
|
| 100 |
+
def run(self, *args, initial_env : Optional[Dict[Node, Any]] = None, enable_io_processing : bool = True) -> Any:
|
| 101 |
+
"""
|
| 102 |
+
Run `module` via interpretation and return the result.
|
| 103 |
+
|
| 104 |
+
Args:
|
| 105 |
+
*args: The arguments to the Module to run, in positional order
|
| 106 |
+
initial_env (Optional[Dict[Node, Any]]): An optional starting environment for execution.
|
| 107 |
+
This is a dict mapping `Node` to any value. This can be used, for example, to
|
| 108 |
+
pre-populate results for certain `Nodes` so as to do only partial evaluation within
|
| 109 |
+
the interpreter.
|
| 110 |
+
enable_io_processing (bool): If true, we process the inputs and outputs with graph's process_inputs and
|
| 111 |
+
process_outputs function first before using them.
|
| 112 |
+
|
| 113 |
+
Returns:
|
| 114 |
+
Any: The value returned from executing the Module
|
| 115 |
+
"""
|
| 116 |
+
self.env = initial_env if initial_env is not None else {}
|
| 117 |
+
|
| 118 |
+
# Positional function args are consumed left-to-right by
|
| 119 |
+
# `placeholder` nodes. Use an iterator to keep track of
|
| 120 |
+
# position and extract those values.
|
| 121 |
+
if enable_io_processing:
|
| 122 |
+
args = self.module.graph.process_inputs(*args)
|
| 123 |
+
self.args_iter : Iterator[Any] = iter(args)
|
| 124 |
+
pbar = tqdm(total=len(self.module.graph.nodes),
|
| 125 |
+
desc=f"{self.name}: {str(list(self.module.graph.nodes)) if config.verbose_progress else ''}",
|
| 126 |
+
initial=0, position=0, leave=True, disable=config.disable_progress, delay=0)
|
| 127 |
+
|
| 128 |
+
for node in self.module.graph.nodes:
|
| 129 |
+
pbar.update(1)
|
| 130 |
+
if node in self.env:
|
| 131 |
+
# Short circuit if we have this value. This could
|
| 132 |
+
# be used, for example, for partial evaluation
|
| 133 |
+
# where the caller has pre-populated `env` with
|
| 134 |
+
# values for a subset of the program.
|
| 135 |
+
continue
|
| 136 |
+
|
| 137 |
+
try:
|
| 138 |
+
self.env[node] = self.run_node(node)
|
| 139 |
+
except Exception as e:
|
| 140 |
+
if self.extra_traceback:
|
| 141 |
+
msg = f"While executing {node.format_node()}"
|
| 142 |
+
msg = f'{e.args[0]}\n\n{msg}' if e.args else str(msg)
|
| 143 |
+
msg += f"\nOriginal traceback:\n{node.stack_trace}"
|
| 144 |
+
e.args = (msg,) + e.args[1:]
|
| 145 |
+
if isinstance(e, KeyError):
|
| 146 |
+
raise RuntimeError(*e.args) from e
|
| 147 |
+
raise
|
| 148 |
+
|
| 149 |
+
if self.garbage_collect_values:
|
| 150 |
+
for to_delete in self.user_to_last_uses.get(node, []):
|
| 151 |
+
del self.env[to_delete]
|
| 152 |
+
|
| 153 |
+
if node.op == 'output':
|
| 154 |
+
output_val = self.env[node]
|
| 155 |
+
return self.module.graph.process_outputs(output_val) if enable_io_processing else output_val
|
| 156 |
+
|
| 157 |
+
@compatibility(is_backward_compatible=True)
|
| 158 |
+
def boxed_run(self, args_list):
|
| 159 |
+
"""
|
| 160 |
+
Run `module` via interpretation and return the result. This uses the "boxed"
|
| 161 |
+
calling convention, where you pass a list of arguments, which will be cleared
|
| 162 |
+
by the interpreter. This ensures that input tensors are promptly deallocated.
|
| 163 |
+
"""
|
| 164 |
+
args_iter = iter(args_list)
|
| 165 |
+
env = {}
|
| 166 |
+
for n in self.module.graph.nodes:
|
| 167 |
+
if n.op == "placeholder":
|
| 168 |
+
env[n] = next(args_iter)
|
| 169 |
+
args_list.clear()
|
| 170 |
+
return self.run(initial_env=env)
|
| 171 |
+
|
| 172 |
+
@contextmanager
|
| 173 |
+
def _set_current_node(self, node):
|
| 174 |
+
with fx_traceback.set_current_meta(node):
|
| 175 |
+
yield
|
| 176 |
+
|
| 177 |
+
@compatibility(is_backward_compatible=True)
def run_node(self, n : Node) -> Any:
    """
    Execute a single node ``n`` and return its result.

    Dispatches on ``n.op`` to one of ``placeholder``, ``get_attr``,
    ``call_function``, ``call_method``, ``call_module``, or ``output``.

    Args:
        n (Node): The Node to execute

    Returns:
        Any: The result of executing ``n``
    """
    with self._set_current_node(n):
        concrete_args, concrete_kwargs = self.fetch_args_kwargs_from_env(n)
        assert isinstance(concrete_args, tuple)
        assert isinstance(concrete_kwargs, dict)
        # The opcode name doubles as the handler method name on `self`.
        op_handler = getattr(self, n.op)
        return op_handler(n.target, concrete_args, concrete_kwargs)
|
| 196 |
+
|
| 197 |
+
# Main Node running APIs
|
| 198 |
+
@compatibility(is_backward_compatible=True)
def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
    """
    Execute a ``placeholder`` node by consuming the next value from the
    internal iterator over the arguments that were passed to ``run``.
    Note that this is stateful: each call advances ``self.args_iter``.

    Args:
        target (Target): The call target for this node. See
            `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
            details on semantics
        args (Tuple): Tuple of positional args for this invocation
        kwargs (Dict): Dict of keyword arguments for this invocation

    Returns:
        Any: The argument value that was retrieved.
    """
    assert isinstance(target, str)
    if target.startswith('*'):
        # A varargs parameter (`*args`) absorbs every remaining input value.
        return list(self.args_iter)
    try:
        return next(self.args_iter)
    except StopIteration as si:
        # No runtime value left — fall back to the node's default, if any.
        if not args:
            raise RuntimeError(f'Expected positional argument for parameter {target}, but one was not passed in!') from si
        return args[0]
|
| 229 |
+
|
| 230 |
+
@compatibility(is_backward_compatible=True)
def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
    """
    Execute a ``get_attr`` node by resolving a (possibly dotted) attribute
    path against the ``Module`` hierarchy rooted at ``self.module``.

    Args:
        target (Target): The call target for this node. See
            `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
            details on semantics
        args (Tuple): Tuple of positional args for this invocation
        kwargs (Dict): Dict of keyword arguments for this invocation

    Return:
        Any: The value of the attribute that was retrieved
    """
    assert isinstance(target, str)
    # `args`/`kwargs` are don't-care for get_attr; the target is the full path.
    return self.fetch_attr(target)
|
| 248 |
+
|
| 249 |
+
@compatibility(is_backward_compatible=True)
def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
    """
    Execute a ``call_function`` node: invoke the free function ``target``
    with the concrete ``args``/``kwargs`` and return its result.

    Args:
        target (Target): The call target for this node. See
            `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
            details on semantics
        args (Tuple): Tuple of positional args for this invocation
        kwargs (Dict): Dict of keyword arguments for this invocation

    Return
        Any: The value returned by the function invocation
    """
    # For call_function nodes, `target` is the callable itself, not a name.
    assert not isinstance(target, str)
    return target(*args, **kwargs)
|
| 268 |
+
|
| 269 |
+
@compatibility(is_backward_compatible=True)
def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
    """
    Execute a ``call_method`` node: look up the method named ``target`` on
    the receiver (``args[0]``) and invoke it with the remaining arguments.

    Args:
        target (Target): The call target for this node. See
            `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
            details on semantics
        args (Tuple): Tuple of positional args for this invocation
        kwargs (Dict): Dict of keyword arguments for this invocation

    Return
        Any: The value returned by the method invocation
    """
    assert isinstance(target, str)
    # `args[0]` is the `self` object for this method call.
    receiver, *remaining_args = args
    return getattr(receiver, target)(*remaining_args, **kwargs)
|
| 290 |
+
|
| 291 |
+
@compatibility(is_backward_compatible=True)
def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
    """
    Execute a ``call_module`` node: resolve the submodule named by ``target``
    in the module hierarchy and call it with the concrete arguments.

    Args:
        target (Target): The call target for this node. See
            `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
            details on semantics
        args (Tuple): Tuple of positional args for this invocation
        kwargs (Dict): Dict of keyword arguments for this invocation

    Return
        Any: The value returned by the module invocation
    """
    assert isinstance(target, str)
    submodule = self.fetch_attr(target)
    return submodule(*args, **kwargs)
|
| 313 |
+
|
| 314 |
+
@compatibility(is_backward_compatible=True)
def output(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
    """
    Execute an ``output`` node. An output node simply forwards the value it
    references, which by convention lives in ``args[0]``.

    Args:
        target (Target): The call target for this node. See
            `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
            details on semantics
        args (Tuple): Tuple of positional args for this invocation
        kwargs (Dict): Dict of keyword arguments for this invocation

    Return:
        Any: The return value referenced by the output node
    """
    return args[0]
|
| 331 |
+
|
| 332 |
+
# Helper methods
|
| 333 |
+
@compatibility(is_backward_compatible=True)
def fetch_attr(self, target : str):
    """
    Fetch an attribute from the ``Module`` hierarchy of ``self.module``,
    following the dotted path in ``target`` one component at a time.

    Args:
        target (str): The fully-qualified name of the attribute to fetch

    Return:
        Any: The value of the attribute.
    """
    target_atoms = target.split('.')
    resolved = self.module
    for i, atom in enumerate(target_atoms):
        if not hasattr(resolved, atom):
            raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}")
        resolved = getattr(resolved, atom)
    return resolved
|
| 351 |
+
|
| 352 |
+
@compatibility(is_backward_compatible=True)
def fetch_args_kwargs_from_env(self, n : Node) -> Tuple[Tuple, Dict]:
    """
    Fetch the concrete values of ``args`` and ``kwargs`` of node ``n``
    from the current execution environment.

    Args:
        n (Node): The node for which ``args`` and ``kwargs`` should be fetched.

    Return:
        Tuple[Tuple, Dict]: ``args`` and ``kwargs`` with concrete values for ``n``.
    """
    concrete_args = self.map_nodes_to_values(n.args, n)
    concrete_kwargs = self.map_nodes_to_values(n.kwargs, n)
    assert isinstance(concrete_args, tuple)
    assert isinstance(concrete_kwargs, dict)
    return concrete_args, concrete_kwargs
|
| 369 |
+
|
| 370 |
+
@compatibility(is_backward_compatible=True)
def map_nodes_to_values(self, args : Argument, n : Node) -> Argument:
    """
    Recursively descend through ``args``, replacing every ``Node`` with the
    concrete value bound to it in the current execution environment.

    Args:
        args (Argument): Data structure within which to look up concrete values

        n (Node): Node to which ``args`` belongs. This is only used for error reporting.
    """
    def _lookup(n_arg : Node) -> Any:
        if n_arg in self.env:
            return self.env[n_arg]
        raise RuntimeError(f'Node {n} referenced nonexistent value {n_arg}! Run Graph.lint() '
                           f'to diagnose such issues')
    return map_arg(args, _lookup)
|
| 387 |
+
|
| 388 |
+
@compatibility(is_backward_compatible=True)
class Transformer(Interpreter):
    """
    ``Transformer`` is a special type of interpreter that produces a
    new ``Module``. It exposes a ``transform()`` method that returns
    the transformed ``Module``. ``Transformer`` does not require
    arguments to run, as ``Interpreter`` does. ``Transformer`` works
    entirely symbolically.

    Example:

        Suppose we want to swap all instances of ``torch.neg`` with
        ``torch.sigmoid`` and vice versa (including their ``Tensor``
        method equivalents). We could subclass ``Transformer`` like so::

            class NegSigmSwapXformer(Transformer):
                def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
                    if target == torch.sigmoid:
                        return torch.neg(*args, **kwargs)
                    return super().call_function(n)

                def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
                    if target == 'neg':
                        call_self, *args_tail = args
                        return call_self.sigmoid(*args_tail, **kwargs)
                    return super().call_method(n)

            def fn(x):
                return torch.sigmoid(x).neg()

            gm = torch.fx.symbolic_trace(fn)

            transformed : torch.nn.Module = NegSigmSwapXformer(gm).transform()
            input = torch.randn(3, 4)
            torch.testing.assert_close(transformed(input), torch.neg(input).sigmoid())

    Args:
        module (GraphModule): The ``Module`` to be transformed.
    """

    @compatibility(is_backward_compatible=True)
    def __init__(self, module):
        super().__init__(module)
        # The output graph is built up as interpretation proceeds; it reuses
        # the input graph's codegen so printing/codegen behavior matches.
        self.new_graph = Graph()
        self.new_graph.set_codegen(module.graph._codegen)

        class TransformerTracer(Tracer):
            def __init__(self, graph: Graph):
                super().__init__()
                self.graph = graph
                self.tensor_attrs: Dict[torch.Tensor, str] = {}  # type: ignore[assignment]

            def is_leaf_module(self, _, __) -> bool:
                # Treat every submodule as a leaf: emit call_module nodes
                # rather than tracing into submodule bodies.
                return True

        self.tracer = TransformerTracer(self.new_graph)
        self.tracer.root = module

    @compatibility(is_backward_compatible=True)
    def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
        """
        Execute a ``placeholder`` node. In ``Transformer``, this is
        overridden to insert a new ``placeholder`` into the output
        graph.

        Args:
            target (Target): The call target for this node. See
                `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
                details on semantics
            args (Tuple): Tuple of positional args for this invocation
            kwargs (Dict): Dict of keyword arguments for this invocation
        """
        assert isinstance(target, str)
        # args[0], when present, carries the parameter's default value.
        default_value = args[0] if args else inspect.Signature.empty
        new_node = self.new_graph.placeholder(target, default_value=default_value)
        return Proxy(new_node, self.tracer)

    @compatibility(is_backward_compatible=True)
    def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
        """
        Execute a ``get_attr`` node. In ``Transformer``, this is
        overridden to insert a new ``get_attr`` node into the output
        graph.

        Args:
            target (Target): The call target for this node. See
                `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
                details on semantics
            args (Tuple): Tuple of positional args for this invocation
            kwargs (Dict): Dict of keyword arguments for this invocation
        """
        assert isinstance(target, str)
        return self.tracer.create_proxy("get_attr", target, args, kwargs)

    @compatibility(is_backward_compatible=True)
    def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
        # Override so that the leaf module policy from `self.tracer` is respected.
        assert isinstance(target, str)
        submod = self.fetch_attr(target)
        return self.tracer.call_module(submod, submod.forward, args, kwargs)

    @compatibility(is_backward_compatible=True)
    def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
        # Override so that functions that were wrapped are still wrapped.
        return self.tracer.create_proxy('call_function', target, args, kwargs)

    @compatibility(is_backward_compatible=True)
    def transform(self) -> GraphModule:
        """
        Transform ``self.module`` and return the transformed
        ``GraphModule``.
        """
        with fx_traceback.preserve_node_meta():
            result = super().run(enable_io_processing=False)
        if result is not None:
            def _unwrap(a : Union[Argument, Proxy]) -> Any:
                # Replace any Proxy wrapper with its underlying Node.
                return a.node if isinstance(a, Proxy) else a
            self.new_graph.output(map_aggregate(result, _unwrap))
        return GraphModule(self.module, self.new_graph)
|
llava_next/lib/python3.10/site-packages/torch/fx/node.py
ADDED
|
@@ -0,0 +1,656 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Nodes represent a definition of a value in our graph of operators.
|
| 2 |
+
from typing import TYPE_CHECKING, Union, Callable, Any, Tuple, List, Optional, Dict, Set
|
| 3 |
+
from ._compatibility import compatibility
|
| 4 |
+
from .immutable_collections import immutable_dict, immutable_list
|
| 5 |
+
import torch
|
| 6 |
+
import builtins
|
| 7 |
+
import types
|
| 8 |
+
import warnings
|
| 9 |
+
from torch.fx.operator_schemas import normalize_function, normalize_module, ArgsKwargsPair
|
| 10 |
+
from .._ops import ops as _ops
|
| 11 |
+
|
| 12 |
+
if TYPE_CHECKING:
    from .graph import Graph

__all__ = ['Node', 'map_arg', 'map_aggregate', "has_side_effect"]

# Leaf (non-container) value types that may appear in Node.args / Node.kwargs.
BaseArgumentTypes = Union[str, int, float, bool, complex, torch.dtype,
                          torch.Tensor, torch.device, torch.memory_format, torch.layout, torch._ops.OpOverload]
base_types = BaseArgumentTypes.__args__  # type: ignore[attr-defined]

# A node's target is either a callable (call_function) or a string name
# (placeholder / get_attr / call_method / call_module).
Target = Union[Callable[..., Any], str]

Argument = Optional[Union[
    Tuple[Any, ...],  # actually Argument, but mypy can't represent recursive types
    List[Any],  # actually Argument
    Dict[str, Any],  # actually Argument
    slice,  # Slice[Argument, Argument, Argument], but slice is not a templated type in typing
    range,
    'Node',
    BaseArgumentTypes
]]

# Functions registered here are considered side-effectful: calls to them must
# not be eliminated even when their results are unused.
_side_effectful_functions: Set[Callable] = {
    torch._assert,
    torch._assert_async,
    _ops.aten._assert_async.msg,
    _ops.aten.copy_.default,
    _ops.aten.sym_constrain_range.default,
    _ops.aten.sym_constrain_range_for_size.default,
    _ops.profiler._record_function_enter,
    _ops.profiler._record_function_enter_new,
    _ops.profiler._record_function_exit}
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@compatibility(is_backward_compatible=False)
def has_side_effect(fn: Callable) -> Callable:
    """
    Register ``fn`` as side-effectful so that FX dead-code elimination will
    never remove calls to it, even when their results are unused.

    Usable as a decorator; returns ``fn`` unchanged.
    """
    # Fix: the original annotated the return type as ``None`` while returning
    # ``fn`` — decorator usage requires returning the function.
    _side_effectful_functions.add(fn)
    return fn
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# this is fixed on master, WAR for 1.5
|
| 52 |
+
def _find_module_of_method(orig_method: Callable[..., Any]) -> str:
|
| 53 |
+
name = orig_method.__name__
|
| 54 |
+
module = orig_method.__module__
|
| 55 |
+
if module is not None:
|
| 56 |
+
return module
|
| 57 |
+
for guess in [torch, torch.nn.functional]:
|
| 58 |
+
if getattr(guess, name, None) is orig_method:
|
| 59 |
+
return guess.__name__
|
| 60 |
+
raise RuntimeError(f'cannot find module for {orig_method}')
|
| 61 |
+
|
| 62 |
+
# Borrowed from CPython typing module
|
| 63 |
+
# https://github.com/python/cpython/blob/f90dc36c15d7fee0efaf6d39e97be0bdf2683e93/Lib/typing.py#L156
|
| 64 |
+
def _type_repr(obj):
|
| 65 |
+
"""Return the repr() of an object, special-casing types (internal helper).
|
| 66 |
+
If obj is a type, we return a shorter version than the default
|
| 67 |
+
type.__repr__, based on the module and qualified name, which is
|
| 68 |
+
typically enough to uniquely identify a type. For everything
|
| 69 |
+
else, we fall back on repr(obj).
|
| 70 |
+
"""
|
| 71 |
+
if isinstance(obj, type):
|
| 72 |
+
if obj.__module__ == 'builtins':
|
| 73 |
+
return obj.__qualname__
|
| 74 |
+
return f'{obj.__module__}.{obj.__qualname__}'
|
| 75 |
+
if obj is ...:
|
| 76 |
+
return('...')
|
| 77 |
+
if isinstance(obj, types.FunctionType):
|
| 78 |
+
return obj.__name__
|
| 79 |
+
return repr(obj)
|
| 80 |
+
|
| 81 |
+
def _get_qualified_name(func: Callable[..., Any]) -> str:
|
| 82 |
+
# things like getattr just appear in builtins
|
| 83 |
+
if getattr(builtins, func.__name__, None) is func:
|
| 84 |
+
return func.__name__
|
| 85 |
+
# torch.Tensor.{fn}
|
| 86 |
+
if (isinstance(func, (types.MethodDescriptorType, types.WrapperDescriptorType))
|
| 87 |
+
and func is getattr(torch.Tensor, func.__name__, None)):
|
| 88 |
+
return f"torch.Tensor.{func.__name__}"
|
| 89 |
+
name = func.__name__
|
| 90 |
+
module = _find_module_of_method(func)
|
| 91 |
+
module = module.replace('torch._ops', 'torch.ops') # WAR for bug in how torch.ops assigns module
|
| 92 |
+
# Fixup segment_reduce mismatch
|
| 93 |
+
if module == "torch" and name == "segment_reduce":
|
| 94 |
+
name = "_" + name
|
| 95 |
+
return f'{module}.{name}'
|
| 96 |
+
|
| 97 |
+
def _format_arg(arg, max_list_len=float('inf')) -> str:
|
| 98 |
+
if hasattr(arg, '_custom_fx_repr_fn'):
|
| 99 |
+
return arg._custom_fx_repr_fn()
|
| 100 |
+
elif isinstance(arg, list):
|
| 101 |
+
items = ', '.join(_format_arg(a) for idx, a in enumerate(arg) if idx < max_list_len)
|
| 102 |
+
maybe_len = '' if len(arg) < max_list_len + 1 else f', ...[total_len={len(arg)}]'
|
| 103 |
+
return f'[{items}{maybe_len}]'
|
| 104 |
+
elif isinstance(arg, tuple):
|
| 105 |
+
items = ', '.join(_format_arg(a) for idx, a in enumerate(arg) if idx < max_list_len)
|
| 106 |
+
maybe_len = '' if len(arg) < max_list_len + 1 else f', ...[total_len={len(arg)}]'
|
| 107 |
+
maybe_comma = ',' if len(arg) == 1 else ''
|
| 108 |
+
return f'({items}{maybe_comma}{maybe_len})'
|
| 109 |
+
elif isinstance(arg, dict):
|
| 110 |
+
items_str = ', '.join(f'{k}: {_format_arg(v)}' for k, v in arg.items())
|
| 111 |
+
return f'{{{items_str}}}'
|
| 112 |
+
|
| 113 |
+
if isinstance(arg, Node):
|
| 114 |
+
return '%' + str(arg)
|
| 115 |
+
else:
|
| 116 |
+
return str(arg)
|
| 117 |
+
|
| 118 |
+
@compatibility(is_backward_compatible=True)
|
| 119 |
+
class Node:
|
| 120 |
+
"""
|
| 121 |
+
``Node`` is the data structure that represents individual operations within
|
| 122 |
+
a ``Graph``. For the most part, Nodes represent callsites to various entities,
|
| 123 |
+
such as operators, methods, and Modules (some exceptions include nodes that
|
| 124 |
+
specify function inputs and outputs). Each ``Node`` has a function specified
|
| 125 |
+
by its ``op`` property. The ``Node`` semantics for each value of ``op`` are as follows:
|
| 126 |
+
|
| 127 |
+
- ``placeholder`` represents a function input. The ``name`` attribute specifies the name this value will take on.
|
| 128 |
+
``target`` is similarly the name of the argument. ``args`` holds either: 1) nothing, or 2) a single argument
|
| 129 |
+
denoting the default parameter of the function input. ``kwargs`` is don't-care. Placeholders correspond to
|
| 130 |
+
the function parameters (e.g. ``x``) in the graph printout.
|
| 131 |
+
- ``get_attr`` retrieves a parameter from the module hierarchy. ``name`` is similarly the name the result of the
|
| 132 |
+
fetch is assigned to. ``target`` is the fully-qualified name of the parameter's position in the module hierarchy.
|
| 133 |
+
``args`` and ``kwargs`` are don't-care
|
| 134 |
+
- ``call_function`` applies a free function to some values. ``name`` is similarly the name of the value to assign
|
| 135 |
+
to. ``target`` is the function to be applied. ``args`` and ``kwargs`` represent the arguments to the function,
|
| 136 |
+
following the Python calling convention
|
| 137 |
+
- ``call_module`` applies a module in the module hierarchy's ``forward()`` method to given arguments. ``name`` is
|
| 138 |
+
as previous. ``target`` is the fully-qualified name of the module in the module hierarchy to call.
|
| 139 |
+
``args`` and ``kwargs`` represent the arguments to invoke the module on, *excluding the self argument*.
|
| 140 |
+
- ``call_method`` calls a method on a value. ``name`` is as similar. ``target`` is the string name of the method
|
| 141 |
+
to apply to the ``self`` argument. ``args`` and ``kwargs`` represent the arguments to invoke the module on,
|
| 142 |
+
*including the self argument*
|
| 143 |
+
- ``output`` contains the output of the traced function in its ``args[0]`` attribute. This corresponds to the "return" statement
|
| 144 |
+
in the Graph printout.
|
| 145 |
+
"""
|
| 146 |
+
|
| 147 |
+
@compatibility(is_backward_compatible=True)
def __init__(self, graph: 'Graph', name: str, op: str, target: 'Target',
             args: Tuple['Argument', ...], kwargs: Dict[str, 'Argument'],
             return_type : Optional[Any] = None) -> None:
    """
    Instantiate an instance of ``Node``. Note: most often, you want to use the
    Graph APIs, i.e. ``Graph.call_module``, ``Graph.call_method``, etc. rather
    than instantiating a ``Node`` directly.

    Args:
        graph (Graph): The ``Graph`` to which this ``Node`` should belong.

        name (str): The name to which the output of this ``Node`` should be assigned

        op (str): The opcode for this ``Node``. Can be one of 'placeholder',
            'call_method', 'call_module', 'call_function', 'get_attr',
            'output'

        target ('Target'): The target this op should call. See the broader
            ``Node`` docstring for more details.

        args (Tuple['Argument']): The args to be passed to ``target``

        kwargs (Dict[str, 'Argument']): The kwargs to be passed to ``target``

        return_type (Optional[Any]): The python type expression representing the
            type of the output of this node. This field can be used for
            annotation of values in the generated code or for other types
            of analyses.
    """
    self.graph = graph
    self.name = name  # unique name of value being created
    assert op in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output', 'root']
    self.op = op  # the kind of operation = placeholder|call_method|call_module|call_function|get_attr
    # Validate that the target kind matches the opcode: call_function takes a
    # callable, every other opcode takes a string name.
    if op == 'call_function':
        if not callable(target):
            raise ValueError(f'Node [graph = {graph}, name = \'{name}\'] target {target} has type {torch.typename(target)} '
                             'but a Callable is expected')
    else:
        if not isinstance(target, str):
            raise ValueError(f'Node [graph = {graph}, name = \'{name}\'] target {target} has type {torch.typename(target)} '
                             'but a str is expected')
    self.target = target  # for method/module/function, the name of the method/module/function/attr
    # being invoked, e.g add, layer1, or torch.add

    # All `Node`-valued inputs. Key is the Node, value is don't-care.
    # The public API for this is `all_input_nodes`, this private attribute
    # should not be accessed directly.
    self._input_nodes : Dict[Node, None] = {}
    # NOTE: must run after `_input_nodes` is initialized — this populates the
    # use/user bookkeeping from `args`/`kwargs`.
    self.__update_args_kwargs(map_arg(args, lambda x: x), map_arg(kwargs, lambda x: x))  # type: ignore[arg-type]

    # All of the nodes that use the value produced by this Node
    # Note one user may correspond to several uses, e.g. the node fo ``x + x``
    # would appear once here, but represents two uses.
    #
    # Is a dict to act as an "ordered set". Keys are significant, value dont-care
    self.users : Dict[Node, None] = {}
    # Type expression representing the output value of this node.
    # This should contain the same class of Type objects that would appear
    # as type annotations for function inputs/outputs.
    #
    # For placeholder nodes, this value will be used to type-annotate the
    # generated function parameters.
    # For the return node, this value will be used to type-annotate the
    # generated function return type. (Note this is a special case. ``return``
    # does not produce a value, it's more of a notation. Thus, this value
    # describes the type of args[0] in the ``return`` node.
    self.type : Optional[Any] = return_type
    # Doubly-linked-list pointers; a freshly created (detached) node points
    # at itself in both directions.
    self._prev = self
    self._next = self
    self._erased = False

    # If set, use this fn to print this node
    self._repr_fn : Optional[Callable[[Node], str]] = None

    # Dictionary to store metadata passes need to do their
    # transformations. This metadata is preserved across node copies
    self.meta : Dict[str, Any] = {}
|
| 225 |
+
|
| 226 |
+
@property
def next(self) -> 'Node':
    """
    The ``Node`` that immediately follows this one in the graph's
    doubly-linked node list.

    Returns:

        The next ``Node`` in the linked list of Nodes.
    """
    return self._next
|
| 236 |
+
|
| 237 |
+
@property
def prev(self) -> 'Node':
    """
    The ``Node`` that immediately precedes this one in the graph's
    doubly-linked node list.

    Returns:

        The previous ``Node`` in the linked list of Nodes.
    """
    return self._prev
|
| 247 |
+
|
| 248 |
+
@compatibility(is_backward_compatible=True)
def prepend(self, x: 'Node') -> None:
    """
    Insert ``x`` immediately before this node in the graph's node list,
    first unlinking it from wherever it currently sits. Example::

        Before: p -> self
                bx -> x -> ax
        After:  p -> x -> self
                bx -> ax

    Args:
        x (Node): The node to put before this node. Must be a member of the same graph.
    """
    assert self.graph == x.graph, "Attempting to move a Node into a different Graph"
    if self == x:
        warnings.warn("Trying to prepend a node to itself. This behavior has no effect on the graph.")
        return
    x._remove_from_list()
    # Splice x between our predecessor and ourselves.
    predecessor = self._prev
    predecessor._next = x
    x._prev = predecessor
    x._next = self
    self._prev = x
|
| 269 |
+
|
| 270 |
+
@compatibility(is_backward_compatible=True)
|
| 271 |
+
def append(self, x: 'Node') -> None:
|
| 272 |
+
"""
|
| 273 |
+
Insert ``x`` after this node in the list of nodes in the graph.
|
| 274 |
+
Equivalent to ``self.next.prepend(x)``
|
| 275 |
+
|
| 276 |
+
Args:
|
| 277 |
+
x (Node): The node to put after this node. Must be a member of the same graph.
|
| 278 |
+
"""
|
| 279 |
+
self._next.prepend(x)
|
| 280 |
+
|
| 281 |
+
def _remove_from_list(self):
|
| 282 |
+
p, n = self._prev, self._next
|
| 283 |
+
p._next, n._prev = n, p
|
| 284 |
+
|
| 285 |
+
    @property
    def args(self) -> Tuple[Argument, ...]:
        """
        The tuple of arguments to this ``Node``. The interpretation of arguments
        depends on the node's opcode. See the :class:`Node` docstring for more
        information.

        Assignment to this property is allowed. All accounting of uses and users
        is updated automatically on assignment.
        """
        return self._args

    @args.setter
    def args(self, a : Tuple[Argument, ...]):
        """
        Set the tuple of arguments to this Node. The interpretation of arguments
        depends on the node's opcode. See the ``fx.Graph`` docstring for more
        information.
        """
        # DO NOT CALL `__update_args_kwargs` directly. The correct way to
        # set `args` is via direct assignment, i.e. `node.args = new_args`.
        # `map_arg` with an identity fn rebuilds nested lists/dicts as their
        # immutable FX counterparts before they are stored.
        self.__update_args_kwargs(map_arg(a, lambda x: x), self._kwargs)  # type: ignore[arg-type]
|
| 307 |
+
|
| 308 |
+
    @property
    def kwargs(self) -> Dict[str, Argument]:
        """
        The dict of keyword arguments to this ``Node``. The interpretation of arguments
        depends on the node's opcode. See the :class:`Node` docstring for more
        information.

        Assignment to this property is allowed. All accounting of uses and users
        is updated automatically on assignment.
        """
        return self._kwargs

    @kwargs.setter
    def kwargs(self, k : Dict[str, Argument]):
        """
        Set the dict of kwargs to this Node. The interpretation of arguments
        depends on the node's opcode. See the ``fx.Graph`` docstring for more
        information.
        """
        # DO NOT CALL `__update_args_kwargs` directly. The correct way to
        # set `kwargs` is via direct assignment, i.e. `node.kwargs = new_kwargs`.
        # The identity `map_arg` call coerces nested containers to immutable ones.
        self.__update_args_kwargs(self._args, map_arg(k, lambda x: x))  # type: ignore[arg-type]
|
| 330 |
+
|
| 331 |
+
@property
|
| 332 |
+
def all_input_nodes(self) -> List['Node']:
|
| 333 |
+
"""
|
| 334 |
+
Return all Nodes that are inputs to this Node. This is equivalent to
|
| 335 |
+
iterating over ``args`` and ``kwargs`` and only collecting the values that
|
| 336 |
+
are Nodes.
|
| 337 |
+
|
| 338 |
+
Returns:
|
| 339 |
+
|
| 340 |
+
List of ``Nodes`` that appear in the ``args`` and ``kwargs`` of this
|
| 341 |
+
``Node``, in that order.
|
| 342 |
+
"""
|
| 343 |
+
return list(self._input_nodes.keys())
|
| 344 |
+
|
| 345 |
+
@compatibility(is_backward_compatible=True)
|
| 346 |
+
def update_arg(self, idx : int, arg : Argument) -> None:
|
| 347 |
+
"""
|
| 348 |
+
Update an existing positional argument to contain the new value
|
| 349 |
+
``arg``. After calling, ``self.args[idx] == arg``.
|
| 350 |
+
|
| 351 |
+
Args:
|
| 352 |
+
|
| 353 |
+
idx (int): The index into ``self.args`` of the element to update
|
| 354 |
+
arg (Argument): The new argument value to write into ``args``
|
| 355 |
+
"""
|
| 356 |
+
args = list(self.args)
|
| 357 |
+
args[idx] = arg
|
| 358 |
+
self.args = tuple(args)
|
| 359 |
+
|
| 360 |
+
@compatibility(is_backward_compatible=True)
|
| 361 |
+
def update_kwarg(self, key : str, arg : Argument) -> None:
|
| 362 |
+
"""
|
| 363 |
+
Update an existing keyword argument to contain the new value
|
| 364 |
+
``arg``. After calling, ``self.kwargs[key] == arg``.
|
| 365 |
+
|
| 366 |
+
Args:
|
| 367 |
+
|
| 368 |
+
key (str): The key in ``self.kwargs`` of the element to update
|
| 369 |
+
arg (Argument): The new argument value to write into ``kwargs``
|
| 370 |
+
"""
|
| 371 |
+
kwargs = dict(self.kwargs)
|
| 372 |
+
kwargs[key] = arg
|
| 373 |
+
self.kwargs = kwargs
|
| 374 |
+
|
| 375 |
+
    @property
    def stack_trace(self) -> Optional[str]:
        """
        Return the Python stack trace that was recorded during tracing, if any.
        When traced with fx.Tracer, this property is usually populated by
        `Tracer.create_proxy`. To record stack traces during tracing for debug purposes,
        set `record_stack_traces = True` on the `Tracer` instance.
        When traced with dynamo, this property will be populated by default by
        `OutputGraph.create_proxy`.

        stack_trace would have the innermost frame at the end of the string.
        """
        # Stored in ``meta`` so it survives node copies like other metadata;
        # an absent key yields None.
        return self.meta.get("stack_trace", None)

    @stack_trace.setter
    def stack_trace(self, trace : Optional[str]):
        # Mirrors the getter: the trace lives in the node's ``meta`` dict.
        self.meta["stack_trace"] = trace
|
| 392 |
+
|
| 393 |
+
    def __update_args_kwargs(self, new_args : Tuple['Argument', ...], new_kwargs : Dict[str, 'Argument']):
        """
        This API is internal. Do *not* call it directly.

        Replaces ``_args``/``_kwargs`` wholesale and rebuilds the use-def
        bookkeeping: ``_input_nodes`` on this node and the ``users`` ordered
        set on every input node.
        """
        self._args = new_args
        self._kwargs = new_kwargs

        # Unregister this node from every node it previously used...
        for old_use in self._input_nodes.keys():
            old_use.users.pop(self)

        # ...then recompute the ordered set of input Nodes from the new
        # args/kwargs. ``setdefault`` dedupes while keeping first-use order.
        self._input_nodes = {}
        map_arg(self._args, lambda n: self._input_nodes.setdefault(n))
        map_arg(self._kwargs, lambda n: self._input_nodes.setdefault(n))

        # Finally, register this node as a user of each (possibly new) input.
        for new_use in self._input_nodes.keys():
            new_use.users.setdefault(self)
|
| 409 |
+
|
| 410 |
+
def __repr__(self) -> str:
|
| 411 |
+
if self._repr_fn:
|
| 412 |
+
return self._repr_fn(self)
|
| 413 |
+
return self.name
|
| 414 |
+
|
| 415 |
+
def _pretty_print_target(self, target):
|
| 416 |
+
"""
|
| 417 |
+
Make target printouts more user-friendly.
|
| 418 |
+
1) builtins will be printed as `builtins.xyz`
|
| 419 |
+
2) operators will be printed as `operator.xyz`
|
| 420 |
+
3) other callables will be printed with qualified name, e.g. torch.add
|
| 421 |
+
"""
|
| 422 |
+
if isinstance(target, str):
|
| 423 |
+
return target
|
| 424 |
+
if hasattr(target, '__module__'):
|
| 425 |
+
if not hasattr(target, '__name__'):
|
| 426 |
+
# Just to be defensive, if we don't have `__name__`, get the
|
| 427 |
+
# qualname. Not sure if this happens for any members of `operator`
|
| 428 |
+
# or `builtins`. This fallback path is not as good, since e.g.
|
| 429 |
+
# things in `operator` have `_operator` as their __module__.
|
| 430 |
+
return _get_qualified_name(target)
|
| 431 |
+
if target.__module__ == 'builtins':
|
| 432 |
+
return f'builtins.{target.__name__}'
|
| 433 |
+
elif target.__module__ == '_operator':
|
| 434 |
+
return f'operator.{target.__name__}'
|
| 435 |
+
return _get_qualified_name(target)
|
| 436 |
+
|
| 437 |
+
@compatibility(is_backward_compatible=True)
|
| 438 |
+
def format_node(self,
|
| 439 |
+
placeholder_names: Optional[List[str]] = None,
|
| 440 |
+
maybe_return_typename: Optional[List[str]] = None) -> Optional[str]:
|
| 441 |
+
"""
|
| 442 |
+
Return a descriptive string representation of ``self``.
|
| 443 |
+
|
| 444 |
+
This method can be used with no arguments as a debugging
|
| 445 |
+
utility.
|
| 446 |
+
|
| 447 |
+
This function is also used internally in the ``__str__`` method
|
| 448 |
+
of ``Graph``. Together, the strings in ``placeholder_names``
|
| 449 |
+
and ``maybe_return_typename`` make up the signature of the
|
| 450 |
+
autogenerated ``forward`` function in this Graph's surrounding
|
| 451 |
+
GraphModule. ``placeholder_names`` and ``maybe_return_typename``
|
| 452 |
+
should not be used otherwise.
|
| 453 |
+
|
| 454 |
+
Args:
|
| 455 |
+
placeholder_names: A list that will store formatted strings
|
| 456 |
+
representing the placeholders in the generated
|
| 457 |
+
``forward`` function. Internal use only.
|
| 458 |
+
maybe_return_typename: A single-element list that will store
|
| 459 |
+
a formatted string representing the output of the
|
| 460 |
+
generated ``forward`` function. Internal use only.
|
| 461 |
+
|
| 462 |
+
Returns:
|
| 463 |
+
str: If 1) we're using ``format_node`` as an internal helper
|
| 464 |
+
in the ``__str__`` method of ``Graph``, and 2) ``self``
|
| 465 |
+
is a placeholder Node, return ``None``. Otherwise,
|
| 466 |
+
return a descriptive string representation of the
|
| 467 |
+
current Node.
|
| 468 |
+
"""
|
| 469 |
+
if self.op == 'placeholder':
|
| 470 |
+
assert isinstance(self.target, str)
|
| 471 |
+
arg_str = self.target
|
| 472 |
+
arg_str += arg_str + f': {_type_repr(self.type)}' if self.type else ''
|
| 473 |
+
if placeholder_names:
|
| 474 |
+
placeholder_names.append(arg_str)
|
| 475 |
+
return None
|
| 476 |
+
maybe_typename = f'{_type_repr(self.type)} ' if self.type else ''
|
| 477 |
+
default_val = '(default=' + str(self.args[0]) + ')' if self.args else ''
|
| 478 |
+
return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = {self.op}[target={self.target}]{default_val}'
|
| 479 |
+
elif self.op == 'get_attr':
|
| 480 |
+
maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else ''
|
| 481 |
+
return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = ' \
|
| 482 |
+
f'{self.op}[target={self._pretty_print_target(self.target)}]'
|
| 483 |
+
elif self.op == 'output':
|
| 484 |
+
if self.type and maybe_return_typename:
|
| 485 |
+
maybe_return_typename[0] = f' -> {_type_repr(self.type)}'
|
| 486 |
+
return f'return {self.args[0]}'
|
| 487 |
+
else:
|
| 488 |
+
maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else ''
|
| 489 |
+
return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = ' \
|
| 490 |
+
f'{self.op}[target={self._pretty_print_target(self.target)}](' \
|
| 491 |
+
f'args = {_format_arg(self.args)}, kwargs = {_format_arg(self.kwargs)})'
|
| 492 |
+
|
| 493 |
+
    @compatibility(is_backward_compatible=True)
    def replace_all_uses_with(self,
                              replace_with : 'Node',
                              delete_user_cb: Callable[['Node'], bool] = lambda user: True,
                              *,
                              propagate_meta=False
                              ) -> List['Node']:
        """
        Replace all uses of ``self`` in the Graph with the Node ``replace_with``.

        Args:

            replace_with (Node): The node to replace all uses of ``self`` with.
            delete_user_cb (Callable): Callback that is called to determine
                whether a given user of the self node should be removed.
            propagate_meta (bool): Whether or not to copy all properties
                on the .meta field of the original node onto the replacement node.
                For safety, this is only valid to do if the replacement node
                doesn't already have an existing .meta field.

        Returns:

            The list of Nodes on which this change was made.
        """
        if propagate_meta:
            assert len(replace_with.meta) == 0, \
                'Called node.replace_all_uses_with(replace_with, propagate_meta=True), ' \
                'but replace_with already has .meta keys'
            for k, v in self.meta.items():
                replace_with.meta[k] = v
        # Snapshot the users up front: rewriting a user's args mutates
        # self.users while we iterate.
        to_process = list(self.users)
        skipped = []
        for use_node in to_process:
            # Honor the callback: users it rejects keep referencing self.
            if not delete_user_cb(use_node):
                skipped.append(use_node)
                continue

            def maybe_replace_node(n : Node) -> Node:
                if n == self:
                    return replace_with
                else:
                    return n

            # Rewrite every occurrence of self in this user's args/kwargs;
            # __update_args_kwargs fixes up the use/user bookkeeping.
            new_args = map_arg(use_node.args, maybe_replace_node)
            new_kwargs = map_arg(use_node.kwargs, maybe_replace_node)
            assert isinstance(new_args, tuple)
            assert isinstance(new_kwargs, dict)
            use_node.__update_args_kwargs(new_args, new_kwargs)

        # Every non-skipped user must have been detached from self.
        assert len(self.users) - len(skipped) == 0
        return [n for n in to_process if n not in skipped]
|
| 544 |
+
|
| 545 |
+
    @compatibility(is_backward_compatible=False)
    def is_impure(self):
        """
        Returns whether this op is impure, i.e. if its op is a placeholder or
        output, or if a call_function or call_module which is impure.

        Returns:

            bool: If the op is impure or not.
        """
        # Placeholders and outputs are always treated as impure: removing
        # them would change the graph's interface.
        if self.op in {"placeholder", "output"}:
            return True

        # Check if an impure function: membership in the module-level
        # registry of side-effectful functions.
        if self.op == "call_function":
            return self.target in _side_effectful_functions

        # Check if an impure module: modules may opt in via an
        # `_is_impure` attribute (absent -> pure).
        if self.op == "call_module":
            assert (
                self.graph.owning_module is not None
            ), "self.graph.owning_module not set for purity check"
            target_mod = self.graph.owning_module.get_submodule(self.target)
            assert (
                target_mod is not None
            ), f"Did not find expected submodule target {self.target}"
            return getattr(target_mod, "_is_impure", False)

        # All other opcodes (e.g. call_method, get_attr) are considered pure.
        return False
|
| 574 |
+
|
| 575 |
+
@compatibility(is_backward_compatible=False)
|
| 576 |
+
def normalized_arguments(
|
| 577 |
+
self, root : torch.nn.Module, arg_types : Optional[Tuple[Any]] = None,
|
| 578 |
+
kwarg_types : Optional[Dict[str, Any]] = None,
|
| 579 |
+
normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
|
| 580 |
+
"""
|
| 581 |
+
Returns normalized arguments to Python targets. This means that
|
| 582 |
+
`args/kwargs` will be matched up to the module/functional's
|
| 583 |
+
signature and return exclusively kwargs in positional order
|
| 584 |
+
if `normalize_to_only_use_kwargs` is true.
|
| 585 |
+
Also populates default values. Does not support positional-only
|
| 586 |
+
parameters or varargs parameters.
|
| 587 |
+
|
| 588 |
+
Supports module calls.
|
| 589 |
+
|
| 590 |
+
May require `arg_types` and `kwarg_types` in order to disambiguate overloads.
|
| 591 |
+
|
| 592 |
+
Args:
|
| 593 |
+
root (torch.nn.Module): Module upon which to resolve module targets.
|
| 594 |
+
arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args
|
| 595 |
+
kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs
|
| 596 |
+
normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
|
| 597 |
+
|
| 598 |
+
Returns:
|
| 599 |
+
|
| 600 |
+
Returns NamedTuple ArgsKwargsPair, or `None` if not successful.
|
| 601 |
+
"""
|
| 602 |
+
if self.op == 'call_function':
|
| 603 |
+
assert callable(self.target)
|
| 604 |
+
return normalize_function(self.target, self.args, self.kwargs, arg_types, kwarg_types) # type: ignore[arg-type]
|
| 605 |
+
elif self.op == 'call_module':
|
| 606 |
+
assert isinstance(self.target, str)
|
| 607 |
+
return normalize_module(root, self.target, self.args, self.kwargs) # type: ignore[arg-type]
|
| 608 |
+
|
| 609 |
+
return None
|
| 610 |
+
|
| 611 |
+
@compatibility(is_backward_compatible=True)
|
| 612 |
+
def replace_input_with(self, old_input: 'Node', new_input: 'Node'):
|
| 613 |
+
"""
|
| 614 |
+
Loop through input nodes of ``self``, and replace all instances of
|
| 615 |
+
``old_input`` with ``new_input``.
|
| 616 |
+
|
| 617 |
+
Args:
|
| 618 |
+
|
| 619 |
+
old_input (Node): The old input node to be replaced.
|
| 620 |
+
new_input (Node): The new input node to replace ``old_input``.
|
| 621 |
+
"""
|
| 622 |
+
def maybe_replace_node(n : Node) -> Node:
|
| 623 |
+
return new_input if n == old_input else n
|
| 624 |
+
|
| 625 |
+
new_args = map_arg(self.args, maybe_replace_node)
|
| 626 |
+
new_kwargs = map_arg(self.kwargs, maybe_replace_node)
|
| 627 |
+
assert isinstance(new_args, tuple)
|
| 628 |
+
assert isinstance(new_kwargs, dict)
|
| 629 |
+
self.__update_args_kwargs(new_args, new_kwargs)
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
@compatibility(is_backward_compatible=True)
def map_arg(a: Argument, fn: Callable[[Node], Argument]) -> Argument:
    """
    Apply fn to each Node appearing arg. arg may be a list, tuple, slice, or dict with string keys.
    """
    assert callable(fn), "torch.fx.map_arg(a, fn): fn must be a callable"

    def apply_if_node(x):
        # Only Node values are transformed; every other leaf passes through.
        return fn(x) if isinstance(x, Node) else x

    return map_aggregate(a, apply_if_node)
|
| 639 |
+
|
| 640 |
+
@compatibility(is_backward_compatible=True)
def map_aggregate(a: Argument, fn: Callable[[Argument], Argument]) -> Argument:
    """
    Apply fn to each Node appearing arg. arg may be a list, tuple, slice, or dict with string keys.
    """
    if isinstance(a, tuple):
        t = tuple(map_aggregate(elem, fn) for elem in a)
        # Support NamedTuple (if it has `_fields`) by repacking into original type.
        return t if not hasattr(a, '_fields') else type(a)(*t)
    elif isinstance(a, list):
        # Rebuilt as immutable_list so stored args cannot be mutated behind
        # the graph's back.
        return immutable_list(map_aggregate(elem, fn) for elem in a)
    elif isinstance(a, dict):
        # Same rationale: dicts become immutable_dict.
        return immutable_dict((k, map_aggregate(v, fn)) for k, v in a.items())
    elif isinstance(a, slice):
        # Recurse into each slice component; any of them may hold a Node.
        return slice(map_aggregate(a.start, fn), map_aggregate(a.stop, fn), map_aggregate(a.step, fn))
    else:
        # Leaf value: apply fn directly.
        return fn(a)
|
llava_next/lib/python3.10/site-packages/torch/fx/operator_schemas.py
ADDED
|
@@ -0,0 +1,427 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import inspect
|
| 3 |
+
import numbers
|
| 4 |
+
import types
|
| 5 |
+
import typing
|
| 6 |
+
import enum
|
| 7 |
+
import warnings
|
| 8 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, NamedTuple, cast, TYPE_CHECKING
|
| 9 |
+
from torch._jit_internal import boolean_dispatched
|
| 10 |
+
from ._compatibility import compatibility
|
| 11 |
+
from torch._ops import OpOverloadPacket, OpOverload
|
| 12 |
+
|
| 13 |
+
if TYPE_CHECKING:
|
| 14 |
+
from .node import Argument
|
| 15 |
+
|
| 16 |
+
__all__ = ["ArgsKwargsPair", "check_for_mutable_operation", "get_signature_for_torch_op", "create_type_hint",
|
| 17 |
+
"type_matches", "normalize_function", "normalize_module"]
|
| 18 |
+
|
| 19 |
+
@compatibility(is_backward_compatible=False)
class ArgsKwargsPair(NamedTuple):
    """
    Simple named tuple for wrapping args/kwargs pairs.
    """
    # Positional arguments, in call order.
    args: Tuple[Any, ...]
    # Keyword arguments keyed by parameter name.
    kwargs: Dict[str, Any]
|
| 26 |
+
|
| 27 |
+
# Manually-specified signatures for callables whose overloads cannot be
# derived from TorchScript schemas. Keys are the callables themselves.
_manual_overrides : Dict[Callable, List[inspect.Signature]] = {}

def _nonzero_schemas():
    # torch.nonzero has two Python-level call forms distinguished only by
    # the keyword-only `as_tuple` flag, so both are spelled out as dummy
    # functions and captured via inspect.signature.
    signatures = []

    def nonzero(self):
        pass
    signatures.append(inspect.signature(nonzero))

    def nonzero(self, *, as_tuple : bool):  # type: ignore[no-redef]
        pass
    signatures.append(inspect.signature(nonzero))

    return signatures

_manual_overrides[torch.nonzero] = _nonzero_schemas()
|
| 43 |
+
|
| 44 |
+
class _FakeGlobalNamespace:
    # Stand-in for the `__torch__` namespace referenced by TorchScript
    # annotation strings; only a lookup of the name 'torch' is resolvable.
    def __getattr__(self, name):
        if name == 'torch':
            return torch
        raise RuntimeError('Expected a torch namespace lookup')

# Globals dict used to eval TorchScript annotation strings into Python
# types: maps schema type names (Tensor, Device, number, ...) onto real
# Python classes.
_type_eval_globals = {'Tensor' : torch.Tensor, 'Device' : torch.device, 'Layout' : torch.layout,
                      'number' : numbers.Number, 'Future' : torch.jit.Future,
                      'AnyEnumType' : enum.Enum, 'QScheme' : torch.qscheme,
                      '__torch__': _FakeGlobalNamespace(), 'NoneType': type(None),
                      't': typing.TypeVar('t')}
# Also expose every public name from `typing` (List, Optional, Union, ...)
# so generic annotation strings evaluate correctly.
for k in dir(typing):
    _type_eval_globals[k] = getattr(typing, k)
|
| 57 |
+
|
| 58 |
+
def _torchscript_type_to_python_type(ts_type : 'torch._C.JitType') -> Any:
    """
    Convert a TorchScript type to a Python type (including subtypes) via
    eval'ing the annotation_str. _type_eval_globals sets up expressions
    like "List" and "Future" to map to actual types (typing.List and jit.Future)
    """
    # NOTE: eval here runs on schema-provided annotation strings (not user
    # input), with name resolution restricted to _type_eval_globals.
    return eval(ts_type.annotation_str, _type_eval_globals)
|
| 65 |
+
|
| 66 |
+
def _torchscript_schema_to_signature(ts_schema : torch._C.FunctionSchema) -> inspect.Signature:
    """
    Build an ``inspect.Signature`` that mirrors a TorchScript
    ``FunctionSchema``: one Parameter per schema argument (with Python-ified
    type annotations and defaults) and a return annotation collapsed from the
    schema's return list.
    """
    from inspect import Parameter
    parameters : List[Parameter] = []
    for arg in ts_schema.arguments:
        arg_type = _torchscript_type_to_python_type(arg.type)
        default = arg.default_value if arg.has_default_value() else Parameter.empty
        # TODO: Figure out if this is safe. It seems like when generating the type signatures for
        # PythonArgParser, we emit signatures with `input` instead of `self` as the first tensor
        # argument name. Downstream, if someone converts that positional argument to a keyword
        # argument, the name mismatch will break things, so here we're going to normalize the
        # name to "input"
        name = arg.name if arg.name != 'self' else 'input'
        kind = Parameter.KEYWORD_ONLY if arg.kwarg_only else Parameter.POSITIONAL_OR_KEYWORD
        # "from" is a keyword therefore it must be a POSITIONAL_ONLY argument
        if name == "from":
            assert kind == Parameter.POSITIONAL_OR_KEYWORD
            # ParameterKind type is internal implementation detail to inspec package
            # which makes it hard to do type annotation
            kind = Parameter.POSITIONAL_ONLY  # type: ignore[assignment]
            # This renders all previous arguments to positional only
            for idx, p in enumerate(parameters):
                assert p.kind == Parameter.POSITIONAL_OR_KEYWORD
                parameters[idx] = Parameter(name=p.name, kind=Parameter.POSITIONAL_ONLY, default=p.default, annotation=p.annotation)
        parameters.append(Parameter(name=name, kind=kind, default=default, annotation=arg_type))
    # Collapse return types: none -> None, one -> that type, many -> a tuple.
    return_types = [_torchscript_type_to_python_type(ret.type) for ret in ts_schema.returns]
    if len(return_types) == 0:
        return_type = None
    elif len(return_types) == 1:
        return_type = return_types[0]
    else:
        return_type = tuple(return_types)

    return inspect.Signature(parameters, return_annotation=return_type)
|
| 99 |
+
|
| 100 |
+
@compatibility(is_backward_compatible=False)
def check_for_mutable_operation(target : Callable, args : Tuple['Argument', ...], kwargs : Dict[str, 'Argument']):
    """
    Raise ``RuntimeError`` if ``target`` unambiguously resolves to a mutable
    (in-place) TorchScript schema for the given call.

    Mutability checking is best-effort: if no schemas are available, no
    schema matches the call, or more than one schema matches (ambiguous),
    the check silently does nothing.

    Args:
        target (Callable): The operator being called.
        args (Tuple[Argument, ...]): Positional arguments of the call.
        kwargs (Dict[str, Argument]): Keyword arguments of the call.

    Raises:
        RuntimeError: If the single matching schema is mutable.
    """
    signatures, schemas = get_signature_for_torch_op(target, return_schemas=True)

    if not (signatures and schemas):
        return

    # Collect every overload whose Python signature accepts this call.
    matched_schemas = []
    for candidate_signature, schema in zip(signatures, schemas):
        try:
            candidate_signature.bind(*args, **kwargs)
            matched_schemas.append((candidate_signature, schema))
        except TypeError:
            # This overload does not accept the given arguments; try the next.
            continue

    # Only an unambiguous (exactly-one) match is safe to enforce. Zero
    # matches means we cannot check; multiple matches are ambiguous, and
    # since this check is best-effort we do nothing in either case.
    if len(matched_schemas) == 1:
        _, schema_to_check = matched_schemas[0]
        if schema_to_check.is_mutable:
            raise RuntimeError(f'Tried to trace mutable operation {schema_to_check}. FX only supports functional '
                               f'code, so operations that mutate operands in-place (e.g. via `out` arguments) '
                               f'are not supported')
|
| 135 |
+
|
| 136 |
+
@compatibility(is_backward_compatible=False)
def get_signature_for_torch_op(op : Callable, return_schemas : bool = False):
    """
    Given an operator on the `torch` namespace, return a list of `inspect.Signature`
    objects corresponding to the overloads of that op.. May return `None` if a signature
    could not be retrieved.

    Args:
        op (Callable): An operator on the `torch` namespace to look up a signature for

    Returns:
        Optional[List[inspect.Signature]]: A list of signatures for the overloads of this
            operator, or None if the operator signatures could not be retrieved. If
            return_schemas=True, returns a tuple containing the optional Python signatures
            and the optional TorchScript Function signature
    """
    if isinstance(op, OpOverload):
        # A fully-resolved overload carries exactly one schema.
        schemas = [op._schema]
    elif isinstance(op, OpOverloadPacket):
        # Expand a packet into the schemas of all its overloads.
        schemas = [getattr(op, overload)._schema for overload in op.overloads()]
    else:
        # Hand-maintained signatures take precedence for ops the JIT cannot
        # describe (e.g. torch.nonzero); these carry no TorchScript schemas.
        override = _manual_overrides.get(op)
        if override:
            return (override, None) if return_schemas else None

        aten_fn = torch.jit._builtins._find_builtin(op)

        if aten_fn is None:
            # Not a known builtin: no signature information available.
            return (None, None) if return_schemas else None
        schemas = torch._C._jit_get_schemas_for_operator(aten_fn)

    signatures = [_torchscript_schema_to_signature(schema) for schema in schemas]
    return (signatures, schemas) if return_schemas else signatures
|
| 169 |
+
|
| 170 |
+
@compatibility(is_backward_compatible=False)
def create_type_hint(x):
    """
    Produce a typing hint describing the container ``x``.

    For a ``list`` or ``tuple`` whose elements are types, returns
    ``List[T]`` / ``Tuple[T, ...]`` where ``T`` is the most general type
    among the elements, falling back to ``Any`` when two element types are
    unrelated. Any other value — or any failure while inspecting the
    elements — returns ``x`` unchanged after emitting a warning.
    """
    try:
        if isinstance(x, (list, tuple)):
            # todo(chilli): Figure out the right way for mypy to handle this
            if isinstance(x, list):
                def ret_type(x):
                    return List[x]  # type: ignore[valid-type]
            else:
                def ret_type(x):
                    return Tuple[x, ...]
            if len(x) == 0:
                return ret_type(Any)
            # Widen `base_type` until it covers every element; bail out to
            # Any as soon as two element types are unrelated.
            base_type = x[0]
            for t in x:
                if issubclass(t, base_type):
                    continue
                elif issubclass(base_type, t):
                    base_type = t
                else:
                    return ret_type(Any)
            return ret_type(base_type)
    except Exception:
        # Best-effort only: malformed elements (e.g. non-classes passed to
        # issubclass) simply mean we cannot produce a hint.
        warnings.warn(f"We were not able to successfully create type hint from the type {x}")
    return x
|
| 197 |
+
|
| 198 |
+
@compatibility(is_backward_compatible=False)
def type_matches(signature_type : Any, argument_type : Any):
    """
    Best-effort check that ``argument_type`` is acceptable where a signature
    declares ``signature_type``, applying schema-specific promotions
    (``int`` -> ``List[int]``, ``torch.dtype`` -> ``int``,
    homogeneous ``Tuple[T]`` -> ``List[T]``, ``int``/``float`` -> ``number``).
    """
    # Unwrap parametric types (e.g. Union[...]) to their origin for dispatch.
    sig_origin_type = getattr(signature_type, '__origin__', signature_type)

    if signature_type is argument_type:
        return True

    # Union types in signature. Given type needs to match one of the
    # contained types in the Union
    if sig_origin_type is typing.Union and signature_type != argument_type:
        sig_contained = signature_type.__args__
        return any(type_matches(c, argument_type) for c in sig_contained)

    if signature_type is List[int] and argument_type is int:
        # int can be promoted to List[int]
        return True

    if getattr(signature_type, '__origin__', None) in {list, List}:
        sig_el_type = signature_type.__args__[0]
        # Only simple (non-parametric) element types are supported.
        if not inspect.isclass(sig_el_type):
            warnings.warn(
                f"Does not support nested parametric types, got {signature_type}. Please file a bug.")
            return False
        if getattr(argument_type, '__origin__', None) in {list, List}:
            return issubclass(argument_type.__args__[0], sig_el_type)

        def is_homogeneous_tuple(t):
            # True when t is Tuple[...] whose element types all subclass
            # sig_el_type (Ellipsis entries, as in Tuple[T, ...], are allowed).
            if getattr(t, "__origin__", None) not in {tuple, Tuple}:
                return False
            contained = t.__args__
            if t.__args__ == ((),):  # Tuple[()].__args__ == ((),) for some reason
                return True
            return all((c is Ellipsis) or issubclass(c, sig_el_type) for c in contained)

        # Tuple[T] is accepted for List[T] parameters
        return is_homogeneous_tuple(argument_type)

    # Dtype is an int in schemas
    if signature_type is int and argument_type is torch.dtype:
        return True

    if signature_type is numbers.Number and argument_type in {int, float}:
        return True
    if inspect.isclass(argument_type) and inspect.isclass(signature_type):
        return issubclass(argument_type, signature_type)

    return False
|
| 245 |
+
|
| 246 |
+
@compatibility(is_backward_compatible=False)
def normalize_function(
        target: Callable, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None, arg_types : Optional[Tuple[Any]] = None,
        kwarg_types : Optional[Dict[str, Any]] = None,
        normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
    """
    Returns normalized arguments to PyTorch functions. This means that
    `args/kwargs` will be matched up to the functional's
    signature and return exclusively kwargs in positional order if
    `normalize_to_only_use_kwargs` is True.
    Also populates default values. Does not support positional-only
    parameters or varargs parameters (*args, **kwargs). Does not support modules.

    May require `arg_types` and `kwarg_types` in order to disambiguate overloads.

    Args:
        target (Callable): Function that we are normalizing
        args (Tuple[Any]): Tuple of args to the function
        kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function
        arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args
        kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs
        normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.

    Returns:

        Returns normalized_args_and_kwargs, or `None` if not successful.
    """
    kwargs = {} if kwargs is None else kwargs
    new_args_and_kwargs = None

    is_torch_op = (isinstance(target, types.BuiltinFunctionType)
                   or isinstance(target, (OpOverloadPacket, OpOverload)))

    if not is_torch_op:
        # Plain Python callable: analyze its inspect signature directly.
        target_for_analysis = target
        if target in boolean_dispatched:
            # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have
            # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
            # branches of the dispatch have exactly the same signature. If they do, use the `true`
            # branch signature for analysis. Otherwise, leave this un-normalized
            assert not isinstance(target, str)
            dispatched = boolean_dispatched[target]
            if_true, if_false = dispatched['if_true'], dispatched['if_false']
            if inspect.signature(if_true).parameters != inspect.signature(if_false).parameters:
                return None
            target_for_analysis = if_true

        assert callable(target_for_analysis)
        sig = inspect.signature(inspect.unwrap(target_for_analysis))
        new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs, normalize_to_only_use_kwargs)
    else:
        # Torch operator: try to match the call against its recorded schemas.
        assert callable(target)
        torch_op_schemas = get_signature_for_torch_op(target)
        matched_schemas = []
        if torch_op_schemas:
            # Iterate through all of the schema until we find one that matches
            # If one matches, populate `new_args_and_kwargs` with the new args/kwargs
            # values. If none matches, `new_args_and_kwargs` will be None
            for candidate_signature in torch_op_schemas:
                try:
                    candidate_signature.bind(*args, **kwargs)
                except TypeError:
                    continue
                matched_schemas.append(candidate_signature)

            if len(matched_schemas) == 1:
                # Matched exactly one schema, unambiguous
                new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(
                    matched_schemas[0], args, kwargs, normalize_to_only_use_kwargs)
            elif len(matched_schemas) > 1:
                if arg_types is not None or kwarg_types is not None:
                    # Use caller-supplied types to pick among the ambiguous schemas.
                    arg_types = arg_types if arg_types else cast(Tuple[Any], ())
                    kwarg_types = kwarg_types if kwarg_types else {}
                    for candidate_signature in torch_op_schemas:
                        sig_matches = True
                        try:
                            bound_types = candidate_signature.bind(*arg_types, **kwarg_types)
                            for arg_name, arg_type in bound_types.arguments.items():
                                param = candidate_signature.parameters[arg_name]
                                sig_matches = sig_matches and type_matches(param.annotation, arg_type)
                        except TypeError:
                            sig_matches = False
                        if sig_matches:
                            new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(
                                candidate_signature, args, kwargs, normalize_to_only_use_kwargs)
                            break
                else:
                    # Matched more than one schema. In this situation, the caller must provide the types of
                    # the arguments of the overload they expect.
                    schema_printouts = '\n'.join(str(schema) for schema in matched_schemas)
                    raise RuntimeError(f'Tried to normalize arguments to {torch.typename(target)} but '
                                       f'the schema match was ambiguous! Please provide argument types to '
                                       f'the normalize_arguments() call. Available schemas:\n{schema_printouts}')
            # len(matched_schemas) == 0: did not match any schema; cannot normalize.

    return new_args_and_kwargs
|
| 343 |
+
|
| 344 |
+
@compatibility(is_backward_compatible=False)
def normalize_module(
        root: torch.nn.Module, target: str, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None,
        normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
    """
    Returns normalized arguments to PyTorch modules. This means that
    `args/kwargs` will be matched up to the functional's
    signature and return exclusively kwargs in positional order if
    `normalize_to_only_use_kwargs` is True.
    Also populates default values. Does not support positional-only
    parameters or varargs parameters (*args, **kwargs).

    Args:
        root (nn.Module): root module upon which we query modules
        target (Callable): Function that we are normalizing
        args (Tuple[Any]): Tuple of args to the function
        kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function
        normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.

    Returns:

        Returns normalized_args_and_kwargs, or `None` if not successful.
    """
    try:
        submod = root.get_submodule(target)
    except AttributeError as e:
        raise RuntimeError(f"Tried to normalize node with target {target} but root did not "
                           f"have that target!") from e

    # Only normalize standard torch.nn modules, identified by class name lookup
    # on the torch.nn namespace; anything else is left untouched.
    if not hasattr(submod.__class__, '__name__'):
        return None
    if getattr(torch.nn, submod.__class__.__name__, None) != submod.__class__:
        return None

    sig = inspect.signature(inspect.unwrap(submod.forward))
    return _args_kwargs_to_normalized_args_kwargs(
        sig, args, {} if kwargs is None else kwargs, normalize_to_only_use_kwargs)
|
| 382 |
+
|
| 383 |
+
def _args_kwargs_to_normalized_args_kwargs(sig : inspect.Signature, args : Tuple[Any, ...],
|
| 384 |
+
kwargs : Dict[str, Any],
|
| 385 |
+
normalize_to_only_use_kwargs : bool) -> Optional[ArgsKwargsPair]:
|
| 386 |
+
"""
|
| 387 |
+
Given a call target, args, and kwargs, return the arguments normalized into
|
| 388 |
+
an ArgsKwargsPair, or None if the type signature is not supported by
|
| 389 |
+
this normalization.
|
| 390 |
+
|
| 391 |
+
Args:
|
| 392 |
+
|
| 393 |
+
sig (inspect.Signature): Signature object for the target
|
| 394 |
+
args (Tuple): Arguments that appear at the callsite for `target`
|
| 395 |
+
kwargs (Dict): Keyword arguments that appear at the callsite for `target`
|
| 396 |
+
normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
|
| 397 |
+
|
| 398 |
+
Returns:
|
| 399 |
+
|
| 400 |
+
Optional[ArgsKwargsPair]: Normalized args and kwargs for `target`, or `None` if
|
| 401 |
+
this target is not supported.
|
| 402 |
+
"""
|
| 403 |
+
|
| 404 |
+
# Don't currently support positional-only
|
| 405 |
+
# or varargs (*args, **kwargs) signatures
|
| 406 |
+
supported_parameter_types = {
|
| 407 |
+
inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY}
|
| 408 |
+
if any(p.kind not in supported_parameter_types for p in sig.parameters.values()):
|
| 409 |
+
# Add an exception for one signature, which is common for random/uniform, i.e.:
|
| 410 |
+
# Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None
|
| 411 |
+
# `from` is Python keyword and as such functions with that signature should have
|
| 412 |
+
# positional-only args, but at the same time they could be dispatched as kwargs
|
| 413 |
+
if list(sig.parameters.keys()) != ['input', 'from', 'to', 'generator']:
|
| 414 |
+
return None
|
| 415 |
+
|
| 416 |
+
bound_args = sig.bind(*args, **kwargs)
|
| 417 |
+
bound_args.apply_defaults()
|
| 418 |
+
|
| 419 |
+
new_kwargs : Dict[str, Any] = {}
|
| 420 |
+
new_args : List[Any] = []
|
| 421 |
+
for i, param in enumerate(sig.parameters):
|
| 422 |
+
if not normalize_to_only_use_kwargs and i < len(args):
|
| 423 |
+
new_args.append(bound_args.arguments[param])
|
| 424 |
+
else:
|
| 425 |
+
new_kwargs[param] = bound_args.arguments[param]
|
| 426 |
+
|
| 427 |
+
return ArgsKwargsPair(tuple(new_args), new_kwargs)
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import graph_drawer
|
| 2 |
+
from . import graph_manipulation
|
| 3 |
+
from . import net_min_base
|
| 4 |
+
from . import operator_support
|
| 5 |
+
from . import param_fetch
|
| 6 |
+
from . import reinplace
|
| 7 |
+
from . import shape_prop
|
| 8 |
+
from . import split_module
|
| 9 |
+
from . import split_utils
|
| 10 |
+
from . import splitter_base
|
| 11 |
+
from . import tools_common
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/graph_manipulation.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, List, NamedTuple, Optional
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch.fx._compatibility import compatibility
|
| 5 |
+
from torch.fx.graph import Graph
|
| 6 |
+
from torch.fx.graph_module import GraphModule
|
| 7 |
+
from torch.fx.node import (
|
| 8 |
+
map_arg,
|
| 9 |
+
Node,
|
| 10 |
+
Target,
|
| 11 |
+
)
|
| 12 |
+
from torch.fx.passes.shape_prop import ShapeProp
|
| 13 |
+
|
| 14 |
+
__all__ = ['replace_target_nodes_with', 'size_bytes', 'get_size_of_all_nodes', 'get_tensor_meta',
|
| 15 |
+
'get_size_of_node']
|
| 16 |
+
|
| 17 |
+
@compatibility(is_backward_compatible=False)
def replace_target_nodes_with(
    fx_module: GraphModule,
    old_op: str,
    old_target: Target,
    new_op: str,
    new_target: Target,
):
    """Modifies all nodes in fx_module.graph.nodes which match the specified op code and target,
    and updates them to match the new op code and target"""
    rebuilt_graph = Graph()
    remap: Dict[Node, Node] = {}
    for node in fx_module.graph.nodes:
        if node.op != old_op or node.target != old_target:
            # Unaffected node: copy it verbatim, rewiring inputs to the copies.
            remap[node] = rebuilt_graph.node_copy(node, lambda n: remap[n])
            continue
        # Matching node: re-create it under the new op/target, keeping its name
        # and rewiring its args/kwargs to the already-copied nodes.
        rewired_args = map_arg(node.args, lambda n: remap[n])
        rewired_kwargs = map_arg(node.kwargs, lambda n: remap[n])
        assert isinstance(rewired_args, tuple)
        assert isinstance(rewired_kwargs, dict)
        remap[node] = rebuilt_graph.create_node(
            new_op, new_target, rewired_args, rewired_kwargs, node.name
        )
    fx_module.graph = rebuilt_graph
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
@compatibility(is_backward_compatible=False)
class size_bytes(NamedTuple):
    """Byte sizes recorded on a node by `get_size_of_all_nodes`."""
    # Size of the node's output tensor, in bytes.
    output_size: int
    # Output size plus the size of all module parameters (weights, bias), in bytes.
    total_size: int
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@compatibility(is_backward_compatible=False)
def get_size_of_all_nodes(
    fx_module: GraphModule, args: Optional[List[torch.Tensor]] = None
) -> None:
    """Given a fx graph module, update each node with its total size (weights + bias + output)
    and its output_size (output only). For a non-module node, the total size is the
    output size. Sizes are attached in place as ``node.size_bytes``; nothing is returned.

    Args:
        fx_module: Graph module whose nodes will be annotated.
        args: Optional sample inputs; when given, shape propagation is run first
            so that each node carries the tensor metadata the size computation needs.
    """
    if args is not None:
        # Mark shape and dtype for each node (node.shape and node.dtype)
        ShapeProp(fx_module).propagate(*args)
    # Annotate every node up to (but excluding) the output node.
    # (The original kept an unused `total_size_of_graph` accumulator and a
    # docstring claim that the total was returned; both were dead and are removed.)
    for node in fx_module.graph.nodes:
        if node.op == "output":
            break
        node.size_bytes = get_size_of_node(fx_module, node)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@compatibility(is_backward_compatible=False)
def get_tensor_meta(node: Node) -> Any:
    """Fetch the 'tensor_meta' entry from ``node.meta``, raising a RuntimeError
    if shape propagation has not populated it yet."""
    meta = node.meta.get("tensor_meta")
    if meta:
        return meta

    raise RuntimeError(
        f"Node {node} has no tensor metadata associated with it! "
        f"Check that shape propagation has run."
    )
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@compatibility(is_backward_compatible=False)
def get_size_of_node(fx_module: GraphModule, node: Node) -> size_bytes:
    """Given a node with node.dtype and node.shape, return its total size and its output size.
    total_size = weights + bias + output_size
    """
    elem_count = 0
    # For a module node, consider all of its parameters as well.
    if node.op == "call_module":
        submodule = dict(fx_module.named_modules())[node.target]
        for _name, param in submodule.named_parameters():
            elem_count += param.numel()
    # Don't forget the output size; the tensor metadata (populated by shape
    # propagation) carries this node's output shape and dtype.
    tensor_meta = get_tensor_meta(node)
    output_elems = tensor_meta.shape.numel()
    elem_count += output_elems
    # Assume for now if it's quantized then it's qint8 or quint8
    if tensor_meta.is_quantized:
        bytes_per_elem = torch._empty_affine_quantized(
            [], dtype=tensor_meta.dtype
        ).element_size()
    else:
        bytes_per_elem = torch.tensor([], dtype=tensor_meta.dtype).element_size()
    return size_bytes(bytes_per_elem * output_elems, bytes_per_elem * elem_count)
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/net_min_base.py
ADDED
|
@@ -0,0 +1,618 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.fx
|
| 7 |
+
from torch.fx._compatibility import compatibility
|
| 8 |
+
from torch.fx.node import map_arg
|
| 9 |
+
|
| 10 |
+
from .shape_prop import ShapeProp
|
| 11 |
+
from .split_utils import split_by_tags
|
| 12 |
+
from .tools_common import (
|
| 13 |
+
CALLABLE_NODE_OPS,
|
| 14 |
+
FxNetAccFusionsFinder,
|
| 15 |
+
Names,
|
| 16 |
+
NodeList,
|
| 17 |
+
NodeSet,
|
| 18 |
+
TensorOrTensors,
|
| 19 |
+
Tensors,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
__all__ = [
|
| 23 |
+
"FxNetMinimizerBadModuleError",
|
| 24 |
+
"FxNetMinimizerRunFuncError",
|
| 25 |
+
"FxNetMinimizerResultMismatchError",
|
| 26 |
+
]
|
| 27 |
+
|
| 28 |
+
_LOGGER = logging.getLogger(__name__)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@compatibility(is_backward_compatible=False)
class FxNetMinimizerBadModuleError(Exception):
    """
    Raised if failed to split out a minimize module
    """
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@compatibility(is_backward_compatible=False)
class FxNetMinimizerRunFuncError(Exception):
    """
    Raised if error occurs during run_a or run_b functions
    """
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@compatibility(is_backward_compatible=False)
class FxNetMinimizerResultMismatchError(Exception):
    """
    Raised if comparing function thinks the results are mismatching.
    """
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@dataclass
|
| 59 |
+
class _MinimizerSettingBase:
|
| 60 |
+
"""
|
| 61 |
+
Args:
|
| 62 |
+
`accumulate_error`: Instead of using a's input for both converted module to verify
|
| 63 |
+
, use the previous outputs of each converted module as input to accumulate the
|
| 64 |
+
errors.
|
| 65 |
+
|
| 66 |
+
`traverse_method`: "sequential" or "binary" or "accumulate"
|
| 67 |
+
Determine the way of traverse the nodes in FX module.
|
| 68 |
+
|
| 69 |
+
`find_all`: Minimizer will go through the entire model and return all problematic nodes.
|
| 70 |
+
|
| 71 |
+
`return_intermediate`: If true, when using `run_nodes()` function to run the
|
| 72 |
+
model, intermediate results of all the ops will be returned as output.
|
| 73 |
+
"""
|
| 74 |
+
|
| 75 |
+
accumulate_error: bool = False
|
| 76 |
+
traverse_method: str = "sequential"
|
| 77 |
+
find_all: bool = False
|
| 78 |
+
return_intermediate: bool = False
|
| 79 |
+
|
| 80 |
+
def __str__(self):
|
| 81 |
+
settings_str = "FX Minimizer Settings:\n"
|
| 82 |
+
|
| 83 |
+
for k, v in vars(self).items():
|
| 84 |
+
settings_str += f"\t{k}: {v}\n"
|
| 85 |
+
|
| 86 |
+
return settings_str
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class _MinimizerBase:
|
| 90 |
+
"""
|
| 91 |
+
This class is used to automatically find problematic nodes in a model. It takes a FX
|
| 92 |
+
graphmodule and generate some submodules while traverse the graph. Then two functions
|
| 93 |
+
`run_a` and `run_b` will be used to run the same submodule and a function `compare_fn`
|
| 94 |
+
will be used to compare the results.
|
| 95 |
+
|
| 96 |
+
Currently we provides two ways to traverse the graph and generate submodules.
|
| 97 |
+
1. Sequential traversal: this will traverse the graph node by node and generate
|
| 98 |
+
one submodule with one sigle node.
|
| 99 |
+
2. Binary searching: this will do a binary search style traversal on the graph.
|
| 100 |
+
|
| 101 |
+
For internal Users, a guide can be found here https://fb.quip.com/HDtuAgiKGfkP.
|
| 102 |
+
"""
|
| 103 |
+
|
| 104 |
+
def __init__(
|
| 105 |
+
self,
|
| 106 |
+
module: torch.fx.GraphModule,
|
| 107 |
+
sample_input: Tensors,
|
| 108 |
+
compare_fn: Callable[
|
| 109 |
+
[TensorOrTensors, TensorOrTensors, Names], Tuple[float, bool]
|
| 110 |
+
],
|
| 111 |
+
settings: _MinimizerSettingBase,
|
| 112 |
+
):
|
| 113 |
+
assert isinstance(module, torch.fx.GraphModule)
|
| 114 |
+
|
| 115 |
+
self.module = module
|
| 116 |
+
self.sample_input = sample_input
|
| 117 |
+
self.compare_fn = compare_fn
|
| 118 |
+
self.settings = settings
|
| 119 |
+
|
| 120 |
+
# Stores outputs of run_a function
|
| 121 |
+
self.a_outputs: Dict[str, Any] = {}
|
| 122 |
+
|
| 123 |
+
# Stores outputs of run_b function
|
| 124 |
+
self.b_outputs: Dict[str, Any] = {}
|
| 125 |
+
|
| 126 |
+
# Stores the results of compare_fn
|
| 127 |
+
self.results: Dict[Any, Any] = {}
|
| 128 |
+
|
| 129 |
+
# Stores the report for the runs
|
| 130 |
+
self.reports: List[List[str]] = []
|
| 131 |
+
|
| 132 |
+
# Current iteration
|
| 133 |
+
self.iteration: int = 0
|
| 134 |
+
|
| 135 |
+
callable_nodes = {
|
| 136 |
+
node for node in self.module.graph.nodes if node.op in CALLABLE_NODE_OPS
|
| 137 |
+
}
|
| 138 |
+
ShapeProp(self.module).propagate(*self.sample_input)
|
| 139 |
+
self.fusions = FxNetAccFusionsFinder(self.module, callable_nodes)()
|
| 140 |
+
|
| 141 |
+
# Check if number of input in sample_input matches the number of placeholders
|
| 142 |
+
placeholders = [
|
| 143 |
+
node.name for node in self.module.graph.nodes if node.op == "placeholder"
|
| 144 |
+
]
|
| 145 |
+
assert len(placeholders) == len(self.sample_input)
|
| 146 |
+
|
| 147 |
+
# Store sample_input
|
| 148 |
+
for i, name in enumerate(placeholders):
|
| 149 |
+
self.a_outputs[name] = sample_input[i]
|
| 150 |
+
self.b_outputs[name] = sample_input[i]
|
| 151 |
+
|
| 152 |
+
def run_a(self, mod: torch.fx.GraphModule, inputs: Tensors) -> TensorOrTensors:
|
| 153 |
+
"""
|
| 154 |
+
Run `mod` with `inputs` and generate output. The output will be compared with
|
| 155 |
+
output of run_b().
|
| 156 |
+
"""
|
| 157 |
+
raise RuntimeError("run_a() is not implemented.")
|
| 158 |
+
|
| 159 |
+
def run_b(self, mod: torch.fx.GraphModule, inputs: Tensors) -> TensorOrTensors:
|
| 160 |
+
"""
|
| 161 |
+
Run `mod` with `inputs` and generate output. The output will be compared with
|
| 162 |
+
output of run_a().
|
| 163 |
+
"""
|
| 164 |
+
raise RuntimeError("run_b() is not implemented.")
|
| 165 |
+
|
| 166 |
+
def _store_outputs(
|
| 167 |
+
self,
|
| 168 |
+
a_result: TensorOrTensors,
|
| 169 |
+
b_result: TensorOrTensors,
|
| 170 |
+
submodule: torch.fx.GraphModule,
|
| 171 |
+
):
|
| 172 |
+
"""
|
| 173 |
+
Store the outputs of self.run_a() and self.run_b() into self.a_outputs and
|
| 174 |
+
self.b_outputs, so that we can use them when execute preceding nodes that
|
| 175 |
+
use those outputs as inputs.
|
| 176 |
+
|
| 177 |
+
Args:
|
| 178 |
+
a_result: Output of self.run_a(). Could be a tensor or tensors.
|
| 179 |
+
b_result: Output of self.run_b(). Could be a tensor or tensors.
|
| 180 |
+
submodule: The module that generates a_result and b_result.
|
| 181 |
+
"""
|
| 182 |
+
output_node = next(
|
| 183 |
+
node for node in submodule.graph.nodes if node.op == "output"
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
# Only one output
|
| 187 |
+
if isinstance(output_node.args[0], torch.fx.Node):
|
| 188 |
+
self.a_outputs[output_node.args[0].name] = a_result
|
| 189 |
+
self.b_outputs[output_node.args[0].name] = b_result
|
| 190 |
+
# Multiple outputs
|
| 191 |
+
else:
|
| 192 |
+
for i, arg in enumerate(output_node.args[0]):
|
| 193 |
+
self.a_outputs[arg.name] = a_result[i]
|
| 194 |
+
self.b_outputs[arg.name] = b_result[i]
|
| 195 |
+
|
| 196 |
+
def _get_submod_inputs(
|
| 197 |
+
self, main_module: torch.fx.GraphModule, submod_path: str
|
| 198 |
+
) -> Tuple[Tensors, Tensors]:
|
| 199 |
+
"""
|
| 200 |
+
Try get submodule inputs from stored outputs. If not found then use
|
| 201 |
+
torch_glow.get_submod_inputs to get the inputs.
|
| 202 |
+
|
| 203 |
+
If accumulate_error is False, use a_input for run_a() and run_b()
|
| 204 |
+
otherwise use a_input for run_a and b_input for run_b.
|
| 205 |
+
|
| 206 |
+
Args:
|
| 207 |
+
main_module: Top-levlel fx module.
|
| 208 |
+
submod_path: Path to the submodule we want to run and compare results.
|
| 209 |
+
|
| 210 |
+
Returns:
|
| 211 |
+
a_input: List of tensor(s) that will be used by run_a() as submodule inputs.
|
| 212 |
+
b_input: List of tensor(s) that will be used by run_b() as submodule inputs.
|
| 213 |
+
"""
|
| 214 |
+
a_input = []
|
| 215 |
+
b_input = []
|
| 216 |
+
submodule = getattr(main_module, submod_path)
|
| 217 |
+
placeholders = [
|
| 218 |
+
node.name for node in submodule.graph.nodes if node.op == "placeholder"
|
| 219 |
+
]
|
| 220 |
+
|
| 221 |
+
# If all placeholder can be found in stored outputs, use stored
|
| 222 |
+
# outputs as inputs. Otherwise, use `torch_glow.get_submod_inputs`
|
| 223 |
+
# to get the inputs.
|
| 224 |
+
if set(placeholders) <= self.a_outputs.keys():
|
| 225 |
+
for name in placeholders:
|
| 226 |
+
a_input.append(self.a_outputs[name])
|
| 227 |
+
b_input.append(self.b_outputs[name])
|
| 228 |
+
else:
|
| 229 |
+
if self.settings.accumulate_error:
|
| 230 |
+
print(f"Can't find previous stored outputs named {placeholders}!")
|
| 231 |
+
|
| 232 |
+
def get_inputs(self: torch.nn.Module, inputs: Any):
|
| 233 |
+
nonlocal a_input
|
| 234 |
+
a_input = inputs
|
| 235 |
+
|
| 236 |
+
# Use forward hook to get the inputs to the submodule
|
| 237 |
+
handle = submodule.register_forward_pre_hook(get_inputs)
|
| 238 |
+
main_module(*self.sample_input)
|
| 239 |
+
handle.remove()
|
| 240 |
+
|
| 241 |
+
b_input = a_input
|
| 242 |
+
|
| 243 |
+
if not self.settings.accumulate_error:
|
| 244 |
+
return a_input, a_input
|
| 245 |
+
|
| 246 |
+
return a_input, b_input
|
| 247 |
+
|
| 248 |
+
def _tag_nodes(self, selected_nodes: NodeSet):
|
| 249 |
+
"""
|
| 250 |
+
Tag selected nodes with tag "minimize". Nodes with the same tags will
|
| 251 |
+
be split to the same submodule afterwards.
|
| 252 |
+
|
| 253 |
+
Args:
|
| 254 |
+
selected_nodes: Nodes that we want to minimize. We will tag those nodes
|
| 255 |
+
with "minimize", all preceding nodes with "main_0" and all following
|
| 256 |
+
nodes with "main_1".
|
| 257 |
+
"""
|
| 258 |
+
for node in self.module.graph.nodes:
|
| 259 |
+
if node.op not in CALLABLE_NODE_OPS:
|
| 260 |
+
continue
|
| 261 |
+
|
| 262 |
+
if node in selected_nodes:
|
| 263 |
+
node.tag = "minimize"
|
| 264 |
+
elif any(
|
| 265 |
+
n.tag in {"minimize", "main_1"}
|
| 266 |
+
for n in node.all_input_nodes
|
| 267 |
+
if n.op in CALLABLE_NODE_OPS
|
| 268 |
+
):
|
| 269 |
+
node.tag = "main_1"
|
| 270 |
+
else:
|
| 271 |
+
node.tag = "main_0"
|
| 272 |
+
|
| 273 |
+
def _build_submodule(self, nodes: NodeSet) -> Tuple[torch.fx.GraphModule, str]:
|
| 274 |
+
"""
|
| 275 |
+
Split self.module so that one submodule consists of `nodes` and only `nodes`.
|
| 276 |
+
|
| 277 |
+
Args:
|
| 278 |
+
nodes: Nodes that we want to include in the minimize submodule.
|
| 279 |
+
|
| 280 |
+
Returns:
|
| 281 |
+
split_module (torch.fx.GraphModule): the module after split.
|
| 282 |
+
submodule_name (str): the name of the submodule that consists of `nodes`.
|
| 283 |
+
"""
|
| 284 |
+
# Color provided nodes
|
| 285 |
+
self._tag_nodes(nodes)
|
| 286 |
+
|
| 287 |
+
# Split module based on coloring
|
| 288 |
+
split_module = split_by_tags(self.module, ["main_0", "minimize", "main_1"])
|
| 289 |
+
|
| 290 |
+
# Find submodule containing colored nodes
|
| 291 |
+
submodule_name: str = ""
|
| 292 |
+
for child_name, _ in split_module.named_children():
|
| 293 |
+
# Skip submodules we're not interested in at the moment
|
| 294 |
+
if "minimize" not in child_name:
|
| 295 |
+
continue
|
| 296 |
+
|
| 297 |
+
if submodule_name == "":
|
| 298 |
+
submodule_name = child_name
|
| 299 |
+
else:
|
| 300 |
+
raise FxNetMinimizerBadModuleError(
|
| 301 |
+
f"Expected only one minimize submodule with nodes {nodes}"
|
| 302 |
+
)
|
| 303 |
+
|
| 304 |
+
if submodule_name == "":
|
| 305 |
+
raise FxNetMinimizerBadModuleError(
|
| 306 |
+
f"Minimize submodule was not found with nodes {nodes}"
|
| 307 |
+
)
|
| 308 |
+
|
| 309 |
+
return split_module, submodule_name
|
| 310 |
+
|
| 311 |
+
def _run_and_compare(
    self, split_module: torch.fx.GraphModule, submod_name: str, output_names: Names
):
    """
    Run the submodule in `split_module` that has name `submod_name`
    using `self.run_a` and `self.run_b` and compare their results.

    Args:
        split_module: Main module that contains the minimize submodule.
        submod_name: Name of the minimize submodule.
        output_names: Names of the node we want to output. If None, we
            will use the original output.

    Raises:
        FxNetMinimizerResultMismatchError: if `self.compare_fn` reports a
            mismatch between the two runs' results.
    """
    submodule = getattr(split_module, submod_name)
    # Capture the inputs each backend should feed into the submodule.
    a_input, b_input = self._get_submod_inputs(split_module, submod_name)

    # Lazily start the first report if no traversal created one yet.
    if len(self.reports) == 0:
        self.reports.append([])
        self.iteration = 1

    report = self.reports[self.iteration - 1]
    report.append("Run and compare ...")

    if output_names:
        # Rewire the submodule so it outputs exactly the requested nodes.
        output_nodes: NodeList = []
        for node in submodule.graph.nodes:
            if node.op == "output":
                # Drop the existing output node; a new one is added below.
                submodule.graph.erase_node(node)

            if node.name in output_names:
                output_nodes.append(node)

        # A single node is output directly; multiple nodes as a tuple.
        submodule.graph.output(
            output_nodes[0] if len(output_nodes) == 1 else tuple(output_nodes)
        )
        submodule.graph.lint()
        submodule.recompile()

    # Use name of args in output node as key to store comparison result
    # NOTE(review): assumes the graph always contains an output node,
    # otherwise `result_key` below would be unbound — confirm for
    # hand-constructed graphs.
    for node in submodule.graph.nodes:
        if node.op == "output":
            result_key = map_arg(node.args, lambda x: x.name)

    a_result = self.run_a(submodule, a_input)
    b_result = self.run_b(submodule, b_input)
    self._store_outputs(a_result, b_result, submodule)

    # Compare results
    names: Names = output_names
    if output_names is None:
        # Fall back to the names of the original output arguments.
        names = [str(v) for v in result_key]

    numeric_result, bool_result = self.compare_fn(a_result, b_result, names)

    self.results[result_key] = numeric_result
    report.append(f"Numerical accuracy = {numeric_result}")
    if not bool_result:
        report.append(f"Result mismatch for {result_key}")
        raise FxNetMinimizerResultMismatchError(f"Result mismatch for {result_key}")
|
| 370 |
+
|
| 371 |
+
def _binary_search_impl(
    self, all_nodes: NodeList, start_idx: int, end_idx: int
) -> NodeSet:
    """
    Recursive binary search implementation.

    Runs `all_nodes[start_idx:end_idx]` as one submodule. If it fails,
    recurses into each half of the range to narrow down the culprits.
    Returns the set of culprit nodes found (empty if this range runs
    cleanly).
    """
    nodes: NodeList = all_nodes[start_idx:end_idx]

    # Each recursion step logs into its own report entry.
    report: List[str] = []
    self.reports.append(report)
    self.iteration += 1
    report.append(f"Binary search iteration {self.iteration}.")
    report.append(
        f"From node index {start_idx} to {end_idx-1}. "
        f"Size of the interested node list is {len(nodes)}"
    )

    cur_nodes: NodeSet = set(nodes)

    # Fused partners must stay in the same submodule as their node.
    for node in nodes:
        if node in self.fusions:
            cur_nodes.update(self.fusions[node])

    try:
        split_module, submod_name = self._build_submodule(cur_nodes)
        self._run_and_compare(split_module, submod_name, [])
    except (FxNetMinimizerRunFuncError, FxNetMinimizerResultMismatchError):

        if len(nodes) == 1:
            # Cannot split further: this node (plus fusions) is the culprit.
            report.append(
                f"This is the last node in the sub-module. "
                f"Search in the current branch is successful with culprit = {cur_nodes}."
            )
            self.print_report(report)
            return cur_nodes

        report.append(
            "Proceed to split and lower the halves of the current "
            "sub-module individually."
        )
        self.print_report(report)

        mid = len(nodes) // 2
        culprits = self._binary_search_impl(all_nodes, start_idx, start_idx + mid)

        # Stop at the first culprit unless the caller wants all of them.
        if len(culprits) != 0 and not self.settings.find_all:
            return culprits

        culprits = self._binary_search_impl(all_nodes, start_idx + mid, end_idx)

        if len(culprits) == 0:
            # Neither half failed on its own even though the whole range did.
            report.append(
                f"Further split and lowering found no errors. "
                f"Unable to minimize the submodule with list of nodes: {nodes}"
            )
            self.print_report(report)

        return culprits
    else:
        # try-else: the whole range ran without error.
        report.append("No discrepancy found.")
        self.print_report(report)
        return set()
|
| 433 |
+
|
| 434 |
+
def _binary_traverse(self, nodes: NodeList) -> NodeSet:
    """Locate culprits in `nodes` via recursive binary search."""
    num_nodes = len(nodes)
    return self._binary_search_impl(nodes, 0, num_nodes)
|
| 439 |
+
|
| 440 |
+
def _sequential_traverse(self, nodes: NodeList) -> NodeSet:
    """
    Traverse `nodes` one by one and determine if any of them is a culprit.

    Each node (together with its fusion group, if any) is lowered and run
    as its own submodule. Returns the set of culprit nodes; stops at the
    first one unless `self.settings.find_all` is set.
    """
    culprits: NodeSet = set()

    for node in nodes:
        # One report entry per visited node.
        report: List[str] = []
        self.reports.append(report)
        self.iteration += 1
        report.append(f"Sequential traverse iteration {self.iteration}.")
        report.append(f"Visit node: {node.name}")

        _LOGGER.info("Visit node: %s", node.name)
        cur_nodes: NodeSet = {node}

        # Replace the single node by its whole fusion group when fused.
        if node in self.fusions:
            cur_nodes = self.fusions[node]

        try:
            split_module, submod_name = self._build_submodule(cur_nodes)
            self._run_and_compare(split_module, submod_name, [node.name])
            self.print_report(report)
        except (FxNetMinimizerResultMismatchError):
            # Numeric mismatch: only the visited node is blamed.
            culprits.add(node)
            report.append(f"Found culprit from numeric error: {node}")
            self.print_report(report)
            if not self.settings.find_all:
                return culprits
        except (FxNetMinimizerRunFuncError):
            # Run failure: blame the whole fusion group.
            culprits.update(cur_nodes)
            report.append(f"Found culprit from run error: {node}")
            self.print_report(report)
            if not self.settings.find_all:
                return culprits

    return culprits
|
| 477 |
+
|
| 478 |
+
def _accumulate_traverse(self, nodes: NodeList) -> NodeSet:
    """
    Grow the submodule one node at a time and re-run it after each
    addition; the node whose addition first triggers an error is reported
    as the culprit. `find_all` is not supported by this strategy.
    """
    culprits: NodeSet = set()
    nodes_to_run: NodeSet = set()

    # find_all is not supported for accumulate traversal because all the
    # ops run on NNPI. So we return after the first op that raises error.
    if self.settings.find_all:
        print("'Find All' mode is not supported in accumulate traversal.")
        return culprits

    for node in nodes:
        # One report entry per accumulation step.
        report: List[str] = []
        self.reports.append(report)
        self.iteration += 1
        report.append(f"Accumulate traverse iteration {self.iteration}.")

        nodes_to_run.add(node)

        # Node names may arrive as tuples; normalize to the first element.
        node_name = node.name
        if node_name is not None and isinstance(node_name, tuple):
            node_name = node_name[0]
        assert node_name is not None and isinstance(
            node_name, str
        ), f"minimize: node_name: {node_name}"

        report.append(f"Add node: {node_name}")

        try:
            split_module, submod_name = self._build_submodule(nodes_to_run)
            self._run_and_compare(split_module, submod_name, [node_name])
            self.print_report(report)
        except (FxNetMinimizerResultMismatchError, FxNetMinimizerRunFuncError):
            # The most recently added node tipped the accumulated module over.
            culprits.add(node)
            report.append(f"Found culprit {node}")
            self.print_report(report)
            return culprits

    return culprits
|
| 516 |
+
|
| 517 |
+
def _collect_nodes(self, start: Optional[str], end: Optional[str]) -> NodeList:
    """
    Collect nodes in the model that between nodes with name of `start` and `end`.
    These two nodes are also included.
    """
    collected: NodeList = []
    # With no explicit start we begin collecting immediately.
    collecting = start is None

    for node in self.module.graph.nodes:
        # Only callable nodes are of interest.
        if node.op not in CALLABLE_NODE_OPS:
            continue

        collecting = collecting or node.name == start
        if collecting:
            collected.append(node)

        # `end` is inclusive and terminates the scan.
        if node.name == end:
            break

    return collected
|
| 539 |
+
|
| 540 |
+
def run_nodes(self, start: Optional[str] = None, end: Optional[str] = None):
    """
    Run part of the model from `start` node to `end` node. If `start` is None
    then we start from the beginning of the model. If `end` is None then we
    stop at the end of the model.

    Args:
        start: The name of the node which is the first node of the submodule
            we want to run. If set to None, then we'll start with the first
            node of the model.
        end: The name of the node which is the last node of the submodule we
            want to run. If set to None, we'll end with the last node of the
            model.
    """
    selected = self._collect_nodes(start, end)

    # Pull in fusion groups so fused partners stay in the same submodule.
    node_set = set(selected)
    for node in selected:
        if node in self.fusions:
            node_set.update(self.fusions[node])

    output_names = (
        [node.name for node in selected]
        if self.settings.return_intermediate
        else []
    )

    try:
        split_module, submod_name = self._build_submodule(node_set)
        self._run_and_compare(split_module, submod_name, output_names)
    except (
        FxNetMinimizerRunFuncError,
        FxNetMinimizerResultMismatchError,
    ) as e:
        # Best-effort runner: report the failure instead of propagating it.
        print(e)
|
| 573 |
+
|
| 574 |
+
def print_report(self, report: List[str]):
    """
    Pretty-print a single report: the first entry is the headline and
    every subsequent entry is printed indented beneath it.

    Args:
        report: Ordered log lines collected during one minimizer iteration.
    """
    # enumerate() replaces the un-idiomatic `range(len(report))` index loop.
    for i, line in enumerate(report):
        print(line if i == 0 else " . " + line)
|
| 580 |
+
|
| 581 |
+
def print_reports(self):
    """Print every accumulated per-iteration report, in order."""
    for stored_report in self.reports:
        self.print_report(stored_report)
|
| 584 |
+
|
| 585 |
+
def minimize(
    self, start: Optional[str] = None, end: Optional[str] = None
) -> NodeSet:
    """
    Minimizing the model from node with name `start` to node with name `end` base
    on self.settings. Find culprits that causes FxNetMinimizerRunFuncError or
    FxNetMinimizerResultMismatchError errors.

    Args:
        start: The name of the node where we want to start minimizing. If set
            to None, then we'll start with the first node of the model.
        end: The name of the node where we want to terminate minimizing. If
            set to None, we'll end with the last node of the model.

    Returns:
        nodes: A list of nodes that causes FxNetMinimizerRunFuncError or
            FxNetMinimizerResultMismatchError errors during minimizing.
    """

    print(self.settings)
    print(self.module.graph)

    candidate_nodes = self._collect_nodes(start, end)

    # Dispatch on the traversal strategy configured in the settings.
    traversers = {
        "sequential": self._sequential_traverse,
        "binary": self._binary_traverse,
        "accumulate": self._accumulate_traverse,
    }
    traverse_method = self.settings.traverse_method
    if traverse_method not in traversers:
        raise RuntimeError(f"Unknown traverse method {traverse_method}!")
    return traversers[traverse_method](candidate_nodes)
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/reinplace.py
ADDED
|
@@ -0,0 +1,674 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from torch.fx import Node
|
| 3 |
+
from torch.fx._compatibility import compatibility
|
| 4 |
+
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
|
| 5 |
+
from torch.utils._pytree import tree_map, tree_flatten, tree_map_only
|
| 6 |
+
from torch.multiprocessing.reductions import StorageWeakRef
|
| 7 |
+
|
| 8 |
+
import _operator
|
| 9 |
+
from enum import Enum
|
| 10 |
+
import itertools
|
| 11 |
+
from typing import Set, Dict
|
| 12 |
+
from collections import defaultdict
|
| 13 |
+
|
| 14 |
+
__all__ = ['reinplace']
|
| 15 |
+
|
| 16 |
+
class _ViewType(Enum):
    """Classifies how an op's output aliases its first argument."""

    NonView = 0           # output is not a view of the input
    SingleOutputView = 1  # output is a single view of the input
    MultiOutputView = 2   # output is a list of views ('*' in the alias after-set)
|
| 20 |
+
|
| 21 |
+
def _is_view_op(tgt):
    """
    Return True if `tgt` is an ATen OpOverload whose first argument is
    aliased (but not written to) by the output, i.e. a view op.

    Returns False for non-OpOverload targets and zero-argument schemas —
    the original fell through and implicitly returned None in those cases,
    which made the return type inconsistent (bool | None).
    """
    if tgt is not None and isinstance(tgt, torch._ops.OpOverload):
        schema = tgt._schema
        if len(schema.arguments) > 0:
            first_arg = schema.arguments[0]
            # A view op aliases its first argument without mutating it.
            return first_arg.alias_info is not None and not first_arg.alias_info.is_write
    # Explicit False instead of an implicit None fall-through.
    return False
|
| 28 |
+
|
| 29 |
+
def _get_view_type(tgt) -> _ViewType:
    """
    Classify `tgt` as a non-view, single-output view, or multi-output
    view op, based on the alias annotations of its first schema argument.
    """
    # Only ATen overloads carry a schema we can inspect.
    if tgt is None or not isinstance(tgt, torch._ops.OpOverload):
        return _ViewType.NonView
    schema = tgt._schema
    if len(schema.arguments) == 0:
        return _ViewType.NonView
    first_arg = schema.arguments[0]
    # Views alias their first argument without writing to it.
    if first_arg.alias_info is None or first_arg.alias_info.is_write:
        return _ViewType.NonView
    # A '*' in the after-set marks ops returning a list of views.
    if '*' in first_arg.alias_info.after_set:
        return _ViewType.MultiOutputView
    return _ViewType.SingleOutputView
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# Stores a bunch of metadata related to functionalization each node.
|
| 45 |
+
# Relevant metadata:
|
| 46 |
+
# n.meta['fake_result']: FakeTensor (same type as the output of the node, but with FakeTenors instead of Tensors)
|
| 47 |
+
# The fake tensor output from running the current node
|
| 48 |
+
# n.meta['view_of']: Node
|
| 49 |
+
# If the current node n is a view of some base tensor, the 'view_of' field tells us which
|
| 50 |
+
# view node was used to generate the current node (a view tensor).
|
| 51 |
+
# This information actually makes `fake_result` redundant, but we can use `fake_result`
|
| 52 |
+
# to sanity check that our aliasing information is correct.
|
| 53 |
+
@compatibility(is_backward_compatible=False)
class _FunctionalizationMetadataProp(torch.fx.Interpreter):
    """
    Interpreter pass that runs the graph under FakeTensorMode and records,
    per node, the metadata needed by the reinplacing pass:

    - n.meta['fake_result']: the FakeTensor output of running the node
    - n.meta['node_idx']: the node's position in execution order
    - n.meta['view_of']: the Node this node is a view of, when applicable
    """

    def run_node(self, node: Node):
        # Record execution order so later passes can reason about before/after.
        self.node_counter += 1
        result = super().run_node(node)
        node.meta['fake_result'] = result
        node.meta['node_idx'] = self.node_counter

        # (1) Update metadata with the list of nodes that are used by this node
        # copy_() doesn't read from its first argument; it writes to it, overwriting previous data.
        # We don't want to treat it as "being used as an input".
        # NOTE(review): node_args is computed but never used below — appears vestigial.
        node_args = node.args
        if node.target is torch.ops.aten.copy_.default:
            node_args = node_args[1:]

        # (2) Update metadata to track aliasing information about view tensor nodes.
        if node.op == 'call_function':
            view_type = _get_view_type(node.target)
            if view_type == _ViewType.SingleOutputView:
                assert isinstance(node.args[0], Node)
                node.meta['view_of'] = node.args[0]
            elif view_type == _ViewType.MultiOutputView:
                # Defer linking until the getitem nodes are visited (see below).
                self.multi_output_view_nodes[node] = node.args[0]

            # Check if we returned a multi-output view,
            # and we're now grabbing the individual views from the output.
            #
            # For multi-output views, we want to map each output view to the base,
            # but this mapping involves two separate nodes in FX IR.
            # e.g. "a, b = x_1.split(...)" becomes:
            # %split_tensor : [num_users=2] = call_function[target=torch.ops.aten.split.Tensor](args = (%x_1, 2), kwargs = {})
            # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%split_tensor, 0), kwargs = {})
            # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%split_tensor, 1), kwargs = {})
            # And we'd like to set:
            # getitem1.meta['view_of'] = x_1
            elif node.target is _operator.getitem:
                list_arg = node.args[0]
                maybe_base_of_view = self.multi_output_view_nodes.get(list_arg, None)
                if maybe_base_of_view is not None:
                    # Note: we could also track indexing info here for multi-output views.
                    # I don't think this metadata is strictly needed for de-functionalization.
                    assert isinstance(maybe_base_of_view, Node)
                    node.meta['view_of'] = maybe_base_of_view

        if 'view_of' in node.meta:
            # We're linking the current node with its first argument as views.
            # Assert here that this is actually the case, and their storages are the same.
            assert isinstance(node.meta['fake_result'], FakeTensor)
            assert isinstance(node.meta['view_of'].meta['fake_result'], FakeTensor)
            view_storage = StorageWeakRef(node.meta['fake_result']._typed_storage())
            base_storage = StorageWeakRef(node.meta['view_of'].meta['fake_result']._typed_storage())
            assert view_storage == base_storage
        return result



    def propagate(self, *args):
        """Run the module on fake copies of `args`, populating node metadata."""
        self.multi_output_view_nodes = {}
        self.node_counter = -1

        with FakeTensorMode() as mode:
            # Fake tensors let us trace aliasing without real computation.
            fake_args = [mode.from_tensor(a) for a in args]
            return super().run(*fake_args)
|
| 117 |
+
|
| 118 |
+
def _schemas_match(functional_schema, inplace_schema):
    """
    Check whether `inplace_schema` is the in-place counterpart of
    `functional_schema`: same base name plus a trailing underscore, and
    positionally identical argument types.
    """
    inplace_name = inplace_schema.name
    # "foo_" pairs with "foo".
    names_match = inplace_name.endswith("_") and inplace_name[:-1] == functional_schema.name

    functional_args = functional_schema.arguments
    inplace_args = inplace_schema.arguments
    arg_types_match = len(functional_args) == len(inplace_args) and all(
        f_arg.type == i_arg.type for f_arg, i_arg in zip(functional_args, inplace_args)
    )

    # Sanity-check the in-place schema: its first argument must be mutable...
    first_alias = inplace_args[0].alias_info
    assert first_alias is not None and first_alias.is_write
    # ...and none of its remaining arguments may alias.
    assert all(arg.alias_info is None for arg in inplace_args[1:])

    return names_match and arg_types_match
|
| 127 |
+
|
| 128 |
+
# TODO: this should be beefed up to be able to properly re-inplace with:
|
| 129 |
+
# - mutating ops (e.g. _fused_moving_avg_obs_fq_helper)
|
| 130 |
+
# - out= ops (e.g. angle -> angle.out)
|
| 131 |
+
# TODO: we should also figure this info out using torchgen.
|
| 132 |
+
def _maybe_get_inplace_op(op):
    """
    Return the in-place OpOverload corresponding to the functional `op`
    (e.g. aten.add.Tensor -> aten.add_.Tensor), or None when no
    schema-compatible in-place variant exists. View ops are never
    re-inplaced and always return None.
    """
    # __module__ seems broken; it returns torch._ops.aten which doesn't exist
    if not isinstance(op, torch._ops.OpOverload):
        return None
    # Some view ops have inplace variants (as_strided_, etc),
    # but we do NOT want the reinplacing pass to directly add these into the program.
    # (they'll require extra special handling, and aren't really useful for perf anyway)
    if _is_view_op(op):
        return None
    op_namespace = op.__module__.split(".")[-1]
    op_base_name = op.overloadpacket.__name__
    # Pass a default so a missing namespace yields None instead of raising
    # AttributeError — without it the `is None` check below was dead code.
    maybe_namespace_module = getattr(torch.ops, op_namespace, None)
    maybe_inplace_op = None if maybe_namespace_module is None else getattr(maybe_namespace_module, f'{op_base_name}_', None)
    if maybe_inplace_op is None:
        return None

    inplace_overloads = [
        getattr(maybe_inplace_op, overload_name) for overload_name in maybe_inplace_op.overloads()
    ]
    inplace_overloads_with_matching_schemas = [
        f
        for f in inplace_overloads
        if _schemas_match(op._schema, f._schema)
    ]
    # Just because foo() and foo_() are both existing operators,
    # They aren't guaranteed to have compatible schemas.
    # For example, pow.Scalar(Scalar self, Tensor exponent) has no valid inplace variant,
    # Even though several overloads of pow_ exist.
    if len(inplace_overloads_with_matching_schemas) == 0:
        return None
    assert len(inplace_overloads_with_matching_schemas) == 1
    inplace_op = inplace_overloads_with_matching_schemas[0]
    return inplace_op
|
| 165 |
+
|
| 166 |
+
# Maps each {view}_scatter op to the view op it "inverts".
# _get_view_inverse_node_usages replays the mapped view op with the
# scatter call's trailing args to detect scatter calls that are exact
# inverses of an existing alias.
_VIEW_INVERSE_MAP = {
    torch.ops.aten.diagonal_scatter.default: torch.ops.aten.diagonal.default,
    torch.ops.aten.select_scatter.default: torch.ops.aten.select.int,
    torch.ops.aten.slice_scatter.default: torch.ops.aten.slice.Tensor,
    torch.ops.aten.as_strided_scatter.default: torch.ops.aten.as_strided.default,
}
|
| 172 |
+
|
| 173 |
+
# This function, given a set of set of (aliased) tensor nodes,
|
| 174 |
+
# Returns any nodes in the graph that *use* any of the aliases, that occur *after* op_index
|
| 175 |
+
# in the node ordering.
|
| 176 |
+
def _get_all_later_node_usages(tensor_aliases: Set[Node], op_index: int):
    """
    Given a set of (aliased) tensor nodes, return every node in the graph
    that *uses* any of the aliases and occurs *after* `op_index` in the
    node ordering.

    Intermediate view ops inside the alias set itself are skipped: they
    only matter if their output is then used elsewhere (in an out-of-place
    op, or as an output of the function).

    Removed the nested `_add_if_tensor` helper from the original — it was
    defined but never called (dead code).
    """
    nodes_used_after = set()
    for t in tensor_aliases:
        # get all nodes that use the current alias
        usage_nodes = t.users
        for n in usage_nodes:
            # We only care about usages after the current node
            if 'node_idx' not in n.meta or n.meta['node_idx'] <= op_index:
                continue
            # We also don't care about intermediate view ops.
            # They only matter if their output is then used elsewhere
            # (either in an out-of-place op, or as an output to the function).
            if n in tensor_aliases:
                if isinstance(n.target, torch._ops.OpOverload) or n.target == _operator.getitem:
                    continue
            nodes_used_after.add(n)
    return nodes_used_after
|
| 197 |
+
|
| 198 |
+
# Given an op that we're trying to re-inplace, "b = foo(a)",
|
| 199 |
+
# And given a {view}_scatter op that shows up later in the graph, "y = {view}_scatter(base, x, args...)"
|
| 200 |
+
# Then re-inplacing `foo()` would allow us to remove the `{view}_scatter` op entirely, IF:
|
| 201 |
+
# If there are any aliases in the alias_set(a) that satisfy:
|
| 202 |
+
# (1) The base of "alias", "alias_base", has the same size/stride/offset metadata as "base"
|
| 203 |
+
# (2) The output of running {view}(alias, args...) gives you the same size/stride/offset metadata
|
| 204 |
+
# as "alias"
|
| 205 |
+
def _get_view_inverse_node_usages(later_node_usages: Set[Node], self_aliases: Set[Node]) -> Set[Node]:
    """
    Find {view}_scatter nodes among `later_node_usages` that are the exact
    inverse of a view in `self_aliases` — i.e. scatter calls that could be
    removed entirely if the op producing the alias were re-inplaced.
    """
    def matching_view_metadata(a, b):
        # Two tensors "match" when size, stride, and storage offset all agree.
        return a.size() == b.size() and \
            a.stride() == b.stride() and \
            a.storage_offset() == b.storage_offset()

    view_inverse_nodes = set()
    # Go through them in node order, so we can see chains of view_scatter ops.
    for n in sorted(later_node_usages, key=lambda x: x.meta['node_idx']):
        if n.target not in _VIEW_INVERSE_MAP:
            continue
        base = n.args[0]
        mutated_view = n.args[1]
        assert isinstance(base, Node)
        assert isinstance(base.meta['fake_result'], FakeTensor)
        assert isinstance(mutated_view, Node)
        assert isinstance(mutated_view.meta['fake_result'], FakeTensor)
        # Check that this view_inverse op actually corresponds to doing the inverse
        # of one of our existing self_alias nodes.
        original_view = _VIEW_INVERSE_MAP[n.target]
        for self_alias in self_aliases:
            # We're looking for some alias of the self arg, "alias",
            # that was created from some op `alias = foo(base, args...)`
            # such that the current _scatter op "inverts" that foo call.
            # We can check that by running the original op again, and checking that the strides match.
            if 'view_of' not in self_alias.meta:
                continue
            self_alias_base = self_alias.meta['view_of']
            try:
                # Here we're trying to re-use the args from the view_scatter call inside of the corresponding
                # view op, which might throw. This just indicates that the view_scatter op isn't a valid inverse
                # of the current alias we're looking at.
                view_replay_metadata = original_view(self_alias_base.meta['fake_result'], *n.args[2:], **n.kwargs)
                expected_metadata = self_alias.meta['fake_result']
                # If the alias and its base both have matching metadata, then this view_scatter op is valid to re-inplace.
                if matching_view_metadata(self_alias_base.meta['fake_result'], base.meta['fake_result']) and \
                        matching_view_metadata(view_replay_metadata, expected_metadata):
                    view_inverse_nodes.add(n)
            except Exception:
                # Replay failure simply means this alias is not the inverse; keep scanning.
                continue

    return view_inverse_nodes
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
@compatibility(is_backward_compatible=True)
def reinplace(gm, *sample_args):
    """
    Given an fx.GraphModule, modifies it to perform "reinplacing",
    mutating the nodes of the graph.
    We look for out-of-place op call sites like `b = a.add(...)`,
    and convert them to be inplace (`b = a.add_(...)`),
    as long as the input to the current operator ("a") isn't re-used
    anywhere later in the graph.

    This pass currently expects to operate on a **functional, ATen** graph.
    This can be obtained by running `make_fx(functionalize(f))`.

    Sample inputs are needed to determine aliasing relationships of the inputs.
    In general, we can't reinplace node `b = a.add(...)` if "a" aliases any of the
    inputs to the program.

    Given a node "b = foo(a, args...)" the algorithm for re-inplacing is as follows:

    (1) Perform some initial checks on the metadata of "a" and "args..."
        that can disqualify them from being reinplaced.

      (1a) Check that the self argument we're attempting to reinplace
           has acceptable dtype/size metadata to reinplace with.

           For example, if we have:
             a = torch.ones(1)
             b = torch.ones(10)
             out = torch.add(a, b)
           We can't turn that into
             a.add_(b)
           Because that would require resizing "a".

           Similarly, we can't convert torch.ge(a, b) into a.ge_(b),
           because that would require changing a's dtype (from e.g. float32 to bool).
           Note that in this specific example, we could technically do better.

           If we see the pattern:
             a_1 = a.ge(b)
             a_2 = aten._to_copy(a_1, a.dtype)
           Then this should be valid to completely re-inplace
           (this is exactly what functionalization will emit when it sees a.ge_(b)).

           This optimization is only really important for user programs
           that directly use inplace comparison ops though.

           We also cannot re-inplace on tensors that have overlapping memory,
           e.g. torch.ones(1).expand(4, 4).add_(1)

      (1b) Check if "a" is an alias of any of the program inputs.

           If it is, skip and move to the next node.
           Inplace'ing an op that would cause it to mutate a program input is not sound,
           because that would be a side effect visible to the user.

           NOTE: there's a future optimization that we should make:
           if "a" is a (alias of a) program input, but later in the program
           there is a node that looks like "a.copy_(...)",
           then re-inplacing is ok to do - we are temporarily re-using a's buffer,
           which will later be overwritten by the copy_() call.

           This will be an important optimization to have for programs that mutate
           their inputs. It currently isn't implemented though.

      (1c) Check if "a" and "args..." alias

           For example, re-inplacing to create code like the below
           isn't guaranteed to be sound:

             aten.mul_(a, a)

    (2) Check that "a" and all of its outstanding aliases are not used anywhere
        later in the graph. If this is the case, then it's safe to re-inplace
        to "b = foo_(a)".

        There are a few caveats to this, explained in more detail below:
        (a) If "a" is used later as an argument to a view op, that is okay.
            It's only a problem if "a" (or that view) is later passed
            into a normal operator, or if it is returned as the program output.
        (b) If "a" is a repeat argument in `foo()`, then don't reinplace.
            Most ATen kernels don't make any guarantees that this is sound,
            e.g. if you do aten.mul_(a, a).
            So we'll just ban re-inplacing in this case.
        (c) If "a" is used as an input into a view "inverse" / "scatter"
            operator, it is potentially fine to re-inplace
            (and remove that scatter operator from the graph).
            See below for a more detailed example.

        NOTE: there is an optimization in this step that is crucial
        to fully recovering performance from functionalization.

        Given this program:
        def f(x):
            a = torch.ops.aten.add(x, x)
            b = torch.ops.aten.diagonal(a)
            torch.ops.aten.fill_(b, 0)
            return a

        Functionalization will emit the following:
        def f(x):
            a = torch.ops.aten.add(x, x)
            b = torch.ops.aten.diagonal(a, 0, 1)
            b_updated = torch.ops.aten.fill(b, 0)
            a_updated = torch.ops.aten.diagonal_scatter(a, b_updated, 0, 1)
            return a_updated

        Ordinarily, we would not be able to reinplace the fill,
        because "b" aliases with "a" which is used by the diagonal_scatter call.

        "re-inplacing" is on the hook for figuring out that it is ok to
        completely remove the expensive diagonal_scatter call, if we re-inplace the add().

        So, for every `alias in alias_set(a)`, instead of checking
        that "alias" is not used anywhere later in the graph,
        we check that
            EITHER:
          (a) alias is not used anywhere later in the graph
            OR:
          (b) alias is used exactly once later on in the graph,
              in the following op:

                out = foo_scatter(alias, x, args...)

              where the following must hold:
                (i) "foo_scatter" is the "inverse" operator for foo.
                    This only applies to "foo" ops that are view operators,
                    which view into a subset of the original tensor's memory.
                    In practice, there are ~4 operators where this applies:
                      diagonal -> diagonal_scatter
                      slice -> slice_scatter
                      select -> select_scatter
                      as_strided -> as_strided_scatter
                (ii) "args..." are the same between the foo() and foo_scatter() calls.

    (3) Perform the actual re-inplacing on foo!

        (3b) is the common case, but special care is needed for {view}_scatter (3a)

        (3a) {view}_scatter ops.

            Consider this program:
            a = torch.zeros(2, 2)
            b = torch.ones(2)
            a[0] = b

            Post functionalization, that will look like:
            a = torch.zeros(2)
            b = torch.ones(1)
            a_updated = torch.select_scatter(a, b, 0, 0)

            In this case though, there is no "functional" op to re-inplace!
            Instead, we'd like to directly remove the select_scatter call.
            We already know from (2) that this is valid,
            because "a" has no later usages in the graph.

            We perform the re-inplacing on the {view}_scatter op like so
            Before:
              a_updated = torch.select_scatter(a, b, args...)
            After:
              a_slice = a.select(args...)
              a_slice.copy_(b)

        (3b) Otherwise, replace the functional op with its inplace variant.
            Before:
              b = foo(a, args...)
            After:
              a.foo_(args...)

    (4) Finally, after converting either:
          Before:
            b = foo(a)
          After:
            foo_(a)
        or
          Before:
            b = {slice}_scatter(a, mutated_slice, args...)
          After:
            slice = {slice}(a, args...)
            slice.copy_(mutated_slice)

        We now need to find all later nodes that use "b" as an argument
        and update them to take in "a" instead.

        Note that for the majority of inplace ops, this isn't actually necessary
        (because most inplace ops return "self" as their output).
        This isn't generally true for all mutable ops though, which is why
        we need to actually replace all of the arguments.

        We also need to update our metadata of Dict[StorageWeakRef, Set[Node]],
        that maps a given tensor storage to the set of all nodes that take in that storage
        as an input.
        Specifically, re-inplacing `b = foo(a)` causes "a" and "b"'s sets to get fused
        together.

    (5) Any "view_inverse/scatter" nodes that were identified as "it's ok to ignore them"
        during step (3) get manually deleted from the graph.
        Their outputs are no longer used, so technically standard DCE would be able
        to do this, but we can no longer run FX's DCE pass now that we have mutable
        ops in the graph.
    """
    # Annotate every node with 'fake_result', 'node_idx' and (for views) 'view_of'
    # metadata by interpreting the graph with the sample args.
    _FunctionalizationMetadataProp(gm).propagate(*sample_args)

    # Useful debug printing
    # def _print(x):
    #     if isinstance(x, FakeTensor):
    #         print(f'fake_result: {StorageWeakRef(x._typed_storage()).cdata}')

    # for n in gm.graph.nodes:
    #     print(n.format_node())
    #     if hasattr(n, 'meta'):
    #         print(f'node_idx: {n.meta["node_idx"]}')
    #     if 'fake_result' in n.meta:
    #         tree_map(_print, n.meta['fake_result'])
    #     if 'view_of' in n.meta:
    #         print(f'view_of: {str(n.meta["view_of"])}')
    #     print()

    # We need to know which nodes correspond to inputs (or their aliases)
    # so we know not to re-inplace them.
    # NOTE: later, we'll need to add an optimization for fully recovering performance
    # on programs that mutate inputs.
    input_storages = {
        StorageWeakRef(
            node.meta['fake_result']._typed_storage()
        ) for node in gm.graph.nodes if node.op == 'placeholder'}


    # We also need to know for a given node, what are all of its aliasing nodes.
    storage_to_nodes: Dict[StorageWeakRef, Set[Node]] = defaultdict(set)
    for n in gm.graph.nodes:
        if 'fake_result' in n.meta:
            # Tree-mapping because some ops can return lists of tensors.
            def _add_to_map(x):
                if isinstance(x, FakeTensor):
                    storage_to_nodes[StorageWeakRef(x._typed_storage())].add(n)
            tree_map(_add_to_map, n.meta['fake_result'])

    # inplace-ify functional ops, subject to the constraints written below.
    all_later_view_inverse_nodes_to_delete = set()
    for idx, node in enumerate(gm.graph.nodes):
        if node.op == 'call_function':

            # Today, the re-inplace pass directly acts on:
            # - functional ops with an inplace variant
            # - {view}_scatter ops that can be potentially removed from the graph.
            # Both of these ops take in tensor first args, so filtering on this condition
            # makes the later code simpler.
            # We should revisit this at some point though, particularly when we also want
            # the reinplacer to be able to handle out= and mutable operators
            # and tensorlist first args (like `_foreach_` ops).
            if not isinstance(node.target, torch._ops.OpOverload):
                continue
            if len(node.target._schema.arguments) < 1:
                continue
            if type(node.target._schema.arguments[0].type) != torch.TensorType:
                continue

            # Step 1a: Check that the self argument we're attempting to reinplace
            # has the same size/stride as the output.
            # For example, we shouldn't try to reinplace torch.add(scalar_tensor, larger_tensor)
            # As it would require resizing scalar_tensor.
            # (We could potentially swizzle this into larger_tensor.add_(scalar_tensor),
            # this is probably an optimization to revisit later).
            self_arg = node.args[0]
            self_flattened, _ = tree_flatten(self_arg.meta['fake_result'])
            node_flattened, _ = tree_flatten(node.meta['fake_result'])
            self_has_wrong_metadata = False
            if len(self_flattened) == len(node_flattened):
                for self_meta, node_meta in zip(self_flattened, node_flattened):
                    if self_meta.numel() != node_meta.numel():
                        self_has_wrong_metadata = True
                    if self_meta.dtype != node_meta.dtype:
                        self_has_wrong_metadata = True
                    # We also cannot re-inplace on tensors that have internal memory overlap.
                    # e.g. torch.ones(1).expand(4, 4).add_(1)
                    if torch._debug_has_internal_overlap(self_meta) == 1:
                        self_has_wrong_metadata = True
            # Here, we (optimistically) assume that a.resize(b) is valid to re-inplace,
            # Since users should never really be calling the functional "torch.ops.aten.resize"
            # op directly in their programs.
            if self_has_wrong_metadata and node.target != torch.ops.aten.resize.default:
                continue

            # Step 1b: ensure that the op we're trying to re-inplace isn't a program input
            self_arg_name = self_arg.name  # (currently unused)
            self_arg_storage = StorageWeakRef(self_arg.meta['fake_result']._typed_storage())
            if self_arg_storage in input_storages:
                # TODO: later, add the optimization for handling `copy_()` calls in the graph.
                continue
            if len([x for x in node.args if x is self_arg]) > 1:
                # Step 1c:
                # Calling stuff like aten.mul_(a, a) isn't guaranteed to be sound,
                # so we prevent re-inplacing in this case.
                continue

            self_arg_storage = StorageWeakRef(self_arg.meta['fake_result']._typed_storage())
            self_aliases = storage_to_nodes[self_arg_storage]

            # First, we find all later usages of any of the aliases of self_arg.
            later_node_usages = _get_all_later_node_usages(self_aliases, node.meta['node_idx'])
            # Then, we check if any of those later usages are actually view_scatter ops
            # that are safe to fully remove.
            later_view_inverse_node_usages = _get_view_inverse_node_usages(later_node_usages, self_aliases)

            # Step 2: Check to see if the input to the op is re-used later in the graph.
            # If not (same goes for its aliases), then this op is safe to re-in place.
            # This is a slightly roundabout way to check that there are no later usages of the current self argument.
            # (later_view_inverse_node_usages corresponds to "view_scatter" nodes that we are allowed to delete)
            can_reinplace = len(later_node_usages - later_view_inverse_node_usages) == 0
            if not can_reinplace:
                continue

            # Step 3a: Special handling for when we see *_scatter operators.
            # When we see an operator like `b = torch.slice_scatter(a, ...)`,
            # instead of trying to "inplace" it into a.slice_scatter_(..._),
            # we would prefer to remove it from the graph entirely,
            # and instead copy_() the slice directly into the larger tensor.
            # See the description of the algorithm for a full example.
            if node.target in _VIEW_INVERSE_MAP and node not in all_later_view_inverse_nodes_to_delete:
                view_op = _VIEW_INVERSE_MAP[node.target]
                # Before:
                #   base_updated = torch.ops.aten.slice_scatter.default(base, mutated_slice, args...)
                # After:
                #   slice = torch.ops.aten.slice.default(base, args...)
                #   slice.copy_(mutated_slice)
                with gm.graph.inserting_before(node):
                    mutated_slice_node = node.args[1]
                    remaining_slice_args = node.args[2:]
                    slice_node = gm.graph.create_node(
                        'call_function', view_op, (self_arg,) + tuple(remaining_slice_args), node.kwargs)
                    copy_node = gm.graph.create_node(
                        'call_function', torch.ops.aten.copy_.default, (slice_node, mutated_slice_node,), {})
                # Add the slice_scatter node to our "nodes to delete" list.
                all_later_view_inverse_nodes_to_delete.add(node)


            else:
                # Step 3b: Check to see if this operator has an inplace variant.
                maybe_inplace_op = _maybe_get_inplace_op(node.target)
                if maybe_inplace_op is None:
                    continue
                # And if so, replace it with its inplace variant.
                node.target = maybe_inplace_op

            # At this point, 'storage_to_nodes' will be stale.
            # Now that we're inplacing `b = foo(a)`, we need to effectively
            # union together the dict values for b and a's storage.
            # Hmm... morally I think we also want to keep the `fake_result` metadata
            # up to date here, but I'm not sure how easy it is to do.
            # Maybe it's fine to wait until the end of the pass to update it.
            curr_node_storage = StorageWeakRef(node.meta['fake_result']._typed_storage())
            storage_to_nodes[self_arg_storage].update(storage_to_nodes[curr_node_storage])
            storage_to_nodes[curr_node_storage].update(storage_to_nodes[self_arg_storage])

            # Need to remember the view_scatter view nodes we found so we can remove them later.
            all_later_view_inverse_nodes_to_delete.update(later_view_inverse_node_usages)

            # Step 4:
            # Now that we've replaced b = a.foo() with a.foo_(),
            # We need to replace any later usages of "b" with "a"
            for old in itertools.chain([node], later_view_inverse_node_usages):
                new = old.args[0]
                nodes_to_update = [n for n in old.users if n.meta['node_idx'] > node.meta['node_idx']]
                for node_to_update in nodes_to_update:
                    new_args = []  # (currently unused)
                    args = node_to_update.args

                    def replace_arg(a):
                        if a == old:
                            return new
                        return a

                    # First, replace usages of "b" with "a"
                    node_to_update.args = tree_map_only(Node, replace_arg, node_to_update.args)
                    node_to_update.kwargs = tree_map_only(Node, replace_arg, node_to_update.kwargs)

                    # Second, update our storage_to_nodes data structure.
                    old_flattened_res, _ = tree_flatten(old.meta['fake_result'])
                    node_flattened_res, _ = tree_flatten(node_to_update.meta['fake_result'])

                    old_res_storage = {
                        StorageWeakRef(
                            x._typed_storage()
                        ) for x in old_flattened_res if isinstance(x, FakeTensor)}
                    node_res_storage = {
                        StorageWeakRef(
                            x._typed_storage()
                        ) for x in node_flattened_res if isinstance(x, FakeTensor)}

                    # This will happen if we're updating a view op, e.g.
                    # e.g. replacing
                    #     x = view(old)
                    #     x = view(new)
                    # When that happens, we need to make sure to keep our
                    # storage mapping up to date.
                    #
                    # We're checking for len(...) == 1 here because all view ops are guaranteed to return either a single tensor,
                    # or multiple tensors that all share the same storage.
                    # We can't just check equality because we might encounter FX nodes that return zero tensor outputs.
                    if len(old_res_storage) == 1 and len(node_res_storage) == 1 and old_res_storage == node_res_storage:
                        new_flattened_res, _ = tree_flatten(new.meta['fake_result'])
                        new_res_storage = {
                            StorageWeakRef(
                                x._typed_storage()
                            ) for x in new_flattened_res if isinstance(x, FakeTensor)}
                        assert len(new_res_storage) == 1
                        (old_ref,) = old_res_storage
                        (new_ref,) = new_res_storage
                        (node_ref,) = node_res_storage
                        # Technically, "old_ref" and all its aliases will remain
                        # in our mapping.
                        # That should be fine though, since we deleted "old"
                        # from the graph at this point.
                        storage_to_nodes[node_ref].update(storage_to_nodes[new_ref])
                        storage_to_nodes[new_ref].update(storage_to_nodes[node_ref])

    # Step 5: delete any _scatter nodes that we de-functionalized
    # Need to take care not to delete any of these nodes until after *all* modifications
    # to the graph are finished.
    for to_delete in all_later_view_inverse_nodes_to_delete:
        gm.graph.erase_node(to_delete)


    gm.recompile()
    return gm
|
llava_next/lib/python3.10/site-packages/torch/fx/proxy.py
ADDED
|
@@ -0,0 +1,559 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import dis
|
| 2 |
+
import copy
|
| 3 |
+
import sys
|
| 4 |
+
import torch
|
| 5 |
+
import inspect
|
| 6 |
+
import operator
|
| 7 |
+
import traceback
|
| 8 |
+
import collections
|
| 9 |
+
|
| 10 |
+
from dataclasses import is_dataclass, fields
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
from .graph import magic_methods, reflectable_magic_methods, Graph
|
| 14 |
+
from typing import Tuple, Dict, OrderedDict, Optional, Iterable, Any, Iterator, Callable
|
| 15 |
+
from .node import Target, Node, Argument, base_types, map_aggregate
|
| 16 |
+
from ._compatibility import compatibility
|
| 17 |
+
from .operator_schemas import check_for_mutable_operation
|
| 18 |
+
import torch.fx.traceback as fx_traceback
|
| 19 |
+
|
| 20 |
+
__all__ = ['TracerBase', 'GraphAppendingTracer', 'TraceError',
|
| 21 |
+
'Proxy', 'Attribute', 'ParameterProxy', 'Scope',
|
| 22 |
+
'ScopeContextManager']
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@compatibility(is_backward_compatible=False)
class Scope:
    """Identifies the module in which a Node was created during tracing.

    A Scope pairs the qualified attribute path of a module (``module_path``)
    with that module's type (``module_type``), so a Graph's Nodes can be
    attributed back to the submodule whose ``forward`` produced them::

        class Sub(torch.nn.Module):
            def forward(self, x):
                # call_method Node created here gets
                # Scope(module_path="sub", module_type=Sub)
                return x.transpose(1, 2)

        class M(torch.nn.Module):
            def __init__(self):
                self.sub = Sub()

            def forward(self, x):
                # call_method Node created here gets
                # Scope(module_path="", module_type=None)
                x = x.transpose(1, 2)
                x = self.sub(x)
                return x
    """

    def __init__(self, module_path: str, module_type: Any):
        super().__init__()
        # Plain data holder: just record both pieces of scope information.
        self.module_path, self.module_type = module_path, module_type
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@compatibility(is_backward_compatible=False)
class ScopeContextManager:
    """Temporarily retarget a mutable ``Scope`` during symbolic tracing.

    On construction, the fields of ``scope`` are overwritten with those of
    ``current_scope`` (e.g. when entering a submodule's ``forward``); on exit
    from the ``with`` block, the previous field values are restored.
    """

    def __init__(
        self,
        scope: Scope,
        current_scope: Scope,
    ):
        super().__init__()
        # Hold on to the shared Scope object we are mutating...
        self._scope = scope
        # ...and snapshot its current state so __exit__ can roll back.
        self._prev_scope = copy.copy(scope)
        # Point the shared Scope at the module we are entering.
        scope.module_path = current_scope.module_path
        scope.module_type = current_scope.module_type

    def __enter__(self):
        # Expose the (already updated) shared Scope to the with-block.
        return self._scope

    def __exit__(self, *args):
        # Restore the snapshot taken in __init__; exceptions propagate
        # (we implicitly return None, i.e. falsy).
        self._scope.module_path = self._prev_scope.module_path
        self._scope.module_type = self._prev_scope.module_type
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
# Node.meta keys that TracerBase.create_node copies from fx_traceback's
# preserved meta onto each newly created node ("stack_trace" is handled
# separately, just before this loop runs).
_COPY_META_FIELDS = ["nn_module_stack", "source_fn", "original_aten", "recompute", "from_node"]
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@compatibility(is_backward_compatible=True)
|
| 90 |
+
class TracerBase:
|
| 91 |
+
graph: Graph
|
| 92 |
+
record_stack_traces : bool = False
|
| 93 |
+
# Feature flag for mutable schema checking
|
| 94 |
+
# Enabled by default in 1.12
|
| 95 |
+
check_mutable_operations : bool = False
|
| 96 |
+
# Feature flag for assert tracing
|
| 97 |
+
trace_asserts : bool = False
|
| 98 |
+
# Feature flag for proxying accesses to buffer values
|
| 99 |
+
proxy_buffer_attributes : bool = False
|
| 100 |
+
|
| 101 |
+
# Name of the function to be traced. It will only be used when
|
| 102 |
+
# ``root`` is an instance of ``nn.Module``
|
| 103 |
+
traced_func_name: str = "forward"
|
| 104 |
+
|
| 105 |
+
# Maps the containing module's name to the operator name
|
| 106 |
+
scope : Scope
|
| 107 |
+
|
| 108 |
+
# Records the module call stack
|
| 109 |
+
module_stack: OrderedDict[str, str]
|
| 110 |
+
|
| 111 |
+
# Mapping of node name to module scope
|
| 112 |
+
node_name_to_scope: Dict[str, Tuple[str, type]]
|
| 113 |
+
|
| 114 |
+
@compatibility(is_backward_compatible=True)
def create_node(self, kind : str, target : Target,
                args : Tuple[Argument, ...], kwargs : Dict[str, Argument], name : Optional[str] = None,
                type_expr : Optional[Any] = None) -> Node:
    """
    Inserts a graph node given target, args, kwargs, and name.

    This method can be overridden to do extra checking, validation, or
    modification of values used in node creation. For example, one might
    want to disallow in-place operations from being recorded.

    Besides creating the node on ``self.graph``, this also records the
    current module scope for the node and, when fx_traceback has preserved
    node meta, copies selected meta fields (and a stack trace / autograd
    sequence number) onto the new node.
    """
    # Optionally reject ops whose schema marks them as mutating (feature-flagged).
    if kind == 'call_function' and self.check_mutable_operations:
        check_for_mutable_operation(target, args, kwargs)

    node = self.graph.create_node(kind, target, args, kwargs, name, type_expr)
    # TODO node_name_to_scope will be depreciated in favor of
    # node.meta['nn_module_stack']
    self.node_name_to_scope[node.name] = (
        self.scope.module_path,
        self.scope.module_type,
    )
    # Optionally set stack trace on the created Node for debugging purposes
    if fx_traceback.has_preserved_node_meta():
        current_meta: Dict[str, Any] = fx_traceback.get_current_meta()

        stack_trace = current_meta.get("stack_trace")
        if stack_trace:
            node.stack_trace = stack_trace
        # Explicitly set the stack_trace, nn_module_stack and source_fn on the node.meta
        # If other meta fields are needed, they can be added here
        for field in _COPY_META_FIELDS:
            if field in current_meta:
                # copy.copy so later mutation of the preserved meta doesn't
                # retroactively change this node's meta (shallow copy only).
                node.meta[field] = copy.copy(current_meta[field])

        # Here we decrement to account for the sequence_nr having
        # just been incremented while tracing this lowered aten op.
        new_seq_nr = torch.autograd._get_sequence_nr() - 1
        # The sequence_nr increments every time a new autograd Node
        # is created. During the FWD pass we store the sequence_nr
        # corresponding to the last autograd Node created on this fx
        # node's meta. A single aten op can create multiple autograd
        # nodes as is the case with in-place foreach ops. During the
        # BWD pass we retrieve the sequence_nr stored on the current
        # executing autograd Node. See NOTE [ Sequence Number ].
        if current_meta.get("in_grad_fn", False):
            new_seq_nr = current_meta["grad_fn_seq_nr"]
        node.meta["seq_nr"] = new_seq_nr

    elif self.module_stack:
        # No preserved meta: fall back to recording the tracer's live module
        # call stack (shallow-copied so later pushes/pops don't affect it).
        node.meta['nn_module_stack'] = copy.copy(self.module_stack)
    return node
|
| 165 |
+
|
| 166 |
+
@compatibility(is_backward_compatible=True)
|
| 167 |
+
def proxy(self, node: Node) -> 'Proxy':
|
| 168 |
+
return Proxy(node, self)
|
| 169 |
+
|
| 170 |
+
@compatibility(is_backward_compatible=True)
|
| 171 |
+
def create_proxy(self, kind: str, target: Target, args: Tuple[Any, ...], kwargs: Dict[str, Any],
|
| 172 |
+
name: Optional[str] = None, type_expr : Optional[Any] = None,
|
| 173 |
+
proxy_factory_fn: Callable[[Node], 'Proxy'] = None):
|
| 174 |
+
'''
|
| 175 |
+
Create a Node from the given arguments, then return the Node
|
| 176 |
+
wrapped in a Proxy object.
|
| 177 |
+
|
| 178 |
+
If kind = 'placeholder', then we're creating a Node that
|
| 179 |
+
represents the parameter of a function. If we need to encode
|
| 180 |
+
a default parameter, we use the ``args`` tuple. ``args`` is
|
| 181 |
+
otherwise empty for ``placeholder`` Nodes.
|
| 182 |
+
'''
|
| 183 |
+
|
| 184 |
+
args_ = self.create_arg(args)
|
| 185 |
+
kwargs_ = self.create_arg(kwargs)
|
| 186 |
+
assert isinstance(args_, tuple)
|
| 187 |
+
assert isinstance(kwargs_, dict)
|
| 188 |
+
|
| 189 |
+
node = self.create_node(kind, target, args_, kwargs_, name, type_expr)
|
| 190 |
+
|
| 191 |
+
if not proxy_factory_fn:
|
| 192 |
+
proxy = self.proxy(node)
|
| 193 |
+
else:
|
| 194 |
+
proxy = proxy_factory_fn(node)
|
| 195 |
+
|
| 196 |
+
if self.record_stack_traces and not proxy.node.stack_trace:
|
| 197 |
+
user_frame = self._find_user_frame()
|
| 198 |
+
if user_frame:
|
| 199 |
+
summary = traceback.extract_stack(user_frame)
|
| 200 |
+
tb_lines = summary.format()
|
| 201 |
+
# stack_trace would have innermost frame at the bottom
|
| 202 |
+
proxy.node.stack_trace = ''.join(tb_lines)
|
| 203 |
+
|
| 204 |
+
return proxy
|
| 205 |
+
|
| 206 |
+
def _find_user_frame(self):
|
| 207 |
+
"""
|
| 208 |
+
Find the Python stack frame executing the user code during
|
| 209 |
+
symbolic tracing.
|
| 210 |
+
"""
|
| 211 |
+
# We have to do a little dance here. Basically, walk up the callstack and
|
| 212 |
+
# record the first frame not in the pytorch source. This is the frame executing
|
| 213 |
+
# the user code during tracing.
|
| 214 |
+
frame = inspect.currentframe()
|
| 215 |
+
|
| 216 |
+
pt_files = ['torch/fx/proxy.py',
|
| 217 |
+
'torch/fx/_symbolic_trace.py',
|
| 218 |
+
'torch/fx/experimental/proxy_tensor.py',
|
| 219 |
+
'torch/_ops.py',
|
| 220 |
+
'torch/_tensor.py',
|
| 221 |
+
'torch/utils/_python_dispatch.py',
|
| 222 |
+
'torch/_prims_common/wrappers.py',
|
| 223 |
+
'torch/_refs/__init__.py',
|
| 224 |
+
'torch/_refs/nn/functional/__init__.py',
|
| 225 |
+
'torch/utils/_stats.py',
|
| 226 |
+
]
|
| 227 |
+
while frame:
|
| 228 |
+
frame = frame.f_back
|
| 229 |
+
if frame and all(not frame.f_code.co_filename.endswith(file) for file in pt_files):
|
| 230 |
+
break
|
| 231 |
+
|
| 232 |
+
if not frame:
|
| 233 |
+
return None
|
| 234 |
+
|
| 235 |
+
return frame
|
| 236 |
+
|
| 237 |
+
@compatibility(is_backward_compatible=True)
|
| 238 |
+
def create_arg(self, a: Any) -> Argument:
|
| 239 |
+
"""
|
| 240 |
+
A method that lowers the objects seen as arguments during symbolic evaluation
|
| 241 |
+
into Argument types that can be stored in IR.
|
| 242 |
+
|
| 243 |
+
Can be override to support more trace-specific types.
|
| 244 |
+
"""
|
| 245 |
+
if not isinstance(a, Proxy) and hasattr(a, '__fx_create_arg__'):
|
| 246 |
+
return a.__fx_create_arg__(self)
|
| 247 |
+
# aggregates
|
| 248 |
+
elif isinstance(a, tuple) and hasattr(a, '_fields'):
|
| 249 |
+
# NamedTuple constructors don't seem to like getting a generator
|
| 250 |
+
# expression as an argument to their constructor, so build this
|
| 251 |
+
# intermediate tuple and unpack it into the NamedTuple constructor
|
| 252 |
+
args = tuple(self.create_arg(elem) for elem in a)
|
| 253 |
+
return type(a)(*args) # type: ignore[arg-type]
|
| 254 |
+
elif isinstance(a, (tuple, list)):
|
| 255 |
+
return type(a)(self.create_arg(elem) for elem in a)
|
| 256 |
+
elif isinstance(a, dict):
|
| 257 |
+
r = {}
|
| 258 |
+
for k, v in a.items():
|
| 259 |
+
# Check for invalid dict keys. We do not want a Proxy to appear
|
| 260 |
+
# anywhere within the key. Since keys can be collection types,
|
| 261 |
+
# we iterate through the key with map_aggregate
|
| 262 |
+
k = self.create_arg(k)
|
| 263 |
+
|
| 264 |
+
def no_node(arg):
|
| 265 |
+
if isinstance(arg, Node):
|
| 266 |
+
raise RuntimeError("Keys for dictionaries used as an argument cannot contain a "
|
| 267 |
+
f"Node. Got key: {k}")
|
| 268 |
+
map_aggregate(k, no_node)
|
| 269 |
+
|
| 270 |
+
r[k] = self.create_arg(v)
|
| 271 |
+
return r
|
| 272 |
+
elif isinstance(a, slice):
|
| 273 |
+
return slice(self.create_arg(a.start), self.create_arg(a.stop), self.create_arg(a.step))
|
| 274 |
+
|
| 275 |
+
elif isinstance(a, range):
|
| 276 |
+
return range(self.create_arg(a.start), self.create_arg(a.stop), self.create_arg(a.step))
|
| 277 |
+
|
| 278 |
+
elif isinstance(a, torch._ops.OpOverload):
|
| 279 |
+
return a
|
| 280 |
+
|
| 281 |
+
if isinstance(a, Proxy):
|
| 282 |
+
# base case: we unwrap the Proxy object
|
| 283 |
+
return a.node
|
| 284 |
+
|
| 285 |
+
if is_dataclass(a):
|
| 286 |
+
kwargs = {field.name: self.create_arg(getattr(a, field.name)) for field in fields(a)}
|
| 287 |
+
return self.create_node("call_function", a.__class__, (), kwargs)
|
| 288 |
+
|
| 289 |
+
elif isinstance(a, base_types) or a is None or a is ...:
|
| 290 |
+
return a
|
| 291 |
+
raise NotImplementedError(f"argument of type: {type(a)}")
|
| 292 |
+
|
| 293 |
+
@compatibility(is_backward_compatible=True)
|
| 294 |
+
def to_bool(self, obj: 'Proxy') -> bool:
|
| 295 |
+
"""Called when a proxy object is being converted to a boolean, such as
|
| 296 |
+
when used in control flow. Normally we don't know what to do because
|
| 297 |
+
we don't know the value of the proxy, but a custom tracer can attach more
|
| 298 |
+
information to the graph node using create_node and can choose to return a value.
|
| 299 |
+
"""
|
| 300 |
+
raise TraceError('symbolically traced variables cannot be used as inputs to control flow')
|
| 301 |
+
|
| 302 |
+
@compatibility(is_backward_compatible=True)
|
| 303 |
+
def iter(self, obj: 'Proxy') -> Iterator:
|
| 304 |
+
"""Called when a proxy object is being iterated over, such as
|
| 305 |
+
when used in control flow. Normally we don't know what to do because
|
| 306 |
+
we don't know the value of the proxy, but a custom tracer can attach more
|
| 307 |
+
information to the graph node using create_node and can choose to return an iterator.
|
| 308 |
+
"""
|
| 309 |
+
raise TraceError('Proxy object cannot be iterated. This can be '
|
| 310 |
+
'attempted when the Proxy is used in a loop or'
|
| 311 |
+
' as a *args or **kwargs function argument. '
|
| 312 |
+
'See the torch.fx docs on pytorch.org for a '
|
| 313 |
+
'more detailed explanation of what types of '
|
| 314 |
+
'control flow can be traced, and check out the'
|
| 315 |
+
' Proxy docstring for help troubleshooting '
|
| 316 |
+
'Proxy iteration errors')
|
| 317 |
+
|
| 318 |
+
@compatibility(is_backward_compatible=True)
|
| 319 |
+
def keys(self, obj: 'Proxy') -> Any:
|
| 320 |
+
"""Called when a proxy object is has the keys() method called.
|
| 321 |
+
This is what happens when ** is called on a proxy. This should return an
|
| 322 |
+
iterator it ** is suppose to work in your custom tracer.
|
| 323 |
+
"""
|
| 324 |
+
return Attribute(obj, 'keys')()
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
# used in Proxy object when just appending to the graph while not tracing.
|
| 328 |
+
@compatibility(is_backward_compatible=True)
|
| 329 |
+
class GraphAppendingTracer(TracerBase):
|
| 330 |
+
def __init__(self, graph: Graph):
|
| 331 |
+
super().__init__()
|
| 332 |
+
self.graph = graph
|
| 333 |
+
self.scope = Scope("", None)
|
| 334 |
+
self.module_stack = collections.OrderedDict()
|
| 335 |
+
self.node_name_to_scope = {}
|
| 336 |
+
|
| 337 |
+
@compatibility(is_backward_compatible=False)
|
| 338 |
+
def assert_fn(x):
|
| 339 |
+
assert x
|
| 340 |
+
|
| 341 |
+
@compatibility(is_backward_compatible=True)
|
| 342 |
+
class TraceError(ValueError):
|
| 343 |
+
pass
|
| 344 |
+
|
| 345 |
+
@compatibility(is_backward_compatible=True)
|
| 346 |
+
class Proxy:
|
| 347 |
+
"""
|
| 348 |
+
``Proxy`` objects are ``Node`` wrappers that flow through the
|
| 349 |
+
program during symbolic tracing and record all the operations
|
| 350 |
+
(``torch`` function calls, method calls, operators) that they touch
|
| 351 |
+
into the growing FX Graph.
|
| 352 |
+
|
| 353 |
+
If you're doing graph transforms, you can wrap your own ``Proxy``
|
| 354 |
+
method around a raw ``Node`` so that you can use the overloaded
|
| 355 |
+
operators to add additional things to a ``Graph``.
|
| 356 |
+
|
| 357 |
+
``Proxy`` objects cannot be iterated. In other words, the symbolic
|
| 358 |
+
tracer will throw an error if a ``Proxy`` is used in a loop or as
|
| 359 |
+
an ``*args``/``**kwargs`` function argument.
|
| 360 |
+
|
| 361 |
+
There are two main ways around this:
|
| 362 |
+
1. Factor out the untraceable logic into a top-level function and
|
| 363 |
+
use ``fx.wrap`` on it.
|
| 364 |
+
2. If the control flow is static (i.e. the loop trip count is
|
| 365 |
+
based on some hyperparameter), the code can be kept in its original
|
| 366 |
+
position and refactored into something like::
|
| 367 |
+
|
| 368 |
+
for i in range(self.some_hyperparameter):
|
| 369 |
+
indexed_item = proxied_value[i]
|
| 370 |
+
|
| 371 |
+
For a more detailed description into the Proxy internals, check out
|
| 372 |
+
the "Proxy" section in `torch/fx/OVERVIEW.md`
|
| 373 |
+
"""
|
| 374 |
+
|
| 375 |
+
@compatibility(is_backward_compatible=True)
|
| 376 |
+
def __init__(self, node: Node, tracer: 'Optional[TracerBase]' = None):
|
| 377 |
+
if tracer is None:
|
| 378 |
+
# This allows you to create a Proxy object around a raw Node
|
| 379 |
+
tracer = GraphAppendingTracer(node.graph)
|
| 380 |
+
self.tracer = tracer
|
| 381 |
+
self.node = node
|
| 382 |
+
|
| 383 |
+
def __repr__(self) -> str:
|
| 384 |
+
return f'Proxy({self.node.name})'
|
| 385 |
+
|
| 386 |
+
def __getattr__(self, k) -> 'Attribute':
|
| 387 |
+
# note: not added to the graph yet, if this is a method call
|
| 388 |
+
# we peephole optimize to the method invocation
|
| 389 |
+
return Attribute(self, k)
|
| 390 |
+
|
| 391 |
+
def __call__(self, *args, **kwargs) -> 'Proxy':
|
| 392 |
+
return self.tracer.create_proxy('call_method', '__call__', (self,) + args, kwargs)
|
| 393 |
+
|
| 394 |
+
def __iter__(self) -> Iterable['Proxy']:
|
| 395 |
+
frame = inspect.currentframe()
|
| 396 |
+
assert frame is not None
|
| 397 |
+
calling_frame = frame.f_back
|
| 398 |
+
assert calling_frame is not None
|
| 399 |
+
inst_list = list(dis.get_instructions(calling_frame.f_code))
|
| 400 |
+
if sys.version_info >= (3, 11):
|
| 401 |
+
from bisect import bisect_left
|
| 402 |
+
inst_idx = bisect_left(inst_list, calling_frame.f_lasti, key=lambda x: x.offset)
|
| 403 |
+
else:
|
| 404 |
+
inst_idx = calling_frame.f_lasti // 2
|
| 405 |
+
inst = inst_list[inst_idx]
|
| 406 |
+
if inst.opname == 'UNPACK_SEQUENCE':
|
| 407 |
+
return (self[i] for i in range(inst.argval)) # type: ignore[index]
|
| 408 |
+
|
| 409 |
+
return self.tracer.iter(self)
|
| 410 |
+
|
| 411 |
+
def __bool__(self) -> bool:
|
| 412 |
+
if self.tracer.trace_asserts:
|
| 413 |
+
# check if this boolean is used in an assertion, bytecode pattern for assertions
|
| 414 |
+
# is pretty stable for Python 3.7--3.9
|
| 415 |
+
frame = inspect.currentframe()
|
| 416 |
+
assert frame is not None
|
| 417 |
+
calling_frame = frame.f_back
|
| 418 |
+
assert calling_frame is not None
|
| 419 |
+
insts = list(dis.get_instructions(calling_frame.f_code))
|
| 420 |
+
if sys.version_info >= (3, 11):
|
| 421 |
+
from bisect import bisect_left
|
| 422 |
+
cur = bisect_left(insts, calling_frame.f_lasti, key=lambda x: x.offset)
|
| 423 |
+
else:
|
| 424 |
+
cur = calling_frame.f_lasti // 2
|
| 425 |
+
inst = insts[cur]
|
| 426 |
+
|
| 427 |
+
if inst.opname == 'POP_JUMP_IF_TRUE':
|
| 428 |
+
first = insts[cur + 1]
|
| 429 |
+
assert inst.arg is not None
|
| 430 |
+
last = insts[inst.arg // 2 - 1]
|
| 431 |
+
starts_with_assert = (first.opname == 'LOAD_GLOBAL' and first.argval == 'AssertionError'
|
| 432 |
+
or first.opname == 'LOAD_ASSERTION_ERROR')
|
| 433 |
+
if starts_with_assert and last.opname == 'RAISE_VARARGS':
|
| 434 |
+
self.tracer.create_proxy('call_function', assert_fn, (self,), {})
|
| 435 |
+
return True
|
| 436 |
+
|
| 437 |
+
return self.tracer.to_bool(self)
|
| 438 |
+
|
| 439 |
+
@compatibility(is_backward_compatible=True)
|
| 440 |
+
def keys(self):
|
| 441 |
+
return self.tracer.keys(self)
|
| 442 |
+
|
| 443 |
+
def __len__(self):
|
| 444 |
+
raise RuntimeError("'len' is not supported in symbolic tracing by default. If you want "
|
| 445 |
+
"this call to be recorded, please call torch.fx.wrap('len') at "
|
| 446 |
+
"module scope")
|
| 447 |
+
|
| 448 |
+
@classmethod
|
| 449 |
+
def __torch_function__(cls, orig_method, types, args=None, kwargs=None):
|
| 450 |
+
args = args if args else ()
|
| 451 |
+
kwargs = kwargs if kwargs else {}
|
| 452 |
+
|
| 453 |
+
tracers : Dict[Any, None] = {}
|
| 454 |
+
|
| 455 |
+
def find_tracer(a):
|
| 456 |
+
if isinstance(a, cls):
|
| 457 |
+
tracers[a.tracer] = None
|
| 458 |
+
torch.fx.node.map_aggregate(args, find_tracer)
|
| 459 |
+
torch.fx.node.map_aggregate(kwargs, find_tracer)
|
| 460 |
+
|
| 461 |
+
if len(tracers) > 1:
|
| 462 |
+
raise RuntimeError(f'Found multiple different tracers {list(tracers.keys())} while '
|
| 463 |
+
f'trying to trace operations {orig_method}')
|
| 464 |
+
tracer = next(iter(tracers.keys()))
|
| 465 |
+
|
| 466 |
+
if isinstance(orig_method, torch._C.ScriptMethod):
|
| 467 |
+
args = (orig_method.owner,) + args
|
| 468 |
+
return tracer.create_proxy('call_method', orig_method.name, args, kwargs)
|
| 469 |
+
if torch.overrides.is_tensor_method_or_property(orig_method):
|
| 470 |
+
return tracer.create_proxy('call_method', orig_method.__name__, args, kwargs)
|
| 471 |
+
else:
|
| 472 |
+
if isinstance(orig_method, torch._ops.HigherOrderOperator):
|
| 473 |
+
# TODO: Define how to symbolically trace HigherOrderOperators
|
| 474 |
+
raise RuntimeError("Unable to symbolically trace HigherOrderOperators")
|
| 475 |
+
return tracer.create_proxy('call_function', orig_method, args, kwargs,
|
| 476 |
+
name=tracer.graph._target_to_str(orig_method.__name__))
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
@compatibility(is_backward_compatible=True)
|
| 480 |
+
class Attribute(Proxy):
|
| 481 |
+
@compatibility(is_backward_compatible=True)
|
| 482 |
+
def __init__(self, root: Proxy, attr: str):
|
| 483 |
+
self.root = root
|
| 484 |
+
self.attr = attr
|
| 485 |
+
self.tracer = root.tracer
|
| 486 |
+
self._node: Optional[Node] = None
|
| 487 |
+
|
| 488 |
+
@property
|
| 489 |
+
def node(self):
|
| 490 |
+
# the node for attributes is added lazily, since most will just be method calls
|
| 491 |
+
# which do not rely on the getitem call
|
| 492 |
+
if self._node is None:
|
| 493 |
+
self._node = self.tracer.create_proxy('call_function', getattr, (self.root, self.attr), {}).node
|
| 494 |
+
return self._node
|
| 495 |
+
|
| 496 |
+
def __call__(self, *args, **kwargs):
|
| 497 |
+
return self.tracer.create_proxy('call_method', self.attr, (self.root,) + args, kwargs)
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
@compatibility(is_backward_compatible=False)
|
| 501 |
+
class ParameterProxy(Proxy):
|
| 502 |
+
"""
|
| 503 |
+
A special proxy which lets "shape", "size", "dim", and a few other
|
| 504 |
+
attribute accesses pass through to the underlying module parameter object,
|
| 505 |
+
so that conditional tests on these attributes will not throw exception during tracing
|
| 506 |
+
"""
|
| 507 |
+
def __init__(self, tracer: TracerBase, node: Node, name, param):
|
| 508 |
+
super().__init__(node, tracer)
|
| 509 |
+
assert(isinstance(param, torch.nn.Parameter))
|
| 510 |
+
self.param = param
|
| 511 |
+
self.name = name
|
| 512 |
+
|
| 513 |
+
def __repr__(self) -> str:
|
| 514 |
+
return f'ParameterProxy({self.name})'
|
| 515 |
+
|
| 516 |
+
@property
|
| 517 |
+
def shape(self):
|
| 518 |
+
return self.param.shape
|
| 519 |
+
|
| 520 |
+
def size(self):
|
| 521 |
+
return self.param.size()
|
| 522 |
+
|
| 523 |
+
def dim(self):
|
| 524 |
+
return self.param.dim()
|
| 525 |
+
|
| 526 |
+
@property
|
| 527 |
+
def ndim(self):
|
| 528 |
+
return self.param.ndim
|
| 529 |
+
|
| 530 |
+
def numel(self):
|
| 531 |
+
return self.param.numel()
|
| 532 |
+
|
| 533 |
+
def nelement(self):
|
| 534 |
+
return self.param.nelement()
|
| 535 |
+
|
| 536 |
+
|
| 537 |
+
for method in magic_methods:
|
| 538 |
+
def _scope(method):
|
| 539 |
+
def impl(*args, **kwargs):
|
| 540 |
+
tracer = args[0].tracer
|
| 541 |
+
target = getattr(operator, method)
|
| 542 |
+
return tracer.create_proxy('call_function', target, args, kwargs)
|
| 543 |
+
impl.__name__ = method
|
| 544 |
+
as_magic = f'__{method.strip("_")}__'
|
| 545 |
+
setattr(Proxy, as_magic, impl)
|
| 546 |
+
_scope(method)
|
| 547 |
+
|
| 548 |
+
def _define_reflectable(orig_method_name):
|
| 549 |
+
method_name = f'__r{orig_method_name.strip("_")}__'
|
| 550 |
+
|
| 551 |
+
def impl(self, rhs):
|
| 552 |
+
target = getattr(operator, orig_method_name)
|
| 553 |
+
return self.tracer.create_proxy('call_function', target, (rhs, self), {})
|
| 554 |
+
impl.__name__ = method_name
|
| 555 |
+
impl.__qualname__ = method_name
|
| 556 |
+
setattr(Proxy, method_name, impl)
|
| 557 |
+
|
| 558 |
+
for orig_method_name in reflectable_magic_methods:
|
| 559 |
+
_define_reflectable(orig_method_name)
|
llava_next/lib/python3.10/site-packages/torch/fx/subgraph_rewriter.py
ADDED
|
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .graph_module import GraphModule
|
| 2 |
+
from .graph import Graph
|
| 3 |
+
from .node import Node
|
| 4 |
+
from ._symbolic_trace import symbolic_trace
|
| 5 |
+
from ._compatibility import compatibility
|
| 6 |
+
|
| 7 |
+
import copy
|
| 8 |
+
from dataclasses import dataclass
|
| 9 |
+
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Union
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
__all__ = ['Match', 'replace_pattern', 'replace_pattern_with_filters', "ReplacedPatterns"]
|
| 13 |
+
|
| 14 |
+
@compatibility(is_backward_compatible=True)
|
| 15 |
+
class Match(NamedTuple):
|
| 16 |
+
# Node from which the match was found
|
| 17 |
+
anchor: Node
|
| 18 |
+
# Maps nodes in the pattern subgraph to nodes in the larger graph
|
| 19 |
+
nodes_map: Dict[Node, Node]
|
| 20 |
+
|
| 21 |
+
@compatibility(is_backward_compatible=False)
|
| 22 |
+
@dataclass
|
| 23 |
+
class ReplacedPatterns:
|
| 24 |
+
# Node from which the match was found
|
| 25 |
+
anchor: Node
|
| 26 |
+
# Maps nodes in the pattern subgraph to nodes in the larger graph
|
| 27 |
+
nodes_map: Dict[Node, Node]
|
| 28 |
+
# List of nodes that were added into the graph
|
| 29 |
+
replacements: List[Node]
|
| 30 |
+
|
| 31 |
+
def _replace_attributes(gm: GraphModule, replacement: torch.nn.Module) -> None:
|
| 32 |
+
gm.delete_all_unused_submodules()
|
| 33 |
+
|
| 34 |
+
if isinstance(replacement, GraphModule):
|
| 35 |
+
replacement.graph.lint()
|
| 36 |
+
|
| 37 |
+
def try_get_attr(gm: torch.nn.Module, target: str) -> Optional[Any]:
|
| 38 |
+
module_path, _, attr_name = target.rpartition(".")
|
| 39 |
+
mod: torch.nn.Module = gm.get_submodule(module_path)
|
| 40 |
+
attr = getattr(mod, attr_name, None)
|
| 41 |
+
return attr
|
| 42 |
+
|
| 43 |
+
for node in gm.graph.nodes:
|
| 44 |
+
if node.op == "call_module" or node.op == "get_attr":
|
| 45 |
+
|
| 46 |
+
gm_attr = try_get_attr(gm, node.target)
|
| 47 |
+
replacement_attr = try_get_attr(replacement, node.target)
|
| 48 |
+
|
| 49 |
+
# CASE 1: This target already exists as an attribute in our
|
| 50 |
+
# result GraphModule. Whether or not it exists in
|
| 51 |
+
# `replacement`, the existing submodule takes precedence.
|
| 52 |
+
if gm_attr is not None:
|
| 53 |
+
continue
|
| 54 |
+
|
| 55 |
+
# CASE 2: The target exists as an attribute in `replacement`
|
| 56 |
+
# only, so we need to copy it over.
|
| 57 |
+
elif replacement_attr is not None:
|
| 58 |
+
new_attr = copy.deepcopy(replacement_attr)
|
| 59 |
+
if isinstance(replacement_attr, torch.nn.Module):
|
| 60 |
+
gm.add_submodule(node.target, new_attr)
|
| 61 |
+
else:
|
| 62 |
+
setattr(gm, node.target, new_attr)
|
| 63 |
+
|
| 64 |
+
# CASE 3: The target doesn't exist as an attribute in `gm`
|
| 65 |
+
# or `replacement`
|
| 66 |
+
else:
|
| 67 |
+
raise RuntimeError("Attempted to create a \"", node.op,
|
| 68 |
+
"\" node during subgraph rewriting "
|
| 69 |
+
f"with target {node.target}, but "
|
| 70 |
+
"the referenced attribute does not "
|
| 71 |
+
"exist in the replacement GraphModule")
|
| 72 |
+
|
| 73 |
+
gm.graph.lint()
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
@compatibility(is_backward_compatible=True)
|
| 77 |
+
def replace_pattern(
|
| 78 |
+
gm: GraphModule,
|
| 79 |
+
pattern: Union[Callable, GraphModule],
|
| 80 |
+
replacement: Union[Callable, GraphModule]
|
| 81 |
+
) -> List[Match]:
|
| 82 |
+
"""
|
| 83 |
+
Matches all possible non-overlapping sets of operators and their
|
| 84 |
+
data dependencies (``pattern``) in the Graph of a GraphModule
|
| 85 |
+
(``gm``), then replaces each of these matched subgraphs with another
|
| 86 |
+
subgraph (``replacement``).
|
| 87 |
+
|
| 88 |
+
Args:
|
| 89 |
+
``gm``: The GraphModule that wraps the Graph to operate on
|
| 90 |
+
``pattern``: The subgraph to match in ``gm`` for replacement
|
| 91 |
+
``replacement``: The subgraph to replace ``pattern`` with
|
| 92 |
+
|
| 93 |
+
Returns:
|
| 94 |
+
List[Match]: A list of ``Match`` objects representing the places
|
| 95 |
+
in the original graph that ``pattern`` was matched to. The list
|
| 96 |
+
is empty if there are no matches. ``Match`` is defined as:
|
| 97 |
+
|
| 98 |
+
.. code-block:: python
|
| 99 |
+
|
| 100 |
+
class Match(NamedTuple):
|
| 101 |
+
# Node from which the match was found
|
| 102 |
+
anchor: Node
|
| 103 |
+
# Maps nodes in the pattern subgraph to nodes in the larger graph
|
| 104 |
+
nodes_map: Dict[Node, Node]
|
| 105 |
+
|
| 106 |
+
Examples:
|
| 107 |
+
|
| 108 |
+
.. code-block:: python
|
| 109 |
+
|
| 110 |
+
import torch
|
| 111 |
+
from torch.fx import symbolic_trace, subgraph_rewriter
|
| 112 |
+
|
| 113 |
+
class M(torch.nn.Module):
|
| 114 |
+
def __init__(self):
|
| 115 |
+
super().__init__()
|
| 116 |
+
|
| 117 |
+
def forward(self, x, w1, w2):
|
| 118 |
+
m1 = torch.cat([w1, w2]).sum()
|
| 119 |
+
m2 = torch.cat([w1, w2]).sum()
|
| 120 |
+
return x + torch.max(m1) + torch.max(m2)
|
| 121 |
+
|
| 122 |
+
def pattern(w1, w2):
|
| 123 |
+
return torch.cat([w1, w2]).sum()
|
| 124 |
+
|
| 125 |
+
def replacement(w1, w2):
|
| 126 |
+
return torch.stack([w1, w2])
|
| 127 |
+
|
| 128 |
+
traced_module = symbolic_trace(M())
|
| 129 |
+
|
| 130 |
+
subgraph_rewriter.replace_pattern(traced_module, pattern, replacement)
|
| 131 |
+
|
| 132 |
+
The above code will first match ``pattern`` in the ``forward``
|
| 133 |
+
method of ``traced_module``. Pattern-matching is done based on
|
| 134 |
+
use-def relationships, not node names. For example, if you had
|
| 135 |
+
``p = torch.cat([a, b])`` in ``pattern``, you could match
|
| 136 |
+
``m = torch.cat([a, b])`` in the original ``forward`` function,
|
| 137 |
+
despite the variable names being different (``p`` vs ``m``).
|
| 138 |
+
|
| 139 |
+
The ``return`` statement in ``pattern`` is matched based on its
|
| 140 |
+
value only; it may or may not match to the ``return`` statement in
|
| 141 |
+
the larger graph. In other words, the pattern doesn't have to extend
|
| 142 |
+
to the end of the larger graph.
|
| 143 |
+
|
| 144 |
+
When the pattern is matched, it will be removed from the larger
|
| 145 |
+
function and replaced by ``replacement``. If there are multiple
|
| 146 |
+
matches for ``pattern`` in the larger function, each non-overlapping
|
| 147 |
+
match will be replaced. In the case of a match overlap, the first
|
| 148 |
+
found match in the set of overlapping matches will be replaced.
|
| 149 |
+
("First" here being defined as the first in a topological ordering
|
| 150 |
+
of the Nodes' use-def relationships. In most cases, the first Node
|
| 151 |
+
is the parameter that appears directly after ``self``, while the
|
| 152 |
+
last Node is whatever the function returns.)
|
| 153 |
+
|
| 154 |
+
One important thing to note is that the parameters of the
|
| 155 |
+
``pattern`` Callable must be used in the Callable itself,
|
| 156 |
+
and the parameters of the ``replacement`` Callable must match
|
| 157 |
+
the pattern. The first rule is why, in the above code block, the
|
| 158 |
+
``forward`` function has parameters ``x, w1, w2``, but the
|
| 159 |
+
``pattern`` function only has parameters ``w1, w2``. ``pattern``
|
| 160 |
+
doesn't use ``x``, so it shouldn't specify ``x`` as a parameter.
|
| 161 |
+
As an example of the second rule, consider replacing
|
| 162 |
+
|
| 163 |
+
.. code-block:: python
|
| 164 |
+
|
| 165 |
+
def pattern(x, y):
|
| 166 |
+
return torch.neg(x) + torch.relu(y)
|
| 167 |
+
|
| 168 |
+
with
|
| 169 |
+
|
| 170 |
+
.. code-block:: python
|
| 171 |
+
|
| 172 |
+
def replacement(x, y):
|
| 173 |
+
return torch.relu(x)
|
| 174 |
+
|
| 175 |
+
In this case, ``replacement`` needs the same number of parameters
|
| 176 |
+
as ``pattern`` (both ``x`` and ``y``), even though the parameter
|
| 177 |
+
``y`` isn't used in ``replacement``.
|
| 178 |
+
|
| 179 |
+
After calling ``subgraph_rewriter.replace_pattern``, the generated
|
| 180 |
+
Python code looks like this:
|
| 181 |
+
|
| 182 |
+
.. code-block:: python
|
| 183 |
+
|
| 184 |
+
def forward(self, x, w1, w2):
|
| 185 |
+
stack_1 = torch.stack([w1, w2])
|
| 186 |
+
sum_1 = stack_1.sum()
|
| 187 |
+
stack_2 = torch.stack([w1, w2])
|
| 188 |
+
sum_2 = stack_2.sum()
|
| 189 |
+
max_1 = torch.max(sum_1)
|
| 190 |
+
add_1 = x + max_1
|
| 191 |
+
max_2 = torch.max(sum_2)
|
| 192 |
+
add_2 = add_1 + max_2
|
| 193 |
+
return add_2
|
| 194 |
+
"""
|
| 195 |
+
match_and_replacements = _replace_pattern(gm, pattern, replacement)
|
| 196 |
+
return [Match(anchor=m.anchor, nodes_map=m.nodes_map) for m in match_and_replacements]
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
# Experimental API, not backward compatible
|
| 200 |
+
@compatibility(is_backward_compatible=False)
|
| 201 |
+
def replace_pattern_with_filters(
|
| 202 |
+
gm: GraphModule,
|
| 203 |
+
pattern: Union[Callable, GraphModule],
|
| 204 |
+
replacement: Union[Callable, GraphModule],
|
| 205 |
+
match_filters: Optional[List[Callable[["InternalMatch", Graph, Graph], bool]]] = None, # type: ignore[name-defined]
|
| 206 |
+
ignore_literals: bool = False,
|
| 207 |
+
) -> List[ReplacedPatterns]:
|
| 208 |
+
"""
|
| 209 |
+
See replace_pattern for documentation. This function is an overload with an additional match_filter argument.
|
| 210 |
+
|
| 211 |
+
Args:
|
| 212 |
+
``match_filters``: A list of functions that take in
|
| 213 |
+
(match: InternalMatch, original_graph: Graph, pattern_graph: Graph) and return a boolean indicating
|
| 214 |
+
whether the match satisfies the condition.
|
| 215 |
+
See matcher_utils.py for definition of InternalMatch.
|
| 216 |
+
"""
|
| 217 |
+
|
| 218 |
+
return _replace_pattern(gm, pattern, replacement, match_filters, ignore_literals)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def _replace_pattern(
    gm: GraphModule,
    pattern: Union[Callable, GraphModule],
    replacement: Union[Callable, GraphModule],
    match_filters: Optional[List[Callable[["InternalMatch", Graph, Graph], bool]]] = None,  # type: ignore[name-defined]
    ignore_literals: bool = False,
) -> List[ReplacedPatterns]:
    """
    Shared implementation behind ``replace_pattern`` and
    ``replace_pattern_with_filters``: finds every non-overlapping occurrence
    of ``pattern`` inside ``gm`` and splices a copy of ``replacement`` in
    its place, then recompiles ``gm``.

    Returns one ReplacedPatterns record per applied match (anchor node,
    pattern-to-original node map, and the list of newly inserted nodes).
    """

    # Imported locally to avoid an import cycle with matcher_utils.
    from torch.fx.passes.utils.matcher_utils import SubgraphMatcher, InternalMatch

    if match_filters is None:
        match_filters = []

    # Get the graphs for `gm`, `pattern`, `replacement`
    original_graph: Graph = gm.graph

    # Callables are symbolically traced into graphs; GraphModules are used as-is.
    if isinstance(pattern, GraphModule):
        pattern_graph = pattern.graph
    else:
        pattern_graph = symbolic_trace(pattern).graph

    if isinstance(replacement, GraphModule):
        replacement_graph = replacement.graph
    else:
        replacement_graph = symbolic_trace(replacement).graph

    matcher = SubgraphMatcher(pattern_graph, match_output=False, match_placeholder=False,
                              remove_overlapping_matches=True, ignore_literals=ignore_literals)
    _matches: List[InternalMatch] = matcher.match(original_graph)

    # Filter out matches that don't match the filter
    _matches = [
        m for m in _matches
        if all(match_filter(m, original_graph, pattern_graph)
               for match_filter in match_filters)
    ]

    replacement_placeholders = [n for n in replacement_graph.nodes if n.op == "placeholder"]

    # As we progressively replace nodes, we'll need to keep track of how the match results should change
    match_changed_node: Dict[Node, Node] = {}

    match_and_replacements = []
    for match in _matches:

        # Build connecting between replacement graph's input and original graph input producer node

        # Initialize `val_map` with mappings from placeholder nodes in
        # `replacement` to their corresponding node in `original_graph`
        assert len(match.placeholder_nodes) == len(replacement_placeholders)
        val_map: Dict[Node, Node] = {}
        for rn, gn in zip(replacement_placeholders, match.placeholder_nodes):
            if isinstance(gn, Node):
                # A previous iteration of this loop may already have replaced
                # gn, so follow the redirection recorded in match_changed_node.
                val_map[rn] = match_changed_node.get(gn, gn)
                if gn != val_map[rn]:
                    # Update match.placeholder_nodes and match.nodes_map with the node that replaced gn
                    gn_ind = match.placeholder_nodes.index(gn)
                    match.placeholder_nodes[gn_ind] = match_changed_node[gn]
                    map_key = list(match.nodes_map.keys())[list(match.nodes_map.values()).index(gn)]
                    match.nodes_map[map_key] = match_changed_node[gn]
            else:
                # Non-Node placeholder match (e.g. a literal); pass it through.
                val_map[rn] = gn

        # Copy the replacement graph over
        user_nodes: Set[Node] = set()
        for n in match.returning_nodes:
            for user in n.users:
                user_nodes.add(user)
        assert user_nodes, "The returning_nodes should have at least one user node"

        if len(user_nodes) == 1:
            first_user_node = list(user_nodes)[0]
        else:
            # If there are multiple user nodes, we need to find the first user node
            # in the current execution order of the `original_graph`
            for n in original_graph.nodes:
                if n in user_nodes:
                    first_user_node = n
                    break

        # Insert the replacement subgraph just before its earliest consumer so
        # topological order of the original graph is preserved.
        with original_graph.inserting_before(first_user_node):
            copied_returning_nodes = original_graph.graph_copy(replacement_graph, val_map)

        # graph_copy returns a bare Node for single-output graphs; normalize
        # to a tuple so the zip below works uniformly.
        if isinstance(copied_returning_nodes, Node):
            copied_returning_nodes = (copied_returning_nodes, )

        # Get a list of nodes that have been replaced into the graph
        replacement_nodes: List[Node] = [v for v in val_map.values() if v not in match.placeholder_nodes]

        # Hook the output Node of the replacement subgraph in to the
        # original Graph at the correct location
        assert len(match.returning_nodes) == len(copied_returning_nodes)
        for gn, copied_node in zip(match.returning_nodes, copied_returning_nodes):
            gn.replace_all_uses_with(copied_node)
            match_changed_node[gn] = copied_node
        # Remove the original nodes
        # Iterate in reverse so users are erased before their producers.
        for node in reversed(pattern_graph.nodes):
            if node.op != "placeholder" and node.op != "output":
                gn = match.nodes_map[node]
                gm.graph.erase_node(gn)

        match_and_replacements.append(
            ReplacedPatterns(
                anchor=match.anchors[0],
                nodes_map=match.nodes_map,
                replacements=replacement_nodes
            )
        )

    # Update the passed-in GraphModule to reflect the new state of
    # `original_graph`
    gm.recompile()

    # If `replacement` was an nn.Module, we'll need to make sure that
    # all the submodules have been copied over correctly
    if isinstance(replacement, torch.nn.Module):
        _replace_attributes(gm, replacement)

    return match_and_replacements
|
llava_next/lib/python3.10/site-packages/torch/fx/tensor_type.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.fx.experimental.unification import Var # type: ignore[attr-defined]
|
| 2 |
+
|
| 3 |
+
from ._compatibility import compatibility
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@compatibility(is_backward_compatible=False)
class TensorType:
    """
    TensorType defines a type for tensors, which consists of a list of dimensions.
    Example:
        class M(torch.nn.Module):
            def forward(self, x:TensorType((1,2,3, Dyn)), y:TensorType((1,2,3, Dyn))):
                return torch.add(x, y)
    """

    def __init__(self, dim):
        # Mirror the typing-generics protocol so tools that inspect
        # __origin__ / __args__ treat TensorType like a parameterized type.
        self.__origin__ = TensorType
        self.__args__ = dim

    def __repr__(self):
        return 'TensorType[{}]'.format(self.__args__)

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable; left as-is since dims may themselves be unhashable.
        if not isinstance(other, self.__class__):
            return False
        return list(self.__args__) == list(other.__args__)

    @staticmethod
    def __class_getitem__(*args):
        # Accept both TensorType[(1, 2)] and TensorType[1, 2] spellings.
        dims = args[0] if len(args) == 1 and isinstance(args[0], tuple) else args
        return TensorType(tuple(dims))
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class _DynType:
|
| 37 |
+
"""
|
| 38 |
+
_DynType defines a type which stands for the absence of type information.
|
| 39 |
+
"""
|
| 40 |
+
def __init__(self):
|
| 41 |
+
self.__name__ = '_DynType'
|
| 42 |
+
|
| 43 |
+
def __eq__(self, other):
|
| 44 |
+
return isinstance(other, self.__class__)
|
| 45 |
+
|
| 46 |
+
def __str__(self):
|
| 47 |
+
return "Dyn"
|
| 48 |
+
|
| 49 |
+
def __repr__(self):
|
| 50 |
+
return "Dyn"
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
Dyn = _DynType()
|
| 54 |
+
|
| 55 |
+
@compatibility(is_backward_compatible=False)
def is_consistent(t1, t2):
    """
    A binary relation denoted by ~ that determines if t1 is consistent with t2.
    The relation is reflexive, symmetric but not transitive.
    returns True if t1 and t2 are consistent and False otherwise.
    Example:
        Dyn ~ TensorType((1,2,3))
        int ~ Dyn
        int ~ int
        TensorType((1,Dyn,3)) ~ TensorType((1,2,3))
    """
    # Equal types, Dyn, and unification variables are consistent with anything.
    if t1 == t2 or t1 == Dyn or t2 == Dyn:
        return True
    if isinstance(t1, Var) or isinstance(t2, Var):
        return True

    # Two tensor types are consistent iff they have the same rank and every
    # pair of corresponding dimensions is consistent.
    if isinstance(t1, TensorType) and isinstance(t2, TensorType):
        if len(t1.__args__) != len(t2.__args__):
            return False
        return all(is_consistent(d1, d2) for d1, d2 in zip(t1.__args__, t2.__args__))

    return False
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@compatibility(is_backward_compatible=False)
def is_more_precise(t1, t2):
    """
    A binary relation denoted by <= that determines if t1 is more precise than t2.
    The relation is reflexive and transitive.
    returns True if t1 is more precise than t2 and False otherwise.
    Example:
        int <= Dyn
        TensorType((1,2,3)) <= Dyn
        int <= int
        TensorType((1,2,3)) <= TensorType((1,Dyn,3))
    """
    if t1 == t2:
        return True

    # Everything is more precise than Dyn (the absence of type information).
    if isinstance(t2, _DynType):
        return True

    # Tensor types: same rank, and each dimension of t1 at least as precise
    # as the corresponding dimension of t2.
    if isinstance(t1, TensorType) and isinstance(t2, TensorType):
        return len(t1.__args__) == len(t2.__args__) and \
            all(is_more_precise(elem1, elem2) for elem1, elem2 in zip(t1.__args__, t2.__args__))

    else:
        return False
|
llava_next/lib/python3.10/site-packages/torch/fx/traceback.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import traceback
|
| 2 |
+
from contextlib import contextmanager
|
| 3 |
+
from typing import List, Any, Dict
|
| 4 |
+
from ._compatibility import compatibility
|
| 5 |
+
|
| 6 |
+
__all__ = ['preserve_node_meta', 'has_preserved_node_meta',
|
| 7 |
+
'set_stack_trace', 'set_grad_fn_seq_nr', 'reset_grad_fn_seq_nr',
|
| 8 |
+
'format_stack', 'set_current_meta', 'get_current_meta']
|
| 9 |
+
|
| 10 |
+
current_meta: Dict[str, Any] = {}
|
| 11 |
+
should_preserve_node_meta = False
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@compatibility(is_backward_compatible=False)
@contextmanager
def preserve_node_meta():
    """Context manager that enables preservation of FX node metadata
    (``current_meta``) for the duration of the ``with`` block."""
    global should_preserve_node_meta

    # Save and restore rather than set/clear so that nested uses compose.
    saved_should_preserve_node_meta = should_preserve_node_meta
    try:
        should_preserve_node_meta = True
        yield
    finally:
        # Restore even if the body raises.
        should_preserve_node_meta = saved_should_preserve_node_meta
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@compatibility(is_backward_compatible=False)
def set_stack_trace(stack: List[str]):
    """Record an already-formatted stack (list of frame strings) into
    ``current_meta["stack_trace"]`` when metadata preservation is enabled."""
    global current_meta

    # No-op unless preservation is on and a non-empty stack was supplied.
    if not should_preserve_node_meta or not stack:
        return
    current_meta["stack_trace"] = "".join(stack)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@compatibility(is_backward_compatible=False)
def set_grad_fn_seq_nr(seq_nr):
    """Mark ``current_meta`` as being inside a grad_fn with the given autograd
    sequence number, saving the previous values for later restoration by
    ``reset_grad_fn_seq_nr``."""
    global current_meta

    if should_preserve_node_meta:
        # The seq_nr is captured by eager mode in the grad_fn during forward
        # NOTE(review): only one level of prev_* is saved, so nesting deeper
        # than one grad_fn would clobber the saved state — confirm callers
        # never nest.
        current_meta["prev_grad_fn_seq_nr"] = current_meta.get("grad_fn_seq_nr", None)
        current_meta["prev_in_grad_fn"] = current_meta.get("in_grad_fn", None)
        current_meta["grad_fn_seq_nr"] = seq_nr
        current_meta["in_grad_fn"] = True
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@compatibility(is_backward_compatible=False)
def reset_grad_fn_seq_nr():
    """Undo the most recent ``set_grad_fn_seq_nr`` by restoring (or deleting)
    the grad_fn bookkeeping keys in ``current_meta``."""
    # NB: reset state properly, this would be helpful towards supporting
    # reentrant autograd if we actually wanted to do that.
    global current_meta

    if should_preserve_node_meta:
        if current_meta["prev_grad_fn_seq_nr"] is None:
            # No outer grad_fn was active: both saved values must be unset,
            # and the current keys are removed entirely.
            assert current_meta["prev_in_grad_fn"] is None
            del current_meta["grad_fn_seq_nr"]
            del current_meta["in_grad_fn"]
        # Restore the saved (possibly None) outer values.
        current_meta["grad_fn_seq_nr"] = current_meta["prev_grad_fn_seq_nr"]
        current_meta["in_grad_fn"] = current_meta["prev_in_grad_fn"]
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@compatibility(is_backward_compatible=False)
def format_stack() -> List[str]:
    """Return a formatted stack: the preserved node stack trace when metadata
    preservation is on, otherwise the current Python call stack."""
    if not should_preserve_node_meta:
        # fallback to traceback.format_stack(); drop this frame itself.
        return traceback.format_list(traceback.extract_stack()[:-1])
    return [current_meta.get("stack_trace", "")]
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@compatibility(is_backward_compatible=False)
def has_preserved_node_meta() -> bool:
    """Return whether node-metadata preservation is currently enabled
    (i.e. we are inside a ``preserve_node_meta()`` block)."""
    return should_preserve_node_meta
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
@compatibility(is_backward_compatible=False)
@contextmanager
def set_current_meta(node):
    """Context manager that installs a copy of ``node.meta`` as the global
    ``current_meta`` for the duration of the block, recording provenance in
    its ``"from_node"`` entry. A no-op when preservation is off or the node
    has no metadata."""
    global current_meta
    if should_preserve_node_meta and node.meta:
        # Swap in a copy so mutations here don't leak back into node.meta.
        saved_meta = current_meta
        try:
            current_meta = node.meta.copy()

            # Append (node.name, node.target) onto "from_node" for provenance tracking
            if "from_node" not in current_meta:
                current_meta["from_node"] = [(node.name, node.target)]
            elif current_meta["from_node"][-1][0] != node.name:
                # Avoid duplicate consecutive entries for the same node name.
                current_meta["from_node"].append((node.name, node.target))

            yield
        finally:
            # Always restore the previous meta, even on exception.
            current_meta = saved_meta
    else:
        yield
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@compatibility(is_backward_compatible=False)
def get_current_meta() -> Dict[str, Any]:
    """Return the current metadata dict (note: the live global, not a copy)."""
    return current_meta
|
llava_next/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:30e2b5492df652c324ef24183a99ed659b4e1ec2cedadc773c931b7ebc07de70
|
| 3 |
+
size 112836
|
llava_next/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:43abbd87badbdb37dbd2eeea9c7be711095897a442caf85dda0898a4b4144e92
|
| 3 |
+
size 434948
|
vlmpy310/lib/python3.10/site-packages/pyglet/input/__pycache__/controller_db.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f3a98cc3d22e6f308a7905d91350e9f5efd40bff961b959ad9dc220b17817f5a
|
| 3 |
+
size 194444
|
vlmpy310/lib/python3.10/site-packages/skimage/filters/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (358 Bytes). View file
|
|
|