Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +5 -0
- llava_next/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_drawer.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_manipulation.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/net_min_base.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/param_fetch.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/pass_manager.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/shape_prop.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/splitter_base.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py +42 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/cudagraphs.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/cudagraphs.py +56 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/cse_pass.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/cse_pass.py +112 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/fake_tensor_prop.py +61 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py +347 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__init__.py +2 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/partitioner.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_base.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_manager.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/partitioner.py +278 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/pass_base.py +75 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/pass_manager.py +303 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/operator_support.py +220 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/param_fetch.py +66 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/pass_manager.py +247 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/shape_prop.py +193 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/split_module.py +370 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/split_utils.py +280 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/splitter_base.py +871 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__pycache__/test_pass_manager.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/test_pass_manager.py +58 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/tools_common.py +254 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__init__.py +1 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/common.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/fuser_utils.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/matcher_utils.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/source_matcher_utils.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/common.py +83 -0
- llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/fuser_utils.py +233 -0
.gitattributes
CHANGED
|
@@ -1192,3 +1192,8 @@ vlmpy310/lib/python3.10/site-packages/pyglet/gl/__pycache__/gl.cpython-310.pyc f
|
|
| 1192 |
llava_next/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1193 |
llava_next/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1194 |
vlmpy310/lib/python3.10/site-packages/pyglet/input/__pycache__/controller_db.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1192 |
llava_next/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1193 |
llava_next/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1194 |
vlmpy310/lib/python3.10/site-packages/pyglet/input/__pycache__/controller_db.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1195 |
+
llava_next/lib/python3.10/site-packages/torchvision.libs/libpng16.7f72a3c5.so.16 filter=lfs diff=lfs merge=lfs -text
|
| 1196 |
+
llava_next/lib/python3.10/site-packages/torch/lib/libtorch.so filter=lfs diff=lfs merge=lfs -text
|
| 1197 |
+
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_watershed_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1198 |
+
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1199 |
+
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_quickshift_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
llava_next/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc
ADDED
|
Binary file (15.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_drawer.cpython-310.pyc
ADDED
|
Binary file (10.6 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_manipulation.cpython-310.pyc
ADDED
|
Binary file (3.57 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/net_min_base.cpython-310.pyc
ADDED
|
Binary file (18 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/param_fetch.cpython-310.pyc
ADDED
|
Binary file (2.7 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/pass_manager.cpython-310.pyc
ADDED
|
Binary file (7.31 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/shape_prop.cpython-310.pyc
ADDED
|
Binary file (5.75 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/splitter_base.cpython-310.pyc
ADDED
|
Binary file (25.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import operator
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def annotate_getitem_nodes(graph: torch.fx.Graph) -> None:
|
| 7 |
+
"""
|
| 8 |
+
Annotate the type of getitem nodes, inferred from the type of sequence node.
|
| 9 |
+
If sequence node is not annotated with a type, do nothing.
|
| 10 |
+
Currently support getitem nodes from Tuple, List, and NamedTuple sequence node.
|
| 11 |
+
|
| 12 |
+
This is helpful since annotations on local names within function are lost during FX transforms.
|
| 13 |
+
Adding back known type annotation for getitem nodes to improve jit scriptability.
|
| 14 |
+
|
| 15 |
+
Args:
|
| 16 |
+
graph (Graph): The graph to be annotated
|
| 17 |
+
"""
|
| 18 |
+
for node in graph.nodes:
|
| 19 |
+
if node.target == operator.getitem:
|
| 20 |
+
sequence_node, index_node = node.args
|
| 21 |
+
if not sequence_node.type:
|
| 22 |
+
continue
|
| 23 |
+
# container types
|
| 24 |
+
if hasattr(sequence_node.type, "_name"):
|
| 25 |
+
parameterized_types = sequence_node.type.__args__
|
| 26 |
+
if sequence_node.type._name == "Tuple":
|
| 27 |
+
if len(parameterized_types) == 2 and isinstance(
|
| 28 |
+
parameterized_types[1], type(...)
|
| 29 |
+
):
|
| 30 |
+
node.type = parameterized_types[0]
|
| 31 |
+
else:
|
| 32 |
+
assert len(parameterized_types) > index_node
|
| 33 |
+
node_type = parameterized_types[index_node]
|
| 34 |
+
node.type = node_type
|
| 35 |
+
elif sequence_node.type._name == "List":
|
| 36 |
+
assert len(parameterized_types) == 1
|
| 37 |
+
node.type = parameterized_types[0]
|
| 38 |
+
# NamedTuple type
|
| 39 |
+
elif hasattr(sequence_node.type, "__annotations__"):
|
| 40 |
+
sequence_node_field_types = sequence_node.type.__annotations__
|
| 41 |
+
field_name = sequence_node.type._fields[index_node]
|
| 42 |
+
node.type = sequence_node_field_types[field_name]
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (180 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/cudagraphs.cpython-310.pyc
ADDED
|
Binary file (2.17 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/cudagraphs.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
|
| 3 |
+
from torch.fx.passes.operator_support import OperatorSupport
|
| 4 |
+
from torch.fx.passes.tools_common import CALLABLE_NODE_OPS
|
| 5 |
+
from torch.fx.passes.fake_tensor_prop import FakeTensorProp
|
| 6 |
+
from torch.utils._pytree import tree_map
|
| 7 |
+
|
| 8 |
+
import operator
|
| 9 |
+
|
| 10 |
+
class CudaGraphsSupport(OperatorSupport):
|
| 11 |
+
# TODO: why is submodules passed here
|
| 12 |
+
def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
|
| 13 |
+
if node.op not in CALLABLE_NODE_OPS:
|
| 14 |
+
return False
|
| 15 |
+
|
| 16 |
+
if node.target in [torch.ops.aten.embedding_dense_backward.default]:
|
| 17 |
+
return False
|
| 18 |
+
|
| 19 |
+
if node.target in [operator.getitem]:
|
| 20 |
+
return True
|
| 21 |
+
|
| 22 |
+
found_not_cuda = False
|
| 23 |
+
|
| 24 |
+
def meta_fk(meta):
|
| 25 |
+
return meta["val"] if "val" in meta else meta["fake_result"]
|
| 26 |
+
|
| 27 |
+
def find_not_cuda(t):
|
| 28 |
+
nonlocal found_not_cuda
|
| 29 |
+
if isinstance(t, torch.Tensor) and t.device.type != 'cuda':
|
| 30 |
+
found_not_cuda = True
|
| 31 |
+
|
| 32 |
+
for n in node.all_input_nodes:
|
| 33 |
+
tree_map(find_not_cuda, meta_fk(n.meta))
|
| 34 |
+
|
| 35 |
+
tree_map(find_not_cuda, meta_fk(node.meta))
|
| 36 |
+
|
| 37 |
+
# NB: factory function is accounted for because the result would be
|
| 38 |
+
# cpu or cuda
|
| 39 |
+
|
| 40 |
+
return not found_not_cuda
|
| 41 |
+
|
| 42 |
+
def partition_cudagraphs(gm, inputs):
|
| 43 |
+
"""
|
| 44 |
+
Partition an FX graph into sub-GraphModules that can be validly run under
|
| 45 |
+
CUDA graphs. For a subgraph to be runnable under CUDA, all of the operations
|
| 46 |
+
must involve CUDA tensors only/
|
| 47 |
+
"""
|
| 48 |
+
|
| 49 |
+
FakeTensorProp(gm).propagate(*inputs)
|
| 50 |
+
supported_ops = CudaGraphsSupport()
|
| 51 |
+
# TODO: single node partition may be wrong due to the pessimization
|
| 52 |
+
# from copying in and out the data. Check in benchmarks, perhaps
|
| 53 |
+
partitioner = CapabilityBasedPartitioner(gm, supported_ops, allows_single_node_partition=True)
|
| 54 |
+
partitions = partitioner.propose_partitions()
|
| 55 |
+
fused_graph = partitioner.fuse_partitions(partitions)
|
| 56 |
+
return fused_graph
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (179 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (186 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/cse_pass.cpython-310.pyc
ADDED
|
Binary file (3.81 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/cse_pass.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Tuple, Any
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch.fx.passes.infra.pass_base import PassBase, PassResult
|
| 5 |
+
from torch.utils._pytree import tree_flatten
|
| 6 |
+
|
| 7 |
+
from torch.fx import GraphModule, Graph
|
| 8 |
+
from torch.fx import Node
|
| 9 |
+
|
| 10 |
+
aten = torch.ops.aten
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# stateful ops are banned from CSE
|
| 14 |
+
rand_ops = {aten.dropout, aten._fused_dropout, aten._standard_gamma, aten.bernoulli, aten.multinomial, aten.native_dropout, aten.normal, aten.poisson, aten.binomial, aten.rrelu, aten.rand_like, aten.rand, aten.randint, aten.randn, aten.randperm} # noqa: E501,B950
|
| 15 |
+
|
| 16 |
+
inplace_ops = {aten.add_, aten.sub_, aten.mul_, aten.div_, aten.pow_, aten.lerp_, aten.relu_, aten.sigmoid_, aten.tanh_} # noqa: E501
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@torch.fx._compatibility.compatibility(is_backward_compatible=False)
|
| 20 |
+
def get_CSE_banned_ops():
|
| 21 |
+
return rand_ops.union(inplace_ops)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@torch.fx._compatibility.compatibility(is_backward_compatible=False)
|
| 25 |
+
class CSEPass(PassBase):
|
| 26 |
+
|
| 27 |
+
def __init__(self, banned_ops=None):
|
| 28 |
+
"""
|
| 29 |
+
This version of CSE Pass aims to be dialect agnostic, and it's implemented purely based on the connectivity between fx.Node.
|
| 30 |
+
|
| 31 |
+
For functional dialects, user would only need to specify the random ops in ban list.
|
| 32 |
+
|
| 33 |
+
Warning: CSE Pass cannot be safely applied on a FX graph in non-functional dialects.
|
| 34 |
+
If your dialect contains stateful operators, please customized the banned_ops.
|
| 35 |
+
|
| 36 |
+
"""
|
| 37 |
+
if banned_ops is None:
|
| 38 |
+
banned_ops = set()
|
| 39 |
+
self.banned_ops = banned_ops
|
| 40 |
+
super().__init__()
|
| 41 |
+
|
| 42 |
+
def call(self, graph_module: GraphModule) -> PassResult:
|
| 43 |
+
"""
|
| 44 |
+
Return a new copy of torch.fx.GraphModule with CSE applied to the input graph
|
| 45 |
+
|
| 46 |
+
Example usage:
|
| 47 |
+
|
| 48 |
+
from torch.fx.experimental.proxy_tensor import make_fx
|
| 49 |
+
def f(a):
|
| 50 |
+
b = a * a
|
| 51 |
+
c = a * a
|
| 52 |
+
return b+c
|
| 53 |
+
|
| 54 |
+
p = CSEPass()
|
| 55 |
+
traced_graph = make_fx(f)(torch.tensor(1))
|
| 56 |
+
print(traced_graph)
|
| 57 |
+
result = p(traced_graph)
|
| 58 |
+
print(result.graph_module)
|
| 59 |
+
"""
|
| 60 |
+
def get_aten_target(node):
|
| 61 |
+
if hasattr(node.target, 'overloadpacket'):
|
| 62 |
+
return node.target.overloadpacket
|
| 63 |
+
return node.target
|
| 64 |
+
|
| 65 |
+
modified = False
|
| 66 |
+
new_graph = Graph()
|
| 67 |
+
env: Dict[Node, Node] = {} # map from node in the old graph to node in the new graph
|
| 68 |
+
hash_env: Dict[Tuple[torch._ops.OpOverload, int], Node] = {} # map from hash to a node in the new graph
|
| 69 |
+
token_map: Dict[Tuple[torch._ops.OpOverload, int], Dict[str, Any]] = {} # map from hash to token
|
| 70 |
+
for n in graph_module.graph.nodes:
|
| 71 |
+
# The placeholder, output, and get_attr nodes are copied to the new graph without change
|
| 72 |
+
# do not CSE away random operations
|
| 73 |
+
if n.op == 'placeholder' or n.op == 'output' or n.op == 'get_attr' or get_aten_target(n) in self.banned_ops:
|
| 74 |
+
new_node = new_graph.node_copy(n, lambda x: env[x])
|
| 75 |
+
env[n] = new_node
|
| 76 |
+
else: # n.op == 'call_function', should never see n.op == 'call_module' or 'call_method'
|
| 77 |
+
# substitute args and kwargs members to their mapping in env if exists
|
| 78 |
+
# specs can be used to reconstruct nested list/dictionaries
|
| 79 |
+
def substitute(arg_list):
|
| 80 |
+
arg_list, spec = tree_flatten(arg_list)
|
| 81 |
+
for i in range(len(arg_list)):
|
| 82 |
+
v = arg_list[i]
|
| 83 |
+
if isinstance(v, Node) and v in env:
|
| 84 |
+
arg_list[i] = env[v]
|
| 85 |
+
return tuple(arg_list), spec
|
| 86 |
+
args, args_spec = substitute(n.args)
|
| 87 |
+
kwargs, kwargs_spec = substitute(n.kwargs)
|
| 88 |
+
|
| 89 |
+
# each token corresponds to a unique node
|
| 90 |
+
# nodes with the same token can be substituted
|
| 91 |
+
token = {"target": n.target, "args": args, "args_spec": args_spec,
|
| 92 |
+
"kwargs": kwargs, "kwargs_spec": kwargs_spec}
|
| 93 |
+
|
| 94 |
+
# hash substituted args to a number, do not hash specs because specs are not hashable
|
| 95 |
+
hash_arg = hash((args, kwargs))
|
| 96 |
+
hash_val = (n.target, hash_arg)
|
| 97 |
+
|
| 98 |
+
# check if a node has a substitute and can be eliminated
|
| 99 |
+
hash_val_in_hash_env = hash_val in hash_env
|
| 100 |
+
if hash_val_in_hash_env and token_map[hash_val] == token:
|
| 101 |
+
modified = True # substitution happens and the graph is modified
|
| 102 |
+
env[n] = hash_env[hash_val]
|
| 103 |
+
continue
|
| 104 |
+
|
| 105 |
+
new_node = new_graph.node_copy(n, lambda x: env[x])
|
| 106 |
+
env[n] = new_node
|
| 107 |
+
if not hash_val_in_hash_env:
|
| 108 |
+
hash_env[hash_val] = new_node
|
| 109 |
+
token_map[hash_val] = token
|
| 110 |
+
|
| 111 |
+
csed_gm = GraphModule(graph_module, new_graph)
|
| 112 |
+
return PassResult(csed_gm, modified)
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/fake_tensor_prop.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
|
| 3 |
+
import torch.fx
|
| 4 |
+
from torch.fx import Node
|
| 5 |
+
from torch.fx._compatibility import compatibility
|
| 6 |
+
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
|
| 7 |
+
from torch.fx.experimental.proxy_tensor import py_sym_types, snapshot_fake
|
| 8 |
+
from torch.fx.node import map_aggregate
|
| 9 |
+
|
| 10 |
+
__all__ = ['FakeTensorProp']
|
| 11 |
+
|
| 12 |
+
@compatibility(is_backward_compatible=False)
|
| 13 |
+
class FakeTensorProp(torch.fx.Interpreter):
|
| 14 |
+
"""
|
| 15 |
+
Execute an FX graph Node-by-Node and record a fake tensor representing
|
| 16 |
+
the metadata for the node. Unlike ShapeProp, (1) this propagation
|
| 17 |
+
is cheap--it does the propagation with meta tensors which do not actually
|
| 18 |
+
store data, and (2) the fake tensors have much more fine grained information,
|
| 19 |
+
e.g., they have accurate alias information that can be consulted by looking
|
| 20 |
+
at the storages.
|
| 21 |
+
|
| 22 |
+
Args:
|
| 23 |
+
module (GraphModule): The module to be executed
|
| 24 |
+
mode (Optional[FakeTensorMode]): The dispatch mode used to execute computation indicated by each FX Node.
|
| 25 |
+
"""
|
| 26 |
+
def __init__(self, module: torch.fx.GraphModule, mode: Optional[FakeTensorMode] = None):
|
| 27 |
+
super().__init__(module)
|
| 28 |
+
if mode is None:
|
| 29 |
+
mode = FakeTensorMode()
|
| 30 |
+
self._mode = mode
|
| 31 |
+
|
| 32 |
+
def run_node(self, n: Node):
|
| 33 |
+
result = super().run_node(n)
|
| 34 |
+
|
| 35 |
+
def extract_val(obj):
|
| 36 |
+
if isinstance(obj, FakeTensor):
|
| 37 |
+
return snapshot_fake(obj)
|
| 38 |
+
elif isinstance(obj, torch.Tensor):
|
| 39 |
+
# TODO: How is it possible that we get a non fake tensor? We
|
| 40 |
+
# should be running under the mode...
|
| 41 |
+
return snapshot_fake(self._mode.from_tensor(obj, static_shapes=True))
|
| 42 |
+
elif isinstance(obj, py_sym_types):
|
| 43 |
+
return obj
|
| 44 |
+
else:
|
| 45 |
+
return None
|
| 46 |
+
|
| 47 |
+
meta = map_aggregate(result, extract_val)
|
| 48 |
+
if meta is not None:
|
| 49 |
+
n.meta['val'] = meta
|
| 50 |
+
return result
|
| 51 |
+
|
| 52 |
+
def propagate(self, *args):
|
| 53 |
+
fake_args = [
|
| 54 |
+
self._mode.from_tensor(a) if isinstance(a, torch.Tensor) else a
|
| 55 |
+
for a in args
|
| 56 |
+
]
|
| 57 |
+
return self.propagate_dont_convert_inputs(*fake_args)
|
| 58 |
+
|
| 59 |
+
def propagate_dont_convert_inputs(self, *args):
|
| 60 |
+
with self._mode:
|
| 61 |
+
return super().run(*args)
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py
ADDED
|
@@ -0,0 +1,347 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import hashlib
|
| 3 |
+
import torch
|
| 4 |
+
import torch.fx
|
| 5 |
+
from typing import Dict, Any, TYPE_CHECKING
|
| 6 |
+
from torch.fx.node import _get_qualified_name, _format_arg
|
| 7 |
+
from torch.fx.passes.shape_prop import TensorMetadata
|
| 8 |
+
from torch.fx._compatibility import compatibility
|
| 9 |
+
from itertools import chain
|
| 10 |
+
|
| 11 |
+
__all__ = ['FxGraphDrawer']
|
| 12 |
+
try:
|
| 13 |
+
import pydot
|
| 14 |
+
HAS_PYDOT = True
|
| 15 |
+
except ImportError:
|
| 16 |
+
HAS_PYDOT = False
|
| 17 |
+
|
| 18 |
+
_COLOR_MAP = {
|
| 19 |
+
"placeholder": '"AliceBlue"',
|
| 20 |
+
"call_module": "LemonChiffon1",
|
| 21 |
+
"get_param": "Yellow2",
|
| 22 |
+
"get_attr": "LightGrey",
|
| 23 |
+
"output": "PowderBlue",
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
_HASH_COLOR_MAP = [
|
| 27 |
+
"CadetBlue1",
|
| 28 |
+
"Coral",
|
| 29 |
+
"DarkOliveGreen1",
|
| 30 |
+
"DarkSeaGreen1",
|
| 31 |
+
"GhostWhite",
|
| 32 |
+
"Khaki1",
|
| 33 |
+
"LavenderBlush1",
|
| 34 |
+
"LightSkyBlue",
|
| 35 |
+
"MistyRose1",
|
| 36 |
+
"MistyRose2",
|
| 37 |
+
"PaleTurquoise2",
|
| 38 |
+
"PeachPuff1",
|
| 39 |
+
"Salmon",
|
| 40 |
+
"Thistle1",
|
| 41 |
+
"Thistle3",
|
| 42 |
+
"Wheat1",
|
| 43 |
+
]
|
| 44 |
+
|
| 45 |
+
_WEIGHT_TEMPLATE = {
|
| 46 |
+
"shape": "record",
|
| 47 |
+
"fillcolor": "Salmon",
|
| 48 |
+
"style": '"filled,rounded"',
|
| 49 |
+
"fontcolor": "#000000",
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
if HAS_PYDOT:
|
| 53 |
+
@compatibility(is_backward_compatible=False)
|
| 54 |
+
class FxGraphDrawer:
|
| 55 |
+
"""
|
| 56 |
+
Visualize a torch.fx.Graph with graphviz
|
| 57 |
+
Basic usage:
|
| 58 |
+
g = FxGraphDrawer(symbolic_traced, "resnet18")
|
| 59 |
+
g.get_dot_graph().write_svg("a.svg")
|
| 60 |
+
"""
|
| 61 |
+
|
| 62 |
+
def __init__(
|
| 63 |
+
self,
|
| 64 |
+
graph_module: torch.fx.GraphModule,
|
| 65 |
+
name: str,
|
| 66 |
+
ignore_getattr: bool = False,
|
| 67 |
+
ignore_parameters_and_buffers: bool = False,
|
| 68 |
+
skip_node_names_in_args: bool = True,
|
| 69 |
+
):
|
| 70 |
+
self._name = name
|
| 71 |
+
self._dot_graphs = {
|
| 72 |
+
name: self._to_dot(
|
| 73 |
+
graph_module, name, ignore_getattr, ignore_parameters_and_buffers, skip_node_names_in_args
|
| 74 |
+
)
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
for node in graph_module.graph.nodes:
|
| 78 |
+
if node.op != "call_module":
|
| 79 |
+
continue
|
| 80 |
+
|
| 81 |
+
leaf_node = self._get_leaf_node(graph_module, node)
|
| 82 |
+
|
| 83 |
+
if not isinstance(leaf_node, torch.fx.GraphModule):
|
| 84 |
+
continue
|
| 85 |
+
|
| 86 |
+
self._dot_graphs[f"{name}_{node.target}"] = self._to_dot(
|
| 87 |
+
leaf_node,
|
| 88 |
+
f"{name}_{node.target}",
|
| 89 |
+
ignore_getattr,
|
| 90 |
+
ignore_parameters_and_buffers,
|
| 91 |
+
skip_node_names_in_args,
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
def get_dot_graph(self, submod_name=None) -> pydot.Dot:
|
| 95 |
+
"""
|
| 96 |
+
Visualize a torch.fx.Graph with graphviz
|
| 97 |
+
Example:
|
| 98 |
+
>>> # xdoctest: +REQUIRES(module:pydot)
|
| 99 |
+
>>> # define module
|
| 100 |
+
>>> class MyModule(torch.nn.Module):
|
| 101 |
+
>>> def __init__(self):
|
| 102 |
+
>>> super().__init__()
|
| 103 |
+
>>> self.linear = torch.nn.Linear(4, 5)
|
| 104 |
+
>>> def forward(self, x):
|
| 105 |
+
>>> return self.linear(x).clamp(min=0.0, max=1.0)
|
| 106 |
+
>>> module = MyModule()
|
| 107 |
+
>>> # trace the module
|
| 108 |
+
>>> symbolic_traced = torch.fx.symbolic_trace(module)
|
| 109 |
+
>>> # setup output file
|
| 110 |
+
>>> import ubelt as ub
|
| 111 |
+
>>> dpath = ub.Path.appdir('torch/tests/FxGraphDrawer').ensuredir()
|
| 112 |
+
>>> fpath = dpath / 'linear.svg'
|
| 113 |
+
>>> # draw the graph
|
| 114 |
+
>>> g = FxGraphDrawer(symbolic_traced, "linear")
|
| 115 |
+
>>> g.get_dot_graph().write_svg(fpath)
|
| 116 |
+
"""
|
| 117 |
+
if submod_name is None:
|
| 118 |
+
return self.get_main_dot_graph()
|
| 119 |
+
else:
|
| 120 |
+
return self.get_submod_dot_graph(submod_name)
|
| 121 |
+
|
| 122 |
+
def get_main_dot_graph(self) -> pydot.Dot:
|
| 123 |
+
return self._dot_graphs[self._name]
|
| 124 |
+
|
| 125 |
+
def get_submod_dot_graph(self, submod_name) -> pydot.Dot:
|
| 126 |
+
return self._dot_graphs[f"{self._name}_{submod_name}"]
|
| 127 |
+
|
| 128 |
+
def get_all_dot_graphs(self) -> Dict[str, pydot.Dot]:
|
| 129 |
+
return self._dot_graphs
|
| 130 |
+
|
| 131 |
+
def _get_node_style(self, node: torch.fx.Node) -> Dict[str, str]:
|
| 132 |
+
template = {
|
| 133 |
+
"shape": "record",
|
| 134 |
+
"fillcolor": "#CAFFE3",
|
| 135 |
+
"style": '"filled,rounded"',
|
| 136 |
+
"fontcolor": "#000000",
|
| 137 |
+
}
|
| 138 |
+
if node.op in _COLOR_MAP:
|
| 139 |
+
template["fillcolor"] = _COLOR_MAP[node.op]
|
| 140 |
+
else:
|
| 141 |
+
# Use a random color for each node; based on its name so it's stable.
|
| 142 |
+
target_name = node._pretty_print_target(node.target)
|
| 143 |
+
target_hash = int(hashlib.md5(target_name.encode()).hexdigest()[:8], 16)
|
| 144 |
+
template["fillcolor"] = _HASH_COLOR_MAP[target_hash % len(_HASH_COLOR_MAP)]
|
| 145 |
+
return template
|
| 146 |
+
|
| 147 |
+
def _get_leaf_node(
|
| 148 |
+
self, module: torch.nn.Module, node: torch.fx.Node
|
| 149 |
+
) -> torch.nn.Module:
|
| 150 |
+
py_obj = module
|
| 151 |
+
assert isinstance(node.target, str)
|
| 152 |
+
atoms = node.target.split(".")
|
| 153 |
+
for atom in atoms:
|
| 154 |
+
if not hasattr(py_obj, atom):
|
| 155 |
+
raise RuntimeError(
|
| 156 |
+
str(py_obj) + " does not have attribute " + atom + "!"
|
| 157 |
+
)
|
| 158 |
+
py_obj = getattr(py_obj, atom)
|
| 159 |
+
return py_obj
|
| 160 |
+
|
| 161 |
+
def _typename(self, target: Any) -> str:
|
| 162 |
+
if isinstance(target, torch.nn.Module):
|
| 163 |
+
ret = torch.typename(target)
|
| 164 |
+
elif isinstance(target, str):
|
| 165 |
+
ret = target
|
| 166 |
+
else:
|
| 167 |
+
ret = _get_qualified_name(target)
|
| 168 |
+
|
| 169 |
+
# Escape "{" and "}" to prevent dot files like:
|
| 170 |
+
# https://gist.github.com/SungMinCho/1a017aab662c75d805c5954d62c5aabc
|
| 171 |
+
# which triggers `Error: bad label format (...)` from dot
|
| 172 |
+
return ret.replace("{", r"\{").replace("}", r"\}")
|
| 173 |
+
|
| 174 |
+
def _get_node_label(
|
| 175 |
+
self,
|
| 176 |
+
module: torch.fx.GraphModule,
|
| 177 |
+
node: torch.fx.Node,
|
| 178 |
+
skip_node_names_in_args: bool,
|
| 179 |
+
) -> str:
|
| 180 |
+
def _get_str_for_args_kwargs(arg):
|
| 181 |
+
if isinstance(arg, tuple):
|
| 182 |
+
prefix, suffix = r"|args=(\l", r",\n)\l"
|
| 183 |
+
arg_strs_list = [_format_arg(a, max_list_len=8) for a in arg]
|
| 184 |
+
elif isinstance(arg, dict):
|
| 185 |
+
prefix, suffix = r"|kwargs={\l", r",\n}\l"
|
| 186 |
+
arg_strs_list = [
|
| 187 |
+
f"{k}: {_format_arg(v, max_list_len=8)}"
|
| 188 |
+
for k, v in arg.items()
|
| 189 |
+
]
|
| 190 |
+
else: # Fall back to nothing in unexpected case.
|
| 191 |
+
return ""
|
| 192 |
+
|
| 193 |
+
# Strip out node names if requested.
|
| 194 |
+
if skip_node_names_in_args:
|
| 195 |
+
arg_strs_list = [a for a in arg_strs_list if "%" not in a]
|
| 196 |
+
if len(arg_strs_list) == 0:
|
| 197 |
+
return ""
|
| 198 |
+
arg_strs = prefix + r",\n".join(arg_strs_list) + suffix
|
| 199 |
+
return arg_strs.replace("{", r"\{").replace("}", r"\}")
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
label = "{" + f"name=%{node.name}|op_code={node.op}\n"
|
| 203 |
+
|
| 204 |
+
if node.op == "call_module":
|
| 205 |
+
leaf_module = self._get_leaf_node(module, node)
|
| 206 |
+
label += r"\n" + self._typename(leaf_module) + r"\n|"
|
| 207 |
+
extra = ""
|
| 208 |
+
if hasattr(leaf_module, "__constants__"):
|
| 209 |
+
extra = r"\n".join(
|
| 210 |
+
[f"{c}: {getattr(leaf_module, c)}" for c in leaf_module.__constants__] # type: ignore[union-attr]
|
| 211 |
+
)
|
| 212 |
+
label += extra + r"\n"
|
| 213 |
+
else:
|
| 214 |
+
label += f"|target={self._typename(node.target)}" + r"\n"
|
| 215 |
+
if len(node.args) > 0:
|
| 216 |
+
label += _get_str_for_args_kwargs(node.args)
|
| 217 |
+
if len(node.kwargs) > 0:
|
| 218 |
+
label += _get_str_for_args_kwargs(node.kwargs)
|
| 219 |
+
label += f"|num_users={len(node.users)}" + r"\n"
|
| 220 |
+
|
| 221 |
+
tensor_meta = node.meta.get('tensor_meta')
|
| 222 |
+
label += self._tensor_meta_to_label(tensor_meta)
|
| 223 |
+
|
| 224 |
+
return label + "}"
|
| 225 |
+
|
| 226 |
+
def _tensor_meta_to_label(self, tm) -> str:
|
| 227 |
+
if tm is None:
|
| 228 |
+
return ""
|
| 229 |
+
elif isinstance(tm, TensorMetadata):
|
| 230 |
+
return self._stringify_tensor_meta(tm)
|
| 231 |
+
elif isinstance(tm, list):
|
| 232 |
+
result = ""
|
| 233 |
+
for item in tm:
|
| 234 |
+
result += self._tensor_meta_to_label(item)
|
| 235 |
+
return result
|
| 236 |
+
elif isinstance(tm, dict):
|
| 237 |
+
result = ""
|
| 238 |
+
for v in tm.values():
|
| 239 |
+
result += self._tensor_meta_to_label(v)
|
| 240 |
+
return result
|
| 241 |
+
elif isinstance(tm, tuple):
|
| 242 |
+
result = ""
|
| 243 |
+
for item in tm:
|
| 244 |
+
result += self._tensor_meta_to_label(item)
|
| 245 |
+
return result
|
| 246 |
+
else:
|
| 247 |
+
raise RuntimeError(f"Unsupported tensor meta type {type(tm)}")
|
| 248 |
+
|
| 249 |
+
def _stringify_tensor_meta(self, tm: TensorMetadata) -> str:
|
| 250 |
+
result = ""
|
| 251 |
+
if not hasattr(tm, "dtype"):
|
| 252 |
+
print("tm", tm)
|
| 253 |
+
result += "|" + "dtype" + "=" + str(tm.dtype) + r"\n"
|
| 254 |
+
result += "|" + "shape" + "=" + str(tuple(tm.shape)) + r"\n"
|
| 255 |
+
result += "|" + "requires_grad" + "=" + str(tm.requires_grad) + r"\n"
|
| 256 |
+
result += "|" + "stride" + "=" + str(tm.stride) + r"\n"
|
| 257 |
+
if tm.is_quantized:
|
| 258 |
+
assert tm.qparams is not None
|
| 259 |
+
assert "qscheme" in tm.qparams
|
| 260 |
+
qscheme = tm.qparams["qscheme"]
|
| 261 |
+
if qscheme in {
|
| 262 |
+
torch.per_tensor_affine,
|
| 263 |
+
torch.per_tensor_symmetric,
|
| 264 |
+
}:
|
| 265 |
+
result += "|" + "q_scale" + "=" + str(tm.qparams["scale"]) + r"\n"
|
| 266 |
+
result += "|" + "q_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n"
|
| 267 |
+
elif qscheme in {
|
| 268 |
+
torch.per_channel_affine,
|
| 269 |
+
torch.per_channel_symmetric,
|
| 270 |
+
torch.per_channel_affine_float_qparams,
|
| 271 |
+
}:
|
| 272 |
+
result += "|" + "q_per_channel_scale" + "=" + str(tm.qparams["scale"]) + r"\n"
|
| 273 |
+
result += "|" + "q_per_channel_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n"
|
| 274 |
+
result += "|" + "q_per_channel_axis" + "=" + str(tm.qparams["axis"]) + r"\n"
|
| 275 |
+
else:
|
| 276 |
+
raise RuntimeError(f"Unsupported qscheme: {qscheme}")
|
| 277 |
+
result += "|" + "qscheme" + "=" + str(tm.qparams["qscheme"]) + r"\n"
|
| 278 |
+
return result
|
| 279 |
+
|
| 280 |
+
def _get_tensor_label(self, t: torch.Tensor) -> str:
|
| 281 |
+
return str(t.dtype) + str(list(t.shape)) + r"\n"
|
| 282 |
+
|
| 283 |
+
def _to_dot(
|
| 284 |
+
self,
|
| 285 |
+
graph_module: torch.fx.GraphModule,
|
| 286 |
+
name: str,
|
| 287 |
+
ignore_getattr: bool,
|
| 288 |
+
ignore_parameters_and_buffers: bool,
|
| 289 |
+
skip_node_names_in_args: bool,
|
| 290 |
+
) -> pydot.Dot:
|
| 291 |
+
"""
|
| 292 |
+
Actual interface to visualize a fx.Graph. Note that it takes in the GraphModule instead of the Graph.
|
| 293 |
+
If ignore_parameters_and_buffers is True, the parameters and buffers
|
| 294 |
+
created with the module will not be added as nodes and edges.
|
| 295 |
+
"""
|
| 296 |
+
dot_graph = pydot.Dot(name, rankdir="TB")
|
| 297 |
+
|
| 298 |
+
for node in graph_module.graph.nodes:
|
| 299 |
+
if ignore_getattr and node.op == "get_attr":
|
| 300 |
+
continue
|
| 301 |
+
|
| 302 |
+
style = self._get_node_style(node)
|
| 303 |
+
dot_node = pydot.Node(
|
| 304 |
+
node.name, label=self._get_node_label(graph_module, node, skip_node_names_in_args), **style
|
| 305 |
+
)
|
| 306 |
+
dot_graph.add_node(dot_node)
|
| 307 |
+
|
| 308 |
+
def get_module_params_or_buffers():
|
| 309 |
+
for pname, ptensor in chain(
|
| 310 |
+
leaf_module.named_parameters(), leaf_module.named_buffers()
|
| 311 |
+
):
|
| 312 |
+
pname1 = node.name + "." + pname
|
| 313 |
+
label1 = (
|
| 314 |
+
pname1 + "|op_code=get_" + "parameter"
|
| 315 |
+
if isinstance(ptensor, torch.nn.Parameter)
|
| 316 |
+
else "buffer" + r"\l"
|
| 317 |
+
)
|
| 318 |
+
dot_w_node = pydot.Node(
|
| 319 |
+
pname1,
|
| 320 |
+
label="{" + label1 + self._get_tensor_label(ptensor) + "}",
|
| 321 |
+
**_WEIGHT_TEMPLATE,
|
| 322 |
+
)
|
| 323 |
+
dot_graph.add_node(dot_w_node)
|
| 324 |
+
dot_graph.add_edge(pydot.Edge(pname1, node.name))
|
| 325 |
+
|
| 326 |
+
if node.op == "call_module":
|
| 327 |
+
leaf_module = self._get_leaf_node(graph_module, node)
|
| 328 |
+
|
| 329 |
+
if not ignore_parameters_and_buffers and not isinstance(leaf_module, torch.fx.GraphModule):
|
| 330 |
+
get_module_params_or_buffers()
|
| 331 |
+
|
| 332 |
+
for node in graph_module.graph.nodes:
|
| 333 |
+
if ignore_getattr and node.op == "get_attr":
|
| 334 |
+
continue
|
| 335 |
+
|
| 336 |
+
for user in node.users:
|
| 337 |
+
dot_graph.add_edge(pydot.Edge(node.name, user.name))
|
| 338 |
+
|
| 339 |
+
return dot_graph
|
| 340 |
+
|
| 341 |
+
else:
|
| 342 |
+
if not TYPE_CHECKING:
|
| 343 |
+
@compatibility(is_backward_compatible=False)
|
| 344 |
+
class FxGraphDrawer:
|
| 345 |
+
def __init__(self, graph_module: torch.fx.GraphModule, name: str, ignore_getattr: bool = False):
|
| 346 |
+
raise RuntimeError('FXGraphDrawer requires the pydot package to be installed. Please install '
|
| 347 |
+
'pydot through your favorite Python package manager.')
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
from . import pass_manager
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (214 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/partitioner.cpython-310.pyc
ADDED
|
Binary file (7.79 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_base.cpython-310.pyc
ADDED
|
Binary file (3.06 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_manager.cpython-310.pyc
ADDED
|
Binary file (9.57 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/partitioner.py
ADDED
|
@@ -0,0 +1,278 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, List, Set, Iterable, Sequence, Optional, Deque
|
| 2 |
+
|
| 3 |
+
from torch.fx.passes.utils.fuser_utils import fuse_by_partitions
|
| 4 |
+
|
| 5 |
+
from torch.fx.graph_module import GraphModule
|
| 6 |
+
from torch.fx.node import Node, _get_qualified_name
|
| 7 |
+
from torch.fx.passes.operator_support import OperatorSupportBase
|
| 8 |
+
|
| 9 |
+
import logging
|
| 10 |
+
import itertools
|
| 11 |
+
from copy import copy
|
| 12 |
+
from collections import deque
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
logger.setLevel(logging.WARNING)
|
| 16 |
+
|
| 17 |
+
class Partition:
|
| 18 |
+
def __init__(self, id: Optional[int] = None, nodes: Optional[Iterable[Node]] = None):
|
| 19 |
+
self.id = id
|
| 20 |
+
self.nodes: Set[Node] = set(nodes) if nodes is not None else set()
|
| 21 |
+
|
| 22 |
+
def __repr__(self) -> str:
|
| 23 |
+
return str(self.nodes)
|
| 24 |
+
|
| 25 |
+
def add_node(self, node: Node):
|
| 26 |
+
self.nodes.add(node)
|
| 27 |
+
|
| 28 |
+
def remove_node(self, node: Node):
|
| 29 |
+
self.nodes.remove(node)
|
| 30 |
+
|
| 31 |
+
def size(self):
|
| 32 |
+
return len(self.nodes)
|
| 33 |
+
|
| 34 |
+
class CapabilityBasedPartitioner:
|
| 35 |
+
|
| 36 |
+
def __init__(self,
|
| 37 |
+
graph_module: GraphModule,
|
| 38 |
+
operator_support: OperatorSupportBase,
|
| 39 |
+
allows_single_node_partition: bool = False,
|
| 40 |
+
non_compute_ops: Optional[Sequence[str]] = None,
|
| 41 |
+
allowed_single_node_partition_ops: Optional[Sequence[str]] = None,
|
| 42 |
+
) -> None:
|
| 43 |
+
self.graph_module = graph_module
|
| 44 |
+
self.operator_support = operator_support
|
| 45 |
+
self.allows_single_node_partition = allows_single_node_partition
|
| 46 |
+
self.non_compute_ops = non_compute_ops if non_compute_ops is not None else []
|
| 47 |
+
self.allowed_single_node_partition_ops = (
|
| 48 |
+
allowed_single_node_partition_ops
|
| 49 |
+
if allowed_single_node_partition_ops is not None
|
| 50 |
+
else []
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
def __is_node_supported(self, node: Node) -> bool:
|
| 54 |
+
return (
|
| 55 |
+
self.operator_support.is_node_supported(dict(self.graph_module.named_modules()), node)
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
def propose_partitions(self) -> List[Partition]:
|
| 59 |
+
# assumptions: nodes in candidate list is sorted in topological order
|
| 60 |
+
assignment: Dict[Node, int] = {} # mapping from node to partition_id
|
| 61 |
+
partitions_by_id: Dict[int, Partition] = {} # mapping from partition_id to partition
|
| 62 |
+
new_partition_id = itertools.count()
|
| 63 |
+
|
| 64 |
+
# try to merge partition other_id into partition self_id
|
| 65 |
+
# merge only happens if the end graph doesn't contain cyclic dependency
|
| 66 |
+
# returns `True` when merge happens, `False` otherwise.
|
| 67 |
+
def maybe_merge_partition(self_id: int, other_id: int):
|
| 68 |
+
# merged_nodes is the union of nodes in two partition to-be-merged
|
| 69 |
+
merged_nodes = copy(partitions_by_id[self_id].nodes)
|
| 70 |
+
merged_nodes.update(partitions_by_id[other_id].nodes)
|
| 71 |
+
|
| 72 |
+
# Note it's ok to use `set` here, since we are only query if a node
|
| 73 |
+
# has been visited. We are NEVER going to iterate on nodes inside
|
| 74 |
+
# the set.
|
| 75 |
+
visited: Set[Node] = set()
|
| 76 |
+
|
| 77 |
+
def dfs_iter_find_cycle(root_node):
|
| 78 |
+
stack : Deque[Node] = deque()
|
| 79 |
+
stack.append(root_node)
|
| 80 |
+
|
| 81 |
+
while stack:
|
| 82 |
+
node = stack.pop()
|
| 83 |
+
|
| 84 |
+
if node in visited:
|
| 85 |
+
continue
|
| 86 |
+
if node in merged_nodes:
|
| 87 |
+
return True # found cycle, return
|
| 88 |
+
|
| 89 |
+
# branching on hitting partition or not
|
| 90 |
+
if node in assignment:
|
| 91 |
+
# Since partition is not merged in the graph yet, when we
|
| 92 |
+
# hit a node in a partition through DFS, we need to
|
| 93 |
+
# traverse all nodes in the partition to properly reflect
|
| 94 |
+
# dependencies after the fusion
|
| 95 |
+
for p_node in partitions_by_id[assignment[node]].nodes:
|
| 96 |
+
for user_node in p_node.users:
|
| 97 |
+
if user_node not in partitions_by_id[assignment[node]].nodes:
|
| 98 |
+
stack.append(user_node)
|
| 99 |
+
else:
|
| 100 |
+
for user_node in node.users:
|
| 101 |
+
stack.append(user_node)
|
| 102 |
+
|
| 103 |
+
visited.add(node)
|
| 104 |
+
|
| 105 |
+
return False
|
| 106 |
+
|
| 107 |
+
# check if merge would create cyclic dependency.
|
| 108 |
+
for node in merged_nodes:
|
| 109 |
+
for user_node in node.users:
|
| 110 |
+
if user_node not in merged_nodes and dfs_iter_find_cycle(user_node):
|
| 111 |
+
# return false indicating cyclic dependency found and
|
| 112 |
+
# merge is aborted
|
| 113 |
+
return False
|
| 114 |
+
|
| 115 |
+
# no cyclic dependency found, move forward with the merge
|
| 116 |
+
# updating partition nodes
|
| 117 |
+
partitions_by_id[self_id].nodes = merged_nodes
|
| 118 |
+
# updating assignment map
|
| 119 |
+
for node in partitions_by_id[other_id].nodes:
|
| 120 |
+
assignment[node] = self_id
|
| 121 |
+
# delete other partition
|
| 122 |
+
del partitions_by_id[other_id]
|
| 123 |
+
|
| 124 |
+
return True
|
| 125 |
+
|
| 126 |
+
def merge_single_node(node: Node, id: Optional[int]):
|
| 127 |
+
if node in assignment:
|
| 128 |
+
partitions_by_id[assignment[node]].remove_node(node)
|
| 129 |
+
|
| 130 |
+
if id is None:
|
| 131 |
+
assignment.pop(node)
|
| 132 |
+
elif id not in partitions_by_id:
|
| 133 |
+
assignment[node] = id
|
| 134 |
+
partitions_by_id[id] = Partition(id=id, nodes=[node])
|
| 135 |
+
else:
|
| 136 |
+
assignment[node] = id
|
| 137 |
+
partitions_by_id[id].add_node(node)
|
| 138 |
+
|
| 139 |
+
logger.debug("Proposing partitions...")
|
| 140 |
+
|
| 141 |
+
for node in reversed(self.graph_module.graph.nodes):
|
| 142 |
+
# use Dict as an ordered set to ensure deterministic partitioning result, don't care value
|
| 143 |
+
merge_candidates: Dict[int, None] = {}
|
| 144 |
+
|
| 145 |
+
# Note a limited horizontal fusion is enabled:
|
| 146 |
+
# when `node` is not supported, the code below attempts to fuse consumer of `node`.
|
| 147 |
+
#
|
| 148 |
+
# I don't see a need to add a knob to disable horizontal fusion yet, we can short-cut
|
| 149 |
+
# the fusion by adding an `else` block here to skip horizontal fusion.
|
| 150 |
+
if self.__is_node_supported(node) and node not in assignment:
|
| 151 |
+
partition_id = next(new_partition_id)
|
| 152 |
+
merge_single_node(node, partition_id)
|
| 153 |
+
merge_candidates[partition_id] = None
|
| 154 |
+
|
| 155 |
+
# merge all possible partitions
|
| 156 |
+
for node in assignment:
|
| 157 |
+
merge_candidates[assignment[node]] = None
|
| 158 |
+
|
| 159 |
+
merge_candidates_list = list(merge_candidates.keys())
|
| 160 |
+
if len(merge_candidates_list) > 1:
|
| 161 |
+
self_id = merge_candidates_list[0]
|
| 162 |
+
for other_id in merge_candidates_list[1:]:
|
| 163 |
+
# note: merge partition `other_id` into partition `self_id` if
|
| 164 |
+
# it doesn't create cyclic dependency in the graph, otherwise,
|
| 165 |
+
# this is a no-op
|
| 166 |
+
maybe_merge_partition(self_id, other_id)
|
| 167 |
+
|
| 168 |
+
# post processing to re-assign "getitem" nodes into upstream partition
|
| 169 |
+
logger.debug("Reassigning getitem nodes to its producer node's partition...")
|
| 170 |
+
nodes_reassignment: Dict[Node, int] = {}
|
| 171 |
+
for node in self.graph_module.graph.nodes:
|
| 172 |
+
is_tuple_output = True
|
| 173 |
+
for user in node.users:
|
| 174 |
+
if user.op != "call_function" or \
|
| 175 |
+
_get_qualified_name(user.target) != "_operator.getitem": # type: ignore[arg-type]
|
| 176 |
+
is_tuple_output = False
|
| 177 |
+
break
|
| 178 |
+
|
| 179 |
+
# node has tuple outputs, re-assign all following getitem node into node's partition
|
| 180 |
+
if is_tuple_output:
|
| 181 |
+
id = assignment.get(node, None) # type: ignore[arg-type]
|
| 182 |
+
for user in node.users:
|
| 183 |
+
if assignment.get(user, None) != id: # type: ignore[arg-type]
|
| 184 |
+
nodes_reassignment[user] = id # type: ignore[assignment]
|
| 185 |
+
for node, id in nodes_reassignment.items():
|
| 186 |
+
merge_single_node(node, id)
|
| 187 |
+
|
| 188 |
+
# filter out single node partitions
|
| 189 |
+
if not self.allows_single_node_partition:
|
| 190 |
+
logger.debug("Filtering out single node partitions...")
|
| 191 |
+
default_non_compute_ops = {"torch.ops.aten.view", "_operator.getitem"}
|
| 192 |
+
non_compute_ops = default_non_compute_ops.union(set(self.non_compute_ops))
|
| 193 |
+
partitions_to_remove: List[int] = []
|
| 194 |
+
for id, partition in partitions_by_id.items():
|
| 195 |
+
compute_node_count = 0
|
| 196 |
+
for node in partition.nodes:
|
| 197 |
+
if node.op == "call_function":
|
| 198 |
+
assert callable(node.target)
|
| 199 |
+
if _get_qualified_name(node.target) not in non_compute_ops:
|
| 200 |
+
compute_node_count += 1
|
| 201 |
+
if _get_qualified_name(node.target) in self.allowed_single_node_partition_ops:
|
| 202 |
+
compute_node_count += 1
|
| 203 |
+
if compute_node_count <= 1:
|
| 204 |
+
partitions_to_remove.append(id)
|
| 205 |
+
for id in partitions_to_remove:
|
| 206 |
+
del partitions_by_id[id]
|
| 207 |
+
|
| 208 |
+
logger.debug("Partitions proposed:")
|
| 209 |
+
for id, partition in partitions_by_id.items():
|
| 210 |
+
logger.debug("partition #%s: %s", id, [node.name for node in partition.nodes])
|
| 211 |
+
|
| 212 |
+
return list(partitions_by_id.values())
|
| 213 |
+
|
| 214 |
+
def fuse_partitions(self, partitions: List[Partition]) -> GraphModule:
|
| 215 |
+
logger.debug("Fusing partitions...")
|
| 216 |
+
# fuse_by_partitions expects partitions in List[List[Node]]: [ [node0, node1], [node2, node3] ]
|
| 217 |
+
return fuse_by_partitions(self.graph_module, [list(partition.nodes) for partition in partitions])
|
| 218 |
+
|
| 219 |
+
# remove non-compute-ops that sits at the boundary of a partition.
|
| 220 |
+
def remove_bookend_non_compute_ops(self, partitions: List[Partition]):
|
| 221 |
+
non_compute_ops = set(self.non_compute_ops)
|
| 222 |
+
|
| 223 |
+
def is_non_compute_node(node: Node):
|
| 224 |
+
return node.op == "call_function" and \
|
| 225 |
+
_get_qualified_name(node.target) in non_compute_ops # type: ignore[arg-type]
|
| 226 |
+
|
| 227 |
+
# cache transparent nodes
|
| 228 |
+
transparent_input_nodes: Dict[Node, bool] = {}
|
| 229 |
+
transparent_output_nodes: Dict[Node, bool] = {}
|
| 230 |
+
|
| 231 |
+
def is_transparent_input_node(node: Node, partition: Set[Node], removed_nodes: Set[Node]):
|
| 232 |
+
if node.op == "placeholder" or (node not in partition) or (node in removed_nodes):
|
| 233 |
+
return True
|
| 234 |
+
if node in transparent_input_nodes:
|
| 235 |
+
return transparent_input_nodes[node]
|
| 236 |
+
if is_non_compute_node(node):
|
| 237 |
+
for input_n in node.all_input_nodes:
|
| 238 |
+
if not is_transparent_input_node(input_n, partition, removed_nodes):
|
| 239 |
+
transparent_input_nodes[node] = False
|
| 240 |
+
return False
|
| 241 |
+
transparent_input_nodes[node] = True
|
| 242 |
+
return True
|
| 243 |
+
transparent_input_nodes[node] = False
|
| 244 |
+
return False
|
| 245 |
+
|
| 246 |
+
def is_transparent_output_node(node: Node, partition: Set[Node], removed_nodes: Set[Node]):
|
| 247 |
+
if node.op == "placeholder" or (node not in partition) or (node in removed_nodes):
|
| 248 |
+
return True
|
| 249 |
+
if node in transparent_output_nodes:
|
| 250 |
+
return transparent_output_nodes[node]
|
| 251 |
+
if is_non_compute_node(node):
|
| 252 |
+
for output_n in node.users:
|
| 253 |
+
if not is_transparent_output_node(output_n, partition, removed_nodes):
|
| 254 |
+
transparent_output_nodes[node] = False
|
| 255 |
+
return False
|
| 256 |
+
transparent_output_nodes[node] = True
|
| 257 |
+
return True
|
| 258 |
+
transparent_output_nodes[node] = False
|
| 259 |
+
return False
|
| 260 |
+
|
| 261 |
+
for partition in partitions:
|
| 262 |
+
# Note it's ok to use `set` here, since we are only query if a node
|
| 263 |
+
# has been removed. We are NEVER going to iterate on nodes inside
|
| 264 |
+
# the set.
|
| 265 |
+
remove_node: Set[Node] = set()
|
| 266 |
+
for node in partition.nodes:
|
| 267 |
+
if is_non_compute_node(node) and \
|
| 268 |
+
(is_transparent_input_node(node, partition.nodes, remove_node) or
|
| 269 |
+
is_transparent_output_node(node, partition.nodes, remove_node)):
|
| 270 |
+
remove_node.add(node)
|
| 271 |
+
|
| 272 |
+
if len(remove_node) != 0:
|
| 273 |
+
partition.nodes = partition.nodes - remove_node
|
| 274 |
+
|
| 275 |
+
def partition_and_fuse(self) -> GraphModule:
|
| 276 |
+
partitions = self.propose_partitions()
|
| 277 |
+
fused_gm = self.fuse_partitions(partitions)
|
| 278 |
+
return fused_gm
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/pass_base.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import abc
|
| 2 |
+
from collections import namedtuple
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
from torch.fx.graph_module import GraphModule
|
| 6 |
+
from torch.fx._compatibility import compatibility
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
__all__ = ['PassResult', 'PassBase']
|
| 10 |
+
|
| 11 |
+
@compatibility(is_backward_compatible=False)
|
| 12 |
+
class PassResult(namedtuple("PassResult", ["graph_module", "modified"])):
|
| 13 |
+
"""
|
| 14 |
+
Result of a pass:
|
| 15 |
+
graph_module: The modified graph module
|
| 16 |
+
modified: A flag for if the pass has modified the graph module
|
| 17 |
+
"""
|
| 18 |
+
def __new__(cls, graph_module, modified):
|
| 19 |
+
return super().__new__(cls, graph_module, modified)
|
| 20 |
+
|
| 21 |
+
@compatibility(is_backward_compatible=False)
|
| 22 |
+
class PassBase(abc.ABC):
|
| 23 |
+
"""
|
| 24 |
+
Base interface for implementing passes.
|
| 25 |
+
|
| 26 |
+
It is required to implement the `call` function so that we can directly
|
| 27 |
+
pass instances of the Pass directly to the PassManager and call them as a
|
| 28 |
+
function.
|
| 29 |
+
|
| 30 |
+
We can directly pass an instance of a class implementing this interface into
|
| 31 |
+
the PassManager's `passes` attribute.
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
def __call__(self, graph_module: GraphModule) -> Optional[PassResult]:
|
| 35 |
+
"""
|
| 36 |
+
Runs the precondition check, the pass itself, and the postcondition check.
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
self.requires(graph_module)
|
| 40 |
+
res = self.call(graph_module)
|
| 41 |
+
self.ensures(graph_module)
|
| 42 |
+
return res
|
| 43 |
+
|
| 44 |
+
@abc.abstractmethod
|
| 45 |
+
def call(self, graph_module: GraphModule) -> Optional[PassResult]:
|
| 46 |
+
"""
|
| 47 |
+
The pass that is run through the given graph module. To implement a
|
| 48 |
+
pass, it is required to implement this function.
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
graph_module: The graph module we will run a pass on
|
| 52 |
+
"""
|
| 53 |
+
pass
|
| 54 |
+
|
| 55 |
+
def requires(self, graph_module: GraphModule) -> None: # noqa: B027
|
| 56 |
+
"""
|
| 57 |
+
This function will be called before the pass is run and will check that
|
| 58 |
+
the given graph module contains the preconditions needed to run the
|
| 59 |
+
pass. It is not required to implement this function.
|
| 60 |
+
|
| 61 |
+
Args:
|
| 62 |
+
graph_module: The graph module we will run checks on
|
| 63 |
+
"""
|
| 64 |
+
pass
|
| 65 |
+
|
| 66 |
+
def ensures(self, graph_module: GraphModule) -> None: # noqa: B027
|
| 67 |
+
"""
|
| 68 |
+
This function will be called after the pass is run and will check that
|
| 69 |
+
the given graph module contains the postconditions needed to run the
|
| 70 |
+
pass. It is not required to implement this function.
|
| 71 |
+
|
| 72 |
+
Args:
|
| 73 |
+
graph_module: The graph module we will run checks on
|
| 74 |
+
"""
|
| 75 |
+
pass
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/pass_manager.py
ADDED
|
@@ -0,0 +1,303 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
import logging
|
| 3 |
+
from queue import Queue
|
| 4 |
+
from functools import wraps
|
| 5 |
+
from typing import Callable, Dict, List
|
| 6 |
+
|
| 7 |
+
import torch.nn as nn
|
| 8 |
+
from torch.fx.graph_module import GraphModule
|
| 9 |
+
from torch.fx._compatibility import compatibility
|
| 10 |
+
from torch.fx.passes.infra.pass_base import PassResult
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
logger.setLevel(logging.WARNING)
|
| 14 |
+
|
| 15 |
+
__all__ = ['pass_result_wrapper', 'this_before_that_pass_constraint', 'PassManager']
|
| 16 |
+
|
| 17 |
+
@compatibility(is_backward_compatible=False)
|
| 18 |
+
def pass_result_wrapper(fn: Callable) -> Callable:
|
| 19 |
+
"""
|
| 20 |
+
Wrapper for passes which currently do not return a PassResult.
|
| 21 |
+
This wrapper makes them return a PassResult containing the modified object
|
| 22 |
+
and True for the "modified" flag.
|
| 23 |
+
|
| 24 |
+
Args:
|
| 25 |
+
fn (Callable[Module, Any])
|
| 26 |
+
|
| 27 |
+
Returns:
|
| 28 |
+
wrapped_fn (Callable[Module, PassResult])
|
| 29 |
+
"""
|
| 30 |
+
if fn is None:
|
| 31 |
+
return None
|
| 32 |
+
|
| 33 |
+
@wraps(fn)
|
| 34 |
+
def wrapped_fn(gm):
|
| 35 |
+
res = fn(gm)
|
| 36 |
+
if res is None:
|
| 37 |
+
return PassResult(gm, True)
|
| 38 |
+
if isinstance(res, PassResult):
|
| 39 |
+
return res
|
| 40 |
+
elif isinstance(res, nn.Module):
|
| 41 |
+
return PassResult(res, True)
|
| 42 |
+
|
| 43 |
+
if not inspect.isfunction(fn):
|
| 44 |
+
wrapped_fn.__name__ = type(fn).__name__
|
| 45 |
+
|
| 46 |
+
return wrapped_fn
|
| 47 |
+
|
| 48 |
+
def _validate_pass_schedule_constraint(
|
| 49 |
+
constraint: Callable[[Callable, Callable], bool], passes: List[Callable]
|
| 50 |
+
) -> None:
|
| 51 |
+
for i, a in enumerate(passes):
|
| 52 |
+
for j, b in enumerate(passes[i + 1 :]):
|
| 53 |
+
if constraint(a, b):
|
| 54 |
+
continue
|
| 55 |
+
raise RuntimeError(
|
| 56 |
+
f"pass schedule constraint violated. Expected {a} before {b}"
|
| 57 |
+
f" but found {a} at index {i} and {b} at index{j} in pass"
|
| 58 |
+
f" list."
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
def _topological_sort_passes(
|
| 62 |
+
passes: List[Callable], constraints: List[Callable]
|
| 63 |
+
) -> List[Callable]:
|
| 64 |
+
"""
|
| 65 |
+
Args
|
| 66 |
+
passes: Passes that we are ordering
|
| 67 |
+
constraints: Constraints applied on these passes
|
| 68 |
+
|
| 69 |
+
Returns
|
| 70 |
+
A sorted list of callables and a boolean of if a circular dependency
|
| 71 |
+
existed
|
| 72 |
+
"""
|
| 73 |
+
if len(constraints) == 0:
|
| 74 |
+
return passes
|
| 75 |
+
|
| 76 |
+
# Contruct a graph mapping nodes to a list of their users
|
| 77 |
+
graph: Dict[Callable, List[Callable]] = {p : [] for p in passes}
|
| 78 |
+
indegree_map: Dict[Callable, int] = {p : 0 for p in passes}
|
| 79 |
+
candidates: Queue = Queue()
|
| 80 |
+
for a in passes:
|
| 81 |
+
for b in passes:
|
| 82 |
+
if a == b:
|
| 83 |
+
continue
|
| 84 |
+
|
| 85 |
+
for constraint in constraints:
|
| 86 |
+
if not constraint(a, b):
|
| 87 |
+
graph[b].append(a)
|
| 88 |
+
indegree_map[a] += 1
|
| 89 |
+
|
| 90 |
+
if indegree_map[a] == 0:
|
| 91 |
+
candidates.put(a)
|
| 92 |
+
|
| 93 |
+
visited: Dict[Callable, bool] = {p : False for p in passes}
|
| 94 |
+
sorted_passes: List[Callable] = []
|
| 95 |
+
|
| 96 |
+
while not candidates.empty():
|
| 97 |
+
p = candidates.get()
|
| 98 |
+
sorted_passes.append(p)
|
| 99 |
+
visited[p] = True
|
| 100 |
+
|
| 101 |
+
for n in graph[p]:
|
| 102 |
+
if not visited[n]:
|
| 103 |
+
indegree_map[n] -= 1
|
| 104 |
+
if indegree_map[n] == 0:
|
| 105 |
+
candidates.put(n)
|
| 106 |
+
|
| 107 |
+
# Check if there are unvisited nodes (aka cycles in the graph)
|
| 108 |
+
cycle_passes = list(filter(lambda p: indegree_map[p] != 0, indegree_map.keys()))
|
| 109 |
+
if len(cycle_passes) != 0:
|
| 110 |
+
error = f"Circular dependency detected within the following passes: {cycle_passes}"
|
| 111 |
+
raise RuntimeError(error)
|
| 112 |
+
|
| 113 |
+
return sorted_passes
|
| 114 |
+
|
| 115 |
+
@compatibility(is_backward_compatible=False)
def this_before_that_pass_constraint(this: Callable, that: Callable) -> Callable:
    """
    Builds a partial-order ('depends on') predicate requiring `this` to be
    scheduled before `that`.

    For example, the following pass list and constraint list would be invalid.
    ```
    passes = [pass_b, pass_a]

    constraints = [
        this_before_that_pass_constraint(pass_a, pass_b)
    ]
    ```

    Args:
        this (Callable): pass which should occur first
        that (Callable): pass which should occur later

    Returns:
        depends_on (Callable[[Object, Object], bool]
    """

    def depends_on(a: Callable, b: Callable):
        # The only ordering rejected is `that` scheduled ahead of `this`.
        return not (a == that and b == this)

    return depends_on
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
@compatibility(is_backward_compatible=False)
class PassManager:
    """
    Construct a PassManager.

    Collects passes and constraints. This defines the pass schedule, manages
    pass constraints and pass execution.

    Args:
        passes (Optional[List[Callable]]): List of passes. A pass is a
            callable which modifies an object and returns a PassResult
        constraint (Optional[List[Callable]]): List of constraints. A
            constraint is a callable which takes two passes (A, B) and returns
            True if A depends on B and False otherwise. See implementation of
            `this_before_that_pass_constraint` for example.
        steps (int): Max number of times we run the passes (default = 1).
        run_checks_after_each_pass (bool): Whether to run checks and linting
            after each pass
        suppress_check_failures (bool): Whether to raise errors when running
            checks
    """

    # Class-level declarations; `passes`/`constraints` are always overwritten
    # per-instance in __init__, while `_validated` and `steps` act as defaults.
    passes: List[Callable[[nn.Module], PassResult]]
    constraints: List[Callable[[Callable, Callable], bool]]
    _validated: bool = False
    steps: int = 1

    def __init__(
        self,
        passes=None,
        constraints=None,
        steps=None,
        run_checks_after_each_pass: bool = False,
        suppress_check_failures: bool = False,
    ):
        self.passes = passes or []
        self.constraints = constraints or []
        # NOTE: a falsy `steps` (None or 0) keeps the class default of 1.
        if steps:
            self.steps = steps

        self.run_checks_after_each_pass = run_checks_after_each_pass
        self.suppress_check_failures = suppress_check_failures

    def add_pass(self, _pass: Callable):
        """
        Adds a pass into the current list of passes.
        """
        # Any schedule change invalidates the previously solved ordering.
        self.passes.append(_pass)
        self._validated = False

    def add_constraint(self, constraint: Callable):
        """
        Adds a constraint into the current list of constraints.
        """
        self.constraints.append(constraint)
        self._validated = False

    def validate_constraints(self):
        """
        Validates that current pass schedule defined by `self.passes` is valid
        according to all constraints in `self.constraints`
        """
        if self._validated:
            return
        for constraint in self.constraints:
            _validate_pass_schedule_constraint(constraint, self.passes)
        self._validated = True

    def solve_constraints(self):
        """
        Finds a valid traversal order based on the given constraints and orders
        the passes based on this order.

        If a circular dependency exists between the constraints and steps = 1,
        then we will raise an error because if steps != 1 this means that we
        will re-run the passes, allowing for circular dependencies.
        """
        self.passes = _topological_sort_passes(self.passes, self.constraints)
        self._validated = True

    def add_checks(self, check: Callable) -> None:
        """
        Adds a function which takes runs various checks on a given graph module.
        This function is run before and after each pass if the
        `run_checks_after_each_pass` flag is enabled.
        """
        sig = inspect.signature(check)

        if len(list(sig.parameters.values())) != 1:
            raise TypeError("PassManager check function should only take in one variable, a module")

        # Replaces the no-op `check` placeholder below on this instance.
        setattr(self, "check", check)  # noqa: B010

    def check(self, module: nn.Module) -> None:
        # Placeholder invariant check; a real check is installed per-instance
        # via `add_checks`.
        pass

    def __call__(self, module: nn.Module) -> PassResult:
        """
        Runs a list of passes in the order based on `self.passes` on the given
        graph module. Each time a pass is run, checks and linting will be run on
        the graph module if `run_checks_after_each_pass` is set.

        If the module is a graph module, we will run the list of passes until
        the graph stops changing, or until `steps` number of times.
        """
        # Order the passes based on the constraints
        if not self._validated:
            self.solve_constraints()

        # Check graph invariants
        self.check(module)

        # Run the set of passes `steps` number of times or until the graph stops
        # changing
        overall_modified = False
        for _ in range(self.steps):
            modified = False

            # Run the set of passes on the graph module
            for i, fn in enumerate(self.passes):
                # Plain functions report their own name; callable objects
                # (pass instances) report their class name.
                fn_name = fn.__name__ if inspect.isfunction(fn) else type(fn).__name__
                logger.debug("Running pass '%s'", fn_name)

                try:
                    res = fn(module)

                    # Duck-typed acceptance: anything exposing `graph_module`
                    # is treated like a PassResult.
                    if not isinstance(res, PassResult) and not hasattr(
                        res, "graph_module"
                    ):
                        raise TypeError(
                            f"The result of the pass {fn_name} should be type PassResult."
                            + "Please wrap it with pass_result_wrapper()"
                        )
                    module = res.graph_module
                    modified = modified or res.modified

                    if isinstance(module, GraphModule):
                        logger.debug("Graph after pass '%s': %s", fn_name, module.graph)
                        module.recompile()

                    # Check graph invariants
                    if self.run_checks_after_each_pass:
                        self.check(module)

                except Exception as e:
                    # Re-raise with the list of passes already applied, which
                    # makes mid-pipeline failures much easier to localize.
                    prev_pass_names = [
                        p.__name__ if inspect.isfunction(p) else type(p).__name__
                        for p in self.passes[:i]
                    ]
                    msg = f"An error occurred when running the '{fn_name}' pass after the following passes: {prev_pass_names}"
                    raise Exception(msg) from e

            # If the graph no longer changes, then we can stop running these passes
            overall_modified = overall_modified or modified
            if not modified:
                break

        return PassResult(module, overall_modified)
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/operator_support.py
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import abc
|
| 2 |
+
import typing as t
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.fx
|
| 6 |
+
from torch.fx._compatibility import compatibility
|
| 7 |
+
from .shape_prop import TensorMetadata
|
| 8 |
+
from .tools_common import get_node_target, CALLABLE_NODE_OPS
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
__all__ = ['OperatorSupportBase', 'OperatorSupport', 'create_op_support', 'chain', 'OpSupports', 'any_chain']
|
| 12 |
+
|
| 13 |
+
# fx.Node.target typename, as returned by `get_node_target()`
|
| 14 |
+
TargetTypeName = str
|
| 15 |
+
|
| 16 |
+
# Arguments' dtypes for a given node, see `OperatorSupport`
|
| 17 |
+
SupportedArgumentDTypes = t.Optional[
|
| 18 |
+
t.Tuple[
|
| 19 |
+
t.Sequence[t.Sequence[torch.dtype]],
|
| 20 |
+
t.Dict[str, t.Sequence[torch.dtype]],
|
| 21 |
+
]
|
| 22 |
+
]
|
| 23 |
+
|
| 24 |
+
SupportDict = t.Mapping[TargetTypeName, SupportedArgumentDTypes]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@compatibility(is_backward_compatible=False)
class OperatorSupportBase(abc.ABC):
    """Interface for determining if a fx.Node is supported by a backend"""
    @abc.abstractmethod
    def is_node_supported(
        self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
    ) -> bool:
        # Subclasses decide, given the owning model's submodules and a node,
        # whether the backend can execute that node.
        raise NotImplementedError()
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@compatibility(is_backward_compatible=False)
class OperatorSupport(OperatorSupportBase):
    """
    `_support_dict` maps node.target typename to supported inputs dtypes.

    node.target typename is retrieved using helper function `get_node_target()`

    If supported inputs dtypes is None, it means any dtype is supported, else
    we should see a tuple like (([dtypes], ...), {"name":[dtypes], ...}).

    The first tuple ([dtypes], ...) indicates what dtypes are supported for
    inputs in node.args and the second dict {"name": [dtypes], ...} indicates
    what dtypes are supported for inputs in node.kwargs.

    For inputs in args, if we don't want to check it, we can put None there,
    e.g. (None, [torch.float]) indicates that we don't care about the type of
    the first input in args. And for inputs in kwargs, if not listed, will not
    be checked.
    """

    _support_dict: SupportDict

    def __init__(
        self,
        support_dict: t.Optional[SupportDict] = None
    ):
        self._support_dict = support_dict or {}

    def is_node_supported(
        self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
    ) -> bool:
        """
        Args:
            `submodules`: mapping from module name to the module. This can be
                retrieved by calling model.named_modules().

            `node`: a Fx node that we want to determine whether it's supported.

        Returns:
            `is_supported`: whether the arg `node` is supported.
        """
        # Nodes whose op is not in CALLABLE_NODE_OPS do not invoke a target,
        # so they are trivially "supported".
        if node.op not in CALLABLE_NODE_OPS:
            return True

        target = get_node_target(submodules, node)

        # Target not found in _support_dict meaning that we don't support this op at all
        if target not in self._support_dict:
            return False

        # The rule for target is None meaning that we accept any dtype
        if self._support_dict[target] is None:
            return True

        args_dtypes, kwargs_dtypes = self._support_dict[target]  # type: ignore[misc]

        # Check args dtypes
        for i, dtypes in enumerate(args_dtypes):
            # Rules beyond the actual argument count are ignored.
            if len(node.args) <= i:
                break

            # None indicates we don't care about the dtype of args[i]
            if dtypes is None:
                continue

            # If arg is not a node then we don't check it
            if not isinstance(node.args[i], torch.fx.Node):
                continue

            arg_dtype = _get_arg_dtype(node.args[i])  # type: ignore[arg-type]
            if arg_dtype not in dtypes:
                return False

        # Check kwargs dtypes
        for k, dtypes in kwargs_dtypes.items():
            # Kwargs the caller did not pass are not checked.
            if k not in node.kwargs:
                continue

            # If arg is not a node then we don't check it
            if not isinstance(node.kwargs[k], torch.fx.Node):
                continue

            kwarg_dtype = _get_arg_dtype(node.kwargs[k])  # type: ignore[arg-type]
            if kwarg_dtype not in dtypes:
                return False

        return True
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
# ======================================================================
|
| 127 |
+
# Functional interfaces and utils for defining basic operator support logic
|
| 128 |
+
# and composing them into more complex ones
|
| 129 |
+
# ======================================================================
|
| 130 |
+
|
| 131 |
+
IsNodeSupported = t.Callable[[t.Mapping[str, torch.nn.Module], torch.fx.Node], bool]
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
@compatibility(is_backward_compatible=False)
def create_op_support(is_node_supported: IsNodeSupported) -> OperatorSupportBase:
    """Adapts a bare `IsNodeSupported` callable into an `OperatorSupportBase`.

    `IsNodeSupported` has the same call signature as
    `OperatorSupportBase.is_node_supported`; every support query on the
    returned instance is forwarded to the wrapped callable.
    """

    class FunctionalOperatorSupport(OperatorSupportBase):
        # Pure delegation to the captured callable.
        def is_node_supported(
            self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
        ) -> bool:
            verdict = is_node_supported(submodules, node)
            return verdict

    return FunctionalOperatorSupport()
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
@compatibility(is_backward_compatible=False)
def chain(*op_support: OperatorSupportBase) -> OperatorSupportBase:
    """Combines a sequence of `OperatorSupportBase` instances into a single
    conjunction: the combined instance reports a node as supported only when
    every member reports it supported (short-circuiting on the first False).
    """
    def _chain(submods, node) -> bool:
        for member in op_support:
            if not member.is_node_supported(submods, node):
                return False
        return True
    return create_op_support(_chain)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
@compatibility(is_backward_compatible=False)
def any_chain(*op_support: OperatorSupportBase) -> OperatorSupportBase:
    """Combines a sequence of `OperatorSupportBase` instances into a single
    disjunction: the combined instance reports a node as supported as soon as
    any member reports it supported (short-circuiting on the first True).
    """
    def _any_chain(submods, node) -> bool:
        for member in op_support:
            if member.is_node_supported(submods, node):
                return True
        return False
    return create_op_support(_any_chain)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
@compatibility(is_backward_compatible=False)
class OpSupports:
    """A set of atomic `OperatorSupportBase` instances that can be combined together
    to form more complex operator support logic.
    """
    @classmethod
    def decline_if_input_dtype(cls, dtype: torch.dtype) -> OperatorSupportBase:
        """Report a node as non-supported, if any of its arguments is of dtype"""

        def _decline_if_input_dtype(
            submodules: t.Mapping[str, torch.nn.Module],
            node: torch.fx.Node,
        ) -> bool:
            for arg in node.all_input_nodes:
                # escape dtype check for get_attr node
                if arg.op == "get_attr":
                    continue
                arg_dtype = _get_arg_dtype(arg)
                # A single matching input dtype disqualifies the node.
                if arg_dtype == dtype:
                    return False
            return True
        return create_op_support(_decline_if_input_dtype)

    @classmethod
    def decline_if_node_in_names(cls, disallow_set: t.Set[str]) -> OperatorSupportBase:
        """
        If a node has a name that is in the disallow set, reported it as non-supported.
        """
        def _decline_if_node_in_names(
            submodules: t.Mapping[str, torch.nn.Module],
            node: torch.fx.Node,
        ) -> bool:
            # Membership test is on the node's graph name, not its target.
            if node.name in disallow_set:
                return False
            else:
                return True
        return create_op_support(_decline_if_node_in_names)
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def _get_arg_dtype(arg: torch.fx.Node) -> t.Any:
|
| 217 |
+
assert isinstance(arg, torch.fx.Node)
|
| 218 |
+
tensor_meta = arg.meta.get("tensor_meta") # type: ignore[union-attr]
|
| 219 |
+
dtype = tensor_meta.dtype if isinstance(tensor_meta, TensorMetadata) else arg.meta["type"]
|
| 220 |
+
return dtype
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/param_fetch.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.fx.graph_module import GraphModule
|
| 2 |
+
from typing import Any, Callable, Dict, List, Tuple, Type
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
|
| 6 |
+
from torch.fx._compatibility import compatibility
|
| 7 |
+
|
| 8 |
+
__all__ = ['default_matching', 'extract_attrs_for_lowering', 'lift_lowering_attrs_to_nodes']
|
| 9 |
+
|
| 10 |
+
# Matching method matches the attribute name of current version to the attribute name of `target_version`
|
| 11 |
+
@compatibility(is_backward_compatible=False)
def default_matching(name: str, target_version: int) -> str:
    """Default matching method: the attribute name is identical across
    versions, so the current name is returned unchanged regardless of
    ``target_version``.
    """
    return name
|
| 16 |
+
|
| 17 |
+
# This dict maps the nn.Module class name to the attribute name list that we want to fetch for lowering.
# The first integer in the tuple is the version number of the nn.Module class when we create the parameter list.
# If there's a version mismatch then it means the parameter names in the book might be mismatched with nn.Module.
# The third element is the matching method used to translate attribute names
# across versions (see `default_matching`).
module_fetch_book: Dict[Type, Tuple[int, List[str], Callable[[str, int], str]]] = {
    torch.nn.modules.linear.Linear: (1, ["weight", "bias"], default_matching),
    torch.nn.modules.conv.Conv2d: (
        1, ["weight", "bias", "kernel_size", "stride", "padding", "dilation", "groups", "padding_mode"], default_matching
    ),
    torch.nn.modules.batchnorm.BatchNorm2d: (2, ["weight", "bias", "running_mean", "running_var", "eps"], default_matching),
    torch.nn.modules.pooling.AdaptiveAvgPool2d: (1, [], default_matching),
    torch.nn.modules.pooling.MaxPool2d: (
        1, ["kernel_size", "stride", "padding", "dilation", "return_indices", "ceil_mode"], default_matching
    ),
    torch.nn.modules.activation.ReLU: (1, ["inplace"], default_matching),
}
|
| 32 |
+
|
| 33 |
+
@compatibility(is_backward_compatible=False)
def extract_attrs_for_lowering(mod: nn.Module) -> Dict[str, Any]:
    """If `mod` is in `module_fetch_book`, fetch the mod's attributes that in the `module_fetch_book`
    after checking module's version is compatible with the `module_fetch_book`.

    Returns a dict that always contains the module's type name under "name",
    plus one entry per attribute listed for the module's class in
    `module_fetch_book`. Raises RuntimeError for unknown module classes or
    when the module's `_version` is newer than the book's recorded version.
    """
    attrs_for_lowering: Dict[str, Any] = {}
    attrs_for_lowering["name"] = torch.typename(mod)

    if type(mod) in module_fetch_book:
        version, param_to_fetch, matching_method = module_fetch_book[type(mod)]
        # The book only guarantees the attribute list up to the version it was
        # written against; a newer module may have renamed attributes.
        if version < mod._version:
            raise RuntimeError(f"Fetcher version {version} try to fetch {torch.typename(mod)} version {mod._version}, "
                               "please upgrade the module_fetch_book, open an issue and @842974287 "
                               "or report a bug to AIACC team directly.")
        for attr in param_to_fetch:
            # The matching method maps the book's attribute name to the name
            # used by this module version.
            attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version))
    else:
        raise RuntimeError(f"{torch.typename(mod)} is not in the module_fetch_book yet, "
                           "please add it to the module_fetch_book, open an issue and @842974287 "
                           "or report a bug to AIACC team directly.")
    return attrs_for_lowering
|
| 54 |
+
|
| 55 |
+
@compatibility(is_backward_compatible=False)
def lift_lowering_attrs_to_nodes(fx_module: GraphModule) -> None:
    """Recursively traverse all `fx_module` nodes and fetch the module's attributes if the node is a leaf module.

    Leaf (non-GraphModule) call_module nodes get an `attrs_for_lowering`
    attribute attached; nested GraphModules are processed recursively.
    """
    submodules = dict(fx_module.named_modules())

    for node in fx_module.graph.nodes:
        if node.op == "call_module":
            if isinstance(submodules[node.target], GraphModule):
                # Recurse into nested graph modules rather than treating them
                # as leaves.
                lift_lowering_attrs_to_nodes(submodules[node.target])
            else:
                node.attrs_for_lowering = extract_attrs_for_lowering(submodules[node.target])
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/pass_manager.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from functools import wraps
|
| 2 |
+
from inspect import unwrap
|
| 3 |
+
from typing import Callable, List, Optional
|
| 4 |
+
import logging
|
| 5 |
+
|
| 6 |
+
logger = logging.getLogger(__name__)
|
| 7 |
+
|
| 8 |
+
__all__ = [
|
| 9 |
+
"PassManager",
|
| 10 |
+
"inplace_wrapper",
|
| 11 |
+
"log_hook",
|
| 12 |
+
"loop_pass",
|
| 13 |
+
"this_before_that_pass_constraint",
|
| 14 |
+
"these_before_those_pass_constraint",
|
| 15 |
+
]
|
| 16 |
+
|
| 17 |
+
# for callables which modify object inplace and return something other than
|
| 18 |
+
# the object on which they act
|
| 19 |
+
def inplace_wrapper(fn: Callable) -> Callable:
    """
    Convenience wrapper for passes which modify an object inplace. This
    wrapper makes them return the modified object instead.

    Args:
        fn (Callable[Object, Any]): in-place pass; its return value is ignored

    Returns:
        wrapped_fn (Callable[Object, Object]): calls ``fn`` on its argument
        and returns the (mutated) argument itself
    """

    @wraps(fn)
    def wrapped_fn(gm):
        # The return value of `fn` is intentionally discarded; `gm` itself is
        # mutated in place and passed along. (The previous unused `val`
        # binding is removed.)
        fn(gm)
        return gm

    return wrapped_fn
|
| 37 |
+
|
| 38 |
+
def log_hook(fn: Callable, level=logging.INFO) -> Callable:
    """
    Wraps ``fn`` so its return value is logged at ``level`` after every call,
    then passed through unchanged.

    This is useful for logging output of passes. Note inplace_wrapper replaces
    the pass output with the modified object. If we want to log the original
    output, apply this wrapper before inplace_wrapper.


    ```
    def my_pass(d: Dict) -> bool:
        changed = False
        if 'foo' in d:
            d['foo'] = 'bar'
            changed = True
        return changed

    pm = PassManager(
        passes=[
            inplace_wrapper(log_hook(my_pass))
        ]
    )
    ```

    Args:
        fn (Callable[Type1, Type2])
        level: logging level (e.g. logging.INFO)

    Returns:
        wrapped_fn (Callable[Type1, Type2])
    """
    @wraps(fn)
    def wrapped_fn(gm):
        result = fn(gm)
        # Lazy %-style args keep formatting cost off the hot path when the
        # level is disabled.
        logger.log(level, "Ran pass %s\t Return value: %s", fn, result)
        return result

    return wrapped_fn
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def loop_pass(base_pass: Callable, n_iter: Optional[int] = None, predicate: Optional[Callable] = None):
    """
    Convenience wrapper for passes which need to be applied multiple times.

    Exactly one of `n_iter` or `predicate` must be specified.

    Args:
        base_pass (Callable[Object, Object]): pass to be applied in loop
        n_iter (int, optional): number of times to loop pass
        predicate (Callable[Object, bool], optional): keep looping while this
            holds on the current output

    """
    assert (n_iter is not None) ^ (
        predicate is not None
    ), "Exactly one of `n_iter`or `predicate` must be specified."

    @wraps(base_pass)
    def new_pass(source):
        result = source
        if predicate is not None:
            # Predicate-driven loop: apply until the predicate stops holding.
            while predicate(result):
                result = base_pass(result)
        elif n_iter is not None and n_iter > 0:
            # Count-driven loop with a fixed number of applications.
            for _ in range(n_iter):
                result = base_pass(result)
        else:
            # Only reachable when n_iter was supplied but is not positive.
            raise RuntimeError(
                f"loop_pass must be given positive int n_iter (given "
                f"{n_iter}) xor predicate (given {predicate})"
            )
        return result

    return new_pass
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# Pass Schedule Constraints:
|
| 115 |
+
#
|
| 116 |
+
# Implemented as 'depends on' operators. A constraint is satisfied iff a list
|
| 117 |
+
# has a valid partial ordering according to this comparison operator.
|
| 118 |
+
def _validate_pass_schedule_constraint(
|
| 119 |
+
constraint: Callable[[Callable, Callable], bool], passes: List[Callable]
|
| 120 |
+
):
|
| 121 |
+
for i, a in enumerate(passes):
|
| 122 |
+
for j, b in enumerate(passes[i + 1 :]):
|
| 123 |
+
if constraint(a, b):
|
| 124 |
+
continue
|
| 125 |
+
raise RuntimeError(
|
| 126 |
+
f"pass schedule constraint violated. Expected {a} before {b}"
|
| 127 |
+
f" but found {a} at index {i} and {b} at index{j} in pass"
|
| 128 |
+
f" list."
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def this_before_that_pass_constraint(this: Callable, that: Callable):
    """
    Defines a partial order ('depends on' function) where `this` must occur
    before `that`.
    """

    def depends_on(a: Callable, b: Callable):
        # Only the ordering `that` ahead of `this` is rejected.
        return not (a == that and b == this)

    return depends_on
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def these_before_those_pass_constraint(these: Callable, those: Callable):
    """
    Defines a partial order ('depends on' function) where `these` must occur
    before `those`, with both sides run through `inspect.unwrap` before
    comparison so decorated/wrapped passes match their underlying callables.

    For example, the following pass list and constraint list would be invalid.
    ```
    passes = [
        loop_pass(pass_b, 3),
        loop_pass(pass_a, 5),
    ]

    constraints = [
        these_before_those_pass_constraint(pass_a, pass_b)
    ]
    ```

    Args:
        these (Callable): pass which should occur first
        those (Callable): pass which should occur later

    Returns:
        depends_on (Callable[[Object, Object], bool]
    """

    def depends_on(a: Callable, b: Callable):
        # Reject only `those` scheduled ahead of `these` (after unwrapping).
        return not (unwrap(a) == those and unwrap(b) == these)

    return depends_on
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
class PassManager:
    """
    Construct a PassManager.

    Collects passes and constraints. This defines the pass schedule, manages
    pass constraints and pass execution.

    Args:
        passes (Optional[List[Callable]]): list of passes. A pass is a
            callable which modifies an object and returns modified object
        constraint (Optional[List[Callable]]): list of constraints. A
            constraint is a callable which takes two passes (A, B) and returns
            True if A depends on B and False otherwise. See implementation of
            `this_before_that_pass_constraint` for example.
    """

    passes: List[Callable]
    constraints: List[Callable]
    # Reset to False whenever the schedule or constraints change so the next
    # __call__ re-validates.
    _validated: bool = False

    def __init__(
        self,
        passes=None,
        constraints=None,
    ):
        self.passes = passes or []
        self.constraints = constraints or []

    @classmethod
    def build_from_passlist(cls, passes):
        """Alternate constructor from a bare pass list."""
        # Use `cls` (not the hard-coded class name) so that subclasses get
        # instances of themselves, as expected of a classmethod constructor.
        pm = cls(passes)
        # TODO(alexbeloi): add constraint management/validation
        return pm

    def add_pass(self, _pass: Callable):
        """Appends a pass to the schedule and marks it for re-validation."""
        self.passes.append(_pass)
        self._validated = False

    def add_constraint(self, constraint):
        """Appends a constraint and marks the schedule for re-validation."""
        self.constraints.append(constraint)
        self._validated = False

    def remove_pass(self, _passes: List[str]):
        """Removes every pass whose ``__name__`` is in `_passes`.

        Note: `_passes` is a list of pass *names* (strings), not callables;
        the annotation previously said ``List[Callable]`` which was wrong.
        """
        if _passes is None:
            return
        passes_left = []
        for ps in self.passes:
            if ps.__name__ not in _passes:
                passes_left.append(ps)
        self.passes = passes_left
        self._validated = False

    def validate(self):
        """
        Validates that current pass schedule defined by `self.passes` is valid
        according to all constraints in `self.constraints`
        """
        if self._validated:
            return
        for constraint in self.constraints:
            _validate_pass_schedule_constraint(constraint, self.passes)
        self._validated = True

    def __call__(self, source):
        """Validates the schedule, then threads `source` through every pass
        in order, returning the final output."""
        self.validate()
        out = source
        for _pass in self.passes:
            out = _pass(out)
        return out
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/shape_prop.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.fx
|
| 3 |
+
import traceback
|
| 4 |
+
|
| 5 |
+
from torch._dispatch.python import enable_python_dispatcher
|
| 6 |
+
from torch.fx.node import Node, map_aggregate
|
| 7 |
+
from typing import Any, Tuple, NamedTuple, Optional, Dict
|
| 8 |
+
from torch.fx._compatibility import compatibility
|
| 9 |
+
from torch._guards import detect_fake_mode
|
| 10 |
+
|
| 11 |
+
__all__ = ['TensorMetadata', 'ShapeProp']
|
| 12 |
+
|
| 13 |
+
@compatibility(is_backward_compatible=True)
class TensorMetadata(NamedTuple):
    # TensorMetadata is a structure containing pertinent information
    # about a tensor within a PyTorch program.

    # General Tensor metadata
    shape : torch.Size                            # tensor shape at record time
    dtype : torch.dtype                           # element dtype
    requires_grad : bool                          # autograd tracking flag
    stride : Tuple[int, ...]                      # per-dimension strides (in elements)
    # First of {contiguous_format, channels_last, channels_last_3d} that the
    # tensor is contiguous in, or None if it matches none of them.
    memory_format : Optional[torch.memory_format]

    # Quantization metadata
    is_quantized : bool                           # True for quantized tensors
    # Populated only when is_quantized: qscheme plus scale/zero_point
    # (scalars for per-tensor schemes, lists for per-channel) and axis.
    qparams: Dict[str, Any]
|
| 28 |
+
|
| 29 |
+
def _extract_tensor_metadata(result : torch.Tensor) -> TensorMetadata:
|
| 30 |
+
"""
|
| 31 |
+
Extract a TensorMetadata NamedTuple describing `result`.
|
| 32 |
+
"""
|
| 33 |
+
shape = result.shape
|
| 34 |
+
dtype = result.dtype
|
| 35 |
+
requires_grad = result.requires_grad
|
| 36 |
+
stride = result.stride()
|
| 37 |
+
|
| 38 |
+
memory_formats = {
|
| 39 |
+
torch.contiguous_format,
|
| 40 |
+
torch.channels_last,
|
| 41 |
+
torch.channels_last_3d,
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
memory_format = None
|
| 45 |
+
|
| 46 |
+
for query_format in memory_formats:
|
| 47 |
+
if result.is_contiguous(memory_format=query_format):
|
| 48 |
+
memory_format = query_format
|
| 49 |
+
break
|
| 50 |
+
|
| 51 |
+
is_quantized = result.is_quantized
|
| 52 |
+
qparams: Dict[str, Any] = {}
|
| 53 |
+
if is_quantized:
|
| 54 |
+
qscheme = result.qscheme()
|
| 55 |
+
qparams["qscheme"] = qscheme
|
| 56 |
+
if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
|
| 57 |
+
qparams["scale"] = result.q_scale() # type: ignore[assignment]
|
| 58 |
+
qparams["zero_point"] = result.q_zero_point() # type: ignore[assignment]
|
| 59 |
+
elif qscheme in {torch.per_channel_affine, torch.per_channel_affine_float_qparams, torch.per_channel_symmetric}:
|
| 60 |
+
# In this branch, scale and zero_point are expected to be tensors,
|
| 61 |
+
# we store the values as immutable_list in TensorMetadata for
|
| 62 |
+
# easier serialization downstream
|
| 63 |
+
qparams["scale"] = result.q_per_channel_scales().tolist() # type: ignore[assignment]
|
| 64 |
+
qparams["zero_point"] = result.q_per_channel_zero_points().tolist() # type: ignore[assignment]
|
| 65 |
+
qparams["axis"] = result.q_per_channel_axis() # type: ignore[assignment]
|
| 66 |
+
|
| 67 |
+
return TensorMetadata(
|
| 68 |
+
shape, dtype, requires_grad, stride, memory_format, is_quantized, qparams)
|
| 69 |
+
|
| 70 |
+
@compatibility(is_backward_compatible=True)
class ShapeProp(torch.fx.Interpreter):
    """
    Execute an FX graph Node-by-Node and
    record the shape and type of the result
    into the corresponding node.

    Example:
         In this example, we record the shape
         and data type of a module given
         an example input ``torch.randn(50, D_in)``.
         We print the name, shape and dtype of each node.

        class TwoLayerNet(torch.nn.Module):
            def __init__(self, D_in, H, D_out):
                super().__init__()
                self.linear1 = torch.nn.Linear(D_in, H)
                self.linear2 = torch.nn.Linear(H, D_out)
            def forward(self, x):
                h_relu = self.linear1(x).clamp(min=0)
                y_pred = self.linear2(h_relu)
                return y_pred
        N, D_in, H, D_out = 64, 1000, 100, 10
        x = torch.randn(N, D_in)
        y = torch.randn(N, D_out)
        model = TwoLayerNet(D_in, H, D_out)
        gm = torch.fx.symbolic_trace(model)
        sample_input = torch.randn(50, D_in)
        ShapeProp(gm).propagate(sample_input)

        for node in gm.graph.nodes:
            print(node.name, node.meta['tensor_meta'].dtype,
                node.meta['tensor_meta'].shape)

        The output of this code is:

        x torch.float32 torch.Size([50, 1000])
        linear1 torch.float32 torch.Size([50, 100])
        clamp_1 torch.float32 torch.Size([50, 100])
        linear2 torch.float32 torch.Size([50, 10])
        output torch.float32 torch.Size([50, 10])

    Args:
         module (GraphModule): The module to be executed
         fake_mode (FakeTensorMode): A fake mode for copying the gm

    """
    def __init__(self, gm, fake_mode=None):
        super().__init__(gm)
        # If no fake mode was given, try to pick one up from the ambient
        # context (e.g. when called from within dynamo/fake-tensor tracing).
        if fake_mode is None:
            fake_mode = detect_fake_mode()
        if fake_mode is not None:
            from torch._dynamo.utils import deepcopy_to_fake_tensor
            # Note:
            # We need fake execution cause the inputs are fake, however, we cannot fakify the module
            # - because we need to write to the tensor_meta of the real module. So we fakify to
            # produce a result (in run_node below), to extract tensor meta, and then keep going.
            #
            # If we were to fakify, we would write to the wrong node, and then downstream fusion
            # would be missing the tensor_meta.
            #
            # See torch/_inductor/overrides.py for where this is called upstream of fusion.
            self.fake_module = deepcopy_to_fake_tensor(self.module, fake_mode)
            self.fake_mode = fake_mode
        else:
            self.fake_module = None
            self.fake_mode = None

        # Keep a handle to the real module so run_node can restore it after
        # temporarily swapping in the fake copy.
        self.real_module = self.module

    def run_node(self, n : Node) -> Any:
        """Execute one node (under fake mode if configured), then record
        ``tensor_meta`` and ``type`` into ``n.meta``.

        Raises:
            RuntimeError: wrapping any exception raised while executing `n`,
                with the node's formatted repr and meta for context.
        """
        try:
            if self.fake_module is not None:
                # Hacky swap. Alternatively, we could do this with overriding
                # call_module and get_attr.
                self.module = self.fake_module
            try:
                if self.fake_mode is not None:
                    with self.fake_mode, enable_python_dispatcher():
                        result = super().run_node(n)
                else:
                    result = super().run_node(n)
            finally:
                # Always restore the real module, even if execution failed.
                self.module = self.real_module
        except Exception as e:
            traceback.print_exc()
            raise RuntimeError(
                f"ShapeProp error for: node={n.format_node()} with "
                f"meta={n.meta}"
            ) from e

        found_tensor = False

        def extract_tensor_meta(obj):
            # Replace tensors with their metadata; leave everything else as-is.
            if isinstance(obj, torch.Tensor):
                nonlocal found_tensor
                found_tensor = True
                return _extract_tensor_metadata(obj)
            else:
                return obj

        meta = map_aggregate(result, extract_tensor_meta)
        # Only attach tensor_meta when the result actually contained a tensor.
        if found_tensor:
            n.meta['tensor_meta'] = meta

        n.meta['type'] = type(result)
        return result

    def propagate(self, *args):
        """
        Run `module` via interpretation and return the result and
        record the shape and type of each node.

        Args:
            *args (Tensor): the sample input.

        Returns:
            Any: The value returned from executing the Module
        """
        if self.fake_mode is not None:
            # Convert real tensor inputs to fake tensors so execution under
            # fake mode never touches real data.
            fake_args = [self.fake_mode.from_tensor(t) if isinstance(t, torch.Tensor) else t for t in args]
        else:
            fake_args = args
        return super().run(*fake_args)
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/split_module.py
ADDED
|
@@ -0,0 +1,370 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
from typing import Any, Callable, Dict, List, Optional
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch.fx._compatibility import compatibility
|
| 6 |
+
from torch.fx.graph_module import GraphModule
|
| 7 |
+
|
| 8 |
+
__all__ = ["Partition", "split_module"]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@compatibility(is_backward_compatible=True)
class Partition:
    """Bookkeeping container for one partition produced by ``split_module``.

    Tracks the nodes assigned to the partition, its cross-partition inputs
    and outputs, inter-partition dependency edges, and the new subgraph being
    built for the eventual submodule.
    """

    def __init__(self, name: str):
        self.name: str = name
        # Name under which the partition's GraphModule is installed on the parent.
        self.submod_name = f"submod_{name}"
        # Names of the original-graph nodes assigned to this partition.
        self.node_names: List[str] = []
        # Ordered-set semantics via dict-with-None-values.
        self.inputs: Dict[str, None] = {}
        self.outputs: Dict[str, None] = {}
        self.partitions_dependent_on: Dict[str, None] = {}
        self.partition_dependents: Dict[str, None] = {}
        # The new subgraph for this partition and the mapping from original
        # nodes to their copies inside it.
        self.graph: torch.fx.graph.Graph = torch.fx.graph.Graph()
        self.environment: Dict[torch.fx.node.Node, torch.fx.node.Node] = {}
        # Attribute targets (submodules/parameters) the subgraph references.
        self.targets: Dict[str, Any] = {}

    def __repr__(self) -> str:
        lines = [
            f"name: {self.name},",
            f" nodes: {self.node_names},",
            f" inputs: {self.inputs},",
            f" outputs: {self.outputs},",
            f" partitions dependent on: {self.partitions_dependent_on},",
            f" partition dependents: {self.partition_dependents}",
        ]
        return "\n".join(lines)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# Creates subgraphs out of main graph
|
| 37 |
+
@compatibility(is_backward_compatible=True)
def split_module(
    m: GraphModule,
    root_m: torch.nn.Module,
    split_callback: Callable[[torch.fx.node.Node], int],
    qualname_map: Optional[Dict[str, str]] = None,
    keep_original_order: Optional[bool] = False,
):
    """
    Creates subgraphs out of main graph

    Args:
        m (GraphModule): Graph module to split
        root_m (torch.nn.Module): root nn module. Not currently used. Included
            because the root nn module is usually transformed via
            torch.fx._symbolic_trace.symbolic_trace (see example below)
        split_callback (Callable[[torch.fx.node.Node], int]): Callable function
            that maps a given Node instance to a numeric partition identifier.
            split_module will use this function as the policy for which operations
            appear in which partitions in the output Module.
        qualname_map: Optional[Dict[str, str]]: optional output parameter that returns a
            mapping from new target names in the module after split to old target
            names in the original module.
        keep_original_order: Optional[bool]: keep the original order of the GraphModule
            or use the Topological order of the new constructed GraphModule


    Returns:
        GraphModule: the module after split.

    Example:

        This is a sample setup:

            import torch
            from torch.fx.symbolic_trace import symbolic_trace
            from torch.fx.graph_module import GraphModule
            from torch.fx.node import Node
            from torch.fx.passes.split_module import split_module

            class MyModule(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    self.param = torch.nn.Parameter(torch.rand(3, 4))
                    self.linear = torch.nn.Linear(4, 5)

                def forward(self, x, y):
                    z = self.linear(x + self.param).clamp(min=0.0, max=1.0)
                    w = self.linear(y).clamp(min=0.0, max=1.0)
                    return z + w

            # symbolically trace model
            my_module = MyModule()
            my_module_traced = symbolic_trace(my_module)

            # random mod partitioning
            partition_counter = 0
            NPARTITIONS = 3

            def mod_partition(node: Node):
                global partition_counter
                partition = partition_counter % NPARTITIONS
                partition_counter = (partition_counter + 1) % NPARTITIONS
                return partition

            # split module in module with submodules
            module_with_submodules = split_module(
                my_module_traced, my_module, mod_partition
            )

        Output looks like this. Original graph is broken into partitions

            > print(module_with_submodules)
            GraphModule(
                (submod_0): GraphModule(
                    (linear): Linear(in_features=4, out_features=5, bias=True)
                )
                (submod_1): GraphModule(
                    (linear): Linear(in_features=4, out_features=5, bias=True)
                )
                (submod_2): GraphModule()
            )

            def forward(self, x, y):
                param = self.param
                submod_0 = self.submod_0(x, param, y);  x = param = y = None
                getitem = submod_0[0]
                getitem_1 = submod_0[1];  submod_0 = None
                submod_1 = self.submod_1(getitem, getitem_1);  getitem = getitem_1 = None
                getitem_2 = submod_1[0]
                getitem_3 = submod_1[1];  submod_1 = None
                submod_2 = self.submod_2(getitem_2, getitem_3);  getitem_2 = getitem_3 = None
                return submod_2

        Output of split module is the same as output of input traced module.
        This is an example within a test setting:

            > orig_out = my_module_traced(x, y)
            > submodules_out = module_with_submodules(x, y)
            > self.assertEqual(orig_out, submodules_out)
            True
    """

    # Copies one placeholder or get_attr node from the original graph into the
    # base (parent) graph, recording the new node in base_mod_env and, for
    # get_attr, resolving and stashing the attribute value in base_mod_attrs.
    # Other node ops are left untouched. Closes over `base_mod_graph` and `m`.
    def construct_graph(
        node: torch.fx.node.Node,
        base_mod_env: Dict[str, torch.fx.node.Node],
        base_mod_attrs: Dict[str, torch.fx.graph_module.GraphModule],
    ):
        if node.op == "placeholder":
            # Preserve the original default value, if any.
            default_value = (
                node.args[0] if len(node.args) > 0 else inspect.Signature.empty
            )
            base_mod_env[node.name] = base_mod_graph.placeholder(
                node.target, type_expr=node.type, default_value=default_value
            )
            base_mod_env[node.name].meta = node.meta.copy()
        elif node.op == "get_attr":
            base_mod_env[node.name] = base_mod_graph.get_attr(node.target)
            base_mod_env[node.name].meta = node.meta.copy()
            # Resolve the dotted attribute path against the original module.
            attr_val = m
            for atom in node.target.split("."):  # type: ignore[union-attr]
                if not hasattr(attr_val, atom):
                    raise AttributeError(f"Node target {node.target} not found!")
                attr_val = getattr(attr_val, atom)
            base_mod_attrs[node.target] = attr_val  # type: ignore[index]
        return base_mod_env, base_mod_attrs

    partitions: Dict[str, Partition] = {}
    orig_nodes: Dict[str, torch.fx.node.Node] = {}

    # Records a def->use edge that crosses a partition boundary: marks the
    # defining node as an output of its partition, the using node's partition
    # as a dependent, and vice versa. use_node=None means "used by the graph
    # output" (the value must still be exported from its partition).
    def record_cross_partition_use(
        def_node: torch.fx.node.Node, use_node: Optional[torch.fx.node.Node]
    ):  # noqa: B950
        def_partition_name = getattr(def_node, "_fx_partition", None)
        use_partition_name = getattr(use_node, "_fx_partition", None)
        if def_partition_name != use_partition_name:
            if def_partition_name is not None:
                def_partition = partitions[def_partition_name]
                def_partition.outputs.setdefault(def_node.name)
                if use_partition_name is not None:
                    def_partition.partition_dependents.setdefault(use_partition_name)

            if use_partition_name is not None:
                use_partition = partitions[use_partition_name]
                use_partition.inputs.setdefault(def_node.name)
                if def_partition_name is not None:
                    use_partition.partitions_dependent_on.setdefault(def_partition_name)

    # split nodes into partitions
    for node in m.graph.nodes:
        orig_nodes[node.name] = node

        # TODO currently placeholders/parameters aren't put into random partitions,
        # rather they're added to the graphs where they are used down below
        if node.op in ["placeholder", "get_attr"]:
            continue
        if node.op == "output":
            torch.fx.graph.map_arg(
                node.args[0], lambda n: record_cross_partition_use(n, None)
            )
            continue
        partition_name = str(split_callback(node))

        # add node to partitions
        partition = partitions.get(partition_name)
        if partition is None:
            partitions[partition_name] = partition = Partition(partition_name)

        partition.node_names.append(node.name)
        # Tag the node so record_cross_partition_use can find its partition.
        node._fx_partition = partition_name

        torch.fx.graph.map_arg(
            node.args, lambda def_node: record_cross_partition_use(def_node, node)
        )
        torch.fx.graph.map_arg(
            node.kwargs, lambda def_node: record_cross_partition_use(def_node, node)
        )  # noqa: B950

    original_partition_order = list(partitions.keys())
    # find partitions with no dependencies
    root_partitions: List[str] = []
    for partition_name, partition in partitions.items():
        if not len(partition.partitions_dependent_on):
            root_partitions.append(partition_name)

    # check partitions for circular dependencies and create topological partition ordering
    # (Kahn's algorithm; note it consumes partitions_dependent_on).
    sorted_partitions: List[str] = []
    while root_partitions:
        root_partition = root_partitions.pop()
        sorted_partitions.append(root_partition)
        for dependent in partitions[root_partition].partition_dependents:
            partitions[dependent].partitions_dependent_on.pop(root_partition)
            if not partitions[dependent].partitions_dependent_on:
                root_partitions.append(dependent)
    if len(sorted_partitions) != len(partitions):
        raise RuntimeError("cycle exists between partitions!")

    # add placeholders to partitions
    for partition_name in sorted_partitions:
        partition = partitions[partition_name]
        for input in partition.inputs:
            placeholder = partition.graph.placeholder(
                input,
                type_expr=orig_nodes[input].type,
            )
            placeholder.meta = orig_nodes[input].meta.copy()
            partition.environment[orig_nodes[input]] = placeholder

    # Transform nodes and collect targets for partition's submodule
    for node in m.graph.nodes:
        if hasattr(node, "_fx_partition"):
            partition = partitions[node._fx_partition]

            # swap out old graph nodes in kw/args with references to new nodes in this submodule
            environment = partition.environment
            gathered_args = torch.fx.graph.map_arg(node.args, lambda n: environment[n])
            gathered_kwargs = torch.fx.graph.map_arg(
                node.kwargs, lambda n: environment[n]
            )

            if node.op not in ["call_module", "get_attr"]:
                target = node.target
            else:
                # Flatten the dotted target (e.g. "sub.linear" -> "sub_linear")
                # since attributes are re-homed flat on the new submodule.
                target_atoms = node.target.split(".")
                target_attr = m
                for atom in target_atoms:
                    if not hasattr(target_attr, atom):
                        raise AttributeError(f"Operator target {node.target} not found!")
                    target_attr = getattr(target_attr, atom)
                target = "_".join(target_atoms)
                partition.targets[target] = target_attr
                # Fill in the passed-in mapping from new qualname to old qualname
                if qualname_map is not None:
                    # When creating the split module later, the submodules will have
                    # path prefix matching the corresponding partition's submod_name
                    qualname = f"{partition.submod_name}.{target}"
                    qualname_map[qualname] = node.target

            assert isinstance(gathered_args, tuple)
            assert isinstance(gathered_kwargs, dict)
            new_node = partition.graph.create_node(
                op=node.op,
                target=target,
                args=gathered_args,
                kwargs=gathered_kwargs,
                type_expr=node.type,
            )
            new_node.meta = node.meta.copy()
            partition.environment[node] = new_node

    # original module environment dict mapping node names to nodes
    org_mod_env: Dict[str, torch.fx.node.Node] = {}
    # Set up values to construct base module
    base_mod_env: Dict[str, torch.fx.node.Node] = {}
    base_mod_graph: torch.fx.graph.Graph = torch.fx.graph.Graph()
    base_mod_attrs: Dict[str, torch.fx.graph_module.GraphModule] = {}
    if not keep_original_order:
        for node in m.graph.nodes:
            base_mod_env, base_mod_attrs = construct_graph(
                node, base_mod_env, base_mod_attrs
            )

    else:
        # Go through the graph to construct the mapping dict
        for node in m.graph.nodes:
            org_mod_env[node.name] = node

    # Do some things iterating over the partitions in topological order again:
    # 1) Finish off submodule Graphs by setting corresponding outputs
    # 2) Construct GraphModules for each submodule
    # 3) Construct the base graph by emitting calls to those submodules in
    #    topological order or original order specified by keep_original_order

    construct_order_partitions = (
        sorted_partitions if not keep_original_order else original_partition_order
    )

    already_constructed_attr_nodes = set()
    for partition_name in construct_order_partitions:
        partition = partitions[partition_name]

        # Set correct output values
        output_vals = tuple(
            partition.environment[orig_nodes[name]] for name in partition.outputs
        )

        # skip output node generation if there are no output values
        num_output_vals = len(output_vals)
        if num_output_vals == 1:
            partition.graph.output(output_vals[0])
        elif num_output_vals > 1:
            partition.graph.output(output_vals)

        if keep_original_order:
            # first get the attr nodes required by this partition
            org_mod_attr_nodes: List[torch.fx.node.Node] = [
                org_mod_env[key] for key in partition.inputs
            ]
            # Construct GraphModule for this partition
            for node in org_mod_attr_nodes:  # type: ignore[attr-defined]
                if node in already_constructed_attr_nodes:
                    continue
                base_mod_env, base_mod_attrs = construct_graph(
                    node, base_mod_env, base_mod_attrs
                )
                already_constructed_attr_nodes.add(node)

        base_mod_attrs[partition.submod_name] = torch.fx.graph_module.GraphModule(
            partition.targets, partition.graph
        )  # noqa: B950

        # Emit call in base graph to this submodule
        output_val = base_mod_graph.call_module(
            partition.submod_name,
            tuple(base_mod_env[name] for name in partition.inputs),
        )

        num_outputs = len(partition.outputs)
        if num_outputs > 1:
            # Unpack multiple return values from submodule
            output_val_proxy = torch.fx.proxy.Proxy(output_val)
            for i, output_name in enumerate(partition.outputs):
                base_mod_env[output_name] = output_val_proxy[i].node  # type: ignore[index]
        elif num_outputs == 1:
            base_mod_env[list(partition.outputs)[0]] = output_val

    for node in m.graph.nodes:
        if node.op == "output":
            base_mod_graph.output(
                torch.fx.graph.map_arg(node.args[0], lambda n: base_mod_env[n.name])
            )  # noqa: B950

    return torch.fx.graph_module.GraphModule(base_mod_attrs, base_mod_graph)
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/split_utils.py
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
from dataclasses import dataclass, field
|
| 3 |
+
from typing import Dict, List, Optional
|
| 4 |
+
|
| 5 |
+
import torch.fx
|
| 6 |
+
from torch.fx._compatibility import compatibility
|
| 7 |
+
from torch.fx.graph import map_arg
|
| 8 |
+
from torch.fx.passes.utils import HolderModule, lift_subgraph_as_module
|
| 9 |
+
|
| 10 |
+
from .tools_common import NodeList
|
| 11 |
+
|
| 12 |
+
__all__ = ["getattr_recursive", "setattr_recursive", "Component", "split_by_tags"]
|
| 13 |
+
|
| 14 |
+
@compatibility(is_backward_compatible=False)
def getattr_recursive(obj, name):
    """Resolve the dotted attribute path `name` starting from `obj`.

    Returns the attribute at the end of the path, or None as soon as any
    segment of the path is missing.
    """
    _missing = object()
    current = obj
    for segment in name.split("."):
        current = getattr(current, segment, _missing)
        if current is _missing:
            return None
    return current
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@compatibility(is_backward_compatible=False)
def setattr_recursive(obj, attr, value):
    """Set `value` at the dotted attribute path `attr` starting from `obj`.

    Walks every intermediate segment with getattr (raising AttributeError if
    one is missing) and assigns the final segment on the innermost object.
    """
    *parents, leaf = attr.split(".")
    target = obj
    for segment in parents:
        target = getattr(target, segment)
    setattr(target, leaf, value)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@compatibility(is_backward_compatible=False)
@dataclass
class Component:
    """
    A component serves as a container for a subgraph we want to create afterwards.
    """

    # The new subgraph under construction for this component.
    graph: torch.fx.Graph
    # Position of this component in the overall split order.
    order: int
    # Name of the component (used as the submodule name).
    name: str

    # Stores the placeholder nodes in `graph`.
    input_placeholders: List = field(default_factory=list)

    # Store the nodes in original graph that are placeholder in `graph`.
    orig_inputs: List = field(default_factory=list)

    # Store the nodes in original graph that are outputs in `graph`.
    orig_outputs: List = field(default_factory=list)

    # Mapping from get_attr node in original graph to get_attr node in `graph`.
    getattr_maps: Dict[torch.fx.Node, torch.fx.Node] = field(default_factory=dict)
    # NOTE(review): usage not visible in this chunk — presumably extra argument
    # names for the component's constructor; confirm against split_by_tags.
    constructor_args: List[str] = field(default_factory=list)
    # The GraphModule built from `graph`, once constructed (None until then).
    gm: Optional[torch.fx.GraphModule] = None
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
@compatibility(is_backward_compatible=False)
def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule:
    """
    Splits a GraphModule using tags on its graph nodes. We honor the order of
    tags. For example, we have tags = ["a", "b", "c"], the function will create
    the initial submodules in the order of "a_0", "b_1", "c_2".

    To set a tag:
    gm.graph.nodes[idx].tag = "mytag"

    This will result in all nodes with the same tag being extracted and placed in their
    own submodule. For placeholder, output and get_attr node, the tag is ignored. placeholder
    and output nodes are created when needed while get_attr nodes get copied to submodules
    where they are used.

    Given the following module def:

    class SimpleModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear1 = torch.nn.Linear(...)
            self.linear2 = torch.nn.Linear(...)
            self.linear3 = torch.nn.Linear(...)

        def forward(self, in1, in2):
            r1 = self.linear1(in1)
            r2 = self.linear2(in2)
            r3 = torch.cat([r1, r2])
            return self.linear3(r3)

    Marking the node corresponding to in1 with the tag sc.REQUEST_ONLY.lower() results in the following split:

    ro_0:
    def forward(self, in1):
        self = self.root
        linear1 = self.linear1(in1)
        return linear1

    main_1:
    def forward(self, in2, linear1):
        self = self.root
        linear2 = self.linear2(in2)
        cat_1 = torch.cat([linear1, linear2])
        linear3 = self.linear3(cat_1)
        return linear3

    main_0:
    def forward(self, in1, in2):
        self = self.root
        ro_0 = self.ro_0(in1)
        main_1 = self.main_1(in2, ro_0)
        return main_1
    """

    def flatten(x: torch.fx.node.Argument) -> NodeList:
        """
        Stores nodes in x to a list and returns the list.
        """
        r: NodeList = []
        map_arg(x, r.append)
        return r

    # Mapping from node in original module to node in created submodule.
    node_remapping: Dict[torch.fx.Node, torch.fx.Node] = {}

    # Mapping from node in original module or created submodules to
    # corresponding component.
    node_to_component: Dict[torch.fx.Node, Component] = {}

    # Mapping from tag to the corresponding component.
    tag_to_component: Dict[str, Component] = {}

    # Stores all components.
    all_components: List[Component] = []

    # Stores nodes that will be used in main graph.
    # A dict (not a set) so iteration order matches insertion order.
    used_in_main: Dict[torch.fx.Node, None] = {}

    # Main graph after split.
    main_g = torch.fx.Graph()

    # Mapping from node in original module to node in main graph after split.
    main_remapping: Dict[torch.fx.Node, torch.fx.Node] = {}

    # Output node of original module.
    output_node: Optional[torch.fx.Node] = None

    # Create a component for each tag, we don't expect to create other components afterwards.
    # Component order equals its index in `tags`, which later enforces that data
    # only flows from earlier components to later ones (see the order assert below).
    for tag in tags:
        comp = Component(torch.fx.Graph(), len(all_components), f"{tag}")
        all_components.append(comp)
        tag_to_component[tag] = comp

    # Traverse the nodes in original graph and take care of them.
    for node in gm.graph.nodes:
        if node.op == "output":
            if output_node is not None:
                raise RuntimeError("Multiple output nodes in graph!")
            output_node = node
            continue

        # Placeholders in the original graph get copied to main graph.
        if node.op == "placeholder":
            main_remapping[node] = main_g.placeholder(node.name, type_expr=node.type)
            main_remapping[node].meta = copy.copy(node.meta)
            continue

        # Get_attr nodes are ignored because we are not tagging them.
        # Instead, we copy them directly to the submodules use them afterwards.
        if node.op == "get_attr":
            continue

        # Now we process callable nodes which are nodes with op of call_module,
        # call_function or call_method. Every callable nodes should be tagged.
        assert hasattr(node, "tag")

        upstream_components = [
            node_to_component[x]
            for x in flatten(node.args) + flatten(node.kwargs)
            if x.op not in {"placeholder", "get_attr"}
        ]

        comp = tag_to_component[node.tag]
        node_to_component[node] = comp

        # Max order of upperstream components.
        mx = max((c.order for c in upstream_components), default=0)

        # Expect the component for `node` has higher order then its upstream components.
        assert comp.order >= mx

        # Map a input of `node` to nodes in the component's graph.
        # NOTE: this closure captures the current `comp`; it is only invoked
        # synchronously by the `node_copy` call right below, before `comp`
        # is rebound on the next loop iteration.
        def remap_func(x):
            # If input is a get_attr node, copy it to current component's graph.
            # Returns the get_attr node in current component's graph.
            if x.op == "get_attr":
                if x not in comp.getattr_maps:
                    comp.getattr_maps[x] = comp.graph.get_attr(
                        x.target, type_expr=x.type
                    )
                return comp.getattr_maps[x]

            # If input is not a placeholder, it should have been put into a component
            # already. If it's the current component then we return the corresponding
            # node in the component.
            if x.op != "placeholder" and node_to_component[x] == comp:
                return node_remapping[x]

            # If input is a placeholder or it's in other components, we want to make it
            # as a placeholder in current component's graph.
            # `orig_inputs` and `input_placeholders` are kept parallel, so the
            # index lookup below returns the matching placeholder.
            if x not in comp.orig_inputs:
                comp.orig_inputs.append(x)
                placeholder = comp.graph.placeholder(x.name, type_expr=x.type)
                placeholder.meta = copy.copy(x.meta)
                comp.input_placeholders.append(placeholder)
                used_in_main[x] = None

            return comp.input_placeholders[comp.orig_inputs.index(x)]

        n = comp.graph.node_copy(node, remap_func)
        n.tag = node.tag  # type: ignore[attr-defined]
        node_remapping[node] = n
        node_to_component[n] = comp

    if output_node is None:
        raise RuntimeError("Graph had no output node!")

    for x in flatten(output_node.args[0]):
        if x.op == "get_attr":
            # We don't need components mapping for nodes of type "get_attr"
            # that are consumed by the output. Only need to make sure we create
            # corresponding counterparts in the resulting graph.
            main_remapping[x] = main_g.get_attr(x.name, type_expr=x.type)
        else:
            # All component results consumed by the output node should be
            # marked as "used in main".
            used_in_main[x] = None

    # If a node is used in main graph then we mark it as an output in the component
    # it belongs to.
    for n in used_in_main:
        if n.op != "placeholder":
            node_to_component[n].orig_outputs.append(n)

    # Now we create a graphmodule for each component.
    for comp in all_components:
        outs = tuple(map(node_remapping.__getitem__, comp.orig_outputs))

        # Take care of the args of FX output node. If there's a single
        # output then the output node args is like (output_single), else
        # if there're multiple outputs then the output node args is like
        # ((output_0, output_1, ...)).
        comp.graph.output(outs[0] if len(outs) == 1 else outs)

        comp.gm = lift_subgraph_as_module(gm, comp.graph)

        # Create a call_module node in main graph.
        main_node = main_g.call_module(
            comp.name,
            args=tuple(map(main_remapping.__getitem__, comp.orig_inputs)),
            kwargs=None,
        )

        if len(outs) == 1:
            main_remapping[comp.orig_outputs[0]] = main_node
        else:
            for i, o in enumerate(comp.orig_outputs):
                # Use Proxy to record getitem access.
                main_remapping[o] = torch.fx.Proxy(main_node)[i].node  # type: ignore[index]

    main_g.output(map_arg(output_node.args[0], main_remapping.__getitem__))
    main_root = HolderModule({comp.name: comp.gm for comp in all_components})

    # If the output nodes consumes get_attr directly in the original graph,
    # then we need to make sure get_attr is copied to the new graph.
    for x in flatten(output_node.args[0]):
        if x.op == "get_attr":
            setattr(main_root, x.name, getattr_recursive(gm, x.target))  # type: ignore[arg-type]

    return torch.fx.GraphModule(main_root, main_g)
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/splitter_base.py
ADDED
|
@@ -0,0 +1,871 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import copy
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import NamedTuple, Sequence, Iterable, Any, List, Dict, Optional, Tuple
|
| 6 |
+
import logging
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from torch.fx.passes.graph_manipulation import get_size_of_node
|
| 10 |
+
from torch.fx.node import map_arg
|
| 11 |
+
from torch.fx._compatibility import compatibility
|
| 12 |
+
|
| 13 |
+
from .operator_support import (
|
| 14 |
+
get_node_target,
|
| 15 |
+
OperatorSupportBase,
|
| 16 |
+
)
|
| 17 |
+
from .graph_drawer import FxGraphDrawer
|
| 18 |
+
from .shape_prop import ShapeProp
|
| 19 |
+
from .split_utils import split_by_tags
|
| 20 |
+
from .tools_common import (
|
| 21 |
+
FxNetAccFusionsFinder,
|
| 22 |
+
CALLABLE_NODE_OPS,
|
| 23 |
+
Tensors,
|
| 24 |
+
NodeList,
|
| 25 |
+
NodeSet,
|
| 26 |
+
is_node_output_tensor,
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Public API of this module.
__all__ = ['FxNetAccNodesFinder', 'FxNetSplitterInternalError', 'Subgraph', 'SplitResult', 'generate_inputs_for_submodules']
_LOGGER = logging.getLogger(__name__)

# Fallback values used by _SplitterSettingBase when neither a constructor
# argument nor the corresponding command-line flag is given.
DEFAULT_MIN_ACC_MODULE_SIZE = 1
DEFAULT_SKIP_FUSION = False
DEFAULT_ALLOW_NON_TENSOR = False
|
| 36 |
+
|
| 37 |
+
class _SplitterSettingBase:
    """Splitter configuration that can be overridden from the command line.

    Each constructor argument has a matching CLI flag; flags present in
    ``sys.argv`` win over the constructor arguments. Unrecognized argv
    entries are ignored (``parse_known_args``), so this is safe to use
    inside larger programs with their own flags.
    """

    def __init__(
        self,
        min_acc_module_size=DEFAULT_MIN_ACC_MODULE_SIZE,
        skip_fusion=DEFAULT_SKIP_FUSION,
        allow_non_tensor=DEFAULT_ALLOW_NON_TENSOR
    ):
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--min-acc-module-size",
            "--min_acc_module_size",
            required=False,
            type=int,
            help="Minimum size limit of an accelerator subgraph.",
        )
        parser.add_argument(
            "--skip-fusion",
            "--skip_fusion",
            default=False,
            action="store_true",
            help="If true then no fusion groups. Fusion group is used to "
            "enforce no non-tensor data flow between submodules. If we don't "
            "have this constrain, setting this to false is recommended as it "
            "can reduce overhead.",
        )
        parser.add_argument(
            "--allow-non-tensor",
            "--allow_non_tensor",
            default=False,
            action="store_true",
            help="For some backends non-tensor data flow between cpu and them "
            "are not allowed. Therefore, if a node supported by accelerator but "
            "it has non-tensor inputs or outputs to a cpu node we would want to "
            "consider it as a cpu node during splitting. However, for some backends "
            "we might not care about non-tensor data flow and we can set this option "
            "to true to disable the functionality that prevent non-tensor data flow.",
        )
        args, unknown = parser.parse_known_args()

        # Fix: compare against None (argparse's value when the flag is absent)
        # instead of truthiness, so an explicit `--min-acc-module-size 0` is
        # honored rather than silently replaced by the constructor argument.
        self.min_acc_module_size: int = (
            args.min_acc_module_size
            if args.min_acc_module_size is not None
            else min_acc_module_size
        )
        # The store_true flags default to False, so plain truthiness gives the
        # intended "CLI flag OR constructor argument" semantics here.
        self.skip_fusion: bool = args.skip_fusion if args.skip_fusion else skip_fusion
        self.allow_non_tensor: bool = args.allow_non_tensor if args.allow_non_tensor else allow_non_tensor
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@compatibility(is_backward_compatible=False)
class FxNetAccNodesFinder:
    """
    Finds a set of nodes that can be supported on ACC, excluding nodes that have non-tensor
    input/output to cpu nodes to prevent non-tensor data flow between backends and cpu.

    I.e. if we have a chain:

    ACC_NODE_1 -> ACC_NODE_2 -> ACC_NODE_3 -> CPU_NODE_1

    where every ACC node produces non-tensor output, then they all should be treated as CPU nodes.

    This behavior can be turned off by passing allow_non_tensor=True.
    """

    def __init__(
        self,
        module: torch.fx.GraphModule,
        operator_support: OperatorSupportBase,
        allow_non_tensor: bool,
    ):
        # Module to analyze; its graph nodes are classified in __call__.
        self.module = module
        # Backend-specific predicate deciding per-node ACC support.
        self.operator_support = operator_support
        # When True, skip both non-tensor pruning passes.
        self.allow_non_tensor = allow_non_tensor

    def reduce_acc_nodes_non_tensor_input_helper(
        self, cpu_worklist: NodeList
    ):
        """
        Transitively excludes nodes from ACC supported set.
        For every node in the worklist:
        - removes its downstream ACC nodes from ACC supported set,
        - if any downstream ACC node produces non-tensor output,
          then it gets added into the worklist.
        """
        # Mutates self.acc_nodes in place; `cpu_worklist` is also consumed
        # (and possibly grown) destructively.
        while cpu_worklist:
            node = cpu_worklist.pop(0)

            for user in node.users:
                if user in self.acc_nodes:
                    self.acc_nodes.remove(user)
                    if not is_node_output_tensor(user):
                        cpu_worklist.append(user)

    def reduce_acc_nodes_non_tensor_input(self):
        """
        Excludes nodes from ACC supported set that have direct
        upstream CPU nodes that produce non-tensor outputs.
        """
        # Seed the worklist with every callable CPU node whose output is
        # not a tensor, then propagate exclusion downstream via the helper.
        non_tensor_cpu_nodes: NodeList = []

        for node in self.module.graph.nodes:
            if node.op not in CALLABLE_NODE_OPS:
                continue
            if node in self.acc_nodes:
                continue
            if is_node_output_tensor(node):
                continue
            non_tensor_cpu_nodes.append(node)

        self.reduce_acc_nodes_non_tensor_input_helper(non_tensor_cpu_nodes)

    def reduce_acc_nodes_non_tensor_output(self):
        """
        Excludes nodes from ACC supported set that produce non-tensor
        outputs and have downstream CPU nodes.
        """
        # Fixed-point loop: demoting a node to CPU can expose new ACC nodes
        # that now feed a CPU consumer, so iterate until nothing changes.
        while True:
            new_cpu_nodes: NodeList = []

            for acc_node in self.acc_nodes:
                if is_node_output_tensor(acc_node):
                    continue
                for user in acc_node.users:
                    if user not in self.acc_nodes:
                        new_cpu_nodes.append(acc_node)
                        break

            if not new_cpu_nodes:
                break

            # Removal is deferred until after the scan so that self.acc_nodes
            # is not mutated while being iterated.
            for new_cpu_node in new_cpu_nodes:
                self.acc_nodes.remove(new_cpu_node)

            self.reduce_acc_nodes_non_tensor_input_helper(new_cpu_nodes)

    def __call__(self) -> NodeSet:
        # Start with every callable node the backend claims to support.
        submodules = dict(self.module.named_modules())
        self.acc_nodes = {
            n
            for n in self.module.graph.nodes
            if n.op in CALLABLE_NODE_OPS
            and self.operator_support.is_node_supported(submodules, n)
        }

        # Then prune nodes that would create non-tensor data flow across
        # the ACC/CPU boundary, unless the caller explicitly allows it.
        if not self.allow_non_tensor:
            self.reduce_acc_nodes_non_tensor_input()
            self.reduce_acc_nodes_non_tensor_output()

        return self.acc_nodes
|
| 181 |
+
|
| 182 |
+
@compatibility(is_backward_compatible=False)
class FxNetSplitterInternalError(Exception):
    """Raised when the splitter reaches an inconsistent internal state."""
|
| 185 |
+
|
| 186 |
+
@compatibility(is_backward_compatible=False)
@dataclass
class Subgraph:
    """One contiguous group of nodes destined for a single submodule."""

    # True when this subgraph is meant for the accelerator, False for CPU.
    is_acc: bool
    # The nodes belonging to this subgraph.
    nodes: NodeList
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
@compatibility(is_backward_compatible=False)
class SplitResult(NamedTuple):
    """
    Stores the results of the splitter.

    Attributes:
        split_module: root module after splitting.
        submodule_inputs: a dict that maps submodule name to its inputs.
        non_acc_submodule_prefix: the prefix for non acc submodules. For
            acc submodule the prefix is always "_run_on_acc_".
    """

    split_module: torch.fx.GraphModule
    submodule_inputs: Dict[str, Any]
    non_acc_submodule_prefix: str
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
@compatibility(is_backward_compatible=False)
def generate_inputs_for_submodules(
    model: torch.nn.Module,
    inputs: Sequence[Any],
    target_submodules: Iterable[str],
    deepcopy: bool = False,
) -> Dict[str, Any]:
    """
    Generate inputs for targeting submodules in the given model. Note that if two
    submodules refer to the same obj, this function doesn't work.

    Args:
        model: root model.
        inputs: inputs to the root model.
        target_submodules: submodules that we want to generate inputs for.
        deepcopy: if True, store a deep copy of each submodule's inputs instead
            of the live objects (use when the model mutates its inputs in place).

    Returns:
        A dict that maps from submodule name to its inputs.
    """

    handles = []
    results = {}
    # Keyed by module object identity, which is why aliased submodules
    # (same obj under two names) are not supported.
    submodule_to_names = {mod: name for name, mod in model.named_modules()}

    def pre_forward(module, module_inputs):
        # Record the positional inputs this submodule is about to receive.
        results[submodule_to_names[module]] = (
            copy.deepcopy(module_inputs) if deepcopy else module_inputs
        )

    for name, mod in model.named_modules():
        if name in target_submodules:
            handles.append(mod.register_forward_pre_hook(pre_forward))

    # try/finally replaces the previous duplicated cleanup (an except branch
    # that cleaned and re-raised, plus a second cleanup on the success path):
    # the hooks are always removed exactly once, whether or not forward raises.
    try:
        with torch.no_grad():
            model(*inputs)
    finally:
        for h in handles:
            h.remove()

    return results
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
class _SplitterBase:
|
| 257 |
+
"""
|
| 258 |
+
Splits a GraphModule into sub-GraphModules for execution on CPU or the accelerator.
|
| 259 |
+
Output is a GraphModule with supported and unsupported operators grouped into as few sub-GraphModules as possible.
|
| 260 |
+
Assumes that only "call_module", "call_function" and "call_method" from FX IR can potentially be executed on the accelerator.
|
| 261 |
+
|
| 262 |
+
Given the following graph:
|
| 263 |
+
==> b ==>
|
| 264 |
+
// \\
|
| 265 |
+
a d
|
| 266 |
+
\\ //
|
| 267 |
+
==> c ==>
|
| 268 |
+
|
| 269 |
+
class SimpleModule(torch.nn.Module):
|
| 270 |
+
def forward(self, a):
|
| 271 |
+
b = torch.sin(a)
|
| 272 |
+
c = torch.cos(a)
|
| 273 |
+
d = b + c
|
| 274 |
+
return d
|
| 275 |
+
|
| 276 |
+
and providing "operator_support" that indicates that 'b' and 'c' can be executed on the accelerator,
|
| 277 |
+
we will get the following split result:
|
| 278 |
+
|
| 279 |
+
main:
|
| 280 |
+
def forward(self, a):
|
| 281 |
+
run_on_acc_0_0 = self._run_on_acc_0_0(a)
|
| 282 |
+
getitem = run_on_acc_0_0[0]
|
| 283 |
+
getitem_1 = run_on_acc_0_0[1]
|
| 284 |
+
run_on_cpu_1_1 = self._run_on_cpu_1_1(getitem, getitem_1)
|
| 285 |
+
return run_on_cpu_1_1
|
| 286 |
+
|
| 287 |
+
_run_on_acc_0_0:
|
| 288 |
+
def forward(self, a):
|
| 289 |
+
sin_1 = torch.sin(a)
|
| 290 |
+
cos_1 = torch.cos(a)
|
| 291 |
+
return (sin_1, cos_1)
|
| 292 |
+
|
| 293 |
+
_run_on_cpu_1_1:
|
| 294 |
+
def forward(self, sin_1, cos_1):
|
| 295 |
+
add_1 = sin_1 + cos_1
|
| 296 |
+
return add_1
|
| 297 |
+
"""
|
| 298 |
+
|
| 299 |
+
# PCIe bandwidth for the backend, default to 100 GB/s
|
| 300 |
+
PCIe_BW = 100 * 2 ** 30
|
| 301 |
+
|
| 302 |
+
def __init__(
    self,
    module: torch.fx.GraphModule,
    sample_input: Sequence[Any],
    operator_support: OperatorSupportBase,
    settings: _SplitterSettingBase,
    non_acc_submodule_name: str = "_run_on_cpu_",
):
    """
    Preprocesses graph before splitting:
    - finds nodes supported by ACC,
    - finds fusion groups for ACC nodes having non-tensor IO,
    - builds a graph of direct dependencies,
    - builds a map of fused nodes to their fusions.
    As a result we get self.acc_nodes, self.deps and self.fusions.
    """
    assert isinstance(module, torch.fx.GraphModule)

    self.module = module
    # Run shape propagation so each node's meta carries "tensor_meta"
    # (read later, e.g. by node_support_preview's dtype extraction).
    ShapeProp(self.module).propagate(*sample_input)

    self.settings = settings
    self.operator_support = operator_support
    self.sample_input = sample_input
    # Set of graph nodes the accelerator backend can run, already pruned
    # for non-tensor boundary flow unless settings.allow_non_tensor is set.
    self.acc_nodes = FxNetAccNodesFinder(self.module, self.operator_support, self.settings.allow_non_tensor)()

    if self.settings.skip_fusion:
        self.fusions = {}
    else:
        self.fusions = FxNetAccFusionsFinder(module, self.acc_nodes)()

    # Modify deps to add more deps for fused nodes
    self.deps = self.find_deps()
    self.update_deps_for_fusions()

    # Prefix used when naming CPU submodules after the split.
    self.non_acc_submodule_name = non_acc_submodule_name
    # node name -> submodule name, populated during splitting.
    self._node_submodule_map: Dict[str, str] = {}
|
| 339 |
+
|
| 340 |
+
# ===============================================================
|
| 341 |
+
# Helpers for ctor and initial state
|
| 342 |
+
# ===============================================================
|
| 343 |
+
|
| 344 |
+
def get_node_submodule_map(self) -> Dict[str, str]:
    """Return the node-name -> submodule-name mapping.

    e.g. a node named
    ``main_module_impl_impl_over_arch_unary_multiple_embedding
    _pooling_embedding_pooling_sparse_entity_equivalence_key
    _proxy_embedding_bag``
    may map to submodule name ``_run_on_acc_1``.
    """
    return self._node_submodule_map
|
| 352 |
+
|
| 353 |
+
def find_deps(self) -> Dict[torch.fx.Node, NodeSet]:
    """
    Build the direct-dependency graph of the module's callable nodes.

    Leaf nodes have no dependencies and nothing depends on the "output"
    node. Only direct (non-transitive) edges are recorded: for each
    callable node, every non-output user gains that node as a dependency.
    """
    deps: Dict[torch.fx.Node, NodeSet] = defaultdict(set)
    callables = (
        n for n in self.module.graph.nodes if n.op in CALLABLE_NODE_OPS
    )
    for node in callables:
        non_output_users = (u for u in node.users if u.op != "output")
        for user in non_output_users:
            deps[user].add(node)
    return deps
|
| 370 |
+
|
| 371 |
+
def update_deps_for_fusions(self):
    """
    Updates graph of dependencies so that:
    - nodes from the same fusion depend on the same set of outer nodes,
    - outer nodes depending on a fusion depend on all nodes in that fusion.
    """
    # self.fusions maps each fused node to the set of nodes in its fusion
    # group (`fusion` below); a group therefore appears once per member.
    for node in self.fusions:
        fusion = self.fusions[node]
        for fused_neighbor in fusion:
            # Inherit the neighbor's deps, excluding nodes inside the
            # fusion itself so the group has no self-dependencies.
            self.deps[node].update(self.deps[fused_neighbor] - fusion)

            # Any consumer outside the fusion must also wait for `node`,
            # so the whole group is scheduled as a unit.
            for user in fused_neighbor.users:
                if user not in fusion:
                    self.deps[user].add(node)
|
| 385 |
+
|
| 386 |
+
# ===============================================================
|
| 387 |
+
# Helpers for preview
|
| 388 |
+
# ===============================================================
|
| 389 |
+
|
| 390 |
+
def _lower_model_to_backend(
    self, mod: torch.fx.GraphModule, inputs: Tensors
) -> torch.nn.Module:
    """
    Lower the model to a backend.

    This base implementation is a no-op that returns ``mod`` unchanged.
    """
    return mod
|
| 398 |
+
|
| 399 |
+
def _find_culprit(
    self, mod: torch.fx.GraphModule, inputs: Tensors
) -> str:
    """
    When an error occurs during lowering or running the lowered mod, this
    hook is used to find culprits in the `mod` that cause the error.

    The base implementation reports that no culprit search is available.
    """
    return "Unable to find a culprit because _find_culprit() function is not implemented."
|
| 408 |
+
|
| 409 |
+
def _draw_graph_based_on_node_support(
    self, mod: torch.fx.GraphModule, supported_nodes: NodeList
):
    # Dump a colorized dot rendering of `mod` to "node_support.dot":
    # green = supported, red = unsupported callable, pale blue = other
    # (placeholders, get_attr, output).
    color_map = {
        "default": "AliceBlue",
        "supported": "chartreuse1",
        "unsupported": "crimson",
    }

    class CustomDrawer(FxGraphDrawer):
        # Override only the fill color; keep the rest of the node style.
        def _get_node_style(self, node):
            template = super()._get_node_style(node)
            if node in supported_nodes:
                template["fillcolor"] = color_map["supported"]
            elif node.op in CALLABLE_NODE_OPS:
                template["fillcolor"] = color_map["unsupported"]
            else:
                template["fillcolor"] = color_map["default"]

            return template

    drawer = CustomDrawer(mod, "node_support", ignore_getattr=True)
    dot_graph = drawer.get_main_dot_graph()
    # Writes into the current working directory.
    dot_graph.write_raw("node_support.dot")
|
| 433 |
+
|
| 434 |
+
def node_support_preview(self, dump_graph: bool = False):
    """
    Build (and print) a report of which node types in ``self.module`` are
    supported by ``self.operator_support``, keyed by target name and the
    dtypes of their tensor arguments.

    Args:
        dump_graph: if True, also dump a colored graphviz drawing via
            ``_draw_graph_based_on_node_support``.

    Returns:
        The report string (also printed), for testing purposes.

    NOTE(review): dtype info comes from ``node.meta["tensor_meta"]``, so this
    presumably expects ShapeProp (or equivalent) to have run first — args
    without that metadata report dtype ``None``.
    """
    submodules = dict(self.module.named_modules())

    supported_nodes: NodeList = []
    supported_node_types = defaultdict(set)
    unsupported_node_types = defaultdict(set)

    def get_dtype(arg):
        # Returns None when the arg has no tensor_meta (i.e. not a tensor
        # or shape propagation hasn't populated it).
        tensor_meta = arg.meta.get("tensor_meta")
        return getattr(tensor_meta, "dtype", None)

    for node in self.module.graph.nodes:
        # Only call_module / call_function / call_method are classified.
        if node.op not in CALLABLE_NODE_OPS:
            continue

        target = get_node_target(submodules, node)

        # Store dtype of arg in node.args. If arg doesn't have dtype, i.e. not a tensor, we'll store None.
        arg_dtypes = [
            get_dtype(arg) if isinstance(arg, torch.fx.Node) else None
            for arg in node.args
        ]

        # Find last non-None element. If all elements are None, return max_len.
        # (Scans the reversed list for the first non-None; ``last_index`` is
        # then the length of the prefix ending at that element, or 0 if all
        # entries are None.)
        last_index = len(arg_dtypes) - next(
            (
                i
                for i, dtype in enumerate(reversed(arg_dtypes))
                if dtype is not None
            ),
            len(arg_dtypes),
        )

        # Strip None elements at the end.
        arg_dtypes_tuple = tuple(arg_dtypes[:last_index])
        kwarg_dtypes_tuple = tuple(
            (k, get_dtype(arg))
            for k, arg in node.kwargs.items()
            if isinstance(arg, torch.fx.Node)
        )

        # Bucket the (target, dtype-signature) pair by support status.
        if self.operator_support.is_node_supported(submodules, node):
            supported_nodes.append(node)
            supported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple))
        else:
            unsupported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple))

    if dump_graph:
        self._draw_graph_based_on_node_support(self.module, supported_nodes)

    reports = "\nSupported node types in the model:\n"
    for t, dtypes in supported_node_types.items():
        for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes:
            reports += f"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\n"

    reports += "\nUnsupported node types in the model:\n"
    for t, dtypes in unsupported_node_types.items():
        for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes:
            reports += f"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\n"

    print(reports)

    # Return reports for testing purpose
    return reports
|
| 498 |
+
|
| 499 |
+
def split_preview(self, dump_graph: bool = False):
    """
    Dry-run the split and report on the result without committing tags to
    ``self.module`` (``split(remove_tag=True)`` strips them afterwards).

    For every acc submodule of the split result this: captures its real
    inputs via a forward-pre-hook on a full-model run, runs ShapeProp,
    sums input/output tensor sizes to estimate a PCIe-bandwidth-bound max
    QPS, then attempts lowering and inference, recording any RuntimeError
    with ``_find_culprit``.

    Args:
        dump_graph: if True, also write one ``.dot`` file per submodule.

    Returns:
        The report string (also printed), for testing purposes.
    """
    reports = ""
    subgraphs = self.put_nodes_into_subgraphs()
    acc_subgraphs_num = len([g for g in subgraphs if g.is_acc])
    cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num
    reports += f"Before removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:"
    reports += f" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\n"

    subgraphs = self.remove_small_acc_subgraphs(subgraphs)
    acc_subgraphs_num = len([g for g in subgraphs if g.is_acc])
    cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num
    reports += f"After removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:"
    reports += f" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\n"

    for i, subgraph in enumerate(subgraphs):
        reports += f"_run_on_acc_{i}: " if subgraph.is_acc else f"{self.non_acc_submodule_name}{i}: "
        reports += f"{len(subgraph.nodes)} node(s)\n"

    self.tag(subgraphs)
    # remove_tag=True: this is only a preview, leave self.module untagged.
    split_mod = self.split(remove_tag=True)
    split_mod.eval()

    if dump_graph:
        drawer = FxGraphDrawer(
            split_mod, "preview", ignore_getattr=True
        )
        dot_graphs = drawer.get_all_dot_graphs()
        for name, dot_graph in dot_graphs.items():
            dot_graph.write_raw(f"{name}.dot")

    max_qps: float = self.PCIe_BW
    bottleneck_module = ""

    for node in split_mod.graph.nodes:
        # Only inspect the acc submodules of the split result.
        if node.op == "call_module" and "acc" in node.target:
            reports += f"\nProcessing acc submodule {node.target}\n"

            submod = getattr(split_mod, node.target)

            def get_submod_inputs(main_mod, submod, example_inputs):
                # Capture the submodule's actual inputs by hooking it and
                # running the whole split model once on the sample input.
                sub_inputs = None

                def get_inputs(self, inputs):
                    nonlocal sub_inputs
                    sub_inputs = inputs

                handle = submod.register_forward_pre_hook(get_inputs)
                main_mod(*example_inputs)
                handle.remove()
                return sub_inputs

            submod_inputs = get_submod_inputs(
                split_mod, submod, self.sample_input
            )
            # Populate tensor_meta so is_node_output_tensor/get_size_of_node work.
            ShapeProp(submod).propagate(*submod_inputs)

            total_input_bytes = 0
            total_output_bytes = 0

            reports += "Checking inputs...\n"
            for n in submod.graph.nodes:
                if n.op == "placeholder":
                    if not is_node_output_tensor(n):
                        reports += f"Input {n.name} is not a tensor, this might cause problems during lowering!\n"
                    else:
                        total_input_bytes += get_size_of_node(submod, n)[0]
                if n.op == "output":
                    output_node = n

            reports += "Checking outputs...\n"

            def get_bytes(node: torch.fx.Node):
                nonlocal total_output_bytes
                nonlocal reports
                if not is_node_output_tensor(node):
                    reports += f"Output {node.name} is not a tensor, this might cause problems during lowering!\n"
                else:
                    total_output_bytes += get_size_of_node(submod, node)[0]

            map_arg(output_node.args, get_bytes)
            # NOTE(review): if no input or output is a tensor, both totals are
            # 0 and this raises ZeroDivisionError — confirm intended.
            qps = self.PCIe_BW / max(total_input_bytes, total_output_bytes)
            reports += f"Total input size in bytes is {total_input_bytes}, total output size in bytes is {total_output_bytes},"
            reports += f" theoretical max qps (bounds by PCIe bandwidth) for this submodule is {qps}.\n"

            if qps < max_qps:
                max_qps = qps
                bottleneck_module = node.target

            try:
                lowered_submod = self._lower_model_to_backend(submod, submod_inputs)
            except RuntimeError:
                reports += "Run into an error during lowering!\n"
                reports += self._find_culprit(submod, submod_inputs)
                continue

            try:
                lowered_submod(*submod_inputs)
            except RuntimeError:
                reports += "Run into an error during inference!\n"
                reports += self._find_culprit(submod, submod_inputs)
            else:
                reports += "Lowering and running succeed!\n"

    reports += f"\nTheoretical max qps (bounds by PCIe bandwidth) for this model is {max_qps},"
    reports += f" bottleneck is submodule {bottleneck_module}."
    print(reports)

    # return the reports for testing purposes
    return reports
|
| 608 |
+
|
| 609 |
+
# ===============================================================
|
| 610 |
+
# Helpers for extend_acc_subgraph() method
|
| 611 |
+
# ===============================================================
|
| 612 |
+
|
| 613 |
+
def find_reverse_deps(
    self, tag_id: Optional[int] = None
) -> Dict[torch.fx.Node, NodeSet]:
    """
    Build the reversed dependency map: each callable node -> the set of its
    callable users. When ``tag_id`` is given, users living in a later
    subgraph (i.e. whose tag's trailing integer is >= ``tag_id``) are
    ignored.
    """
    reverse_deps: Dict[torch.fx.Node, NodeSet] = defaultdict(set)

    callable_nodes = (
        n for n in self.module.graph.nodes if n.op in CALLABLE_NODE_OPS
    )
    for node in callable_nodes:
        for user in node.users:
            if user.op not in CALLABLE_NODE_OPS:
                continue
            # Skip users that belong to a subgraph at or after tag_id.
            if tag_id is not None and int(user.tag.split("_")[-1]) >= tag_id:
                continue
            reverse_deps[node].add(user)

    return reverse_deps
|
| 634 |
+
|
| 635 |
+
def update_reverse_deps_for_fusions(
    self, deps: Dict[torch.fx.Node, NodeSet]
):
    """
    Rewrite the reversed dependency map so that every fusion group behaves
    like a single node: all members share one merged user set (minus the
    members themselves), and every outside input of a member gains the whole
    group as users.

    Args:
        deps: map produced by ``find_reverse_deps``; mutated in place.
    """
    processed_node = set()

    for node, fusion in self.fusions.items():
        # Members of one fusion group map to the same group set, so skip
        # groups we've already handled via another member.
        if node in processed_node:
            continue

        new_dep = set()

        # Create a new dependency set which include all the
        # dependencies of the nodes in the fusion group
        for n in fusion:
            new_dep.update(deps[n])

        # Exclude nodes in the fusion
        new_dep.difference_update(fusion)

        # Update dependency
        for n in fusion:
            # All members intentionally alias the SAME set object.
            deps[n] = new_dep

            for arg in n.all_input_nodes:
                if arg not in fusion:
                    deps[arg].update(fusion)

            processed_node.add(n)
|
| 663 |
+
|
| 664 |
+
def find_parent_nodes_of_subgraph(self, tag: str) -> NodeSet:
    """
    Collect the parent nodes of the ``tag`` subgraph: every callable input
    of a subgraph member that is itself callable (so not a placeholder or
    get_attr) but carries a different tag.
    """
    members = (
        n
        for n in self.module.graph.nodes
        if n.op in CALLABLE_NODE_OPS and n.tag == tag
    )

    parent_nodes = set()
    for member in members:
        parent_nodes.update(
            arg
            for arg in member.all_input_nodes
            if arg.op in CALLABLE_NODE_OPS and arg.tag != tag
        )

    return parent_nodes
|
| 680 |
+
|
| 681 |
+
def extend_acc_subgraph(self, tag: str):
    """
    Extend the acc subgraph with `tag` going the reversed topological direction.

    Greedily pulls acc-supported parent nodes into the subgraph: a parent is
    absorbed once all of its (reverse) dependencies have been visited, its
    fusion buddies and its own callable inputs then become new candidates.
    Stops when no remaining candidate is eligible. Mutates ``node.tag`` on
    absorbed nodes.
    """
    # Dict that maps node to its users and ignore users that
    # are in the subgraph that has greater tag
    deps = self.find_reverse_deps(tag_id=int(tag.split("_")[-1]))
    self.update_reverse_deps_for_fusions(deps)

    # Parent nodes of the subgraph
    parent_nodes = self.find_parent_nodes_of_subgraph(tag)

    visited_nodes: NodeSet = set()

    while parent_nodes:
        node = None

        # Find a acc node that depends on visited nodes only
        for n in parent_nodes:
            if deps[n] <= visited_nodes and n in self.acc_nodes:
                node = n
                break

        # No eligible candidate left — extension is done.
        if node is None:
            break

        # Put the node into `tag` subgraph
        node.tag = tag  # type: ignore[attr-defined]
        parent_nodes.remove(node)
        visited_nodes.add(node)

        # If node is in a fusion group, add all fusion buddies to parent nodes
        if node in self.fusions:
            for fusion_node in self.fusions[node]:
                if fusion_node not in visited_nodes:
                    parent_nodes.add(fusion_node)

        # Add inputs of the node to parent nodes
        for arg in node.all_input_nodes:
            if arg.op in CALLABLE_NODE_OPS and arg not in visited_nodes:
                parent_nodes.add(arg)
|
| 722 |
+
|
| 723 |
+
# ===============================================================
|
| 724 |
+
# Helpers for split() method
|
| 725 |
+
# ===============================================================
|
| 726 |
+
|
| 727 |
+
def starter_nodes(self) -> Tuple[NodeSet, NodeSet]:
    """
    Find the nodes that directly consume module inputs (placeholders) or
    get_attr nodes, split into (cpu starters, acc starters) according to
    membership in ``self.acc_nodes``.
    """
    cpu_starters: NodeSet = set()
    acc_starters: NodeSet = set()

    for node in self.module.graph.nodes:
        if node.op in {"placeholder", "get_attr"}:
            for user in node.users:
                bucket = acc_starters if user in self.acc_nodes else cpu_starters
                bucket.add(user)

    return cpu_starters, acc_starters
|
| 742 |
+
|
| 743 |
+
def put_nodes_into_subgraphs(self) -> List[Subgraph]:
    """
    Partition all callable nodes into a list of alternating acc / non-acc
    ``Subgraph``s in a topological order.

    Works as a worklist traversal from the starter nodes: while in one mode
    (acc or cpu), keep picking a same-mode node whose dependencies are all
    visited; when none exists, close the current subgraph and flip the mode.

    Raises:
        FxNetSplitterInternalError: if a subgraph would be empty or no
            subgraphs could be created at all.
    """
    # We start graph traversal from leaf nodes
    current_cpu_nodes, current_acc_nodes = self.starter_nodes()
    visited_nodes: NodeSet = set()

    # Determine which subgraph to start from based on which subgraph has
    # 0-dep node
    acc_subgraph: bool = not any(len(self.deps[n]) == 0 for n in current_cpu_nodes)

    current_subgraph_nodes: NodeList = []

    # Result accumulator
    subgraphs: List[Subgraph] = []
    while current_cpu_nodes or current_acc_nodes:
        # Find the first node that should belong to the current subgraph and has all dependencies resolved
        current_nodes = current_acc_nodes if acc_subgraph else current_cpu_nodes
        node = next(
            (n for n in current_nodes if self.deps[n] <= visited_nodes),
            None,
        )

        # If nothing was found, then it's time to flip the mode and start a new subgraph
        if node is None:
            if not current_subgraph_nodes:
                raise FxNetSplitterInternalError("Subgraph can't be empty")

            subgraphs.append(
                Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes)
            )
            acc_subgraph = not acc_subgraph
            current_subgraph_nodes = []
            continue

        current_nodes.remove(node)
        visited_nodes.add(node)
        current_subgraph_nodes.append(node)

        # Add fusion buddies
        if node in self.fusions:
            if node in self.acc_nodes:
                current_acc_nodes.update(self.fusions[node] - visited_nodes)
            else:
                current_cpu_nodes.update(self.fusions[node] - visited_nodes)

        # Put depending nodes into the queue
        for user in node.users:
            if user.op not in CALLABLE_NODE_OPS:
                continue

            # Add downstream nodes
            if user in self.acc_nodes:
                current_acc_nodes.add(user)
            else:
                current_cpu_nodes.add(user)

    # Check if the last subgraph was not created
    if current_subgraph_nodes:
        subgraphs.append(
            Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes)
        )

    if not subgraphs:
        raise FxNetSplitterInternalError("Couldn't create subgraphs")

    return subgraphs
|
| 808 |
+
|
| 809 |
+
def remove_small_acc_subgraphs(self, subgraphs: List[Subgraph]) -> List[Subgraph]:
    """
    This pass finds ACC submodules with less than specified size and merges
    them with adjacent CPU submodules.

    An acc subgraph smaller than ``self.settings.min_acc_module_size`` is
    folded into the previous result subgraph when one exists; otherwise it
    is simply relabeled as non-acc. Adjacent non-acc subgraphs are also
    coalesced. Returns a new list; the Subgraph objects themselves may be
    mutated (nodes extended, is_acc flipped).
    """
    result: List[Subgraph] = []
    for subgraph in subgraphs:
        if subgraph.is_acc:
            if len(subgraph.nodes) >= self.settings.min_acc_module_size:
                result.append(subgraph)
            else:
                print(
                    "Eliminating acc subgraph because it's smaller than the threshold: "
                    f"{len(subgraph.nodes)} < {self.settings.min_acc_module_size}"
                )
                # Merge into the previous subgraph if any, else keep it
                # as a (now) non-acc subgraph.
                if result:
                    result[-1].nodes.extend(subgraph.nodes)
                else:
                    subgraph.is_acc = False
                    result.append(subgraph)
        else:
            # Coalesce consecutive non-acc subgraphs.
            if result and not result[-1].is_acc:
                result[-1].nodes.extend(subgraph.nodes)
            else:
                result.append(subgraph)
    return result
|
| 835 |
+
|
| 836 |
+
def tag(self, subgraphs: List[Subgraph]):
    """
    Assign a submodule tag to every node of every subgraph and record the
    tag list in ``self.tags`` and the per-node mapping in
    ``self._node_submodule_map``. Acc subgraphs get ``_run_on_acc_<i>``,
    others ``<non_acc_submodule_name><i>``.

    Raises:
        FxNetSplitterInternalError: if any node already carries a tag.
    """
    self.tags: List[str] = []
    for index, subgraph in enumerate(subgraphs):
        if subgraph.is_acc:
            tag = f"_run_on_acc_{index}"
        else:
            tag = f"{self.non_acc_submodule_name}{index}"
        self.tags.append(tag)

        for node in subgraph.nodes:
            if hasattr(node, "tag"):
                raise FxNetSplitterInternalError(f"Node {node} was already tagged")

            node.tag = tag  # type: ignore[attr-defined]
            self._node_submodule_map[node.name] = tag
|
| 847 |
+
|
| 848 |
+
def split(self, remove_tag: bool = False) -> torch.fx.GraphModule:
    """
    Produce the split GraphModule from the tags assigned by ``tag()``.

    Args:
        remove_tag: when True, strip the ``tag`` attribute from every node
            of the original module afterwards (used for previews).
    """
    result = split_by_tags(self.module, self.tags)
    if remove_tag:
        tagged = [n for n in self.module.graph.nodes if hasattr(n, "tag")]
        for n in tagged:
            del n.tag
    return result
|
| 855 |
+
|
| 856 |
+
def __call__(self) -> torch.fx.GraphModule:
    """
    Run the full splitting pipeline: partition nodes into subgraphs, drop
    undersized acc subgraphs, tag the nodes, and return the split module.
    """
    subgraphs = self.remove_small_acc_subgraphs(self.put_nodes_into_subgraphs())

    acc_subgraphs_count = sum(1 for s in subgraphs if s.is_acc)
    non_acc_subgraphs_count = len(subgraphs) - acc_subgraphs_count
    print(f"Got {acc_subgraphs_count} acc subgraphs and {non_acc_subgraphs_count} non-acc subgraphs")

    self.tag(subgraphs)
    return self.split()
|
| 864 |
+
|
| 865 |
+
def generate_split_results(self) -> SplitResult:
    """
    Split the module and capture sample inputs for every submodule.

    Runs the full split pipeline (``self()``), generates per-submodule
    inputs from ``self.sample_input``, and bundles everything into a
    ``SplitResult``.
    """
    split_module = self()
    # Only the child names are needed; use a comprehension instead of a
    # manual append loop with an unused module variable.
    submodule_names = [name for name, _ in split_module.named_children()]
    submodule_inputs = generate_inputs_for_submodules(
        split_module, self.sample_input, submodule_names
    )
    return SplitResult(split_module, submodule_inputs, self.non_acc_submodule_name)
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (177 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__pycache__/test_pass_manager.cpython-310.pyc
ADDED
|
Binary file (3.53 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/test_pass_manager.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import unittest
|
| 2 |
+
|
| 3 |
+
from ..pass_manager import (
|
| 4 |
+
inplace_wrapper,
|
| 5 |
+
PassManager,
|
| 6 |
+
these_before_those_pass_constraint,
|
| 7 |
+
this_before_that_pass_constraint,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TestPassManager(unittest.TestCase):
    """Unit tests for torch.fx.passes.pass_manager's PassManager and its
    ordering constraints."""

    def test_pass_manager_builder(self) -> None:
        # A constraint-free manager should validate cleanly.
        passes = [lambda x: 2 * x for _ in range(10)]
        pm = PassManager(passes)
        pm.validate()

    def test_this_before_that_pass_constraint(self) -> None:
        passes = [lambda x: 2 * x for _ in range(10)]
        pm = PassManager(passes)

        # add unfulfillable constraint
        pm.add_constraint(this_before_that_pass_constraint(passes[-1], passes[0]))

        self.assertRaises(RuntimeError, pm.validate)

    def test_these_before_those_pass_constraint(self) -> None:
        passes = [lambda x: 2 * x for _ in range(10)]
        constraint = these_before_those_pass_constraint(passes[-1], passes[0])
        # Wrapping in inplace_wrapper must not break constraint matching.
        pm = PassManager(
            [inplace_wrapper(p) for p in passes]
        )

        # add unfulfillable constraint
        pm.add_constraint(constraint)

        self.assertRaises(RuntimeError, pm.validate)

    def test_two_pass_managers(self) -> None:
        """Make sure we can construct the PassManager twice and not share any
        state between them"""

        passes = [lambda x: 2 * x for _ in range(3)]
        constraint = these_before_those_pass_constraint(passes[0], passes[1])
        pm1 = PassManager()
        for p in passes:
            pm1.add_pass(p)
        pm1.add_constraint(constraint)
        output1 = pm1(1)
        # Three doubling passes applied to 1 -> 2 ** 3.
        self.assertEqual(output1, 2 ** 3)

        passes = [lambda x: 3 * x for _ in range(3)]
        constraint = these_before_those_pass_constraint(passes[0], passes[1])
        pm2 = PassManager()
        for p in passes:
            pm2.add_pass(p)
        pm2.add_constraint(constraint)
        output2 = pm2(1)
        # Three tripling passes applied to 1 -> 3 ** 3; pm1 state must not leak.
        self.assertEqual(output2, 3 ** 3)
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/tools_common.py
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Tuple, Union, Dict, Any, Set, Mapping
|
| 2 |
+
import collections
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.fx
|
| 7 |
+
from torch.fx.node import _get_qualified_name
|
| 8 |
+
from torch.fx._compatibility import compatibility
|
| 9 |
+
|
| 10 |
+
__all__ = ['get_acc_ops_name', 'get_node_target', 'is_node_output_tensor', 'FxNetAccFusionsFinder', 'legalize_graph']
|
| 11 |
+
|
| 12 |
+
Tensors = Union[Tuple[torch.Tensor], List[torch.Tensor]]
|
| 13 |
+
TensorOrTensors = Union[torch.Tensor, Tensors]
|
| 14 |
+
NodeList = List[torch.fx.Node]
|
| 15 |
+
NodeSet = Set[torch.fx.Node]
|
| 16 |
+
Names = List[str]
|
| 17 |
+
CALLABLE_NODE_OPS = {"call_module", "call_function", "call_method"}
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@compatibility(is_backward_compatible=False)
def get_acc_ops_name(k):
    """
    Return a printable target name for ``k``.

    Strings pass through unchanged; callables from an ``acc_ops`` module
    become ``acc_ops.<name>``; anything else becomes ``<module>.<name>``
    (with the ``torch._ops`` -> ``torch.ops`` module rename applied).
    """
    if isinstance(k, str):
        return k
    elif k.__module__ and "acc_ops" in k.__module__:
        return f"acc_ops.{k.__name__}"
    else:
        # WAR for bug in how torch.ops assigns module.
        # Guard against a None __module__ (possible for some callables):
        # previously this called None.replace() and crashed, even though
        # the f-string below already handles a falsy module.
        module = k.__module__.replace('torch._ops', 'torch.ops') if k.__module__ else None
        return f"{module if module else ''}.{k.__name__}"
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@compatibility(is_backward_compatible=False)
def get_node_target(submodules: Mapping[str, torch.nn.Module], node: torch.fx.Node) -> str:
    """
    Given a `node` returns its target typename.

    - "call_module": typename of the module ``node.target`` points to in
      ``submodules`` (honoring a ``_base_class_origin`` override).
    - "call_function": qualified name of the callable target, or
      ``acc_ops.<name>`` for acc_ops functions.
    - "call_method": the method name string itself (may collide across
      types, but is normally a tensor method).

    Any "_VariableFunctionsClass" prefix in the result is effectively the
    "torch" namespace, e.g. _VariableFunctionsClass.relu <-> torch.relu.
    """
    assert node.op in CALLABLE_NODE_OPS, (
        "Expect op types of " + ", ".join(CALLABLE_NODE_OPS) + f", but found {node.op}"
    )

    if node.op == "call_module":
        assert isinstance(node.target, str)
        submod = submodules[node.target]
        submod_type = getattr(submod, "_base_class_origin", type(submod))
        return get_acc_ops_name(submod_type)

    if node.op == "call_function":
        target: Any = node.target
        if target.__module__ is not None and "acc_ops" in target.__module__:
            return f"acc_ops.{target.__name__}"
        return _get_qualified_name(target)

    # "call_method": the target already is the method name string.
    assert isinstance(node.target, str)
    return node.target
|
| 66 |
+
|
| 67 |
+
@compatibility(is_backward_compatible=False)
def is_node_output_tensor(node: torch.fx.Node) -> bool:
    """Checks if the node output produces a Tensor or not.

    NOTE: This requires to run `ShapeProp` on the containing fx graph before
    calling this function. This is because it works by checking the `type`
    metadata on the node. This metadata is produced by the `ShapeProp`.
    """
    node_type = node.meta.get("type", None)
    if node_type is None:
        return False
    return issubclass(node_type, torch.Tensor)
|
| 77 |
+
|
| 78 |
+
@compatibility(is_backward_compatible=False)
class FxNetAccFusionsFinder:
    """
    Finds groups of connected ACC nodes that pass non-tensor data between each other.
    Such groups are called fusion groups.

    Nodes passing non-tensor data are detected by the absence of
    "tensor_meta" in ``node.meta``. Calling the instance returns a map from
    each fused node to its (shared) fusion-group node set; groups containing
    any non-acc node are instead removed from ``acc_nodes`` (which is
    mutated in place).
    """

    def __init__(self, module: torch.fx.GraphModule, acc_nodes: NodeSet):
        self.module = module
        # Topologically ordered node list; indices into it are used to bound
        # the upstream search in recursive_add_node.
        self.nodes = list(module.graph.nodes)
        self.acc_nodes = acc_nodes

    @dataclass
    class FusionGroup:
        # The smallest idx of nodes in the fusion group after topological sorting all the nodes in the model.
        top_node_idx: int

        # Nodes in this fusion group.
        nodes: NodeSet

        # Inputs to this fusion group.
        inputs: NodeSet

        # Nodes that in the fusion group that haven't been processed yet.
        nodes_need_process: NodeSet

        def add_node(self, node):
            """
            Add a node to fusion group.
            """
            if node in self.nodes:
                return

            self.nodes_need_process.add(node)
            self.nodes.add(node)
            # The node is no longer an external input; its own callable
            # inputs outside the group become inputs instead.
            self.inputs.discard(node)
            self.inputs.update(
                {
                    n
                    for n in node.all_input_nodes
                    if n.op in CALLABLE_NODE_OPS and n not in self.nodes
                }
            )

    def recursive_add_node(
        self,
        fusion_group: "FxNetAccFusionsFinder.FusionGroup",
        inputs: Union[NodeSet, NodeList],
    ):
        """
        Start from inputs and going reverse topological order. If any upstream node
        is in the fusion group, add all the nodes in this path to fusion group.

        Returns True if some input (transitively) reaches the fusion group.
        """
        for arg in inputs:
            # Skip placeholder and get_attr because they won't be in the fusion group.
            if arg.op not in CALLABLE_NODE_OPS:
                continue

            # If the node has smaller idx, it's already an upstream node of the fusion
            # group. We don't need to check it anymore.
            if self.nodes.index(arg) < fusion_group.top_node_idx:
                continue

            # If the node is in the fusion group, return True.
            if arg in fusion_group.nodes:
                return True

            # Check the upstream nodes of the node, if any of them is in the fusion group
            # we'll add this node to fusion group and return True.
            if self.recursive_add_node(fusion_group, arg.all_input_nodes):
                fusion_group.add_node(arg)
                return True

        return False

    def __call__(self) -> Dict[torch.fx.Node, NodeSet]:
        result: Dict[torch.fx.Node, NodeSet] = {}
        # Iterate a snapshot; self.acc_nodes may shrink during the loop.
        acc_nodes = list(self.acc_nodes)

        for node in acc_nodes:
            if node in result:
                continue
            if node.op not in CALLABLE_NODE_OPS:
                continue
            # Nodes with tensor_meta produce tensors and need no fusion seed.
            if "tensor_meta" in node.meta:
                continue
            if node not in self.acc_nodes:
                continue

            fusion_group: FxNetAccFusionsFinder.FusionGroup = self.FusionGroup(
                top_node_idx=self.nodes.index(node),
                nodes={node},
                inputs=set(node.all_input_nodes),
                nodes_need_process={node},
            )
            # Grow the group until no member remains unprocessed.
            while fusion_group.nodes_need_process:
                node = fusion_group.nodes_need_process.pop()
                self.recursive_add_node(fusion_group, fusion_group.inputs)

                # Optionally add downstream nodes
                if "tensor_meta" not in node.meta:
                    for user in node.users:
                        if user.op not in CALLABLE_NODE_OPS:
                            continue
                        if user in fusion_group.nodes:
                            continue

                        fusion_group.add_node(user)
                        self.recursive_add_node(fusion_group, fusion_group.inputs)

                # Add some upstream nodes
                for arg in node.all_input_nodes:
                    if arg.op not in CALLABLE_NODE_OPS:
                        continue
                    if "tensor_meta" in arg.meta:
                        continue
                    if arg in fusion_group.nodes:
                        continue

                    fusion_group.add_node(arg)
                    fusion_group.top_node_idx = min(
                        fusion_group.top_node_idx, self.nodes.index(arg)
                    )
                    self.recursive_add_node(fusion_group, fusion_group.inputs)

            # A group that pulled in any non-acc node can't run on the
            # accelerator at all — drop the whole group from acc_nodes.
            if not (set(fusion_group.nodes) <= self.acc_nodes):
                self.acc_nodes -= fusion_group.nodes
            else:
                for n in fusion_group.nodes:
                    result[n] = fusion_group.nodes

        return result
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
@compatibility(is_backward_compatible=False)
def legalize_graph(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """
    Replace the graph of the given GraphModule with one that contains the same nodes as the
    original, but in topologically sorted order.

    This is used by the merge_matmul transformation below, which disturbs the topologically sorted
    order of its input GraphModule, so that this order is restored before further transformation.

    Arguments:
        gm: The graph module to topologically sort. It is modified in-place.

    Returns:
        The graph module in-place sorted
    """
    # Kahn's algorithm: track how many unfulfilled producers each node has,
    # then repeatedly emit nodes whose count has dropped to zero.
    remaining_deps = {node: 0 for node in gm.graph.nodes}
    for producer in gm.graph.nodes:
        for consumer in producer.users:
            remaining_deps[consumer] += 1

    # Seed the worklist with every node that has no dependencies.
    worklist: collections.deque = collections.deque(
        node for node in gm.graph.nodes if remaining_deps[node] == 0
    )

    sorted_graph = torch.fx.Graph()
    env: Dict[torch.fx.Node, torch.fx.Node] = {}

    # Pop nodes off the worklist, copying each into the new graph; a node's
    # inputs are guaranteed to already be in `env` when it is copied.
    while worklist:
        cur = worklist.popleft()
        env[cur] = sorted_graph.node_copy(cur, lambda x: env[x])
        for consumer in cur.users:
            remaining_deps[consumer] -= 1
            if remaining_deps[consumer] == 0:
                worklist.append(consumer)

    # If the new graph is smaller than the old one, some node's dependencies
    # were never satisfied — the input graph contained a cycle.
    if len(sorted_graph.nodes) < len(gm.graph.nodes):
        raise RuntimeError(f"Input graph has cycles, unable to add {[node for node in remaining_deps if remaining_deps[node] != 0]}")

    sorted_graph._codegen = gm.graph._codegen
    gm.graph = sorted_graph
    return gm
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .common import lift_subgraph_as_module, HolderModule, compare_graphs
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (279 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/common.cpython-310.pyc
ADDED
|
Binary file (2.52 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/fuser_utils.cpython-310.pyc
ADDED
|
Binary file (5.09 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/matcher_utils.cpython-310.pyc
ADDED
|
Binary file (12.1 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/source_matcher_utils.cpython-310.pyc
ADDED
|
Binary file (3.98 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/common.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.nn import Module
|
| 2 |
+
|
| 3 |
+
from torch.fx.graph_module import GraphModule
|
| 4 |
+
from torch.fx.graph import Graph
|
| 5 |
+
from torch.fx.passes.utils.matcher_utils import SubgraphMatcher
|
| 6 |
+
from torch.fx._compatibility import compatibility
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
__all__ = ['HolderModule', 'lift_subgraph_as_module', 'compare_graphs']
|
| 10 |
+
|
| 11 |
+
@compatibility(is_backward_compatible=False)
class HolderModule(Module):
    """
    HolderModule is used to copy all the attributes from original module to submodules
    that uses the attributes
    """

    def __init__(self, d):
        super().__init__()
        # Register each entry of the mapping as a named child module.
        for child_name, child in d.items():
            self.add_module(child_name, child)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@compatibility(is_backward_compatible=False)
def lift_subgraph_as_module(gm: GraphModule, subgraph: Graph, class_name: str = 'GraphModule') -> GraphModule:
    """
    Create a GraphModule for subgraph, which copies the necessary attributes from the original parent graph_module.

    Args:
        gm (GraphModule): parent graph module

        subgraph (Graph): a valid subgraph that contains copied nodes from the parent graph

        class_name (str): name for the submodule

    """

    # Mirror the attribute hierarchy referenced by the subgraph's call_module
    # and get_attr nodes. For a target such as "conv.weight" we build a chain
    # of HolderModules (root -> "conv") and then point "weight" on the inner
    # holder at the attribute living in the original module.
    root = HolderModule({})
    for node in subgraph.nodes:
        if node.op not in ("call_module", "get_attr"):
            continue

        assert isinstance(node.target, str)
        path = node.target.split(".")

        holder = root
        source = gm
        for part in path[:-1]:
            # Create intermediate holders lazily; walk the original module in
            # lockstep so `source` tracks the attribute's real owner.
            if not hasattr(holder, part):
                holder.add_module(part, HolderModule({}))
            holder = getattr(holder, part)
            source = getattr(source, part)

        leaf = path[-1]
        # Relies on Module's custom __setattr__ to register the attribute.
        setattr(holder, leaf, getattr(source, leaf))

    return GraphModule(root, subgraph, class_name)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@compatibility(is_backward_compatible=False)
def compare_graphs(left: Graph, right: Graph) -> bool:
    """
    Return True if two graphs are identical, i.e they
    - have the same number of outputs in the same order
    - have the same number of inputs in the same order
    - have the same set of nodes, and identical connectivity
    """

    # Treat `left` as a pattern anchored at both placeholders and outputs;
    # with both anchors required, any match means the graphs are identical.
    pattern = SubgraphMatcher(left, match_output=True, match_placeholder=True)
    return len(pattern.match(right)) > 0
|
llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/fuser_utils.py
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
from queue import SimpleQueue
|
| 3 |
+
from typing import List, Dict, Tuple
|
| 4 |
+
|
| 5 |
+
import torch.fx
|
| 6 |
+
from torch.fx.graph_module import GraphModule
|
| 7 |
+
from torch.fx.graph import Graph
|
| 8 |
+
from torch.fx.node import Node
|
| 9 |
+
from torch.fx.passes.tools_common import NodeList, NodeSet, legalize_graph
|
| 10 |
+
from torch.fx.passes.utils import lift_subgraph_as_module
|
| 11 |
+
from torch.fx._compatibility import compatibility
|
| 12 |
+
|
| 13 |
+
@compatibility(is_backward_compatible=False)
def topo_sort(nodes: NodeList) -> NodeList:
    """Return `nodes` reordered into topological order (Kahn's algorithm).

    Only dependency edges between nodes inside `nodes` are considered; edges
    to/from nodes outside the list are ignored.
    """
    pending: SimpleQueue = SimpleQueue()
    unmet_deps = {node: 0 for node in nodes}

    # Count, for every node, how many of its producers are inside the list;
    # nodes with no in-list producers can be emitted immediately.
    for node in nodes:
        unmet_deps[node] = sum(1 for src in node.all_input_nodes if src in unmet_deps)
        if unmet_deps[node] == 0:
            pending.put(node)

    ordered: NodeList = []
    while not pending.empty():
        current = pending.get()
        ordered.append(current)

        # Releasing `current` may satisfy the last dependency of a consumer.
        for consumer in current.users:
            if consumer in unmet_deps:
                unmet_deps[consumer] -= 1
                if unmet_deps[consumer] == 0:
                    pending.put(consumer)

    assert len(nodes) == len(ordered), "topological sorted nodes doesn't have same length as input nodes"

    return ordered
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@compatibility(is_backward_compatible=False)
def validate_partition(partition: NodeList) -> bool:
    """Return True if fusing `partition` would not create a dependency cycle.

    A partition is invalid when a path leaves the partition (through an
    external consumer) and later re-enters it: fusing would then make the
    fused node depend on itself.
    """
    members = set(partition)

    # Every external consumer of a partition node is an escape point the
    # fused subgraph would have to expose as an output.
    escape_points: NodeList = []
    for node in members:
        for consumer in node.users:
            if consumer not in members:
                escape_points.append(consumer)

    # Walk forward (toward users) from the escape points; reaching any member
    # of the partition again proves a cycle. `seen` prevents re-visits.
    seen: NodeSet = set()
    frontier: NodeList = escape_points
    while frontier:
        current = frontier.pop()
        seen.add(current)
        if current in members:
            # Left the partition and came back — cycle.
            return False
        for consumer in current.users:
            if consumer not in seen:
                frontier.append(consumer)

    return True
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@compatibility(is_backward_compatible=False)
def fuse_as_graphmodule(gm: GraphModule,
                        nodes: NodeList,
                        module_name: str) -> Tuple[GraphModule, Tuple[Node, ...], Tuple[Node, ...]]:

    """
    Fuse nodes in graph_module into a GraphModule.

    Args:
        gm (GraphModule): target graph_module

        nodes (List[Node]): list of nodes in `gm` to fuse, where the node must be topologically sorted

        module_name: class name for the fused GraphModule

    Returns:
        fused_gm (GraphModule): fused graph module, where its node is a copy of `nodes` in `gm`

        original_inputs (Tuple[Node, ...]): input nodes to `nodes` in original `gm`

        original_outputs (Tuple[Node, ...]): consumer nodes of `nodes` in original `gm`

    """

    # assumption: nodes are already sorted in topo order

    # Sanity-check that every node is a live member of gm's graph before copying.
    for node in nodes:
        assert node.graph.owning_module is gm, f"{node} doesn't belong to passed in graph module {gm._get_name()}"
        assert not node._erased, f"{node} has been removed from owning graph"
        assert node in gm.graph.nodes, f"{node} is not found in graph module {gm._get_name()}"

    # validates partition doesn't introduce dependency circles in the graph
    assert validate_partition(nodes), "Invalid partition, found dependency cycles"

    subgraph = Graph()

    node_to_placeholder: Dict[Node, Node] = {}  # mapping of nodes from old graph to placeholder in new graph
    node_map: Dict[Node, Node] = {}  # mapping of nodes from old graph to new graph

    # handles inputs through graph.node_copy's arg_transform functions
    def remap_inputs(x):
        if x.op == "get_attr":
            # TODO: do we really need copy the get_attr node into the graph?
            # do something here
            pass

        if x in nodes:
            # x is inside subgraph, return the copied node
            # the node should have been copied already, as we are copying graph in the topological order
            return node_map[x]

        if x not in node_to_placeholder:
            # x is not in subgraph, create a new placeholder for subgraph
            # (deduplicated: each external producer gets exactly one placeholder)
            placeholder_node = subgraph.placeholder(x.name, type_expr=x.type)
            # copy all meta fields, even if some fields might be irrelevant for the placeholder node
            placeholder_node.meta = copy.copy(x.meta)
            node_to_placeholder[x] = placeholder_node

        return node_to_placeholder[x]

    # copy nodes in topological order
    for node in nodes:
        new_node = subgraph.node_copy(node, remap_inputs)
        node_map[node] = new_node

    # handles outputs
    output_mapping: Dict[Node, Node] = {}  # mapping from old output to new outputs

    # A node must become a subgraph output if any of its users lies outside
    # the partition.
    for node in nodes:
        for user_node in node.users:
            if user_node not in nodes:
                # external user node, need to expose as an output
                output_mapping[node] = node_map[node]

    # outs contain nodes in the new subgraph
    outs = tuple(output_mapping.values())

    # Take care of the args of FX output node. If there's a single
    # output then the output node args is like (output_single), else
    # if there're multiple outputs then the output node args is like
    # ((output_0, output_1, ...)).
    subgraph.output(outs[0] if len(outs) == 1 else outs)

    # lint to ensure correctness
    subgraph.lint()

    fused_gm: GraphModule = lift_subgraph_as_module(gm, subgraph, class_name=module_name)

    # sub_gm's input nodes in the original module
    # (dict preserves insertion order, so these align with the placeholders)
    original_inputs: Tuple[Node, ...] = tuple(node_to_placeholder.keys())

    # sub_gm's outputs node in the original module
    original_outputs: Tuple[Node, ...] = tuple(output_mapping.keys())

    return fused_gm, original_inputs, original_outputs
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
@compatibility(is_backward_compatible=False)
def insert_subgm(gm: GraphModule, sub_gm: GraphModule, orig_inputs: Tuple[Node, ...], orig_outputs: Tuple[Node, ...]):
    """Install `sub_gm` into `gm` as a submodule and route callers through it.

    A call_module node invoking `sub_gm` (fed by `orig_inputs`) is appended to
    `gm`'s graph, and every use of the nodes in `orig_outputs` is redirected
    to the submodule call's result(s). Returns `gm`.
    """
    # Register the fused module under its generated class name.
    submodule_name = sub_gm.__class__.__name__
    gm.add_submodule(submodule_name, sub_gm)

    # Call the submodule from the main graph with the original input nodes.
    module_node = gm.graph.call_module(
        submodule_name,
        args=orig_inputs,
        kwargs=None)

    if len(orig_outputs) == 1:
        # Single output: the call node itself replaces the original producer.
        orig_outputs[0].replace_all_uses_with(module_node, propagate_meta=True)
    else:
        # Multiple outputs: route each consumer through a getitem projection;
        # Proxy indexing records the getitem node into the graph.
        for idx, orig_output in enumerate(orig_outputs):
            proxy_out = torch.fx.Proxy(module_node)[idx].node  # type: ignore[index]
            orig_output.replace_all_uses_with(proxy_out, propagate_meta=True)
    return gm
|
| 209 |
+
|
| 210 |
+
@compatibility(is_backward_compatible=False)
def erase_nodes(gm: GraphModule, nodes: NodeList):
    """Remove `nodes` from `gm`'s graph.

    Deletion runs in reverse topological order so that each node is
    user-free by the time it is erased (erase_node requires zero users).
    """
    for doomed in reversed(nodes):
        gm.graph.erase_node(doomed)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
@compatibility(is_backward_compatible=False)
def fuse_by_partitions(gm: GraphModule, partitions: List[NodeList]) -> GraphModule:
    """Fuse each node list in `partitions` into its own submodule of `gm`.

    Partition i becomes a GraphModule named "fused_<i>" inserted in place of
    its nodes; `gm` is modified in place, re-sorted topologically, and
    returned.
    """
    for index, partition_nodes in enumerate(partitions):
        ordered = topo_sort(partition_nodes)

        sub_gm, orig_inputs, orig_outputs = fuse_as_graphmodule(
            gm, ordered, "fused_" + str(index))

        insert_subgm(gm, sub_gm, orig_inputs, orig_outputs)

        erase_nodes(gm, ordered)

    # Re-establish a valid topological order after the insertions above,
    # since call_module nodes were appended at the end of the graph.
    legalize_graph(gm)

    return gm
|