diff --git a/.gitattributes b/.gitattributes index f93e5933ef14bcbd7a15b0e5ba16dd1a9cb95a39..414c9633ff582793381a0c022b1f2c3546900de5 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1192,3 +1192,8 @@ vlmpy310/lib/python3.10/site-packages/pyglet/gl/__pycache__/gl.cpython-310.pyc f llava_next/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text llava_next/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text vlmpy310/lib/python3.10/site-packages/pyglet/input/__pycache__/controller_db.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +llava_next/lib/python3.10/site-packages/torchvision.libs/libpng16.7f72a3c5.so.16 filter=lfs diff=lfs merge=lfs -text +llava_next/lib/python3.10/site-packages/torch/lib/libtorch.so filter=lfs diff=lfs merge=lfs -text +vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_watershed_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_quickshift_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ec154e4a5ceaeee9e5f1d625419cceb3ca5957f Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_drawer.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_drawer.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..33ac7568f4ca2dfcf751344fe4d12a4b9e94a174 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_drawer.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_manipulation.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_manipulation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef4a27fbab952c8b6fd758056d609bdcc4daa5d0 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_manipulation.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/net_min_base.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/net_min_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53d99a73e854d3d8a864665508728dfc7ef29f3a Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/net_min_base.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/param_fetch.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/param_fetch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ab5a4779ef7b08145dca54e247ef70c2843ef8b Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/param_fetch.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/pass_manager.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/pass_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..759e61ed58c8dca89fe5bc56aff1a112aa58fa97 Binary files /dev/null and 
b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/pass_manager.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/shape_prop.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/shape_prop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ad96ce9011fc7924273dcf10b3a01ba0805373f Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/shape_prop.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/splitter_base.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/splitter_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..103669274755a81c4f572673e84c8593d373235c Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/__pycache__/splitter_base.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..f77fddfb4d551685845e28853b28adcd34bea9f4 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py @@ -0,0 +1,42 @@ +import operator + +import torch + + +def annotate_getitem_nodes(graph: torch.fx.Graph) -> None: + """ + Annotate the type of getitem nodes, inferred from the type of sequence node. + If sequence node is not annotated with a type, do nothing. + Currently support getitem nodes from Tuple, List, and NamedTuple sequence node. + + This is helpful since annotations on local names within function are lost during FX transforms. + Adding back known type annotation for getitem nodes to improve jit scriptability. 
+ + Args: + graph (Graph): The graph to be annotated + """ + for node in graph.nodes: + if node.target == operator.getitem: + sequence_node, index_node = node.args + if not sequence_node.type: + continue + # container types + if hasattr(sequence_node.type, "_name"): + parameterized_types = sequence_node.type.__args__ + if sequence_node.type._name == "Tuple": + if len(parameterized_types) == 2 and isinstance( + parameterized_types[1], type(...) + ): + node.type = parameterized_types[0] + else: + assert len(parameterized_types) > index_node + node_type = parameterized_types[index_node] + node.type = node_type + elif sequence_node.type._name == "List": + assert len(parameterized_types) == 1 + node.type = parameterized_types[0] + # NamedTuple type + elif hasattr(sequence_node.type, "__annotations__"): + sequence_node_field_types = sequence_node.type.__annotations__ + field_name = sequence_node.type._fields[index_node] + node.type = sequence_node_field_types[field_name] diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__init__.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2914b59df7a547ca036cb5ced464a27486b6d345 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/cudagraphs.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/cudagraphs.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f7c2c1061f74f814cbd31108cec8c6c9b9aee8f8 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/cudagraphs.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/cudagraphs.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/cudagraphs.py new file mode 100644 index 0000000000000000000000000000000000000000..2d4ccbcfb3dc5f66fd15c5dd1411d78c7994511b --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/backends/cudagraphs.py @@ -0,0 +1,56 @@ +import torch +from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner +from torch.fx.passes.operator_support import OperatorSupport +from torch.fx.passes.tools_common import CALLABLE_NODE_OPS +from torch.fx.passes.fake_tensor_prop import FakeTensorProp +from torch.utils._pytree import tree_map + +import operator + +class CudaGraphsSupport(OperatorSupport): + # TODO: why is submodules passed here + def is_node_supported(self, submodules, node: torch.fx.Node) -> bool: + if node.op not in CALLABLE_NODE_OPS: + return False + + if node.target in [torch.ops.aten.embedding_dense_backward.default]: + return False + + if node.target in [operator.getitem]: + return True + + found_not_cuda = False + + def meta_fk(meta): + return meta["val"] if "val" in meta else meta["fake_result"] + + def find_not_cuda(t): + nonlocal found_not_cuda + if isinstance(t, torch.Tensor) and t.device.type != 'cuda': + found_not_cuda = True + + for n in node.all_input_nodes: + tree_map(find_not_cuda, meta_fk(n.meta)) + + tree_map(find_not_cuda, meta_fk(node.meta)) + + # NB: factory function is accounted for because the result would be + # cpu or cuda + + return not found_not_cuda + +def partition_cudagraphs(gm, inputs): + """ + Partition an FX graph into sub-GraphModules that can be validly run under + CUDA graphs. 
For a subgraph to be runnable under CUDA, all of the operations + must involve CUDA tensors only/ + """ + + FakeTensorProp(gm).propagate(*inputs) + supported_ops = CudaGraphsSupport() + # TODO: single node partition may be wrong due to the pessimization + # from copying in and out the data. Check in benchmarks, perhaps + partitioner = CapabilityBasedPartitioner(gm, supported_ops, allows_single_node_partition=True) + partitions = partitioner.propose_partitions() + fused_graph = partitioner.fuse_partitions(partitions) + return fused_graph diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..927e46ba9d31e487da12230021376a6489f7aba0 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__init__.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca508148fa0fdaaedbc286b04fff4483c08a3f6b Binary files /dev/null and 
b/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/cse_pass.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/cse_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..074dfebf108f2be738b8ff85b0205529159bf702 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/cse_pass.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/cse_pass.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/cse_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..dc95a70a22a7da599880d962b40c6a0a25aa5634 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/dialect/common/cse_pass.py @@ -0,0 +1,112 @@ +from typing import Dict, Tuple, Any + +import torch +from torch.fx.passes.infra.pass_base import PassBase, PassResult +from torch.utils._pytree import tree_flatten + +from torch.fx import GraphModule, Graph +from torch.fx import Node + +aten = torch.ops.aten + + +# stateful ops are banned from CSE +rand_ops = {aten.dropout, aten._fused_dropout, aten._standard_gamma, aten.bernoulli, aten.multinomial, aten.native_dropout, aten.normal, aten.poisson, aten.binomial, aten.rrelu, aten.rand_like, aten.rand, aten.randint, aten.randn, aten.randperm} # noqa: E501,B950 + +inplace_ops = {aten.add_, aten.sub_, aten.mul_, aten.div_, aten.pow_, aten.lerp_, aten.relu_, aten.sigmoid_, aten.tanh_} # noqa: E501 + + +@torch.fx._compatibility.compatibility(is_backward_compatible=False) +def get_CSE_banned_ops(): + return rand_ops.union(inplace_ops) + + +@torch.fx._compatibility.compatibility(is_backward_compatible=False) +class CSEPass(PassBase): + + def __init__(self, banned_ops=None): + 
""" + This version of CSE Pass aims to be dialect agnostic, and it's implemented purely based on the connectivity between fx.Node. + + For functional dialects, user would only need to specify the random ops in ban list. + + Warning: CSE Pass cannot be safely applied on a FX graph in non-functional dialects. + If your dialect contains stateful operators, please customized the banned_ops. + + """ + if banned_ops is None: + banned_ops = set() + self.banned_ops = banned_ops + super().__init__() + + def call(self, graph_module: GraphModule) -> PassResult: + """ + Return a new copy of torch.fx.GraphModule with CSE applied to the input graph + + Example usage: + + from torch.fx.experimental.proxy_tensor import make_fx + def f(a): + b = a * a + c = a * a + return b+c + + p = CSEPass() + traced_graph = make_fx(f)(torch.tensor(1)) + print(traced_graph) + result = p(traced_graph) + print(result.graph_module) + """ + def get_aten_target(node): + if hasattr(node.target, 'overloadpacket'): + return node.target.overloadpacket + return node.target + + modified = False + new_graph = Graph() + env: Dict[Node, Node] = {} # map from node in the old graph to node in the new graph + hash_env: Dict[Tuple[torch._ops.OpOverload, int], Node] = {} # map from hash to a node in the new graph + token_map: Dict[Tuple[torch._ops.OpOverload, int], Dict[str, Any]] = {} # map from hash to token + for n in graph_module.graph.nodes: + # The placeholder, output, and get_attr nodes are copied to the new graph without change + # do not CSE away random operations + if n.op == 'placeholder' or n.op == 'output' or n.op == 'get_attr' or get_aten_target(n) in self.banned_ops: + new_node = new_graph.node_copy(n, lambda x: env[x]) + env[n] = new_node + else: # n.op == 'call_function', should never see n.op == 'call_module' or 'call_method' + # substitute args and kwargs members to their mapping in env if exists + # specs can be used to reconstruct nested list/dictionaries + def substitute(arg_list): + arg_list, 
spec = tree_flatten(arg_list) + for i in range(len(arg_list)): + v = arg_list[i] + if isinstance(v, Node) and v in env: + arg_list[i] = env[v] + return tuple(arg_list), spec + args, args_spec = substitute(n.args) + kwargs, kwargs_spec = substitute(n.kwargs) + + # each token corresponds to a unique node + # nodes with the same token can be substituted + token = {"target": n.target, "args": args, "args_spec": args_spec, + "kwargs": kwargs, "kwargs_spec": kwargs_spec} + + # hash substituted args to a number, do not hash specs because specs are not hashable + hash_arg = hash((args, kwargs)) + hash_val = (n.target, hash_arg) + + # check if a node has a substitute and can be eliminated + hash_val_in_hash_env = hash_val in hash_env + if hash_val_in_hash_env and token_map[hash_val] == token: + modified = True # substitution happens and the graph is modified + env[n] = hash_env[hash_val] + continue + + new_node = new_graph.node_copy(n, lambda x: env[x]) + env[n] = new_node + if not hash_val_in_hash_env: + hash_env[hash_val] = new_node + token_map[hash_val] = token + + csed_gm = GraphModule(graph_module, new_graph) + return PassResult(csed_gm, modified) diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/fake_tensor_prop.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/fake_tensor_prop.py new file mode 100644 index 0000000000000000000000000000000000000000..737412d22a7078f2a1c6b50a66370e3353226e35 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/fake_tensor_prop.py @@ -0,0 +1,61 @@ +from typing import Optional + +import torch.fx +from torch.fx import Node +from torch.fx._compatibility import compatibility +from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor +from torch.fx.experimental.proxy_tensor import py_sym_types, snapshot_fake +from torch.fx.node import map_aggregate + +__all__ = ['FakeTensorProp'] + +@compatibility(is_backward_compatible=False) +class FakeTensorProp(torch.fx.Interpreter): + """ + 
Execute an FX graph Node-by-Node and record a fake tensor representing + the metadata for the node. Unlike ShapeProp, (1) this propagation + is cheap--it does the propagation with meta tensors which do not actually + store data, and (2) the fake tensors have much more fine grained information, + e.g., they have accurate alias information that can be consulted by looking + at the storages. + + Args: + module (GraphModule): The module to be executed + mode (Optional[FakeTensorMode]): The dispatch mode used to execute computation indicated by each FX Node. + """ + def __init__(self, module: torch.fx.GraphModule, mode: Optional[FakeTensorMode] = None): + super().__init__(module) + if mode is None: + mode = FakeTensorMode() + self._mode = mode + + def run_node(self, n: Node): + result = super().run_node(n) + + def extract_val(obj): + if isinstance(obj, FakeTensor): + return snapshot_fake(obj) + elif isinstance(obj, torch.Tensor): + # TODO: How is it possible that we get a non fake tensor? We + # should be running under the mode... 
+ return snapshot_fake(self._mode.from_tensor(obj, static_shapes=True)) + elif isinstance(obj, py_sym_types): + return obj + else: + return None + + meta = map_aggregate(result, extract_val) + if meta is not None: + n.meta['val'] = meta + return result + + def propagate(self, *args): + fake_args = [ + self._mode.from_tensor(a) if isinstance(a, torch.Tensor) else a + for a in args + ] + return self.propagate_dont_convert_inputs(*fake_args) + + def propagate_dont_convert_inputs(self, *args): + with self._mode: + return super().run(*args) diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py new file mode 100644 index 0000000000000000000000000000000000000000..96c59c49e08d6e748712778a3040aec321158b37 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py @@ -0,0 +1,347 @@ + +import hashlib +import torch +import torch.fx +from typing import Dict, Any, TYPE_CHECKING +from torch.fx.node import _get_qualified_name, _format_arg +from torch.fx.passes.shape_prop import TensorMetadata +from torch.fx._compatibility import compatibility +from itertools import chain + +__all__ = ['FxGraphDrawer'] +try: + import pydot + HAS_PYDOT = True +except ImportError: + HAS_PYDOT = False + +_COLOR_MAP = { + "placeholder": '"AliceBlue"', + "call_module": "LemonChiffon1", + "get_param": "Yellow2", + "get_attr": "LightGrey", + "output": "PowderBlue", +} + +_HASH_COLOR_MAP = [ + "CadetBlue1", + "Coral", + "DarkOliveGreen1", + "DarkSeaGreen1", + "GhostWhite", + "Khaki1", + "LavenderBlush1", + "LightSkyBlue", + "MistyRose1", + "MistyRose2", + "PaleTurquoise2", + "PeachPuff1", + "Salmon", + "Thistle1", + "Thistle3", + "Wheat1", +] + +_WEIGHT_TEMPLATE = { + "shape": "record", + "fillcolor": "Salmon", + "style": '"filled,rounded"', + "fontcolor": "#000000", +} + +if HAS_PYDOT: + @compatibility(is_backward_compatible=False) + class FxGraphDrawer: + """ + 
Visualize a torch.fx.Graph with graphviz + Basic usage: + g = FxGraphDrawer(symbolic_traced, "resnet18") + g.get_dot_graph().write_svg("a.svg") + """ + + def __init__( + self, + graph_module: torch.fx.GraphModule, + name: str, + ignore_getattr: bool = False, + ignore_parameters_and_buffers: bool = False, + skip_node_names_in_args: bool = True, + ): + self._name = name + self._dot_graphs = { + name: self._to_dot( + graph_module, name, ignore_getattr, ignore_parameters_and_buffers, skip_node_names_in_args + ) + } + + for node in graph_module.graph.nodes: + if node.op != "call_module": + continue + + leaf_node = self._get_leaf_node(graph_module, node) + + if not isinstance(leaf_node, torch.fx.GraphModule): + continue + + self._dot_graphs[f"{name}_{node.target}"] = self._to_dot( + leaf_node, + f"{name}_{node.target}", + ignore_getattr, + ignore_parameters_and_buffers, + skip_node_names_in_args, + ) + + def get_dot_graph(self, submod_name=None) -> pydot.Dot: + """ + Visualize a torch.fx.Graph with graphviz + Example: + >>> # xdoctest: +REQUIRES(module:pydot) + >>> # define module + >>> class MyModule(torch.nn.Module): + >>> def __init__(self): + >>> super().__init__() + >>> self.linear = torch.nn.Linear(4, 5) + >>> def forward(self, x): + >>> return self.linear(x).clamp(min=0.0, max=1.0) + >>> module = MyModule() + >>> # trace the module + >>> symbolic_traced = torch.fx.symbolic_trace(module) + >>> # setup output file + >>> import ubelt as ub + >>> dpath = ub.Path.appdir('torch/tests/FxGraphDrawer').ensuredir() + >>> fpath = dpath / 'linear.svg' + >>> # draw the graph + >>> g = FxGraphDrawer(symbolic_traced, "linear") + >>> g.get_dot_graph().write_svg(fpath) + """ + if submod_name is None: + return self.get_main_dot_graph() + else: + return self.get_submod_dot_graph(submod_name) + + def get_main_dot_graph(self) -> pydot.Dot: + return self._dot_graphs[self._name] + + def get_submod_dot_graph(self, submod_name) -> pydot.Dot: + return 
self._dot_graphs[f"{self._name}_{submod_name}"] + + def get_all_dot_graphs(self) -> Dict[str, pydot.Dot]: + return self._dot_graphs + + def _get_node_style(self, node: torch.fx.Node) -> Dict[str, str]: + template = { + "shape": "record", + "fillcolor": "#CAFFE3", + "style": '"filled,rounded"', + "fontcolor": "#000000", + } + if node.op in _COLOR_MAP: + template["fillcolor"] = _COLOR_MAP[node.op] + else: + # Use a random color for each node; based on its name so it's stable. + target_name = node._pretty_print_target(node.target) + target_hash = int(hashlib.md5(target_name.encode()).hexdigest()[:8], 16) + template["fillcolor"] = _HASH_COLOR_MAP[target_hash % len(_HASH_COLOR_MAP)] + return template + + def _get_leaf_node( + self, module: torch.nn.Module, node: torch.fx.Node + ) -> torch.nn.Module: + py_obj = module + assert isinstance(node.target, str) + atoms = node.target.split(".") + for atom in atoms: + if not hasattr(py_obj, atom): + raise RuntimeError( + str(py_obj) + " does not have attribute " + atom + "!" 
+ ) + py_obj = getattr(py_obj, atom) + return py_obj + + def _typename(self, target: Any) -> str: + if isinstance(target, torch.nn.Module): + ret = torch.typename(target) + elif isinstance(target, str): + ret = target + else: + ret = _get_qualified_name(target) + + # Escape "{" and "}" to prevent dot files like: + # https://gist.github.com/SungMinCho/1a017aab662c75d805c5954d62c5aabc + # which triggers `Error: bad label format (...)` from dot + return ret.replace("{", r"\{").replace("}", r"\}") + + def _get_node_label( + self, + module: torch.fx.GraphModule, + node: torch.fx.Node, + skip_node_names_in_args: bool, + ) -> str: + def _get_str_for_args_kwargs(arg): + if isinstance(arg, tuple): + prefix, suffix = r"|args=(\l", r",\n)\l" + arg_strs_list = [_format_arg(a, max_list_len=8) for a in arg] + elif isinstance(arg, dict): + prefix, suffix = r"|kwargs={\l", r",\n}\l" + arg_strs_list = [ + f"{k}: {_format_arg(v, max_list_len=8)}" + for k, v in arg.items() + ] + else: # Fall back to nothing in unexpected case. + return "" + + # Strip out node names if requested. 
+ if skip_node_names_in_args: + arg_strs_list = [a for a in arg_strs_list if "%" not in a] + if len(arg_strs_list) == 0: + return "" + arg_strs = prefix + r",\n".join(arg_strs_list) + suffix + return arg_strs.replace("{", r"\{").replace("}", r"\}") + + + label = "{" + f"name=%{node.name}|op_code={node.op}\n" + + if node.op == "call_module": + leaf_module = self._get_leaf_node(module, node) + label += r"\n" + self._typename(leaf_module) + r"\n|" + extra = "" + if hasattr(leaf_module, "__constants__"): + extra = r"\n".join( + [f"{c}: {getattr(leaf_module, c)}" for c in leaf_module.__constants__] # type: ignore[union-attr] + ) + label += extra + r"\n" + else: + label += f"|target={self._typename(node.target)}" + r"\n" + if len(node.args) > 0: + label += _get_str_for_args_kwargs(node.args) + if len(node.kwargs) > 0: + label += _get_str_for_args_kwargs(node.kwargs) + label += f"|num_users={len(node.users)}" + r"\n" + + tensor_meta = node.meta.get('tensor_meta') + label += self._tensor_meta_to_label(tensor_meta) + + return label + "}" + + def _tensor_meta_to_label(self, tm) -> str: + if tm is None: + return "" + elif isinstance(tm, TensorMetadata): + return self._stringify_tensor_meta(tm) + elif isinstance(tm, list): + result = "" + for item in tm: + result += self._tensor_meta_to_label(item) + return result + elif isinstance(tm, dict): + result = "" + for v in tm.values(): + result += self._tensor_meta_to_label(v) + return result + elif isinstance(tm, tuple): + result = "" + for item in tm: + result += self._tensor_meta_to_label(item) + return result + else: + raise RuntimeError(f"Unsupported tensor meta type {type(tm)}") + + def _stringify_tensor_meta(self, tm: TensorMetadata) -> str: + result = "" + if not hasattr(tm, "dtype"): + print("tm", tm) + result += "|" + "dtype" + "=" + str(tm.dtype) + r"\n" + result += "|" + "shape" + "=" + str(tuple(tm.shape)) + r"\n" + result += "|" + "requires_grad" + "=" + str(tm.requires_grad) + r"\n" + result += "|" + "stride" + "=" + 
str(tm.stride) + r"\n" + if tm.is_quantized: + assert tm.qparams is not None + assert "qscheme" in tm.qparams + qscheme = tm.qparams["qscheme"] + if qscheme in { + torch.per_tensor_affine, + torch.per_tensor_symmetric, + }: + result += "|" + "q_scale" + "=" + str(tm.qparams["scale"]) + r"\n" + result += "|" + "q_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n" + elif qscheme in { + torch.per_channel_affine, + torch.per_channel_symmetric, + torch.per_channel_affine_float_qparams, + }: + result += "|" + "q_per_channel_scale" + "=" + str(tm.qparams["scale"]) + r"\n" + result += "|" + "q_per_channel_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n" + result += "|" + "q_per_channel_axis" + "=" + str(tm.qparams["axis"]) + r"\n" + else: + raise RuntimeError(f"Unsupported qscheme: {qscheme}") + result += "|" + "qscheme" + "=" + str(tm.qparams["qscheme"]) + r"\n" + return result + + def _get_tensor_label(self, t: torch.Tensor) -> str: + return str(t.dtype) + str(list(t.shape)) + r"\n" + + def _to_dot( + self, + graph_module: torch.fx.GraphModule, + name: str, + ignore_getattr: bool, + ignore_parameters_and_buffers: bool, + skip_node_names_in_args: bool, + ) -> pydot.Dot: + """ + Actual interface to visualize a fx.Graph. Note that it takes in the GraphModule instead of the Graph. + If ignore_parameters_and_buffers is True, the parameters and buffers + created with the module will not be added as nodes and edges. + """ + dot_graph = pydot.Dot(name, rankdir="TB") + + for node in graph_module.graph.nodes: + if ignore_getattr and node.op == "get_attr": + continue + + style = self._get_node_style(node) + dot_node = pydot.Node( + node.name, label=self._get_node_label(graph_module, node, skip_node_names_in_args), **style + ) + dot_graph.add_node(dot_node) + + def get_module_params_or_buffers(): + for pname, ptensor in chain( + leaf_module.named_parameters(), leaf_module.named_buffers() + ): + pname1 = node.name + "." 
+ pname + label1 = ( + pname1 + "|op_code=get_" + "parameter" + if isinstance(ptensor, torch.nn.Parameter) + else "buffer" + r"\l" + ) + dot_w_node = pydot.Node( + pname1, + label="{" + label1 + self._get_tensor_label(ptensor) + "}", + **_WEIGHT_TEMPLATE, + ) + dot_graph.add_node(dot_w_node) + dot_graph.add_edge(pydot.Edge(pname1, node.name)) + + if node.op == "call_module": + leaf_module = self._get_leaf_node(graph_module, node) + + if not ignore_parameters_and_buffers and not isinstance(leaf_module, torch.fx.GraphModule): + get_module_params_or_buffers() + + for node in graph_module.graph.nodes: + if ignore_getattr and node.op == "get_attr": + continue + + for user in node.users: + dot_graph.add_edge(pydot.Edge(node.name, user.name)) + + return dot_graph + +else: + if not TYPE_CHECKING: + @compatibility(is_backward_compatible=False) + class FxGraphDrawer: + def __init__(self, graph_module: torch.fx.GraphModule, name: str, ignore_getattr: bool = False): + raise RuntimeError('FXGraphDrawer requires the pydot package to be installed. Please install ' + 'pydot through your favorite Python package manager.') diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__init__.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..657b6a93014f428eece18ec896136c81bc3949f3 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__init__.py @@ -0,0 +1,2 @@ + +from . 
import pass_manager diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dba22b50ce305b4e1d3719157bc19013e6308d6 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/partitioner.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/partitioner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b3693ff1fdfb7e9ef2549854b6acfdaa26c760f Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/partitioner.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_base.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebe0f9849130392eb8f0ffc2c4a4b07f0edd55c8 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_base.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_manager.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07172c11d4794c55ee21806b2f65699c702c60f9 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/pass_manager.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/infra/partitioner.py 
class Partition:
    """A mutable group of fx nodes identified by an optional integer id."""

    def __init__(self, id: Optional[int] = None, nodes: Optional[Iterable[Node]] = None):
        self.id = id
        # Copy into a fresh set so the caller's iterable is never aliased.
        self.nodes: Set[Node] = set() if nodes is None else set(nodes)

    def __repr__(self) -> str:
        return str(self.nodes)

    def add_node(self, node: Node):
        self.nodes.add(node)

    def remove_node(self, node: Node):
        self.nodes.remove(node)

    def size(self):
        return len(self.nodes)

class CapabilityBasedPartitioner:
    """Partitions a GraphModule into backend-supported subgraphs, driven by an
    OperatorSupportBase oracle."""

    def __init__(self,
                 graph_module: GraphModule,
                 operator_support: OperatorSupportBase,
                 allows_single_node_partition: bool = False,
                 non_compute_ops: Optional[Sequence[str]] = None,
                 allowed_single_node_partition_ops: Optional[Sequence[str]] = None,
                 ) -> None:
        self.graph_module = graph_module
        self.operator_support = operator_support
        self.allows_single_node_partition = allows_single_node_partition
        # Fall back to empty lists so downstream set() conversions always work.
        self.non_compute_ops = [] if non_compute_ops is None else non_compute_ops
        if allowed_single_node_partition_ops is None:
            self.allowed_single_node_partition_ops = []
        else:
            self.allowed_single_node_partition_ops = allowed_single_node_partition_ops

    def __is_node_supported(self, node: Node) -> bool:
        # Delegate to the operator-support oracle with the full submodule map.
        submodules = dict(self.graph_module.named_modules())
        return self.operator_support.is_node_supported(submodules, node)
def propose_partitions(self) -> List[Partition]:
    """Greedily group supported nodes into partitions, merging partitions
    whenever the merge would not introduce a cyclic dependency.

    Assumes the graph's nodes are in topological order.
    """
    assignment: Dict[Node, int] = {}          # node -> id of its current partition
    partitions_by_id: Dict[int, Partition] = {}  # partition id -> Partition
    fresh_id = itertools.count()

    # Try to merge partition `other_id` into partition `self_id`; the merge
    # only happens when the resulting graph has no cyclic dependency.
    # Returns True when the merge happened, False otherwise.
    def maybe_merge_partition(self_id: int, other_id: int):
        # Union of the two partitions' nodes.
        merged_nodes = copy(partitions_by_id[self_id].nodes)
        merged_nodes.update(partitions_by_id[other_id].nodes)

        # Only membership queries are performed on `visited`; we never
        # iterate it, so an unordered set is safe here.
        visited: Set[Node] = set()

        def dfs_iter_find_cycle(root_node):
            # Iterative DFS; True iff we can reach back into `merged_nodes`,
            # i.e. fusing the two partitions would create a cycle.
            stack: Deque[Node] = deque()
            stack.append(root_node)

            while stack:
                current = stack.pop()

                if current in visited:
                    continue
                if current in merged_nodes:
                    return True  # found a path back into the merged set

                if current in assignment:
                    # Partitions are not fused into the graph yet, so when we
                    # hit a partitioned node we must fan out through every
                    # node of that partition to model post-fusion dependencies.
                    owner_nodes = partitions_by_id[assignment[current]].nodes
                    for member in owner_nodes:
                        for consumer in member.users:
                            if consumer not in owner_nodes:
                                stack.append(consumer)
                else:
                    stack.extend(current.users)

                visited.add(current)

            return False

        # Abort the merge on the first external consumer that cycles back.
        for candidate in merged_nodes:
            for consumer in candidate.users:
                if consumer not in merged_nodes and dfs_iter_find_cycle(consumer):
                    return False

        # No cycle: adopt the union, repoint assignments, drop `other_id`.
        partitions_by_id[self_id].nodes = merged_nodes
        for member in partitions_by_id[other_id].nodes:
            assignment[member] = self_id
        del partitions_by_id[other_id]

        return True

    def merge_single_node(node: Node, id: Optional[int]):
        # Detach `node` from its current partition (if any), then attach it
        # to partition `id`; `id is None` drops the assignment entirely.
        if node in assignment:
            partitions_by_id[assignment[node]].remove_node(node)

        if id is None:
            assignment.pop(node)
        elif id not in partitions_by_id:
            assignment[node] = id
            partitions_by_id[id] = Partition(id=id, nodes=[node])
        else:
            assignment[node] = id
            partitions_by_id[id].add_node(node)

    logger.debug("Proposing partitions...")

    for node in reversed(self.graph_module.graph.nodes):
        # Dict used as an ordered set so partitioning stays deterministic;
        # the values are never read.
        merge_candidates: Dict[int, None] = {}

        # A limited horizontal fusion is enabled: even when `node` itself is
        # unsupported, consumers of `node` may still be fused below.
        if self.__is_node_supported(node) and node not in assignment:
            new_id = next(fresh_id)
            merge_single_node(node, new_id)
            merge_candidates[new_id] = None

        # Every existing partition is a merge candidate.
        for assigned in assignment:
            merge_candidates[assignment[assigned]] = None

        candidate_ids = list(merge_candidates.keys())
        if len(candidate_ids) > 1:
            anchor_id = candidate_ids[0]
            for other_id in candidate_ids[1:]:
                # Merging is a no-op when it would create a cyclic dependency.
                maybe_merge_partition(anchor_id, other_id)

    # Post-processing: pull "getitem" users into their producer's partition.
    logger.debug("Reassigning getitem nodes to its producer node's partition...")
    nodes_reassignment: Dict[Node, int] = {}
    for node in self.graph_module.graph.nodes:
        # A node is treated as tuple-output when every user is a getitem call
        # (vacuously true for nodes with no users, as in the original).
        is_tuple_output = all(
            user.op == "call_function"
            and _get_qualified_name(user.target) == "_operator.getitem"  # type: ignore[arg-type]
            for user in node.users
        )

        if is_tuple_output:
            id = assignment.get(node, None)  # type: ignore[arg-type]
            for user in node.users:
                if assignment.get(user, None) != id:  # type: ignore[arg-type]
                    nodes_reassignment[user] = id  # type: ignore[assignment]
    for node, id in nodes_reassignment.items():
        merge_single_node(node, id)

    # Filter out single-node partitions unless explicitly allowed.
    if not self.allows_single_node_partition:
        logger.debug("Filtering out single node partitions...")
        default_non_compute_ops = {"torch.ops.aten.view", "_operator.getitem"}
        non_compute_ops = default_non_compute_ops.union(set(self.non_compute_ops))
        partitions_to_remove: List[int] = []
        for id, partition in partitions_by_id.items():
            compute_node_count = 0
            for node in partition.nodes:
                if node.op == "call_function":
                    assert callable(node.target)
                    target_name = _get_qualified_name(node.target)
                    if target_name not in non_compute_ops:
                        compute_node_count += 1
                    # NOTE(review): an allowed single-node op counts a second
                    # time, guaranteeing its partition survives the filter.
                    if target_name in self.allowed_single_node_partition_ops:
                        compute_node_count += 1
            if compute_node_count <= 1:
                partitions_to_remove.append(id)
        for id in partitions_to_remove:
            del partitions_by_id[id]

    logger.debug("Partitions proposed:")
    for id, partition in partitions_by_id.items():
        logger.debug("partition #%s: %s", id, [node.name for node in partition.nodes])

    return list(partitions_by_id.values())

def fuse_partitions(self, partitions: List[Partition]) -> GraphModule:
    """Fuse each proposed partition into a submodule of the graph module."""
    logger.debug("Fusing partitions...")
    # fuse_by_partitions expects List[List[Node]]: [[node0, node1], [node2, node3]].
    node_lists = [list(partition.nodes) for partition in partitions]
    return fuse_by_partitions(self.graph_module, node_lists)

def remove_bookend_non_compute_ops(self, partitions: List[Partition]):
    """Strip non-compute ops that sit at the input/output boundary of each partition."""
    non_compute_ops = set(self.non_compute_ops)

    def is_non_compute_node(node: Node):
        return node.op == "call_function" and \
            _get_qualified_name(node.target) in non_compute_ops  # type: ignore[arg-type]

    # Memoized transparency verdicts. NOTE(review): the cache is keyed on the
    # node only (not on `removed_nodes`), matching the original behavior.
    transparent_input_nodes: Dict[Node, bool] = {}
    transparent_output_nodes: Dict[Node, bool] = {}

    def is_transparent_input_node(node: Node, partition: Set[Node], removed_nodes: Set[Node]):
        # Inputs from outside the partition (or already-removed nodes) are
        # trivially transparent.
        if node.op == "placeholder" or (node not in partition) or (node in removed_nodes):
            return True
        if node in transparent_input_nodes:
            return transparent_input_nodes[node]
        if is_non_compute_node(node):
            verdict = all(
                is_transparent_input_node(src, partition, removed_nodes)
                for src in node.all_input_nodes
            )
        else:
            verdict = False
        transparent_input_nodes[node] = verdict
        return verdict

    def is_transparent_output_node(node: Node, partition: Set[Node], removed_nodes: Set[Node]):
        if node.op == "placeholder" or (node not in partition) or (node in removed_nodes):
            return True
        if node in transparent_output_nodes:
            return transparent_output_nodes[node]
        if is_non_compute_node(node):
            verdict = all(
                is_transparent_output_node(dst, partition, removed_nodes)
                for dst in node.users
            )
        else:
            verdict = False
        transparent_output_nodes[node] = verdict
        return verdict

    for partition in partitions:
        # Only membership queries are performed on `remove_node`; we never
        # iterate it, so an unordered set is safe here.
        remove_node: Set[Node] = set()
        for node in partition.nodes:
            if is_non_compute_node(node) and \
                (is_transparent_input_node(node, partition.nodes, remove_node) or
                 is_transparent_output_node(node, partition.nodes, remove_node)):
                remove_node.add(node)

        if len(remove_node) != 0:
            partition.nodes = partition.nodes - remove_node
def partition_and_fuse(self) -> GraphModule:
    """CapabilityBasedPartitioner convenience wrapper: propose partitions,
    then fuse them into the graph module."""
    return self.fuse_partitions(self.propose_partitions())

@compatibility(is_backward_compatible=False)
class PassResult(namedtuple("PassResult", ["graph_module", "modified"])):
    """
    Result of a pass:
        graph_module: The modified graph module
        modified: A flag for if the pass has modified the graph module
    """
    def __new__(cls, graph_module, modified):
        return super().__new__(cls, graph_module, modified)

@compatibility(is_backward_compatible=False)
class PassBase(abc.ABC):
    """
    Base interface for implementing passes.

    It is required to implement the `call` function so that we can directly
    pass instances of the Pass directly to the PassManager and call them as a
    function.

    We can directly pass an instance of a class implementing this interface into
    the PassManager's `passes` attribute.
    """

    def __call__(self, graph_module: GraphModule) -> Optional[PassResult]:
        """Run the precondition check, the pass itself, then the postcondition check."""
        self.requires(graph_module)
        result = self.call(graph_module)
        self.ensures(graph_module)
        return result

    @abc.abstractmethod
    def call(self, graph_module: GraphModule) -> Optional[PassResult]:
        """
        The pass that is run through the given graph module. To implement a
        pass, it is required to implement this function.

        Args:
            graph_module: The graph module we will run a pass on
        """

    def requires(self, graph_module: GraphModule) -> None:  # noqa: B027
        """
        Optional precondition hook, called before the pass runs. Override to
        check that `graph_module` satisfies the pass's preconditions. It is
        not required to implement this function.

        Args:
            graph_module: The graph module we will run checks on
        """

    def ensures(self, graph_module: GraphModule) -> None:  # noqa: B027
        """
        Optional postcondition hook, called after the pass runs. Override to
        check that `graph_module` satisfies the pass's postconditions. It is
        not required to implement this function.

        Args:
            graph_module: The graph module we will run checks on
        """

@compatibility(is_backward_compatible=False)
def pass_result_wrapper(fn: Callable) -> Callable:
    """
    Wrapper for passes which currently do not return a PassResult.
    This wrapper makes them return a PassResult containing the modified object
    and True for the "modified" flag.

    Args:
        fn (Callable[Module, Any])

    Returns:
        wrapped_fn (Callable[Module, PassResult])
    """
    if fn is None:
        return None

    @wraps(fn)
    def wrapped_fn(gm):
        out = fn(gm)
        if out is None:
            return PassResult(gm, True)
        if isinstance(out, PassResult):
            return out
        if isinstance(out, nn.Module):
            return PassResult(out, True)
        # NOTE(review): any other return type silently yields None, matching
        # the original fall-through behavior.

    if not inspect.isfunction(fn):
        wrapped_fn.__name__ = type(fn).__name__

    return wrapped_fn

def _validate_pass_schedule_constraint(
    constraint: Callable[[Callable, Callable], bool], passes: List[Callable]
) -> None:
    """Raise RuntimeError if any ordered pair of passes violates `constraint`."""
    for i, a in enumerate(passes):
        for j, b in enumerate(passes[i + 1 :]):
            if not constraint(a, b):
                # NOTE(review): `j` is relative to the slice, not the full
                # list, matching the original diagnostic.
                raise RuntimeError(
                    f"pass schedule constraint violated. Expected {a} before {b}"
                    f" but found {a} at index {i} and {b} at index{j} in pass"
                    f" list."
                )
def _topological_sort_passes(
    passes: List[Callable], constraints: List[Callable]
) -> List[Callable]:
    """
    Order `passes` so that every constraint is satisfied (Kahn's algorithm).

    Args
        passes: Passes that we are ordering
        constraints: Constraints applied on these passes

    Returns
        A sorted list of callables

    Raises
        RuntimeError when the constraints contain a circular dependency.
    """
    if len(constraints) == 0:
        return passes

    # `graph` maps each pass to the passes that must run after it;
    # `indegree_map` counts how many predecessors each pass still has.
    graph: Dict[Callable, List[Callable]] = {p: [] for p in passes}
    indegree_map: Dict[Callable, int] = {p: 0 for p in passes}
    candidates: Queue = Queue()

    for a in passes:
        for b in passes:
            if a == b:
                continue
            for constraint in constraints:
                # constraint(a, b) == False means "a must not precede b",
                # i.e. b is a prerequisite of a.
                if not constraint(a, b):
                    graph[b].append(a)
                    indegree_map[a] += 1

        if indegree_map[a] == 0:
            candidates.put(a)

    visited: Dict[Callable, bool] = {p: False for p in passes}
    sorted_passes: List[Callable] = []

    while not candidates.empty():
        current = candidates.get()
        sorted_passes.append(current)
        visited[current] = True

        for successor in graph[current]:
            if not visited[successor]:
                indegree_map[successor] -= 1
                if indegree_map[successor] == 0:
                    candidates.put(successor)

    # Any pass with remaining indegree sits on a cycle.
    cycle_passes = list(filter(lambda p: indegree_map[p] != 0, indegree_map.keys()))
    if len(cycle_passes) != 0:
        error = f"Circular dependency detected within the following passes: {cycle_passes}"
        raise RuntimeError(error)

    return sorted_passes
@compatibility(is_backward_compatible=False)
def this_before_that_pass_constraint(this: Callable, that: Callable) -> Callable:
    """
    Defines a partial order ('depends on' function) where `this` must occur
    before `that`.

    For example, the following pass list and constraint list would be invalid.
    ```
    passes = [pass_b, pass_a]

    constraints = [
        this_before_that_pass_constraint(pass_a, pass_b)
    ]
    ```

    Args:
        this (Callable): pass which should occur first
        that (Callable): pass which should occur later

    Returns:
        depends_on (Callable[[Object, Object], bool]
    """

    def depends_on(a: Callable, b: Callable):
        # The only forbidden ordering is `that` before `this`.
        return not (a == that and b == this)

    return depends_on


@compatibility(is_backward_compatible=False)
class PassManager:
    """
    Construct a PassManager.

    Collects passes and constraints. This defines the pass schedule, manages
    pass constraints and pass execution.

    Args:
        passes (Optional[List[Callable]]): List of passes. A pass is a
            callable which modifies an object and returns a PassResult
        constraint (Optional[List[Callable]]): List of constraints. A
            constraint is a callable which takes two passes (A, B) and returns
            True if A depends on B and False otherwise. See implementation of
            `this_before_that_pass_constraint` for example.
        steps (int): Max number of times we run the passes (default = 1).
        run_checks_after_each_pass (bool): Whether to run checks and linting
            after each pass
        suppress_check_failures (bool): Whether to raise errors when running
            checks
    """

    passes: List[Callable[[nn.Module], PassResult]]
    constraints: List[Callable[[Callable, Callable], bool]]
    _validated: bool = False
    steps: int = 1

    def __init__(
        self,
        passes=None,
        constraints=None,
        steps=None,
        run_checks_after_each_pass: bool = False,
        suppress_check_failures: bool = False,
    ):
        self.passes = passes or []
        self.constraints = constraints or []
        if steps:
            self.steps = steps

        self.run_checks_after_each_pass = run_checks_after_each_pass
        self.suppress_check_failures = suppress_check_failures

    def add_pass(self, _pass: Callable):
        """Append a pass and invalidate the cached schedule validation."""
        self.passes.append(_pass)
        self._validated = False

    def add_constraint(self, constraint: Callable):
        """Append a constraint and invalidate the cached schedule validation."""
        self.constraints.append(constraint)
        self._validated = False

    def validate_constraints(self):
        """
        Validates that current pass schedule defined by `self.passes` is valid
        according to all constraints in `self.constraints`
        """
        if self._validated:
            return
        for constraint in self.constraints:
            _validate_pass_schedule_constraint(constraint, self.passes)
        self._validated = True

    def solve_constraints(self):
        """
        Finds a valid traversal order based on the given constraints and orders
        the passes based on this order.

        If a circular dependency exists between the constraints and steps = 1,
        then we will raise an error because if steps != 1 this means that we
        will re-run the passes, allowing for circular dependencies.
        """
        self.passes = _topological_sort_passes(self.passes, self.constraints)
        self._validated = True

    def add_checks(self, check: Callable) -> None:
        """
        Adds a function which takes runs various checks on a given graph module.
        This function is run before and after each pass if the
        `run_checks_after_each_pass` flag is enabled.
        """
        sig = inspect.signature(check)

        if len(list(sig.parameters.values())) != 1:
            raise TypeError("PassManager check function should only take in one variable, a module")

        setattr(self, "check", check)  # noqa: B010

    def check(self, module: nn.Module) -> None:
        # Default check is a no-op; replaced at runtime via add_checks().
        pass

    def __call__(self, module: nn.Module) -> PassResult:
        """
        Runs a list of passes in the order based on `self.passes` on the given
        graph module. Each time a pass is run, checks and linting will be run on
        the graph module if `run_checks_after_each_pass` is set.

        If the module is a graph module, we will run the list of passes until
        the graph stops changing, or until `steps` number of times.
        """
        # Order the passes based on the constraints.
        if not self._validated:
            self.solve_constraints()

        # Check graph invariants before running anything.
        self.check(module)

        # Run the pass list `steps` times or until the graph stops changing.
        overall_modified = False
        for _ in range(self.steps):
            modified = False

            for idx, transform in enumerate(self.passes):
                name = transform.__name__ if inspect.isfunction(transform) else type(transform).__name__
                logger.debug("Running pass '%s'", name)

                try:
                    result = transform(module)

                    if not isinstance(result, PassResult) and not hasattr(
                        result, "graph_module"
                    ):
                        raise TypeError(
                            f"The result of the pass {name} should be type PassResult."
                            + "Please wrap it with pass_result_wrapper()"
                        )
                    module = result.graph_module
                    modified = modified or result.modified

                    if isinstance(module, GraphModule):
                        logger.debug("Graph after pass '%s': %s", name, module.graph)
                        module.recompile()

                    # Check graph invariants after this pass if requested.
                    if self.run_checks_after_each_pass:
                        self.check(module)

                except Exception as exc:
                    completed = [
                        p.__name__ if inspect.isfunction(p) else type(p).__name__
                        for p in self.passes[:idx]
                    ]
                    msg = f"An error occurred when running the '{name}' pass after the following passes: {completed}"
                    raise Exception(msg) from exc

            # Fixed point reached: no pass changed the module this round.
            overall_modified = overall_modified or modified
            if not modified:
                break

        return PassResult(module, overall_modified)
# fx.Node.target typename, as returned by `get_node_target()`
TargetTypeName = str

# Arguments' dtypes for a given node, see `OperatorSupport`
SupportedArgumentDTypes = t.Optional[
    t.Tuple[
        t.Sequence[t.Sequence[torch.dtype]],
        t.Dict[str, t.Sequence[torch.dtype]],
    ]
]

SupportDict = t.Mapping[TargetTypeName, SupportedArgumentDTypes]


@compatibility(is_backward_compatible=False)
class OperatorSupportBase(abc.ABC):
    """Interface for determining if a fx.Node is supported by a backend"""
    @abc.abstractmethod
    def is_node_supported(
        self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
    ) -> bool:
        raise NotImplementedError()


@compatibility(is_backward_compatible=False)
class OperatorSupport(OperatorSupportBase):
    """
    `_support_dict` maps node.target typename to supported inputs dtypes.

    node.target typename is retrieved using helper function `get_node_target()`

    If supported inputs dtypes is None, it means any dtype is supported, else
    we should see a tuple like (([dtypes], ...), {"name":[dtypes], ...}).

    The first tuple ([dtypes], ...) indicates what dtypes are supported for
    inputs in node.args and the second dict {"name": [dtypes], ...} indicates
    what dtypes are supported for inputs in node.kwargs.

    For inputs in args, if we don't want to check it, we can put None there,
    e.g. (None, [torch.float]) indicates that we don't care about the type of
    the first input in args. And for inputs in kwargs, if not listed, will not
    be checked.
    """

    _support_dict: SupportDict

    def __init__(self, support_dict: t.Optional[SupportDict] = None):
        self._support_dict = support_dict or {}

    def is_node_supported(
        self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
    ) -> bool:
        """
        Args:
            `submodules`: mapping from module name to the module. This can be
                retrieved by calling model.named_modules().

            `node`: a Fx node that we want to determine whether it's supported.

        Returns:
            `is_supported`: whether the arg `node` is supported.
        """
        # Non-callable ops (placeholder, output, ...) are always "supported".
        if node.op not in CALLABLE_NODE_OPS:
            return True

        target = get_node_target(submodules, node)

        # Unknown target: this op is not supported at all.
        if target not in self._support_dict:
            return False

        # A None entry means any dtype is accepted for this target.
        if self._support_dict[target] is None:
            return True

        args_dtypes, kwargs_dtypes = self._support_dict[target]  # type: ignore[misc]

        # Check positional-arg dtypes.
        for i, allowed in enumerate(args_dtypes):
            if len(node.args) <= i:
                break
            # None means we don't care about the dtype of args[i].
            if allowed is None:
                continue
            # Non-Node arguments are not checked.
            arg = node.args[i]
            if not isinstance(arg, torch.fx.Node):
                continue
            if _get_arg_dtype(arg) not in allowed:  # type: ignore[arg-type]
                return False

        # Check keyword-arg dtypes.
        for key, allowed in kwargs_dtypes.items():
            if key not in node.kwargs:
                continue
            kwarg = node.kwargs[key]
            if not isinstance(kwarg, torch.fx.Node):
                continue
            if _get_arg_dtype(kwarg) not in allowed:  # type: ignore[arg-type]
                return False

        return True
# ======================================================================
# Functional interfaces and utils for defining basic operator support logic
# and composing them into more complex ones
# ======================================================================

IsNodeSupported = t.Callable[[t.Mapping[str, torch.nn.Module], torch.fx.Node], bool]


@compatibility(is_backward_compatible=False)
def create_op_support(is_node_supported: IsNodeSupported) -> OperatorSupportBase:
    """Wraps a `IsNodeSupported` function into an `OperatorSupportBase` instance

    `IsNodeSupported` has the same call signature as
    `OperatorSupportBase.is_node_supported`
    """
    class FunctionalOperatorSupport(OperatorSupportBase):
        # Delegate straight to the wrapped predicate.
        def is_node_supported(
            self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
        ) -> bool:
            return is_node_supported(submodules, node)

    return FunctionalOperatorSupport()


@compatibility(is_backward_compatible=False)
def chain(*op_support: OperatorSupportBase) -> OperatorSupportBase:
    """Combines a sequence of `OperatorSupportBase` instances to form a single `OperatorSupportBase`
    instance by evaluating each input `OperatorSupportBase` instance, and returns False if
    any of it reports False.
    """
    def _chain(submods, node) -> bool:
        # Conjunction: every member must accept the node.
        return all(member.is_node_supported(submods, node) for member in op_support)

    return create_op_support(_chain)


@compatibility(is_backward_compatible=False)
def any_chain(*op_support: OperatorSupportBase) -> OperatorSupportBase:
    """Combines a sequence of `OperatorSupportBase` instances to form a single `OperatorSupportBase`
    instance by evaluating each input `OperatorSupportBase` instance, and returns True if
    any of it reports True.
    """
    def _any_chain(submods, node) -> bool:
        # Disjunction: a single accepting member suffices.
        return any(member.is_node_supported(submods, node) for member in op_support)

    return create_op_support(_any_chain)
+ """ + @classmethod + def decline_if_input_dtype(cls, dtype: torch.dtype) -> OperatorSupportBase: + """Report a node as non-supported, if any of its arguments is of dtype""" + + def _decline_if_input_dtype( + submodules: t.Mapping[str, torch.nn.Module], + node: torch.fx.Node, + ) -> bool: + for arg in node.all_input_nodes: + # escape dtype check for get_attr node + if arg.op == "get_attr": + continue + arg_dtype = _get_arg_dtype(arg) + if arg_dtype == dtype: + return False + return True + return create_op_support(_decline_if_input_dtype) + + @classmethod + def decline_if_node_in_names(cls, disallow_set: t.Set[str]) -> OperatorSupportBase: + """ + If a node has a name that is in the disallow set, reported it as non-supported. + """ + def _decline_if_node_in_names( + submodules: t.Mapping[str, torch.nn.Module], + node: torch.fx.Node, + ) -> bool: + if node.name in disallow_set: + return False + else: + return True + return create_op_support(_decline_if_node_in_names) + + +def _get_arg_dtype(arg: torch.fx.Node) -> t.Any: + assert isinstance(arg, torch.fx.Node) + tensor_meta = arg.meta.get("tensor_meta") # type: ignore[union-attr] + dtype = tensor_meta.dtype if isinstance(tensor_meta, TensorMetadata) else arg.meta["type"] + return dtype diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/param_fetch.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/param_fetch.py new file mode 100644 index 0000000000000000000000000000000000000000..5979e29fcc6b2650a1f73be4845e2ad3dcda0920 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/param_fetch.py @@ -0,0 +1,66 @@ +from torch.fx.graph_module import GraphModule +from typing import Any, Callable, Dict, List, Tuple, Type +import torch +import torch.nn as nn + +from torch.fx._compatibility import compatibility + +__all__ = ['default_matching', 'extract_attrs_for_lowering', 'lift_lowering_attrs_to_nodes'] + +# Matching method matches the attribute name of current version to the 
attribute name of `target_version` +@compatibility(is_backward_compatible=False) +def default_matching(name: str, target_version: int) -> str: + """Default matching method + """ + return name + +# This dict maps the nn.Module class name to the attribute name list that we want to fetch for lowering. +# The first integer in the tuple is the version number of the nn.Module class when we create the parameter list. +# If there's a version mismatch then it means the parameter names in the book might be mismatched with nn.Module. +module_fetch_book: Dict[Type, Tuple[int, List[str], Callable[[str, int], str]]] = { + torch.nn.modules.linear.Linear: (1, ["weight", "bias"], default_matching), + torch.nn.modules.conv.Conv2d: ( + 1, ["weight", "bias", "kernel_size", "stride", "padding", "dilation", "groups", "padding_mode"], default_matching + ), + torch.nn.modules.batchnorm.BatchNorm2d: (2, ["weight", "bias", "running_mean", "running_var", "eps"], default_matching), + torch.nn.modules.pooling.AdaptiveAvgPool2d: (1, [], default_matching), + torch.nn.modules.pooling.MaxPool2d: ( + 1, ["kernel_size", "stride", "padding", "dilation", "return_indices", "ceil_mode"], default_matching + ), + torch.nn.modules.activation.ReLU: (1, ["inplace"], default_matching), +} + +@compatibility(is_backward_compatible=False) +def extract_attrs_for_lowering(mod: nn.Module) -> Dict[str, Any]: + """If `mod` is in `module_fetch_book`, fetch the mod's attributes that in the `module_fetch_book` + after checking module's version is compatible with the `module_fetch_book`. 
+ """ + attrs_for_lowering: Dict[str, Any] = {} + attrs_for_lowering["name"] = torch.typename(mod) + + if type(mod) in module_fetch_book: + version, param_to_fetch, matching_method = module_fetch_book[type(mod)] + if version < mod._version: + raise RuntimeError(f"Fetcher version {version} try to fetch {torch.typename(mod)} version {mod._version}, " + "please upgrade the module_fetch_book, open an issue and @842974287 " + "or report a bug to AIACC team directly.") + for attr in param_to_fetch: + attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version)) + else: + raise RuntimeError(f"{torch.typename(mod)} is not in the module_fetch_book yet, " + "please add it to the module_fetch_book, open an issue and @842974287 " + "or report a bug to AIACC team directly.") + return attrs_for_lowering + +@compatibility(is_backward_compatible=False) +def lift_lowering_attrs_to_nodes(fx_module: GraphModule) -> None: + """Recursively traverse all `fx_module` nodes and fetch the module's attributes if the node is a leaf module. 
+ """ + submodules = dict(fx_module.named_modules()) + + for node in fx_module.graph.nodes: + if node.op == "call_module": + if isinstance(submodules[node.target], GraphModule): + lift_lowering_attrs_to_nodes(submodules[node.target]) + else: + node.attrs_for_lowering = extract_attrs_for_lowering(submodules[node.target]) diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/pass_manager.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/pass_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..37c31fdff19b6ccd693176e9d29e95a8695e14d1 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/pass_manager.py @@ -0,0 +1,247 @@ +from functools import wraps +from inspect import unwrap +from typing import Callable, List, Optional +import logging + +logger = logging.getLogger(__name__) + +__all__ = [ + "PassManager", + "inplace_wrapper", + "log_hook", + "loop_pass", + "this_before_that_pass_constraint", + "these_before_those_pass_constraint", +] + +# for callables which modify object inplace and return something other than +# the object on which they act +def inplace_wrapper(fn: Callable) -> Callable: + """ + Convenience wrapper for passes which modify an object inplace. This + wrapper makes them return the modified object instead. + + Args: + fn (Callable[Object, Any]) + + Returns: + wrapped_fn (Callable[Object, Object]) + """ + + @wraps(fn) + def wrapped_fn(gm): + val = fn(gm) + return gm + + return wrapped_fn + +def log_hook(fn: Callable, level=logging.INFO) -> Callable: + """ + Logs callable output. + + This is useful for logging output of passes. Note inplace_wrapper replaces + the pass output with the modified object. If we want to log the original + output, apply this wrapper before inplace_wrapper. 
+ + + ``` + def my_pass(d: Dict) -> bool: + changed = False + if 'foo' in d: + d['foo'] = 'bar' + changed = True + return changed + + pm = PassManager( + passes=[ + inplace_wrapper(log_hook(my_pass)) + ] + ) + ``` + + Args: + fn (Callable[Type1, Type2]) + level: logging level (e.g. logging.INFO) + + Returns: + wrapped_fn (Callable[Type1, Type2]) + """ + @wraps(fn) + def wrapped_fn(gm): + val = fn(gm) + logger.log(level, "Ran pass %s\t Return value: %s", fn, val) + return val + + return wrapped_fn + + + +def loop_pass(base_pass: Callable, n_iter: Optional[int] = None, predicate: Optional[Callable] = None): + """ + Convenience wrapper for passes which need to be applied multiple times. + + Exactly one of `n_iter`or `predicate` must be specified. + + Args: + base_pass (Callable[Object, Object]): pass to be applied in loop + n_iter (int, optional): number of times to loop pass + predicate (Callable[Object, bool], optional): + + """ + assert (n_iter is not None) ^ ( + predicate is not None + ), "Exactly one of `n_iter`or `predicate` must be specified." + + @wraps(base_pass) + def new_pass(source): + output = source + if n_iter is not None and n_iter > 0: + for _ in range(n_iter): + output = base_pass(output) + elif predicate is not None: + while predicate(output): + output = base_pass(output) + else: + raise RuntimeError( + f"loop_pass must be given positive int n_iter (given " + f"{n_iter}) xor predicate (given {predicate})" + ) + return output + + return new_pass + + +# Pass Schedule Constraints: +# +# Implemented as 'depends on' operators. A constraint is satisfied iff a list +# has a valid partial ordering according to this comparison operator. +def _validate_pass_schedule_constraint( + constraint: Callable[[Callable, Callable], bool], passes: List[Callable] +): + for i, a in enumerate(passes): + for j, b in enumerate(passes[i + 1 :]): + if constraint(a, b): + continue + raise RuntimeError( + f"pass schedule constraint violated. 
Expected {a} before {b}" + f" but found {a} at index {i} and {b} at index{j} in pass" + f" list." + ) + + +def this_before_that_pass_constraint(this: Callable, that: Callable): + """ + Defines a partial order ('depends on' function) where `this` must occur + before `that`. + """ + + def depends_on(a: Callable, b: Callable): + if a == that and b == this: + return False + return True + + return depends_on + + +def these_before_those_pass_constraint(these: Callable, those: Callable): + """ + Defines a partial order ('depends on' function) where `these` must occur + before `those`. Where the inputs are 'unwrapped' before comparison. + + For example, the following pass list and constraint list would be invalid. + ``` + passes = [ + loop_pass(pass_b, 3), + loop_pass(pass_a, 5), + ] + + constraints = [ + these_before_those_pass_constraint(pass_a, pass_b) + ] + ``` + + Args: + these (Callable): pass which should occur first + those (Callable): pass which should occur later + + Returns: + depends_on (Callable[[Object, Object], bool] + """ + + def depends_on(a: Callable, b: Callable): + if unwrap(a) == those and unwrap(b) == these: + return False + return True + + return depends_on + + +class PassManager: + """ + Construct a PassManager. + + Collects passes and constraints. This defines the pass schedule, manages + pass constraints and pass execution. + + Args: + passes (Optional[List[Callable]]): list of passes. A pass is a + callable which modifies an object and returns modified object + constraint (Optional[List[Callable]]): list of constraints. A + constraint is a callable which takes two passes (A, B) and returns + True if A depends on B and False otherwise. See implementation of + `this_before_that_pass_constraint` for example. 
+ """ + + passes: List[Callable] + constraints: List[Callable] + _validated: bool = False + + def __init__( + self, + passes=None, + constraints=None, + ): + self.passes = passes or [] + self.constraints = constraints or [] + + @classmethod + def build_from_passlist(cls, passes): + pm = PassManager(passes) + # TODO(alexbeloi): add constraint management/validation + return pm + + def add_pass(self, _pass: Callable): + self.passes.append(_pass) + self._validated = False + + def add_constraint(self, constraint): + self.constraints.append(constraint) + self._validated = False + + def remove_pass(self, _passes: List[Callable]): + if _passes is None: + return + passes_left = [] + for ps in self.passes: + if ps.__name__ not in _passes: + passes_left.append(ps) + self.passes = passes_left + self._validated = False + + def validate(self): + """ + Validates that current pass schedule defined by `self.passes` is valid + according to all constraints in `self.constraints` + """ + if self._validated: + return + for constraint in self.constraints: + _validate_pass_schedule_constraint(constraint, self.passes) + self._validated = True + + def __call__(self, source): + self.validate() + out = source + for _pass in self.passes: + out = _pass(out) + return out diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/shape_prop.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/shape_prop.py new file mode 100644 index 0000000000000000000000000000000000000000..69260a5316699f8beddf27a2c359e90df32621db --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/shape_prop.py @@ -0,0 +1,193 @@ +import torch +import torch.fx +import traceback + +from torch._dispatch.python import enable_python_dispatcher +from torch.fx.node import Node, map_aggregate +from typing import Any, Tuple, NamedTuple, Optional, Dict +from torch.fx._compatibility import compatibility +from torch._guards import detect_fake_mode + +__all__ = ['TensorMetadata', 'ShapeProp'] + 
+@compatibility(is_backward_compatible=True) +class TensorMetadata(NamedTuple): + # TensorMetadata is a structure containing pertinent information + # about a tensor within a PyTorch program. + + # General Tensor metadata + shape : torch.Size + dtype : torch.dtype + requires_grad : bool + stride : Tuple[int, ...] + memory_format : Optional[torch.memory_format] + + # Quantization metadata + is_quantized : bool + qparams: Dict[str, Any] + +def _extract_tensor_metadata(result : torch.Tensor) -> TensorMetadata: + """ + Extract a TensorMetadata NamedTuple describing `result`. + """ + shape = result.shape + dtype = result.dtype + requires_grad = result.requires_grad + stride = result.stride() + + memory_formats = { + torch.contiguous_format, + torch.channels_last, + torch.channels_last_3d, + } + + memory_format = None + + for query_format in memory_formats: + if result.is_contiguous(memory_format=query_format): + memory_format = query_format + break + + is_quantized = result.is_quantized + qparams: Dict[str, Any] = {} + if is_quantized: + qscheme = result.qscheme() + qparams["qscheme"] = qscheme + if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}: + qparams["scale"] = result.q_scale() # type: ignore[assignment] + qparams["zero_point"] = result.q_zero_point() # type: ignore[assignment] + elif qscheme in {torch.per_channel_affine, torch.per_channel_affine_float_qparams, torch.per_channel_symmetric}: + # In this branch, scale and zero_point are expected to be tensors, + # we store the values as immutable_list in TensorMetadata for + # easier serialization downstream + qparams["scale"] = result.q_per_channel_scales().tolist() # type: ignore[assignment] + qparams["zero_point"] = result.q_per_channel_zero_points().tolist() # type: ignore[assignment] + qparams["axis"] = result.q_per_channel_axis() # type: ignore[assignment] + + return TensorMetadata( + shape, dtype, requires_grad, stride, memory_format, is_quantized, qparams) + 
+@compatibility(is_backward_compatible=True) +class ShapeProp(torch.fx.Interpreter): + """ + Execute an FX graph Node-by-Node and + record the shape and type of the result + into the corresponding node. + + Example: + In this example, we record the shape + and data type of a module given + an example input ``torch.randn(50, D_in)``. + We print the name, shape and dtype of each node. + + class TwoLayerNet(torch.nn.Module): + def __init__(self, D_in, H, D_out): + super().__init__() + self.linear1 = torch.nn.Linear(D_in, H) + self.linear2 = torch.nn.Linear(H, D_out) + def forward(self, x): + h_relu = self.linear1(x).clamp(min=0) + y_pred = self.linear2(h_relu) + return y_pred + N, D_in, H, D_out = 64, 1000, 100, 10 + x = torch.randn(N, D_in) + y = torch.randn(N, D_out) + model = TwoLayerNet(D_in, H, D_out) + gm = torch.fx.symbolic_trace(model) + sample_input = torch.randn(50, D_in) + ShapeProp(gm).propagate(sample_input) + + for node in gm.graph.nodes: + print(node.name, node.meta['tensor_meta'].dtype, + node.meta['tensor_meta'].shape) + + The output of this code is: + + x torch.float32 torch.Size([50, 1000]) + linear1 torch.float32 torch.Size([50, 100]) + clamp_1 torch.float32 torch.Size([50, 100]) + linear2 torch.float32 torch.Size([50, 10]) + output torch.float32 torch.Size([50, 10]) + + Args: + module (GraphModule): The module to be executed + fake_mode (FakeTensorMode): A fake mode for copying the gm + + """ + def __init__(self, gm, fake_mode=None): + super().__init__(gm) + if fake_mode is None: + fake_mode = detect_fake_mode() + if fake_mode is not None: + from torch._dynamo.utils import deepcopy_to_fake_tensor + # Note: + # We need fake execution cause the inputs are fake, however, we cannot fakify the module + # - because we need to write to the tensor_meta of the real module. So we fakify to + # produce a result (L131 below), to extract tensor meta, and then keep going. 
+ # + # If we were to fakify, we would write to the wrong node, and then downstream fusion + # would be missing the tensor_meta. + # + # See torch/_inductor/overrides.py for where this is called upstream of fusion. + self.fake_module = deepcopy_to_fake_tensor(self.module, fake_mode) + self.fake_mode = fake_mode + else: + self.fake_module = None + self.fake_mode = None + + self.real_module = self.module + + def run_node(self, n : Node) -> Any: + try: + if self.fake_module is not None: + # Hacky swap. Alternatively, we could do this with overriding + # call_module and get_attr. + self.module = self.fake_module + try: + if self.fake_mode is not None: + with self.fake_mode, enable_python_dispatcher(): + result = super().run_node(n) + else: + result = super().run_node(n) + finally: + self.module = self.real_module + except Exception as e: + traceback.print_exc() + raise RuntimeError( + f"ShapeProp error for: node={n.format_node()} with " + f"meta={n.meta}" + ) from e + + found_tensor = False + + def extract_tensor_meta(obj): + if isinstance(obj, torch.Tensor): + nonlocal found_tensor + found_tensor = True + return _extract_tensor_metadata(obj) + else: + return obj + + meta = map_aggregate(result, extract_tensor_meta) + if found_tensor: + n.meta['tensor_meta'] = meta + + n.meta['type'] = type(result) + return result + + def propagate(self, *args): + """ + Run `module` via interpretation and return the result and + record the shape and type of each node. + + Args: + *args (Tensor): the sample input. 
+ + Returns: + Any: The value returned from executing the Module + """ + if self.fake_mode is not None: + fake_args = [self.fake_mode.from_tensor(t) if isinstance(t, torch.Tensor) else t for t in args] + else: + fake_args = args + return super().run(*fake_args) diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/split_module.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/split_module.py new file mode 100644 index 0000000000000000000000000000000000000000..7458c932555fa9724b03ed404f931aec3875a88a --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/split_module.py @@ -0,0 +1,370 @@ +import inspect +from typing import Any, Callable, Dict, List, Optional + +import torch +from torch.fx._compatibility import compatibility +from torch.fx.graph_module import GraphModule + +__all__ = ["Partition", "split_module"] + + +@compatibility(is_backward_compatible=True) +class Partition: + def __init__(self, name: str): + self.name: str = name + self.submod_name = f"submod_{name}" + self.node_names: List[str] = [] + self.inputs: Dict[str, None] = {} + self.outputs: Dict[str, None] = {} + self.partitions_dependent_on: Dict[str, None] = {} + self.partition_dependents: Dict[str, None] = {} + self.graph: torch.fx.graph.Graph = torch.fx.graph.Graph() + self.environment: Dict[torch.fx.node.Node, torch.fx.node.Node] = {} + self.targets: Dict[str, Any] = {} + + def __repr__(self) -> str: + return ( + f"name: {self.name},\n" + f" nodes: {self.node_names},\n" + f" inputs: {self.inputs},\n" + f" outputs: {self.outputs},\n" + f" partitions dependent on: {self.partitions_dependent_on},\n" + f" partition dependents: {self.partition_dependents}" + ) + + +# Creates subgraphs out of main graph +@compatibility(is_backward_compatible=True) +def split_module( + m: GraphModule, + root_m: torch.nn.Module, + split_callback: Callable[[torch.fx.node.Node], int], + qualname_map: Optional[Dict[str, str]] = None, + keep_original_order: Optional[bool] = 
False, +): + """ + Creates subgraphs out of main graph + + Args: + m (GraphModule): Graph module to split + root_m (torch.nn.Module): root nn module. Not currently used. Included + because the root nn module is usually transformed via + torch.fx._symbolic_trace.symbolic_trace (see example below) + split_callback (Callable[[torch.fx.node.Node], int]): Callable function + that maps a given Node instance to a numeric partition identifier. + split_module will use this function as the policy for which operations + appear in which partitions in the output Module. + qualname_map: Optional[Dict[str, str]]: optional output parameter that returns a + mapping from new target names in the module after split to old target + names in the original module. + keep_original_order: Optional[bool]: keep the original order of the GraphModule + or use the Topological order of the new constructed GraphModule + + + Returns: + GraphModule: the module after split. + + Example: + + This is a sample setup: + + import torch + from torch.fx.symbolic_trace import symbolic_trace + from torch.fx.graph_module import GraphModule + from torch.fx.node import Node + from torch.fx.passes.split_module import split_module + + class MyModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.param = torch.nn.Parameter(torch.rand(3, 4)) + self.linear = torch.nn.Linear(4, 5) + + def forward(self, x, y): + z = self.linear(x + self.param).clamp(min=0.0, max=1.0) + w = self.linear(y).clamp(min=0.0, max=1.0) + return z + w + + # symbolically trace model + my_module = MyModule() + my_module_traced = symbolic_trace(my_module) + + # random mod partitioning + partition_counter = 0 + NPARTITIONS = 3 + + def mod_partition(node: Node): + global partition_counter + partition = partition_counter % NPARTITIONS + partition_counter = (partition_counter + 1) % NPARTITIONS + return partition + + # split module in module with submodules + module_with_submodules = split_module( + my_module_traced, my_module, 
mod_partition + ) + + Output looks like this. Original graph is broken into partitions + + > print(module_with_submodules) + GraphModule( + (submod_0): GraphModule( + (linear): Linear(in_features=4, out_features=5, bias=True) + ) + (submod_1): GraphModule( + (linear): Linear(in_features=4, out_features=5, bias=True) + ) + (submod_2): GraphModule() + ) + + def forward(self, x, y): + param = self.param + submod_0 = self.submod_0(x, param, y); x = param = y = None + getitem = submod_0[0] + getitem_1 = submod_0[1]; submod_0 = None + submod_1 = self.submod_1(getitem, getitem_1); getitem = getitem_1 = None + getitem_2 = submod_1[0] + getitem_3 = submod_1[1]; submod_1 = None + submod_2 = self.submod_2(getitem_2, getitem_3); getitem_2 = getitem_3 = None + return submod_2 + + Output of split module is the same as output of input traced module. + This is an example within a test setting: + + > orig_out = my_module_traced(x, y) + > submodules_out = module_with_submodules(x, y) + > self.assertEqual(orig_out, submodules_out) + True + """ + + def construct_graph( + node: torch.fx.node.Node, + base_mod_env: Dict[str, torch.fx.node.Node], + base_mod_attrs: Dict[str, torch.fx.graph_module.GraphModule], + ): + if node.op == "placeholder": + default_value = ( + node.args[0] if len(node.args) > 0 else inspect.Signature.empty + ) + base_mod_env[node.name] = base_mod_graph.placeholder( + node.target, type_expr=node.type, default_value=default_value + ) + base_mod_env[node.name].meta = node.meta.copy() + elif node.op == "get_attr": + base_mod_env[node.name] = base_mod_graph.get_attr(node.target) + base_mod_env[node.name].meta = node.meta.copy() + attr_val = m + for atom in node.target.split("."): # type: ignore[union-attr] + if not hasattr(attr_val, atom): + raise AttributeError(f"Node target {node.target} not found!") + attr_val = getattr(attr_val, atom) + base_mod_attrs[node.target] = attr_val # type: ignore[index] + return base_mod_env, base_mod_attrs + + partitions: Dict[str, 
Partition] = {} + orig_nodes: Dict[str, torch.fx.node.Node] = {} + + def record_cross_partition_use( + def_node: torch.fx.node.Node, use_node: Optional[torch.fx.node.Node] + ): # noqa: B950 + def_partition_name = getattr(def_node, "_fx_partition", None) + use_partition_name = getattr(use_node, "_fx_partition", None) + if def_partition_name != use_partition_name: + if def_partition_name is not None: + def_partition = partitions[def_partition_name] + def_partition.outputs.setdefault(def_node.name) + if use_partition_name is not None: + def_partition.partition_dependents.setdefault(use_partition_name) + + if use_partition_name is not None: + use_partition = partitions[use_partition_name] + use_partition.inputs.setdefault(def_node.name) + if def_partition_name is not None: + use_partition.partitions_dependent_on.setdefault(def_partition_name) + + # split nodes into partitions + for node in m.graph.nodes: + orig_nodes[node.name] = node + + # TODO currently placeholders/parameters aren't put into random partitions, + # rather they're added to the graphs where they are used down below + if node.op in ["placeholder", "get_attr"]: + continue + if node.op == "output": + torch.fx.graph.map_arg( + node.args[0], lambda n: record_cross_partition_use(n, None) + ) + continue + partition_name = str(split_callback(node)) + + # add node to partitions + partition = partitions.get(partition_name) + if partition is None: + partitions[partition_name] = partition = Partition(partition_name) + + partition.node_names.append(node.name) + node._fx_partition = partition_name + + torch.fx.graph.map_arg( + node.args, lambda def_node: record_cross_partition_use(def_node, node) + ) + torch.fx.graph.map_arg( + node.kwargs, lambda def_node: record_cross_partition_use(def_node, node) + ) # noqa: B950 + + original_partition_order = list(partitions.keys()) + # find partitions with no dependencies + root_partitions: List[str] = [] + for partition_name, partition in partitions.items(): + if not 
len(partition.partitions_dependent_on): + root_partitions.append(partition_name) + + # check partitions for circular dependencies and create topological partition ordering + sorted_partitions: List[str] = [] + while root_partitions: + root_partition = root_partitions.pop() + sorted_partitions.append(root_partition) + for dependent in partitions[root_partition].partition_dependents: + partitions[dependent].partitions_dependent_on.pop(root_partition) + if not partitions[dependent].partitions_dependent_on: + root_partitions.append(dependent) + if len(sorted_partitions) != len(partitions): + raise RuntimeError("cycle exists between partitions!") + + # add placeholders to partitions + for partition_name in sorted_partitions: + partition = partitions[partition_name] + for input in partition.inputs: + placeholder = partition.graph.placeholder( + input, + type_expr=orig_nodes[input].type, + ) + placeholder.meta = orig_nodes[input].meta.copy() + partition.environment[orig_nodes[input]] = placeholder + + # Transform nodes and collect targets for partition's submodule + for node in m.graph.nodes: + if hasattr(node, "_fx_partition"): + partition = partitions[node._fx_partition] + + # swap out old graph nodes in kw/args with references to new nodes in this submodule + environment = partition.environment + gathered_args = torch.fx.graph.map_arg(node.args, lambda n: environment[n]) + gathered_kwargs = torch.fx.graph.map_arg( + node.kwargs, lambda n: environment[n] + ) + + if node.op not in ["call_module", "get_attr"]: + target = node.target + else: + target_atoms = node.target.split(".") + target_attr = m + for atom in target_atoms: + if not hasattr(target_attr, atom): + raise AttributeError(f"Operator target {node.target} not found!") + target_attr = getattr(target_attr, atom) + # target = target_atoms[-1] + target = "_".join(target_atoms) + partition.targets[target] = target_attr + # Fill in the passed-in mapping from new qualname to old qualname + if qualname_map is not None: 
+ # When creating the split module later, the submodules will have + # path prefix matching the corresponding partition's submod_name + qualname = f"{partition.submod_name}.{target}" + qualname_map[qualname] = node.target + + assert isinstance(gathered_args, tuple) + assert isinstance(gathered_kwargs, dict) + new_node = partition.graph.create_node( + op=node.op, + target=target, + args=gathered_args, + kwargs=gathered_kwargs, + type_expr=node.type, + ) + new_node.meta = node.meta.copy() + partition.environment[node] = new_node + + # original module environment dict mapping node names to nodes + org_mod_env: Dict[str, torch.fx.node.Node] = {} + # Set up values to construct base module + base_mod_env: Dict[str, torch.fx.node.Node] = {} + base_mod_graph: torch.fx.graph.Graph = torch.fx.graph.Graph() + base_mod_attrs: Dict[str, torch.fx.graph_module.GraphModule] = {} + if not keep_original_order: + for node in m.graph.nodes: + base_mod_env, base_mod_attrs = construct_graph( + node, base_mod_env, base_mod_attrs + ) + + else: + # Go through the graph to construct the mapping dict + for node in m.graph.nodes: + org_mod_env[node.name] = node + + # Do some things iterating over the partitions in topological order again: + # 1) Finish off submodule Graphs by setting corresponding outputs + # 2) Construct GraphModules for each submodule + # 3) Construct the base graph by emitting calls to those submodules in + # topological order or original order specified by keep_original_order + + construct_order_partitions = ( + sorted_partitions if not keep_original_order else original_partition_order + ) + + already_constructed_attr_nodes = set() + for partition_name in construct_order_partitions: + partition = partitions[partition_name] + + # Set correct output values + output_vals = tuple( + partition.environment[orig_nodes[name]] for name in partition.outputs + ) + + # skip output node generation if there are no output values + num_output_vals = len(output_vals) + if num_output_vals 
== 1: + partition.graph.output(output_vals[0]) + elif num_output_vals > 1: + partition.graph.output(output_vals) + + if keep_original_order: + # first get the attr nodes required by this partition + org_mod_attr_nodes: List[torch.fx.node.Node] = [ + org_mod_env[key] for key in partition.inputs + ] + # Construct GraphModule for this partition + for node in org_mod_attr_nodes: # type: ignore[attr-defined] + if node in already_constructed_attr_nodes: + continue + base_mod_env, base_mod_attrs = construct_graph( + node, base_mod_env, base_mod_attrs + ) + already_constructed_attr_nodes.add(node) + + base_mod_attrs[partition.submod_name] = torch.fx.graph_module.GraphModule( + partition.targets, partition.graph + ) # noqa: B950 + + # Emit call in base graph to this submodule + output_val = base_mod_graph.call_module( + partition.submod_name, + tuple(base_mod_env[name] for name in partition.inputs), + ) + + num_outputs = len(partition.outputs) + if num_outputs > 1: + # Unpack multiple return values from submodule + output_val_proxy = torch.fx.proxy.Proxy(output_val) + for i, output_name in enumerate(partition.outputs): + base_mod_env[output_name] = output_val_proxy[i].node # type: ignore[index] + elif num_outputs == 1: + base_mod_env[list(partition.outputs)[0]] = output_val + + for node in m.graph.nodes: + if node.op == "output": + base_mod_graph.output( + torch.fx.graph.map_arg(node.args[0], lambda n: base_mod_env[n.name]) + ) # noqa: B950 + + return torch.fx.graph_module.GraphModule(base_mod_attrs, base_mod_graph) diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/split_utils.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/split_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ada204d06c4729fea64cc12be2ce2ef418cdfb27 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/split_utils.py @@ -0,0 +1,280 @@ +import copy +from dataclasses import dataclass, field +from typing import Dict, List, 
Optional + +import torch.fx +from torch.fx._compatibility import compatibility +from torch.fx.graph import map_arg +from torch.fx.passes.utils import HolderModule, lift_subgraph_as_module + +from .tools_common import NodeList + +__all__ = ["getattr_recursive", "setattr_recursive", "Component", "split_by_tags"] + +@compatibility(is_backward_compatible=False) +def getattr_recursive(obj, name): + for layer in name.split("."): + if hasattr(obj, layer): + obj = getattr(obj, layer) + else: + return None + return obj + + +@compatibility(is_backward_compatible=False) +def setattr_recursive(obj, attr, value): + if "." not in attr: + setattr(obj, attr, value) + else: + layer = attr.split(".") + setattr_recursive(getattr(obj, layer[0]), ".".join(layer[1:]), value) + + +@compatibility(is_backward_compatible=False) +@dataclass +class Component: + """ + A component serves as a container for a subgraph we want to create afterwards. + """ + + graph: torch.fx.Graph + order: int + name: str + + # Stores the placeholder nodes in `graph`. + input_placeholders: List = field(default_factory=list) + + # Store the nodes in original graph that are placeholder in `graph`. + orig_inputs: List = field(default_factory=list) + + # Store the nodes in original graph that are outputs in `graph`. + orig_outputs: List = field(default_factory=list) + + # Mapping from get_attr node in original graph to get_attr node in `graph`. + getattr_maps: Dict[torch.fx.Node, torch.fx.Node] = field(default_factory=dict) + constructor_args: List[str] = field(default_factory=list) + gm: Optional[torch.fx.GraphModule] = None + + +@compatibility(is_backward_compatible=False) +def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule: + """ + Splits a GraphModule using tags on its graph nodes. We honor the order of + tags. For example, we have tags = ["a", "b", "c"], the function will create + the initial submodules in the order of "a_0", "b_1", "c_2". 
+ + To set a tag: + gm.graph.nodes[idx].tag = "mytag" + + This will result in all nodes with the same tag being extracted and placed in their + own submodule. For placeholder, output and get_attr node, the tag is ignored. placeholder + and output nodes are created when needed while get_attr nodes get copied to submodules + where they are used. + + Given the following module def: + + class SimpleModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear1 = torch.nn.Linear(...) + self.linear2 = torch.nn.Linear(...) + self.linear3 = torch.nn.Linear(...) + + def forward(self, in1, in2): + r1 = self.linear1(in1) + r2 = self.linear2(in2) + r3 = torch.cat([r1, r2]) + return self.linear3(r3) + + Marking the node corresponding to in1 with the tag sc.REQUEST_ONLY.lower() results in the following split: + + ro_0: + def forward(self, in1): + self = self.root + linear1 = self.linear1(in1) + return linear1 + + main_1: + def forward(self, in2, linear1): + self = self.root + linear2 = self.linear2(in2) + cat_1 = torch.cat([linear1, linear2]) + linear3 = self.linear3(cat_1) + return linear3 + + main_0: + def forward(self, in1, in2): + self = self.root + ro_0 = self.ro_0(in1) + main_1 = self.main_1(in2, ro_0) + return main_1 + """ + + def flatten(x: torch.fx.node.Argument) -> NodeList: + """ + Stores nodes in x to a list and returns the list. + """ + r: NodeList = [] + map_arg(x, r.append) + return r + + # Mapping from node in original module to node in created submodule. + node_remapping: Dict[torch.fx.Node, torch.fx.Node] = {} + + # Mapping from node in original module or created submodules to + # corresponding component. + node_to_component: Dict[torch.fx.Node, Component] = {} + + # Mapping from tag to the corresponding component. + tag_to_component: Dict[str, Component] = {} + + # Stores all components. + all_components: List[Component] = [] + + # Stores nodes that will be used in main graph. 
+ used_in_main: Dict[torch.fx.Node, None] = {} + + # Main graph after split. + main_g = torch.fx.Graph() + + # Mapping from node in original module to node in main graph after split. + main_remapping: Dict[torch.fx.Node, torch.fx.Node] = {} + + # Output node of original module. + output_node: Optional[torch.fx.Node] = None + + # Create a component for each tag, we don't expect to create other components afterwards. + for tag in tags: + comp = Component(torch.fx.Graph(), len(all_components), f"{tag}") + all_components.append(comp) + tag_to_component[tag] = comp + + # Traverse the nodes in original graph and take care of them. + for node in gm.graph.nodes: + if node.op == "output": + if output_node is not None: + raise RuntimeError("Multiple output nodes in graph!") + output_node = node + continue + + # Placeholders in the original graph get copied to main graph. + if node.op == "placeholder": + main_remapping[node] = main_g.placeholder(node.name, type_expr=node.type) + main_remapping[node].meta = copy.copy(node.meta) + continue + + # Get_attr nodes are ignored because we are not tagging them. + # Instead, we copy them directly to the submodules use them afterwards. + if node.op == "get_attr": + continue + + # Now we process callable nodes which are nodes with op of call_module, + # call_function or call_method. Every callable nodes should be tagged. + assert hasattr(node, "tag") + + upstream_components = [ + node_to_component[x] + for x in flatten(node.args) + flatten(node.kwargs) + if x.op not in {"placeholder", "get_attr"} + ] + + comp = tag_to_component[node.tag] + node_to_component[node] = comp + + # Max order of upperstream components. + mx = max((c.order for c in upstream_components), default=0) + + # Expect the component for `node` has higher order then its upstream components. + assert comp.order >= mx + + # Map a input of `node` to nodes in the component's graph. + def remap_func(x): + # If input is a get_attr node, copy it to current component's graph. 
+ # Returns the get_attr node in current component's graph. + if x.op == "get_attr": + if x not in comp.getattr_maps: + comp.getattr_maps[x] = comp.graph.get_attr( + x.target, type_expr=x.type + ) + return comp.getattr_maps[x] + + # If input is not a placeholder, it should have been put into a component + # already. If it's the current component then we return the corresponding + # node in the component. + if x.op != "placeholder" and node_to_component[x] == comp: + return node_remapping[x] + + # If input is a placeholder or it's in other components, we want to make it + # as a placeholder in current component's graph. + if x not in comp.orig_inputs: + comp.orig_inputs.append(x) + placeholder = comp.graph.placeholder(x.name, type_expr=x.type) + placeholder.meta = copy.copy(x.meta) + comp.input_placeholders.append( + placeholder + ) + used_in_main[x] = None + + return comp.input_placeholders[comp.orig_inputs.index(x)] + + n = comp.graph.node_copy(node, remap_func) + n.tag = node.tag # type: ignore[attr-defined] + node_remapping[node] = n + node_to_component[n] = comp + + if output_node is None: + raise RuntimeError("Graph had no output node!") + + for x in flatten(output_node.args[0]): + if x.op == "get_attr": + # We don't need components mapping for nodes of type "get_attr" + # that are consumed by the output. Only need to make sure we create + # corresponding counterparts in the resulting graph. + main_remapping[x] = main_g.get_attr(x.name, type_expr=x.type) + else: + # All component results consumed by the output node should be + # marked as "used in main". + used_in_main[x] = None + + # If a node is used in main graph then we mark it as an output in the component + # it belongs to. + for n in used_in_main: + if n.op != "placeholder": + node_to_component[n].orig_outputs.append(n) + + # Now we create a graphmodule for each component. 
+ for comp in all_components: + outs = tuple(map(node_remapping.__getitem__, comp.orig_outputs)) + + # Take care of the args of FX output node. If there's a single + # output then the output node args is like (output_single), else + # if there're multiple outputs then the output node args is like + # ((output_0, output_1, ...)). + comp.graph.output(outs[0] if len(outs) == 1 else outs) + + comp.gm = lift_subgraph_as_module(gm, comp.graph) + + # Create a call_module node in main graph. + main_node = main_g.call_module( + comp.name, + args=tuple(map(main_remapping.__getitem__, comp.orig_inputs)), + kwargs=None, + ) + + if len(outs) == 1: + main_remapping[comp.orig_outputs[0]] = main_node + else: + for i, o in enumerate(comp.orig_outputs): + # Use Proxy to record getitem access. + main_remapping[o] = torch.fx.Proxy(main_node)[i].node # type: ignore[index] + + main_g.output(map_arg(output_node.args[0], main_remapping.__getitem__)) + main_root = HolderModule({comp.name: comp.gm for comp in all_components}) + + # If the output nodes consumes get_attr directly in the original graph, + # then we need to make sure get_attr is copied to the new graph. 
+ for x in flatten(output_node.args[0]): + if x.op == "get_attr": + setattr(main_root, x.name, getattr_recursive(gm, x.target)) # type: ignore[arg-type] + + return torch.fx.GraphModule(main_root, main_g) diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/splitter_base.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/splitter_base.py new file mode 100644 index 0000000000000000000000000000000000000000..e30d9d3e415400cba5443344a6277e609b4118aa --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/splitter_base.py @@ -0,0 +1,871 @@ +import argparse +import copy +from collections import defaultdict +from dataclasses import dataclass +from typing import NamedTuple, Sequence, Iterable, Any, List, Dict, Optional, Tuple +import logging + +import torch +from torch.fx.passes.graph_manipulation import get_size_of_node +from torch.fx.node import map_arg +from torch.fx._compatibility import compatibility + +from .operator_support import ( + get_node_target, + OperatorSupportBase, +) +from .graph_drawer import FxGraphDrawer +from .shape_prop import ShapeProp +from .split_utils import split_by_tags +from .tools_common import ( + FxNetAccFusionsFinder, + CALLABLE_NODE_OPS, + Tensors, + NodeList, + NodeSet, + is_node_output_tensor, +) + + +__all__ = ['FxNetAccNodesFinder', 'FxNetSplitterInternalError', 'Subgraph', 'SplitResult', 'generate_inputs_for_submodules'] +_LOGGER = logging.getLogger(__name__) + +DEFAULT_MIN_ACC_MODULE_SIZE = 1 +DEFAULT_SKIP_FUSION = False +DEFAULT_ALLOW_NON_TENSOR = False + +class _SplitterSettingBase: + def __init__( + self, + min_acc_module_size=DEFAULT_MIN_ACC_MODULE_SIZE, + skip_fusion=DEFAULT_SKIP_FUSION, + allow_non_tensor=DEFAULT_ALLOW_NON_TENSOR + ): + parser = argparse.ArgumentParser() + parser.add_argument( + "--min-acc-module-size", + "--min_acc_module_size", + required=False, + type=int, + help="Minimum size limit of an accelerator subgraph.", + ) + parser.add_argument( + "--skip-fusion", + 
"--skip_fusion", + default=False, + action="store_true", + help="If true then no fusion groups. Fusion group is used to " + "enforce no non-tensor data flow between submodules. If we don't " + "have this constrain, setting this to false is recommended as it " + "can reduce overhead.", + ) + parser.add_argument( + "--allow-non-tensor", + "--allow_non_tensor", + default=False, + action="store_true", + help="For some backends non-tensor data flow between cpu and them " + "are not allowed. Therefore, if a node supported by accelerator but " + "it has non-tensor inputs or outputs to a cpu node we would want to " + "consider it as a cpu node during splitting. However, for some backends " + "we might not care about non-tensor data flow and we can set this option " + "to true to disable the functionality that prevent non-tensor data flow.", + ) + args, unknown = parser.parse_known_args() + + self.min_acc_module_size: int = args.min_acc_module_size if args.min_acc_module_size else min_acc_module_size + self.skip_fusion: bool = args.skip_fusion if args.skip_fusion else skip_fusion + self.allow_non_tensor: bool = args.allow_non_tensor if args.allow_non_tensor else allow_non_tensor + + +@compatibility(is_backward_compatible=False) +class FxNetAccNodesFinder: + """ + Finds a set of nodes that can be supported on ACC, excluding nodes that have non-tensor + input/output to cpu nodes to prevent non-tensor data flow between backends and cpu. + + I.e. if we have a chain: + + ACC_NODE_1 -> ACC_NODE_2 -> ACC_NODE_3 -> CPU_NODE_1 + + where every ACC node produces non-tensor output, then they all should be treated as CPU nodes. + + This behavior can be turned off by passing allow_non_tensor=True. 
+ """ + + def __init__( + self, + module: torch.fx.GraphModule, + operator_support: OperatorSupportBase, + allow_non_tensor: bool, + ): + self.module = module + self.operator_support = operator_support + self.allow_non_tensor = allow_non_tensor + + def reduce_acc_nodes_non_tensor_input_helper( + self, cpu_worklist: NodeList + ): + """ + Transitively excludes nodes from ACC supported set. + For every node in the worklist: + - removes its downstream ACC nodes from ACC supported set, + - if any downstream ACC node produces non-tensor output, + then it gets added into the worklist. + """ + while cpu_worklist: + node = cpu_worklist.pop(0) + + for user in node.users: + if user in self.acc_nodes: + self.acc_nodes.remove(user) + if not is_node_output_tensor(user): + cpu_worklist.append(user) + + def reduce_acc_nodes_non_tensor_input(self): + """ + Excludes nodes from ACC supported set that have direct + upstream CPU nodes that produce non-tensor outputs. + """ + non_tensor_cpu_nodes: NodeList = [] + + for node in self.module.graph.nodes: + if node.op not in CALLABLE_NODE_OPS: + continue + if node in self.acc_nodes: + continue + if is_node_output_tensor(node): + continue + non_tensor_cpu_nodes.append(node) + + self.reduce_acc_nodes_non_tensor_input_helper(non_tensor_cpu_nodes) + + def reduce_acc_nodes_non_tensor_output(self): + """ + Excludes nodes from ACC supported set that produce non-tensor + outputs and have downstream CPU nodes. 
+ """ + while True: + new_cpu_nodes: NodeList = [] + + for acc_node in self.acc_nodes: + if is_node_output_tensor(acc_node): + continue + for user in acc_node.users: + if user not in self.acc_nodes: + new_cpu_nodes.append(acc_node) + break + + if not new_cpu_nodes: + break + + for new_cpu_node in new_cpu_nodes: + self.acc_nodes.remove(new_cpu_node) + + self.reduce_acc_nodes_non_tensor_input_helper(new_cpu_nodes) + + def __call__(self) -> NodeSet: + submodules = dict(self.module.named_modules()) + self.acc_nodes = { + n + for n in self.module.graph.nodes + if n.op in CALLABLE_NODE_OPS + and self.operator_support.is_node_supported(submodules, n) + } + + if not self.allow_non_tensor: + self.reduce_acc_nodes_non_tensor_input() + self.reduce_acc_nodes_non_tensor_output() + + return self.acc_nodes + +@compatibility(is_backward_compatible=False) +class FxNetSplitterInternalError(Exception): + pass + +@compatibility(is_backward_compatible=False) +@dataclass +class Subgraph: + is_acc: bool + nodes: NodeList + + +@compatibility(is_backward_compatible=False) +class SplitResult(NamedTuple): + """ + Stores the results of the splitter. + + Attributes: + split_module: root module after splitting. + submodule_inputs: a dict that maps submodule name to its inputs. + non_acc_submodule_prefix: the prefix for non acc submodules. For + acc submodule the prefix is alwasy "_run_on_acc_". + """ + + split_module: torch.fx.GraphModule + submodule_inputs: Dict[str, Any] + non_acc_submodule_prefix: str + + +@compatibility(is_backward_compatible=False) +def generate_inputs_for_submodules( + model: torch.nn.Module, + inputs: Sequence[Any], + target_submodules: Iterable[str], + deepcopy: bool = False, +) -> Dict[str, Any]: + """ + Generate inputs for targeting submdoules in the given model. Note that if two submodules refer to the same obj, this + function doesn't work. + + Args: + model: root model. + inputs: inputs to the root model. 
+ target_submodules: submodules that we want to generate inputs for. + + Returns: + A dict that maps from submodule name to its inputs. + """ + + handles = [] + results = {} + submodule_to_names = {mod: name for name, mod in model.named_modules()} + + def pre_forward(module, module_inputs): + results[submodule_to_names[module]] = copy.deepcopy(module_inputs) if deepcopy else module_inputs + + for name, mod in model.named_modules(): + if name in target_submodules: + handles.append(mod.register_forward_pre_hook(pre_forward)) + + def clean_up_handles(): + for h in handles: + h.remove() + + try: + with torch.no_grad(): + model(*inputs) + except Exception as e: + clean_up_handles() + raise e + + clean_up_handles() + return results + + +class _SplitterBase: + """ + Splits a GraphModule into sub-GraphModules for execution on CPU or the accelerator. + Output is a GraphModule with supported and unsupported operators grouped into as few sub-GraphModules as possible. + Assumes that only "call_module", "call_function" and "call_method" from FX IR can potentially be executed on the accelerator. 
+ + Given the following graph: + ==> b ==> + // \\ + a d + \\ // + ==> c ==> + + class SimpleModule(torch.nn.Module): + def forward(self, a): + b = torch.sin(a) + c = torch.cos(a) + d = b + c + return d + + and providing "operator_support" that indicates that 'b' and 'c' can be executed on the accelerator, + we will get the following split result: + + main: + def forward(self, a): + run_on_acc_0_0 = self._run_on_acc_0_0(a) + getitem = run_on_acc_0_0[0] + getitem_1 = run_on_acc_0_0[1] + run_on_cpu_1_1 = self._run_on_cpu_1_1(getitem, getitem_1) + return run_on_cpu_1_1 + + _run_on_acc_0_0: + def forward(self, a): + sin_1 = torch.sin(a) + cos_1 = torch.cos(a) + return (sin_1, cos_1) + + _run_on_cpu_1_1: + def forward(self, sin_1, cos_1): + add_1 = sin_1 + cos_1 + return add_1 + """ + + # PCIe bandwidth for the backend, default to 100 GB/s + PCIe_BW = 100 * 2 ** 30 + + def __init__( + self, + module: torch.fx.GraphModule, + sample_input: Sequence[Any], + operator_support: OperatorSupportBase, + settings: _SplitterSettingBase, + non_acc_submodule_name: str = "_run_on_cpu_", + ): + """ + Preprocesses graph before splitting: + - finds nodes supported by ACC, + - finds fusion groups for ACC nodes having non-tensor IO, + - builds a graph of direct dependencies, + - builds a map of fused nodes to their fusions. + As a result we get self.acc_nodes, self.deps and self.fusions. 
+ """ + assert isinstance(module, torch.fx.GraphModule) + + self.module = module + ShapeProp(self.module).propagate(*sample_input) + + self.settings = settings + self.operator_support = operator_support + self.sample_input = sample_input + self.acc_nodes = FxNetAccNodesFinder(self.module, self.operator_support, self.settings.allow_non_tensor)() + + if self.settings.skip_fusion: + self.fusions = {} + else: + self.fusions = FxNetAccFusionsFinder(module, self.acc_nodes)() + + # Modify deps to add more deps for fused nodes + self.deps = self.find_deps() + self.update_deps_for_fusions() + + self.non_acc_submodule_name = non_acc_submodule_name + self._node_submodule_map: Dict[str, str] = {} + + # =============================================================== + # Helpers for ctor and initial state + # =============================================================== + + def get_node_submodule_map(self) -> Dict[str, str]: + """ Returns a map from node name to submodule name, e.g. + node: main_module_impl_impl_over_arch_unary_multiple_embedding + _pooling_embedding_pooling_sparse_entity_equivalence_key + _proxy_embedding_bag + maps to submodule name of: _run_on_acc_1 + """ + return self._node_submodule_map + + def find_deps(self) -> Dict[torch.fx.Node, NodeSet]: + """ + Builds a graph of node dependencies. Leaf nodes don't have any + dependencies and the "output" node doesn't have nodes depending on it. + + Resulting graph has only direct dependencies, i.e. there are no + transitive dependencies. + """ + deps: Dict[torch.fx.Node, NodeSet] = defaultdict(set) + for node in self.module.graph.nodes: + if node.op not in CALLABLE_NODE_OPS: + continue + + for user in node.users: + if user.op != "output": + deps[user].add(node) + return deps + + def update_deps_for_fusions(self): + """ + Updates graph of dependencies so that: + - nodes from the same fusion depend on the same set of outer nodes, + - outer nodes depending on a fusion depend on all nodes in that fusion. 
+ """ + for node in self.fusions: + fusion = self.fusions[node] + for fused_neighbor in fusion: + self.deps[node].update(self.deps[fused_neighbor] - fusion) + + for user in fused_neighbor.users: + if user not in fusion: + self.deps[user].add(node) + + # =============================================================== + # Helpers for preview + # =============================================================== + + def _lower_model_to_backend( + self, mod: torch.fx.GraphModule, inputs: Tensors + ) -> torch.nn.Module: + """ + Lower the model to a backend. + """ + + return mod + + def _find_culprit( + self, mod: torch.fx.GraphModule, inputs: Tensors + ) -> str: + """ + When an error occurs during lowering or running the lowered mod, we use this + function to find culprits in the `mod` that causes the error. + """ + + return "Unable to find a culprit because _find_culprit() function is not implemented." + + def _draw_graph_based_on_node_support( + self, mod: torch.fx.GraphModule, supported_nodes: NodeList + ): + color_map = { + "default": "AliceBlue", + "supported": "chartreuse1", + "unsupported": "crimson", + } + + class CustomDrawer(FxGraphDrawer): + def _get_node_style(self, node): + template = super()._get_node_style(node) + if node in supported_nodes: + template["fillcolor"] = color_map["supported"] + elif node.op in CALLABLE_NODE_OPS: + template["fillcolor"] = color_map["unsupported"] + else: + template["fillcolor"] = color_map["default"] + + return template + + drawer = CustomDrawer(mod, "node_support", ignore_getattr=True) + dot_graph = drawer.get_main_dot_graph() + dot_graph.write_raw("node_support.dot") + + def node_support_preview(self, dump_graph: bool = False): + submodules = dict(self.module.named_modules()) + + supported_nodes: NodeList = [] + supported_node_types = defaultdict(set) + unsupported_node_types = defaultdict(set) + + def get_dtype(arg): + tensor_meta = arg.meta.get("tensor_meta") + return getattr(tensor_meta, "dtype", None) + + for node in 
self.module.graph.nodes: + if node.op not in CALLABLE_NODE_OPS: + continue + + target = get_node_target(submodules, node) + + # Store dtype of arg in node.args. If arg doesn't have dtype, i.e. not a tensor, we'll store None. + arg_dtypes = [ + get_dtype(arg) if isinstance(arg, torch.fx.Node) else None + for arg in node.args + ] + + # Find last non-None element. If all elements are None, return max_len. + last_index = len(arg_dtypes) - next( + ( + i + for i, dtype in enumerate(reversed(arg_dtypes)) + if dtype is not None + ), + len(arg_dtypes), + ) + + # Strip None elements at the end. + arg_dtypes_tuple = tuple(arg_dtypes[:last_index]) + kwarg_dtypes_tuple = tuple( + (k, get_dtype(arg)) + for k, arg in node.kwargs.items() + if isinstance(arg, torch.fx.Node) + ) + + if self.operator_support.is_node_supported(submodules, node): + supported_nodes.append(node) + supported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple)) + else: + unsupported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple)) + + if dump_graph: + self._draw_graph_based_on_node_support(self.module, supported_nodes) + + reports = "\nSupported node types in the model:\n" + for t, dtypes in supported_node_types.items(): + for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes: + reports += f"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\n" + + reports += "\nUnsupported node types in the model:\n" + for t, dtypes in unsupported_node_types.items(): + for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes: + reports += f"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\n" + + print(reports) + + # Return reports for testing purpose + return reports + + def split_preview(self, dump_graph: bool = False): + reports = "" + subgraphs = self.put_nodes_into_subgraphs() + acc_subgraphs_num = len([g for g in subgraphs if g.is_acc]) + cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num + reports += f"Before removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:" + 
reports += f" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\n" + + subgraphs = self.remove_small_acc_subgraphs(subgraphs) + acc_subgraphs_num = len([g for g in subgraphs if g.is_acc]) + cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num + reports += f"After removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:" + reports += f" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\n" + + for i, subgraph in enumerate(subgraphs): + reports += f"_run_on_acc_{i}: " if subgraph.is_acc else f"{self.non_acc_submodule_name}{i}: " + reports += f"{len(subgraph.nodes)} node(s)\n" + + self.tag(subgraphs) + split_mod = self.split(remove_tag=True) + split_mod.eval() + + if dump_graph: + drawer = FxGraphDrawer( + split_mod, "preview", ignore_getattr=True + ) + dot_graphs = drawer.get_all_dot_graphs() + for name, dot_graph in dot_graphs.items(): + dot_graph.write_raw(f"{name}.dot") + + max_qps: float = self.PCIe_BW + bottleneck_module = "" + + for node in split_mod.graph.nodes: + if node.op == "call_module" and "acc" in node.target: + reports += f"\nProcessing acc submodule {node.target}\n" + + submod = getattr(split_mod, node.target) + + def get_submod_inputs(main_mod, submod, example_inputs): + sub_inputs = None + + def get_inputs(self, inputs): + nonlocal sub_inputs + sub_inputs = inputs + + handle = submod.register_forward_pre_hook(get_inputs) + main_mod(*example_inputs) + handle.remove() + return sub_inputs + + submod_inputs = get_submod_inputs( + split_mod, submod, self.sample_input + ) + ShapeProp(submod).propagate(*submod_inputs) + + total_input_bytes = 0 + total_output_bytes = 0 + + reports += "Checking inputs...\n" + for n in submod.graph.nodes: + if n.op == "placeholder": + if not is_node_output_tensor(n): + reports += f"Input {n.name} is not a tensor, this might cause problems during lowering!\n" + else: + total_input_bytes += get_size_of_node(submod, n)[0] + if n.op == "output": + output_node = n + 
+ reports += "Checking outputs...\n" + + def get_bytes(node: torch.fx.Node): + nonlocal total_output_bytes + nonlocal reports + if not is_node_output_tensor(node): + reports += f"Output {node.name} is not a tensor, this might cause problems during lowering!\n" + else: + total_output_bytes += get_size_of_node(submod, node)[0] + + map_arg(output_node.args, get_bytes) + qps = self.PCIe_BW / max(total_input_bytes, total_output_bytes) + reports += f"Total input size in bytes is {total_input_bytes}, total output size in bytes is {total_output_bytes}," + reports += f" theoretical max qps (bounds by PCIe bandwidth) for this submodule is {qps}.\n" + + if qps < max_qps: + max_qps = qps + bottleneck_module = node.target + + try: + lowered_submod = self._lower_model_to_backend(submod, submod_inputs) + except RuntimeError: + reports += "Run into an error during lowering!\n" + reports += self._find_culprit(submod, submod_inputs) + continue + + try: + lowered_submod(*submod_inputs) + except RuntimeError: + reports += "Run into an error during inference!\n" + reports += self._find_culprit(submod, submod_inputs) + else: + reports += "Lowering and running succeed!\n" + + reports += f"\nTheoretical max qps (bounds by PCIe bandwidth) for this model is {max_qps}," + reports += f" bottleneck is submodule {bottleneck_module}." + print(reports) + + # return the reports for testing purposes + return reports + + # =============================================================== + # Helpers for extend_acc_subgraph() method + # =============================================================== + + def find_reverse_deps( + self, tag_id: Optional[int] = None + ) -> Dict[torch.fx.Node, NodeSet]: + """ + Builds reversed topological node dependencies, if tag_id is specified, + we ignore nodes that are in later subgraph i.e. nodes have greater tag_id. 
+ """ + result: Dict[torch.fx.Node, NodeSet] = defaultdict(set) + + for node in self.module.graph.nodes: + if node.op not in CALLABLE_NODE_OPS: + continue + + for user in node.users: + if user.op not in CALLABLE_NODE_OPS: + continue + + if tag_id is None or (int(user.tag.split("_")[-1]) < tag_id): + result[node].add(user) + + return result + + def update_reverse_deps_for_fusions( + self, deps: Dict[torch.fx.Node, NodeSet] + ): + processed_node = set() + + for node, fusion in self.fusions.items(): + if node in processed_node: + continue + + new_dep = set() + + # Create a new dependency set which include all the + # dependencies of the nodes in the fusion group + for n in fusion: + new_dep.update(deps[n]) + + # Exclude nodes in the fusion + new_dep.difference_update(fusion) + + # Update dependency + for n in fusion: + deps[n] = new_dep + + for arg in n.all_input_nodes: + if arg not in fusion: + deps[arg].update(fusion) + + processed_node.add(n) + + def find_parent_nodes_of_subgraph(self, tag: str) -> NodeSet: + """ + Finds parent nodes of the `tag` subgraph. + + Traverse the inputs of nodes in the subgraph, if input doesn't belong to the subgraph + and is not a placeholder, we consider it as the parent node of the subgraph. + """ + parent_nodes = set() + + for node in self.module.graph.nodes: + if node.op in CALLABLE_NODE_OPS and node.tag == tag: + for arg in node.all_input_nodes: + if arg.op in CALLABLE_NODE_OPS and arg.tag != tag: + parent_nodes.add(arg) + + return parent_nodes + + def extend_acc_subgraph(self, tag: str): + """ + Extend the acc subgraph with `tag` going the reversed topological direction. 
+ """ + # Dict that maps node to its users and ignore users that + # are in the subgraph that has greater tag + deps = self.find_reverse_deps(tag_id=int(tag.split("_")[-1])) + self.update_reverse_deps_for_fusions(deps) + + # Parent nodes of the subgraph + parent_nodes = self.find_parent_nodes_of_subgraph(tag) + + visited_nodes: NodeSet = set() + + while parent_nodes: + node = None + + # Find a acc node that depends on visited nodes only + for n in parent_nodes: + if deps[n] <= visited_nodes and n in self.acc_nodes: + node = n + break + + if node is None: + break + + # Put the node into `tag` subgraph + node.tag = tag # type: ignore[attr-defined] + parent_nodes.remove(node) + visited_nodes.add(node) + + # If node is in a fusion group, add all fusion buddies to parent nodes + if node in self.fusions: + for fusion_node in self.fusions[node]: + if fusion_node not in visited_nodes: + parent_nodes.add(fusion_node) + + # Add inputs of the node to parent nodes + for arg in node.all_input_nodes: + if arg.op in CALLABLE_NODE_OPS and arg not in visited_nodes: + parent_nodes.add(arg) + + # =============================================================== + # Helpers for split() method + # =============================================================== + + def starter_nodes(self) -> Tuple[NodeSet, NodeSet]: + """ + Finds nodes that consume module inputs or get_attr nodes. 
+ """ + starter_cpu_nodes: NodeSet = set() + starter_acc_nodes: NodeSet = set() + for node in self.module.graph.nodes: + if node.op not in {"placeholder", "get_attr"}: + continue + for user in node.users: + if user in self.acc_nodes: + starter_acc_nodes.add(user) + else: + starter_cpu_nodes.add(user) + return starter_cpu_nodes, starter_acc_nodes + + def put_nodes_into_subgraphs(self) -> List[Subgraph]: + # We start graph traversal from leaf nodes + current_cpu_nodes, current_acc_nodes = self.starter_nodes() + visited_nodes: NodeSet = set() + + # Determine which subgraph to start from based on which subgraph has + # 0-dep node + acc_subgraph: bool = not any(len(self.deps[n]) == 0 for n in current_cpu_nodes) + + current_subgraph_nodes: NodeList = [] + + # Result accumulator + subgraphs: List[Subgraph] = [] + while current_cpu_nodes or current_acc_nodes: + # Find the first node that should belong to the current subgraph and has all dependencies resolved + current_nodes = current_acc_nodes if acc_subgraph else current_cpu_nodes + node = next( + (n for n in current_nodes if self.deps[n] <= visited_nodes), + None, + ) + + # If nothing was found, then it's time to flip the mode and start a new subgraph + if node is None: + if not current_subgraph_nodes: + raise FxNetSplitterInternalError("Subgraph can't be empty") + + subgraphs.append( + Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes) + ) + acc_subgraph = not acc_subgraph + current_subgraph_nodes = [] + continue + + current_nodes.remove(node) + visited_nodes.add(node) + current_subgraph_nodes.append(node) + + # Add fusion buddies + if node in self.fusions: + if node in self.acc_nodes: + current_acc_nodes.update(self.fusions[node] - visited_nodes) + else: + current_cpu_nodes.update(self.fusions[node] - visited_nodes) + + # Put depending nodes into the queue + for user in node.users: + if user.op not in CALLABLE_NODE_OPS: + continue + + # Add downstream nodes + if user in self.acc_nodes: + 
current_acc_nodes.add(user) + else: + current_cpu_nodes.add(user) + + # Check if the last subgraph was not created + if current_subgraph_nodes: + subgraphs.append( + Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes) + ) + + if not subgraphs: + raise FxNetSplitterInternalError("Couldn't create subgraphs") + + return subgraphs + + def remove_small_acc_subgraphs(self, subgraphs: List[Subgraph]) -> List[Subgraph]: + """ + This pass finds ACC submodules with less than specified size and merges + them with adjacent CPU submodules. + """ + result: List[Subgraph] = [] + for subgraph in subgraphs: + if subgraph.is_acc: + if len(subgraph.nodes) >= self.settings.min_acc_module_size: + result.append(subgraph) + else: + print( + "Eliminating acc subgraph because it's smaller than the threshold: " + f"{len(subgraph.nodes)} < {self.settings.min_acc_module_size}" + ) + if result: + result[-1].nodes.extend(subgraph.nodes) + else: + subgraph.is_acc = False + result.append(subgraph) + else: + if result and not result[-1].is_acc: + result[-1].nodes.extend(subgraph.nodes) + else: + result.append(subgraph) + return result + + def tag(self, subgraphs: List[Subgraph]): + self.tags: List[str] = [] + for subgraph in subgraphs: + tag = f"_run_on_acc_{len(self.tags)}" if subgraph.is_acc else f"{self.non_acc_submodule_name}{len(self.tags)}" + self.tags.append(tag) + for node in subgraph.nodes: + if hasattr(node, "tag"): + raise FxNetSplitterInternalError(f"Node {node} was already tagged") + + node.tag = tag # type: ignore[attr-defined] + self._node_submodule_map[node.name] = tag + + def split(self, remove_tag: bool = False) -> torch.fx.GraphModule: + split_module = split_by_tags(self.module, self.tags) + if remove_tag: + for node in self.module.graph.nodes: + if hasattr(node, "tag"): + del node.tag + return split_module + + def __call__(self) -> torch.fx.GraphModule: + subgraphs = self.put_nodes_into_subgraphs() + subgraphs = self.remove_small_acc_subgraphs(subgraphs) + 
acc_subgraphs_count = len([s for s in subgraphs if s.is_acc]) + non_acc_subgraphs_count = len(subgraphs) - acc_subgraphs_count + print(f"Got {acc_subgraphs_count} acc subgraphs and {non_acc_subgraphs_count} non-acc subgraphs") + self.tag(subgraphs) + return self.split() + + def generate_split_results(self) -> SplitResult: + split_module = self() + submodule_names = [] + for name, mod in split_module.named_children(): + submodule_names.append(name) + submodule_inputs = generate_inputs_for_submodules(split_module, self.sample_input, submodule_names) + return SplitResult(split_module, submodule_inputs, self.non_acc_submodule_name) diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__init__.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ec7c24bc316462c4f484d4b1d40bce61914c095 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__pycache__/test_pass_manager.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__pycache__/test_pass_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4745c5c8a23a2b997d867ce6a6e9b989fa27c003 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/__pycache__/test_pass_manager.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/test_pass_manager.py 
b/llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/test_pass_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..60ed6671179b2c20fa0be176631d1415009ee87a --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/tests/test_pass_manager.py @@ -0,0 +1,58 @@ +import unittest + +from ..pass_manager import ( + inplace_wrapper, + PassManager, + these_before_those_pass_constraint, + this_before_that_pass_constraint, +) + + +class TestPassManager(unittest.TestCase): + def test_pass_manager_builder(self) -> None: + passes = [lambda x: 2 * x for _ in range(10)] + pm = PassManager(passes) + pm.validate() + + def test_this_before_that_pass_constraint(self) -> None: + passes = [lambda x: 2 * x for _ in range(10)] + pm = PassManager(passes) + + # add unfulfillable constraint + pm.add_constraint(this_before_that_pass_constraint(passes[-1], passes[0])) + + self.assertRaises(RuntimeError, pm.validate) + + def test_these_before_those_pass_constraint(self) -> None: + passes = [lambda x: 2 * x for _ in range(10)] + constraint = these_before_those_pass_constraint(passes[-1], passes[0]) + pm = PassManager( + [inplace_wrapper(p) for p in passes] + ) + + # add unfulfillable constraint + pm.add_constraint(constraint) + + self.assertRaises(RuntimeError, pm.validate) + + def test_two_pass_managers(self) -> None: + """Make sure we can construct the PassManager twice and not share any + state between them""" + + passes = [lambda x: 2 * x for _ in range(3)] + constraint = these_before_those_pass_constraint(passes[0], passes[1]) + pm1 = PassManager() + for p in passes: + pm1.add_pass(p) + pm1.add_constraint(constraint) + output1 = pm1(1) + self.assertEqual(output1, 2 ** 3) + + passes = [lambda x: 3 * x for _ in range(3)] + constraint = these_before_those_pass_constraint(passes[0], passes[1]) + pm2 = PassManager() + for p in passes: + pm2.add_pass(p) + pm2.add_constraint(constraint) + output2 = pm2(1) + self.assertEqual(output2, 3 ** 3) 
diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/tools_common.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/tools_common.py new file mode 100644 index 0000000000000000000000000000000000000000..42032b4b6cad1f9920bdbff7cd3f56a6f66aa00c --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/tools_common.py @@ -0,0 +1,254 @@ +from typing import List, Tuple, Union, Dict, Any, Set, Mapping +import collections +from dataclasses import dataclass + +import torch +import torch.fx +from torch.fx.node import _get_qualified_name +from torch.fx._compatibility import compatibility + +__all__ = ['get_acc_ops_name', 'get_node_target', 'is_node_output_tensor', 'FxNetAccFusionsFinder', 'legalize_graph'] + +Tensors = Union[Tuple[torch.Tensor], List[torch.Tensor]] +TensorOrTensors = Union[torch.Tensor, Tensors] +NodeList = List[torch.fx.Node] +NodeSet = Set[torch.fx.Node] +Names = List[str] +CALLABLE_NODE_OPS = {"call_module", "call_function", "call_method"} + + +@compatibility(is_backward_compatible=False) +def get_acc_ops_name(k): + if isinstance(k, str): + return k + elif k.__module__ and "acc_ops" in k.__module__: + return f"acc_ops.{k.__name__}" + else: + module = k.__module__.replace('torch._ops', 'torch.ops') # WAR for bug in how torch.ops assigns module + return f"{module if module else ''}.{k.__name__}" + + +@compatibility(is_backward_compatible=False) +def get_node_target(submodules: Mapping[str, torch.nn.Module], node: torch.fx.Node) -> str: + """ + Given a `node` returns its target typename. + + For "call_method" node, return node.target which is the name of that method being called. + This could potential lead to conflict but should be okay because normally it's on a tensor. + + For "call_function" node, return typename of node.target. + + For "call_module" node, return typename of the module that node.target point to. + + If seeing "_VariableFunctionsClass" in the target name string, it will be replaced by + "torch". 
e.g. _VariableFunctionsClass.relu would become torch.relu. + """ + + assert node.op in CALLABLE_NODE_OPS, ( + "Expect op types of " + ", ".join(CALLABLE_NODE_OPS) + f", but found {node.op}" + ) + + if node.op == "call_module": + assert isinstance(node.target, str) + submod = submodules[node.target] + submod_type = getattr(submod, "_base_class_origin", type(submod)) + return get_acc_ops_name(submod_type) + elif node.op == "call_function": + target: Any = node.target + return ( + f"acc_ops.{target.__name__}" + if target.__module__ is not None and "acc_ops" in target.__module__ + else _get_qualified_name(target) + ) + else: + assert isinstance(node.target, str) + return node.target + +@compatibility(is_backward_compatible=False) +def is_node_output_tensor(node: torch.fx.Node) -> bool: + """Checks if the node output produces a Tensor or not. + + NOTE: This requires to run `ShapeProp` on the containing fx graph before + calling this function. This is because it works by checking the `type` + metadata on the node. This metadata is produced by the `ShapeProp`. + """ + type_ = node.meta.get("type", None) + return type_ is not None and issubclass(type_, torch.Tensor) + +@compatibility(is_backward_compatible=False) +class FxNetAccFusionsFinder: + """ + Finds groups of connected ACC nodes that pass non-tensor data between each other. + Such groups are called fusion groups. + """ + + def __init__(self, module: torch.fx.GraphModule, acc_nodes: NodeSet): + self.module = module + self.nodes = list(module.graph.nodes) + self.acc_nodes = acc_nodes + + @dataclass + class FusionGroup: + # The smallest idx of nodes in the fusion group after topological sorting all the nodes in the model. + top_node_idx: int + + # Nodes in this fusion group. + nodes: NodeSet + + # Inputs to this fusion group. + inputs: NodeSet + + # Nodes that in the fusion group that haven't been processed yet. + nodes_need_process: NodeSet + + def add_node(self, node): + """ + Add a node to fusion group. 
+ """ + if node in self.nodes: + return + + self.nodes_need_process.add(node) + self.nodes.add(node) + self.inputs.discard(node) + self.inputs.update( + { + n + for n in node.all_input_nodes + if n.op in CALLABLE_NODE_OPS and n not in self.nodes + } + ) + + def recursive_add_node( + self, + fusion_group: "FxNetAccFusionsFinder.FusionGroup", + inputs: Union[NodeSet, NodeList], + ): + """ + Start from inputs and going reverse topological order. If any upstream node + is in the fusion group, add all the nodes in this path to fusion group. + """ + for arg in inputs: + # Skip placeholder and get_attr because they won't be in the fusion group. + if arg.op not in CALLABLE_NODE_OPS: + continue + + # If the node has smaller idx, it's already an upstream node of the fusion + # group. We don't need to check it anymore. + if self.nodes.index(arg) < fusion_group.top_node_idx: + continue + + # If the node is in the fusion group, return True. + if arg in fusion_group.nodes: + return True + + # Check the upstream nodes of the node, if any of them is in the fusion group + # we'll add this node to fusion group and return True. 
+ if self.recursive_add_node(fusion_group, arg.all_input_nodes): + fusion_group.add_node(arg) + return True + + return False + + def __call__(self) -> Dict[torch.fx.Node, NodeSet]: + result: Dict[torch.fx.Node, NodeSet] = {} + acc_nodes = list(self.acc_nodes) + + for node in acc_nodes: + if node in result: + continue + if node.op not in CALLABLE_NODE_OPS: + continue + if "tensor_meta" in node.meta: + continue + if node not in self.acc_nodes: + continue + + fusion_group: FxNetAccFusionsFinder.FusionGroup = self.FusionGroup( + top_node_idx=self.nodes.index(node), + nodes={node}, + inputs=set(node.all_input_nodes), + nodes_need_process={node}, + ) + while fusion_group.nodes_need_process: + node = fusion_group.nodes_need_process.pop() + self.recursive_add_node(fusion_group, fusion_group.inputs) + + # Optionally add downstream nodes + if "tensor_meta" not in node.meta: + for user in node.users: + if user.op not in CALLABLE_NODE_OPS: + continue + if user in fusion_group.nodes: + continue + + fusion_group.add_node(user) + self.recursive_add_node(fusion_group, fusion_group.inputs) + + # Add some upstream nodes + for arg in node.all_input_nodes: + if arg.op not in CALLABLE_NODE_OPS: + continue + if "tensor_meta" in arg.meta: + continue + if arg in fusion_group.nodes: + continue + + fusion_group.add_node(arg) + fusion_group.top_node_idx = min( + fusion_group.top_node_idx, self.nodes.index(arg) + ) + self.recursive_add_node(fusion_group, fusion_group.inputs) + + if not (set(fusion_group.nodes) <= self.acc_nodes): + self.acc_nodes -= fusion_group.nodes + else: + for n in fusion_group.nodes: + result[n] = fusion_group.nodes + + return result + + +@compatibility(is_backward_compatible=False) +def legalize_graph(gm: torch.fx.GraphModule) -> torch.fx.GraphModule: + """ + Replace the graph of the given GraphModule with one that contains the same nodes as the + original, but in topologically sorted order. 
+ + This is used by the merge_matmul transformation below, which disturbs the topologically sorted + order of its input GraphModule, so that this order is restored before further transformation. + + Arguments: + gm: The graph module to topologically sort. It is modified in-place. + + Returns: + The graph module in-place sorted + """ + indeg = {node: 0 for node in gm.graph.nodes} + new_graph = torch.fx.Graph() + # Track how many unfulfilled dependencies each node has + for node in gm.graph.nodes: + for user in node.users: + indeg[user] += 1 + queue: collections.deque = collections.deque() + # Add all nodes with no dependencies to the queue + for node in gm.graph.nodes: + if indeg[node] == 0: + queue.append(node) + env: Dict[torch.fx.Node, torch.fx.Node] = {} + # Pop nodes from the queue, and add nodes that have had all their + # dependencies fulfilled + while len(queue) > 0: + cur = queue.popleft() + env[cur] = new_graph.node_copy(cur, lambda x: env[x]) + for user in cur.users: + indeg[user] -= 1 + if indeg[user] == 0: + queue.append(user) + # If the new graph's size is not as large as the old one, then there must be + # a cycle (i.e. some node's dependencies were not satisfied.) 
+ if len(new_graph.nodes) < len(gm.graph.nodes): + raise RuntimeError(f"Input graph has cycles, unable to add {[node for node in indeg if indeg[node] != 0]}") + new_graph._codegen = gm.graph._codegen + gm.graph = new_graph + return gm diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__init__.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2a7970ba4c283e851430ed0025e1ed5c772eb7b1 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__init__.py @@ -0,0 +1 @@ +from .common import lift_subgraph_as_module, HolderModule, compare_graphs diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7888461ac691213fde37c5d899647fd8a47a6e52 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/common.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7dd623a4f3a8ad0ad8e571a8ec989d4389c6af5 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/common.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/fuser_utils.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/fuser_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57cb09e64d63d7a53e04e6f703c95a68cc9e6057 Binary files /dev/null and 
b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/fuser_utils.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/matcher_utils.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/matcher_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..165b9afadd11baf1a27dc14f1dccb75d51097f0d Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/matcher_utils.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/source_matcher_utils.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/source_matcher_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abea916ba4f99914fb6c11c02966060bf3b38f02 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/__pycache__/source_matcher_utils.cpython-310.pyc differ diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/common.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/common.py new file mode 100644 index 0000000000000000000000000000000000000000..22313a84bef1aa9adac60bffc126fadbf37fe900 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/common.py @@ -0,0 +1,83 @@ +from torch.nn import Module + +from torch.fx.graph_module import GraphModule +from torch.fx.graph import Graph +from torch.fx.passes.utils.matcher_utils import SubgraphMatcher +from torch.fx._compatibility import compatibility + + +__all__ = ['HolderModule', 'lift_subgraph_as_module', 'compare_graphs'] + +@compatibility(is_backward_compatible=False) +class HolderModule(Module): + """ + HolderModule is used to copy all the attributes from original module to submodules + that uses the attributes + """ + + def __init__(self, d): + 
super().__init__() + for k, v in d.items(): + self.add_module(k, v) + + +@compatibility(is_backward_compatible=False) +def lift_subgraph_as_module(gm: GraphModule, subgraph: Graph, class_name: str = 'GraphModule') -> GraphModule: + """ + Create a GraphModule for subgraph, which copies the necessary attributes from the original parent graph_module. + + Args: + gm (GraphModule): parent graph module + + subgraph (Graph): a valid subgraph that contains copied nodes from the parent graph + + class_name (str): name for the submodule + + """ + + # Loop through all module calls (call_module) and param fetches (get_attr) + # in this component, creating HolderModules as necessary to match the path. + # e.g. if in the original module there's a get_attr node fetches "conv.weight". + # We create a HolderModule as root -> add a HolderModule named "conv" -> + # make "weight" a attribute of "conv" HolderModule and point to conv.weight in + # the original module. + submodule = HolderModule({}) + for n in subgraph.nodes: + if n.op not in ("call_module", "get_attr"): + continue + + target = n.target + assert isinstance(target, str) + target_name_parts = target.split(".") + curr = submodule + orig_gm = gm + + for name in target_name_parts[:-1]: + if not hasattr(curr, name): + curr.add_module(name, HolderModule({})) + + curr = getattr(curr, name) + orig_gm = getattr(orig_gm, name) + + leaf_node_name = target_name_parts[-1] + leaf_node = getattr(orig_gm, leaf_node_name) + + # Relies on custom __setattr__ magic. 
+ setattr(curr, leaf_node_name, leaf_node) + + return GraphModule(submodule, subgraph, class_name) + + +@compatibility(is_backward_compatible=False) +def compare_graphs(left: Graph, right: Graph) -> bool: + """ + Return True if two graphs are identical, i.e they + - have the same number of outputs in the same order + - have the same number of inputs in the same order + - have the same set of nodes, and identical connectivity + """ + + matcher = SubgraphMatcher(left, match_output=True, match_placeholder=True) + matches = matcher.match(right) + + return len(matches) > 0 diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/fuser_utils.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/fuser_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e777a55740fcb3c7e5608cd61111e1b01054d4a4 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/fuser_utils.py @@ -0,0 +1,233 @@ +import copy +from queue import SimpleQueue +from typing import List, Dict, Tuple + +import torch.fx +from torch.fx.graph_module import GraphModule +from torch.fx.graph import Graph +from torch.fx.node import Node +from torch.fx.passes.tools_common import NodeList, NodeSet, legalize_graph +from torch.fx.passes.utils import lift_subgraph_as_module +from torch.fx._compatibility import compatibility + +@compatibility(is_backward_compatible=False) +def topo_sort(nodes: NodeList) -> NodeList: + # sort nodes according to the topological order + indegree_map = {node : 0 for node in nodes} + candidates: SimpleQueue = SimpleQueue() + + for node in nodes: + for n in node.all_input_nodes: + if n in indegree_map: + indegree_map[node] += 1 + if indegree_map[node] == 0: + candidates.put(node) + + sorted_nodes: NodeList = list() + while not candidates.empty(): + node = candidates.get() + sorted_nodes.append(node) + + for n in node.users: + if n in indegree_map: + indegree_map[n] -= 1 + if indegree_map[n] == 0: + candidates.put(n) 
+ + assert len(nodes) == len(sorted_nodes), "topological sorted nodes doesn't have same length as input nodes" + + return sorted_nodes + + +@compatibility(is_backward_compatible=False) +def validate_partition(partition: NodeList) -> bool: + # verify the partition does't form a dependency cycle in the original graph + # returns True for valid partition, False for invalid + + partition_set = set(partition) + + outputs: NodeList = list() + for node in partition_set: + for user_node in node.users: + if user_node not in partition_set: + # external user node, need to expose as an output + outputs.append(user_node) + + # Perform BFS on the partition outputs. + # If it reaches a node within the partition, then it found a cycle. + # This function takes the ownership of `root_nodes` and may modify it. + def bfs_find_cycle(root_nodes: NodeList) -> bool: + # Set used to exclude nodes that have already been visited. + # If a node has been visited, that node and all its children have + # been checked for cycles. + visited: NodeSet = set() + + # Start with `root_nodes` and traverse through (toward child nodes) + # their connected sub-graph. Nodes in `visited` won't be added + # to `queue` again. + queue: NodeList = root_nodes + while queue: + current = queue.pop() + visited.add(current) + if current in partition_set: + # Started from partition's `output` nodes, and reached + # another node in partition. Cycle! + return True + for user_node in current.users: + if user_node in visited: + continue + queue.append(user_node) + # `root_nodes` don't cause cycle. + return False + + # Use all output nodes as roots to traverse + # the graph to check cycles. + if bfs_find_cycle(outputs): + return False + + return True + + +@compatibility(is_backward_compatible=False) +def fuse_as_graphmodule(gm: GraphModule, + nodes: NodeList, + module_name: str) -> Tuple[GraphModule, Tuple[Node, ...], Tuple[Node, ...]]: + + """ + Fuse nodes in graph_module into a GraphModule. 
+ + Args: + gm (GraphModule): target graph_module + + nodes (List[Node]): list of nodes in `gm` to fuse, where the node must be topologically sorted + + module_name: class name for the fused GraphModule + + Returns: + fused_gm (GraphModule): fused graph module, where its node is a copy of `nodes` in `gm` + + original_inputs (Tuple[Node, ...]): input nodes to `nodes` in original `gm` + + original_outputs (Tuple[Node, ...]): consumer nodes of `nodes` in original `gm` + + """ + + # assumption: nodes are already sorted in topo order + + for node in nodes: + assert node.graph.owning_module is gm, f"{node} doesn't belong to passed in graph module {gm._get_name()}" + assert not node._erased, f"{node} has been removed from owning graph" + assert node in gm.graph.nodes, f"{node} is not found in graph module {gm._get_name()}" + + # validates partition doesn't introduce dependency circles in the graph + assert validate_partition(nodes), "Invalid partition, found dependency cycles" + + subgraph = Graph() + + node_to_placeholder: Dict[Node, Node] = {} # mapping of nodes from old graph to placeholder in new graph + node_map: Dict[Node, Node] = {} # mapping of nodes from old graph to new graph + + # handles inputs through graph.node_copy's arg_transform functions + def remap_inputs(x): + if x.op == "get_attr": + # TODO: do we really need copy the get_attr node into the graph? 
+ # do something here + pass + + if x in nodes: + # x is inside subgraph, return the copied node + # the node should have been copied aleady, as we are copying graph in the topological order + return node_map[x] + + if x not in node_to_placeholder: + # x is not in subgraph, create a new placeholder for subgraph + placeholder_node = subgraph.placeholder(x.name, type_expr=x.type) + # copy all meta fields, even if some fields might be irrelvant for the placeholder node + placeholder_node.meta = copy.copy(x.meta) + node_to_placeholder[x] = placeholder_node + + return node_to_placeholder[x] + + # copy nodes in topological order + for node in nodes: + new_node = subgraph.node_copy(node, remap_inputs) + node_map[node] = new_node + + # handles outputs + output_mapping: Dict[Node, Node] = {} # mapping from old output to new outputs + + for node in nodes: + for user_node in node.users: + if user_node not in nodes: + # external user node, need to expose as an output + output_mapping[node] = node_map[node] + + # outs contain nodes in the new subgraph + outs = tuple(output_mapping.values()) + + # Take care of the args of FX output node. If there's a single + # output then the output node args is like (output_single), else + # if there're multiple outputs then the output node args is like + # ((output_0, output_1, ...)). + subgraph.output(outs[0] if len(outs) == 1 else outs) + + # lint to ensure correctness + subgraph.lint() + + fused_gm: GraphModule = lift_subgraph_as_module(gm, subgraph, class_name=module_name) + + # sub_gm's input nodes in the original module + original_inputs: Tuple[Node, ...] = tuple(node_to_placeholder.keys()) + + # sub_gm's outputs node in the original module + original_outputs: Tuple[Node, ...] 
= tuple(output_mapping.keys()) + + return fused_gm, original_inputs, original_outputs + + +@compatibility(is_backward_compatible=False) +def insert_subgm(gm: GraphModule, sub_gm: GraphModule, orig_inputs: Tuple[Node, ...], orig_outputs: Tuple[Node, ...]): + # add sub_gm into gm + submodule_name = sub_gm.__class__.__name__ + gm.add_submodule(submodule_name, sub_gm) + + # Create a call_module node in main graph. + module_node = gm.graph.call_module( + submodule_name, + args=orig_inputs, + kwargs=None) + + if len(orig_outputs) == 1: + # main_remapping[comp.orig_outputs[0]] = module_node + orig_outputs[0].replace_all_uses_with(module_node, propagate_meta=True) + else: + for i, orig_output in enumerate(orig_outputs): + # Use Proxy to record getitem access. + proxy_out = torch.fx.Proxy(module_node)[i].node # type: ignore[index] + orig_output.replace_all_uses_with(proxy_out, propagate_meta=True) + return gm + +@compatibility(is_backward_compatible=False) +def erase_nodes(gm: GraphModule, nodes: NodeList): + + # erase original nodes in inversed topological order + for node in reversed(nodes): + gm.graph.erase_node(node) + + +@compatibility(is_backward_compatible=False) +def fuse_by_partitions(gm: GraphModule, partitions: List[NodeList]) -> GraphModule: + for partition_id, nodes in enumerate(partitions): + sorted_nodes = topo_sort(nodes) + + submodule_name = "fused_" + str(partition_id) + sub_gm, orig_inputs, orig_outputs = fuse_as_graphmodule(gm, sorted_nodes, submodule_name) + + insert_subgm(gm, sub_gm, orig_inputs, orig_outputs) + + erase_nodes(gm, sorted_nodes) + + # topological sort original gm with newly created sub_gm + legalize_graph(gm) + + return gm diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/matcher_utils.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/matcher_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6348df62f0606d4067ad609382e20ed14e250a10 --- /dev/null +++ 
b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/matcher_utils.py @@ -0,0 +1,382 @@ +from dataclasses import dataclass, field +from collections import defaultdict +import copy +import torch +from torch.fx.graph import Graph +from torch.fx.node import Node +from torch.fx._compatibility import compatibility +from typing import Dict, List, Set, Any, Union, Tuple +import logging +import os + +__all__ = ['SubgraphMatcher', 'InternalMatch'] + +# Set`PYTORCH_MATCHER_LOGLEVEL=INFO` to see debug logs +def _init_logger(): + logger = logging.getLogger(__name__) + + level = os.environ.get('PYTORCH_MATCHER_LOGLEVEL', 'WARNING').upper() + logger.setLevel(level) + console = logging.StreamHandler() + formatter = logging.Formatter("%(filename)s > %(message)s") + console.setFormatter(formatter) + console.setLevel(level) + # add the handlers to the logger + logger.addHandler(console) + logger.propagate = False + return logger + +logger = _init_logger() + +@compatibility(is_backward_compatible=False) +@dataclass +class InternalMatch: + # Nodes from which the match was found + anchors: List[Node] + # Maps nodes in the pattern subgraph to nodes in the larger graph + nodes_map: Dict[Node, Node] = field(default_factory=dict) + + # nodes in target graph that are matched placeholder in pattern + placeholder_nodes: List[Node] = field(default_factory=list) + + # nodes in matched subgraph returned by output + returning_nodes: List[Node] = field(default_factory=list) + + def __copy__(self): + return InternalMatch(anchors=self.anchors, nodes_map=self.nodes_map.copy(), + placeholder_nodes=self.placeholder_nodes.copy(), + returning_nodes=self.returning_nodes.copy()) + +@compatibility(is_backward_compatible=False) +class SubgraphMatcher: + def __init__(self, pattern: Graph, + match_output: bool = False, + match_placeholder: bool = False, + remove_overlapping_matches: bool = True, + ignore_literals: bool = False) -> None: + """ + Args: + pattern: the targeted matching pattern, 
represented in fx.Graph. + match_output: If True, output node in the pattern graph will be treated as a part of the targeted pattern. + If False, output node is ignored during match. + match_placeholder: If True, placeholder node in the pattern graph will be treated as a part of + the targeted pattern. If False, placeholder nodes will be used a wildcard. + remove_overlapping_matches: If True, in the case of overlapping matches, only the first match + will be returned. + ignore_literals: If True, will not check if literals are equal and + will instead treat them as wildcards. + """ + + self.pattern = pattern + self.match_output = match_output + self.match_placeholder = match_placeholder + self.remove_overlapping_matches = remove_overlapping_matches + self.ignore_literals = ignore_literals + + if len(pattern.nodes) == 0: + raise ValueError("SubgraphMatcher cannot be initialized with an empty pattern") + + for node in pattern.nodes: + if node.op != "output": + assert len(node.users) > 0, \ + "SubgraphMatcher cannot be initialized with an pattern with dead code" + + # TODO: assert pattern is a connected graph + + self.pattern_placeholder_nodes = [n for n in pattern.nodes if n.op == "placeholder"] + output_node = next(iter(reversed(pattern.nodes))) + # nodes returned by outputs + self.pattern_returning_nodes: List[Node] = output_node.all_input_nodes + + self.pattern_anchors: List[Node] = [] + if match_output: + self.pattern_anchors = [output_node] + else: + # If a node has output_node as the ONLY user, then this node is a graph sink, + # and should be matched against as an anchor + self.pattern_anchors = [n for n in output_node.all_input_nodes if len(n.users) == 1] + + def _match_attributes(self, pn: Node, gn: Node) -> bool: + # Attributes matching is complicated. Right now we only support matching constant tensor + assert isinstance(pn.target, str), f"pn.target {pn.target} must be a string." 
+ assert isinstance(gn.target, str), f"gn.target {gn.target} must be a string." + pn_value = getattr(pn.graph.owning_module, pn.target) + gn_value = getattr(gn.graph.owning_module, gn.target) + if type(pn_value) != type(gn_value): + return False + + # Don't require exact match on tensor values. + if isinstance(pn_value, torch.Tensor): + return isinstance(gn_value, torch.Tensor) + else: + raise RuntimeError(f"Unsupported type {pn_value} when matching attributes") + return False + + def _nodes_are_equal(self, pn: Node, gn: Node) -> bool: + # if exact match for placeholder is not required, then use placeholder as a wildcard + if not self.match_placeholder and pn.op == "placeholder": + return True + + if pn.op == gn.op: + if pn.op == "placeholder" or pn.op == "output": + return True + elif pn.op == "get_attr": + return self._match_attributes(pn, gn) + return pn.target == gn.target + return False + + def _is_contained(self, nodes_map: Dict[Node, Node]) -> bool: + # `lookup` represents all the nodes in `original_graph` + # that are part of `pattern` + + # Placeholders can be used by other nodes in the graphs + lookup: Dict[Node, Node] = {gn : pn for pn, gn in nodes_map.items() if pn.op != "placeholder"} + + for gn, pn in lookup.items(): + # nodes returned by output are allowed to be used in other areas of the graph + if pn in self.pattern_returning_nodes: + continue + + for user in gn.users: + # If this node has users that were not in `lookup`, then it must leak out of the + # pattern subgraph + if user not in lookup: + return False + return True + + def _remove_overlapping_matches(self, matches: List[InternalMatch]) -> List[InternalMatch]: + non_overlapping_matches: List[InternalMatch] = list() + nodes_matched: Set[Node] = set() + + for match in matches: + found_overlap = False + for pn, gn in match.nodes_map.items(): + if pn.op not in {"placeholder", "output"} and gn in nodes_matched: + found_overlap = True + break + + if not found_overlap: + 
non_overlapping_matches.append(match) + for pn, gn in match.nodes_map.items(): + if pn.op not in {"placeholder", "output"}: + nodes_matched.add(gn) + return non_overlapping_matches + + def _match_literals(self, pn: Any, gn: Any, match: InternalMatch) -> bool: + assert not (isinstance(pn, Node) and isinstance(gn, Node)), "pn and gn cannot both be Node" + + if isinstance(pn, Node) and not isinstance(gn, Node): + if pn.op == "placeholder": + # Check if we've already matched these nodes in the current + # traversal + if pn in match.nodes_map: + return match.nodes_map[pn] == gn + + match.nodes_map[pn] = gn + return True + else: + return False + elif not isinstance(pn, Node) and isinstance(gn, Node): + return False + else: + return type(gn) == type(pn) and gn == pn + + def _match_nodes(self, pn: Node, gn: Node, match: InternalMatch) -> bool: + logger.info(" matching %s to %s", pn, gn) + + assert isinstance(pn, Node) and isinstance(gn, Node), str(f"pn and gn must be Node, pn: {pn}, gn: {gn}") + + # Check if we've already matched these nodes in the current + # traversal + if pn in match.nodes_map: + return match.nodes_map[pn] == gn + + # TODO: use a more efficient way to check if gn is matched before: two-way dict + if gn in match.nodes_map.values(): + return False + + if not self._nodes_are_equal(pn, gn): + return False + + # Optimistically mark `pn` as a match for `gn`, and save a local copy of match + saved_match = copy.copy(match) + match.nodes_map[pn] = gn + + # Placeholder is a wildcard and can be matched with any python object + # (including list/tuple) + if pn.op == "placeholder": + return True + + # Recursively traverse upwards to check if `pn` is a true + # match for `gn` + match_found = True + + def _match_args(args1: Union[List, Tuple], args2: Union[List, Tuple]) -> bool: + if len(args1) != len(args2): + return False + + for a1, a2 in zip(args1, args2): + if isinstance(a1, Node) and isinstance(a2, Node): + matched = self._match_nodes(a1, a2, match) + elif 
isinstance(a1, (list, tuple)) and isinstance(a2, (list, tuple)): + matched = _match_args(a1, a2) + else: + matched = self._match_literals(a1, a2, match) or self.ignore_literals + + if not matched: + return False + + return True + + # Flatten all args/kwargs into 1 list of args + pn_args, gn_args = None, None + if ( + (len(pn.args) != len(gn.args) or list(pn.kwargs.keys()) != list(gn.kwargs.keys())) and + pn.op == "call_function" and + isinstance(pn.target, torch._ops.OpOverload) + ): + args_schema = pn.target._schema.arguments + + def get_all_arguments(orig_args, orig_kwargs): + all_args = [] + for i, schema in enumerate(args_schema): + if schema.name in orig_kwargs: + all_args.append(orig_kwargs[schema.name]) + elif not schema.kwarg_only and i < len(orig_args): + all_args.append(orig_args[i]) + else: + all_args.append(schema.default_value) + return all_args + + pn_args = get_all_arguments(pn.args, pn.kwargs) + gn_args = get_all_arguments(gn.args, gn.kwargs) + + elif len(pn.args) == len(gn.args) and list(pn.kwargs.keys()) == list(gn.kwargs.keys()): + pn_args = list(pn.args) + gn_args = list(gn.args) + pn_args.extend(list(pn.kwargs.values())) + gn_args.extend(list(gn.kwargs.values())) + else: + match_found = False + + match_found = ( + match_found and + pn_args is not None and + gn_args is not None and + _match_args(pn_args, gn_args) + ) + + if not match_found: + # revert to saved_match before matching with current node + match = copy.copy(saved_match) + return False + + return True + + def match(self, graph: Graph) -> List[InternalMatch]: + """ + Returns: + The matched subgraphs. + Thre returned subgraph would be fully self-contained, meaning the nodes (except placeholder + and nodes returned by output) can only be consumed by nodes within the matched subgraph. + + Subgraph pattern matcher is implemented with the backtracking style in the following steps: + + 1. We first identify all the anchor nodes in the pattern graph. 
The anchor nodes + are the "sinks" (nodes with no user other than the output node) of the pattern graph. + One pattern graph could have multiple anchors if it has multiple return values. + + 2. In the target graph, we identify the potential candidate nodes that can be matched + with each anchor. These anchor-candidate pairs are the starting points for + pairwise per-node matching. + + 3. For each anchor-candidate pair, we simultaneously traverse backwards (DFS) in both + pattern and target graphs. For every pattern nodes along traversal path, we compare it + against the target nodes. In case any comparison failed, the match for this anchor-candidate + pair fails. A match is found when DFS completes traversing the graph. See `self._match_nodes` + for more details. + + 4. In the case of multiple anchors, every anchor will need to find a match using step 3. + In addition, the matches found between anchors need to have a common intersection node + in order for the match to be valid. This is implemented with backtracking. See `backtracking` + for more details. + + Notice: graph traversal must be done in the reverser order because a tensor can have multiple + consumers, but can only have a single producer. Only with reverser order, we can we jointly + traverse the pattern and target graph in a deterministic path. + + Warning: In theory, this backtracking algorithm have an **exponential** time complexity. However, + in practice, it's unlikely to blow up. 
+ + """ + from torch.fx.passes.utils.fuser_utils import validate_partition + + # find candidate nodes to match with pattern anchors + match_candidates: Dict[Node, List[Node]] = defaultdict(list) + for pattern_anchor in self.pattern_anchors: + for node in graph.nodes: + if self._nodes_are_equal(pattern_anchor, node): + match_candidates[pattern_anchor].append(node) + match_candidates_list = list(match_candidates.items()) + + logger.info("Initial match_candidates_list: %s\n", match_candidates_list) + + matches: List[InternalMatch] = [] + + def backtracking(anchor_index, match): + if anchor_index == len(match_candidates_list): + match.placeholder_nodes = [match.nodes_map[pn] for pn in self.pattern_placeholder_nodes] + match.returning_nodes = [match.nodes_map[pn] for pn in self.pattern_returning_nodes] + matches.append(match) + + logger.info("Found a match: %s\n", match) + return + + pattern_anchor, candidate_nodes = match_candidates_list[anchor_index] + saved_match = copy.copy(match) + + for node in candidate_nodes: + logger.info("Trying to match anchor %s to %s", pattern_anchor, node) + + match_found = self._match_nodes(pattern_anchor, node, match) + if match_found: + # match next anchor + backtracking(anchor_index + 1, match) + else: + logger.info("Failed to match anchor %s to %s\n", pattern_anchor, node) + + # revert to saved_match before matching with current anchor + match = copy.copy(saved_match) + + match = InternalMatch(anchors=self.pattern_anchors) + if match_candidates_list: + backtracking(0, match) + + # filter out the matches where the subgraph is not fully_contained + before = len(matches) + matches = [match for match in matches if self._is_contained(match.nodes_map)] + after = len(matches) + if before != after: + logger.info("Filtered out %s matches because they are not fully contained", before - after) + + # filter out the matches that form a cycle if the subgraph is fused + valid_matches = [] + for match in matches: + matched_compute_nodes = \ + [gn for 
pn, gn in match.nodes_map.items() if pn.op not in {"placeholder", "output"}] + if validate_partition(matched_compute_nodes): + valid_matches.append(match) + if len(valid_matches) != len(matches): + logger.info("Filtered out %s matches because \ + matched subgraph would form a cycle if fused", len(matches) - len(valid_matches)) + + if self.remove_overlapping_matches: + before = len(valid_matches) + matches = self._remove_overlapping_matches(valid_matches) + after = len(matches) + if before != after: + logger.info("Filtered out %s matches because matched subgraphs are overlapping", before - after) + + logger.info("Matches returned: %s", matches) + + return matches diff --git a/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/source_matcher_utils.py b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/source_matcher_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d5060589af5d22563610a9d790c1ec492a8f9c15 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/fx/passes/utils/source_matcher_utils.py @@ -0,0 +1,143 @@ +from dataclasses import dataclass, field +from torch.fx.graph import Graph +from torch.fx.node import Node +from torch.fx._compatibility import compatibility +from typing import Dict, List, Any, Type, Optional, Callable +import logging +import os + + +__all__ = ['get_source_partitions', 'check_subgraphs_connected', 'SourcePartition'] + +# Set`PYTORCH_MATCHER_LOGLEVEL=INFO` to see debug logs +def _init_logger(): + logger = logging.getLogger(__name__) + + level = os.environ.get('PYTORCH_MATCHER_LOGLEVEL', 'WARNING').upper() + logger.setLevel(level) + console = logging.StreamHandler() + formatter = logging.Formatter("%(filename)s > %(message)s") + console.setFormatter(formatter) + console.setLevel(level) + # add the handlers to the logger + logger.addHandler(console) + logger.propagate = False + return logger + +logger = _init_logger() + + +@compatibility(is_backward_compatible=False) +@dataclass 
@compatibility(is_backward_compatible=False)
@dataclass
class SourcePartition:
    # The nodes that make up this partition.
    nodes: List[Node]

    # The source (function or leaf module type) these nodes decomposed from.
    source: Any

    # Nodes outside the partition that feed into it.
    input_nodes: List[Node] = field(default_factory=list)

    # Partition nodes that are consumed by nodes outside of the partition.
    output_nodes: List[Node] = field(default_factory=list)

    # get_attr nodes (parameters) used inside the partition.
    params: List[Node] = field(default_factory=list)


@compatibility(is_backward_compatible=False)
def get_source_partitions(
    graph: Graph,
    wanted_sources: List[Any],
    filter_fn: Optional[Callable[[Node], bool]] = None,
) -> Dict[Any, List[SourcePartition]]:
    """Group the nodes of `graph` into partitions keyed by their source.

    Args:
        graph: The graph we want to partition.
        wanted_sources: List of sources of nodes that were decomposed from this
            source. This can be a function (ex. torch.nn.functional.linear) or a
            leaf module type (ex. torch.nn.Linear).
        filter_fn: Optional predicate; a named partition is kept only when every
            one of its nodes satisfies the predicate.

    Returns:
        Dictionary mapping each given source to a list of SourcePartitions that
        correspond to the lists of nodes that were decomposed from that source.
    """
    partitions_by_source: Dict[Type, Dict[str, List[Node]]] = {}

    for node in graph.nodes:
        # The "source_fn" metadata is a (unique name, source) pair, where the
        # source is the decomposed-from function or leaf module type.
        source_fn = node.meta.get("source_fn", None)
        if source_fn is None or source_fn[1] not in wanted_sources:
            continue

        by_name = partitions_by_source.setdefault(source_fn[1], {})
        by_name.setdefault(source_fn[0], []).append(node)

    def make_partition(nodes: List[Node], module_type: Type) -> SourcePartition:
        # Classify each node's neighborhood relative to the partition.
        input_nodes = {
            arg
            for node in nodes
            for arg in node.args
            if isinstance(arg, Node) and arg not in nodes
        }
        params = {node for node in nodes if node.op == "get_attr"}
        output_nodes = {
            node for node in nodes if any(user not in nodes for user in node.users)
        }

        return SourcePartition(
            nodes,
            module_type,
            list(input_nodes),
            list(output_nodes),
            list(params),  # type: ignore[arg-type]
        )

    if filter_fn:
        # Drop every named partition containing a node that fails the predicate.
        partitions_by_source = {
            tp: {
                name: partition
                for name, partition in by_name.items()
                if all(map(filter_fn, partition))
            }
            for tp, by_name in partitions_by_source.items()
        }

    return {
        tp: [make_partition(partition, tp) for partition in by_name.values()]
        for tp, by_name in partitions_by_source.items()
    }
there exists a node + in B that uses a node in A (not the other way around). + """ + + for node in reversed(subgraph1.nodes): + for user in node.users.keys(): + if user in subgraph2.nodes: + return True + return False diff --git a/llava_next/lib/python3.10/site-packages/torch/lib/libshm.so b/llava_next/lib/python3.10/site-packages/torch/lib/libshm.so new file mode 100644 index 0000000000000000000000000000000000000000..7511103dce417e9896727a12850564fb40f54056 Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/torch/lib/libshm.so differ diff --git a/llava_next/lib/python3.10/site-packages/torch/lib/libtorch.so b/llava_next/lib/python3.10/site-packages/torch/lib/libtorch.so new file mode 100644 index 0000000000000000000000000000000000000000..251aa0c9119b782f1397abdee637b369e2746a8b --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/lib/libtorch.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ada04eead56bb30e69863d4ffaad087095e0d9f0210ab81f3054310eba9547ca +size 196113 diff --git a/llava_next/lib/python3.10/site-packages/torch/sparse/_semi_structured_conversions.py b/llava_next/lib/python3.10/site-packages/torch/sparse/_semi_structured_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..2f07a03add3e7b1a90d7b4bb6595f30f4a8d385e --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/sparse/_semi_structured_conversions.py @@ -0,0 +1,337 @@ +import torch + + +def _sparse_semi_structured_from_dense(dense): + if dense.dim() != 2: + raise RuntimeError( + f"Expected 2-dimensional dense tensor, got {dense.dim()}-dimensional tensor" + ) + + m, k = dense.shape + device = dense.device + + meta_dtype = torch.int8 + if dense.dtype == torch.int8: + meta_dtype = torch.int32 + elif dense.dtype in [torch.half, torch.bfloat16]: + meta_dtype = torch.int16 + else: + raise RuntimeError(f"Invalid datatype {dense.dtype} of dense matrix") + quadbits_per_meta_elem = meta_dtype.itemsize * 8 // 
4 + if quadbits_per_meta_elem not in (4, 8): + raise RuntimeError("Invalid number of elements per meta element calculated") + + if m % 32 != 0: + raise RuntimeError( + f"Number rows columns of dense matrix {m} must be divisible by 32" + ) + if k % (4 * quadbits_per_meta_elem) != 0: + raise RuntimeError( + f"Number of columns of dense matrix {k} must be divisible by {4 * quadbits_per_meta_elem}" + ) + meta_ncols = k // (4 * quadbits_per_meta_elem) + + dense_4 = dense.view(-1, k // 4, 4) + m0, m1, m2, m3 = (dense_4 != 0).unbind(-1) + + # Encoding quadruples of True/False values as follows: + # [True, True, False, False] -> 0b0100 + # [True, False, True, False] -> 0b1000 + # [False, True, True, False] -> 0b1001 + # [True, False, False, True ] -> 0b1100 + # [False, True, False, True ] -> 0b1101 + # [False, False, True, True ] -> 0b1110 + # Thus, lower two bits in the encoding are index of the True value + # at the lowest index in the quadruple, and the higher two bits in + # the encoding are index of the other True value in the quadruple. + # In case there are less than two True values, than False value or + # values at some index or indices are considered True for the + # encoding. In case there are more than two True values, then the + # excess True value(s) at some indices are considered False for + # the encoding. 
The exact encodings used for these cases are as + # follows: + # [False, False, False, False] -> 0b1110 + # [False, False, False, True ] -> 0b1110 + # [False, False, True, False] -> 0b1110 + # [False, True, False, False] -> 0b1101 + # [False, True, True, True ] -> 0b1001 + # [True, False, False, False] -> 0b1100 + # [True, False, True, True ] -> 0b1000 + # [True, True, False, True ] -> 0b0100 + # [True, True, True, False] -> 0b1000 + # [True, True, True, True ] -> 0b1000 + # These particular encodings are chosen, with the help of Espresso + # logic minimizer software, for the purpose of minimization of + # corresponding Boolean functions, that translate non-zero flags + # into encoding bits. + + bit0 = ~m0 & m1 + bit1 = ~m0 & ~m1 + bit2 = bit1 | ~m2 + bit3 = bit0 | ~m1 | m2 + idxs0 = bit0 | (bit1.to(torch.int64) << 1) + idxs1 = bit2 | (bit3.to(torch.int64) << 1) + + sparse0 = dense_4.gather(-1, idxs0.unsqueeze(-1)) + sparse1 = dense_4.gather(-1, idxs1.unsqueeze(-1)) + sparse = torch.stack((sparse0, sparse1), dim=-1).view(m, k // 2) + + meta_4 = idxs0 | (idxs1 << 2) + meta_n = meta_4.view((-1, meta_ncols, quadbits_per_meta_elem)).to(meta_dtype) + + if quadbits_per_meta_elem == 4: + meta = ( + meta_n[:, :, 0] + | (meta_n[:, :, 1] << 4) + | (meta_n[:, :, 2] << 8) + | (meta_n[:, :, 3] << 12) + ) + elif quadbits_per_meta_elem == 8: + meta = ( + meta_n[:, :, 0] + | (meta_n[:, :, 1] << 4) + | (meta_n[:, :, 2] << 8) + | (meta_n[:, :, 3] << 12) + | (meta_n[:, :, 4] << 16) + | (meta_n[:, :, 5] << 20) + | (meta_n[:, :, 6] << 24) + | (meta_n[:, :, 7] << 28) + ) + + # Metadata values are now to be reshuffled in a way given in + # reorder_meta() function, in + # tools/util/include/cutlass/util/host_reorder.h file of CUTLASS + # source tree. 
Furthermore, CUTLASS template for sparse GEMM + # decides upon layout of this matrix, and at the moment for the + # sparse GEMM executed on tensor cores, this is layout described + # by ColumnMajorInterleaved<2> data structure, in + # include/cutlass/layout/matrix.h of CUTLASS source tree. The + # reordering of meta matrix into meta_reordered matrix calculated + # according to these segments of CUTLASS code is given below. + # However, this calculation produces offsets for scatter access + # from metadata matrix to redordered metadata matrix, and gather + # pattern is more efficient. For this reason, the scatter offsets + # are reverted and printed, through enabling commented block at + # the end of following code. Resulting gather offsets are then + # analyzed, on several (m, k) value pairs (in particular: (32, + # 128), (32, 256), (64, 128) and (64, 256)), and the code that + # follows this comment is written to reproduce these gather offsets. + # + # dst_rows = torch.arange(0, m, device=device)[:, None].repeat(1, meta_ncols) + # dst_cols = torch.arange(0, meta_ncols, device=device).repeat(m, 1) + # + # # Reorder the rows, then swizzle the 2x2 blocks. + # group = 32 if meta_dtype.itemsize == 2 else 16 + # interweave = 4 if meta_dtype.itemsize == 2 else 2 + # dst_rows = ( + # dst_rows // group * group + # + (dst_rows % 8) * interweave + # + (dst_rows % group) // 8 + # ) + # + # topright = ((dst_rows % 2 == 0) & (dst_cols % 2 == 1)).to(torch.int8) + # bottomleft = ((dst_rows % 2 == 1) & (dst_cols % 2 == 0)).to(torch.int8) + # dst_rows += topright - bottomleft + # dst_cols -= topright - bottomleft + # + # # Assumed that meta tensor is to be stored in CUTLASS + # # InterleavedColumnMajor layout, and reverse engineered + # # corresponding code to store values into this tensor. 
+ # interleave = 2 + # cols_maj = dst_cols // interleave + # cols_min = dst_cols % interleave + # meta_reordered_offsets = ( + # cols_maj * m * interleave + dst_rows * interleave + cols_min + # ) + # + # meta_reordered = torch.empty((m, meta_ncols), dtype=meta_dtype, device=device) + # meta_reordered.view(-1)[meta_reordered_offsets.view(-1)] = meta.view(-1) + # + # # Uncomment to have gather pattern for meta_reordered printed + # # + # #offsets = torch.empty( + # # (m, meta_ncols), dtype=meta_reordered_offsets.dtype, device=device + # #) + # #offsets.view(-1)[meta_reordered_offsets.view(-1)] = torch.arange( + # # 0, m * meta_ncols, dtype=meta_reordered_offsets.dtype, device=device + # #) + # #torch.set_printoptions(threshold=1000000) + # #print("------------------------------------------------------------") + # #print("dtype =", dtype, ", m =", m, ", k =", k, ", meta_ncols =", meta_ncols) + # #print(offsets.view(-1)) + # + + # No point to try to understand this code: as mentioned in the + # comment above it is written to reproduce gather offsets, as + # these would be calculated by CUTLASS, and to be efficient, but + # it contains several magic values and magic calculations that + # make it rather hard to read, let alone understand. 
+ if meta_dtype == torch.int32: + magic0 = 4 + magic1 = 32 + magic2 = 16 + magic3 = k // 2 + magic4 = [0, k // 4, 1, k // 4 + 1] + elif meta_dtype == torch.int16: + magic0 = 8 + magic1 = 64 + magic2 = 32 + magic3 = 2 * k + magic4 = [0, k // 2, 1, k // 2 + 1, k, 3 * k // 2, k + 1, 3 * k // 2 + 1] + tmp0 = torch.zeros(m * meta_ncols, dtype=torch.int64, device=device) + tmp1 = ( + tmp0.view(meta_ncols // 2, -1) + + torch.arange(0, meta_ncols, 2, device=device).view(meta_ncols // 2, 1) + ).view(-1, magic1) + tmp2 = ( + ( + torch.arange(0, 8, device=device).view(-1, 1) + * torch.ones((magic0,), dtype=torch.int64, device=device) + * meta_ncols + ) + .view(-1) + .repeat(m * meta_ncols // magic1) + .view(-1, magic1) + ) + tmp3 = (torch.arange(0, m // magic2, device=device).view(-1, 1) * magic3).repeat( + meta_ncols // 2, magic1 + ) + tmp4 = torch.tensor(magic4, device=device).repeat(tmp3.shape[0], 8) + meta_offsets = tmp1 + tmp2 + tmp3 + tmp4 + + meta_reordered = torch.gather(meta.view(-1), 0, meta_offsets.view(-1)).view( + m, meta_ncols + ) + return (sparse, meta_reordered) + + +def _sparse_semi_structured_to_dense(sparse, meta_reordered): + if sparse.dim() != 2: + raise RuntimeError( + f"Expected 2-dimensional sparse tensor, got {sparse.dim()}-dimensional tensor" + ) + + m, k = sparse.shape + device = sparse.device + + if meta_reordered.dim() != 2: + raise RuntimeError( + f"Expected 2-dimensional meta tensor, got {meta_reordered.dim()}-dimensional tensor" + ) + if meta_reordered.device != device: + raise RuntimeError( + f"Expected meta matrix to be on {device} device, got matrix on {meta_reordered.device} device" + ) + + meta_dtype = meta_reordered.dtype + if meta_dtype not in (torch.int16, torch.int32): + raise RuntimeError(f"Invalid datatype {meta_dtype} of meta matrix") + quadbits_per_meta_elem = meta_dtype.itemsize * 8 // 4 + + meta_nrows, meta_ncols = meta_reordered.shape + if meta_nrows != m: + raise RuntimeError( + f"Number of rows of meta matrix {meta_nrows} must 
be equal to number of columns of spase matrix {m}" + ) + if meta_ncols * 4 * quadbits_per_meta_elem != 2 * k: + raise RuntimeError( + f"Number of columns of sparse matrix {k} different from the {meta_ncols * 4 * quadbits_per_meta_elem // 2}, " + "expected according to the number of columns of meta matrix" + ) + + if meta_dtype == torch.int32: + magic0 = 4 + magic1 = [0, 1, 32, 33] + elif meta_dtype == torch.int16: + magic0 = 8 + magic1 = [0, 1, 4, 5] + tmp1 = torch.tensor([0, 2], dtype=torch.int64, device=device).repeat( + meta_nrows, meta_ncols // 2 + ) + tmp2 = ( + (torch.arange(0, meta_ncols // 2, device=device) * 2 * meta_nrows) + .view(-1, 1) + .repeat(1, 2) + .view(-1) + .repeat(m, 1) + ) + tmp3 = ( + (torch.arange(0, 8, device=device) * magic0) + .view(-1, 1) + .repeat(m // 8, meta_ncols) + ) + tmp4 = ( + torch.tensor(magic1, device=device) + .view(-1, 1) + .repeat(1, 8 * meta_ncols) + .repeat(meta_nrows // 32, 1) + .view(meta_nrows, meta_ncols) + ) + tmp5 = ( + (torch.arange(0, meta_nrows // 32, device=device) * 64) + .view(-1, 1) + .repeat(1, 32 * meta_ncols) + .view(meta_nrows, meta_ncols) + ) + meta_offsets = tmp1 + tmp2 + tmp3 + tmp4 + tmp5 + + meta = torch.gather(meta_reordered.view(-1), 0, meta_offsets.view(-1)).view( + m, meta_ncols + ) + + meta_2 = torch.empty( + (m, meta_ncols, 2 * quadbits_per_meta_elem), dtype=meta_dtype, device=device + ) + if quadbits_per_meta_elem == 4: + meta_2[:, :, 0] = meta & 0b11 + meta_2[:, :, 1] = (meta >> 2) & 0b11 + meta_2[:, :, 2] = (meta >> 4) & 0b11 + meta_2[:, :, 3] = (meta >> 6) & 0b11 + meta_2[:, :, 4] = (meta >> 8) & 0b11 + meta_2[:, :, 5] = (meta >> 10) & 0b11 + meta_2[:, :, 6] = (meta >> 12) & 0b11 + meta_2[:, :, 7] = (meta >> 14) & 0b11 + elif quadbits_per_meta_elem == 8: + meta_2[:, :, 0] = meta & 0b11 + meta_2[:, :, 1] = (meta >> 2) & 0b11 + meta_2[:, :, 2] = (meta >> 4) & 0b11 + meta_2[:, :, 3] = (meta >> 6) & 0b11 + meta_2[:, :, 4] = (meta >> 8) & 0b11 + meta_2[:, :, 5] = (meta >> 10) & 0b11 + meta_2[:, 
:, 6] = (meta >> 12) & 0b11 + meta_2[:, :, 7] = (meta >> 14) & 0b11 + meta_2[:, :, 8] = (meta >> 16) & 0b11 + meta_2[:, :, 9] = (meta >> 18) & 0b11 + meta_2[:, :, 10] = (meta >> 20) & 0b11 + meta_2[:, :, 11] = (meta >> 22) & 0b11 + meta_2[:, :, 12] = (meta >> 24) & 0b11 + meta_2[:, :, 13] = (meta >> 26) & 0b11 + meta_2[:, :, 14] = (meta >> 28) & 0b11 + meta_2[:, :, 15] = (meta >> 30) & 0b11 + + dense_offsets = meta_2.view(-1) + ( + torch.arange(0, m * k // 2, device=device) * 4 + ).view(-1, 1).repeat(1, 2).view(-1) + + dense = torch.zeros((m * 2 * k,), dtype=sparse.dtype, device=device) + dense.scatter_(0, dense_offsets, sparse.view(-1)) + + return dense.view(m, 2 * k) + + +def sparse_semi_structured_from_dense(dense): + from torch._dynamo.utils import is_compile_supported + if is_compile_supported(dense.device.type): + kernel = torch.compile(_sparse_semi_structured_from_dense) + return kernel(dense) + + return _sparse_semi_structured_from_dense(dense) + + +def sparse_semi_structured_to_dense(sparse, meta_reordered): + from torch._dynamo.utils import is_compile_supported + if is_compile_supported(sparse.device.type): + kernel = torch.compile(_sparse_semi_structured_to_dense) + return kernel(sparse, meta_reordered) + + return _sparse_semi_structured_to_dense(sparse, meta_reordered) diff --git a/llava_next/lib/python3.10/site-packages/torch/sparse/_triton_ops.py b/llava_next/lib/python3.10/site-packages/torch/sparse/_triton_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..57c9ac0168afd2ce4c9c6062df33df1e031c1361 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/sparse/_triton_ops.py @@ -0,0 +1,902 @@ +import math + +import torch +from torch._inductor.cuda_properties import get_device_capability + + +def _has_triton(): + if not torch.cuda.is_available(): + return False + try: + import triton + + return triton is not None and get_device_capability() >= (7, 0) + except ImportError: + return False + + +def check(cond, msg): + if 
def check(cond, msg):
    """Raise ``ValueError(msg)`` unless ``cond`` holds."""
    if cond:
        return
    raise ValueError(msg)


def check_bsr_layout(f_name, t):
    """Validate that ``t`` uses the BSR sparse layout."""
    check(
        t.layout == torch.sparse_bsr,
        f"{f_name}(): only BSR sparse format is supported for the sparse argument.",
    )


def check_device(f_name, t, device):
    """Validate that ``t`` lives on ``device`` and that the device is a CUDA one."""
    on_expected_gpu = t.device == device and t.device.type == "cuda"
    check(
        on_expected_gpu,
        f"{f_name}(): all inputs are expected to be on the same GPU device.",
    )


def check_mm_compatible_shapes(f_name, lhs, rhs):
    """Validate that ``lhs @ rhs`` is a well-formed matrix product."""
    check(
        lhs.dim() >= 2 and rhs.dim() >= 2,
        f"{f_name}(): all inputs involved in the matrix product are expected to be at least 2D, "
        f"but got lhs.dim() == {lhs.dim()} and rhs.dim() == {rhs.dim()}."
    )

    m, kl = lhs.shape[-2:]
    kr, n = rhs.shape[-2:]

    check(
        kl == kr,
        f"{f_name}(): arguments' sizes involved in the matrix product are not compatible for matrix multiplication, "
        f"got lhs.shape[-1] == {kl} which is not equal to rhs.shape[-2] == {kr}.",
    )


def check_dtype(f_name, t, dtype, *additional_dtypes):
    """Validate that ``t`` has dtype ``dtype`` and that the dtype is supported."""
    # NOTE(review): `tuple(*additional_dtypes)` only works when callers pass at
    # most one extra argument that is itself an iterable of dtypes -- confirm
    # at call sites before changing this expression.
    supported = (torch.half, torch.bfloat16, torch.float) + tuple(*additional_dtypes)
    check(
        t.dtype == dtype and t.dtype in supported,
        f"{f_name}(): all inputs are expected to be of the same dtype "
        f"and one of (half, bfloat16, float32) or {additional_dtypes}, "
        f"but got dtype == {t.dtype}.",
    )
def make_triton_contiguous(t):
    """Return a contiguous copy of ``t`` when both trailing strides exceed 1, else ``t`` itself."""
    needs_copy = t.stride(-2) > 1 and t.stride(-1) > 1
    return t.contiguous() if needs_copy else t


def broadcast_batch_dims(f_name, *tensors):
    """Broadcast the batch (all-but-last-two) dims of ``tensors``; report failure via check()."""
    batch_shapes = tuple(t.shape[:-2] for t in tensors)
    try:
        return torch.broadcast_shapes(*batch_shapes)
    except Exception:
        check(False, f"{f_name}(): inputs' batch dimensions are not broadcastable!")


def slicer(dim, slice_range, *tensors):
    """Yield each tensor sliced with ``slice_range`` along dimension ``dim``."""
    for tensor in tensors:
        index = [slice(None)] * tensor.dim()
        index[dim] = slice_range
        yield tensor[index]


def multidim_slicer(dims, slices, *tensors):
    """Yield each tensor sliced along several dimensions at once (``None`` dims are skipped)."""
    for tensor in tensors:
        index = [slice(None)] * tensor.dim()
        for dim, dim_slice in zip(dims, slices):
            if dim is not None:
                index[dim] = dim_slice
        yield tensor[index]


def ptr_stride_extractor(*tensors):
    """Yield each tensor followed by all of its strides, flattened into one stream."""
    for tensor in tensors:
        yield tensor
        yield from tensor.stride()
def launch_kernel(kernel, tensor_dims_map, full_grid, grid_blocks=None):
    """Launch ``kernel`` over ``full_grid``, partitioned into sub-grids capped by CUDA limits."""
    # CUDA's maximum grid, reversed to match the (z, y, x) iteration order below.
    # cuda_max_grid = (2 ** 31 - 1, 2 ** 16 - 1, 2 ** 16 - 1)
    cuda_max_grid = (2147483647, 65535, 65535)[::-1]
    if grid_blocks is None:
        grid_blocks = cuda_max_grid
    else:

        def valid_grid_dim(g, mg):
            # An unspecified dimension defaults to the maximum; otherwise clamp to [1, mg].
            return mg if g is None else max(1, min(g, mg))

        grid_blocks = tuple(
            valid_grid_dim(g, mg) for g, mg in zip(grid_blocks, cuda_max_grid)
        )  # type: ignore[assignment]

    for grid, *sliced_tensors in grid_partitioner(full_grid, grid_blocks, tensor_dims_map):
        kernel(grid, *sliced_tensors)


def prepare_inputs(bsr, *dense_tensors):
    """Normalize a BSR tensor and companion dense tensors to a single squashed batch dim."""
    # Introduce a fake batch dimension if not present, for uniform handling.
    crow_indices = bsr.crow_indices().unsqueeze(0)
    col_indices = bsr.col_indices().unsqueeze(0)
    values = make_triton_contiguous(bsr.values().unsqueeze(0))
    dense = [make_triton_contiguous(t.unsqueeze(0)) for t in dense_tensors]

    # Broadcasted shape of all batch dimensions.
    batch_shape = torch.broadcast_shapes(
        values.shape[:-3], *(t.shape[:-2] for t in dense)
    )

    def batch_broadcast_and_squash(t, batch_dims, invariant_dims):
        # Broadcast the batch dims, then flatten them into one leading dim.
        return t.broadcast_to(batch_dims + invariant_dims).flatten(
            0, len(batch_dims) - 1
        )

    crow_indices = batch_broadcast_and_squash(crow_indices, batch_shape, (-1,))
    col_indices = batch_broadcast_and_squash(col_indices, batch_shape, (-1,))
    values = batch_broadcast_and_squash(values, batch_shape, values.shape[-3:])
    dense = [
        batch_broadcast_and_squash(t, batch_shape, t.shape[-2:]) for t in dense
    ]

    return crow_indices, col_indices, values, *dense


def broadcast_batch_dims_bsr(f_name, bsr, *tensors):
    """Return ``bsr`` rebuilt with its batch dimensions broadcast against ``tensors``."""
    batch_shape = broadcast_batch_dims(f_name, bsr, *tensors)

    crow_indices = bsr.crow_indices().broadcast_to(batch_shape + (-1,))
    col_indices = bsr.col_indices().broadcast_to(batch_shape + (-1,))
    values = bsr.values().broadcast_to(batch_shape + bsr.values().shape[-3:])
    size = batch_shape + bsr.shape[-2:]
    return torch.sparse_compressed_tensor(crow_indices, col_indices, values, size=size, layout=bsr.layout)


# NOTE: this function will ALWAYS create a view
def tile_to_blocksize(t, blocksize):
    """View the trailing (m, n) dims of ``t`` as (m//bm, n//bn, bm, bn) tiles."""
    *batch, rows, cols = t.shape
    tiled = batch + [
        rows // blocksize[0],
        blocksize[0],
        cols // blocksize[1],
        blocksize[1],
    ]
    # Reshape splits each matrix dim into (tile index, in-tile index); the
    # transpose groups the two tile indices ahead of the two in-tile indices.
    return t.reshape(tiled).transpose(-3, -2)
+ row_nnz = nnz_offset_next - nnz_offset + if row_nnz == 0: + return + + row_block_arange = tl.arange(0, BLOCKSIZE_ROW) + col_block_arange = tl.arange(0, BLOCKSIZE_COL) + + # Pointers are set to the first block of the current row. + values_block_ptrs = ( + values_ptr + + values_batch_stride * batch_pid + + values_nnz_stride * nnz_offset + + values_row_block_stride * row_block_arange[:, None] + + values_col_block_stride * col_block_arange[None, :] + ) + + col_index_nnz_ptr = ( + col_indices_ptr + + col_indices_batch_stride * batch_pid + + col_indices_stride * nnz_offset + ) + + # Advance mat1 to the current tiled row, ignore columns. + mat1_block_ptrs = ( + mat1_ptr + + mat1_batch_stride * batch_pid + + mat1_tiled_row_stride * row_block_pid + + mat1_row_block_stride * row_block_arange[:, None] + ) + + # Advance mat2 in batch and block col dimension. + mat2_block_ptrs = ( + mat2_ptr + + mat2_batch_stride * batch_pid + + mat2_col_block_stride * col_block_arange[None, :] + ) + + k_tile_arange = tl.arange(0, TILE_K) + for _ in range(row_nnz): + acc_block = tl.zeros((BLOCKSIZE_ROW, BLOCKSIZE_COL), dtype=acc_dtype) + + # find column block index + col_block = tl.load(col_index_nnz_ptr) + + for k_tile in range(0, k, TILE_K): + k_offsets = k_tile + k_tile_arange + mask_k = k_offsets < k + + mat1_block = tl.load( + mat1_block_ptrs + + mat1_col_block_stride * k_offsets[None, :], + mask=mask_k[None, :], other=0.0 + ) + + mat2_block = tl.load( + mat2_block_ptrs + + mat2_tiled_col_stride * col_block + + mat2_row_block_stride * k_offsets[:, None], + mask=mask_k[:, None], other=0.0 + ) + + acc_block += tl.dot(mat1_block, mat2_block, allow_tf32=allow_tf32) + + if IS_BETA_ZERO: + acc_block *= alpha + else: + acc_block = alpha * acc_block + beta * tl.load(values_block_ptrs) + + # write result + tl.store(values_block_ptrs, acc_block.to(values_ptr.dtype.element_ty)) + + # advance val/col_index ptrs to the next block in the row. 
+ values_block_ptrs += values_nnz_stride + col_index_nnz_ptr += col_indices_stride + + @triton.jit + def _bsr_strided_dense_rowspace_kernel( + BLOCKSIZE_ROW: tl.constexpr, + BLOCKSIZE_COL: tl.constexpr, + # values prologue + values_ptr, + values_batch_stride, + values_nnz_stride, + values_row_block_stride, + values_col_block_stride, + # values epilogue + # crow_indices prologue + crow_indices_ptr, + crow_indices_batch_stride, + crow_indices_stride, + # crow_indices epilogue + # col_indices prologue + col_indices_ptr, + col_indices_batch_stride, + col_indices_stride, + # col_indices epilogue + # dense prologue + dense_ptr, + dense_batch_stride, + dense_tiled_row_stride, + dense_tiled_col_stride, + dense_row_block_stride, + dense_col_block_stride, + # dense epilogue + # output prologue + output_ptr, + output_batch_stride, + output_tiled_row_stride, + output_tiled_col_stride, + output_row_block_stride, + output_col_block_stride, + # output epilogue + acc_dtype: tl.constexpr, + allow_tf32: tl.constexpr, + GROUP_SIZE_ROW: tl.constexpr, + ): + batch_pid = tl.program_id(axis=2) + row_block_pid = tl.program_id(axis=0) + col_block_pid = tl.program_id(axis=1) + n_block_rows = tl.num_programs(axis=0) + n_block_cols = tl.num_programs(axis=1) + + row_block_pid, col_block_pid = tl.swizzle2d( + row_block_pid, col_block_pid, n_block_rows, n_block_cols, GROUP_SIZE_ROW + ) + + crow_indices_offset_ptr = ( + crow_indices_ptr + + crow_indices_batch_stride * batch_pid + + crow_indices_stride * row_block_pid + ) + nnz_offset = tl.load(crow_indices_offset_ptr) + nnz_offset_next = tl.load(crow_indices_offset_ptr + crow_indices_stride) + + # Compute nnz for the row with number row_block_pid. + # If it is zero, skip the row. + row_nnz = nnz_offset_next - nnz_offset + if row_nnz == 0: + return + + row_block_arange = tl.arange(0, BLOCKSIZE_ROW) + col_block_arange = tl.arange(0, BLOCKSIZE_COL) + + # Pointers are set to the first block of the current row. 
+ values_block_ptrs = ( + values_ptr + + values_batch_stride * batch_pid + + values_nnz_stride * nnz_offset + + values_row_block_stride * row_block_arange[:, None] + + values_col_block_stride * col_block_arange[None, :] + ) + + # NOTE: dense is advanced into all dimensions but the tiled row one. + # That will be advanced in the loop according to values in col_indices. + dense_block_ptrs = ( + dense_ptr + + dense_batch_stride * batch_pid + + dense_tiled_col_stride * col_block_pid + + dense_row_block_stride * col_block_arange[:, None] + + dense_col_block_stride * row_block_arange[None, :] + ) + + # Pointers are set to exact write-to locations + output_ptrs = ( + output_ptr + + output_batch_stride * batch_pid + + output_tiled_row_stride * row_block_pid + + output_tiled_col_stride * col_block_pid + + output_row_block_stride * row_block_arange[:, None] + + output_col_block_stride * row_block_arange[None, :] + ) + + # Set pointer to the first nonzero element in the current row + col_index_nnz_ptr = ( + col_indices_ptr + + col_indices_batch_stride * batch_pid + + col_indices_stride * nnz_offset + ) + + output_acc_block = tl.zeros((BLOCKSIZE_ROW, BLOCKSIZE_ROW), dtype=acc_dtype) + for _ in range(row_nnz): + values_block = tl.load(values_block_ptrs) + + # find which row of dense needs to get loaded + # for multiplication with values_block. 
+ dense_row_idx = tl.load(col_index_nnz_ptr) + dense_block = tl.load(dense_block_ptrs + dense_tiled_row_stride * dense_row_idx) + + # do block mm + output_acc_block += tl.dot(values_block, dense_block, allow_tf32=allow_tf32) + + # move val/col_index ptrs to the next block in the row + values_block_ptrs += values_nnz_stride + col_index_nnz_ptr += col_indices_stride + + # write back the result + tl.store(output_ptrs, output_acc_block.to(output_ptr.dtype.element_ty)) + + + def _run_dense_rowspace_kernel( + blocksize, values, crow_indices, col_indices, dense, output, max_grid + ): + n_batches = dense.size(0) + n_block_rows = crow_indices.size(-1) - 1 + n_block_cols = dense.size(-3) + + full_grid = (n_batches, n_block_cols, n_block_rows) + if max_grid is not None: + grid_blocks = tuple(max_grid[:3][::-1]) + (None,) * (3 - len(max_grid[:3])) + else: + grid_blocks = None + tensor_dims_map = { + values: (0, None, None), + crow_indices: (0, None, -1), + col_indices: (0, None, None), + dense: (0, -3, None), + output: (0, -3, -4) + } + if values.dtype in (torch.half, torch.bfloat16): + acc_dtype = tl.float32 + allow_tf32 = True + else: + acc_dtype = tl.float64 + allow_tf32 = False + + def kernel(grid, *sliced_tensors): + _bsr_strided_dense_rowspace_kernel[grid]( + *blocksize, + *ptr_stride_extractor(*sliced_tensors), + acc_dtype=acc_dtype, + allow_tf32=allow_tf32, + GROUP_SIZE_ROW=4, + num_stages=1, + num_warps=4 + ) + + launch_kernel(kernel, tensor_dims_map, full_grid, grid_blocks) + + + def _run_sampled_addmm_kernel( + alpha, beta, is_beta_zero, + blocksize, k, tile_k, + values, crow_indices, col_indices, + mat1, mat2, + max_grid + ): + n_batches = values.size(0) + n_block_rows = crow_indices.size(-1) - 1 + + full_grid = (n_batches, n_block_rows) + if max_grid is not None: + grid_blocks = tuple(max_grid[:2][::-1]) + (None,) * (2 - len(max_grid[:2])) + else: + grid_blocks = None + tensor_dims_map = { + values: (0, None), + crow_indices: (0, -1), + col_indices: (0, None), + 
mat1: (0, -4), + mat2: (0, None), + } + if values.dtype in (torch.half, torch.bfloat16): + acc_dtype = tl.float32 + allow_tf32 = True + else: + acc_dtype = tl.float64 + allow_tf32 = False + + def kernel(grid, *sliced_tensors): + _sampled_addmm_kernel[grid]( + alpha, beta, is_beta_zero, + *blocksize, k, tile_k, + *ptr_stride_extractor(*sliced_tensors), + acc_dtype=acc_dtype, + allow_tf32=allow_tf32, + num_stages=1, + num_warps=4 + ) + + launch_kernel(kernel, tensor_dims_map, full_grid, grid_blocks) + + + def sampled_addmm( + input: torch.Tensor, + mat1: torch.Tensor, + mat2: torch.Tensor, + *, + beta=1.0, + alpha=1.0, + out: Optional[torch.Tensor] = None, + skip_checks: bool = False, + max_grid: Optional[Tuple[Optional[int], Optional[int], Optional[int]]] = None, + ): + f_name = "sampled_addmm" + + check_bsr_layout(f_name, input) + input_broadcasted = broadcast_batch_dims_bsr(f_name, input, mat1, mat2) + + if not skip_checks: + check_device(f_name, mat1, input.device) + check_device(f_name, mat2, input.device) + if beta != 0.0 and input.dtype is torch.bool: + check( + False, + f"{f_name}(): having beta == {beta} not equal to 0.0 with boolean mask is not allowed." 
+ ) + if input.dtype is not torch.bool: + check_dtype(f_name, mat1, input.dtype) + check_dtype(f_name, mat2, input.dtype) + else: + check_dtype(f_name, mat1, mat2.dtype) + check_mm_compatible_shapes(f_name, mat1, mat2) + if out is not None: + check_bsr_layout(f_name, out) + check_device(f_name, out, mat1.device) + check_dtype(f_name, out, input.dtype) + check( + out.shape == input_broadcasted.shape + and out._nnz() == input._nnz(), + f"{f_name}(): Expects `out` to be of shape {input_broadcasted.shape} " + f"and with nnz equal to {input_broadcasted._nnz()} " + f"but got out.shape = {out.shape} and out.nnz = {out._nnz()}" + ) + + if out is None: + out = input_broadcasted.to(mat1.dtype, copy=True) + else: + out.copy_(input_broadcasted) + + if out.numel() == 0 or out._nnz() == 0: + return out + + blocksize = out.values().shape[-2:] + m = mat1.size(-2) + n = mat2.size(-1) + k = mat1.size(-1) + + # NOTE: (m, 0) @ (0, n) == zeros(m, n) + if alpha == 0.0 or k == 0: + out.values().mul_(beta) + return out + + # prepare inputs by reshaping them to be kernel-compatible + out_backup = out + crow_indices, col_indices, values, mat1, mat2 = prepare_inputs(out, mat1, mat2) + + mat1 = tile_to_blocksize(mat1, (blocksize[0], k)) + mat2 = tile_to_blocksize(mat2, (k, blocksize[1])) + tile_k = max(*blocksize) + + _run_sampled_addmm_kernel( + alpha, beta, beta == 0.0, + blocksize, k, tile_k, + values, crow_indices, col_indices, + mat1, mat2, + max_grid + ) + + # If nnz x block strides are not the same in out_backup.values and values, + # it means that out_backup.values and values are not the views of each other, + # so we have to copy. 
+ if out_backup.values().stride()[-3:] != values.stride()[-3:]: + out_backup.values().copy_(values.reshape(out_backup.values().shape)) + return out_backup + + + def bsr_dense_mm( + bsr: torch.Tensor, + dense: torch.Tensor, + *, + out: Optional[torch.Tensor] = None, + skip_checks: bool = False, + max_grid: Optional[Tuple[Optional[int], Optional[int], Optional[int]]] = None, + ): + f_name = "bsr_dense_mm" + if not skip_checks: + check_bsr_layout(f_name, bsr) + check_device(f_name, bsr, dense.device) + check_dtype(f_name, bsr, dense.dtype) + check_mm_compatible_shapes(f_name, bsr, dense) + + m = bsr.size(-2) + n = dense.size(-1) + row_block, col_block = bsr.values().shape[-2:] + check( + not n % row_block, + f"bsr_dense_mm(): dense.size(-1) == {n} should be divisible by " + f"blocksize[0] == {row_block}.", + ) + check_blocksize(f_name, (row_block, col_block)) + else: + m, kl = bsr.shape[-2:] + kr, n = dense.shape[-2:] + + original_batch_dims_broadcasted = broadcast_batch_dims(f_name, bsr, dense) + + if out is not None and not skip_checks: + expected_out_shape = original_batch_dims_broadcasted + (m, n) + check( + out.shape == expected_out_shape, + "bsr_dense_mm(): `out` argument has wrong shape, " + f"expected {expected_out_shape}, but got {out.shape}.", + ) + check( + out.is_contiguous() or out.transpose(-2, -1).is_contiguous(), + "bsr_dense_mm(): only row-major/col-major `out` arguments are supported, " + "i.e. (out.is_contiguous() or out.transpose(-2, -1).is_contiguous()) " + "should be True.", + ) + + # Allocate out + if out is None: + out = dense.new_empty(original_batch_dims_broadcasted + (m, n)) + + # Short circuit if lhs is zero + if bsr._nnz() == 0: + return out.zero_() + + blocksize = bsr.values().shape[-2:] + + # NOTE: out is contiguous, so prepare_inputs will create a view. + # out gets modified in-place, so we store a backup copy. + out_backup = out + + # prepare inputs by reshaping them to be kernel-compatible. 
+ crow_indices, col_indices, values, dense, out = prepare_inputs(bsr, dense, out) + + # "Blockify" the row dimension of dense with blocksize[1] + # since dense is on the rhs of matmul + dense = tile_to_blocksize(dense, blocksize[::-1]) + # "Blockify" the row dimension of out with blocksize[0] + # which is inherited from the bsr input. + # NOTE: tile_to_blocksize will create a view. + # NOTE: out.blocksize[-1] == dense.blocksize[-1], + # so it could be any value in [1, dense.shape[-1]). + # We need to probably use the largest possible blocksize + # so that it fits into SRAM. + out = tile_to_blocksize(out, (blocksize[0], blocksize[0])) + + # Launch kernel + _run_dense_rowspace_kernel(blocksize, values, crow_indices, col_indices, dense, out, max_grid) + + return out_backup + + + @triton.jit + def _bsr_softmax_kernel( + crow_indices_ptr, + crow_indices_batch_stride, + crow_indices_stride, + values_ptr, + values_batch_stride, + values_row_block_stride, + values_nnz_col_block_stride, + row_block, col_block, + MAX_ROW_NNZ: tl.constexpr, + TILE: tl.constexpr + ): + batch_pid = tl.program_id(axis=2) + row_block_offset_pid = tl.program_id(axis=1) + row_block_pid = tl.program_id(axis=0) + + crow_indices_offset_ptr = ( + crow_indices_ptr + + crow_indices_batch_stride * batch_pid + + crow_indices_stride * row_block_pid + ) + nnz_offset = tl.load(crow_indices_offset_ptr) + nnz_offset_next = tl.load(crow_indices_offset_ptr + crow_indices_stride) + + # Compute nnz for the row with number row_block_pid. + # If it is zero, skip the row. 
+ row_nnz = nnz_offset_next - nnz_offset + if row_nnz == 0: + return + + row_arange = tl.arange(0, TILE) + mask = row_arange < row_nnz * col_block + + curr_row_values_ptrs = ( + values_ptr + + values_batch_stride * batch_pid + + values_row_block_stride * row_block_offset_pid + + nnz_offset * col_block + ) + + # find max in the row + row_tile = tl.load(curr_row_values_ptrs + row_arange, mask=mask, other=-float('inf')).to(tl.float32) + max_row_value = tl.max(row_tile, axis=0) + for _ in range(TILE, MAX_ROW_NNZ, TILE): + row_arange += TILE + mask = row_arange < row_nnz * col_block + row_tile = tl.load(curr_row_values_ptrs + row_arange, mask=mask, other=-float('inf')).to(tl.float32) + curr_max_row_value = tl.max(row_tile, axis=0) + max_row_value = tl.where(max_row_value > curr_max_row_value, max_row_value, curr_max_row_value) + + # find denominator for stable softmax + num = tl.exp(row_tile - max_row_value) + denom = tl.sum(num, axis=0) + for _ in range(TILE, MAX_ROW_NNZ, TILE): + row_arange -= TILE + mask = row_arange < row_nnz * col_block + row_tile = tl.load(curr_row_values_ptrs + row_arange, mask=mask, other=-float('inf')).to(tl.float32) + num = tl.exp(row_tile - max_row_value) + denom += tl.sum(num, axis=0) + + # populate output + tl.store(curr_row_values_ptrs + row_arange, (num / denom).to(values_ptr.dtype.element_ty), mask=mask) + for _ in range(TILE, MAX_ROW_NNZ, TILE): + row_arange += TILE + mask = row_arange < row_nnz * col_block + row_tile = tl.load(curr_row_values_ptrs + row_arange, mask=mask, other=-float('inf')).to(tl.float32) + num = tl.exp(row_tile - max_row_value) + tl.store(curr_row_values_ptrs + row_arange, (num / denom).to(values_ptr.dtype.element_ty), mask=mask) + + + def bsr_softmax(input, max_row_nnz=None): + f_name = "bsr_softmax" + + check_bsr_layout(f_name, input) + check_dtype(f_name, input, input.dtype) + + if input._nnz() == 0 or input.numel() == 0: + return input.clone() + + m, n = input.shape[-2:] + nnz = input._nnz() + row_block, 
col_block = input.values().shape[-2:] + + if max_row_nnz is None: + max_row_nnz = triton.next_power_of_2(n) + else: + max_row_nnz = triton.next_power_of_2(max_row_nnz) + + crow_indices = input.crow_indices().unsqueeze(0).flatten(0, -2) + # reshape values from + # (b1, ..., bn, nnz, row_block, col_block) to + # (b1 * ... * bn, row_block, nnz * col_block). + # This simplifies batch dim manipulation and unlocks + # the possibility to access all nnzs in any given row. + if input.values().transpose(-3, -2).is_contiguous(): + # Need to clone to avoid `contiguous` returning a view. + values = input.values().clone() + else: + values = input.values() + values = values.transpose(-3, -2).contiguous().unsqueeze(0).flatten(0, -4).reshape(-1, row_block, nnz * col_block) + full_grid = (values.shape[0], row_block, m // row_block) + grid_blocks = None + tensor_dims_map = { + # We span nnz number of blocks, not nnz + 1, + # hence crow_indices[..., :-1] + crow_indices[..., :-1]: (0, None, -1), + values: (0, None, None), + } + + def kernel(grid, *sliced_tensors): + _bsr_softmax_kernel[grid]( + *ptr_stride_extractor(*sliced_tensors), + row_block, col_block, + max_row_nnz, + # Triton's max numel is bounded by 2 ** 17. + min(2 ** 17, max_row_nnz) + ) + + launch_kernel(kernel, tensor_dims_map, full_grid, grid_blocks) + + values = values.reshape(-1, row_block, nnz, col_block).transpose(-3, -2).reshape(*input.values().shape) + + return torch.sparse_compressed_tensor( + input.crow_indices().clone(), + input.col_indices().clone(), + values, + size=input.shape, + layout=input.layout + ) + + def _scaled_dot_product_attention( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attn_mask: Optional[torch.Tensor], + dropout_p: float = 0.0, + is_causal: bool = False, + scale: Optional[float] = None + ): + f_name = "_scaled_dot_product_attention" + check( + not is_causal, + f"{f_name}(): is_causal == True is not supported." 
+ ) + check( + attn_mask is not None, + f"{f_name}(): attn_mask == None is not supported." + ) + assert attn_mask is not None + + check( + attn_mask.layout == torch.sparse_bsr, + f"{f_name}(): " + f"attn_mask.layout must be {torch.sparse_bsr}, but got " + f"attn_mask.layout == {attn_mask.layout}." + ) + + check_device(f_name, key, query.device) + check_device(f_name, value, query.device) + check_device(f_name, attn_mask, query.device) + + check_dtype(f_name, key, query.dtype) + check_dtype(f_name, value, query.dtype) + if attn_mask.dtype is not torch.bool: + check_dtype(f_name, attn_mask, query.dtype) + + sdpa = sampled_addmm(attn_mask, query, key.transpose(-2, -1), beta=0.0, skip_checks=False) + if scale is None and query.size(-1) == 0 or scale == 0.0: + check( + False, + f"{f_name}(): current value of scale == {scale} " + "results in division by zero." + ) + scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale + sdpa.values().mul_(scale_factor) + sdpa = bsr_softmax(sdpa) + torch.nn.functional.dropout(sdpa.values(), p=dropout_p, inplace=True) + sdpa = bsr_dense_mm(sdpa, value) + return sdpa +else: + bsr_softmax = None # type: ignore[assignment] + bsr_dense_mm = None # type: ignore[assignment] + sampled_addmm = None # type: ignore[assignment] + _scaled_dot_product_attention = None # type: ignore[assignment] diff --git a/llava_next/lib/python3.10/site-packages/torch/sparse/semi_structured.py b/llava_next/lib/python3.10/site-packages/torch/sparse/semi_structured.py new file mode 100644 index 0000000000000000000000000000000000000000..dbca4f1e8243cce13b2d7b8376a7f3b4572d7438 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/sparse/semi_structured.py @@ -0,0 +1,434 @@ +import warnings +from collections import namedtuple +from typing import Any, Optional + +import torch + +__all__ = [ + "SparseSemiStructuredTensor", + "to_sparse_semi_structured", +] + +_SEMI_STRUCTURED_SPARSE_CONFIG = namedtuple( + "_SEMI_STRUCTURED_SPARSE_CONFIG", 
"compression_factor min_rows min_cols" +) +_DTYPE_TO_SEMI_STRUCTURED_SPARSE_CONFIG = { + torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(10, 32, 128), + torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(9, 32, 64), + torch.bfloat16: _SEMI_STRUCTURED_SPARSE_CONFIG(9, 32, 64), + # TODO enable float32 support when adding cuSPARSELt as a backend + # torch.float32: _SEMI_STRUCTURED_SPARSE_CONFIG(9, 32, 32) +} + + +class SparseSemiStructuredTensor(torch.Tensor): + """This class implementes semi-structured sparsity as a Tensor subclass. + + Semi-structured sparsity describes a sparsity pattern where n in every 2n elements are sparse, + depending on the datatype. It is also referred to as 2:4 sparsity or fine-grained + structured sparsity. + + Currently, this class supports 2:4 sparsity for int8, float16 and bfloat16 dtypes. + We also support 1:2 sparsity for float32 dtype. + + This subclass stores the dense tensor in a compressed form by only storing the specified elements and corresponding metadata. + These two are stored next to each other in one contiguous tensor. + + We choose to store the specified elements and the metadata in a single tensor for compatibilty with cuSPARSELt, + which expects the data to be stored in this format. + + compressed tensor = [ specified elements of original tensor | metadata ] + + For an original tensor of size (m, k) we expect the first m * k // 2 elements to be the kept elements + The rest of the tensor is metadata. + + The subclass supports two backend, either CUTLASS or cuSPASRELt. + + When _FORCE_CUTLASS is set, or when cuSPARSELt is not available, this subclass calls into _sparse_semi_structured_linear + and sparse_semi_structured_from_dense for conversion to the compressed format. + + When PyTorch is compiled with cuSPARSELt support, this subclass will call into _cslt_sparse_mm for sparse mm and + _cslt_compress to convert into the compressed format. 
+ """ + + _FUSE_TRANSPOSE = False + _FORCE_CUTLASS = False + _WARNING_SHOWN = False + + @staticmethod + def __new__( + cls, + original_tensor: Optional[torch.Tensor], + original_shape: Optional[torch.Size] = None, + compressed_tensor: Optional[torch.Tensor] = None, + transposed: bool = False, + ): + """ + Create a new instance of the class. + + When original_tensor is passed in, we compress it and store the compresed representation. + We can also create new instance of the class from the compressed representation without the original tensor. + + Args: + original_tensor: The original dense tensor, or None, if we have already compressed the tensor. + original_shape: The shape of the original dense tensor + compressed_tensor: A flattened tensor to store the specified elements and metadata. + transposed: Whether the tensor is transposed or not. + + Returns: + torch.Tensor: A torch.Tensor wrapper subclass. + + Raises: + ValueError: If both original_tensor and compressed_tensor are None. + + """ + if not cls._WARNING_SHOWN: + warnings.warn( + ( + "The PyTorch API of SparseSemiStructuredTensor is in prototype stage " + "and will change in the near future. Please open a Github issue " + "for features requests and see our documentation on the torch.sparse " + "module for further information about the project." 
+ ), + UserWarning, + ) + cls._WARNING_SHOWN = True + + if original_tensor is not None: + previous_tensor = original_tensor + original_shape = original_tensor.shape + elif compressed_tensor is not None: + previous_tensor = compressed_tensor + else: + raise ValueError("Both compressed_tensor and original_tensor are None!") + + kwargs = {} + kwargs["device"] = previous_tensor.device # type: ignore[assignment] + kwargs["dtype"] = previous_tensor.dtype # type: ignore[assignment] + kwargs["layout"] = previous_tensor.layout # type: ignore[assignment] + kwargs["requires_grad"] = False # type: ignore[assignment] + + return torch.Tensor._make_wrapper_subclass(cls, original_shape, **kwargs) # type: ignore[attr-defined] + + @staticmethod + def __get_indices_dtype(values_dtype): + if values_dtype == torch.int8: + return torch.int32 + elif values_dtype in (torch.float16, torch.bfloat16): + return torch.int16 + else: + raise RuntimeError(f"Datatype {values_dtype} is not supported!") + return None + + def __init__( + self, + original_tensor: Optional[torch.Tensor], + original_shape: Optional[torch.Size] = None, + compressed_tensor: Optional[torch.Tensor] = None, + transposed: bool = False, + ) -> None: + """SparseSemiStructuredTensor constructor. + + Args: + original_tensor: The original dense tensor, or None, if we have already compressed the tensor. + original_shape: The shape of the original dense tensor + compressed_tensor: A flattened tensor to store the specified elements and metadata. + transposed: Whether the tensor is transposed or not. + + Returns: + None + + Raises: + RuntimeError: If original_tensor is not a supported dtype, dim, shape, or device. + """ + # if original tensor is passed in, we need to compress it and store the compressed representation. + if original_tensor is not None: + # TODO right now we have unified checks and constraints for cuSPARSELt and CUTLASS, these are not actually the same. 
+ # We should consolidate similar checks here and leave backend specific checks like shape in the op implementation. + + # check device + if not original_tensor.is_cuda: + raise RuntimeError( + f"Error original_tensor.device= {original_tensor.device} is not supported! " + "Only CUDA tensors are currently supported." + ) + + # check dim + if original_tensor.dim() != 2: + raise RuntimeError( + f"Error original_tensor.dim = {original_tensor.dim()} is not supported! " + "Only 2d tensors are currently supported." + ) + + # check dtype + if original_tensor.dtype not in _DTYPE_TO_SEMI_STRUCTURED_SPARSE_CONFIG: + raise RuntimeError( + f"Error original_tensor.dtype {original_tensor.dtype} is not a supported dtype! " + "dtype must be one of: {_DTYPE_TO_SEMI_STRUCTURED_SPARSE_CONFIG}" + ) + + # check shape + m, n = original_tensor.shape + min_rows = _DTYPE_TO_SEMI_STRUCTURED_SPARSE_CONFIG[ + original_tensor.dtype + ].min_rows + min_cols = _DTYPE_TO_SEMI_STRUCTURED_SPARSE_CONFIG[ + original_tensor.dtype + ].min_cols + if m < min_rows or m % min_rows or n < min_cols or n % min_cols: + # TODO in the future we can add in padding to support dimensions that aren't perfect multiples + raise RuntimeError( + f"Error original_tensor.shape {original_tensor.shape} is not supported! " + "Both dimensions must be larger or equal than and a multiple of ({min_rows}, {min_cols})" + ) + + if self._FORCE_CUTLASS: + # This code calculates the size of the compressed tensor. 
+ # compression factor is different based on dtype it's given by the formula below for 2:4 sparsity: + # compression_factor = 1/2 + 1/bitwidth(dtype) + original_size = original_tensor.nelement() + compression_factor = _DTYPE_TO_SEMI_STRUCTURED_SPARSE_CONFIG[ + original_tensor.dtype + ].compression_factor + compressed_size = original_size * compression_factor // 16 + + compressed_tensor = torch.empty( + (compressed_size,), + dtype=original_tensor.dtype, + device=original_tensor.device, + ) + + from torch.sparse._semi_structured_conversions import ( + sparse_semi_structured_from_dense, + ) + + sparse, meta = sparse_semi_structured_from_dense(original_tensor) + compressed_tensor[: m * n // 2] = sparse.view(-1) + compressed_tensor[m * n // 2 :] = meta.view(original_tensor.dtype).view( + -1 + ) + else: + # use cuSPARSELt + compressed_tensor = torch._cslt_compress(original_tensor) + + # set values + self.original_tensor = None + self.compressed_tensor = compressed_tensor + self.transposed = transposed + + def __repr__(self) -> str: # type: ignore[override] + """Return string representation of SparseSemiStructuredTensor + + Returns: + str: String representation + + Raises: + None + """ + return ( + f"SparseSemiStructuredTensor(shape={self.shape}, " + f"transposed={self.transposed}" + f"values={self.values()}" + f"metadata={self.indices()})" + ) + + __torch_function__ = torch._C._disabled_torch_function_impl + + @classmethod + def __torch_dispatch__(cls, func, types, args, kwargs) -> Any: + """Overload __torch_dispatch__ to use torch._sparse_semi_structured_linear. + + `torch.structured_sparse_linear` uses accelerated sparse CUTLASS kernels. + In the future we plan to also add in support for cuSPARSELt kernels. + + Args: + func: The function being dispatched. + types: The types of the arguments. + args: The arguments passed to the function. + kwargs: The keyword arguments passed to the function. + + Returns: + Any: The result of the dispatched operation. 
+ + Raises: + NotImplementedError: If the dispatched operation is not implemented. + """ + # Since this code runs below autograd, a detach corresponds to only returning a new object + if func is torch.ops.aten.detach.default: + return SparseSemiStructuredTensor( + args[0].original_tensor, + original_shape=args[0].shape, + compressed_tensor=args[0].compressed_tensor, + transposed=args[0].transposed, + ) + + # Because we cannot go from the compressed representation back to the dense representation currently, + # we just keep track of how many times we have been transposed. Depending on whether the sparse matrix + # is the first or second argument, we expect an even / odd number of calls to transpose respectively. + if func is torch.ops.aten.t.default: + return SparseSemiStructuredTensor( + args[0].original_tensor, + original_shape=args[0].shape, + compressed_tensor=args[0].compressed_tensor, + transposed=not args[0].transposed, + ) + + # handle addmm + if func is torch.ops.aten.addmm.default: + bias, input_A, input_B = args + + # Currently, we only support the first matrix being sparse for addmm/mm in cuSPARSELT and CUTLASS. + # CUTLASS only supports the first input to be sparse for a given matmul. + # cuSPARSELt does not have this limitation, although our implementation is only for sparse first. + + # We support second matrix sparse matmul by taking advantage of some transpose properties: + # This is also why we want an odd number of transposed for second matrix sparse vs an even number + # of transpose calss for first matrix sparse. 
+ # F.linear(x) = addmm(bias, input, weight.t()) = b + xW' = (b + xW')'' + # = (W''x' + b')' = (Wx' + b')' = addmm(bias.T, weight, input).T + if isinstance(input_B, cls) and input_B.transposed: + if cls._FORCE_CUTLASS: + return torch._sparse_semi_structured_linear( + input_A, input_B.values(), input_B.indices(), bias=bias + ) + else: + return torch._cslt_sparse_mm( + input_B.compressed_tensor, input_A.T, bias # type: ignore[arg-type] + ).t() + + # handle mm + if func is torch.ops.aten.mm.default: + input_A, input_B = args + + if isinstance(input_A, cls) and not input_A.transposed: + if cls._FORCE_CUTLASS: + return torch._sparse_semi_structured_linear( + input_B.t(), input_A.values(), input_A.indices() + ).t() + else: + return torch._cslt_sparse_mm( + input_A.compressed_tensor, input_B, None # type: ignore[arg-type] + ) + + elif isinstance(input_B, cls) and input_B.transposed: + if cls._FORCE_CUTLASS: + return torch._sparse_semi_structured_linear( + input_A, input_B.values(), input_B.indices() + ) + else: + return torch._cslt_sparse_mm(input_B.compressed_tensor, input_A.T, None).t() # type: ignore[arg-type] + + # When torch is run with inference mode, pytorch does not decompose torch.ops.aten.linear into a .t() and addmm(), + # so we must match the aten.linear op. In this case, we need to explicitly handle collapsing to 2d matmul + # TODO see if there's a way to force pytorch to decompose the op so we don't have to handle this here. 
+ if func is torch.ops.aten.linear.default: + input_tensor, weight, bias = args + shape = input_tensor.shape + if isinstance(weight, cls): + if cls._FORCE_CUTLASS: + return torch._sparse_semi_structured_linear( + input_tensor, + weight.values(), + weight.indices(), + bias=bias + ) + else: + return torch._cslt_sparse_mm( + weight.compressed_tensor, # type: ignore[arg-type] + input_tensor.view(-1, shape[-1]).t(), + bias + ).t().view(*shape[:-1], -1) + + # handle values + if func is torch.ops.aten.values.default: + m, k = args[0].shape + num_kept_elements = m * k // 2 + return args[0].compressed_tensor[:num_kept_elements].view(m, k // 2) + + # handle indices + if func is torch.ops.aten.indices.default: + m, k = args[0].shape + num_kept_elements = m * k // 2 + metadata = args[0].compressed_tensor[num_kept_elements:].view(m, -1) + + # the metadata is expected to be in different datatypes for fp16/int8 respectively for CUTLASS. + indices_dtype = SparseSemiStructuredTensor.__get_indices_dtype( + args[0].dtype + ) + return metadata.view(indices_dtype) + + error_string = "\n".join( + [f"func {func} with args: "] + + [f"arg{i}: {arg}" for i, arg in enumerate(args)] + ) + raise NotImplementedError(error_string) + + def to_dense(self): + if self.compressed_tensor is None: + raise RuntimeError("Compressed tensor is not set, cannot convert to dense!") + + m, n = self.shape + indices_dtype = SparseSemiStructuredTensor.__get_indices_dtype(self.dtype) + + from torch.sparse._semi_structured_conversions import ( + sparse_semi_structured_to_dense, + ) + + return sparse_semi_structured_to_dense( + self.compressed_tensor[: m * n // 2].view(m, -1), + self.compressed_tensor[m * n // 2 :].view(indices_dtype).view(m, -1), + ) + + +def to_sparse_semi_structured( + original_tensor: torch.Tensor, + transposed: bool = False, +) -> SparseSemiStructuredTensor: + """ + This function converts a dense tensor into a sparse semi-structured tensor. 
+ It will return a SparseSemiStructuredTensor, a subclass of torch.Tensor. + + This function will check to ensure the dense tensor has the right dtype, size, dims, and device. + We currently only support semi-structured sparse tensors for 2d CUDA tensors. + Additionally, your tensor must be a positive multiple of a block size given the dtype + + - torch.float16 (r, c) must be >= and a multiple of 64 + - torch.int8 (r, c) must be >= and a multiple of 128 + + Args: + original_tensor (Tensor): the dense tensor to convert + transposed (bool, optional): whether the dense tensor is transposed + + Returns: + SparseSemiStructuredTensor: A sparse semi-structured tensor created from the given original_tensor + + Raises: + None + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> A = torch.Tensor([0, 0, 1, 1]).tile((128, 32)).half().cuda() + tensor([[0., 0., 1., ..., 0., 1., 1.], + [0., 0., 1., ..., 0., 1., 1.], + [0., 0., 1., ..., 0., 1., 1.], + ..., + [0., 0., 1., ..., 0., 1., 1.], + [0., 0., 1., ..., 0., 1., 1.], + [0., 0., 1., ..., 0., 1., 1.]], device='cuda:0', dtype=torch.float16) + >>> A_sparse = to_sparse_semi_structured(A) + SparseSemiStructuredTensor(shape=torch.Size([128, 128]), transposed=False, values=tensor([[1., 1., 1., ..., 1., 1., 1.], + [1., 1., 1., ..., 1., 1., 1.], + [1., 1., 1., ..., 1., 1., 1.], + ..., + [1., 1., 1., ..., 1., 1., 1.], + [1., 1., 1., ..., 1., 1., 1.], + [1., 1., 1., ..., 1., 1., 1.]], device='cuda:0', dtype=torch.float16), + metadata=tensor([[-4370, -4370, -4370, ..., -4370, -4370, -4370], + [-4370, -4370, -4370, ..., -4370, -4370, -4370], + [-4370, -4370, -4370, ..., -4370, -4370, -4370], + ..., + [-4370, -4370, -4370, ..., -4370, -4370, -4370], + [-4370, -4370, -4370, ..., -4370, -4370, -4370], + [-4370, -4370, -4370, ..., -4370, -4370, -4370]], device='cuda:0', + dtype=torch.int16)) + """ + return SparseSemiStructuredTensor(original_tensor, original_shape=original_tensor.shape, transposed=transposed) diff --git 
a/llava_next/lib/python3.10/site-packages/torchvision.libs/libpng16.7f72a3c5.so.16 b/llava_next/lib/python3.10/site-packages/torchvision.libs/libpng16.7f72a3c5.so.16 new file mode 100644 index 0000000000000000000000000000000000000000..79664dbcb562f739be44358a5acc49f0084a94e4 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torchvision.libs/libpng16.7f72a3c5.so.16 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53abe0d895220e89313958fb6c72aafb6462ced275fc8e72a1eabd3c7c0c2029 +size 1079081 diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a482b33289371673046559d1c840febbff170ea5 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_collection.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_collection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8cbc00b7c2db7845ecf81eebe8b6db170a878ef Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_collection.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_fits.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_fits.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..346efe87b8344cdc71b46364dba2448a72ff8717 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_fits.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_imageio.cpython-310.pyc 
b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_imageio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8067155998fd061914f74d16ecdc703a5e81a04 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_imageio.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_imread.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_imread.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5254ae7e9eeee42457e7c0b5667af5e27eaf21ba Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_imread.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_io.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccdad91c013edc68719caced86032562547cb660 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_io.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_mpl_imshow.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_mpl_imshow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..525d00198b7ced10fce9b8b42a704dd5f2bf0ed0 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_mpl_imshow.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_multi_image.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_multi_image.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..27e675de148299a6ab3697f5bf1ecdd7c89734d7 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_multi_image.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_pil.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_pil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04aa94dc94a886958c6d60bcda40afb47887e339 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_pil.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_plugin.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_plugin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee0e8e6b6282b5c9d13fe6b3f0784bcd07b4e2da Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_plugin.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_sift.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_sift.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c594462f9c824653253be6f1a6245729934aa2b Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_sift.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_simpleitk.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_simpleitk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc4d2609a5f35325e978c1c6b19a6e28e6b217bc Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_simpleitk.cpython-310.pyc differ 
diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_tifffile.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_tifffile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49ff2fae2b7be69da9e5ea0a2c5951a377a4fa2f Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/__pycache__/test_tifffile.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_collection.py b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_collection.py new file mode 100644 index 0000000000000000000000000000000000000000..3cb4be967403cf462d0260a012ad6cde9ef9fc78 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_collection.py @@ -0,0 +1,168 @@ +import os +import itertools + +import numpy as np +import imageio.v3 as iio3 +from skimage import data_dir +from skimage.io.collection import ImageCollection, MultiImage, alphanumeric_key +from skimage.io import reset_plugins + +from skimage._shared import testing +from skimage._shared.testing import assert_equal, assert_allclose, fetch + +import pytest + + +try: + has_pooch = True +except ModuleNotFoundError: + has_pooch = False + + +def test_string_split(): + test_string = 'z23a' + test_str_result = ['z', 23, 'a'] + assert_equal(alphanumeric_key(test_string), test_str_result) + + +def test_string_sort(): + filenames = [ + 'f9.10.png', + 'f9.9.png', + 'f10.10.png', + 'f10.9.png', + 'e9.png', + 'e10.png', + 'em.png', + ] + expected_filenames = [ + 'e9.png', + 'e10.png', + 'em.png', + 'f9.9.png', + 'f9.10.png', + 'f10.9.png', + 'f10.10.png', + ] + sorted_filenames = sorted(filenames, key=alphanumeric_key) + assert_equal(expected_filenames, sorted_filenames) + + +def test_imagecollection_input(): + """Test function for ImageCollection. 
The new behavior (implemented + in 0.16) allows the `pattern` argument to accept a list of strings + as the input. + + Notes + ----- + If correct, `images` will receive three images. + """ + # Ensure that these images are part of the legacy datasets + # this means they will always be available in the user's install + # regardless of the availability of pooch + pics = [ + fetch('data/coffee.png'), + fetch('data/chessboard_GRAY.png'), + fetch('data/rocket.jpg'), + ] + pattern = [os.path.join(data_dir, pic) for pic in pics] + images = ImageCollection(pattern) + assert len(images) == 3 + + +class TestImageCollection: + pics = [fetch('data/brick.png'), fetch('data/color.png'), fetch('data/moon.png')] + pattern = pics[:2] + pattern_same_shape = pics[::2] + + def setup_method(self): + reset_plugins() + # Generic image collection with images of different shapes. + self.images = ImageCollection(self.pattern) + # Image collection with images having shapes that match. + self.images_matched = ImageCollection(self.pattern_same_shape) + # Same images as a collection of frames + self.frames_matched = MultiImage(self.pattern_same_shape) + + def test_len(self): + assert len(self.images) == 2 + + def test_getitem(self): + num = len(self.images) + for i in range(-num, num): + assert isinstance(self.images[i], np.ndarray) + assert_allclose(self.images[0], self.images[-num]) + + def return_img(n): + return self.images[n] + + with testing.raises(IndexError): + return_img(num) + with testing.raises(IndexError): + return_img(-num - 1) + + def test_slicing(self): + assert type(self.images[:]) is ImageCollection + assert len(self.images[:]) == 2 + assert len(self.images[:1]) == 1 + assert len(self.images[1:]) == 1 + assert_allclose(self.images[0], self.images[:1][0]) + assert_allclose(self.images[1], self.images[1:][0]) + assert_allclose(self.images[1], self.images[::-1][0]) + assert_allclose(self.images[0], self.images[::-1][1]) + + def test_files_property(self): + assert 
isinstance(self.images.files, list) + + def set_files(f): + self.images.files = f + + with testing.raises(AttributeError): + set_files('newfiles') + + @pytest.mark.skipif(not has_pooch, reason="needs pooch to download data") + def test_custom_load_func_sequence(self): + filename = fetch('data/no_time_for_that_tiny.gif') + + def reader(index): + return iio3.imread(filename, index=index) + + ic = ImageCollection(range(24), load_func=reader) + # the length of ic should be that of the given load_pattern sequence + assert len(ic) == 24 + # GIF file has frames of size 25x14 with 4 channels (RGBA) + assert ic[0].shape == (25, 14, 3) + + @pytest.mark.skipif(not has_pooch, reason="needs pooch to download data") + def test_custom_load_func_w_kwarg(self): + load_pattern = fetch('data/no_time_for_that_tiny.gif') + + def load_fn(f, step): + vid = iio3.imiter(f) + return list(itertools.islice(vid, None, None, step)) + + ic = ImageCollection(load_pattern, load_func=load_fn, step=3) + # Each file should map to one image (array). + assert len(ic) == 1 + # GIF file has 24 frames, so 24 / 3 equals 8. 
+ assert len(ic[0]) == 8 + + def test_custom_load_func(self): + def load_fn(x): + return x + + ic = ImageCollection(os.pathsep.join(self.pattern), load_func=load_fn) + assert_equal(ic[0], self.pattern[0]) + + def test_concatenate(self): + array = self.images_matched.concatenate() + expected_shape = (len(self.images_matched),) + self.images[0].shape + assert_equal(array.shape, expected_shape) + + def test_concatenate_mismatched_image_shapes(self): + with testing.raises(ValueError): + self.images.concatenate() + + def test_multiimage_imagecollection(self): + assert_equal(self.images_matched[0], self.frames_matched[0]) + assert_equal(self.images_matched[1], self.frames_matched[1]) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_fits.py b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_fits.py new file mode 100644 index 0000000000000000000000000000000000000000..c043db406d5d2b8c9822b94cb66aeb3e142cfb00 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_fits.py @@ -0,0 +1,35 @@ +import numpy as np +import skimage.io as io +from skimage._shared import testing + + +testing.pytest.importorskip('astropy') + + +@testing.pytest.fixture(autouse=True) +def _reset_plugin(): + yield + io.reset_plugins() + + +def test_imread(): + # Make sure we get an import exception if Astropy isn't there + # (not sure how useful this is, but it ensures there isn't some other + # error when trying to load the plugin) + try: + io.use_plugin('fits') + except ImportError: + raise () + + +def _same_ImageCollection(collection1, collection2): + """ + Ancillary function to compare two ImageCollection objects, checking that + their constituent arrays are equal. 
+ """ + if len(collection1) != len(collection2): + return False + for ext1, ext2 in zip(collection1, collection2): + if not np.all(ext1 == ext2): + return False + return True diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_imread.py b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_imread.py new file mode 100644 index 0000000000000000000000000000000000000000..75fcc27ecc728bc0815a02899e50549e09888a44 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_imread.py @@ -0,0 +1,75 @@ +from tempfile import NamedTemporaryFile + +import numpy as np +from skimage import io +from skimage.io import imread, imsave, use_plugin, reset_plugins + +from skimage._shared import testing +from skimage._shared.testing import ( + TestCase, + assert_array_equal, + assert_array_almost_equal, + fetch, +) + +import pytest + +pytest.importorskip('imread') + + +@pytest.fixture(autouse=True) +def _use_imread_plugin(): + """Ensure that PIL plugin is used in tests here.""" + use_plugin('imread') + yield + reset_plugins() + + +def test_imread_as_gray(): + img = imread(fetch('data/color.png'), as_gray=True) + assert img.ndim == 2 + assert img.dtype == np.float64 + img = imread(fetch('data/camera.png'), as_gray=True) + # check that conversion does not happen for a gray image + assert np.dtype(img.dtype).char in np.typecodes['AllInteger'] + + +def test_imread_palette(): + img = imread(fetch('data/palette_color.png')) + assert img.ndim == 3 + + +def test_imread_truncated_jpg(): + with testing.raises(RuntimeError): + io.imread(fetch('data/truncated.jpg')) + + +def test_bilevel(): + expected = np.zeros((10, 10), bool) + expected[::2] = 1 + + img = imread(fetch('data/checker_bilevel.png')) + assert_array_equal(img.astype(bool), expected) + + +class TestSave(TestCase): + def roundtrip(self, x, scaling=1): + with NamedTemporaryFile(suffix='.png') as f: + fname = f.name + + imsave(fname, x) + y = imread(fname) + + assert_array_almost_equal((x * 
scaling).astype(np.int32), y) + + def test_imsave_roundtrip(self): + dtype = np.uint8 + np.random.seed(0) + for shape in [(10, 10), (10, 10, 3), (10, 10, 4)]: + x = np.ones(shape, dtype=dtype) * np.random.rand(*shape) + + if np.issubdtype(dtype, np.floating): + yield self.roundtrip, x, 255 + else: + x = (x * 255).astype(dtype) + yield self.roundtrip, x diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_io.py b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_io.py new file mode 100644 index 0000000000000000000000000000000000000000..d76499a26bee9a35dbf85f1bde3ed9aaecb7604b --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_io.py @@ -0,0 +1,161 @@ +import os +import pathlib +import tempfile + +import numpy as np +import pytest + +from skimage import io +from skimage._shared.testing import assert_array_equal, fetch, assert_stacklevel +from skimage._shared._dependency_checks import is_wasm +from skimage.data import data_dir + + +one_by_one_jpeg = ( + b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01' + b'\x00\x01\x00\x00\xff\xdb\x00C\x00\x03\x02\x02\x02\x02' + b'\x02\x03\x02\x02\x02\x03\x03\x03\x03\x04\x06\x04\x04' + b'\x04\x04\x04\x08\x06\x06\x05\x06\t\x08\n\n\t\x08\t\t' + b'\n\x0c\x0f\x0c\n\x0b\x0e\x0b\t\t\r\x11\r\x0e\x0f\x10' + b'\x10\x11\x10\n\x0c\x12\x13\x12\x10\x13\x0f\x10\x10' + b'\x10\xff\xc0\x00\x0b\x08\x00\x01\x00\x01\x01\x01\x11' + b'\x00\xff\xc4\x00\x14\x00\x01\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\xff\xc4\x00' + b'\x14\x10\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\xff\xda\x00\x08\x01\x01\x00' + b'\x00?\x00*\x9f\xff\xd9' +) + + +def test_stack_basic(): + x = np.arange(12).reshape(3, 4) + io.push(x) + + assert_array_equal(io.pop(), x) + + +def test_stack_non_array(): + with pytest.raises(ValueError): + io.push([[1, 2, 3]]) + + +def test_imread_file_url(): + # tweak data path so that file URI works on both unix and windows. 
+ data_path = str(fetch('data/camera.png')) + data_path = data_path.replace(os.path.sep, '/') + image_url = f'file:///{data_path}' + image = io.imread(image_url) + assert image.shape == (512, 512) + + +@pytest.mark.skipif(is_wasm, reason="no access to pytest-localserver") +def test_imread_http_url(httpserver): + # httpserver is a fixture provided by pytest-localserver + # https://bitbucket.org/pytest-dev/pytest-localserver/ + httpserver.serve_content(one_by_one_jpeg) + # it will serve anything you provide to it on its url. + # we add a /test.jpg so that we can identify the content + # by extension + image = io.imread(httpserver.url + '/test.jpg' + '?' + 's' * 266) + assert image.shape == (1, 1) + + +def test_imread_pathlib_tiff(): + """Tests reading from Path object (issue gh-5545).""" + + # read via fetch + fname = fetch('data/multipage.tif') + expected = io.imread(fname) + + # read by passing in a pathlib.Path object + path = pathlib.Path(fname) + img = io.imread(path) + + assert img.shape == (2, 15, 10) + assert_array_equal(expected, img) + + +def _named_tempfile_func(error_class): + """Create a mock function for NamedTemporaryFile that always raises. + + Parameters + ---------- + error_class : exception class + The error that should be raised when asking for a NamedTemporaryFile. + + Returns + ------- + named_temp_file : callable + A function that always raises the desired error. + + Notes + ----- + Although this function has general utility for raising errors, it is + expected to be used to raise errors that ``tempfile.NamedTemporaryFile`` + from the Python standard library could raise. As of this writing, these + are ``FileNotFoundError``, ``FileExistsError``, ``PermissionError``, and + ``BaseException``. See + `this comment `__ + for more information. 
+ """ + + def named_temp_file(*args, **kwargs): + raise error_class() + + return named_temp_file + + +@pytest.mark.parametrize( + 'error_class', [FileNotFoundError, FileExistsError, PermissionError, BaseException] +) +def test_failed_temporary_file(monkeypatch, error_class): + fetch('data/camera.png') + # tweak data path so that file URI works on both unix and windows. + data_path = data_dir.lstrip(os.path.sep) + data_path = data_path.replace(os.path.sep, '/') + image_url = f'file:///{data_path}/camera.png' + with monkeypatch.context(): + monkeypatch.setattr( + tempfile, 'NamedTemporaryFile', _named_tempfile_func(error_class) + ) + with pytest.raises(error_class): + io.imread(image_url) + + +@pytest.mark.parametrize( + # Test `**plugin_args` with `mode` + "kwarg", + [{"plugin": None}, {"plugin": "imageio"}, {"mode": "r"}], +) +def test_plugin_deprecation_on_imread(kwarg): + path = fetch("data/multipage.tif") + regex = ".*use `imageio` or other I/O packages directly.*" + with pytest.warns(FutureWarning, match=regex) as record: + io.imread(path, **kwarg) + assert len(record) == 1 + assert_stacklevel(record, offset=-2) + + +@pytest.mark.parametrize( + # Test `**plugin_args` with `mode` + "kwarg", + [{"plugin": None}, {"plugin": "imageio"}, {"append": False}], +) +def test_plugin_deprecation_on_imsave(kwarg, tmp_path): + path = tmp_path / "test.tif" + array = np.array([0, 1], dtype=float) + regex = ".*use `imageio` or other I/O packages directly.*" + with pytest.warns(FutureWarning, match=regex) as record: + io.imsave(path, array, **kwarg) + assert len(record) == 1 + assert_stacklevel(record, offset=-2) + + +@pytest.mark.parametrize("kwarg", [{"plugin": None}, {"plugin": "imageio"}]) +def test_plugin_deprecation_on_imread_collection(kwarg): + pattern = data_dir + "*.png" + regex = ".*use `imageio` or other I/O packages directly.*" + with pytest.warns(FutureWarning, match=regex) as record: + io.imread_collection(pattern, **kwarg) + assert len(record) == 1 + 
assert_stacklevel(record, offset=-2) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_mpl_imshow.py b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_mpl_imshow.py new file mode 100644 index 0000000000000000000000000000000000000000..e5297630fcad86423612aa9ab8701538bb9db546 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_mpl_imshow.py @@ -0,0 +1,130 @@ +import numpy as np +import pytest + +from skimage import io +from skimage._shared._warnings import expected_warnings +from skimage._shared._dependency_checks import is_wasm + +plt = pytest.importorskip("matplotlib.pyplot") + +if plt: + plt.switch_backend("Agg") + + +@pytest.fixture(autouse=True) +def _reset_plugins(): + io.reset_plugins() + + +# test images. Note that they don't have their full range for their dtype, +# but we still expect the display range to equal the full dtype range. +im8 = np.array([[0, 64], [128, 240]], np.uint8) +im16 = im8.astype(np.uint16) * 256 +im64 = im8.astype(np.uint64) +imf = im8 / 255 +im_lo = imf / 1000 +im_hi = imf + 10 + +imshow_expected_warnings = [ + r"tight_layout : falling back to Agg|\A\Z", + r"np.asscalar|\A\Z", + r"The figure layout has changed to tight|\A\Z", + "is deprecated since version 0.25.* Please use .* to visualize images", + r"Use imageio or a similar package instead|\A\Z", +] + +# This warning comes from the Python 3.12.1 interpreter powered by Pyodide +# and is not relevant to the tests where it is raised. +if is_wasm: + imshow_expected_warnings.append( + r"Pickle, copy, and deepcopy support will be removed from itertools in Python 3.14|\A\Z" + ) + + +def n_subplots(ax_im): + """Return the number of subplots in the figure containing an ``AxesImage``. + + Parameters + ---------- + ax_im : matplotlib.pyplot.AxesImage object + The input ``AxesImage``. + + Returns + ------- + n : int + The number of subplots in the corresponding figure. 
+ + Notes + ----- + This function is intended to check whether a colorbar was drawn, in + which case two subplots are expected. For standard imshows, one + subplot is expected. + """ + return len(ax_im.get_figure().get_axes()) + + +def test_uint8(): + plt.figure() + with expected_warnings(imshow_expected_warnings): + ax_im = io.imshow(im8) + assert ax_im.cmap.name == 'gray' + assert ax_im.get_clim() == (0, 255) + assert n_subplots(ax_im) == 1 + assert ax_im.colorbar is None + + +def test_uint16(): + plt.figure() + with expected_warnings(imshow_expected_warnings): + ax_im = io.imshow(im16) + assert ax_im.cmap.name == 'gray' + assert ax_im.get_clim() == (0, 65535) + assert n_subplots(ax_im) == 1 + assert ax_im.colorbar is None + + +def test_float(): + plt.figure() + with expected_warnings(imshow_expected_warnings): + ax_im = io.imshow(imf) + assert ax_im.cmap.name == 'gray' + assert ax_im.get_clim() == (0, 1) + assert n_subplots(ax_im) == 1 + assert ax_im.colorbar is None + + +def test_low_data_range(): + with expected_warnings(imshow_expected_warnings + ["Low image data range"]): + ax_im = io.imshow(im_lo) + assert ax_im.get_clim() == (im_lo.min(), im_lo.max()) + # check that a colorbar was created + assert ax_im.colorbar is not None + + +def test_outside_standard_range(): + plt.figure() + with expected_warnings(imshow_expected_warnings + ["out of standard range"]): + ax_im = io.imshow(im_hi) + assert ax_im.get_clim() == (im_hi.min(), im_hi.max()) + assert n_subplots(ax_im) == 2 + assert ax_im.colorbar is not None + + +def test_nonstandard_type(): + plt.figure() + with expected_warnings(imshow_expected_warnings + ["Low image data range"]): + ax_im = io.imshow(im64) + assert ax_im.get_clim() == (im64.min(), im64.max()) + assert n_subplots(ax_im) == 2 + assert ax_im.colorbar is not None + + +def test_signed_image(): + plt.figure() + im_signed = np.array([[-0.5, -0.2], [0.1, 0.4]]) + + with expected_warnings(imshow_expected_warnings): + ax_im = io.imshow(im_signed) + 
assert ax_im.get_clim() == (-0.5, 0.5) + assert n_subplots(ax_im) == 2 + assert ax_im.colorbar is not None diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_multi_image.py b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_multi_image.py new file mode 100644 index 0000000000000000000000000000000000000000..f5850b3b6d27024faa007e8e94e7070f094aebe1 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_multi_image.py @@ -0,0 +1,100 @@ +import os + +import numpy as np +from skimage.io import use_plugin, reset_plugins +from skimage.io.collection import MultiImage + +from skimage._shared import testing +from skimage._shared.testing import assert_equal, assert_allclose + +from pytest import fixture + + +@fixture +def imgs(): + use_plugin('pil') + + paths = [ + testing.fetch('data/multipage_rgb.tif'), + testing.fetch('data/no_time_for_that_tiny.gif'), + ] + imgs = [ + MultiImage(paths[0]), + MultiImage(paths[0], conserve_memory=False), + MultiImage(paths[1]), + MultiImage(paths[1], conserve_memory=False), + MultiImage(os.pathsep.join(paths)), + ] + yield imgs + + reset_plugins() + + +def test_shapes(imgs): + imgs = imgs[-1] + assert imgs[0][0].shape == imgs[0][1].shape + assert imgs[0][0].shape == (10, 10, 3) + + +def test_len(imgs): + assert len(imgs[0][0]) == len(imgs[1][0]) == 2 + assert len(imgs[2][0]) == len(imgs[3][0]) == 24 + assert len(imgs[-1]) == 2, len(imgs[-1]) + + +def test_slicing(imgs): + img = imgs[-1] + assert type(img[:]) is MultiImage + assert len(img[0][:]) + len(img[1][:]) == 26, len(img[:]) + assert len(img[0][:1]) == 1 + assert len(img[1][1:]) == 23 + assert_allclose(img[0], img[:1][0]) + assert_allclose(img[1], img[1:][0]) + assert_allclose(img[-1], img[::-1][0]) + assert_allclose(img[0], img[::-1][-1]) + + +def test_getitem(imgs): + for img in imgs[0]: + num = len(img) + + for i in range(-num, num): + assert type(img[i]) is np.ndarray + assert_allclose(img[0], img[-num]) + + with 
testing.raises(AssertionError): + assert_allclose(img[0], img[1]) + + with testing.raises(IndexError): + img[num] + with testing.raises(IndexError): + img[-num - 1] + + +def test_files_property(imgs): + for img in imgs: + if isinstance(img, MultiImage): + continue + + assert isinstance(img.filename, str) + + with testing.raises(AttributeError): + img.filename = "newfile" + + +def test_conserve_memory_property(imgs): + for img in imgs: + assert isinstance(img.conserve_memory, bool) + + with testing.raises(AttributeError): + img.conserve_memory = True + + +def test_concatenate(imgs): + for img in imgs: + if img[0].shape != img[-1].shape: + with testing.raises(ValueError): + img.concatenate() + continue + array = img.concatenate() + assert_equal(array.shape, (len(img),) + img[0].shape) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_plugin.py b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_plugin.py new file mode 100644 index 0000000000000000000000000000000000000000..2b6e83f3f0175b49cafd5ef8ceb8f3c0be0af2f2 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/io/tests/test_plugin.py @@ -0,0 +1,93 @@ +from contextlib import contextmanager +import numpy as np +import pytest + +from skimage._shared._dependency_checks import has_mpl +from skimage import io +from skimage.io import manage_plugins +from skimage._shared.testing import fetch, assert_stacklevel + + +priority_plugin = 'pil' + + +@pytest.fixture(autouse=True) +def _use_pil_plugin(): + io.use_plugin('pil') + yield + io.reset_plugins() + + +@contextmanager +def protect_preferred_plugins(): + """Contexts where `preferred_plugins` can be modified w/o side-effects.""" + preferred_plugins = manage_plugins.preferred_plugins.copy() + try: + yield + finally: + manage_plugins.preferred_plugins = preferred_plugins + + +def test_failed_use(): + with pytest.raises(ValueError): + manage_plugins.use_plugin('asd') + + +@pytest.mark.skipif(not has_mpl, reason="matplotlib not 
installed") +def test_use_priority(): + manage_plugins.use_plugin(priority_plugin) + plug, func = manage_plugins.plugin_store['imread'][0] + np.testing.assert_equal(plug, priority_plugin) + + manage_plugins.use_plugin('matplotlib') + plug, func = manage_plugins.plugin_store['imread'][0] + np.testing.assert_equal(plug, 'matplotlib') + + +@pytest.mark.skipif(not has_mpl, reason="matplotlib not installed") +def test_load_preferred_plugins_all(): + from skimage.io._plugins import pil_plugin, matplotlib_plugin + + with protect_preferred_plugins(): + manage_plugins.preferred_plugins = {'all': ['pil'], 'imshow': ['matplotlib']} + manage_plugins.reset_plugins() + + for plugin_type in ('imread', 'imsave'): + plug, func = manage_plugins.plugin_store[plugin_type][0] + assert func == getattr(pil_plugin, plugin_type) + plug, func = manage_plugins.plugin_store['imshow'][0] + assert func == getattr(matplotlib_plugin, 'imshow') + + +@pytest.mark.skipif(not has_mpl, reason="matplotlib not installed") +def test_load_preferred_plugins_imread(): + from skimage.io._plugins import pil_plugin, matplotlib_plugin + + with protect_preferred_plugins(): + manage_plugins.preferred_plugins['imread'] = ['pil'] + manage_plugins.reset_plugins() + + plug, func = manage_plugins.plugin_store['imread'][0] + assert func == pil_plugin.imread + plug, func = manage_plugins.plugin_store['imshow'][0] + assert func == matplotlib_plugin.imshow, func.__module__ + + +@pytest.mark.parametrize( + ("func", "args"), + [ + (io.use_plugin, ["imageio"]), + (io.call_plugin, ["imread", fetch("data/camera.png")]), + (io.plugin_info, ["imageio"]), + (io.plugin_order, tuple()), + (io.reset_plugins, tuple()), + (io.find_available_plugins, tuple()), + (getattr, [io, "available_plugins"]), + ], +) +def test_deprecation_warnings_on_plugin_funcs(func, args): + regex = ".*use `imageio` or other I/O packages directly.*" + with pytest.warns(FutureWarning, match=regex) as record: + func(*args) + assert len(record) == 1 + 
assert_stacklevel(record, offset=-2) diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb_cy.cpython-310-x86_64-linux-gnu.so b/vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb_cy.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..8f19dcf44a14c9c8e3bdad63815808f2d0311c8d --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb_cy.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9034ad70e4093e86dd95d26ff2f1c6833ec7094ced24713a10888adb7aab384 +size 182264 diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_quickshift_cy.cpython-310-x86_64-linux-gnu.so b/vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_quickshift_cy.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..7ad9422269ff47c9b5b54c58ff24d917033387a6 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_quickshift_cy.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41d3404292b7c78a6f5d71f5346163ab65cbae368fc4136847b254801043ba58 +size 326120 diff --git a/vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_watershed_cy.cpython-310-x86_64-linux-gnu.so b/vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_watershed_cy.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0bfc816875d62b7cfc204267b3315341ed7a771b --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_watershed_cy.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa7e9ea90e690498a71fcd83c073afb4ee4f6744ae97948c2947b132ad3ea0b7 +size 367632