Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- llava_next/share/terminfo/M/MtxOrb162 +0 -0
- llava_next/share/terminfo/q/qansi +0 -0
- llava_next/share/terminfo/q/qvt103-w +0 -0
- llava_next/share/terminfo/q/qvt119+-25 +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_lazy_graph_module.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_utils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py +558 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py +1040 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py +349 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py +53 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/z3_types.py +29 -0
- parrot/lib/python3.10/site-packages/torch/fx/operator_schemas.py +442 -0
- parrot/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py +44 -0
- parrot/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/passes/graph_transform_observer.py +88 -0
- parrot/lib/python3.10/site-packages/torch/fx/passes/net_min_base.py +924 -0
- parrot/lib/python3.10/site-packages/torch/fx/passes/param_fetch.py +66 -0
- parrot/lib/python3.10/site-packages/torch/fx/passes/runtime_assert.py +392 -0
- parrot/lib/python3.10/site-packages/torch/fx/passes/tools_common.py +303 -0
- parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUDAToolkit.cmake +1073 -0
- parrot/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets-release.cmake +39 -0
- parrot/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets.cmake +114 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tfcompile.so +3 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/central_storage_strategy.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/cross_device_ops.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/cross_device_utils.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/distribute_config.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/distribute_utils.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_lib.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_ops.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_util.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -807,3 +807,4 @@ parrot/lib/python3.10/site-packages/nvidia/cusparse/lib/libcusparse.so.12 filter
|
|
| 807 |
parrot/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 808 |
mplug_owl2/lib/python3.10/site-packages/nvidia/curand/lib/libcurand.so.10 filter=lfs diff=lfs merge=lfs -text
|
| 809 |
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_py_exception_registry.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 807 |
parrot/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 808 |
mplug_owl2/lib/python3.10/site-packages/nvidia/curand/lib/libcurand.so.10 filter=lfs diff=lfs merge=lfs -text
|
| 809 |
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_py_exception_registry.so filter=lfs diff=lfs merge=lfs -text
|
| 810 |
+
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tfcompile.so filter=lfs diff=lfs merge=lfs -text
|
llava_next/share/terminfo/M/MtxOrb162
ADDED
|
Binary file (193 Bytes). View file
|
|
|
llava_next/share/terminfo/q/qansi
ADDED
|
Binary file (2.01 kB). View file
|
|
|
llava_next/share/terminfo/q/qvt103-w
ADDED
|
Binary file (758 Bytes). View file
|
|
|
llava_next/share/terminfo/q/qvt119+-25
ADDED
|
Binary file (581 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc
ADDED
|
Binary file (1.19 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_lazy_graph_module.cpython-310.pyc
ADDED
|
Binary file (6.5 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc
ADDED
|
Binary file (34.9 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (1.97 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc
ADDED
|
Binary file (810 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc
ADDED
|
Binary file (203 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc
ADDED
|
Binary file (58.5 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc
ADDED
|
Binary file (26.7 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc
ADDED
|
Binary file (2.99 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc
ADDED
|
Binary file (27.2 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc
ADDED
|
Binary file (14.2 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc
ADDED
|
Binary file (19.8 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc
ADDED
|
Binary file (10.5 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc
ADDED
|
Binary file (3.76 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc
ADDED
|
Binary file (28.9 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc
ADDED
|
Binary file (9.18 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (195 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc
ADDED
|
Binary file (17.1 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc
ADDED
|
Binary file (30.7 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc
ADDED
|
Binary file (1.89 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py
ADDED
|
@@ -0,0 +1,558 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_sub, op_mul, op_div, \
|
| 3 |
+
op_mod, op_gt, op_lt, op_neq, op_eq
|
| 4 |
+
from torch.fx.tensor_type import TensorType, Dyn
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Constraint:
|
| 8 |
+
pass
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Conj(Constraint):
|
| 12 |
+
def __init__(self, conjuncts):
|
| 13 |
+
"""
|
| 14 |
+
:param conjuncts: Conjunction of constraints
|
| 15 |
+
"""
|
| 16 |
+
self.conjucts = conjuncts
|
| 17 |
+
|
| 18 |
+
def __eq__(self, other):
|
| 19 |
+
if isinstance(other, Conj):
|
| 20 |
+
return self.conjucts == other.conjucts and self.conjucts == other.conjucts
|
| 21 |
+
else:
|
| 22 |
+
return False
|
| 23 |
+
|
| 24 |
+
def __repr__(self):
|
| 25 |
+
return f'And({self.conjucts})'
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class Disj(Constraint):
|
| 29 |
+
def __init__(self, disjuncts):
|
| 30 |
+
"""
|
| 31 |
+
:param disjuncts: Disjunction of constraints
|
| 32 |
+
"""
|
| 33 |
+
self.disjuncts = disjuncts
|
| 34 |
+
|
| 35 |
+
def __eq__(self, other):
|
| 36 |
+
if isinstance(other, Disj):
|
| 37 |
+
return self.disjuncts == other.disjuncts and self.disjuncts == other.disjuncts
|
| 38 |
+
else:
|
| 39 |
+
return False
|
| 40 |
+
|
| 41 |
+
def __repr__(self):
|
| 42 |
+
return f'Or({self.disjuncts})'
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class Prod(Constraint):
|
| 46 |
+
def __init__(self, products):
|
| 47 |
+
"""
|
| 48 |
+
:param products: lists of dimensions to multiply
|
| 49 |
+
"""
|
| 50 |
+
self.products = products
|
| 51 |
+
|
| 52 |
+
def __eq__(self, other):
|
| 53 |
+
if isinstance(other, Prod):
|
| 54 |
+
return self.products == other.products and self.products == other.products
|
| 55 |
+
else:
|
| 56 |
+
return False
|
| 57 |
+
|
| 58 |
+
def __repr__(self):
|
| 59 |
+
return f'Product({self.products})'
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class T(Constraint):
|
| 63 |
+
"""
|
| 64 |
+
True
|
| 65 |
+
"""
|
| 66 |
+
def __init__(self):
|
| 67 |
+
pass
|
| 68 |
+
|
| 69 |
+
def __eq__(self, other):
|
| 70 |
+
return isinstance(other, T)
|
| 71 |
+
|
| 72 |
+
def __repr__(self):
|
| 73 |
+
return 'True'
|
| 74 |
+
|
| 75 |
+
class F(Constraint):
|
| 76 |
+
"""
|
| 77 |
+
False
|
| 78 |
+
"""
|
| 79 |
+
def __init__(self):
|
| 80 |
+
pass
|
| 81 |
+
|
| 82 |
+
def __eq__(self, other):
|
| 83 |
+
return isinstance(other, F)
|
| 84 |
+
|
| 85 |
+
def __repr__(self):
|
| 86 |
+
return 'False'
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class BinaryConstraint(Constraint):
|
| 90 |
+
"""
|
| 91 |
+
Represents all binary operations
|
| 92 |
+
"""
|
| 93 |
+
def __init__(self, lhs, rhs, op):
|
| 94 |
+
"""
|
| 95 |
+
:param lhs: lhs of the constraint
|
| 96 |
+
:param rhs: rhs of the constraint
|
| 97 |
+
:param op: string representing the operation
|
| 98 |
+
"""
|
| 99 |
+
self.lhs = lhs
|
| 100 |
+
self.rhs = rhs
|
| 101 |
+
self.op = op
|
| 102 |
+
|
| 103 |
+
def __eq__(self, other):
|
| 104 |
+
if isinstance(other, BinaryConstraint):
|
| 105 |
+
return self.lhs == other.lhs and self.rhs == other.rhs and self.op == other.op
|
| 106 |
+
else:
|
| 107 |
+
return False
|
| 108 |
+
|
| 109 |
+
def __repr__(self):
|
| 110 |
+
return f'({self.lhs} {self.op} {self.rhs})'
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class BinConstraintT(BinaryConstraint):
|
| 114 |
+
"""
|
| 115 |
+
Binary constraints about tensors
|
| 116 |
+
"""
|
| 117 |
+
def __init__(self, lhs, rhs, op):
|
| 118 |
+
assert (isinstance(lhs, (TVar, TensorType, int)) or lhs == Dyn) and \
|
| 119 |
+
(isinstance(rhs, (TVar, TensorType, int)) or rhs == Dyn)
|
| 120 |
+
super().__init__(lhs, rhs, op)
|
| 121 |
+
|
| 122 |
+
def __eq__(self, other):
|
| 123 |
+
return super().__eq__(other)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
class BinConstraintD(BinaryConstraint):
|
| 127 |
+
"""
|
| 128 |
+
Binary constraints about dimensions
|
| 129 |
+
"""
|
| 130 |
+
def __init__(self, lhs, rhs, op):
|
| 131 |
+
assert is_algebraic_expression(lhs) or is_dim(lhs) or is_bool_expr(lhs)
|
| 132 |
+
assert is_algebraic_expression(rhs) or is_dim(rhs) or is_bool_expr(rhs)
|
| 133 |
+
|
| 134 |
+
super().__init__(lhs, rhs, op)
|
| 135 |
+
|
| 136 |
+
def __eq__(self, other):
|
| 137 |
+
return super().__eq__(other)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class TGreatestUpperBound(Constraint):
|
| 142 |
+
"""
|
| 143 |
+
Greatest Upper bound for tensors with dynamic type
|
| 144 |
+
"""
|
| 145 |
+
def __init__(self, res, rhs1, rhs2):
|
| 146 |
+
"""
|
| 147 |
+
:param res: tensor variable that stores the result of the outout
|
| 148 |
+
:param rhs1: tensor or tensor variable
|
| 149 |
+
:param rhs2: tensor or tensor variabke
|
| 150 |
+
"""
|
| 151 |
+
self.res = res
|
| 152 |
+
self.rhs1 = rhs1
|
| 153 |
+
self.rhs2 = rhs2
|
| 154 |
+
|
| 155 |
+
def __repr__(self):
|
| 156 |
+
return f'{self.res} = {self.rhs1}\u2294*{self.rhs2}'
|
| 157 |
+
|
| 158 |
+
def __eq__(self, other):
|
| 159 |
+
if isinstance(other, TGreatestUpperBound):
|
| 160 |
+
return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
|
| 161 |
+
else:
|
| 162 |
+
return False
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
class DGreatestUpperBound(Constraint):
|
| 166 |
+
"""
|
| 167 |
+
Greatest Upper bound for dimensions
|
| 168 |
+
"""
|
| 169 |
+
def __init__(self, res, rhs1, rhs2):
|
| 170 |
+
"""
|
| 171 |
+
:param res: Dimension variable to store the result
|
| 172 |
+
:param rhs1: dimension variable 1
|
| 173 |
+
:param rhs2: dimension variable 2
|
| 174 |
+
"""
|
| 175 |
+
assert is_dim(res)
|
| 176 |
+
assert is_dim(rhs1)
|
| 177 |
+
assert is_dim(rhs2)
|
| 178 |
+
|
| 179 |
+
self.res = res
|
| 180 |
+
self.rhs1 = rhs1
|
| 181 |
+
self.rhs2 = rhs2
|
| 182 |
+
|
| 183 |
+
def __repr__(self):
|
| 184 |
+
return f'{self.res} = {self.rhs1}\u2294{self.rhs2}'
|
| 185 |
+
|
| 186 |
+
def __eq__(self, other):
|
| 187 |
+
if isinstance(other, DGreatestUpperBound):
|
| 188 |
+
return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
|
| 189 |
+
else:
|
| 190 |
+
return False
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
class CanReshape(Constraint):
|
| 194 |
+
"""
|
| 195 |
+
can_reshape constraint
|
| 196 |
+
"""
|
| 197 |
+
def __init__(self, src, target):
|
| 198 |
+
"""
|
| 199 |
+
:param src: tensor variable
|
| 200 |
+
:param target: tensor
|
| 201 |
+
"""
|
| 202 |
+
self.src = src
|
| 203 |
+
self.target = target
|
| 204 |
+
|
| 205 |
+
def __repr__(self):
|
| 206 |
+
return f'can-reshape({self.src}, {self.target})'
|
| 207 |
+
|
| 208 |
+
def __eq__(self, other):
|
| 209 |
+
if isinstance(other, CanReshape):
|
| 210 |
+
return self.src == other.src and self.target == other.target
|
| 211 |
+
else:
|
| 212 |
+
return False
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
class IndexSelect(Constraint):
|
| 216 |
+
|
| 217 |
+
def __init__(self, tensor_size, input_var, dim_replace, index, output):
|
| 218 |
+
"""
|
| 219 |
+
Args:
|
| 220 |
+
input_var: input to index_select
|
| 221 |
+
tensor_size: tensor size we are considering
|
| 222 |
+
dim_replace: the dimension of the output at "index"
|
| 223 |
+
index: location of the dimensions to replace in the input
|
| 224 |
+
output: variable to store the result
|
| 225 |
+
"""
|
| 226 |
+
assert isinstance(input_var, TVar)
|
| 227 |
+
assert isinstance(output, TVar)
|
| 228 |
+
assert isinstance(dim_replace, DVar) or dim_replace == Dyn
|
| 229 |
+
assert isinstance(index, int)
|
| 230 |
+
|
| 231 |
+
self.input_var = input_var
|
| 232 |
+
self.tensor_size = tensor_size
|
| 233 |
+
self.dim_replace = dim_replace
|
| 234 |
+
self.index = index
|
| 235 |
+
self.output = output
|
| 236 |
+
|
| 237 |
+
def __repr__(self):
|
| 238 |
+
|
| 239 |
+
return f' {self.output} = ' \
|
| 240 |
+
f'IndexSelect({self.input_var}, ' \
|
| 241 |
+
f'tensor_size: {self.tensor_size}, ' \
|
| 242 |
+
f'{self.dim_replace}, ' \
|
| 243 |
+
f'{self.index})'
|
| 244 |
+
|
| 245 |
+
def __eq__(self, other):
|
| 246 |
+
if isinstance(other, IndexSelect):
|
| 247 |
+
return self.tensor_size == other.tensor_size and \
|
| 248 |
+
self.dim_replace == other.dim_replace and \
|
| 249 |
+
self.index == other.index and \
|
| 250 |
+
self.output == other.output and \
|
| 251 |
+
self.input_var == other.input_var
|
| 252 |
+
else:
|
| 253 |
+
return False
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
class Transpose(Constraint):
|
| 257 |
+
|
| 258 |
+
def __init__(self, tensor_size, input_var, index1, index2, output):
|
| 259 |
+
"""
|
| 260 |
+
Args:
|
| 261 |
+
tensor_size: current tensor size
|
| 262 |
+
input_var: variable to hold input
|
| 263 |
+
index1: dimension 1
|
| 264 |
+
index2: dimension 2
|
| 265 |
+
output: output that stores result
|
| 266 |
+
"""
|
| 267 |
+
assert isinstance(input_var, TVar)
|
| 268 |
+
assert isinstance(output, TVar)
|
| 269 |
+
assert isinstance(index1, int)
|
| 270 |
+
assert isinstance(index2, int)
|
| 271 |
+
|
| 272 |
+
self.input_var = input_var
|
| 273 |
+
self.tensor_size = tensor_size
|
| 274 |
+
self.index1 = index1
|
| 275 |
+
self.index2 = index2
|
| 276 |
+
self.output = output
|
| 277 |
+
|
| 278 |
+
def __repr__(self):
|
| 279 |
+
|
| 280 |
+
return f' {self.output} = ' \
|
| 281 |
+
f'Transpose({self.input_var}, ' \
|
| 282 |
+
f'tensor_size: {self.tensor_size}, ' \
|
| 283 |
+
f'{self.index1}, ' \
|
| 284 |
+
f'{self.index2})'
|
| 285 |
+
|
| 286 |
+
def __eq__(self, other):
|
| 287 |
+
if isinstance(other, Transpose):
|
| 288 |
+
return self.tensor_size == other.tensor_size and \
|
| 289 |
+
self.index1 == other.index1 and \
|
| 290 |
+
self.index2 == other.index2 and \
|
| 291 |
+
self.output == other.output and \
|
| 292 |
+
self.input_var == other.input_var
|
| 293 |
+
else:
|
| 294 |
+
return False
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
class GetItem(Constraint):
|
| 298 |
+
|
| 299 |
+
def __init__(self, tensor_size, index, res, input_var):
|
| 300 |
+
"""
|
| 301 |
+
Constraint for getting item given a tensor size
|
| 302 |
+
:param tensor_size: actual number
|
| 303 |
+
:param index: actual number representing the index
|
| 304 |
+
:param res: dimension variable to carry the item we get
|
| 305 |
+
:param input_var: a tensor variable from which we will get item
|
| 306 |
+
"""
|
| 307 |
+
assert isinstance(res, DVar)
|
| 308 |
+
|
| 309 |
+
self.res = res
|
| 310 |
+
self.tensor_size = tensor_size
|
| 311 |
+
self.index = index
|
| 312 |
+
self.input_var = input_var
|
| 313 |
+
|
| 314 |
+
def __repr__(self):
|
| 315 |
+
return f' {self.res} = GetItem({self.input_var}, tensor_size: {self.tensor_size}, {self.index})'
|
| 316 |
+
|
| 317 |
+
def __eq__(self, other):
|
| 318 |
+
if isinstance(other, GetItem):
|
| 319 |
+
return self.res == other.res and \
|
| 320 |
+
self.tensor_size == other.tensor_size and \
|
| 321 |
+
self.index == other.index and \
|
| 322 |
+
self.input_var == other.input_var
|
| 323 |
+
else:
|
| 324 |
+
return False
|
| 325 |
+
|
| 326 |
+
class GetItemTensor(Constraint):
|
| 327 |
+
|
| 328 |
+
def __init__(self, tensor_size, index_tuple, res, input_var):
|
| 329 |
+
"""
|
| 330 |
+
Constraint for getting item given a tensor size
|
| 331 |
+
However, when the argument is a tuple, we will
|
| 332 |
+
expect a tensor
|
| 333 |
+
:param tensor_size: actual number representing the rank
|
| 334 |
+
:param index_tuple: tuple for indexing
|
| 335 |
+
:param res: tensor variable to carry the item we get
|
| 336 |
+
:param input_var: a tensor variable from which we will get item
|
| 337 |
+
"""
|
| 338 |
+
assert isinstance(res, TVar)
|
| 339 |
+
|
| 340 |
+
self.res = res
|
| 341 |
+
self.tensor_size = tensor_size
|
| 342 |
+
self.index_tuple = index_tuple
|
| 343 |
+
self.input_var = input_var
|
| 344 |
+
|
| 345 |
+
def __repr__(self):
|
| 346 |
+
return f' {self.res} = GetItemT({self.input_var}, tensor_size: {self.tensor_size}, {self.index_tuple})'
|
| 347 |
+
|
| 348 |
+
def __eq__(self, other):
|
| 349 |
+
if isinstance(other, GetItemTensor):
|
| 350 |
+
return self.res == other.res and \
|
| 351 |
+
self.tensor_size == other.tensor_size and \
|
| 352 |
+
self.index_tuple == other.index_tuple and \
|
| 353 |
+
self.input_var == other.input_var
|
| 354 |
+
else:
|
| 355 |
+
return False
|
| 356 |
+
|
| 357 |
+
class CalcConv(Constraint):
|
| 358 |
+
|
| 359 |
+
def __init__(self, conv_result, input_var, c_out, kernel, padding, stride, dilation, matching_constraint_vars):
|
| 360 |
+
"""
|
| 361 |
+
:param conv_result: the convolution result
|
| 362 |
+
:param input_var: input to convolution
|
| 363 |
+
:param c_out: output chanel type
|
| 364 |
+
:param kernel: kernel tuple
|
| 365 |
+
"""
|
| 366 |
+
self.conv_result = conv_result
|
| 367 |
+
self.input_var = input_var
|
| 368 |
+
self.c_out = c_out
|
| 369 |
+
self.kernel = kernel
|
| 370 |
+
self.padding = padding
|
| 371 |
+
self.stride = stride
|
| 372 |
+
self.dilation = dilation
|
| 373 |
+
self.matching_constraint = matching_constraint_vars
|
| 374 |
+
|
| 375 |
+
def __repr__(self):
|
| 376 |
+
return f'{self.conv_result} =' \
|
| 377 |
+
f' calc-conv({self.input_var},' \
|
| 378 |
+
f' {self.c_out}, {self.kernel}, ' \
|
| 379 |
+
f'{self.padding}, {self.stride},' \
|
| 380 |
+
f' {self.dilation})'
|
| 381 |
+
|
| 382 |
+
def __eq__(self, other):
|
| 383 |
+
if isinstance(other, CalcConv):
|
| 384 |
+
return self.conv_result == other.conv_result and self.input_var == other.input_var and \
|
| 385 |
+
self.c_out == other.c_out and self.kernel == other.kernel and self.padding == other.padding \
|
| 386 |
+
and self.stride == other.stride and self.dilation == other.dilation \
|
| 387 |
+
and self.matching_constraint == other.matching_constraint
|
| 388 |
+
else:
|
| 389 |
+
return False
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
class CalcMaxPool(Constraint):
|
| 393 |
+
|
| 394 |
+
def __init__(self, maxpool_result, input_var, kernel, padding, stride, dilation, matching_constraint_vars):
|
| 395 |
+
"""
|
| 396 |
+
:param maxpool_result: the result of maxpool
|
| 397 |
+
:param input_var: input to convolution
|
| 398 |
+
:param kernel: kernel tuple
|
| 399 |
+
"""
|
| 400 |
+
self.maxpool_result = maxpool_result
|
| 401 |
+
self.input_var = input_var
|
| 402 |
+
self.kernel = kernel
|
| 403 |
+
self.padding = padding
|
| 404 |
+
self.stride = stride
|
| 405 |
+
self.dilation = dilation
|
| 406 |
+
self.matching_constraint = matching_constraint_vars
|
| 407 |
+
|
| 408 |
+
def __repr__(self):
|
| 409 |
+
return f'{self.maxpool_result} =' \
|
| 410 |
+
f' calc-maxpool({self.input_var},' \
|
| 411 |
+
f' {self.kernel}, ' \
|
| 412 |
+
f'{self.padding}, {self.stride},' \
|
| 413 |
+
f' {self.dilation})'
|
| 414 |
+
|
| 415 |
+
def __eq__(self, other):
|
| 416 |
+
if isinstance(other, CalcMaxPool):
|
| 417 |
+
return self.maxpool_result == other.maxpool_result and self.input_var == other.input_var \
|
| 418 |
+
and self.kernel == other.kernel and self.padding == other.padding \
|
| 419 |
+
and self.stride == other.stride and self.dilation == other.dilation \
|
| 420 |
+
and self.matching_constraint == other.matching_constraint
|
| 421 |
+
else:
|
| 422 |
+
return False
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
class ApplyBroadcasting(Constraint):
|
| 426 |
+
def __init__(self, res1, res2, input1, input2):
|
| 427 |
+
"""
|
| 428 |
+
:param res1: resulting tensor 1
|
| 429 |
+
:param res2: resulting tensor 2
|
| 430 |
+
:param input1: tensor variable 1
|
| 431 |
+
:param input2: tensor variable 2
|
| 432 |
+
"""
|
| 433 |
+
self.res1 = res1
|
| 434 |
+
self.res2 = res2
|
| 435 |
+
self.input1 = input1
|
| 436 |
+
self.input2 = input2
|
| 437 |
+
|
| 438 |
+
def __eq__(self, other):
|
| 439 |
+
if isinstance(other, ApplyBroadcasting):
|
| 440 |
+
return self.res1 == other.res1 \
|
| 441 |
+
and self.res2 == other.res2 \
|
| 442 |
+
and self.input1 == other.input1 \
|
| 443 |
+
and self.input2 == other.input2
|
| 444 |
+
else:
|
| 445 |
+
return False
|
| 446 |
+
|
| 447 |
+
def __repr__(self):
|
| 448 |
+
return f'{self.res1}, {self.res2} ='f' apply-broadcasting({self.input1},' f' {self.input2})'
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
class CalcProduct(Constraint):
|
| 452 |
+
"""
|
| 453 |
+
Given correct dimensions, calculate the product for flatten accounting for Dyn
|
| 454 |
+
"""
|
| 455 |
+
def __init__(self, start, end, flattened, dims_to_flatten):
|
| 456 |
+
"""
|
| 457 |
+
:param start: start index
|
| 458 |
+
:param end: end index
|
| 459 |
+
:param flattened: variable to store the product
|
| 460 |
+
:param dims_to_flatten: the type which we will flatten
|
| 461 |
+
"""
|
| 462 |
+
assert isinstance(dims_to_flatten, list)
|
| 463 |
+
assert isinstance(flattened, TVar)
|
| 464 |
+
assert isinstance(start, int)
|
| 465 |
+
assert isinstance(end, int)
|
| 466 |
+
|
| 467 |
+
self.start = start
|
| 468 |
+
self.end = end
|
| 469 |
+
self.dims_to_flatten = dims_to_flatten
|
| 470 |
+
self.flattened = flattened
|
| 471 |
+
|
| 472 |
+
def __eq__(self, other):
|
| 473 |
+
if isinstance(other, CalcProduct):
|
| 474 |
+
return self.start == other.start and self.end == other.end and \
|
| 475 |
+
self.dims_to_flatten == other.dims_to_flatten and self.flattened == other.flattened
|
| 476 |
+
|
| 477 |
+
else:
|
| 478 |
+
return False
|
| 479 |
+
|
| 480 |
+
def __repr__(self):
|
| 481 |
+
return f'{self.flattened} = CalcProduct({self.start}, {self.end}, {self.dims_to_flatten})'
|
| 482 |
+
|
| 483 |
+
|
| 484 |
+
class TVar:
|
| 485 |
+
"""
|
| 486 |
+
Tensor variable with no tensor constructor
|
| 487 |
+
"""
|
| 488 |
+
def __init__(self, tvar):
|
| 489 |
+
"""
|
| 490 |
+
:param tvar: tensor variable
|
| 491 |
+
"""
|
| 492 |
+
self.tvar = tvar
|
| 493 |
+
|
| 494 |
+
def __repr__(self):
|
| 495 |
+
return f'TV({self.tvar})'
|
| 496 |
+
|
| 497 |
+
def __eq__(self, other):
|
| 498 |
+
if isinstance(other, TVar):
|
| 499 |
+
return self.tvar == other.tvar
|
| 500 |
+
else:
|
| 501 |
+
return False
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
class DVar:
|
| 505 |
+
"""
|
| 506 |
+
Dimension variable
|
| 507 |
+
"""
|
| 508 |
+
def __init__(self, c):
|
| 509 |
+
"""
|
| 510 |
+
:param c: character or number
|
| 511 |
+
"""
|
| 512 |
+
self.c = c
|
| 513 |
+
|
| 514 |
+
def __repr__(self):
|
| 515 |
+
return f'DV({self.c})'
|
| 516 |
+
|
| 517 |
+
def __eq__(self, other):
|
| 518 |
+
if isinstance(other, DVar):
|
| 519 |
+
return self.c == other.c
|
| 520 |
+
else:
|
| 521 |
+
return False
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
class BVar:
|
| 525 |
+
"""
|
| 526 |
+
Boolean variable
|
| 527 |
+
"""
|
| 528 |
+
def __init__(self, c):
|
| 529 |
+
"""
|
| 530 |
+
:param c: character or number
|
| 531 |
+
"""
|
| 532 |
+
self.c = c
|
| 533 |
+
|
| 534 |
+
def __repr__(self):
|
| 535 |
+
return f'BV({self.c})'
|
| 536 |
+
|
| 537 |
+
def __eq__(self, other):
|
| 538 |
+
if isinstance(other, BVar):
|
| 539 |
+
return self.c == other.c
|
| 540 |
+
else:
|
| 541 |
+
return False
|
| 542 |
+
|
| 543 |
+
|
| 544 |
+
def is_algebraic_expression(constraint):
|
| 545 |
+
if isinstance(constraint, BinConstraintD):
|
| 546 |
+
return constraint.op in [op_add, op_sub, op_div, op_mul, op_mod]
|
| 547 |
+
else:
|
| 548 |
+
return isinstance(constraint, Prod)
|
| 549 |
+
|
| 550 |
+
|
| 551 |
+
def is_bool_expr(constraint):
|
| 552 |
+
if isinstance(constraint, BinConstraintD):
|
| 553 |
+
return constraint.op in [op_gt, op_lt, op_neq, op_eq]
|
| 554 |
+
else:
|
| 555 |
+
return isinstance(constraint, (BVar, Conj, Disj))
|
| 556 |
+
|
| 557 |
+
def is_dim(d):
|
| 558 |
+
return isinstance(d, (DVar, int)) or d == Dyn
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py
ADDED
|
@@ -0,0 +1,1040 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
import copy
|
| 3 |
+
import itertools
|
| 4 |
+
from torch.fx.experimental.migrate_gradual_types.constraint_generator import BinConstraintT, MAX_TENSOR_RANK
|
| 5 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import T, BinConstraintD, Conj, Constraint, DVar, TVar, \
|
| 6 |
+
Transpose
|
| 7 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import Disj, TGreatestUpperBound
|
| 8 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import DGreatestUpperBound
|
| 9 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import CalcConv, CalcMaxPool
|
| 10 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import CalcProduct, CanReshape
|
| 11 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, Prod, F, GetItem, GetItemTensor, IndexSelect
|
| 12 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_eq, op_precision, op_leq, op_matching
|
| 13 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_consistency, op_neq
|
| 14 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_mul, op_add, op_sub, op_div, op_mod
|
| 15 |
+
from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar
|
| 16 |
+
from torch.fx.tensor_type import TensorType, Dyn
|
| 17 |
+
from typing import Callable, Dict, List
|
| 18 |
+
|
| 19 |
+
_TRANSFORMATION_RULES: Dict[Constraint, Callable] = {}
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def register_transformation_rule(call_target):
    """
    Decorator factory: register the decorated function as the transformation
    rule for the constraint class ``call_target``.

    The registry is the module-level ``_TRANSFORMATION_RULES`` dict, consulted
    by ``transform_constraint``.

    Raises:
        RuntimeError: if a rule for ``call_target`` was already registered.
    """
    def register(fn):
        if call_target in _TRANSFORMATION_RULES:
            raise RuntimeError(f'Transformation rule already registered for {call_target}!')
        _TRANSFORMATION_RULES[call_target] = fn
        # Return fn unchanged so the decorated name stays directly callable.
        return fn
    return register
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def valid_index(index, dims):
    """
    Given a list of dimensions, check whether *index* is in range for the list.
    Returns the constraint T() when valid, F() otherwise.
    (Negative indices follow normal Python indexing and count as valid.)
    """
    try:
        dims[index]
    except IndexError:
        return F()
    return T()
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@register_transformation_rule(Transpose)
def transform_transpose(constraint, counter):
    """
    Transform a Transpose constraint; similar to a sequence of two index-selects.
    Returns the simplified constraint and the updated variable counter.
    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    index1_ok = valid_index(constraint.index1, dims)
    index2_ok = valid_index(constraint.index2, dims)
    swapped = copy.deepcopy(dims)
    nat_constraints = gen_nat_constraints(dims)

    # Swap the two dimensions only when both indices are in range; otherwise the
    # validity constraints included below make the whole conjunction unsatisfiable.
    if index1_ok == T() and index2_ok == T():
        swapped[constraint.index1], swapped[constraint.index2] = \
            dims[constraint.index2], dims[constraint.index1]

    return Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                 *nat_constraints,
                 index1_ok, index2_ok,
                 BinConstraintT(constraint.output, TensorType(swapped), op_eq)]), counter
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@register_transformation_rule(IndexSelect)
def transform_index_select(constraint, counter):
    """
    The constraints consider the given tensor size, check if the index is valid
    and if so, generate a constraint for replacing the input dimension
    with the required dimension.

    Args:
        constraint: IndexSelect constraint to transform
        counter: variable tracking counter

    Returns: the transformed constraint and the updated counter
    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    is_valid_index = valid_index(constraint.index, dims)
    nat_constraints = gen_nat_constraints(dims)

    # If the index is valid then replace the input dimension with the new
    # dimension; otherwise the dimensions are left unchanged and the clause
    # contains F() (via is_valid_index), making it unsatisfiable.
    # Bug fix: new_dims is now always initialized — previously it was only
    # assigned inside the if-branch, so an invalid index raised
    # UnboundLocalError at the Conj construction below instead of producing
    # the False-containing clause the original comment described.
    new_dims = copy.deepcopy(dims)
    if is_valid_index == T():
        new_dims[constraint.index] = constraint.dim_replace

    transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                                   *nat_constraints,
                                   is_valid_index,
                                   BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])

    return transformed_constraint, counter
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
@register_transformation_rule(GetItem)
def transform_get_item(constraint, counter):
    """
    generate an equality of the form:
    t = [a1, ..., an]
    then generate constraints that check if the given index is valid
    given this particular tensor size.
    If the index is valid, generate a constraint to get the item
    Note that we already handled the Dyn input case in the previous
    step.
    Args:
        constraint: GetItem which assumes we are getting an item from a tensor (not Dyn)
        counter: variable tracking
    Returns: simplified constraints for GetItem

    """
    # Fresh dimension variables for the input tensor, plus naturalness
    # (non-negativity) constraints on each of them.
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    nat_constraints = gen_nat_constraints(dims)


    is_valid_index = valid_index(constraint.index, dims)

    all_constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                       *nat_constraints,
                       is_valid_index]

    # if the index is valid, we generate a constraint for getting an item
    # otherwise this clause will have been UNSAT due to the wrong index
    if is_valid_index == T():
        all_constraints.append(BinConstraintD(constraint.res, dims[constraint.index], op_eq))

    return Conj(all_constraints), counter
|
| 122 |
+
|
| 123 |
+
def valid_index_tensor(index, dims):
    """
    If the number of slice instances in *index* exceeds the length of *dims*,
    this is a type error, so return F(); otherwise return T().
    """
    num_slices = sum(isinstance(entry, slice) for entry in index)
    return F() if num_slices > len(dims) else T()
|
| 136 |
+
|
| 137 |
+
@register_transformation_rule(GetItemTensor)
def transform_get_item_tensor(constraint, counter):
    """
    When the index is a tuple, then the output will be a tensor
    TODO: we have to check if this is the case for all HF models

    The cases we are covering here are a tuple with one of:
     - slice with default argument
     - None

    None appends 1 to the input tensor dimensions
    so each occurrence of 'None' increases the rank by 1

    slice with default arguments does not change the rank
    """
    assert isinstance(constraint.index_tuple, tuple)


    # generate a result tensor of the expected size
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    nat_constraints = gen_nat_constraints(dims)

    # generate a place-holder list of the right rank
    # where "slice" does not contribute to the rank and "None" does
    none_c = constraint.index_tuple.count(None)
    resulting_tensor_dims = (none_c + len(dims)) * [None]

    # NOTE(review): this assignment is dead — dim_index is reset to 0 again
    # before its first real use in the second loop below.
    dim_index = 0
    for i in range(len(constraint.index_tuple)):

        # append 1 to the right location of the resulting tensor
        if constraint.index_tuple[i] is None:
            resulting_tensor_dims[i] = 1

        elif constraint.index_tuple[i] == slice(None, None, None):
            pass

        else:
            raise NotImplementedError('Method not yet implemented')

    # append the remaining dimensions to the right location
    # (fill every slot not claimed by a None, in order, with the input dims)
    dim_index = 0
    for i in range(len(resulting_tensor_dims)):
        if resulting_tensor_dims[i] is None:
            resulting_tensor_dims[i] = dims[dim_index]
            dim_index += 1

    # check if the index is valid
    is_valid_index = valid_index_tensor(constraint.index_tuple, dims)

    # check if the resulting tensor is within bounds
    # (tensors of rank > 4 are not supported by this constraint system)
    if len(resulting_tensor_dims) > 4:
        return F(), counter

    else:
        constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                       BinConstraintT(constraint.res, TensorType(resulting_tensor_dims), op_eq),
                       *nat_constraints,
                       is_valid_index]
        return Conj(constraints), counter
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
@register_transformation_rule(BinConstraintT)
def generate_binconstraint_t(constraint, counter):
    """
    Transform binary constraints for tensors

    Handles four operators: precision (gradual-typing refinement), matching
    (against a rank-4 shape), consistency, and leq (rank bound). Any other
    operator is returned unchanged.
    """

    # precision constraints
    if constraint.op == op_precision:
        if constraint.lhs == Dyn:
            # Dyn is less precise than everything, so the constraint is trivially true.
            return T(), counter
        elif isinstance(constraint.lhs, TensorType):
            is_fully_static = all(d != Dyn for d in constraint.lhs.__args__)
            if is_fully_static:
                # A fully-static type can only be refined by an equal type.
                return BinConstraintT(constraint.lhs, constraint.rhs, op_eq), counter
            else:
                # Introduce a fresh dimension variable per lhs dimension, and
                # require: per-dimension precision, rhs equals the fresh shape,
                # and every fresh dimension is at least 1.
                new_dims = []

                for _ in range(len(constraint.lhs.__args__)):
                    dim, counter = gen_dvar(counter)
                    new_dims.append(dim)

                new_dim_constraints = [BinConstraintD(old_dim, new_dim, op_precision) for
                                       new_dim, old_dim in zip(new_dims, constraint.lhs.__args__)] + \
                    [BinConstraintT(constraint.rhs, TensorType(new_dims), op_eq)] + \
                    [BinConstraintD(1, new_dim, op_leq) for
                     new_dim in new_dims]
                return Conj(new_dim_constraints), counter
        # NOTE(review): if lhs is neither Dyn nor a TensorType this function
        # falls off the end and implicitly returns None — presumably that case
        # cannot occur for op_precision; verify against the constraint generator.

    # matching
    elif constraint.op == op_matching:
        assert isinstance(constraint.rhs, TensorType)
        d1 = constraint.rhs.__args__[0]
        d2 = constraint.rhs.__args__[1]
        d3 = constraint.rhs.__args__[2]
        d4 = constraint.rhs.__args__[3]

        # Either the lhs is fully dynamic (and so is every rhs dimension),
        # or the lhs is exactly the rank-4 rhs shape.
        conj = [BinConstraintT(constraint.lhs, Dyn, op_eq),
                BinConstraintD(d1, Dyn, op_eq),
                BinConstraintD(d2, Dyn, op_eq),
                BinConstraintD(d3, Dyn, op_eq),
                BinConstraintD(d4, Dyn, op_eq)]
        return Disj([Conj(conj),
                     BinConstraintT(constraint.lhs, TensorType([d1, d2, d3, d4]), op_eq)]), counter

    elif constraint.op == op_consistency:
        # Consistent if either side is Dyn, or the shapes agree rank-by-rank (1-4).
        c_dyn = Disj([BinConstraintT(constraint.lhs, Dyn, op_eq), BinConstraintT(constraint.rhs, Dyn, op_eq)])
        [c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4], counter = gen_consistency_constraints(constraint, counter)

        return Disj([c_dyn, c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4]), counter

    elif constraint.op == op_leq:
        # Rank bound: the lhs is Dyn or a tensor of some rank 1..rhs.
        assert isinstance(constraint.rhs, int)
        disj = [BinConstraintT(constraint.lhs, Dyn, op_eq)]
        for i in range(1, constraint.rhs + 1):
            dims = []
            for j in range(1, i + 1):
                dim_var, counter = gen_dvar(counter)
                dims.append(dim_var)
            disj.append(BinConstraintT(constraint.lhs, TensorType(dims), op_eq))
        return Disj(disj), counter
    else:
        return constraint, counter
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
@register_transformation_rule(BinConstraintD)
def generate_binconstraint_d(constraint, counter):
    """
    Transform binary constraints for dimensions

    precision is turned into equality (for ints) or T() (for Dyn);
    consistency is turned into a disjunction of equalities; every other
    operator is returned unchanged.
    """
    if constraint.op == op_precision:
        if isinstance(constraint.lhs, int):
            # A concrete dimension can only be refined by an equal dimension.
            return BinConstraintD(constraint.lhs, constraint.rhs, op_eq), counter
        elif constraint.lhs == Dyn:
            # Dyn is less precise than everything.
            return T(), counter
        # NOTE(review): if lhs is neither an int nor Dyn (e.g. a DVar) this
        # branch implicitly returns None — presumably that case never reaches
        # here for op_precision; verify against the constraint generator.

    elif constraint.op == op_consistency:
        # Dimensions are consistent when equal or when either side is Dyn.
        return Disj([BinConstraintD(constraint.lhs, constraint.rhs, op_eq),
                     BinConstraintD(constraint.rhs, Dyn, op_eq), BinConstraintD(constraint.lhs, Dyn, op_eq)]), counter

    else:
        return constraint, counter
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
@register_transformation_rule(Conj)
def generate_conj(constraint, counter):
    """
    Transform conjunctions by transforming each conjunct in turn,
    threading the variable counter through the recursive calls.
    """
    # NOTE: the attribute is spelled 'conjucts' on the Conj class.
    transformed = []
    for conjunct in constraint.conjucts:
        simplified, counter = transform_constraint(conjunct, counter)
        transformed.append(simplified)
    return Conj(transformed), counter
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
@register_transformation_rule(Disj)
def generate_disj(constraint, counter):
    """
    Transform disjunctions by transforming each disjunct in turn,
    threading the variable counter through the recursive calls.
    """
    transformed = []
    for disjunct in constraint.disjuncts:
        simplified, counter = transform_constraint(disjunct, counter)
        transformed.append(simplified)
    return Disj(transformed), counter
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
@register_transformation_rule(TGreatestUpperBound)
def generate_gub(constraint, counter):
    """
    Transform greatest upper bound for tensors. Results in equality and
    Greatest Upper Bound on dimensions.
    """
    # Either operand being Dyn forces the result to Dyn.
    dyn_case = Conj([Disj([BinConstraintT(constraint.rhs1, Dyn, op_eq),
                           BinConstraintT(constraint.rhs2, Dyn, op_eq)]),
                     BinConstraintT(constraint.res, Dyn, op_eq)])

    # One case per supported tensor rank (1 through 4).
    tensor_cases, counter = gen_greatest_upper_bound(constraint, counter)

    return Disj([dyn_case, *tensor_cases]), counter
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
@register_transformation_rule(DGreatestUpperBound)
def generate_d_gub(constraint, counter):
    """
    Transform greatest upper bound for dimensions into equality constraints.
    """
    res, first, second = constraint.res, constraint.rhs1, constraint.rhs2

    # first operand dynamic -> the result mirrors the second operand
    first_dyn = Conj([BinConstraintD(first, Dyn, op_eq), BinConstraintD(res, second, op_eq)])
    # second operand dynamic -> the result mirrors the first operand
    second_dyn = Conj([BinConstraintD(second, Dyn, op_eq), BinConstraintD(res, first, op_eq)])
    # operands equal -> the result equals them
    both_equal = Conj([BinConstraintD(second, first, op_eq), BinConstraintD(res, first, op_eq)])

    return Disj([first_dyn, second_dyn, both_equal]), counter
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
@register_transformation_rule(CalcConv)
def generate_calc_conv(constraint, counter):
    """
    Transform convolution-output constraints into constraints over a fresh
    rank-4 result shape.

    Args:
        constraint: CalcConv constraint to transform
        counter: variable tracking counter

    Returns: the transformed constraint and the updated counter
    """
    d, counter = gen_tensor_dims(4, counter)
    conv_result = TensorType([d[0], d[1], d[2], d[3]])

    # the convolution result is a tensor of size 4
    c1 = BinConstraintT(constraint.conv_result, conv_result, op_eq)

    # the second dimension of the output is equal to the output channels
    c2 = Conj([BinConstraintD(d[1], constraint.c_out, op_eq), BinConstraintD(d[1], Dyn, op_neq)])

    # the input corresponds to the output in the first dimension of the convolution
    c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)

    # spatial (last two) dimensions follow the conv arithmetic
    c4, c5 = calc_last_two_dims(constraint, d)

    # all output dimensions are natural numbers
    leq_constraints = Conj([BinConstraintD(0, d[0], op_leq),
                            BinConstraintD(0, d[1], op_leq),
                            BinConstraintD(0, d[2], op_leq),
                            BinConstraintD(0, d[3], op_leq)])

    return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
@register_transformation_rule(CalcMaxPool)
def generate_calc_maxpool(constraint, counter):
    """
    Transform maxpool constraints

    Like generate_calc_conv, but max-pooling preserves the channel dimension,
    so both of the first two output dimensions match the input.

    Args:
        constraint: CalcMaxPool constraint to transform
        counter: variable tracking counter

    Returns: the transformed constraint and the updated counter
    """
    d, counter = gen_tensor_dims(4, counter)
    maxpool_result = TensorType([d[0], d[1], d[2], d[3]])

    # the maxpool result is a tensor of size 4
    c1 = BinConstraintT(constraint.maxpool_result, maxpool_result, op_eq)

    # the input corresponds to the output in the first and second dimension of maxpool
    c2 = BinConstraintD(constraint.matching_constraint[1], d[1], op_eq)
    c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)
    # spatial (last two) dimensions follow the pooling arithmetic
    c4, c5 = calc_last_two_dims(constraint, d)

    # all output dimensions are natural numbers
    leq_constraints = Conj([BinConstraintD(0, d[0], op_leq),
                            BinConstraintD(0, d[1], op_leq),
                            BinConstraintD(0, d[2], op_leq),
                            BinConstraintD(0, d[3], op_leq)])

    return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
@register_transformation_rule(CalcProduct)
def generate_calc_product(constraint, counter):
    """
    Transform flatten constraints

    The dimensions in [start, end) are collapsed into a single dimension:
    either Dyn (when any collapsed dimension may be Dyn) or a fresh variable
    constrained to equal their product.
    """
    start = constraint.start
    end = constraint.end
    dims = constraint.dims_to_flatten
    flattened = constraint.flattened
    n = len(constraint.dims_to_flatten)

    # this will be evaluated right here
    boundary_check = (0 <= start and start < end and end <= n)

    c_boundary = T() if boundary_check else F()

    # dimensions kept before / after the flattened range, and the range itself
    lhs = dims[0:start]
    rhs = dims[end:]
    mid = dims[start:end]

    # every combination of (== Dyn / != Dyn) for the dimensions being flattened
    all_possibilities = generate_all_int_dyn_dim_possibilities(mid)

    all_constraints = []

    for p in all_possibilities:
        p = list(p)
        # this tells us there is a dynamic variable
        # (note: the generator variable deliberately shadows the outer
        # `constraint` parameter only inside this expression)
        contains_dyn = not all(constraint.op == op_neq for constraint in p)
        if contains_dyn:
            # the collapsed dimension is Dyn
            mid_var = [Dyn]
            total_constraints = lhs + mid_var + rhs
            if len(total_constraints) > 4:
                # resulting rank exceeds the supported maximum of 4
                all_constraints.append(F())
            else:
                all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq)] + p))
        else:
            # all collapsed dimensions are static: introduce a fresh variable
            # equal to their product
            new_var, counter = gen_dvar(counter)
            mid_eq_prod = Conj([BinConstraintD(new_var, Prod(mid), op_eq), BinConstraintD(new_var, Dyn, op_neq)])
            mid_var = [new_var]
            total_constraints = lhs + mid_var + rhs
            if len(total_constraints) > 4:
                all_constraints.append(F())
            else:
                all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq), mid_eq_prod] + p))

    return Conj([Disj(all_constraints), c_boundary]), counter
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
@register_transformation_rule(CanReshape)
def generate_reshape(constraint, counter):
    """
    Transform reshape constraints.

    Generates a fresh rank-4 candidate shape for the source tensor and, for
    each possible source rank (or a fully dynamic source), requires that the
    source can be reshaped into the target: when the target is fully static,
    the source element count must equal (or be divisible into) the target's
    product; when the target contains Dyn, the static part of the target must
    divide the source's product.

    Args:
        constraint: CanReshape constraint to transform
        counter: variable tracking counter

    Returns: the transformed constraint and the updated counter
    """
    d, counter = gen_tensor_dims(4, counter)

    d1 = d[0]
    d2 = d[1]
    d3 = d[2]
    d4 = d[3]

    target = constraint.target.__args__

    is_fully_static = all(d != Dyn for d in target)

    # dynamic tensor
    c1_dyn = BinConstraintT(constraint.src, Dyn, op_eq)
    c2_tensor1 = BinConstraintT(constraint.src, TensorType([d1]), op_eq)
    c2_tensor2 = BinConstraintT(constraint.src, TensorType([d1, d2]), op_eq)
    c2_tensor3 = BinConstraintT(constraint.src, TensorType([d1, d2, d3]), op_eq)
    c2_tensor4 = BinConstraintT(constraint.src, TensorType([d1, d2, d3, d4]), op_eq)

    d1_eq_dyn = BinConstraintD(d1, Dyn, op_eq)
    d1_neq_dyn = BinConstraintD(d1, Dyn, op_neq)

    d2_eq_dyn = BinConstraintD(d2, Dyn, op_eq)
    d2_neq_dyn = BinConstraintD(d2, Dyn, op_neq)

    d3_eq_dyn = BinConstraintD(d3, Dyn, op_eq)
    d3_neq_dyn = BinConstraintD(d3, Dyn, op_neq)

    # Bug fix: these two previously referenced d3 (copy-paste typo), which
    # duplicated the d3 constraints and left d4's dyn-ness unconstrained in
    # the rank-4 cases below.
    d4_eq_dyn = BinConstraintD(d4, Dyn, op_eq)
    d4_neq_dyn = BinConstraintD(d4, Dyn, op_neq)

    nat_d1 = BinConstraintD(0, d1, op_leq)
    nat_d2 = BinConstraintD(0, d2, op_leq)
    nat_d3 = BinConstraintD(0, d3, op_leq)
    nat_d4 = BinConstraintD(0, d4, op_leq)

    if is_fully_static:
        # size 1 tensor
        c3_tensor1 = Disj([d1_eq_dyn,
                           (Conj([d1_neq_dyn,
                                  BinConstraintD(d1, Prod(target), op_eq)]))])
        all_tensor_1 = Conj([c2_tensor1, c3_tensor1])

        # size 2 tensor
        all_tensor_2 = Conj([c2_tensor2, gen_all_reshape_possibilities([d1, d2], target)])

        # size 3 tensor
        all_tensor_3 = Conj([c2_tensor3, gen_all_reshape_possibilities([d1, d2, d3], target)])

        # size 4 tensor
        all_tensor_4 = Conj([c2_tensor4, gen_all_reshape_possibilities([d1, d2, d3, d4], target)])

        return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]),
                     nat_d1, nat_d2, nat_d3, nat_d4]), counter

    # then there must be exactly one occurrence of dyn
    else:
        # the static part of the target must divide the source's product
        new_target = []

        for n in target:
            if n != Dyn:
                new_target.append(n)

        # tensor 1
        c3_tensor1 = Disj([d1_eq_dyn,
                           (Conj([d1_neq_dyn,
                                  is_dim_div_by_target(new_target, d1)]))])
        all_tensor_1 = Conj([c2_tensor1, c3_tensor1])

        # tensor 2
        c21 = Disj([d1_eq_dyn, d2_eq_dyn])
        c22 = Conj([d1_neq_dyn, d2_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2]))])
        all_tensor_2 = Conj([c2_tensor2, Disj([c21, c22])])

        # tensor 3
        c31 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn])
        c32 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3]))])
        all_tensor_3 = Conj([c2_tensor3, Disj([c31, c32])])

        # tensor 4
        c41 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn, d4_eq_dyn])
        c42 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, d4_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3, d4]))])
        all_tensor_4 = Conj([c2_tensor4, Disj([c41, c42])])

        return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]),
                     nat_d1, nat_d2, nat_d3, nat_d4]), counter
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
@register_transformation_rule(ApplyBroadcasting)
def generate_broadcasting(constraint, counter):
    """
    Transform broadcasting constraints

    Produces a disjunction over: either input being fully dynamic (in which
    case both results simply equal their inputs), or both inputs being tensors
    of rank 1 through 4, with the rank-specific broadcasting cases (no padding,
    or padding of either argument) delegated to gen_broadcasting_constraints.
    """
    # res1/res2 are the broadcast results for input1/input2 respectively
    e11, e12 = constraint.res1, constraint.res2
    e1, e2 = constraint.input1, constraint.input2

    e1_dyn = BinConstraintT(e1, Dyn, op_eq)
    e2_dyn = BinConstraintT(e2, Dyn, op_eq)

    # Introduce dimensions
    e1_equal_e11 = BinConstraintT(e1, e11, op_eq)
    e2_equal_e12 = BinConstraintT(e2, e12, op_eq)

    # dyn possibility: a fully dynamic operand means no reshaping happens
    e1_dyn_constraint = Conj([e1_dyn, e1_equal_e11, e2_equal_e12])
    e2_dyn_constraint = Conj([e2_dyn, e1_equal_e11, e2_equal_e12])

    # tensor possibility
    # generate dimensions to create tensors of size 1
    # (rank 1 has no padding cases, so the two padding slots are discarded)
    final_tensor_1_constraint, _, _, nat_dims_1, counter = \
        gen_broadcasting_constraints(e1, e2, e11, e12, 1, counter)

    # generate dimensions to create tensors of size 2
    final_tensor_2_constraint_no_padding, final_tensor_2_constraint_padding_arg1, \
        final_tensor_2_constraint_padding_arg2, nat_dims_2, counter = \
        gen_broadcasting_constraints(e1, e2, e11, e12, 2, counter)

    # generate dimensions to create tensors of size 3
    final_tensor_3_constraint_no_padding, final_tensor_3_constraint_padding_arg1, \
        final_tensor_3_constraint_padding_arg2, nat_dims_3, counter = \
        gen_broadcasting_constraints(e1, e2, e11, e12, 3, counter)

    # generate dimensions to create tensors of size 4
    final_tensor_4_constraint_no_padding, final_tensor_4_constraint_padding_arg1, \
        final_tensor_4_constraint_padding_arg2, nat_dims_4, counter = \
        gen_broadcasting_constraints(e1, e2, e11, e12, 4, counter)

    final_result = Disj([
        e1_dyn_constraint,
        e2_dyn_constraint,
        final_tensor_1_constraint,
        final_tensor_2_constraint_no_padding,
        final_tensor_2_constraint_padding_arg1,
        final_tensor_2_constraint_padding_arg2,
        final_tensor_3_constraint_no_padding,
        final_tensor_3_constraint_padding_arg1,
        final_tensor_3_constraint_padding_arg2,
        final_tensor_4_constraint_no_padding,
        final_tensor_4_constraint_padding_arg1,
        final_tensor_4_constraint_padding_arg2
    ])

    # naturalness constraints for all generated dimensions apply unconditionally
    return Conj([final_result, *nat_dims_1, *nat_dims_2, *nat_dims_3, *nat_dims_4]), counter
|
| 574 |
+
|
| 575 |
+
|
| 576 |
+
def transform_constraint(constraint: Constraint, counter: int):
    """
    Transforms a constraint into a simpler constraint.
    Ex: precision and consistency are transformed to equality
    Args:
        constraint: constraint to be transformed
        counter: for variable tracking

    Returns: Constraint and the current variable counter

    """
    # dispatch on the constraint's concrete type; a constraint with no
    # registered rule is assumed to already be in its simplest form
    if type(constraint) in _TRANSFORMATION_RULES:
        return _TRANSFORMATION_RULES[type(constraint)](constraint, counter)

    else:
        return constraint, counter
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
def calc_last_two_dims(constraint, d: List[DVar]):
    """
    Generates constraints for the last two dimensions of a convolution or a maxpool output
    Args:
        constraint: CalcConv or CalcMaxPool
        d: The list of output dimensions

    Returns: Constraints for calculating the last two dimensions of the output

    """

    assert isinstance(constraint, (CalcConv, CalcMaxPool))

    # input height/width: positions 2 and 3 of the matched input type
    b3 = constraint.matching_constraint[2]
    b4 = constraint.matching_constraint[3]

    # case 1: input and output dimension are both dynamic
    b3_dyn = Conj([BinConstraintD(d[2], Dyn, op_eq), BinConstraintD(b3, Dyn, op_eq)])
    b4_dyn = Conj([BinConstraintD(d[3], Dyn, op_eq), BinConstraintD(b4, Dyn, op_eq)])

    # case 2: neither is dynamic, so the output-size arithmetic below applies
    d3_not_dyn = Conj([BinConstraintD(d[2], Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq)])
    d4_not_dyn = Conj([BinConstraintD(d[3], Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq)])

    # transform parameters into tuples in case they are not already
    padding = (constraint.padding, constraint.padding) \
        if isinstance(constraint.padding, int) else constraint.padding
    kernel = (constraint.kernel, constraint.kernel) \
        if isinstance(constraint.kernel, int) else constraint.kernel
    stride = (constraint.stride, constraint.stride) \
        if isinstance(constraint.stride, int) else constraint.stride
    dilation = (constraint.dilation, constraint.dilation) \
        if isinstance(constraint.dilation, int) else constraint.dilation

    # output-size formula for the height dimension, built as nested
    # dimension constraints: (b3 + 2*padding - dilation*(kernel - 1) - 1) / stride + 1
    f1 = BinConstraintD(b3, BinConstraintD(2, padding[0], op_mul), op_add)
    f2 = BinConstraintD(dilation[0], BinConstraintD(kernel[0], 1, op_sub), op_mul)
    f3 = BinConstraintD(BinConstraintD(BinConstraintD(f1, f2, op_sub), 1, op_sub), stride[0], op_div)
    f4 = BinConstraintD(f3, 1, op_add)

    c4 = Disj([b3_dyn, Conj([d3_not_dyn, BinConstraintD(d[2], f4, op_eq)])])

    # same formula for the width dimension, using the second tuple entries
    f11 = BinConstraintD(b4, BinConstraintD(2, padding[1], op_mul), op_add)
    f22 = BinConstraintD(dilation[1], BinConstraintD(kernel[1], 1, op_sub), op_mul)
    f33 = BinConstraintD(BinConstraintD(BinConstraintD(f11, f22, op_sub), 1, op_sub), stride[1], op_div)
    f44 = BinConstraintD(f33, 1, op_add)

    c5 = Disj([b4_dyn, Conj([d4_not_dyn, BinConstraintD(d[3], f44, op_eq)])])

    return c4, c5
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
def generate_all_int_dyn_dim_possibilities(my_list: List[DVar]):
    """
    Generate all possibilities of being equal or not equal to dyn for my_list
    Args:
        my_list: List of tensor dimensions

    Returns: A list of a list of constraints. Each list of constraints corresponds to
    one possibility about the values of the dimension variables
    """
    # for every dimension, the two mutually exclusive choices:
    # equal to Dyn, or not equal to Dyn
    per_dim_choices = [
        [BinConstraintD(dim, Dyn, op_eq), BinConstraintD(dim, Dyn, op_neq)]
        for dim in my_list
    ]
    # cross product of the per-dimension choices enumerates every possibility
    return list(itertools.product(*per_dim_choices))
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
def is_target_div_by_dim(target: List[int], dim: List[DVar]):
    """
    Generate constraints to check if the target dimensions are divisible by the input dimensions
    Args:
        target: Target dimensions
        dim: Input dimensions

    Returns: Constraints to check divisibility

    """
    # Prod(target) mod dim must be zero
    remainder = BinConstraintD(Prod(target), dim, op_mod)
    return BinConstraintD(remainder, 0, op_eq)
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
def is_dim_div_by_target(target: List[int], dim: List[DVar]):
    """
    Generate constraints to check if the input dimensions is divisible by the target dimensions
    Args:
        target: Target dimensions
        dim: Input dimensions

    Returns: Constraints to check divisibility

    """
    # dim mod Prod(target) must be zero
    remainder = BinConstraintD(dim, Prod(target), op_mod)
    return BinConstraintD(remainder, 0, op_eq)
|
| 689 |
+
|
| 690 |
+
|
| 691 |
+
def gen_all_reshape_possibilities(list_of_dims, target):
    """
    Consider all possibilities what the input dimensions could be (number or dynamic)
    Then generate the appropriate constraints using multiplication or mod depending on the possibility
    The possibilities we consider here are the cross product of being equal to dyn or not equal to dyn
    for the input. Target is fixed because at most one dimension could be dyn.
    We have different cases for this.

    Args:
        list_of_dims: The input list of dimensions
        target: The tensor we want to reshape to

    Returns: A disjunction of transformed reshape constraints

    """
    all_possibilities = generate_all_int_dyn_dim_possibilities(list_of_dims)

    all_constraints = []

    for p in all_possibilities:
        to_multiply = []

        p = list(p)

        # collect the dimensions this possibility declares NOT dynamic
        for constraint in p:
            assert isinstance(constraint, BinConstraintD)
            if constraint.op == op_neq:
                to_multiply.append(constraint.lhs)

        if not to_multiply:
            # every input dimension is dynamic: no numeric relation can be stated
            all_constraints.append(Conj(p))

        elif len(to_multiply) < len(list_of_dims):
            # some dimensions are dynamic: the product of the static ones
            # must divide the target's total size
            all_constraints.append(Conj(p + [is_target_div_by_dim(target, Prod(to_multiply))]))
        else:
            # no dimension is dynamic: the total sizes must match exactly
            all_constraints.append(Conj(p + [BinConstraintD(Prod(list_of_dims),
                                                            Prod(target), op_eq)]))

    return Disj(all_constraints)
|
| 730 |
+
|
| 731 |
+
|
| 732 |
+
def broadcast_dim(tensor_input1, tensor_input2, res1, res2, index, padding=False):
    """
    Apply broadcasting to the 'index' dimension of tensor_input1.
    Args:
        tensor_input1: should represent [d1, ..., d_index, ...] where d_index = 1
        tensor_input2: represents the second input
        res1: broadcasted result 1
        res2: broadcasted result 2
        index: the index to broadcast
        padding: If padding was used, then tensor_input1[index] does not exist

    Returns:

    """
    # a missing dimension at 'index' is only legal in padding mode
    if tensor_input1[index] is None:
        assert padding

    if padding:
        # the input dimension does not exist, so we don't constrain it to 1;
        # the broadcasted results simply mirror the second input
        return Conj([BinConstraintD(res1[index], res2[index], op_eq),
                     BinConstraintD(res2[index], tensor_input2[index], op_eq)])

    # inputs have equal rank: the broadcast source must be 1 at 'index',
    # and both results take the second input's dimension
    return Conj([BinConstraintD(tensor_input1[index], 1, op_eq),
                 BinConstraintD(res1[index], res2[index], op_eq),
                 BinConstraintD(res2[index], tensor_input2[index], op_eq)])
|
| 760 |
+
|
| 761 |
+
|
| 762 |
+
def apply_padding(e1_var: TVar,
                  e11: BinConstraintT,
                  e2: BinConstraintT,
                  e12: BinConstraintT,
                  d2: List[DVar],
                  d11: List[DVar],
                  d12: List[DVar],
                  counter: int):
    """
    We are considering the possibility where one input has less dimensions than
    another input, so we apply padding to the broadcasted results

    Args:
        e1_var: Variable representing the first input where padding will be
        e11: constraint of the form e11 = Tensortype[d1, ..., dn]
        e2: constraint of the form e2 = Tensortype[d1, ..., dn]
        e12: constraint of the form e12 = Tensortype[d1, ..., dn]
        d2: Tensor variables for the second input
        d11: Tensor variables for the broadcasted first input
        d12: Tensor variables for the broadcasted second input
        counter: variable tracking

    Returns: A new constraint whose goal is to apply padding to the broadcasted result

    """

    res = []

    # pad the shorter input with None so we can pass it to the broadcasting helper function
    # try every strictly-smaller rank i for the first input
    for i in range(1, len(d2)):

        # fresh dimension variables for a rank-i first input
        d1, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(d1 + d2 + d11 + d12)

        e1 = BinConstraintT(e1_var, TensorType(d1), op_eq)

        # leading positions that exist only in the longer input
        simulate_padding = [None] * (len(d2) - i)

        assert len(simulate_padding + d1) == len(d2)

        broadcast_padding = []

        # for every padding size, we also consider broadcasting
        for j in range(len(d2) - i):
            broadcast_padding.append(broadcast_dim(simulate_padding, d2, d11, d12, j, True))

        # we consider the possibilities for broadcasting for every dimension. Since we already
        # padded d1, we do not consider it while broadcasting
        all_broadcasting_possibilities = generate_all_broadcasting_possibilities_no_padding(d1,
                                                                                           d2[(len(d2) - i):],
                                                                                           d11[(len(d2) - i):],
                                                                                           d12[(len(d2) - i):])
        # combine all constraints into a conjunction
        c = Conj([e1, e11, e2, e12,
                  *broadcast_padding,
                  all_broadcasting_possibilities,
                  *nat_constraints
                  ])
        res.append(c)

    # any one of the candidate ranks may hold
    return Disj(res), counter
|
| 824 |
+
|
| 825 |
+
|
| 826 |
+
def no_broadcast_dim_with_index(d1: List[DVar],
                                d2: List[DVar],
                                d3: List[DVar],
                                d4: List[DVar],
                                i: int):
    """
    Args:
        d1: input 1
        d2: input 2
        d3: simulated broadcasting for input 1
        d4: simulated broadcasting for input 2
        i: the rank of the resulting tensor addition

    Returns: Constraints for when no broadcasting occurs
    """
    # no broadcasting at position i means either both inputs are 1
    # or neither is 1 ...
    both_are_one = Conj([BinConstraintD(d1[i], 1, op_eq),
                         BinConstraintD(d2[i], 1, op_eq)])
    neither_is_one = Conj([BinConstraintD(d1[i], 1, op_neq),
                           BinConstraintD(d2[i], 1, op_neq)])

    # ... and each result dimension equals its corresponding input dimension
    return Conj([
        Disj([both_are_one, neither_is_one]),
        BinConstraintD(d1[i], d3[i], op_eq),
        BinConstraintD(d2[i], d4[i], op_eq)])
|
| 851 |
+
|
| 852 |
+
|
| 853 |
+
|
| 854 |
+
def gen_lists_of_dims(num_tensors: int, dim_size: int, counter: int):
    """
    Generate lists of DVar to represent tensor dimensions
    Args:
        num_tensors: the required number of tensors
        dim_size: the number of dimensions for each tensor
        counter: variable tracking

    Returns: A list of a list of tensor dimensions

    """
    all_tensor_dims = []

    # allocate one list of fresh dimension variables per tensor,
    # threading the counter through each allocation
    remaining = num_tensors
    while remaining > 0:
        dims, counter = gen_tensor_dims(dim_size, counter)
        all_tensor_dims.append(dims)
        remaining -= 1

    return all_tensor_dims, counter
|
| 872 |
+
|
| 873 |
+
|
| 874 |
+
def create_equality_constraints_for_broadcasting(e1: TVar,
                                                 e2: TVar,
                                                 e11: TVar,
                                                 e12: TVar,
                                                 d1: List[DVar],
                                                 d2: List[DVar],
                                                 d11: List[DVar],
                                                 d12: List[DVar]):
    """
    Create equality constraints for when no broadcasting occurs
    Args:
        e1: Input 1
        e2: Input 2
        e11: Broadcasted input 1
        e12: Broadcasted input 2
        d1: Variables that store dimensions for e1
        d2: Variables that store dimensions for e2
        d11: Variables that store dimensions for e11
        d12: Variables that store dimensions for e12

    Returns: Four equality constraints

    """
    # pair each tensor variable with its dimension list; the result order
    # is e1, e11, e2, e12
    pairings = ((e1, d1), (e11, d11), (e2, d2), (e12, d12))
    return [BinConstraintT(var, TensorType(dims), op_eq) for var, dims in pairings]
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
def gen_consistency_constraints(constraint: Constraint, counter: int):
    """
    Args:
        constraint: Consistency constraint on tensors
        counter: for variable tracking

    Returns: Equality and consistency constraints on dimensions

    """

    all_constraints = []

    # consider every possible rank up to the supported maximum
    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
        new_dims_rhs_2, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)

        # both sides are rank-i tensors whose dimensions are pairwise consistent
        c_tensor_i = Conj([BinConstraintT(constraint.lhs, TensorType(new_dims_rhs_1), op_eq),
                           BinConstraintT(constraint.rhs, TensorType(new_dims_rhs_2), op_eq)] +
                          [BinConstraintD(d1, d2, op_consistency) for
                           d1, d2 in zip(new_dims_rhs_1, new_dims_rhs_2)] + nat_constraints)

        all_constraints.append(c_tensor_i)

    return all_constraints, counter
|
| 931 |
+
|
| 932 |
+
|
| 933 |
+
def gen_greatest_upper_bound(constraint: TGreatestUpperBound, counter: int):
    """
    Args:
        constraint: Greatest upper bound on tensors
        counter: variable tracking

    Returns: A set of equality constraints and DGreatestUpperBound constraints

    """

    all_constraints = []

    # consider every possible rank up to the supported maximum
    for i in range(1, MAX_TENSOR_RANK + 1):
        c = []
        dims1, counter = gen_tensor_dims(i, counter)
        c1tensor = TensorType(dims1)

        dims2, counter = gen_tensor_dims(i, counter)
        c2tensor = TensorType(dims2)

        dims3, counter = gen_tensor_dims(i, counter)
        c3tensor = TensorType(dims3)

        # both operands and the result are rank-i tensors with natural dimensions
        c += [BinConstraintT(constraint.rhs1, c1tensor, op_eq),
              BinConstraintT(constraint.rhs2, c2tensor, op_eq),
              BinConstraintT(constraint.res, c3tensor, op_eq)] + \
            gen_nat_constraints(dims1 + dims2 + dims3)

        assert len(c3tensor.__args__) == len(c1tensor.__args__) == len(c2tensor.__args__)
        # use a distinct index here: the original reused `i`, shadowing the
        # rank loop variable above
        for j in range(len(c3tensor.__args__)):
            c.append(DGreatestUpperBound(c3tensor.__args__[j],
                                         c1tensor.__args__[j],
                                         c2tensor.__args__[j]))

        all_constraints.append(Conj(c))
    return all_constraints, counter
|
| 969 |
+
|
| 970 |
+
|
| 971 |
+
def generate_all_broadcasting_possibilities_no_padding(d1: List[DVar], d2: List[DVar], d11: List[DVar], d12: List[DVar]):
    """
    Generate broadcasting constraints assuming no padding. Broadcasting can happen at any dimension.
    We look at all combinations for all dimensions in d1 and d2
    Args:
        d1: input1 dimensions
        d2: input2 dimensions
        d11: broadcasted input1 dimensions
        d12: broadcasted input2 dimensions

    Returns: broadcasting constraints relating the input dimensions to the broadcasted dimensions

    """
    # at each position, exactly one of three things happens:
    # input1 broadcasts, input2 broadcasts, or no broadcasting occurs
    per_dim_constraints = [
        Disj([broadcast_dim(d1, d2, d11, d12, idx),
              broadcast_dim(d2, d1, d12, d11, idx),
              no_broadcast_dim_with_index(d1, d2, d11, d12, idx)])
        for idx in range(len(d1))
    ]
    return Conj(per_dim_constraints)
|
| 997 |
+
|
| 998 |
+
|
| 999 |
+
def gen_broadcasting_constraints(e1: TVar, e2: TVar, e11: TVar, e12: TVar, i: int, counter: int):
    """
    Simulates broadcasting on e1 and e2 and returns the results
    respectively in e11 and e12. Because of gradual types,
    e1 and e2 may not be equal. Similarly, e11 and e12 may not
    be equal. e11 and e12 should be guaranteed to be consistent
    as they represent the shapes of the tensors to be added after
    broadcasting.
    Args:
        e1: TVar representing the type of input 1
        e2: TVar representing the type of input 2
        e11: TVar representing the broadcasted input 1
        e12: TVar representing the broadcasted input 2
        i: The rank of the resulting type of addition
        counter: for variable tracking

    Returns: Simplified broadcasting constraints

    """
    # four fresh dimension lists: input 1, input 2 and their broadcasted results
    dims, counter = gen_lists_of_dims(4, i, counter)
    [d1, d2, d3, d4] = dims
    nat_dims_i = gen_nat_constraints(list(itertools.chain.from_iterable(dims)))

    initialize_tensors_constraints = create_equality_constraints_for_broadcasting(e1, e2, e11, e12,
                                                                                  d1, d2, d3, d4)

    [e1_tensor, e11_tensor, e2_tensor, e12_tensor] = initialize_tensors_constraints

    # without padding, broadcast all possibilities for tensors of size i
    final_tensor_constraint_no_padding = Conj([*initialize_tensors_constraints,
                                               generate_all_broadcasting_possibilities_no_padding(d1, d2, d3, d4)])

    # with padding, broadcast all possibilities for tensors of size i
    # (each input in turn may be the shorter, padded one)
    final_tensor_constraint_padding_arg1, counter = \
        apply_padding(e1, e11_tensor, e2_tensor, e12_tensor, d2, d3, d4, counter)

    final_tensor_constraint_padding_arg2, counter = \
        apply_padding(e2, e12_tensor, e1_tensor, e11_tensor, d1, d4, d3, counter)

    return final_tensor_constraint_no_padding, \
        final_tensor_constraint_padding_arg1, \
        final_tensor_constraint_padding_arg2, nat_dims_i, counter
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py
ADDED
|
@@ -0,0 +1,349 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import Conj, Disj, T, F, BinConstraintT, BVar, is_bool_expr
|
| 3 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import BinConstraintD, TVar, DVar
|
| 4 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import Prod, is_algebraic_expression, is_dim
|
| 5 |
+
from torch.fx.experimental.migrate_gradual_types.constraint_generator import ConstraintGenerator
|
| 6 |
+
from torch.fx.experimental.migrate_gradual_types.constraint_transformation import transform_constraint
|
| 7 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_eq, op_neq, op_gt, op_lt
|
| 8 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_leq, op_sub, op_div, op_mul, op_mod
|
| 9 |
+
from torch.fx.tensor_type import TensorType, Dyn
|
| 10 |
+
|
| 11 |
+
try:
|
| 12 |
+
import z3 # type: ignore[import]
|
| 13 |
+
from torch.fx.experimental.migrate_gradual_types.z3_types import tensor_type, z3_dyn, D
|
| 14 |
+
HAS_Z3 = True
|
| 15 |
+
|
| 16 |
+
def transform_to_z3(constraint, counter, dimension_dict):
    """
    Recursively transform a constraint into a z3 expression.

    Args:
        constraint: the constraint to transform (Conj/Disj/T/F/BinConstraintT/BinConstraintD)
        counter: variable tracking
        dimension_dict: maps dimension-variable names to the integer ids used in the encoding

    Returns: a z3 expression (or a Python bool for T/F) and the current counter

    Raises:
        NotImplementedError: for constraint kinds or operations with no translation yet
    """
    if isinstance(constraint, Conj):
        conjuncts = []
        for c in constraint.conjucts:
            new_c, counter = transform_to_z3(c, counter, dimension_dict)
            conjuncts.append(new_c)
        return z3.And(conjuncts), counter

    elif isinstance(constraint, Disj):
        disjuncts = []
        for c in constraint.disjuncts:
            new_c, counter = transform_to_z3(c, counter, dimension_dict)
            disjuncts.append(new_c)
        return z3.Or(disjuncts), counter

    elif isinstance(constraint, T):
        return True, counter

    elif isinstance(constraint, F):
        return False, counter

    elif isinstance(constraint, BinConstraintT):
        # tensor-level constraints: only equality is supported
        if constraint.op == op_eq:
            lhs, counter = transform_var(constraint.lhs, counter, dimension_dict)
            rhs, counter = transform_var(constraint.rhs, counter, dimension_dict)
            return (lhs == rhs), counter

        else:
            raise NotImplementedError('Method not yet implemented')

    elif isinstance(constraint, BinConstraintD):
        if constraint.op == op_eq:

            # boolean variable bound to a boolean expression
            if isinstance(constraint.lhs, BVar) and is_bool_expr(constraint.rhs):
                transformed_rhs, counter = transform_to_z3(constraint.rhs, counter, dimension_dict)
                transformed_lhs = z3.Bool(constraint.lhs.c)
                return transformed_lhs == transformed_rhs, counter

            elif is_dim(constraint.lhs) and is_dim(constraint.rhs):
                # with dimension transformations we consider the encoding
                lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict)
                rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict)
                return lhs == rhs, counter

            else:
                # then we have an algebraic expression which means that we disregard the
                # first element of the encoding
                lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
                rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
                return lhs == rhs, counter

        # The assumption here is that the LHS and RHS must be dimensions
        elif constraint.op == op_neq:
            assert is_dim(constraint.lhs)
            assert is_dim(constraint.rhs)
            lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict)
            rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict)
            # 'x != Dyn' is encoded as x's static flag (arg 0) being 1
            if constraint.rhs == Dyn or constraint.lhs == Dyn:
                if constraint.rhs == Dyn:
                    return lhs.arg(0) == 1, counter
                elif constraint.lhs == Dyn:
                    return rhs.arg(0) == 1, counter

            # if one of the instances is a number
            elif isinstance(constraint.lhs, int) or isinstance(constraint.rhs, int):
                # the variable side differs from the number if it is dynamic
                # (flag 0) or static (flag 1) with a different value (arg 1)
                if isinstance(constraint.lhs, int):
                    return z3.Or([rhs.arg(0) == 0, z3.And([rhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter

                elif isinstance(constraint.rhs, int):
                    return z3.Or([lhs.arg(0) == 0, z3.And([lhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter

            else:
                # both sides are variables: they differ when exactly one is
                # dynamic, or both are non-dynamic with different values
                return z3.Or([z3.And([lhs.arg(0) == 0, rhs.arg(0) != 0]),
                              z3.And([lhs.arg(0) != 0, rhs.arg(0) == 0]),
                              z3.And([lhs.arg(0) != 0, rhs.arg(0) != 0, lhs.arg(1) != rhs.arg(1)])]), counter


        elif constraint.op == op_leq:
            # if the dimensions are not dyn, this will come into effect
            # there would have been another constraint specifying if a given dimension
            # is dyn or not
            assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
            lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
            rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
            return lhs <= rhs, counter

        elif constraint.op == op_gt:
            assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
            lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
            rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
            return lhs > rhs, counter

        elif constraint.op == op_lt:
            assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
            lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
            rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
            return lhs < rhs, counter

        else:
            raise NotImplementedError('operation not yet implemented')

    else:
        raise NotImplementedError('Operation not yet implemented')
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def transform_var(tensor, counter, dimension_dict):
    """
    Transforms tensor variables to a format understood by z3
    Args:
        tensor: Tensor variable or a tensor type potentially with variable dimensions
        counter: variable tracking
        dimension_dict: maps dimension-variable names to their encoding ids
    Returns: Transformed variable to a z3 format

    """
    if isinstance(tensor, TensorType):
        res = []
        for t in tensor.__args__:
            transformed, counter = transform_dimension(t, counter, dimension_dict)
            res.append(transformed)

        # the z3 tensor datatype only has constructors for ranks 1-4
        assert len(res) <= 4
        if len(tensor.__args__) == 1:
            return tensor_type.tensor1(res[0]), counter
        elif len(tensor.__args__) == 2:
            return tensor_type.tensor2(res[0], res[1]), counter
        elif len(tensor.__args__) == 3:
            return tensor_type.tensor3(res[0], res[1], res[2]), counter
        elif len(tensor.__args__) == 4:
            return tensor_type.tensor4(res[0], res[1], res[2], res[3]), counter

    elif tensor == Dyn:
        return z3_dyn, counter

    elif isinstance(tensor, TVar):
        return z3.Const(tensor.tvar, tensor_type), counter
    # NOTE(review): any other input falls through and implicitly returns None —
    # callers appear to only pass TensorType/Dyn/TVar; confirm before relying on this
|
| 150 |
+
|
| 151 |
+
def transform_dimension(dimension, counter, dimension_dict):
    """
    Takes a dimension variable or a number and transforms it to a tuple
    according to our scheme
    Args:
        dimension: The dimension to be transformed
        counter: variable tracking

    Returns: tuple and the current counter

    """
    # encoding D(flag, value): flag is 0 for Dyn, 1 for a concrete number,
    # and a fresh z3 integer for a dimension variable
    if dimension == Dyn:
        counter += 1
        return D(0, z3.Int(counter)), counter
    elif isinstance(dimension, int):
        return D(1, dimension), counter
    elif isinstance(dimension, DVar):
        if dimension.c in dimension_dict:
            # reuse the id previously allocated for this variable
            return D(z3.Int(dimension_dict[dimension.c]), z3.Int(dimension.c)), counter
        else:
            # first occurrence: allocate a fresh id and remember it
            counter += 1
            dimension_dict[dimension.c] = counter
            return D(z3.Int(counter), z3.Int(dimension.c)), counter
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def transform_algebraic_expression(expr, counter, dimension_dict):
    """
    Transforms an algebraic expression to z3 format
    Args:
        expr: An expression is either a dimension variable or an algebraic-expression
        counter: variable tracking
        dimension_dict: maps dimension-variable names to their encoding ids

    Returns: the transformed expression

    Raises:
        NotImplementedError: for an algebraic operation with no translation
        RuntimeError: if expr is neither a dimension nor an algebraic expression
    """
    assert is_algebraic_expression(expr) or is_dim(expr)

    if is_dim(expr):
        # only the value component (arg 1) of the encoding participates in arithmetic
        transformed, counter = transform_dimension(expr, counter, dimension_dict)
        return transformed.arg(1), counter

    elif isinstance(expr, Prod):

        dims = []
        for dim in expr.products:
            assert is_dim(dim)
            d, counter = transform_dimension(dim, counter, dimension_dict)
            dims.append(d.arg(1))
        return z3.Product(dims), counter

    elif is_algebraic_expression(expr):

        # recursively translate both operands, then apply the operator
        lhs, counter = transform_algebraic_expression(expr.lhs, counter, dimension_dict)
        rhs, counter = transform_algebraic_expression(expr.rhs, counter, dimension_dict)

        if expr.op == op_sub:
            c = lhs - rhs

        elif expr.op == op_add:
            c = lhs + rhs

        elif expr.op == op_div:
            c = lhs / rhs

        elif expr.op == op_mul:
            c = lhs * rhs

        elif expr.op == op_mod:
            c = lhs % rhs

        else:
            raise NotImplementedError('operation not yet implemented')

        return c, counter

    else:
        # unreachable given the assert above, kept as a defensive guard
        raise RuntimeError
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def transform_all_constraints(traced, counter=0):
    """
    Given a trace, generates constraints and transforms them to z3 format
    """
    dim_to_int = {}  # type: ignore[var-annotated]

    gen = ConstraintGenerator(traced)
    constraints, counter = gen.generate_constraints(counter)

    # Rewrite precision, matching and consistency constraints until a fixed
    # point is reached, then lower the result to z3.
    constraints, counter = iterate_till_fixed_point(constraints, counter)
    z3_constraints, counter = transform_to_z3(constraints, counter, dim_to_int)
    return z3_constraints
|
| 253 |
+
|
| 254 |
+
def iterate_till_fixed_point(constraints, counter):
    """
    Repeatedly apply transform_constraint until the constraints stop changing.
    """
    previous = None
    while previous != constraints:
        previous = constraints
        constraints, counter = transform_constraint(constraints, counter)
    return constraints, counter
|
| 263 |
+
|
| 264 |
+
def transform_all_constraints_trace_time(tracer_root, graph, node, counter=0):
    """
    Takes a node and a graph and generates two sets of constraints.
    One set constraints the node's constraints and another set
    constraints the negation of the node's constraints
    Args:
        tracer_root: the root for getting the module instances
        graph: the graph so far in the tracing process
        node: node that represents a conditional
        counter: variable tracking

    Returns: Two sets of constraints. One with a conjunction with the
    the conditional constraint and the other with a conjunction with
    its negation.
    """
    dim_to_int = {}  # type: ignore[var-annotated]

    gen = ConstraintGenerator(tracer_root, graph)
    constraints, counter = gen.generate_constraints(counter)

    # The generated result is a conjunction whose last conjunct describes the
    # conditional itself; split that conjunct off from the rest.
    cond = constraints.conjucts[-1]
    constraints.conjucts = constraints.conjucts[:-1]

    # Rewrite precision, matching and consistency constraints to a fixed point.
    constraints, counter = iterate_till_fixed_point(constraints, counter)

    # The conditional constraint has the shape `c = b` where c is a boolean
    # variable and b a boolean expression; only the RHS matters here because
    # the LHS just stores the result.
    assert isinstance(cond.lhs, BVar)
    assert is_bool_expr(cond.rhs)
    cond_rhs, counter = iterate_till_fixed_point(cond.rhs, counter)

    # Lower both the main constraints and the conditional's RHS to z3.
    z3_constraints, counter = transform_to_z3(constraints, counter, dim_to_int)
    z3_cond, counter = transform_to_z3(cond_rhs, counter, dim_to_int)

    positive = z3.And([z3_constraints, z3_cond])
    negative = z3.And([z3_constraints, z3.Not(z3_cond)])
    return positive, negative
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def evaluate_conditional_with_constraints(tracer_root, graph, node, counter=0, user_constraints=None):
    """
    Given an IR and a node representing a conditional, evaluate the conditional
    and its negation

    Args:
        tracer_root: Tracer root for module instances
        graph: the graph so far in the tracing process
        node: The node to be evaluated
        counter: variable-tracking counter
        user_constraints: optional extra z3 constraints added to both checks

    Returns: the results of evaluating the condition and the negation with
    the rest of the constraints
    """
    transformed_positive, transformed_negative = \
        transform_all_constraints_trace_time(tracer_root, graph, node, counter)

    def _check(constraint):
        # One fresh solver per query so the two checks do not share state.
        s = z3.Solver()
        s.add(constraint)
        if user_constraints is not None:
            s.add(user_constraints)
        return s.check()

    return _check(transformed_positive), _check(transformed_negative)
|
| 347 |
+
|
| 348 |
+
except ImportError:
|
| 349 |
+
HAS_Z3 = False
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import TVar, DVar, BinConstraintD, \
|
| 3 |
+
BVar
|
| 4 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_leq
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def gen_tvar(curr):
    """
    Generate a fresh tensor variable

    :param curr: The current counter
    :return: a tensor variable and the updated counter
    """
    next_id = curr + 1
    return TVar(next_id), next_id
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def gen_dvar(curr):
    """
    Generate a fresh dimension variable

    :param curr: the current counter
    :return: a dimension variable and an updated counter
    """
    next_id = curr + 1
    return DVar(next_id), next_id
|
| 25 |
+
|
| 26 |
+
def gen_bvar(curr):
    """
    Generate a fresh boolean variable

    :param curr: the current counter
    :return: a boolean variable and an updated counter
    """
    next_id = curr + 1
    return BVar(next_id), next_id
|
| 34 |
+
|
| 35 |
+
def gen_tensor_dims(n, curr):
    """
    Generate a list of tensor dimension variables

    :param n: the number of dimensions
    :param curr: the current counter
    :return: a list of dimension variables and an updated counter
    """
    dims = []
    while len(dims) < n:
        new_dim, curr = gen_dvar(curr)
        dims.append(new_dim)
    return dims, curr
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def gen_nat_constraints(list_of_dims):
    """
    Constrain every dimension in the given list to be a natural number (0 <= d)
    """
    constraints = []
    for d in list_of_dims:
        constraints.append(BinConstraintD(0, d, op_leq))
    return constraints
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/z3_types.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
try:
    import z3  # type: ignore[import]
    HAS_Z3 = True

    # Dynamic type: an uninterpreted sort with a single distinguished constant.
    dyn = z3.DeclareSort('Dyn')
    dyn_type = z3.Const('dyn', dyn)

    # Dimension: a pair of integers (fields are named '0' and '1').
    dim = z3.Datatype('dim')
    dim.declare('dim', ('0', z3.IntSort()), ('1', z3.IntSort()))
    dim = dim.create()

    # Tensor types: Dyn, or a tensor of rank 1 through 4 with `dim` fields.
    tensor_type = z3.Datatype('TensorType')
    tensor_type.declare('Dyn', ('dyn', dyn))
    tensor_type.declare('tensor1', ('0', dim))
    tensor_type.declare('tensor2', ('0', dim), ('1', dim))
    tensor_type.declare('tensor3', ('0', dim), ('1', dim), ('2', dim))
    tensor_type.declare('tensor4', ('0', dim), ('1', dim), ('2', dim), ('3', dim))
    tensor_type = tensor_type.create()

    # Constructor shortcut for building a dimension value.
    D = dim.dim

    # The z3 value representing the dynamic tensor type.
    z3_dyn = tensor_type.Dyn(dyn_type)


except ImportError:
    # z3 is an optional dependency; consumers must check HAS_Z3 before use.
    HAS_Z3 = False
|
parrot/lib/python3.10/site-packages/torch/fx/operator_schemas.py
ADDED
|
@@ -0,0 +1,442 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
import inspect
|
| 4 |
+
import numbers
|
| 5 |
+
import types
|
| 6 |
+
import typing
|
| 7 |
+
import enum
|
| 8 |
+
import warnings
|
| 9 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, NamedTuple, cast, TYPE_CHECKING
|
| 10 |
+
from torch._jit_internal import boolean_dispatched
|
| 11 |
+
from ._compatibility import compatibility
|
| 12 |
+
from torch._ops import OpOverloadPacket, OpOverload
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from .node import Argument
|
| 16 |
+
|
| 17 |
+
__all__ = ["ArgsKwargsPair", "check_for_mutable_operation", "get_signature_for_torch_op", "create_type_hint",
|
| 18 |
+
"type_matches", "normalize_function", "normalize_module"]
|
| 19 |
+
|
| 20 |
+
@compatibility(is_backward_compatible=False)
class ArgsKwargsPair(NamedTuple):
    """
    Simple named tuple for wrapping args/kwargs pairs.
    """
    # Positional arguments, in call order.
    args: Tuple[Any, ...]
    # Keyword arguments, keyed by parameter name.
    kwargs: Dict[str, Any]
|
| 27 |
+
|
| 28 |
+
# Manually-specified signatures for callables whose schemas cannot be looked up.
_manual_overrides : Dict[Callable, List[inspect.Signature]] = {}

def _nonzero_schemas():
    """Build the two hand-written signatures for ``torch.nonzero``:
    the plain one-argument form and the keyword-only ``as_tuple`` form."""
    def nonzero(self):
        pass

    def nonzero_as_tuple(self, *, as_tuple : bool):
        pass

    return [inspect.signature(nonzero), inspect.signature(nonzero_as_tuple)]

_manual_overrides[torch.nonzero] = _nonzero_schemas()
|
| 44 |
+
|
| 45 |
+
class _FakeGlobalNamespace:
    """Stand-in globals namespace in which only the name ``torch`` resolves."""

    def __getattr__(self, name):
        if name != 'torch':
            raise RuntimeError('Expected a torch namespace lookup')
        return torch
|
| 50 |
+
|
| 51 |
+
# Namespace handed to eval() when turning TorchScript annotation strings into
# Python types: maps the names that appear in annotation_str (e.g. 'Tensor',
# 'number', '__torch__') to their Python equivalents.
_type_eval_globals = {'Tensor' : torch.Tensor, 'Device' : torch.device, 'Layout' : torch.layout,
                      'number' : numbers.Number, 'Future' : torch.jit.Future,
                      'AnyEnumType' : enum.Enum, 'QScheme' : torch.qscheme,
                      '__torch__': _FakeGlobalNamespace(), 'NoneType': type(None),
                      'Storage': torch.UntypedStorage,
                      't': typing.TypeVar('t')}
# Also expose every public name from `typing` (List, Optional, ...) by name.
for k in dir(typing):
    _type_eval_globals[k] = getattr(typing, k)
|
| 59 |
+
|
| 60 |
+
def _torchscript_type_to_python_type(ts_type : 'torch._C.JitType') -> Any:
    """
    Convert a TorchScript type to a Python type (including subtypes) via
    eval'ing the annotation_str. _type_eval_globals sets up expressions
    like "List" and "Future" to map to actual types (typing.List and jit.Future)
    """
    # NOTE(review): eval is used on annotation_str, which comes from
    # TorchScript's own schema rather than user input — presumably trusted;
    # verify before exposing to untrusted schemas.
    return eval(ts_type.annotation_str, _type_eval_globals)
|
| 67 |
+
|
| 68 |
+
def _torchscript_schema_to_signature_impl(ts_schema : torch._C.FunctionSchema) -> inspect.Signature:
    """Translate a TorchScript FunctionSchema into an inspect.Signature,
    converting each argument's type, default and parameter kind."""
    from inspect import Parameter
    parameters : List[Parameter] = []
    for arg in ts_schema.arguments:
        arg_type = _torchscript_type_to_python_type(arg.type)
        default = arg.default_value if arg.has_default_value() else Parameter.empty
        # TODO: Figure out if this is safe. It seems like when generating the type signatures for
        # PythonArgParser, we emit signatures with `input` instead of `self` as the first tensor
        # argument name. Downstream, if someone converts that positional argument to a keyword
        # argument, the name mismatch will break things, so here we're going to normalize the
        # name to "input"
        name = arg.name if arg.name != 'self' else 'input'
        kind = Parameter.KEYWORD_ONLY if arg.kwarg_only else Parameter.POSITIONAL_OR_KEYWORD
        # "from" is a keyword therefore it must be a POSITIONAL_ONLY argument
        if name == "from":
            assert kind == Parameter.POSITIONAL_OR_KEYWORD
            # ParameterKind type is internal implementation detail to inspect package
            # which makes it hard to do type annotation
            kind = Parameter.POSITIONAL_ONLY  # type: ignore[assignment]
            # This renders all previous arguments to positional only
            for idx, p in enumerate(parameters):
                assert p.kind == Parameter.POSITIONAL_OR_KEYWORD
                parameters[idx] = Parameter(name=p.name, kind=Parameter.POSITIONAL_ONLY, default=p.default, annotation=p.annotation)
        parameters.append(Parameter(name=name, kind=kind, default=default, annotation=arg_type))
    # Zero returns -> None; one return -> that type; several -> a tuple of types.
    return_types = [_torchscript_type_to_python_type(ret.type) for ret in ts_schema.returns]
    if len(return_types) == 0:
        return_type = None
    elif len(return_types) == 1:
        return_type = return_types[0]
    else:
        return_type = tuple(return_types)

    return inspect.Signature(parameters, return_annotation=return_type)
|
| 101 |
+
|
| 102 |
+
# Memoization table keyed by (schema name, overload name).
_SCHEMA_TO_SIGNATURE_CACHE : Dict[Tuple[str, str], inspect.Signature] = {}

def _torchscript_schema_to_signature(ts_schema : torch._C.FunctionSchema) -> inspect.Signature:
    """Cached wrapper around _torchscript_schema_to_signature_impl.

    Cached because this runs on the hot path of FakeTensor dispatch.
    """
    key = ts_schema.name, ts_schema.overload_name
    cached = _SCHEMA_TO_SIGNATURE_CACHE.get(key)
    if cached is None:
        cached = _torchscript_schema_to_signature_impl(ts_schema)
        _SCHEMA_TO_SIGNATURE_CACHE[key] = cached
    return cached
|
| 114 |
+
|
| 115 |
+
@compatibility(is_backward_compatible=False)
def check_for_mutable_operation(target : Callable, args : Tuple['Argument', ...], kwargs : Dict[str, 'Argument']):
    """Best-effort check that a traced call does not hit a mutable schema.

    Binds ``args``/``kwargs`` against every known schema for ``target``. If
    exactly one schema matches (unambiguous) and that schema is mutable,
    raise; if zero or several schemas match, do nothing, since the check is
    only best effort.

    Raises:
        RuntimeError: if the unambiguously matched schema mutates its operands.
    """
    signatures, schemas = get_signature_for_torch_op(target, return_schemas=True)

    if not (signatures and schemas):
        return

    # Collect every schema whose signature the given args/kwargs bind to.
    matched_schemas = []
    for candidate_signature, schema in zip(signatures, schemas):
        try:
            candidate_signature.bind(*args, **kwargs)
            matched_schemas.append((candidate_signature, schema))
        except TypeError:
            continue

    if len(matched_schemas) == 1:
        # Matched exactly one schema, unambiguous: safe to check mutability.
        _, schema_to_check = matched_schemas[0]
        if schema_to_check.is_mutable:
            raise RuntimeError(f'Tried to trace mutable operation {schema_to_check}. FX only supports functional '
                               f'code, so operations that mutate operands in-place (e.g. via `out` arguments) '
                               f'are not supported')
    # 0 matches: cannot check for mutation. >1 matches: ambiguous; since
    # mutability checking is best effort, do nothing in either case.
|
| 150 |
+
|
| 151 |
+
@compatibility(is_backward_compatible=False)
def get_signature_for_torch_op(op : Callable, return_schemas : bool = False):
    """
    Given an operator on the `torch` namespace, return a list of `inspect.Signature`
    objects corresponding to the overloads of that op.. May return `None` if a signature
    could not be retrieved.

    Args:
        op (Callable): An operator on the `torch` namespace to look up a signature for

    Returns:
        Optional[List[inspect.Signature]]: A list of signatures for the overloads of this
            operator, or None if the operator signatures could not be retrieved. If
            return_schemas=True, returns a tuple containing the optional Python signatures
            and the optional TorchScript Function signature
    """
    if isinstance(op, OpOverload):
        # A specific overload: exactly one schema.
        schemas = [op._schema]
    elif isinstance(op, OpOverloadPacket):
        # A packet: one schema per overload it contains.
        schemas = [getattr(op, name)._schema for name in op.overloads()]
    else:
        # A plain callable: honor manual overrides first, then try the
        # builtin-op registry.
        override = _manual_overrides.get(op)
        if override:
            return (override, None) if return_schemas else None

        aten_fn = torch.jit._builtins._find_builtin(op)
        if aten_fn is None:
            return (None, None) if return_schemas else None
        schemas = torch._C._jit_get_schemas_for_operator(aten_fn)

    sigs = []
    for schema in schemas:
        sigs.append(_torchscript_schema_to_signature(schema))
    if return_schemas:
        return sigs, schemas
    return sigs
|
| 184 |
+
|
| 185 |
+
@compatibility(is_backward_compatible=False)
def create_type_hint(x):
    """Derive a typing hint from a value.

    For a list or tuple, returns ``List[T]`` / ``Tuple[T, ...]`` where ``T``
    is the most general element type found (``Any`` if the elements have no
    common base among themselves, or if the container is empty). Any other
    value — or a container whose elements defeat ``issubclass`` — is returned
    unchanged.
    """
    try:
        if isinstance(x, (list, tuple)):
            # todo(chilli): Figure out the right way for mypy to handle this
            if isinstance(x, list):
                def ret_type(elem_type):
                    return List[elem_type]  # type: ignore[valid-type]
            else:
                def ret_type(elem_type):
                    return Tuple[elem_type, ...]
            if len(x) == 0:
                return ret_type(Any)
            # Widen base_type to the most general type among the elements;
            # bail out to Any if two elements are unrelated.
            base_type = x[0]
            for t in x:
                if issubclass(t, base_type):
                    continue
                elif issubclass(base_type, t):
                    base_type = t
                else:
                    return ret_type(Any)
            return ret_type(base_type)
    except Exception:
        # Best effort: elements may not be classes (issubclass raises);
        # warn and fall through to returning the input unchanged.
        warnings.warn(f"We were not able to successfully create type hint from the type {x}")
    return x
|
| 212 |
+
|
| 213 |
+
@compatibility(is_backward_compatible=False)
def type_matches(signature_type : Any, argument_type : Any):
    """Return True if ``argument_type`` is acceptable where the schema
    declares ``signature_type``, applying the promotion rules below."""
    sig_origin_type = getattr(signature_type, '__origin__', signature_type)

    if signature_type is argument_type:
        return True

    # Union types in signature. Given type needs to match one of the
    # contained types in the Union
    if sig_origin_type is typing.Union and signature_type != argument_type:
        sig_contained = signature_type.__args__
        return any(type_matches(c, argument_type) for c in sig_contained)

    if signature_type is List[int] and argument_type is int:
        # int can be promoted to List[int]
        return True

    if getattr(signature_type, '__origin__', None) in {list, List}:
        sig_el_type = signature_type.__args__[0]
        # Nested parametric element types (e.g. List[List[int]]) are not handled.
        if not inspect.isclass(sig_el_type):
            warnings.warn(
                f"Does not support nested parametric types, got {signature_type}. Please file a bug.")
            return False
        if getattr(argument_type, '__origin__', None) in {list, List}:
            return issubclass(argument_type.__args__[0], sig_el_type)

        def is_homogeneous_tuple(t):
            # True when t is a Tuple[...] whose element types (ignoring
            # Ellipsis) are all subclasses of the list's element type.
            if getattr(t, "__origin__", None) not in {tuple, Tuple}:
                return False
            contained = t.__args__
            if t.__args__ == ((),):  # Tuple[()].__args__ == ((),) for some reason
                return True
            return all((c is Ellipsis) or issubclass(c, sig_el_type) for c in contained)

        # Tuple[T] is accepted for List[T] parameters
        return is_homogeneous_tuple(argument_type)

    # Dtype is an int in schemas
    if signature_type is int and argument_type is torch.dtype:
        return True

    if signature_type is numbers.Number and argument_type in {int, float}:
        return True
    if inspect.isclass(argument_type) and inspect.isclass(signature_type):
        return issubclass(argument_type, signature_type)

    return False
|
| 260 |
+
|
| 261 |
+
@compatibility(is_backward_compatible=False)
def normalize_function(
        target: Callable, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None, arg_types : Optional[Tuple[Any]] = None,
        kwarg_types : Optional[Dict[str, Any]] = None,
        normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
    """
    Returns normalized arguments to PyTorch functions. This means that
    `args/kwargs` will be matched up to the functional's
    signature and return exclusively kwargs in positional order if
    `normalize_to_only_use_kwargs` is True.
    Also populates default values. Does not support positional-only
    parameters or varargs parameters (*args, **kwargs). Does not support modules.

    May require `arg_types` and `kwarg_types` in order to disambiguate overloads.

    Args:
        target (Callable): Function that we are normalizing
        args (Tuple[Any]): Tuple of args to the function
        kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function
        arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args
        kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs
        normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.

    Returns:

        Returns normalized_args_and_kwargs, or `None` if not successful.
    """
    if kwargs is None:
        kwargs = {}
    new_args_and_kwargs = None
    if not isinstance(target, types.BuiltinFunctionType) and not (
        isinstance(target, (OpOverloadPacket, OpOverload))
    ):
        # Plain Python callable: normalize directly against its signature.
        target_for_analysis = target
        if target in boolean_dispatched:
            # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have
            # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
            # branches of the dispatch have exactly the same signature. If they do, use the `true`
            # branch signature for analysis. Otherwise, leave this un-normalized
            assert not isinstance(target, str)
            dispatched = boolean_dispatched[target]
            if_true, if_false = dispatched['if_true'], dispatched['if_false']
            if inspect.signature(if_true).parameters != inspect.signature(if_false).parameters:
                return None
            target_for_analysis = if_true

        assert callable(target_for_analysis)
        sig = inspect.signature(inspect.unwrap(target_for_analysis))
        new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs, normalize_to_only_use_kwargs)
    else:
        # Builtin / overloaded torch op: resolve via its TorchScript schemas.
        assert callable(target)
        torch_op_schemas = get_signature_for_torch_op(target)
        matched_schemas = []
        if torch_op_schemas:
            # Iterate through all of the schema until we find one that matches
            # If one matches, populate `new_args_and_kwargs` with the new args/kwargs
            # values. If none matches, `new_args_and_kwargs` will be None
            for candidate_signature in torch_op_schemas:
                try:
                    candidate_signature.bind(*args, **kwargs)
                    matched_schemas.append(candidate_signature)
                except TypeError as e:
                    continue

            if len(matched_schemas) == 0:
                # Did not match any schema. Cannot normalize
                pass
            elif len(matched_schemas) == 1:
                # Matched exactly one schema, unambiguous
                new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(matched_schemas[0], args, kwargs,
                                                                             normalize_to_only_use_kwargs)
            else:
                # Ambiguous match: use the caller-supplied argument types to
                # pick the overload whose annotations all match.
                if arg_types is not None or kwarg_types is not None:
                    arg_types = arg_types if arg_types else cast(Tuple[Any], ())
                    kwarg_types = kwarg_types if kwarg_types else {}
                    for candidate_signature in torch_op_schemas:
                        sig_matches = True
                        try:
                            bound_types = candidate_signature.bind(*arg_types, **kwarg_types)
                            for arg_name, arg_type in bound_types.arguments.items():
                                param = candidate_signature.parameters[arg_name]
                                sig_matches = sig_matches and type_matches(param.annotation, arg_type)
                        except TypeError as e:
                            sig_matches = False
                        if sig_matches:
                            new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(candidate_signature, args, kwargs,
                                                                                         normalize_to_only_use_kwargs)
                            break
                else:
                    # Matched more than one schema. In this situation, the caller must provide the types of
                    # the arguments of the overload they expect.
                    schema_printouts = '\n'.join(str(schema) for schema in matched_schemas)
                    raise RuntimeError(f'Tried to normalize arguments to {torch.typename(target)} but '
                                       f'the schema match was ambiguous! Please provide argument types to '
                                       f'the normalize_arguments() call. Available schemas:\n{schema_printouts}')

    return new_args_and_kwargs
|
| 358 |
+
|
| 359 |
+
@compatibility(is_backward_compatible=False)
def normalize_module(
        root: torch.nn.Module, target: str, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None,
        normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
    """
    Returns normalized arguments to PyTorch modules. This means that
    `args/kwargs` will be matched up to the functional's
    signature and return exclusively kwargs in positional order if
    `normalize_to_only_use_kwargs` is True.
    Also populates default values. Does not support positional-only
    parameters or varargs parameters (*args, **kwargs).

    Args:
        root (nn.Module): root module upon which we query modules
        target (Callable): Function that we are normalizing
        args (Tuple[Any]): Tuple of args to the function
        kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function
        normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.

    Returns:

        Returns normalized_args_and_kwargs, or `None` if not successful.
    """
    try:
        submod = root.get_submodule(target)
    except AttributeError as e:
        raise RuntimeError(f"Tried to normalize node with target {target} but root did not "
                           f"have that target!") from e
    if hasattr(submod.__class__, '__name__'):
        classname = submod.__class__.__name__
        # Only normalize for modules that come from torch.nn itself; arbitrary
        # user modules are left alone (return None below).
        if getattr(torch.nn, classname, None) == submod.__class__:
            sig = inspect.signature(inspect.unwrap(submod.forward))
            if kwargs is None:
                kwargs = {}
            new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs,
                                                                         normalize_to_only_use_kwargs)
            return new_args_and_kwargs
    return None
|
| 397 |
+
|
| 398 |
+
def _args_kwargs_to_normalized_args_kwargs(sig : inspect.Signature, args : Tuple[Any, ...],
                                           kwargs : Dict[str, Any],
                                           normalize_to_only_use_kwargs : bool) -> Optional[ArgsKwargsPair]:
    """
    Normalize a callsite's ``args``/``kwargs`` against ``sig`` into an
    ArgsKwargsPair, filling in default values along the way.

    Args:

        sig (inspect.Signature): Signature object for the target
        args (Tuple): Arguments that appear at the callsite for `target`
        kwargs (Dict): Keyword arguments that appear at the callsite for `target`
        normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.

    Returns:

        Optional[ArgsKwargsPair]: Normalized args and kwargs for `target`, or `None` if
        this target is not supported.
    """

    # Positional-only parameters and varargs (*args, **kwargs) are not
    # handled by this normalization.
    allowed_kinds = (
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
        inspect.Parameter.KEYWORD_ONLY,
    )
    has_unsupported_kind = not all(
        p.kind in allowed_kinds for p in sig.parameters.values()
    )
    if has_unsupported_kind:
        # Carve-out for one signature common to random_/uniform_, i.e.:
        #   Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None
        # `from` is a Python keyword, so such functions end up with
        # positional-only args, yet they may still be dispatched via kwargs.
        if list(sig.parameters) != ['input', 'from', 'to', 'generator']:
            return None

    bound = sig.bind(*args, **kwargs)
    bound.apply_defaults()

    # Anything beyond this count is emitted as a keyword argument.
    positional_budget = 0 if normalize_to_only_use_kwargs else len(args)

    out_args : List[Any] = []
    out_kwargs : Dict[str, Any] = {}
    for idx, param_name in enumerate(sig.parameters):
        value = bound.arguments[param_name]
        if idx < positional_budget:
            out_args.append(value)
        else:
            out_kwargs[param_name] = value

    return ArgsKwargsPair(tuple(out_args), out_kwargs)
|
parrot/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import operator
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def annotate_getitem_nodes(graph: torch.fx.Graph) -> None:
    """
    Propagate type annotations from sequence nodes onto the ``getitem``
    nodes that index into them.  Nodes whose source sequence carries no
    type annotation are left untouched.  Supported sources are annotated
    Tuple, List, and NamedTuple values.

    FX transforms drop annotations on local names, so restoring the known
    element type here helps downstream jit scripting.

    Args:
        graph (Graph): The graph to be annotated
    """
    for candidate in graph.nodes:
        if candidate.target != operator.getitem:
            continue
        source_node, index_node = candidate.args
        if not source_node.type:
            continue
        source_type = source_node.type
        if hasattr(source_type, "_name"):
            # typing container (e.g. Tuple[...] / List[...])
            element_types = source_type.__args__
            if source_type._name == "Tuple":
                homogeneous = len(element_types) == 2 and isinstance(
                    element_types[1], type(...)
                )
                if homogeneous:
                    # Tuple[T, ...]: every element is T
                    candidate.type = element_types[0]
                else:
                    assert len(element_types) > index_node
                    candidate.type = element_types[index_node]
            elif source_type._name == "List":
                assert len(element_types) == 1
                candidate.type = element_types[0]
        elif hasattr(source_type, "__annotations__"):
            # NamedTuple: resolve the field type by positional index
            if source_type == torch.Tensor:
                continue
            field_types = source_type.__annotations__
            field_name = source_type._fields[index_node]
            candidate.type = field_types[field_name]
|
parrot/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py
ADDED
|
File without changes
|
parrot/lib/python3.10/site-packages/torch/fx/passes/graph_transform_observer.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import os
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
from torch.fx.graph_module import GraphModule
|
| 6 |
+
|
| 7 |
+
from .graph_drawer import FxGraphDrawer
|
| 8 |
+
|
| 9 |
+
__all__ = ["GraphTransformObserver"]
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class GraphTransformObserver:
    """
    Context manager that records which graph nodes a transform pass creates
    or erases, and dumps before/after SVG renderings of the GraphModule to
    ``log_url``.  Erased nodes are highlighted in the "input" drawing and
    created nodes in the "output" drawing.  With ``log_url=None`` the
    observer is a no-op.
    """

    # Global counter of observed passes, used to sequence the SVG filenames.
    __pass_count = 0

    def __init__(self, gm: GraphModule, passname: str, log_url: Optional[str] = None):
        # A null log_url disables observation entirely.  Note: in that case
        # self.gm / self.passname / self.input_dot_graph are never set.
        self.log_url = log_url
        if self.log_url is None:
            return
        GraphTransformObserver.__pass_count += 1
        self.gm = gm
        self.passname = passname

        # Snapshot the graph before the pass runs.
        self.input_dot_graph = FxGraphDrawer(
            self.gm,
            self.passname,
            ignore_getattr=True,
            ignore_parameters_and_buffers=True,
        ).get_dot_graph()

    @classmethod
    def get_current_pass_count(cls):
        return cls.__pass_count

    def __enter__(self):
        if self.log_url is None or self.gm is None:
            return self

        self.erased_nodes = set()
        self.created_nodes = set()
        self.gm._register_create_node_hook(self.on_node_creation)
        self.gm._register_erase_node_hook(self.on_node_erase)
        return self

    @staticmethod
    def _colorize(dot_graph, highlighted):
        # Nodes of interest are drawn yellow, everything else grey.
        for entry in dot_graph.get_node_list():
            color = "yellow" if entry.get_name() in highlighted else "grey"
            entry.obj_dict["attributes"]["fillcolor"] = color

    def __exit__(self, type, value, tb):
        if self.log_url is None or self.gm is None:
            return

        self.gm._unregister_create_node_hook(self.on_node_creation)
        self.gm._unregister_erase_node_hook(self.on_node_erase)

        # Only emit drawings when the pass actually changed the graph.
        if not (self.created_nodes or self.erased_nodes):
            return

        self._colorize(self.input_dot_graph, self.erased_nodes)
        self.input_dot_graph.write_svg(
            os.path.join(
                self.log_url,
                f"pass_{GraphTransformObserver.__pass_count}_{self.passname}_input_graph.svg",
            )
        )

        output_dot_graph = FxGraphDrawer(
            self.gm,
            self.passname,
            ignore_getattr=True,
            ignore_parameters_and_buffers=True,
        ).get_dot_graph()
        self._colorize(output_dot_graph, self.created_nodes)
        output_dot_graph.write_svg(
            os.path.join(
                self.log_url,
                f"pass_{GraphTransformObserver.__pass_count}_{self.passname}_output_graph.svg",
            )
        )

    def on_node_creation(self, node):
        self.created_nodes.add(node.name)

    def on_node_erase(self, node):
        self.erased_nodes.add(node.name)
|
parrot/lib/python3.10/site-packages/torch/fx/passes/net_min_base.py
ADDED
|
@@ -0,0 +1,924 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import logging
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.fx
|
| 8 |
+
|
| 9 |
+
from torch.fx._compatibility import compatibility
|
| 10 |
+
from torch.fx.node import map_arg
|
| 11 |
+
|
| 12 |
+
from .shape_prop import ShapeProp
|
| 13 |
+
from .split_utils import split_by_tags
|
| 14 |
+
from .tools_common import (
|
| 15 |
+
CALLABLE_NODE_OPS,
|
| 16 |
+
FxNetAccFusionsFinder,
|
| 17 |
+
Names,
|
| 18 |
+
NodeList,
|
| 19 |
+
NodeSet,
|
| 20 |
+
TensorOrTensors,
|
| 21 |
+
Tensors,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
__all__ = [
|
| 25 |
+
"FxNetMinimizerBadModuleError",
|
| 26 |
+
"FxNetMinimizerRunFuncError",
|
| 27 |
+
"FxNetMinimizerResultMismatchError",
|
| 28 |
+
]
|
| 29 |
+
|
| 30 |
+
_LOGGER = logging.getLogger(__name__)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@compatibility(is_backward_compatible=False)
class FxNetMinimizerBadModuleError(Exception):
    """Raised when the minimizer fails to split out a "minimize" submodule."""
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@compatibility(is_backward_compatible=False)
class FxNetMinimizerRunFuncError(Exception):
    """Raised when an error occurs while executing run_a or run_b."""
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@compatibility(is_backward_compatible=False)
class FxNetMinimizerResultMismatchError(Exception):
    """Raised when the comparison function reports mismatching results."""
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@dataclass
class _MinimizerSettingBase:
    """
    Configuration shared by minimizer implementations.

    Args:
        `accumulate_error`: Instead of feeding the same (a-side) input to both
            converted modules for verification, feed each converted module the
            previous module's output so errors accumulate.

        `traverse_method`: "sequential" or "binary" or "accumulate".
            Controls how the nodes of the FX module are traversed.

        `find_all`: If True, the minimizer scans the entire model and reports
            every problematic node instead of stopping at the first.

        `return_intermediate`: If True, `run_nodes()` returns the intermediate
            results of every op as output.
    """

    accumulate_error: bool = False
    traverse_method: str = "sequential"
    find_all: bool = False
    return_intermediate: bool = False

    def __str__(self):
        body = "".join(f"\t{name}: {value}\n" for name, value in vars(self).items())
        return "FX Minimizer Settings:\n" + body
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class _MinimizerBase:
|
| 92 |
+
"""
|
| 93 |
+
This class is used to automatically find problematic nodes in a model. It takes a FX
|
| 94 |
+
graphmodule and generate some submodules while traverse the graph. Then two functions
|
| 95 |
+
`run_a` and `run_b` will be used to run the same submodule and a function `compare_fn`
|
| 96 |
+
will be used to compare the results.
|
| 97 |
+
|
| 98 |
+
Currently we provides two ways to traverse the graph and generate submodules.
|
| 99 |
+
1. Sequential traversal: this will traverse the graph node by node and generate
|
| 100 |
+
one submodule with one sigle node.
|
| 101 |
+
2. Binary searching: this will do a binary search style traversal on the graph.
|
| 102 |
+
|
| 103 |
+
For internal Users, a guide can be found here https://fb.quip.com/HDtuAgiKGfkP.
|
| 104 |
+
"""
|
| 105 |
+
|
| 106 |
+
def __init__(
|
| 107 |
+
self,
|
| 108 |
+
module: torch.fx.GraphModule,
|
| 109 |
+
sample_input: Tensors,
|
| 110 |
+
compare_fn: Callable[
|
| 111 |
+
[TensorOrTensors, TensorOrTensors, Names], Tuple[float, bool]
|
| 112 |
+
],
|
| 113 |
+
settings: _MinimizerSettingBase,
|
| 114 |
+
module_exporter: Optional[
|
| 115 |
+
Callable[
|
| 116 |
+
[Tensors, torch.fx.GraphModule, str],
|
| 117 |
+
None
|
| 118 |
+
]
|
| 119 |
+
] = None,
|
| 120 |
+
exclusion_fn: Optional[
|
| 121 |
+
Callable[[NodeList, int, int], None]
|
| 122 |
+
] = None,
|
| 123 |
+
):
|
| 124 |
+
assert isinstance(module, torch.fx.GraphModule)
|
| 125 |
+
|
| 126 |
+
self.module = module
|
| 127 |
+
self.sample_input = sample_input
|
| 128 |
+
self.compare_fn = compare_fn
|
| 129 |
+
self.module_exporter = module_exporter
|
| 130 |
+
self.settings = settings
|
| 131 |
+
self.exclusion_fn = exclusion_fn
|
| 132 |
+
|
| 133 |
+
# Stores outputs of run_a function
|
| 134 |
+
self.a_outputs: Dict[str, Any] = {}
|
| 135 |
+
|
| 136 |
+
# Stores outputs of run_b function
|
| 137 |
+
self.b_outputs: Dict[str, Any] = {}
|
| 138 |
+
|
| 139 |
+
# Stores the results of compare_fn
|
| 140 |
+
self.results: Dict[Any, Any] = {}
|
| 141 |
+
|
| 142 |
+
# Stores the report for the runs
|
| 143 |
+
self.reports: List[List[str]] = []
|
| 144 |
+
|
| 145 |
+
# Current iteration
|
| 146 |
+
self.iteration: int = 0
|
| 147 |
+
|
| 148 |
+
callable_nodes = {
|
| 149 |
+
node for node in self.module.graph.nodes if node.op in CALLABLE_NODE_OPS
|
| 150 |
+
}
|
| 151 |
+
ShapeProp(self.module).propagate(*self.sample_input)
|
| 152 |
+
self.fusions = FxNetAccFusionsFinder(self.module, callable_nodes)()
|
| 153 |
+
|
| 154 |
+
# Check if number of input in sample_input matches the number of placeholders
|
| 155 |
+
placeholders = [
|
| 156 |
+
node.name for node in self.module.graph.nodes if node.op == "placeholder"
|
| 157 |
+
]
|
| 158 |
+
assert len(placeholders) == len(self.sample_input)
|
| 159 |
+
|
| 160 |
+
# Store sample_input
|
| 161 |
+
for i, name in enumerate(placeholders):
|
| 162 |
+
self.a_outputs[name] = sample_input[i]
|
| 163 |
+
self.b_outputs[name] = sample_input[i]
|
| 164 |
+
|
| 165 |
+
def run_a(self, mod: torch.fx.GraphModule, inputs: Tensors, report_idx: int = -1) -> TensorOrTensors:
|
| 166 |
+
"""
|
| 167 |
+
Run `mod` with `inputs` and generate output. The output will be compared with
|
| 168 |
+
output of run_b().
|
| 169 |
+
"""
|
| 170 |
+
raise RuntimeError("run_a() is not implemented.")
|
| 171 |
+
|
| 172 |
+
def run_b(self, mod: torch.fx.GraphModule, inputs: Tensors, report_idx: int = -1) -> TensorOrTensors:
|
| 173 |
+
"""
|
| 174 |
+
Run `mod` with `inputs` and generate output. The output will be compared with
|
| 175 |
+
output of run_a().
|
| 176 |
+
"""
|
| 177 |
+
raise RuntimeError("run_b() is not implemented.")
|
| 178 |
+
|
| 179 |
+
def _store_outputs(
|
| 180 |
+
self,
|
| 181 |
+
a_result: TensorOrTensors,
|
| 182 |
+
b_result: TensorOrTensors,
|
| 183 |
+
submodule: torch.fx.GraphModule,
|
| 184 |
+
):
|
| 185 |
+
"""
|
| 186 |
+
Store the outputs of self.run_a() and self.run_b() into self.a_outputs and
|
| 187 |
+
self.b_outputs, so that we can use them when execute preceding nodes that
|
| 188 |
+
use those outputs as inputs.
|
| 189 |
+
|
| 190 |
+
Args:
|
| 191 |
+
a_result: Output of self.run_a(). Could be a tensor or tensors.
|
| 192 |
+
b_result: Output of self.run_b(). Could be a tensor or tensors.
|
| 193 |
+
submodule: The module that generates a_result and b_result.
|
| 194 |
+
"""
|
| 195 |
+
output_node = next(
|
| 196 |
+
node for node in submodule.graph.nodes if node.op == "output"
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
# Only one output
|
| 200 |
+
if isinstance(output_node.args[0], torch.fx.Node):
|
| 201 |
+
self.a_outputs[output_node.args[0].name] = a_result
|
| 202 |
+
self.b_outputs[output_node.args[0].name] = b_result
|
| 203 |
+
# Multiple outputs
|
| 204 |
+
else:
|
| 205 |
+
for i, arg in enumerate(output_node.args[0]):
|
| 206 |
+
self.a_outputs[arg.name] = a_result[i]
|
| 207 |
+
self.b_outputs[arg.name] = b_result[i]
|
| 208 |
+
|
| 209 |
+
def _get_submod_inputs(
|
| 210 |
+
self, main_module: torch.fx.GraphModule, submod_path: str
|
| 211 |
+
) -> Tuple[Tensors, Tensors]:
|
| 212 |
+
"""
|
| 213 |
+
Try get submodule inputs from stored outputs. If not found then use
|
| 214 |
+
torch_glow.get_submod_inputs to get the inputs.
|
| 215 |
+
|
| 216 |
+
If accumulate_error is False, use a_input for run_a() and run_b()
|
| 217 |
+
otherwise use a_input for run_a and b_input for run_b.
|
| 218 |
+
|
| 219 |
+
Args:
|
| 220 |
+
main_module: Top-levlel fx module.
|
| 221 |
+
submod_path: Path to the submodule we want to run and compare results.
|
| 222 |
+
|
| 223 |
+
Returns:
|
| 224 |
+
a_input: List of tensor(s) that will be used by run_a() as submodule inputs.
|
| 225 |
+
b_input: List of tensor(s) that will be used by run_b() as submodule inputs.
|
| 226 |
+
"""
|
| 227 |
+
a_input = []
|
| 228 |
+
b_input = []
|
| 229 |
+
submodule = getattr(main_module, submod_path)
|
| 230 |
+
placeholders = [
|
| 231 |
+
node.name for node in submodule.graph.nodes if node.op == "placeholder"
|
| 232 |
+
]
|
| 233 |
+
|
| 234 |
+
# If all placeholder can be found in stored outputs, use stored
|
| 235 |
+
# outputs as inputs. Otherwise, use `torch_glow.get_submod_inputs`
|
| 236 |
+
# to get the inputs.
|
| 237 |
+
if set(placeholders) <= self.a_outputs.keys():
|
| 238 |
+
for name in placeholders:
|
| 239 |
+
a_input.append(self.a_outputs[name])
|
| 240 |
+
b_input.append(self.b_outputs[name])
|
| 241 |
+
else:
|
| 242 |
+
if self.settings.accumulate_error:
|
| 243 |
+
print(f"Can't find previous stored outputs named {placeholders}!")
|
| 244 |
+
|
| 245 |
+
def get_inputs(self: torch.nn.Module, inputs: Any):
|
| 246 |
+
nonlocal a_input
|
| 247 |
+
a_input = inputs
|
| 248 |
+
|
| 249 |
+
# Use forward hook to get the inputs to the submodule
|
| 250 |
+
handle = submodule.register_forward_pre_hook(get_inputs)
|
| 251 |
+
main_module(*self.sample_input)
|
| 252 |
+
handle.remove()
|
| 253 |
+
|
| 254 |
+
b_input = a_input
|
| 255 |
+
|
| 256 |
+
if not self.settings.accumulate_error:
|
| 257 |
+
return a_input, a_input
|
| 258 |
+
|
| 259 |
+
return a_input, b_input
|
| 260 |
+
|
| 261 |
+
def _tag_nodes(self, selected_nodes: NodeSet):
|
| 262 |
+
"""
|
| 263 |
+
Tag selected nodes with tag "minimize". Nodes with the same tags will
|
| 264 |
+
be split to the same submodule afterwards.
|
| 265 |
+
|
| 266 |
+
Args:
|
| 267 |
+
selected_nodes: Nodes that we want to minimize. We will tag those nodes
|
| 268 |
+
with "minimize", all preceding nodes with "main_0" and all following
|
| 269 |
+
nodes with "main_1".
|
| 270 |
+
"""
|
| 271 |
+
for node in self.module.graph.nodes:
|
| 272 |
+
if node.op not in CALLABLE_NODE_OPS:
|
| 273 |
+
continue
|
| 274 |
+
|
| 275 |
+
if node in selected_nodes:
|
| 276 |
+
node.tag = "minimize"
|
| 277 |
+
elif any(
|
| 278 |
+
n.tag in {"minimize", "main_1"}
|
| 279 |
+
for n in node.all_input_nodes
|
| 280 |
+
if n.op in CALLABLE_NODE_OPS
|
| 281 |
+
):
|
| 282 |
+
node.tag = "main_1"
|
| 283 |
+
else:
|
| 284 |
+
node.tag = "main_0"
|
| 285 |
+
|
| 286 |
+
def _build_submodule(self, nodes: NodeSet) -> Tuple[torch.fx.GraphModule, str]:
|
| 287 |
+
"""
|
| 288 |
+
Split self.module so that one submodule consists of `nodes` and only `nodes`.
|
| 289 |
+
|
| 290 |
+
Args:
|
| 291 |
+
nodes: Nodes that we want to include in the minimize submodule.
|
| 292 |
+
|
| 293 |
+
Returns:
|
| 294 |
+
split_module (torch.fx.GraphModule): the module after split.
|
| 295 |
+
submodule_name (str): the name of the submodule that consists of `nodes`.
|
| 296 |
+
"""
|
| 297 |
+
# Color provided nodes
|
| 298 |
+
self._tag_nodes(nodes)
|
| 299 |
+
|
| 300 |
+
# Split module based on coloring
|
| 301 |
+
split_module = split_by_tags(self.module, ["main_0", "minimize", "main_1"])
|
| 302 |
+
|
| 303 |
+
# Find submodule containing colored nodes
|
| 304 |
+
submodule_name: str = ""
|
| 305 |
+
for child_name, _ in split_module.named_children():
|
| 306 |
+
# Skip submodules we're not interested in at the moment
|
| 307 |
+
if "minimize" not in child_name:
|
| 308 |
+
continue
|
| 309 |
+
|
| 310 |
+
if submodule_name == "":
|
| 311 |
+
submodule_name = child_name
|
| 312 |
+
else:
|
| 313 |
+
raise FxNetMinimizerBadModuleError(
|
| 314 |
+
f"Expected only one minimize submodule with nodes {nodes}"
|
| 315 |
+
)
|
| 316 |
+
|
| 317 |
+
if submodule_name == "":
|
| 318 |
+
raise FxNetMinimizerBadModuleError(
|
| 319 |
+
f"Minimize submodule was not found with nodes {nodes}"
|
| 320 |
+
)
|
| 321 |
+
|
| 322 |
+
return split_module, submodule_name
|
| 323 |
+
|
| 324 |
+
def _run_and_compare(
|
| 325 |
+
self,
|
| 326 |
+
split_module: torch.fx.GraphModule,
|
| 327 |
+
submod_name: str,
|
| 328 |
+
output_names: Names,
|
| 329 |
+
report_idx: int = -1
|
| 330 |
+
):
|
| 331 |
+
"""
|
| 332 |
+
Run the submodule in `split_module` that has name `submod_name`
|
| 333 |
+
using `self.run_a` and `self.run_b` and compare their results.
|
| 334 |
+
|
| 335 |
+
Args:
|
| 336 |
+
split_module: Main module that contains the minimize submodule.
|
| 337 |
+
submod_name: Name of the minimize submodule.
|
| 338 |
+
output_names: Names of the node we want to output. If None, we
|
| 339 |
+
will use the original output.
|
| 340 |
+
"""
|
| 341 |
+
submodule = getattr(split_module, submod_name)
|
| 342 |
+
a_input, b_input = self._get_submod_inputs(split_module, submod_name)
|
| 343 |
+
|
| 344 |
+
if len(self.reports) == 0:
|
| 345 |
+
self.reports.append([])
|
| 346 |
+
self.iteration = 1
|
| 347 |
+
|
| 348 |
+
report = self.reports[report_idx if report_idx >= 0 else self.iteration - 1]
|
| 349 |
+
report.append("Run and compare ...")
|
| 350 |
+
|
| 351 |
+
if output_names:
|
| 352 |
+
output_nodes: NodeList = []
|
| 353 |
+
for node in submodule.graph.nodes:
|
| 354 |
+
if node.op == "output":
|
| 355 |
+
submodule.graph.erase_node(node)
|
| 356 |
+
|
| 357 |
+
if node.name in output_names:
|
| 358 |
+
output_nodes.append(node)
|
| 359 |
+
|
| 360 |
+
submodule.graph.output(
|
| 361 |
+
output_nodes[0] if len(output_nodes) == 1 else tuple(output_nodes)
|
| 362 |
+
)
|
| 363 |
+
submodule.graph.lint()
|
| 364 |
+
submodule.recompile()
|
| 365 |
+
|
| 366 |
+
# Use name of args in output node as key to store comparison result
|
| 367 |
+
for node in submodule.graph.nodes:
|
| 368 |
+
if node.op == "output":
|
| 369 |
+
result_key = map_arg(node.args, lambda x: x.name)
|
| 370 |
+
|
| 371 |
+
try:
|
| 372 |
+
a_result = self.run_a(submodule, a_input, report_idx)
|
| 373 |
+
b_result = self.run_b(submodule, b_input, report_idx)
|
| 374 |
+
self._store_outputs(a_result, b_result, submodule)
|
| 375 |
+
except Exception as e:
|
| 376 |
+
report.append(f"Exception raised when running {submod_name}: {e}")
|
| 377 |
+
raise FxNetMinimizerRunFuncError( # noqa: B904
|
| 378 |
+
f"Exception raised when running {submod_name}: {e}"
|
| 379 |
+
)
|
| 380 |
+
|
| 381 |
+
# Compare results
|
| 382 |
+
names: Names = output_names
|
| 383 |
+
if output_names is None:
|
| 384 |
+
names = [str(v) for v in result_key] # type: ignore[possibly-undefined]
|
| 385 |
+
|
| 386 |
+
numeric_result, bool_result = self.compare_fn(a_result, b_result, names)
|
| 387 |
+
|
| 388 |
+
self.results[result_key] = numeric_result # type: ignore[possibly-undefined]
|
| 389 |
+
report.append(f"Numerical accuracy = {numeric_result}")
|
| 390 |
+
if not bool_result:
|
| 391 |
+
report.append(f"Result mismatch for {result_key}")
|
| 392 |
+
if self.module_exporter:
|
| 393 |
+
self.module_exporter(
|
| 394 |
+
a_input, submodule, str(result_key[0]) + "_cpu",
|
| 395 |
+
)
|
| 396 |
+
self.module_exporter(
|
| 397 |
+
b_input, submodule, str(result_key[0]) + "_acc",
|
| 398 |
+
)
|
| 399 |
+
raise FxNetMinimizerResultMismatchError(f"Result mismatch for {result_key}")
|
| 400 |
+
|
| 401 |
+
def _binary_search_impl(
|
| 402 |
+
self, all_nodes: NodeList, start_idx: int, end_idx: int
|
| 403 |
+
) -> NodeSet:
|
| 404 |
+
"""
|
| 405 |
+
Recursive binary search implementation.
|
| 406 |
+
"""
|
| 407 |
+
culprits: NodeSet = set()
|
| 408 |
+
nodes: NodeList = all_nodes[start_idx:end_idx]
|
| 409 |
+
|
| 410 |
+
report: List[str] = []
|
| 411 |
+
if self.exclusion_fn is not None:
|
| 412 |
+
self.exclusion_fn(nodes, start_idx, end_idx)
|
| 413 |
+
if len(nodes) == 0:
|
| 414 |
+
report = ["All nodes are excluded by user"]
|
| 415 |
+
self.reports.append(report)
|
| 416 |
+
return culprits
|
| 417 |
+
|
| 418 |
+
first_node_name = nodes[0].name
|
| 419 |
+
output_node_name = nodes[-1].name
|
| 420 |
+
self.iteration += 1
|
| 421 |
+
self.reports.append(report)
|
| 422 |
+
report.append(f"Binary search iteration {self.iteration}")
|
| 423 |
+
report.append(
|
| 424 |
+
f"From node index {start_idx}:{first_node_name} to {end_idx-1}:{output_node_name}. "
|
| 425 |
+
f"Size of the interested node list is {len(nodes)}"
|
| 426 |
+
)
|
| 427 |
+
cur_nodes: NodeSet = set(nodes)
|
| 428 |
+
|
| 429 |
+
try:
|
| 430 |
+
split_module, submod_name = self._build_submodule(cur_nodes)
|
| 431 |
+
self._run_and_compare(split_module, submod_name, [output_node_name])
|
| 432 |
+
|
| 433 |
+
except (FxNetMinimizerRunFuncError, FxNetMinimizerResultMismatchError):
|
| 434 |
+
|
| 435 |
+
if len(nodes) == 1:
|
| 436 |
+
report.append(
|
| 437 |
+
f"This is the last node in the sub-module. "
|
| 438 |
+
f"Search in the current branch is successful with culprit = {cur_nodes}."
|
| 439 |
+
)
|
| 440 |
+
self.print_report(report)
|
| 441 |
+
return cur_nodes
|
| 442 |
+
|
| 443 |
+
report.append(
|
| 444 |
+
"Proceed to split and lower the halves of the current "
|
| 445 |
+
"sub-module individually."
|
| 446 |
+
)
|
| 447 |
+
self.print_report(report)
|
| 448 |
+
|
| 449 |
+
mid = len(nodes) // 2
|
| 450 |
+
culprits = self._binary_search_impl(all_nodes, start_idx, start_idx + mid)
|
| 451 |
+
|
| 452 |
+
if len(culprits) != 0 and not self.settings.find_all:
|
| 453 |
+
return culprits
|
| 454 |
+
|
| 455 |
+
culprits = self._binary_search_impl(all_nodes, start_idx + mid, end_idx)
|
| 456 |
+
|
| 457 |
+
if len(culprits) == 0:
|
| 458 |
+
report.append(
|
| 459 |
+
f"Further split and lowering found no errors. "
|
| 460 |
+
f"Unable to minimize the submodule with list of nodes: {nodes}"
|
| 461 |
+
)
|
| 462 |
+
self.print_report(report)
|
| 463 |
+
|
| 464 |
+
return culprits
|
| 465 |
+
else:
|
| 466 |
+
report.append("No discrepancy found.")
|
| 467 |
+
self.print_report(report)
|
| 468 |
+
return set()
|
| 469 |
+
|
| 470 |
+
def _binary_traverse(self, nodes: NodeList) -> NodeSet:
|
| 471 |
+
"""
|
| 472 |
+
Binary search on `nodes` for culprit.
|
| 473 |
+
"""
|
| 474 |
+
return self._binary_search_impl(nodes, 0, len(nodes))
|
| 475 |
+
|
| 476 |
+
def _sequential_traverse(self, nodes: NodeList) -> NodeSet:
|
| 477 |
+
"""
|
| 478 |
+
Traverse `nodes` one by one and determine if any of them is a culprit.
|
| 479 |
+
"""
|
| 480 |
+
culprits: NodeSet = set()
|
| 481 |
+
|
| 482 |
+
for node in nodes:
|
| 483 |
+
report: List[str] = []
|
| 484 |
+
self.reports.append(report)
|
| 485 |
+
self.iteration += 1
|
| 486 |
+
report.append(f"Sequential traverse iteration {self.iteration}.")
|
| 487 |
+
report.append(f"Visit node: {node.name}")
|
| 488 |
+
|
| 489 |
+
_LOGGER.info("Visit node: %s", node.name)
|
| 490 |
+
node_list: NodeList = [node]
|
| 491 |
+
if self.exclusion_fn is not None:
|
| 492 |
+
self.exclusion_fn(node_list, -1, -1)
|
| 493 |
+
if len(node_list) == 0:
|
| 494 |
+
report.append(f"User exclusion : {node.name}")
|
| 495 |
+
self.print_report(report)
|
| 496 |
+
return culprits
|
| 497 |
+
|
| 498 |
+
cur_nodes: NodeSet = {node}
|
| 499 |
+
|
| 500 |
+
if node in self.fusions:
|
| 501 |
+
cur_nodes = self.fusions[node]
|
| 502 |
+
|
| 503 |
+
try:
|
| 504 |
+
split_module, submod_name = self._build_submodule(cur_nodes)
|
| 505 |
+
self._run_and_compare(split_module, submod_name, [node.name])
|
| 506 |
+
self.print_report(report)
|
| 507 |
+
except (FxNetMinimizerResultMismatchError):
|
| 508 |
+
culprits.add(node)
|
| 509 |
+
report.append(f"Found culprit from numeric error: {node}")
|
| 510 |
+
self.print_report(report)
|
| 511 |
+
if not self.settings.find_all:
|
| 512 |
+
return culprits
|
| 513 |
+
except (FxNetMinimizerRunFuncError):
|
| 514 |
+
culprits.update(cur_nodes)
|
| 515 |
+
report.append(f"Found culprit from run error: {node}")
|
| 516 |
+
self.print_report(report)
|
| 517 |
+
if not self.settings.find_all:
|
| 518 |
+
return culprits
|
| 519 |
+
|
| 520 |
+
return culprits
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
def _block_traverse_impl(self, nodes: NodeList, start_idx: int, end_idx: int, find_last_node: bool) -> int:
|
| 524 |
+
"""
|
| 525 |
+
Recursive block search implementation.
|
| 526 |
+
find_last_node: If True, search for the last node which result in numerics difference
|
| 527 |
+
if False: find first node in sorted node list
|
| 528 |
+
"""
|
| 529 |
+
report: List[str] = []
|
| 530 |
+
|
| 531 |
+
mid = (start_idx + end_idx) // 2
|
| 532 |
+
cur_nodes_list: NodeList = nodes[:mid + 1] if find_last_node else nodes[mid:]
|
| 533 |
+
|
| 534 |
+
if self.exclusion_fn:
|
| 535 |
+
self.exclusion_fn(cur_nodes_list, -1, -1)
|
| 536 |
+
|
| 537 |
+
cur_nodes = set(cur_nodes_list)
|
| 538 |
+
|
| 539 |
+
first_node_name = cur_nodes_list[0].name
|
| 540 |
+
last_node_name = cur_nodes_list[-1].name
|
| 541 |
+
target_node_name = last_node_name if find_last_node else first_node_name
|
| 542 |
+
|
| 543 |
+
self.iteration += 1
|
| 544 |
+
self.reports.append(report)
|
| 545 |
+
report.extend(
|
| 546 |
+
[
|
| 547 |
+
"=" * 30,
|
| 548 |
+
f"Block search iteration {self.iteration}",
|
| 549 |
+
]
|
| 550 |
+
)
|
| 551 |
+
report.extend(
|
| 552 |
+
[
|
| 553 |
+
f"Search for {'last' if find_last_node else 'first'} node in culprits",
|
| 554 |
+
f"From node index {start_idx}:{nodes[start_idx].name} to {end_idx}:{nodes[end_idx].name}. ",
|
| 555 |
+
f"Subgraph constructed by {first_node_name} to {last_node_name}",
|
| 556 |
+
f"Targeting node: {target_node_name}",
|
| 557 |
+
f"Size of the interested node list is {end_idx - start_idx + 1}",
|
| 558 |
+
]
|
| 559 |
+
)
|
| 560 |
+
report_idx = len(self.reports) - 1
|
| 561 |
+
|
| 562 |
+
try:
|
| 563 |
+
split_module, submod_name = self._build_submodule(cur_nodes)
|
| 564 |
+
self._run_and_compare(split_module, submod_name, [last_node_name], report_idx)
|
| 565 |
+
except (FxNetMinimizerResultMismatchError, FxNetMinimizerRunFuncError):
|
| 566 |
+
report.append(f"Culprits found from node {first_node_name} to {last_node_name}.")
|
| 567 |
+
|
| 568 |
+
if start_idx == mid:
|
| 569 |
+
report.extend(
|
| 570 |
+
[
|
| 571 |
+
"This is the last node in the sub-module. ",
|
| 572 |
+
"Search in the current branch is successful with node :",
|
| 573 |
+
f"{start_idx}, node name: {nodes[start_idx].name}."
|
| 574 |
+
]
|
| 575 |
+
)
|
| 576 |
+
self.print_report(report)
|
| 577 |
+
return start_idx
|
| 578 |
+
|
| 579 |
+
report.append(
|
| 580 |
+
"Proceed to split and lower the halves of the current "
|
| 581 |
+
"sub-module individually."
|
| 582 |
+
)
|
| 583 |
+
self.print_report(report)
|
| 584 |
+
|
| 585 |
+
if find_last_node:
|
| 586 |
+
return self._block_traverse_impl(nodes, start_idx, mid, find_last_node)
|
| 587 |
+
else:
|
| 588 |
+
return self._block_traverse_impl(nodes, mid + 1, end_idx, find_last_node)
|
| 589 |
+
else:
|
| 590 |
+
report.append(f"Culprits not found from node start to {mid}:{nodes[mid].name}.")
|
| 591 |
+
|
| 592 |
+
if start_idx == mid:
|
| 593 |
+
report.extend(
|
| 594 |
+
[
|
| 595 |
+
"This is the last node in the sub-module. ",
|
| 596 |
+
"Search in the current branch is successful with node",
|
| 597 |
+
f"{start_idx}, node name: {nodes[start_idx].name}.",
|
| 598 |
+
]
|
| 599 |
+
)
|
| 600 |
+
self.print_report(report)
|
| 601 |
+
return start_idx + 1 if find_last_node else start_idx - 1
|
| 602 |
+
|
| 603 |
+
report.append(
|
| 604 |
+
"Proceed to split and lower the halves of the current "
|
| 605 |
+
"sub-module individually."
|
| 606 |
+
)
|
| 607 |
+
self.print_report(report)
|
| 608 |
+
|
| 609 |
+
if find_last_node:
|
| 610 |
+
return self._block_traverse_impl(nodes, mid + 1, end_idx, find_last_node)
|
| 611 |
+
else:
|
| 612 |
+
return self._block_traverse_impl(nodes, start_idx, mid, find_last_node)
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
def _block_traverse(self, nodes: NodeList, find_last_node: Optional[bool]) -> NodeSet:
|
| 616 |
+
"""
|
| 617 |
+
Traverse topologically sorted node list
|
| 618 |
+
Find minimium block (start_idx, end_idx) which contains the culprit
|
| 619 |
+
1st pass: search for end_idx by finding the last node in culprit block
|
| 620 |
+
where Numerical accuracy (0, end_idx) > threshold
|
| 621 |
+
2nd pass: search for start_idx by finding the first node in culprit block
|
| 622 |
+
where Numerical accuracy (start_idx, end_idx) < threshold
|
| 623 |
+
Form minimum block by (start_idx - 1, end_idx)
|
| 624 |
+
"""
|
| 625 |
+
culprits: NodeSet = set()
|
| 626 |
+
first_node_name = nodes[0].name
|
| 627 |
+
last_node_name = nodes[-1].name
|
| 628 |
+
last_node_report = [f"Block search from {first_node_name} to {last_node_name}"]
|
| 629 |
+
last_node_report.append("*" * 50)
|
| 630 |
+
self.reports.append(last_node_report)
|
| 631 |
+
|
| 632 |
+
start_idx = 0
|
| 633 |
+
end_idx = len(nodes) - 1
|
| 634 |
+
run_both = True if find_last_node is None else False
|
| 635 |
+
|
| 636 |
+
# step 1: find (0, end_idx) of culprit block
|
| 637 |
+
if run_both or find_last_node:
|
| 638 |
+
last_node_report.append("Start searching for last node in culprit")
|
| 639 |
+
self.print_report(last_node_report)
|
| 640 |
+
end_idx = self._block_traverse_impl(nodes, start_idx, end_idx, True)
|
| 641 |
+
last_node_report.extend(
|
| 642 |
+
[
|
| 643 |
+
"Finish Pass 1",
|
| 644 |
+
f"Find end_idx = {end_idx}:{nodes[end_idx].name}"
|
| 645 |
+
]
|
| 646 |
+
)
|
| 647 |
+
self.print_report(last_node_report)
|
| 648 |
+
|
| 649 |
+
# step 2: reduce culprit block to (start_idx, end_idx)
|
| 650 |
+
if run_both or not find_last_node:
|
| 651 |
+
first_node_report = ["Start searching for first node in culprit"]
|
| 652 |
+
self.print_report(first_node_report)
|
| 653 |
+
start_idx = self._block_traverse_impl(nodes[0:end_idx + 1], start_idx, end_idx, False)
|
| 654 |
+
first_node_report.append("*" * 50)
|
| 655 |
+
self.reports.append(first_node_report)
|
| 656 |
+
first_node_report.extend(
|
| 657 |
+
[
|
| 658 |
+
"Finish Pass 2",
|
| 659 |
+
f"Find start_idx = {start_idx}:{nodes[start_idx].name}"
|
| 660 |
+
]
|
| 661 |
+
)
|
| 662 |
+
self.print_report(first_node_report)
|
| 663 |
+
|
| 664 |
+
# step 3: form module with minimum culprits
|
| 665 |
+
culprits.update(nodes[start_idx:end_idx + 1])
|
| 666 |
+
result_report = [f"Finish searching, found minimum block ({nodes[start_idx]},{nodes[end_idx]})"]
|
| 667 |
+
self.reports.append(result_report)
|
| 668 |
+
self.print_report(result_report)
|
| 669 |
+
return culprits
|
| 670 |
+
|
| 671 |
+
|
| 672 |
+
def _defined_traverse(self, nodes: NodeList) -> NodeSet:
|
| 673 |
+
"""
|
| 674 |
+
run user defined `nodes` and determine if it is a culprit.
|
| 675 |
+
"""
|
| 676 |
+
culprits: NodeSet = set()
|
| 677 |
+
if self.exclusion_fn is not None:
|
| 678 |
+
self.exclusion_fn(nodes, -1, -1)
|
| 679 |
+
if len(nodes) == 0:
|
| 680 |
+
report = ["All nodes are excluded by user"]
|
| 681 |
+
self.reports.append(report)
|
| 682 |
+
return culprits
|
| 683 |
+
|
| 684 |
+
first_node_name = nodes[0].name
|
| 685 |
+
output_node_name = nodes[-1].name
|
| 686 |
+
report = [f"Defined graph from {first_node_name} to {output_node_name}"]
|
| 687 |
+
cur_nodes: NodeSet = set(nodes)
|
| 688 |
+
try:
|
| 689 |
+
split_module, submod_name = self._build_submodule(cur_nodes)
|
| 690 |
+
self._run_and_compare(split_module, submod_name, [output_node_name])
|
| 691 |
+
self.print_report(report)
|
| 692 |
+
except (FxNetMinimizerResultMismatchError, FxNetMinimizerRunFuncError):
|
| 693 |
+
report.append(f"Found culprit {cur_nodes}")
|
| 694 |
+
self.print_report(report)
|
| 695 |
+
return culprits
|
| 696 |
+
|
| 697 |
+
return culprits
|
| 698 |
+
|
| 699 |
+
def _accumulate_traverse(self, nodes: NodeList) -> NodeSet:
|
| 700 |
+
culprits: NodeSet = set()
|
| 701 |
+
nodes_to_run: NodeSet = set()
|
| 702 |
+
|
| 703 |
+
# find_all is not supported for accumulate traversal because all the
|
| 704 |
+
# ops run on NNPI. So we return after the first op that raises error.
|
| 705 |
+
if self.settings.find_all:
|
| 706 |
+
print("'Find All' mode is not supported in accumulate traversal.")
|
| 707 |
+
return culprits
|
| 708 |
+
|
| 709 |
+
for node in nodes:
|
| 710 |
+
report: List[str] = []
|
| 711 |
+
self.reports.append(report)
|
| 712 |
+
self.iteration += 1
|
| 713 |
+
report.append(f"Accumulate traverse iteration {self.iteration}.")
|
| 714 |
+
|
| 715 |
+
nodes_to_run.add(node)
|
| 716 |
+
|
| 717 |
+
node_name = node.name
|
| 718 |
+
if node_name is not None and isinstance(node_name, tuple):
|
| 719 |
+
node_name = node_name[0]
|
| 720 |
+
assert node_name is not None and isinstance(
|
| 721 |
+
node_name, str
|
| 722 |
+
), f"minimize: node_name: {node_name}"
|
| 723 |
+
|
| 724 |
+
report.append(f"Add node: {node_name}")
|
| 725 |
+
|
| 726 |
+
try:
|
| 727 |
+
split_module, submod_name = self._build_submodule(nodes_to_run)
|
| 728 |
+
self._run_and_compare(split_module, submod_name, [node_name])
|
| 729 |
+
self.print_report(report)
|
| 730 |
+
except (FxNetMinimizerResultMismatchError, FxNetMinimizerRunFuncError):
|
| 731 |
+
culprits.add(node)
|
| 732 |
+
report.append(f"Found culprit {node}")
|
| 733 |
+
self.print_report(report)
|
| 734 |
+
return culprits
|
| 735 |
+
|
| 736 |
+
return culprits
|
| 737 |
+
|
| 738 |
+
def _skip_traverse_impl(self, all_nodes: NodeList, start_idx: int, end_idx: int) -> NodeSet:
|
| 739 |
+
"""
|
| 740 |
+
Skip certain nodes in graph based on settings
|
| 741 |
+
"""
|
| 742 |
+
culprits: NodeSet = set()
|
| 743 |
+
nodes: NodeList = all_nodes[start_idx:end_idx]
|
| 744 |
+
cur_nodes: NodeSet = set(nodes)
|
| 745 |
+
if self.exclusion_fn is not None:
|
| 746 |
+
self.exclusion_fn(nodes, start_idx, end_idx)
|
| 747 |
+
cur_nodes = set(nodes)
|
| 748 |
+
else:
|
| 749 |
+
for node in nodes:
|
| 750 |
+
if node in self.fusions:
|
| 751 |
+
cur_nodes.update(self.fusions[node])
|
| 752 |
+
report: List[str] = []
|
| 753 |
+
self.reports.append(report)
|
| 754 |
+
self.iteration += 1
|
| 755 |
+
report.append(f" Nodes block {self.iteration}.")
|
| 756 |
+
report.append(
|
| 757 |
+
f"From node index {start_idx} to {end_idx-1}. "
|
| 758 |
+
f"Size of the interested node list is {len(nodes)}"
|
| 759 |
+
)
|
| 760 |
+
|
| 761 |
+
try:
|
| 762 |
+
split_module, submod_name = self._build_submodule(cur_nodes)
|
| 763 |
+
self._run_and_compare(split_module, submod_name, [])
|
| 764 |
+
except (FxNetMinimizerResultMismatchError):
|
| 765 |
+
culprits.update(cur_nodes)
|
| 766 |
+
report.append(f"Found culprit from numeric error: {cur_nodes}")
|
| 767 |
+
self.print_report(report)
|
| 768 |
+
return culprits
|
| 769 |
+
except (FxNetMinimizerRunFuncError):
|
| 770 |
+
culprits.update(cur_nodes)
|
| 771 |
+
report.append(f"Found culprit from run error: {cur_nodes}")
|
| 772 |
+
self.print_report(report)
|
| 773 |
+
return culprits
|
| 774 |
+
else:
|
| 775 |
+
report.append("No discrepancy found.")
|
| 776 |
+
self.print_report(report)
|
| 777 |
+
return set()
|
| 778 |
+
|
| 779 |
+
|
| 780 |
+
def _skip_traverse(self, all_nodes: NodeList, skip_nodes: List) -> NodeSet:
|
| 781 |
+
"""
|
| 782 |
+
Skip certain nodes in graph based on settings
|
| 783 |
+
"""
|
| 784 |
+
start_idx = 0
|
| 785 |
+
num_nodes = len(all_nodes)
|
| 786 |
+
idx = 0
|
| 787 |
+
culprits = set()
|
| 788 |
+
while idx < num_nodes:
|
| 789 |
+
node = all_nodes[idx]
|
| 790 |
+
if (node.name in skip_nodes): # skip the node
|
| 791 |
+
if idx > start_idx:
|
| 792 |
+
culprits = self._skip_traverse_impl(all_nodes, start_idx, idx)
|
| 793 |
+
start_idx = idx + 1
|
| 794 |
+
elif idx == num_nodes - 1 and start_idx <= idx: # last node
|
| 795 |
+
culprits = self._skip_traverse_impl(all_nodes, start_idx, idx + 1)
|
| 796 |
+
idx += 1
|
| 797 |
+
|
| 798 |
+
return culprits
|
| 799 |
+
|
| 800 |
+
|
| 801 |
+
|
| 802 |
+
def _collect_nodes(self, start: Optional[str], end: Optional[str]) -> NodeList:
|
| 803 |
+
"""
|
| 804 |
+
Collect nodes in the model that between nodes with name of `start` and `end`.
|
| 805 |
+
These two nodes are also included.
|
| 806 |
+
"""
|
| 807 |
+
nodes: NodeList = []
|
| 808 |
+
add_node = start is None
|
| 809 |
+
|
| 810 |
+
for node in self.module.graph.nodes:
|
| 811 |
+
if node.op not in CALLABLE_NODE_OPS:
|
| 812 |
+
continue
|
| 813 |
+
|
| 814 |
+
if node.name == start:
|
| 815 |
+
add_node = True
|
| 816 |
+
|
| 817 |
+
if add_node:
|
| 818 |
+
nodes.append(node)
|
| 819 |
+
|
| 820 |
+
if node.name == end:
|
| 821 |
+
break
|
| 822 |
+
|
| 823 |
+
return nodes
|
| 824 |
+
|
| 825 |
+
def run_nodes(self, start: Optional[str] = None, end: Optional[str] = None):
|
| 826 |
+
"""
|
| 827 |
+
Run part of the model from `start` node to `end` node. If `start` is None
|
| 828 |
+
then we start from the beginning of the model. If `end` is None then we
|
| 829 |
+
stop at the end of the model.
|
| 830 |
+
|
| 831 |
+
Args:
|
| 832 |
+
start: The name of the node which is the first node of the submodule
|
| 833 |
+
we want to run. If set to None, then we'll start with the first
|
| 834 |
+
node of the model.
|
| 835 |
+
end: The name of the node which is the last node of the submodule we
|
| 836 |
+
want to run. If set to None, we'll end with the last node of the
|
| 837 |
+
model.
|
| 838 |
+
"""
|
| 839 |
+
nodes = self._collect_nodes(start, end)
|
| 840 |
+
cur_nodes = set(nodes)
|
| 841 |
+
|
| 842 |
+
for node in nodes:
|
| 843 |
+
if node in self.fusions:
|
| 844 |
+
cur_nodes.update(self.fusions[node])
|
| 845 |
+
|
| 846 |
+
output_names = []
|
| 847 |
+
if self.settings.return_intermediate:
|
| 848 |
+
output_names = [node.name for node in nodes]
|
| 849 |
+
|
| 850 |
+
try:
|
| 851 |
+
split_module, submod_name = self._build_submodule(cur_nodes)
|
| 852 |
+
self._run_and_compare(split_module, submod_name, output_names)
|
| 853 |
+
except (
|
| 854 |
+
FxNetMinimizerRunFuncError,
|
| 855 |
+
FxNetMinimizerResultMismatchError,
|
| 856 |
+
) as e:
|
| 857 |
+
print(e)
|
| 858 |
+
|
| 859 |
+
def print_report(self, report: List[str]):
|
| 860 |
+
for i in range(len(report)):
|
| 861 |
+
if i > 0:
|
| 862 |
+
print(" . " + report[i])
|
| 863 |
+
else:
|
| 864 |
+
print(report[i])
|
| 865 |
+
|
| 866 |
+
def print_reports(self):
|
| 867 |
+
for report in self.reports:
|
| 868 |
+
self.print_report(report)
|
| 869 |
+
|
| 870 |
+
def minimize(
|
| 871 |
+
self,
|
| 872 |
+
start: Optional[str] = None,
|
| 873 |
+
end: Optional[str] = None,
|
| 874 |
+
skip_nodes: Optional[List] = None,
|
| 875 |
+
find_last_node: Optional[bool] = None,
|
| 876 |
+
) -> NodeSet:
|
| 877 |
+
"""
|
| 878 |
+
Minimizing the model from node with name `start` to node with name `end` base
|
| 879 |
+
on self.settings. Find culprits that causes FxNetMinimizerRunFuncError or
|
| 880 |
+
FxNetMinimizerResultMismatchError errors.
|
| 881 |
+
|
| 882 |
+
Args:
|
| 883 |
+
start: The name of the node where we want to start minimizing. If set
|
| 884 |
+
to None, then we'll start with the first node of the model.
|
| 885 |
+
end: The name of the node where we want to terminate minimizing. If
|
| 886 |
+
set to None, we'll end with the last node of the model.
|
| 887 |
+
skip_nodes: The names of nodes where we want to skip during minimizing.
|
| 888 |
+
It'll create subgraphs without these skip nodes under the hood.
|
| 889 |
+
Only applicable in mode "skip".
|
| 890 |
+
find_last_node: True if only last_node of a culprits is needed in mode "block".
|
| 891 |
+
False if only the first_node of a culprits is needed.
|
| 892 |
+
Only applicable in mode "block".
|
| 893 |
+
|
| 894 |
+
Returns:
|
| 895 |
+
nodes: A list of nodes that causes FxNetMinimizerRunFuncError or
|
| 896 |
+
FxNetMinimizerResultMismatchError errors during minimizing.
|
| 897 |
+
"""
|
| 898 |
+
|
| 899 |
+
print(self.settings)
|
| 900 |
+
print(self.module.graph)
|
| 901 |
+
|
| 902 |
+
nodes = self._collect_nodes(start, end)
|
| 903 |
+
|
| 904 |
+
if self.settings.traverse_method == "sequential":
|
| 905 |
+
return self._sequential_traverse(nodes)
|
| 906 |
+
|
| 907 |
+
if self.settings.traverse_method == "binary":
|
| 908 |
+
return self._binary_traverse(nodes)
|
| 909 |
+
|
| 910 |
+
if self.settings.traverse_method == "accumulate":
|
| 911 |
+
return self._accumulate_traverse(nodes)
|
| 912 |
+
|
| 913 |
+
if self.settings.traverse_method == "skip":
|
| 914 |
+
if (skip_nodes is None):
|
| 915 |
+
raise RuntimeError("'skip_nodes' can't be None when 'traverse_method' is 'skip'.")
|
| 916 |
+
return self._skip_traverse(nodes, skip_nodes)
|
| 917 |
+
|
| 918 |
+
if self.settings.traverse_method == "defined":
|
| 919 |
+
return self._defined_traverse(nodes)
|
| 920 |
+
|
| 921 |
+
if self.settings.traverse_method == "block":
|
| 922 |
+
return self._block_traverse(nodes, find_last_node)
|
| 923 |
+
|
| 924 |
+
raise RuntimeError(f"Unknown traverse method {self.settings.traverse_method}!")
|
parrot/lib/python3.10/site-packages/torch/fx/passes/param_fetch.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.fx.graph_module import GraphModule
|
| 2 |
+
from typing import Any, Callable, Dict, List, Tuple, Type
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
|
| 6 |
+
from torch.fx._compatibility import compatibility
|
| 7 |
+
|
| 8 |
+
__all__ = ['default_matching', 'extract_attrs_for_lowering', 'lift_lowering_attrs_to_nodes']
|
| 9 |
+
|
| 10 |
+
# Matching method matches the attribute name of current version to the attribute name of `target_version`
|
| 11 |
+
@compatibility(is_backward_compatible=False)
|
| 12 |
+
def default_matching(name: str, target_version: int) -> str:
|
| 13 |
+
"""Default matching method
|
| 14 |
+
"""
|
| 15 |
+
return name
|
| 16 |
+
|
| 17 |
+
# This dict maps the nn.Module class name to the attribute name list that we want to fetch for lowering.
|
| 18 |
+
# The first integer in the tuple is the version number of the nn.Module class when we create the parameter list.
|
| 19 |
+
# If there's a version mismatch then it means the parameter names in the book might be mismatched with nn.Module.
|
| 20 |
+
module_fetch_book: Dict[Type, Tuple[int, List[str], Callable[[str, int], str]]] = {
|
| 21 |
+
torch.nn.modules.linear.Linear: (1, ["weight", "bias"], default_matching),
|
| 22 |
+
torch.nn.modules.conv.Conv2d: (
|
| 23 |
+
1, ["weight", "bias", "kernel_size", "stride", "padding", "dilation", "groups", "padding_mode"], default_matching
|
| 24 |
+
),
|
| 25 |
+
torch.nn.modules.batchnorm.BatchNorm2d: (2, ["weight", "bias", "running_mean", "running_var", "eps"], default_matching),
|
| 26 |
+
torch.nn.modules.pooling.AdaptiveAvgPool2d: (1, [], default_matching),
|
| 27 |
+
torch.nn.modules.pooling.MaxPool2d: (
|
| 28 |
+
1, ["kernel_size", "stride", "padding", "dilation", "return_indices", "ceil_mode"], default_matching
|
| 29 |
+
),
|
| 30 |
+
torch.nn.modules.activation.ReLU: (1, ["inplace"], default_matching),
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
@compatibility(is_backward_compatible=False)
|
| 34 |
+
def extract_attrs_for_lowering(mod: nn.Module) -> Dict[str, Any]:
|
| 35 |
+
"""If `mod` is in `module_fetch_book`, fetch the mod's attributes that in the `module_fetch_book`
|
| 36 |
+
after checking module's version is compatible with the `module_fetch_book`.
|
| 37 |
+
"""
|
| 38 |
+
attrs_for_lowering: Dict[str, Any] = {}
|
| 39 |
+
attrs_for_lowering["name"] = torch.typename(mod)
|
| 40 |
+
|
| 41 |
+
if type(mod) in module_fetch_book:
|
| 42 |
+
version, param_to_fetch, matching_method = module_fetch_book[type(mod)]
|
| 43 |
+
if version < mod._version:
|
| 44 |
+
raise RuntimeError(f"Fetcher version {version} try to fetch {torch.typename(mod)} version {mod._version}, "
|
| 45 |
+
"please upgrade the module_fetch_book, open an issue and @842974287 "
|
| 46 |
+
"or report a bug to AIACC team directly.")
|
| 47 |
+
for attr in param_to_fetch:
|
| 48 |
+
attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version))
|
| 49 |
+
else:
|
| 50 |
+
raise RuntimeError(f"{torch.typename(mod)} is not in the module_fetch_book yet, "
|
| 51 |
+
"please add it to the module_fetch_book, open an issue and @842974287 "
|
| 52 |
+
"or report a bug to AIACC team directly.")
|
| 53 |
+
return attrs_for_lowering
|
| 54 |
+
|
| 55 |
+
@compatibility(is_backward_compatible=False)
|
| 56 |
+
def lift_lowering_attrs_to_nodes(fx_module: GraphModule) -> None:
|
| 57 |
+
"""Recursively traverse all `fx_module` nodes and fetch the module's attributes if the node is a leaf module.
|
| 58 |
+
"""
|
| 59 |
+
submodules = dict(fx_module.named_modules())
|
| 60 |
+
|
| 61 |
+
for node in fx_module.graph.nodes:
|
| 62 |
+
if node.op == "call_module":
|
| 63 |
+
if isinstance(submodules[node.target], GraphModule):
|
| 64 |
+
lift_lowering_attrs_to_nodes(submodules[node.target])
|
| 65 |
+
else:
|
| 66 |
+
node.attrs_for_lowering = extract_attrs_for_lowering(submodules[node.target])
|
parrot/lib/python3.10/site-packages/torch/fx/passes/runtime_assert.py
ADDED
|
@@ -0,0 +1,392 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import logging
|
| 3 |
+
import operator
|
| 4 |
+
from typing import Any, Dict, Optional, Set, TYPE_CHECKING
|
| 5 |
+
|
| 6 |
+
# Import sympy and ShapeEnv during TYPE_CHECKING since importing sympy is slow
|
| 7 |
+
if TYPE_CHECKING:
|
| 8 |
+
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
| 9 |
+
else:
|
| 10 |
+
ShapeEnv = Any
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
import torch.utils._pytree as pytree
|
| 14 |
+
from torch import fx
|
| 15 |
+
from torch.fx._compatibility import compatibility
|
| 16 |
+
from torch.fx._utils import lazy_format_graph_code
|
| 17 |
+
from torch.fx.experimental.sym_node import SymNode
|
| 18 |
+
from torch.fx.graph_module import GraphModule
|
| 19 |
+
|
| 20 |
+
log = logging.getLogger(__name__)
|
| 21 |
+
graph_code_log = torch._logging.getArtifactLogger(__name__, "graph_code")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def _get_example_value(node: fx.Node) -> Optional[str]:
|
| 25 |
+
"""
|
| 26 |
+
Get the example value key for a node, since dynamo uses "example_value"
|
| 27 |
+
while non-strict export uses "val.
|
| 28 |
+
"""
|
| 29 |
+
if "example_value" in node.meta:
|
| 30 |
+
return node.meta["example_value"]
|
| 31 |
+
elif "val" in node.meta:
|
| 32 |
+
return node.meta["val"]
|
| 33 |
+
else:
|
| 34 |
+
return None
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@compatibility(is_backward_compatible=True)
|
| 38 |
+
def insert_deferred_runtime_asserts(
|
| 39 |
+
gm: GraphModule,
|
| 40 |
+
shape_env: ShapeEnv,
|
| 41 |
+
name: str,
|
| 42 |
+
export: bool = False,
|
| 43 |
+
) -> None:
|
| 44 |
+
"""
|
| 45 |
+
During tracing, we may have discovered that some data-dependent values
|
| 46 |
+
had runtime assert on them; e.g., torch.empty(x.item()) induces a runtime
|
| 47 |
+
that x.item() >= 0. This asserts can happen unpredictably during fake
|
| 48 |
+
tensor propagation, so we cannot conveniently insert them into the FX graph
|
| 49 |
+
when they occur. Instead, we accumulate them in the ShapeEnv, and in this
|
| 50 |
+
pass insert them into the graph as proper tests.
|
| 51 |
+
"""
|
| 52 |
+
|
| 53 |
+
# We hash (node_name, min_val, max_val)
|
| 54 |
+
nodes_that_already_have_sym_constraint_range = set()
|
| 55 |
+
|
| 56 |
+
# We hash only node name here because size don't take min/max
|
| 57 |
+
nodes_that_already_have_sym_constraint_size = set()
|
| 58 |
+
# TODO this only works for top-level nodes today, also
|
| 59 |
+
# we should potentially use it not create duplicate
|
| 60 |
+
# assert_async nodes
|
| 61 |
+
for node in gm.graph.nodes:
|
| 62 |
+
if (
|
| 63 |
+
node.op == "call_function"
|
| 64 |
+
and node.target == torch.ops.aten.sym_constrain_range.default
|
| 65 |
+
):
|
| 66 |
+
assert len(node.args) == 1
|
| 67 |
+
nodes_that_already_have_sym_constraint_range.add(
|
| 68 |
+
(node.args[0], node.kwargs["min"], node.kwargs["max"])
|
| 69 |
+
)
|
| 70 |
+
if (
|
| 71 |
+
node.op == "call_function"
|
| 72 |
+
and node.target == torch.ops.aten.sym_constrain_range_for_size.default
|
| 73 |
+
):
|
| 74 |
+
assert len(node.args) == 1
|
| 75 |
+
nodes_that_already_have_sym_constraint_size.add(node.args[0])
|
| 76 |
+
|
| 77 |
+
# Import sympy locally
|
| 78 |
+
import sympy
|
| 79 |
+
|
| 80 |
+
from torch.fx.experimental.symbolic_shapes import (
|
| 81 |
+
CallMethodKey,
|
| 82 |
+
cast_symbool_to_symint_guardless,
|
| 83 |
+
ConvertIntKey,
|
| 84 |
+
DivideByKey,
|
| 85 |
+
free_symbols,
|
| 86 |
+
InnerTensorKey,
|
| 87 |
+
)
|
| 88 |
+
from torch.utils._sympy.interp import sympy_interp
|
| 89 |
+
from torch.utils._sympy.reference import PythonReferenceAnalysis
|
| 90 |
+
|
| 91 |
+
# TODO: Request simplification on runtime asserts before emitting them
|
| 92 |
+
ras_by_symbol = shape_env.deferred_runtime_asserts.copy()
|
| 93 |
+
graph = gm.graph
|
| 94 |
+
|
| 95 |
+
if not any(ras for ras in ras_by_symbol.values()):
|
| 96 |
+
return
|
| 97 |
+
|
| 98 |
+
graph_code_log.debug(
|
| 99 |
+
"%s",
|
| 100 |
+
lazy_format_graph_code(f"pre insert_deferred_runtime_asserts {name}", gm),
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
# deduplicate unassociated runtime assertions
|
| 104 |
+
# we could do better, some guards might be redundant,
|
| 105 |
+
# e.g. Eq(s0, 4) & Eq(2*s0, 8)
|
| 106 |
+
# but unclear how to handle all of that right now.
|
| 107 |
+
# TODO(pianpwk): better way of doing this
|
| 108 |
+
new_ras = []
|
| 109 |
+
ras_exprs: Set[sympy.Expr] = set()
|
| 110 |
+
for ras in ras_by_symbol.pop(None, []): # type: ignore[call-overload]
|
| 111 |
+
if ras.expr not in ras_exprs:
|
| 112 |
+
new_ras.append(ras)
|
| 113 |
+
ras_exprs.add(ras.expr)
|
| 114 |
+
ras_by_symbol[None] = new_ras # type: ignore[index]
|
| 115 |
+
|
| 116 |
+
# We are going to mutate the dict
|
| 117 |
+
symbol_to_proxy: Dict[sympy.Symbol, fx.Proxy] = {}
|
| 118 |
+
placeholders = set()
|
| 119 |
+
last_placeholder = None
|
| 120 |
+
for node in graph.nodes:
|
| 121 |
+
if node.op != "placeholder":
|
| 122 |
+
break
|
| 123 |
+
last_placeholder = node
|
| 124 |
+
placeholders.add(node)
|
| 125 |
+
if last_placeholder is None: # no placeholders, just insert before first node
|
| 126 |
+
last_placeholder = next(iter(graph.nodes))
|
| 127 |
+
|
| 128 |
+
# Identify what symbols we need to reify. This isn't strictly needed
|
| 129 |
+
# but helps reduce churn on the graph
|
| 130 |
+
needed_symbols: Set[sympy.Symbol] = set()
|
| 131 |
+
for ras in ras_by_symbol.values():
|
| 132 |
+
for ra in ras:
|
| 133 |
+
needed_symbols.update(free_symbols(ra.expr))
|
| 134 |
+
|
| 135 |
+
log.debug("needed_symbols = %s", needed_symbols)
|
| 136 |
+
|
| 137 |
+
def add_runtime_asserts(ras):
|
| 138 |
+
for ra in ras:
|
| 139 |
+
log.debug("inserting runtime assert %s", ra.expr)
|
| 140 |
+
# Need to process ALL free symbols, not just unbacked ones
|
| 141 |
+
fvs = free_symbols(ra.expr)
|
| 142 |
+
missing = fvs - symbol_to_proxy.keys()
|
| 143 |
+
if missing:
|
| 144 |
+
i1 = min(missing, key=str)
|
| 145 |
+
# TODO: Remove relaxing assert on unbacked_symint https://github.com/pytorch/pytorch/issues/119689
|
| 146 |
+
# assert shape_env.is_unbacked_symint(i1), i1
|
| 147 |
+
ras_by_symbol.setdefault(i1, []).append(ra)
|
| 148 |
+
else:
|
| 149 |
+
# Convert the sympy expression into a sequence of FX
|
| 150 |
+
# nodes
|
| 151 |
+
res = sympy_interp(
|
| 152 |
+
PythonReferenceAnalysis, symbol_to_proxy, ra.expr
|
| 153 |
+
).node
|
| 154 |
+
graph.call_function(
|
| 155 |
+
torch.ops.aten._assert_scalar.default,
|
| 156 |
+
# TODO: use ra.msg here, but it's pretty
|
| 157 |
+
# useless right now
|
| 158 |
+
(
|
| 159 |
+
res,
|
| 160 |
+
f"Runtime assertion failed for expression {ra.expr} on node '{res}'",
|
| 161 |
+
),
|
| 162 |
+
)
|
| 163 |
+
|
| 164 |
+
inserted_sym_nodes = 0 # for inserting unassociated runtime asserts
|
| 165 |
+
nodes = list(graph.nodes)
|
| 166 |
+
for i, node in enumerate(nodes[:-1]):
|
| 167 |
+
# Placeholders can match symbols, but when we destructure them
|
| 168 |
+
# with size we have to make sure we insert the nodes after all
|
| 169 |
+
# the placeholders
|
| 170 |
+
with graph.inserting_before(
|
| 171 |
+
nodes[i + 1] if node not in placeholders else last_placeholder.next
|
| 172 |
+
):
|
| 173 |
+
# Unfortunately, this logic still must remain because manual
|
| 174 |
+
# make_fx calls may not explicitly bind all symbolic ints as
|
| 175 |
+
# arguments to the function, so we must infer it from the other
|
| 176 |
+
# arguments
|
| 177 |
+
if (
|
| 178 |
+
node in placeholders
|
| 179 |
+
and (example_value := _get_example_value(node)) is not None
|
| 180 |
+
):
|
| 181 |
+
|
| 182 |
+
def match_symbol(symint, cb):
|
| 183 |
+
if (
|
| 184 |
+
isinstance(symint, torch.SymInt)
|
| 185 |
+
and isinstance(symint.node, SymNode)
|
| 186 |
+
and isinstance(s := symint.node.expr, sympy.Symbol)
|
| 187 |
+
and s not in symbol_to_proxy
|
| 188 |
+
and s in needed_symbols
|
| 189 |
+
):
|
| 190 |
+
symbol_to_proxy[s] = fx.Proxy(cb())
|
| 191 |
+
log.debug("symbol_to_proxy[%s] = %s", s, symbol_to_proxy[s])
|
| 192 |
+
nonlocal inserted_sym_nodes
|
| 193 |
+
inserted_sym_nodes += 1
|
| 194 |
+
|
| 195 |
+
match_symbol(example_value, lambda: node)
|
| 196 |
+
if isinstance(t := example_value, torch.Tensor):
|
| 197 |
+
for i, s in enumerate(t.size()):
|
| 198 |
+
match_symbol(
|
| 199 |
+
s,
|
| 200 |
+
lambda: graph.call_function(
|
| 201 |
+
torch.ops.aten.sym_size.int, (node, i)
|
| 202 |
+
),
|
| 203 |
+
)
|
| 204 |
+
for i, s in enumerate(t.stride()):
|
| 205 |
+
match_symbol(
|
| 206 |
+
s,
|
| 207 |
+
lambda: graph.call_function(
|
| 208 |
+
torch.ops.aten.sym_stride.int, (node, i)
|
| 209 |
+
),
|
| 210 |
+
)
|
| 211 |
+
match_symbol(
|
| 212 |
+
t.storage_offset(),
|
| 213 |
+
lambda: graph.call_function(
|
| 214 |
+
torch.ops.aten.sym_storage_offset.default, (node,)
|
| 215 |
+
),
|
| 216 |
+
)
|
| 217 |
+
|
| 218 |
+
# Handle asserts that aren't associated with any symbol. This
|
| 219 |
+
# doesn't really have to be in the loop as it will only run once,
|
| 220 |
+
# it just needs to happen right after the placeholders.
|
| 221 |
+
# insert this after placeholders & added sym nodes, and before non-placeholders.
|
| 222 |
+
if node not in placeholders:
|
| 223 |
+
last_sym_node = last_placeholder
|
| 224 |
+
for _ in range(inserted_sym_nodes):
|
| 225 |
+
last_sym_node = last_sym_node.next
|
| 226 |
+
with graph.inserting_before(last_sym_node.next):
|
| 227 |
+
add_runtime_asserts(ras_by_symbol.pop(None, [])) # type: ignore[call-overload]
|
| 228 |
+
|
| 229 |
+
defs = []
|
| 230 |
+
|
| 231 |
+
if unbacked_bindings := node.meta.get("unbacked_bindings"):
|
| 232 |
+
for s, keypath in unbacked_bindings.items():
|
| 233 |
+
defs.append(s)
|
| 234 |
+
|
| 235 |
+
# TODO: some CSE when generating these nodes can probably
|
| 236 |
+
# help reduce graph size and improve compile itme
|
| 237 |
+
def go(node, keypath):
|
| 238 |
+
if keypath == ():
|
| 239 |
+
return node
|
| 240 |
+
if (
|
| 241 |
+
len(keypath) >= 2
|
| 242 |
+
and isinstance(keypath[0], CallMethodKey)
|
| 243 |
+
and isinstance(keypath[1], pytree.SequenceKey)
|
| 244 |
+
):
|
| 245 |
+
if keypath[0].name == "size":
|
| 246 |
+
return go(
|
| 247 |
+
graph.call_function(
|
| 248 |
+
torch.ops.aten.sym_size.int,
|
| 249 |
+
(node, keypath[1].idx),
|
| 250 |
+
),
|
| 251 |
+
keypath[2:],
|
| 252 |
+
)
|
| 253 |
+
if keypath[0].name == "stride":
|
| 254 |
+
return go(
|
| 255 |
+
graph.call_function(
|
| 256 |
+
torch.ops.aten.stride.int,
|
| 257 |
+
(node, keypath[1].idx),
|
| 258 |
+
),
|
| 259 |
+
keypath[2:],
|
| 260 |
+
)
|
| 261 |
+
return go(
|
| 262 |
+
graph.call_method(
|
| 263 |
+
keypath[0].name, (node, keypath[1].idx)
|
| 264 |
+
),
|
| 265 |
+
keypath[2:],
|
| 266 |
+
)
|
| 267 |
+
elif isinstance(keypath[0], CallMethodKey):
|
| 268 |
+
return go(
|
| 269 |
+
graph.call_method(keypath[0].name, (node,)), keypath[1:]
|
| 270 |
+
)
|
| 271 |
+
elif isinstance(keypath[0], pytree.SequenceKey):
|
| 272 |
+
return go(
|
| 273 |
+
graph.call_function(
|
| 274 |
+
operator.getitem, (node, keypath[0].idx)
|
| 275 |
+
),
|
| 276 |
+
keypath[1:],
|
| 277 |
+
)
|
| 278 |
+
elif isinstance(keypath[0], ConvertIntKey):
|
| 279 |
+
return go(
|
| 280 |
+
graph.call_function(
|
| 281 |
+
cast_symbool_to_symint_guardless, (node,)
|
| 282 |
+
),
|
| 283 |
+
keypath[1:],
|
| 284 |
+
)
|
| 285 |
+
elif isinstance(keypath[0], DivideByKey):
|
| 286 |
+
# TODO: need to assert divisibility
|
| 287 |
+
return go(
|
| 288 |
+
graph.call_function(
|
| 289 |
+
operator.floordiv, (node, keypath[0].divisor)
|
| 290 |
+
),
|
| 291 |
+
keypath[1:],
|
| 292 |
+
)
|
| 293 |
+
elif isinstance(keypath[0], InnerTensorKey):
|
| 294 |
+
return go(
|
| 295 |
+
graph.call_function(
|
| 296 |
+
getattr, (node, keypath[0].inner_name)
|
| 297 |
+
),
|
| 298 |
+
keypath[1:],
|
| 299 |
+
)
|
| 300 |
+
else:
|
| 301 |
+
raise AssertionError(f"unrecognized keypath {keypath}")
|
| 302 |
+
|
| 303 |
+
symbol_to_proxy[s] = fx.Proxy(go(node, keypath))
|
| 304 |
+
log.debug("symbol_to_proxy[%s] = %s", s, symbol_to_proxy[s])
|
| 305 |
+
|
| 306 |
+
for i0 in defs:
|
| 307 |
+
ras = ras_by_symbol.pop(i0, [])
|
| 308 |
+
# Before we perform any asserts, first apply range
|
| 309 |
+
# refinement. This is important, because if we are going
|
| 310 |
+
# to retrace the graph (and we typically are if we send
|
| 311 |
+
# the graph to AOTAutograd), we need to make sure we apply
|
| 312 |
+
# range refinement (ala _check_is_size) first, BEFORE we
|
| 313 |
+
# run any of the asserts. Otherwise, we may decide to
|
| 314 |
+
# perform substitutions based on the asserts which we then
|
| 315 |
+
# can't back out, because value ranges can only be applied
|
| 316 |
+
# to asserts.)
|
| 317 |
+
#
|
| 318 |
+
# A perhaps better long term plan is to avoid this order
|
| 319 |
+
# dependence by making it possible to refine ranges on
|
| 320 |
+
# arbitrary expressions, not just symbols. But it is not
|
| 321 |
+
# so easy to make use of this information, see
|
| 322 |
+
# https://twitter.com/ezyang/status/1745801370299482492
|
| 323 |
+
# We actually made an attempt at this in
|
| 324 |
+
# https://github.com/pytorch/pytorch/pull/119043
|
| 325 |
+
# which didn't work.
|
| 326 |
+
#
|
| 327 |
+
# Another ideas for how to do this:
|
| 328 |
+
# - Have bound_sympy be the source of truth of the ranges of any expression
|
| 329 |
+
# - Cache intermediate results for every subexpression of bound_sympy
|
| 330 |
+
# - This cache should be possible to edit to refine ranges
|
| 331 |
+
#
|
| 332 |
+
# One issue with this proposal is that if
|
| 333 |
+
# we have a bound on 2x, we are not going to be able to
|
| 334 |
+
# apply it for 4x. Similarly, we may have bounds for an
|
| 335 |
+
# equivalent expression that we are not applying because
|
| 336 |
+
# it's not a perfect match (e.g. x < y vs y > x)".
|
| 337 |
+
#
|
| 338 |
+
# The first issue we already have it and it's impossible
|
| 339 |
+
# to solve in general, so any implementation on a best
|
| 340 |
+
# effort basis should do.
|
| 341 |
+
#
|
| 342 |
+
# The second issue is a preexisting one. It can be mitigated
|
| 343 |
+
# with a normalisation algorithm. In general, it may also
|
| 344 |
+
# be on a best effort basis, but since our grammar is not
|
| 345 |
+
# terribly difficult, chances are we could even fully
|
| 346 |
+
# normalise SymPy expressions... who knows.
|
| 347 |
+
|
| 348 |
+
if i0 in shape_env.size_like:
|
| 349 |
+
if export:
|
| 350 |
+
if (
|
| 351 |
+
symbol_to_proxy[i0].node
|
| 352 |
+
not in nodes_that_already_have_sym_constraint_size
|
| 353 |
+
):
|
| 354 |
+
graph.call_function(
|
| 355 |
+
torch.ops.aten.sym_constrain_range_for_size.default,
|
| 356 |
+
(symbol_to_proxy[i0].node,),
|
| 357 |
+
)
|
| 358 |
+
else:
|
| 359 |
+
graph.call_function(
|
| 360 |
+
torch._check_is_size, (symbol_to_proxy[i0].node,)
|
| 361 |
+
)
|
| 362 |
+
|
| 363 |
+
vr = shape_env.var_to_range[i0]
|
| 364 |
+
if not shape_env._default_unspecified_value_range().issubset(vr):
|
| 365 |
+
# The runtime range is constrained, so add a runtime
|
| 366 |
+
# assert and also explicitly refine the range
|
| 367 |
+
# (refinement should not be necessary once runtime
|
| 368 |
+
# asserts cause refinement, but that's NYI)
|
| 369 |
+
def convert(s):
|
| 370 |
+
try:
|
| 371 |
+
return int(s)
|
| 372 |
+
except TypeError:
|
| 373 |
+
return None
|
| 374 |
+
|
| 375 |
+
min_val = convert(vr.lower)
|
| 376 |
+
max_val = convert(vr.upper)
|
| 377 |
+
|
| 378 |
+
if (
|
| 379 |
+
symbol_to_proxy[i0].node,
|
| 380 |
+
min_val,
|
| 381 |
+
max_val,
|
| 382 |
+
) not in nodes_that_already_have_sym_constraint_range:
|
| 383 |
+
graph.call_function(
|
| 384 |
+
torch.ops.aten.sym_constrain_range.default,
|
| 385 |
+
(symbol_to_proxy[i0].node,),
|
| 386 |
+
{
|
| 387 |
+
"min": convert(vr.lower),
|
| 388 |
+
"max": convert(vr.upper),
|
| 389 |
+
},
|
| 390 |
+
)
|
| 391 |
+
|
| 392 |
+
add_runtime_asserts(ras)
|
parrot/lib/python3.10/site-packages/torch/fx/passes/tools_common.py
ADDED
|
@@ -0,0 +1,303 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from typing import List, Tuple, Union, Dict, Any, Set, Mapping, Optional
|
| 3 |
+
import collections
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
import operator
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.fx
|
| 9 |
+
from torch.fx.node import _get_qualified_name
|
| 10 |
+
from torch.fx._compatibility import compatibility
|
| 11 |
+
|
| 12 |
+
__all__ = ['get_acc_ops_name', 'get_node_target', 'is_node_output_tensor', 'FxNetAccFusionsFinder', 'legalize_graph']
|
| 13 |
+
|
| 14 |
+
Tensors = Union[Tuple[torch.Tensor], List[torch.Tensor]]
|
| 15 |
+
TensorOrTensors = Union[torch.Tensor, Tensors]
|
| 16 |
+
NodeList = List[torch.fx.Node]
|
| 17 |
+
NodeSet = Set[torch.fx.Node]
|
| 18 |
+
Names = List[str]
|
| 19 |
+
CALLABLE_NODE_OPS = {"call_module", "call_function", "call_method"}
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@compatibility(is_backward_compatible=False)
def get_acc_ops_name(k):
    """Return a human-readable qualified name for an op.

    Args:
        k: Either an already-qualified name (``str``), or a callable/class
            whose ``__module__``/``__name__`` are used to build the name.

    Returns:
        The string ``k`` unchanged, ``"acc_ops.<name>"`` for acc_ops
        callables, otherwise ``"<module>.<name>"``.
    """
    if isinstance(k, str):
        return k
    if k.__module__ and "acc_ops" in k.__module__:
        return f"acc_ops.{k.__name__}"
    # WAR for bug in how torch.ops assigns module. Guard against a missing or
    # None __module__ (seen on some builtins/extension callables): the
    # original code crashed with AttributeError on `.replace` in that case
    # even though the f-string below already anticipated an empty module.
    module = k.__module__.replace('torch._ops', 'torch.ops') if k.__module__ else ''
    return f"{module}.{k.__name__}"
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@compatibility(is_backward_compatible=False)
def get_node_target(submodules: Mapping[str, torch.nn.Module], node: torch.fx.Node) -> str:
    """
    Given a `node` returns its target typename.

    For "call_function" nodes this is the qualified name of ``node.target``
    (with the ``acc_ops.`` prefix for acc_ops functions).

    For "call_module" nodes it is the typename of the module that
    ``node.target`` points to in ``submodules``.

    For "call_method" nodes it is simply ``node.target``, the method name
    being called. This could potentially lead to conflicts but should be okay
    because normally it's invoked on a tensor.

    Any "_VariableFunctionsClass" prefix in the resulting name is replaced by
    "torch", e.g. _VariableFunctionsClass.relu becomes torch.relu.
    """

    assert node.op in CALLABLE_NODE_OPS, (
        "Expect op types of " + ", ".join(CALLABLE_NODE_OPS) + f", but found {node.op}"
    )

    if node.op == "call_function":
        fn: Any = node.target
        mod = fn.__module__
        if mod is not None and "acc_ops" in mod:
            return f"acc_ops.{fn.__name__}"
        return _get_qualified_name(fn)

    if node.op == "call_module":
        assert isinstance(node.target, str)
        submod = submodules[node.target]
        # Prefer the original base class when the module was rewritten.
        origin = getattr(submod, "_base_class_origin", type(submod))
        return get_acc_ops_name(origin)

    # "call_method": the target already is the method name.
    assert isinstance(node.target, str)
    return node.target
|
| 68 |
+
|
| 69 |
+
@compatibility(is_backward_compatible=False)
def is_node_output_tensor(node: torch.fx.Node) -> bool:
    """Checks if the node output produces a Tensor or not.

    NOTE: This requires to run `ShapeProp` on the containing fx graph before
    calling this function. This is because it works by checking the `type`
    metadata on the node. This metadata is produced by the `ShapeProp`.
    """
    node_type = node.meta.get("type", None)
    if node_type is None:
        # No ShapeProp metadata available; conservatively assume non-tensor.
        return False
    return issubclass(node_type, torch.Tensor)
|
| 79 |
+
|
| 80 |
+
@compatibility(is_backward_compatible=False)
class FxNetAccFusionsFinder:
    """
    Finds groups of connected ACC nodes that pass non-tensor data between each other.
    Such groups are called fusion groups.
    """

    def __init__(self, module: torch.fx.GraphModule, acc_nodes: NodeSet):
        """Prepare the finder.

        module: graph module whose graph is searched for fusion groups.
        acc_nodes: the set of ACC-eligible nodes; NOTE this set is mutated
            in place by ``__call__`` (groups that escape it are subtracted).
        """
        self.module = module
        # Topological ordering of all nodes; indices into this list are used
        # as the "idx" referenced throughout the class.
        self.nodes = list(module.graph.nodes)
        self.acc_nodes = acc_nodes

    @dataclass
    class FusionGroup:
        # The smallest idx of nodes in the fusion group after topological sorting all the nodes in the model.
        top_node_idx: int

        # Nodes in this fusion group.
        nodes: NodeSet

        # Inputs to this fusion group.
        inputs: NodeSet

        # Nodes that in the fusion group that haven't been processed yet.
        nodes_need_process: NodeSet

        def add_node(self, node):
            """
            Add a node to fusion group.

            Keeps the ``inputs`` set consistent: the node stops being an
            input of the group, and its own callable inputs that are not
            already group members become inputs instead.
            """
            if node in self.nodes:
                return

            self.nodes_need_process.add(node)
            self.nodes.add(node)
            self.inputs.discard(node)
            self.inputs.update(
                {
                    n
                    for n in node.all_input_nodes
                    if n.op in CALLABLE_NODE_OPS and n not in self.nodes
                }
            )

    def recursive_add_node(
        self,
        fusion_group: "FxNetAccFusionsFinder.FusionGroup",
        inputs: Union[NodeSet, NodeList],
        visited: Optional[NodeSet] = None,
    ):
        """
        Start from inputs and going reverse topological order. If any upstream node
        is in the fusion group, add all the nodes in this path to fusion group.

        Returns True if any path from ``inputs`` reaches a node already in
        the fusion group (in which case the nodes along that path have been
        added to the group), False otherwise.
        """
        for arg in inputs:
            # skip the node if already seen
            if visited is not None:
                if arg in visited:
                    continue
                visited.add(arg)

            # Skip placeholder and get_attr because they won't be in the fusion group.
            if arg.op not in CALLABLE_NODE_OPS:
                continue

            # If the node has smaller idx, it's already an upstream node of the fusion
            # group. We don't need to check it anymore.
            if self.nodes.index(arg) < fusion_group.top_node_idx:
                continue

            # If the node is in the fusion group, return True.
            if arg in fusion_group.nodes:
                return True

            # Check the upstream nodes of the node, if any of them is in the fusion group
            # we'll add this node to fusion group and return True.
            if self.recursive_add_node(fusion_group, arg.all_input_nodes, visited):
                fusion_group.add_node(arg)
                return True

        return False

    def __call__(self) -> Dict[torch.fx.Node, NodeSet]:
        """Find all fusion groups among the ACC nodes.

        Returns a mapping from each fused node to the (shared) set of nodes
        in its fusion group. Groups that pull in non-ACC nodes are discarded,
        and their members are removed from ``self.acc_nodes`` (mutated in
        place).
        """
        result: Dict[torch.fx.Node, NodeSet] = {}
        # Iterate over a snapshot, because self.acc_nodes may shrink while we
        # process groups (see the subtraction at the bottom of the loop).
        acc_nodes = list(self.acc_nodes)

        for node in acc_nodes:
            if node in result:
                continue
            if node.op not in CALLABLE_NODE_OPS:
                continue
            if "tensor_meta" in node.meta:
                continue
            # Node may have been evicted from acc_nodes by an earlier
            # discarded group.
            if node not in self.acc_nodes:
                continue

            fusion_group: FxNetAccFusionsFinder.FusionGroup = self.FusionGroup(
                top_node_idx=self.nodes.index(node),
                nodes={node},
                inputs=set(node.all_input_nodes),
                nodes_need_process={node},
            )
            while fusion_group.nodes_need_process:
                node = fusion_group.nodes_need_process.pop()
                self.recursive_add_node(
                    fusion_group,
                    fusion_group.inputs,
                    visited=set(),
                )

                # Optionally add downstream nodes
                if "tensor_meta" not in node.meta:
                    for user in node.users:
                        if user.op not in CALLABLE_NODE_OPS:
                            continue
                        if user in fusion_group.nodes:
                            continue

                        fusion_group.add_node(user)
                        self.recursive_add_node(
                            fusion_group,
                            fusion_group.inputs,
                            visited=set(),
                        )

                # Add some upstream nodes
                for arg in node.all_input_nodes:
                    if arg.op not in CALLABLE_NODE_OPS:
                        continue
                    if "tensor_meta" in arg.meta:
                        continue
                    if arg in fusion_group.nodes:
                        continue

                    fusion_group.add_node(arg)
                    # Adding an upstream node may lower the earliest index of
                    # the group; keep top_node_idx consistent.
                    fusion_group.top_node_idx = min(
                        fusion_group.top_node_idx, self.nodes.index(arg)
                    )
                    self.recursive_add_node(
                        fusion_group,
                        fusion_group.inputs,
                        visited=set(),
                    )

            # If the group absorbed any non-ACC node, the whole group is
            # unusable: drop its members from the ACC set. Otherwise record
            # the (shared) group set for every member.
            if not (set(fusion_group.nodes) <= self.acc_nodes):
                self.acc_nodes -= fusion_group.nodes
            else:
                for n in fusion_group.nodes:
                    result[n] = fusion_group.nodes

        return result
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
@compatibility(is_backward_compatible=False)
|
| 234 |
+
def legalize_graph(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
|
| 235 |
+
"""
|
| 236 |
+
Replace the graph of the given GraphModule with one that contains the same nodes as the
|
| 237 |
+
original, but in topologically sorted order.
|
| 238 |
+
|
| 239 |
+
This is used by the merge_matmul transformation below, which disturbs the topologically sorted
|
| 240 |
+
order of its input GraphModule, so that this order is restored before further transformation.
|
| 241 |
+
|
| 242 |
+
Arguments:
|
| 243 |
+
gm: The graph module to topologically sort. It is modified in-place.
|
| 244 |
+
|
| 245 |
+
Returns:
|
| 246 |
+
The graph module in-place sorted
|
| 247 |
+
"""
|
| 248 |
+
|
| 249 |
+
# These operators are used for making runtime assertions before any
|
| 250 |
+
# data-dependent operators occur. We want to prioritize sorting these to
|
| 251 |
+
# ensure that these assertions appear before any data-dependent operations
|
| 252 |
+
# in the graph.
|
| 253 |
+
PRIORITIZED_OPS = [
|
| 254 |
+
operator.add,
|
| 255 |
+
operator.mul,
|
| 256 |
+
operator.sub,
|
| 257 |
+
operator.floordiv,
|
| 258 |
+
operator.truediv,
|
| 259 |
+
operator.mod,
|
| 260 |
+
operator.le,
|
| 261 |
+
operator.lt,
|
| 262 |
+
operator.ge,
|
| 263 |
+
operator.gt,
|
| 264 |
+
operator.eq,
|
| 265 |
+
operator.ne,
|
| 266 |
+
torch.ops.aten.sym_constrain_range.default,
|
| 267 |
+
torch.ops.aten.sym_constrain_range_for_size.default,
|
| 268 |
+
torch.ops.aten._assert_async.msg,
|
| 269 |
+
torch.ops.aten.scalar_tensor.default,
|
| 270 |
+
torch.ops.aten._assert_scalar.default,
|
| 271 |
+
]
|
| 272 |
+
|
| 273 |
+
indeg = dict.fromkeys(gm.graph.nodes, 0)
|
| 274 |
+
new_graph = torch.fx.Graph()
|
| 275 |
+
# Track how many unfulfilled dependencies each node has
|
| 276 |
+
for node in gm.graph.nodes:
|
| 277 |
+
for user in node.users:
|
| 278 |
+
indeg[user] += 1
|
| 279 |
+
queue: collections.deque = collections.deque()
|
| 280 |
+
# Add all nodes with no dependencies to the queue
|
| 281 |
+
for node in gm.graph.nodes:
|
| 282 |
+
if indeg[node] == 0:
|
| 283 |
+
queue.append(node)
|
| 284 |
+
env: Dict[torch.fx.Node, torch.fx.Node] = {}
|
| 285 |
+
# Pop nodes from the queue, and add nodes that have had all their
|
| 286 |
+
# dependencies fulfilled
|
| 287 |
+
while len(queue) > 0:
|
| 288 |
+
cur = queue.popleft()
|
| 289 |
+
env[cur] = new_graph.node_copy(cur, lambda x: env[x])
|
| 290 |
+
for user in cur.users:
|
| 291 |
+
indeg[user] -= 1
|
| 292 |
+
if indeg[user] == 0:
|
| 293 |
+
if user.op == "call_function" and user.target in PRIORITIZED_OPS:
|
| 294 |
+
queue.appendleft(user)
|
| 295 |
+
else:
|
| 296 |
+
queue.append(user)
|
| 297 |
+
# If the new graph's size is not as large as the old one, then there must be
|
| 298 |
+
# a cycle (i.e. some node's dependencies were not satisfied.)
|
| 299 |
+
if len(new_graph.nodes) < len(gm.graph.nodes):
|
| 300 |
+
raise RuntimeError(f"Input graph has cycles, unable to add {[node for node in indeg if indeg[node] != 0]}")
|
| 301 |
+
new_graph._codegen = gm.graph._codegen
|
| 302 |
+
gm.graph = new_graph
|
| 303 |
+
return gm
|
parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUDAToolkit.cmake
ADDED
|
@@ -0,0 +1,1073 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# This module is back-ported from CMake 3.17 and above to work with CMake 3.10
|
| 3 |
+
|
| 4 |
+
# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
|
| 5 |
+
# file Copyright.txt or https://cmake.org/licensing for details.
|
| 6 |
+
|
| 7 |
+
#[=======================================================================[.rst:
|
| 8 |
+
FindCUDAToolkit
|
| 9 |
+
---------------
|
| 10 |
+
|
| 11 |
+
.. versionadded:: 3.17
|
| 12 |
+
|
| 13 |
+
This script locates the NVIDIA CUDA toolkit and the associated libraries, but
|
| 14 |
+
does not require the ``CUDA`` language be enabled for a given project. This
|
| 15 |
+
module does not search for the NVIDIA CUDA Samples.
|
| 16 |
+
|
| 17 |
+
.. versionadded:: 3.19
|
| 18 |
+
QNX support.
|
| 19 |
+
|
| 20 |
+
Search Behavior
|
| 21 |
+
^^^^^^^^^^^^^^^
|
| 22 |
+
|
| 23 |
+
The CUDA Toolkit search behavior uses the following order:
|
| 24 |
+
|
| 25 |
+
1. If the ``CUDA`` language has been enabled we will use the directory
|
| 26 |
+
containing the compiler as the first search location for ``nvcc``.
|
| 27 |
+
|
| 28 |
+
2. If the ``CUDAToolkit_ROOT`` cmake configuration variable (e.g.,
|
| 29 |
+
``-DCUDAToolkit_ROOT=/some/path``) *or* environment variable is defined, it
|
| 30 |
+
will be searched. If both an environment variable **and** a
|
| 31 |
+
configuration variable are specified, the *configuration* variable takes
|
| 32 |
+
precedence.
|
| 33 |
+
|
| 34 |
+
The directory specified here must be such that the executable ``nvcc`` or
|
| 35 |
+
the appropriate ``version.txt`` file can be found underneath the specified
|
| 36 |
+
directory.
|
| 37 |
+
|
| 38 |
+
3. If the CUDA_PATH environment variable is defined, it will be searched
|
| 39 |
+
for ``nvcc``.
|
| 40 |
+
|
| 41 |
+
4. The user's path is searched for ``nvcc`` using :command:`find_program`. If
|
| 42 |
+
this is found, no subsequent search attempts are performed. Users are
|
| 43 |
+
responsible for ensuring that the first ``nvcc`` to show up in the path is
|
| 44 |
+
the desired path in the event that multiple CUDA Toolkits are installed.
|
| 45 |
+
|
| 46 |
+
5. On Unix systems, if the symbolic link ``/usr/local/cuda`` exists, this is
|
| 47 |
+
used. No subsequent search attempts are performed. No default symbolic link
|
| 48 |
+
location exists for the Windows platform.
|
| 49 |
+
|
| 50 |
+
6. The platform specific default install locations are searched. If exactly one
|
| 51 |
+
candidate is found, this is used. The default CUDA Toolkit install locations
|
| 52 |
+
searched are:
|
| 53 |
+
|
| 54 |
+
+-------------+-------------------------------------------------------------+
|
| 55 |
+
| Platform | Search Pattern |
|
| 56 |
+
+=============+=============================================================+
|
| 57 |
+
| macOS | ``/Developer/NVIDIA/CUDA-X.Y`` |
|
| 58 |
+
+-------------+-------------------------------------------------------------+
|
| 59 |
+
| Other Unix | ``/usr/local/cuda-X.Y`` |
|
| 60 |
+
+-------------+-------------------------------------------------------------+
|
| 61 |
+
| Windows | ``C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\vX.Y`` |
|
| 62 |
+
+-------------+-------------------------------------------------------------+
|
| 63 |
+
|
| 64 |
+
Where ``X.Y`` would be a specific version of the CUDA Toolkit, such as
|
| 65 |
+
``/usr/local/cuda-9.0`` or
|
| 66 |
+
``C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v9.0``
|
| 67 |
+
|
| 68 |
+
.. note::
|
| 69 |
+
|
| 70 |
+
When multiple CUDA Toolkits are installed in the default location of a
|
| 71 |
+
system(e.g., both ``/usr/local/cuda-9.0`` and ``/usr/local/cuda-10.0``
|
| 72 |
+
exist but the ``/usr/local/cuda`` symbolic link does **not** exist), this
|
| 73 |
+
package is marked as **not** found.
|
| 74 |
+
|
| 75 |
+
There are too many factors involved in making an automatic decision in
|
| 76 |
+
the presence of multiple CUDA Toolkits being installed. In this
|
| 77 |
+
situation, users are encouraged to either (1) set ``CUDAToolkit_ROOT`` or
|
| 78 |
+
(2) ensure that the correct ``nvcc`` executable shows up in ``$PATH`` for
|
| 79 |
+
:command:`find_program` to find.
|
| 80 |
+
|
| 81 |
+
Arguments
|
| 82 |
+
^^^^^^^^^
|
| 83 |
+
|
| 84 |
+
``[<version>]``
|
| 85 |
+
The ``[<version>]`` argument requests a version with which the package found
|
| 86 |
+
should be compatible. See :ref:`find_package version format <FIND_PACKAGE_VERSION_FORMAT>`
|
| 87 |
+
for more details.
|
| 88 |
+
|
| 89 |
+
Options
|
| 90 |
+
^^^^^^^
|
| 91 |
+
|
| 92 |
+
``REQUIRED``
|
| 93 |
+
If specified, configuration will error if a suitable CUDA Toolkit is not
|
| 94 |
+
found.
|
| 95 |
+
|
| 96 |
+
``QUIET``
|
| 97 |
+
If specified, the search for a suitable CUDA Toolkit will not produce any
|
| 98 |
+
messages.
|
| 99 |
+
|
| 100 |
+
``EXACT``
|
| 101 |
+
If specified, the CUDA Toolkit is considered found only if the exact
|
| 102 |
+
``VERSION`` specified is recovered.
|
| 103 |
+
|
| 104 |
+
Imported targets
|
| 105 |
+
^^^^^^^^^^^^^^^^
|
| 106 |
+
|
| 107 |
+
An :ref:`imported target <Imported targets>` named ``CUDA::toolkit`` is provided.
|
| 108 |
+
|
| 109 |
+
This module defines :prop_tgt:`IMPORTED` targets for each
|
| 110 |
+
of the following libraries that are part of the CUDAToolkit:
|
| 111 |
+
|
| 112 |
+
- :ref:`CUDA Runtime Library<cuda_toolkit_rt_lib>`
|
| 113 |
+
- :ref:`CUDA Driver Library<cuda_toolkit_driver_lib>`
|
| 114 |
+
- :ref:`cuBLAS<cuda_toolkit_cuBLAS>`
|
| 115 |
+
- :ref:`cuFFT<cuda_toolkit_cuFFT>`
|
| 116 |
+
- :ref:`cuRAND<cuda_toolkit_cuRAND>`
|
| 117 |
+
- :ref:`cuSOLVER<cuda_toolkit_cuSOLVER>`
|
| 118 |
+
- :ref:`cuSPARSE<cuda_toolkit_cuSPARSE>`
|
| 119 |
+
- :ref:`cuPTI<cuda_toolkit_cupti>`
|
| 120 |
+
- :ref:`NPP<cuda_toolkit_NPP>`
|
| 121 |
+
- :ref:`nvBLAS<cuda_toolkit_nvBLAS>`
|
| 122 |
+
- :ref:`nvGRAPH<cuda_toolkit_nvGRAPH>`
|
| 123 |
+
- :ref:`nvJPEG<cuda_toolkit_nvJPEG>`
|
| 124 |
+
- :ref:`nvidia-ML<cuda_toolkit_nvML>`
|
| 125 |
+
- :ref:`nvRTC<cuda_toolkit_nvRTC>`
|
| 126 |
+
- :ref:`nvToolsExt<cuda_toolkit_nvToolsExt>`
|
| 127 |
+
- :ref:`OpenCL<cuda_toolkit_opencl>`
|
| 128 |
+
- :ref:`cuLIBOS<cuda_toolkit_cuLIBOS>`
|
| 129 |
+
|
| 130 |
+
.. _`cuda_toolkit_rt_lib`:
|
| 131 |
+
|
| 132 |
+
CUDA Runtime Library
|
| 133 |
+
""""""""""""""""""""
|
| 134 |
+
|
| 135 |
+
The CUDA Runtime library (cudart) are what most applications will typically
|
| 136 |
+
need to link against to make any calls such as `cudaMalloc`, and `cudaFree`.
|
| 137 |
+
|
| 138 |
+
Targets Created:
|
| 139 |
+
|
| 140 |
+
- ``CUDA::cudart``
|
| 141 |
+
- ``CUDA::cudart_static``
|
| 142 |
+
|
| 143 |
+
.. _`cuda_toolkit_driver_lib`:
|
| 144 |
+
|
| 145 |
+
CUDA Driver Library
|
| 146 |
+
""""""""""""""""""""
|
| 147 |
+
|
| 148 |
+
The CUDA Driver library (cuda) are used by applications that use calls
|
| 149 |
+
such as `cuMemAlloc`, and `cuMemFree`.
|
| 150 |
+
|
| 151 |
+
Targets Created:
|
| 152 |
+
|
| 153 |
+
- ``CUDA::cuda_driver``
|
| 154 |
+
|
| 155 |
+
.. _`cuda_toolkit_cuBLAS`:
|
| 156 |
+
|
| 157 |
+
cuBLAS
|
| 158 |
+
""""""
|
| 159 |
+
|
| 160 |
+
The `cuBLAS <https://docs.nvidia.com/cuda/cublas/index.html>`_ library.
|
| 161 |
+
|
| 162 |
+
Targets Created:
|
| 163 |
+
|
| 164 |
+
- ``CUDA::cublas``
|
| 165 |
+
- ``CUDA::cublas_static``
|
| 166 |
+
- ``CUDA::cublasLt`` starting in CUDA 10.1
|
| 167 |
+
- ``CUDA::cublasLt_static`` starting in CUDA 10.1
|
| 168 |
+
|
| 169 |
+
.. _`cuda_toolkit_cuFFT`:
|
| 170 |
+
|
| 171 |
+
cuFFT
|
| 172 |
+
"""""
|
| 173 |
+
|
| 174 |
+
The `cuFFT <https://docs.nvidia.com/cuda/cufft/index.html>`_ library.
|
| 175 |
+
|
| 176 |
+
Targets Created:
|
| 177 |
+
|
| 178 |
+
- ``CUDA::cufft``
|
| 179 |
+
- ``CUDA::cufftw``
|
| 180 |
+
- ``CUDA::cufft_static``
|
| 181 |
+
- ``CUDA::cufft_static_nocallback`` starting in CUDA 9.2, requires CMake 3.23+
|
| 182 |
+
- ``CUDA::cufftw_static``
|
| 183 |
+
|
| 184 |
+
cuRAND
|
| 185 |
+
""""""
|
| 186 |
+
|
| 187 |
+
The `cuRAND <https://docs.nvidia.com/cuda/curand/index.html>`_ library.
|
| 188 |
+
|
| 189 |
+
Targets Created:
|
| 190 |
+
|
| 191 |
+
- ``CUDA::curand``
|
| 192 |
+
- ``CUDA::curand_static``
|
| 193 |
+
|
| 194 |
+
.. _`cuda_toolkit_cuSOLVER`:
|
| 195 |
+
|
| 196 |
+
cuSOLVER
|
| 197 |
+
""""""""
|
| 198 |
+
|
| 199 |
+
The `cuSOLVER <https://docs.nvidia.com/cuda/cusolver/index.html>`_ library.
|
| 200 |
+
|
| 201 |
+
Targets Created:
|
| 202 |
+
|
| 203 |
+
- ``CUDA::cusolver``
|
| 204 |
+
- ``CUDA::cusolver_static``
|
| 205 |
+
|
| 206 |
+
.. _`cuda_toolkit_cuSPARSE`:
|
| 207 |
+
|
| 208 |
+
cuSPARSE
|
| 209 |
+
""""""""
|
| 210 |
+
|
| 211 |
+
The `cuSPARSE <https://docs.nvidia.com/cuda/cusparse/index.html>`_ library.
|
| 212 |
+
|
| 213 |
+
Targets Created:
|
| 214 |
+
|
| 215 |
+
- ``CUDA::cusparse``
|
| 216 |
+
- ``CUDA::cusparse_static``
|
| 217 |
+
|
| 218 |
+
.. _`cuda_toolkit_cupti`:
|
| 219 |
+
|
| 220 |
+
cupti
|
| 221 |
+
"""""
|
| 222 |
+
|
| 223 |
+
The `NVIDIA CUDA Profiling Tools Interface <https://developer.nvidia.com/CUPTI>`_.
|
| 224 |
+
|
| 225 |
+
Targets Created:
|
| 226 |
+
|
| 227 |
+
- ``CUDA::cupti``
|
| 228 |
+
- ``CUDA::cupti_static``
|
| 229 |
+
|
| 230 |
+
.. _`cuda_toolkit_NPP`:
|
| 231 |
+
|
| 232 |
+
NPP
|
| 233 |
+
"""
|
| 234 |
+
|
| 235 |
+
The `NPP <https://docs.nvidia.com/cuda/npp/index.html>`_ libraries.
|
| 236 |
+
|
| 237 |
+
Targets Created:
|
| 238 |
+
|
| 239 |
+
- `nppc`:
|
| 240 |
+
|
| 241 |
+
- ``CUDA::nppc``
|
| 242 |
+
- ``CUDA::nppc_static``
|
| 243 |
+
|
| 244 |
+
- `nppial`: Arithmetic and logical operation functions in `nppi_arithmetic_and_logical_operations.h`
|
| 245 |
+
|
| 246 |
+
- ``CUDA::nppial``
|
| 247 |
+
- ``CUDA::nppial_static``
|
| 248 |
+
|
| 249 |
+
- `nppicc`: Color conversion and sampling functions in `nppi_color_conversion.h`
|
| 250 |
+
|
| 251 |
+
- ``CUDA::nppicc``
|
| 252 |
+
- ``CUDA::nppicc_static``
|
| 253 |
+
|
| 254 |
+
- `nppicom`: JPEG compression and decompression functions in `nppi_compression_functions.h`
|
| 255 |
+
Removed starting in CUDA 11.0, use :ref:`nvJPEG<cuda_toolkit_nvJPEG>` instead.
|
| 256 |
+
|
| 257 |
+
- ``CUDA::nppicom``
|
| 258 |
+
- ``CUDA::nppicom_static``
|
| 259 |
+
|
| 260 |
+
- `nppidei`: Data exchange and initialization functions in `nppi_data_exchange_and_initialization.h`
|
| 261 |
+
|
| 262 |
+
- ``CUDA::nppidei``
|
| 263 |
+
- ``CUDA::nppidei_static``
|
| 264 |
+
|
| 265 |
+
- `nppif`: Filtering and computer vision functions in `nppi_filter_functions.h`
|
| 266 |
+
|
| 267 |
+
- ``CUDA::nppif``
|
| 268 |
+
- ``CUDA::nppif_static``
|
| 269 |
+
|
| 270 |
+
- `nppig`: Geometry transformation functions found in `nppi_geometry_transforms.h`
|
| 271 |
+
|
| 272 |
+
- ``CUDA::nppig``
|
| 273 |
+
- ``CUDA::nppig_static``
|
| 274 |
+
|
| 275 |
+
- `nppim`: Morphological operation functions found in `nppi_morphological_operations.h`
|
| 276 |
+
|
| 277 |
+
- ``CUDA::nppim``
|
| 278 |
+
- ``CUDA::nppim_static``
|
| 279 |
+
|
| 280 |
+
- `nppist`: Statistics and linear transform in `nppi_statistics_functions.h` and `nppi_linear_transforms.h`
|
| 281 |
+
|
| 282 |
+
- ``CUDA::nppist``
|
| 283 |
+
- ``CUDA::nppist_static``
|
| 284 |
+
|
| 285 |
+
- `nppisu`: Memory support functions in `nppi_support_functions.h`
|
| 286 |
+
|
| 287 |
+
- ``CUDA::nppisu``
|
| 288 |
+
- ``CUDA::nppisu_static``
|
| 289 |
+
|
| 290 |
+
- `nppitc`: Threshold and compare operation functions in `nppi_threshold_and_compare_operations.h`
|
| 291 |
+
|
| 292 |
+
- ``CUDA::nppitc``
|
| 293 |
+
- ``CUDA::nppitc_static``
|
| 294 |
+
|
| 295 |
+
- `npps`:
|
| 296 |
+
|
| 297 |
+
- ``CUDA::npps``
|
| 298 |
+
- ``CUDA::npps_static``
|
| 299 |
+
|
| 300 |
+
.. _`cuda_toolkit_nvBLAS`:
|
| 301 |
+
|
| 302 |
+
nvBLAS
|
| 303 |
+
""""""
|
| 304 |
+
|
| 305 |
+
The `nvBLAS <https://docs.nvidia.com/cuda/nvblas/index.html>`_ libraries.
|
| 306 |
+
This is a shared library only.
|
| 307 |
+
|
| 308 |
+
Targets Created:
|
| 309 |
+
|
| 310 |
+
- ``CUDA::nvblas``
|
| 311 |
+
|
| 312 |
+
.. _`cuda_toolkit_nvGRAPH`:
|
| 313 |
+
|
| 314 |
+
nvGRAPH
|
| 315 |
+
"""""""
|
| 316 |
+
|
| 317 |
+
The `nvGRAPH <https://docs.nvidia.com/cuda/nvgraph/index.html>`_ library.
|
| 318 |
+
Removed starting in CUDA 11.0
|
| 319 |
+
|
| 320 |
+
Targets Created:
|
| 321 |
+
|
| 322 |
+
- ``CUDA::nvgraph``
|
| 323 |
+
- ``CUDA::nvgraph_static``
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
.. _`cuda_toolkit_nvJPEG`:
|
| 327 |
+
|
| 328 |
+
nvJPEG
|
| 329 |
+
""""""
|
| 330 |
+
|
| 331 |
+
The `nvJPEG <https://docs.nvidia.com/cuda/nvjpeg/index.html>`_ library.
|
| 332 |
+
Introduced in CUDA 10.
|
| 333 |
+
|
| 334 |
+
Targets Created:
|
| 335 |
+
|
| 336 |
+
- ``CUDA::nvjpeg``
|
| 337 |
+
- ``CUDA::nvjpeg_static``
|
| 338 |
+
|
| 339 |
+
.. _`cuda_toolkit_nvRTC`:
|
| 340 |
+
|
| 341 |
+
nvRTC
|
| 342 |
+
"""""
|
| 343 |
+
|
| 344 |
+
The `nvRTC <https://docs.nvidia.com/cuda/nvrtc/index.html>`_ (Runtime Compilation) library.
|
| 345 |
+
This is a shared library only.
|
| 346 |
+
|
| 347 |
+
Targets Created:
|
| 348 |
+
|
| 349 |
+
- ``CUDA::nvrtc``
|
| 350 |
+
|
| 351 |
+
.. _`cuda_toolkit_nvml`:
|
| 352 |
+
|
| 353 |
+
nvidia-ML
|
| 354 |
+
"""""""""
|
| 355 |
+
|
| 356 |
+
The `NVIDIA Management Library <https://developer.nvidia.com/nvidia-management-library-nvml>`_.
|
| 357 |
+
This is a shared library only.
|
| 358 |
+
|
| 359 |
+
Targets Created:
|
| 360 |
+
|
| 361 |
+
- ``CUDA::nvml``
|
| 362 |
+
|
| 363 |
+
.. _`cuda_toolkit_nvToolsExt`:
|
| 364 |
+
|
| 365 |
+
nvToolsExt
|
| 366 |
+
""""""""""
|
| 367 |
+
|
| 368 |
+
The `NVIDIA Tools Extension <https://docs.nvidia.com/gameworks/content/gameworkslibrary/nvtx/nvidia_tools_extension_library_nvtx.htm>`_.
|
| 369 |
+
This is a shared library only.
|
| 370 |
+
|
| 371 |
+
Targets Created:
|
| 372 |
+
|
| 373 |
+
- ``CUDA::nvToolsExt``
|
| 374 |
+
|
| 375 |
+
.. _`cuda_toolkit_opencl`:
|
| 376 |
+
|
| 377 |
+
OpenCL
|
| 378 |
+
""""""
|
| 379 |
+
|
| 380 |
+
The `NVIDIA OpenCL Library <https://developer.nvidia.com/opencl>`_.
|
| 381 |
+
This is a shared library only.
|
| 382 |
+
|
| 383 |
+
Targets Created:
|
| 384 |
+
|
| 385 |
+
- ``CUDA::OpenCL``
|
| 386 |
+
|
| 387 |
+
.. _`cuda_toolkit_cuLIBOS`:
|
| 388 |
+
|
| 389 |
+
cuLIBOS
|
| 390 |
+
"""""""
|
| 391 |
+
|
| 392 |
+
The cuLIBOS library is a backend thread abstraction layer library which is
|
| 393 |
+
static only. The ``CUDA::cublas_static``, ``CUDA::cusparse_static``,
|
| 394 |
+
``CUDA::cufft_static``, ``CUDA::curand_static``, and (when implemented) NPP
|
| 395 |
+
libraries all automatically have this dependency linked.
|
| 396 |
+
|
| 397 |
+
Target Created:
|
| 398 |
+
|
| 399 |
+
- ``CUDA::culibos``
|
| 400 |
+
|
| 401 |
+
**Note**: direct usage of this target by consumers should not be necessary.
|
| 402 |
+
|
| 403 |
+
.. _`cuda_toolkit_cuRAND`:
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
Result variables
|
| 408 |
+
^^^^^^^^^^^^^^^^
|
| 409 |
+
|
| 410 |
+
``CUDAToolkit_FOUND``
|
| 411 |
+
A boolean specifying whether or not the CUDA Toolkit was found.
|
| 412 |
+
|
| 413 |
+
``CUDAToolkit_VERSION``
|
| 414 |
+
The exact version of the CUDA Toolkit found (as reported by
|
| 415 |
+
``nvcc --version`` or ``version.txt``).
|
| 416 |
+
|
| 417 |
+
``CUDAToolkit_VERSION_MAJOR``
|
| 418 |
+
The major version of the CUDA Toolkit.
|
| 419 |
+
|
| 420 |
+
``CUDAToolkit_VERSION_MINOR``
|
| 421 |
+
The minor version of the CUDA Toolkit.
|
| 422 |
+
|
| 423 |
+
``CUDAToolkit_VERSION_PATCH``
|
| 424 |
+
The patch version of the CUDA Toolkit.
|
| 425 |
+
|
| 426 |
+
``CUDAToolkit_BIN_DIR``
|
| 427 |
+
The path to the CUDA Toolkit library directory that contains the CUDA
|
| 428 |
+
executable ``nvcc``.
|
| 429 |
+
|
| 430 |
+
``CUDAToolkit_INCLUDE_DIRS``
|
| 431 |
+
The path to the CUDA Toolkit ``include`` folder containing the header files
|
| 432 |
+
required to compile a project linking against CUDA.
|
| 433 |
+
|
| 434 |
+
``CUDAToolkit_LIBRARY_DIR``
|
| 435 |
+
The path to the CUDA Toolkit library directory that contains the CUDA
|
| 436 |
+
Runtime library ``cudart``.
|
| 437 |
+
|
| 438 |
+
``CUDAToolkit_LIBRARY_ROOT``
|
| 439 |
+
.. versionadded:: 3.18
|
| 440 |
+
|
| 441 |
+
The path to the CUDA Toolkit directory containing the nvvm directory and
|
| 442 |
+
version.txt.
|
| 443 |
+
|
| 444 |
+
``CUDAToolkit_TARGET_DIR``
|
| 445 |
+
The path to the CUDA Toolkit directory including the target architecture
|
| 446 |
+
when cross-compiling. When not cross-compiling this will be equivalent to
|
| 447 |
+
the parent directory of ``CUDAToolkit_BIN_DIR``.
|
| 448 |
+
|
| 449 |
+
``CUDAToolkit_NVCC_EXECUTABLE``
|
| 450 |
+
The path to the NVIDIA CUDA compiler ``nvcc``. Note that this path may
|
| 451 |
+
**not** be the same as
|
| 452 |
+
:variable:`CMAKE_CUDA_COMPILER <CMAKE_<LANG>_COMPILER>`. ``nvcc`` must be
|
| 453 |
+
found to determine the CUDA Toolkit version as well as determining other
|
| 454 |
+
features of the Toolkit. This variable is set for the convenience of
|
| 455 |
+
modules that depend on this one.
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
#]=======================================================================]
|
| 459 |
+
|
| 460 |
+
# NOTE: much of this was simply extracted from FindCUDA.cmake.
|
| 461 |
+
|
| 462 |
+
# James Bigler, NVIDIA Corp (nvidia.com - jbigler)
|
| 463 |
+
# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
|
| 464 |
+
#
|
| 465 |
+
# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved.
|
| 466 |
+
#
|
| 467 |
+
# Copyright (c) 2007-2009
|
| 468 |
+
# Scientific Computing and Imaging Institute, University of Utah
|
| 469 |
+
#
|
| 470 |
+
# This code is licensed under the MIT License. See the FindCUDA.cmake script
|
| 471 |
+
# for the text of the license.
|
| 472 |
+
|
| 473 |
+
# The MIT License
|
| 474 |
+
#
|
| 475 |
+
# License for the specific language governing rights and limitations under
|
| 476 |
+
# Permission is hereby granted, free of charge, to any person obtaining a
|
| 477 |
+
# copy of this software and associated documentation files (the "Software"),
|
| 478 |
+
# to deal in the Software without restriction, including without limitation
|
| 479 |
+
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
| 480 |
+
# and/or sell copies of the Software, and to permit persons to whom the
|
| 481 |
+
# Software is furnished to do so, subject to the following conditions:
|
| 482 |
+
#
|
| 483 |
+
# The above copyright notice and this permission notice shall be included
|
| 484 |
+
# in all copies or substantial portions of the Software.
|
| 485 |
+
#
|
| 486 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
| 487 |
+
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 488 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
| 489 |
+
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 490 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
| 491 |
+
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
| 492 |
+
# DEALINGS IN THE SOFTWARE.
|
| 493 |
+
#
|
| 494 |
+
###############################################################################
|
| 495 |
+
|
| 496 |
+
# The toolkit is located during compiler detection for CUDA and stored in CMakeCUDACompiler.cmake as
|
| 497 |
+
# CMAKE_CUDA_COMPILER_TOOLKIT_ROOT and CMAKE_CUDA_COMPILER_LIBRARY_ROOT.
|
| 498 |
+
# We compute the rest based on those here to avoid re-searching and to avoid finding a possibly
|
| 499 |
+
# different installation.
|
| 500 |
+
if(CMAKE_CUDA_COMPILER_TOOLKIT_ROOT)
|
| 501 |
+
set(CUDAToolkit_ROOT_DIR "${CMAKE_CUDA_COMPILER_TOOLKIT_ROOT}")
|
| 502 |
+
set(CUDAToolkit_LIBRARY_ROOT "${CMAKE_CUDA_COMPILER_LIBRARY_ROOT}")
|
| 503 |
+
set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_TOOLKIT_VERSION}")
|
| 504 |
+
|
| 505 |
+
if(CUDAToolkit_VERSION MATCHES [=[([0-9]+)\.([0-9]+)\.([0-9]+)]=])
|
| 506 |
+
set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}")
|
| 507 |
+
set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}")
|
| 508 |
+
set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}")
|
| 509 |
+
endif()
|
| 510 |
+
else()
|
| 511 |
+
function(_CUDAToolkit_find_root_dir )
|
| 512 |
+
cmake_parse_arguments(arg "" "" "SEARCH_PATHS;FIND_FLAGS" ${ARGN})
|
| 513 |
+
|
| 514 |
+
if(NOT CUDAToolkit_BIN_DIR)
|
| 515 |
+
if(NOT CUDAToolkit_SENTINEL_FILE)
|
| 516 |
+
find_program(CUDAToolkit_NVCC_EXECUTABLE
|
| 517 |
+
NAMES nvcc nvcc.exe
|
| 518 |
+
PATHS ${arg_SEARCH_PATHS}
|
| 519 |
+
${arg_FIND_FLAGS}
|
| 520 |
+
)
|
| 521 |
+
endif()
|
| 522 |
+
|
| 523 |
+
if(NOT CUDAToolkit_NVCC_EXECUTABLE)
|
| 524 |
+
find_file(CUDAToolkit_SENTINEL_FILE
|
| 525 |
+
NAMES version.txt
|
| 526 |
+
PATHS ${arg_SEARCH_PATHS}
|
| 527 |
+
NO_DEFAULT_PATH
|
| 528 |
+
)
|
| 529 |
+
endif()
|
| 530 |
+
|
| 531 |
+
if(EXISTS "${CUDAToolkit_NVCC_EXECUTABLE}")
|
| 532 |
+
# If NVCC exists then invoke it to find the toolkit location.
|
| 533 |
+
# This allows us to support wrapper scripts (e.g. ccache or colornvcc), CUDA Toolkit,
|
| 534 |
+
# NVIDIA HPC SDK, and distro's splayed layouts
|
| 535 |
+
execute_process(COMMAND ${CUDAToolkit_NVCC_EXECUTABLE} "-v" "__cmake_determine_cuda"
|
| 536 |
+
OUTPUT_VARIABLE _CUDA_NVCC_OUT ERROR_VARIABLE _CUDA_NVCC_OUT)
|
| 537 |
+
if(_CUDA_NVCC_OUT MATCHES "\\#\\$ TOP=([^\r\n]*)")
|
| 538 |
+
get_filename_component(CUDAToolkit_BIN_DIR "${CMAKE_MATCH_1}/bin" ABSOLUTE)
|
| 539 |
+
else()
|
| 540 |
+
get_filename_component(CUDAToolkit_BIN_DIR "${CUDAToolkit_NVCC_EXECUTABLE}" DIRECTORY)
|
| 541 |
+
endif()
|
| 542 |
+
unset(_CUDA_NVCC_OUT)
|
| 543 |
+
|
| 544 |
+
mark_as_advanced(CUDAToolkit_BIN_DIR)
|
| 545 |
+
set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "" FORCE)
|
| 546 |
+
endif()
|
| 547 |
+
|
| 548 |
+
if(CUDAToolkit_SENTINEL_FILE)
|
| 549 |
+
get_filename_component(CUDAToolkit_BIN_DIR ${CUDAToolkit_SENTINEL_FILE} DIRECTORY ABSOLUTE)
|
| 550 |
+
set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}/bin")
|
| 551 |
+
|
| 552 |
+
set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "" FORCE)
|
| 553 |
+
mark_as_advanced(CUDAToolkit_BIN_DIR)
|
| 554 |
+
endif()
|
| 555 |
+
endif()
|
| 556 |
+
|
| 557 |
+
if(CUDAToolkit_BIN_DIR)
|
| 558 |
+
get_filename_component(CUDAToolkit_ROOT_DIR ${CUDAToolkit_BIN_DIR} DIRECTORY ABSOLUTE)
|
| 559 |
+
set(CUDAToolkit_ROOT_DIR "${CUDAToolkit_ROOT_DIR}" PARENT_SCOPE)
|
| 560 |
+
endif()
|
| 561 |
+
|
| 562 |
+
endfunction()
|
| 563 |
+
|
| 564 |
+
# For NVCC we can easily deduce the SDK binary directory from the compiler path.
|
| 565 |
+
if(CMAKE_CUDA_COMPILER_LOADED AND NOT CUDAToolkit_BIN_DIR AND CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA")
|
| 566 |
+
get_filename_component(CUDAToolkit_BIN_DIR "${CMAKE_CUDA_COMPILER}" DIRECTORY)
|
| 567 |
+
set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "")
|
| 568 |
+
# Try language provided path first.
|
| 569 |
+
_CUDAToolkit_find_root_dir(SEARCH_PATHS "${CUDAToolkit_BIN_DIR}" FIND_FLAGS NO_DEFAULT_PATH)
|
| 570 |
+
mark_as_advanced(CUDAToolkit_BIN_DIR)
|
| 571 |
+
endif()
|
| 572 |
+
|
| 573 |
+
# Try user provided path
|
| 574 |
+
if(NOT CUDAToolkit_ROOT_DIR AND CUDAToolkit_ROOT)
|
| 575 |
+
_CUDAToolkit_find_root_dir(SEARCH_PATHS "${CUDAToolkit_ROOT}" FIND_FLAGS PATH_SUFFIXES bin NO_DEFAULT_PATH)
|
| 576 |
+
endif()
|
| 577 |
+
if(NOT CUDAToolkit_ROOT_DIR)
|
| 578 |
+
_CUDAToolkit_find_root_dir(FIND_FLAGS PATHS ENV CUDA_PATH PATH_SUFFIXES bin)
|
| 579 |
+
endif()
|
| 580 |
+
|
| 581 |
+
# If the user specified CUDAToolkit_ROOT but the toolkit could not be found, this is an error.
|
| 582 |
+
if(NOT CUDAToolkit_ROOT_DIR AND (DEFINED CUDAToolkit_ROOT OR DEFINED ENV{CUDAToolkit_ROOT}))
|
| 583 |
+
# Declare error messages now, print later depending on find_package args.
|
| 584 |
+
set(fail_base "Could not find nvcc executable in path specified by")
|
| 585 |
+
set(cuda_root_fail "${fail_base} CUDAToolkit_ROOT=${CUDAToolkit_ROOT}")
|
| 586 |
+
set(env_cuda_root_fail "${fail_base} environment variable CUDAToolkit_ROOT=$ENV{CUDAToolkit_ROOT}")
|
| 587 |
+
|
| 588 |
+
if(CUDAToolkit_FIND_REQUIRED)
|
| 589 |
+
if(DEFINED CUDAToolkit_ROOT)
|
| 590 |
+
message(FATAL_ERROR ${cuda_root_fail})
|
| 591 |
+
elseif(DEFINED ENV{CUDAToolkit_ROOT})
|
| 592 |
+
message(FATAL_ERROR ${env_cuda_root_fail})
|
| 593 |
+
endif()
|
| 594 |
+
else()
|
| 595 |
+
if(NOT CUDAToolkit_FIND_QUIETLY)
|
| 596 |
+
if(DEFINED CUDAToolkit_ROOT)
|
| 597 |
+
message(STATUS ${cuda_root_fail})
|
| 598 |
+
elseif(DEFINED ENV{CUDAToolkit_ROOT})
|
| 599 |
+
message(STATUS ${env_cuda_root_fail})
|
| 600 |
+
endif()
|
| 601 |
+
endif()
|
| 602 |
+
set(CUDAToolkit_FOUND FALSE)
|
| 603 |
+
unset(fail_base)
|
| 604 |
+
unset(cuda_root_fail)
|
| 605 |
+
unset(env_cuda_root_fail)
|
| 606 |
+
return()
|
| 607 |
+
endif()
|
| 608 |
+
endif()
|
| 609 |
+
|
| 610 |
+
# CUDAToolkit_ROOT cmake / env variable not specified, try platform defaults.
|
| 611 |
+
#
|
| 612 |
+
# - Linux: /usr/local/cuda-X.Y
|
| 613 |
+
# - macOS: /Developer/NVIDIA/CUDA-X.Y
|
| 614 |
+
# - Windows: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\vX.Y
|
| 615 |
+
#
|
| 616 |
+
# We will also search the default symlink location /usr/local/cuda first since
|
| 617 |
+
# if CUDAToolkit_ROOT is not specified, it is assumed that the symlinked
|
| 618 |
+
# directory is the desired location.
|
| 619 |
+
if(NOT CUDAToolkit_ROOT_DIR)
|
| 620 |
+
if(UNIX)
|
| 621 |
+
if(NOT APPLE)
|
| 622 |
+
set(platform_base "/usr/local/cuda-")
|
| 623 |
+
else()
|
| 624 |
+
set(platform_base "/Developer/NVIDIA/CUDA-")
|
| 625 |
+
endif()
|
| 626 |
+
else()
|
| 627 |
+
set(platform_base "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v")
|
| 628 |
+
endif()
|
| 629 |
+
|
| 630 |
+
# Build out a descending list of possible cuda installations, e.g.
|
| 631 |
+
file(GLOB possible_paths "${platform_base}*")
|
| 632 |
+
# Iterate the glob results and create a descending list.
|
| 633 |
+
set(versions)
|
| 634 |
+
foreach(p ${possible_paths})
|
| 635 |
+
# Extract version number from end of string
|
| 636 |
+
string(REGEX MATCH "[0-9][0-9]?\\.[0-9]$" p_version ${p})
|
| 637 |
+
if(IS_DIRECTORY ${p} AND p_version)
|
| 638 |
+
list(APPEND versions ${p_version})
|
| 639 |
+
endif()
|
| 640 |
+
endforeach()
|
| 641 |
+
|
| 642 |
+
# Sort numerically in descending order, so we try the newest versions first.
|
| 643 |
+
if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.18)
|
| 644 |
+
list(SORT versions COMPARE NATURAL ORDER DESCENDING)
|
| 645 |
+
elseif(versions)
|
| 646 |
+
# Alphabetical sort here is not ideal but better than nothing
|
| 647 |
+
list(SORT versions)
|
| 648 |
+
list(REVERSE versions)
|
| 649 |
+
endif()
|
| 650 |
+
|
| 651 |
+
# With a descending list of versions, populate possible paths to search.
|
| 652 |
+
set(search_paths)
|
| 653 |
+
foreach(v ${versions})
|
| 654 |
+
list(APPEND search_paths "${platform_base}${v}")
|
| 655 |
+
endforeach()
|
| 656 |
+
|
| 657 |
+
# Force the global default /usr/local/cuda to the front on Unix.
|
| 658 |
+
if(UNIX)
|
| 659 |
+
list(INSERT search_paths 0 "/usr/local/cuda")
|
| 660 |
+
endif()
|
| 661 |
+
|
| 662 |
+
# Now search for the toolkit again using the platform default search paths.
|
| 663 |
+
_CUDAToolkit_find_root_dir(SEARCH_PATHS "${search_paths}" FIND_FLAGS PATH_SUFFIXES bin)
|
| 664 |
+
|
| 665 |
+
# We are done with these variables now, cleanup for caller.
|
| 666 |
+
unset(platform_base)
|
| 667 |
+
unset(possible_paths)
|
| 668 |
+
unset(versions)
|
| 669 |
+
unset(search_paths)
|
| 670 |
+
|
| 671 |
+
if(NOT CUDAToolkit_ROOT_DIR)
|
| 672 |
+
if(CUDAToolkit_FIND_REQUIRED)
|
| 673 |
+
message(FATAL_ERROR "Could not find nvcc, please set CUDAToolkit_ROOT.")
|
| 674 |
+
elseif(NOT CUDAToolkit_FIND_QUIETLY)
|
| 675 |
+
message(STATUS "Could not find nvcc, please set CUDAToolkit_ROOT.")
|
| 676 |
+
endif()
|
| 677 |
+
|
| 678 |
+
set(CUDAToolkit_FOUND FALSE)
|
| 679 |
+
return()
|
| 680 |
+
endif()
|
| 681 |
+
endif()
|
| 682 |
+
endif()
|
| 683 |
+
|
| 684 |
+
if(NOT CUDAToolkit_BIN_DIR)
|
| 685 |
+
set(CUDAToolkit_BIN_DIR "${CUDAToolkit_ROOT_DIR}/bin")
|
| 686 |
+
endif()
|
| 687 |
+
|
| 688 |
+
if(NOT CUDAToolkit_NVCC_EXECUTABLE)
|
| 689 |
+
set(CUDAToolkit_NVCC_EXECUTABLE "${CUDAToolkit_BIN_DIR}/nvcc${CMAKE_EXECUTABLE_SUFFIX}")
|
| 690 |
+
endif()
|
| 691 |
+
|
| 692 |
+
if(CMAKE_CUDA_COMPILER_TOOLKIT_VERSION)
|
| 693 |
+
set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_TOOLKIT_VERSION}")
|
| 694 |
+
else()
|
| 695 |
+
function(_CUDAToolkit_find_version_file result_variable)
|
| 696 |
+
# We first check for a non-scattered installation to prefer it over a scattered installation.
|
| 697 |
+
if(CUDAToolkit_ROOT AND EXISTS "${CUDAToolkit_ROOT}/version.txt")
|
| 698 |
+
set(${result_variable} "${CUDAToolkit_ROOT}/version.txt" PARENT_SCOPE)
|
| 699 |
+
elseif(CUDAToolkit_ROOT_DIR AND EXISTS "${CUDAToolkit_ROOT_DIR}/version.txt")
|
| 700 |
+
set(${result_variable} "${CUDAToolkit_ROOT_DIR}/version.txt" PARENT_SCOPE)
|
| 701 |
+
elseif(CMAKE_SYSROOT_LINK AND EXISTS "${CMAKE_SYSROOT_LINK}/usr/lib/cuda/version.txt")
|
| 702 |
+
set(${result_variable} "${CMAKE_SYSROOT_LINK}/usr/lib/cuda/version.txt" PARENT_SCOPE)
|
| 703 |
+
elseif(EXISTS "${CMAKE_SYSROOT}/usr/lib/cuda/version.txt")
|
| 704 |
+
set(${result_variable} "${CMAKE_SYSROOT}/usr/lib/cuda/version.txt" PARENT_SCOPE)
|
| 705 |
+
endif()
|
| 706 |
+
endfunction()
|
| 707 |
+
|
| 708 |
+
_CUDAToolkit_find_version_file( _CUDAToolkit_version_file )
|
| 709 |
+
if(_CUDAToolkit_version_file)
|
| 710 |
+
# CUDAToolkit_LIBRARY_ROOT contains the device library and version file.
|
| 711 |
+
get_filename_component(CUDAToolkit_LIBRARY_ROOT "${_CUDAToolkit_version_file}" DIRECTORY ABSOLUTE)
|
| 712 |
+
endif()
|
| 713 |
+
unset(_CUDAToolkit_version_file)
|
| 714 |
+
|
| 715 |
+
if(CUDAToolkit_NVCC_EXECUTABLE AND
|
| 716 |
+
CMAKE_CUDA_COMPILER_VERSION AND
|
| 717 |
+
CUDAToolkit_NVCC_EXECUTABLE STREQUAL CMAKE_CUDA_COMPILER)
|
| 718 |
+
# Need to set these based off the already computed CMAKE_CUDA_COMPILER_VERSION value
|
| 719 |
+
# This if statement will always match, but is used to provide variables for MATCH 1,2,3...
|
| 720 |
+
if(CMAKE_CUDA_COMPILER_VERSION MATCHES [=[([0-9]+)\.([0-9]+)\.([0-9]+)]=])
|
| 721 |
+
set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}")
|
| 722 |
+
set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}")
|
| 723 |
+
set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}")
|
| 724 |
+
set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_VERSION}")
|
| 725 |
+
endif()
|
| 726 |
+
elseif(CUDAToolkit_NVCC_EXECUTABLE)
|
| 727 |
+
# Compute the version by invoking nvcc
|
| 728 |
+
execute_process(COMMAND ${CUDAToolkit_NVCC_EXECUTABLE} "--version" OUTPUT_VARIABLE NVCC_OUT)
|
| 729 |
+
if(NVCC_OUT MATCHES [=[ V([0-9]+)\.([0-9]+)\.([0-9]+)]=])
|
| 730 |
+
set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}")
|
| 731 |
+
set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}")
|
| 732 |
+
set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}")
|
| 733 |
+
set(CUDAToolkit_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}")
|
| 734 |
+
endif()
|
| 735 |
+
unset(NVCC_OUT)
|
| 736 |
+
else()
|
| 737 |
+
_CUDAToolkit_find_version_file(version_file)
|
| 738 |
+
if(version_file)
|
| 739 |
+
file(READ "${version_file}" VERSION_INFO)
|
| 740 |
+
if(VERSION_INFO MATCHES [=[CUDA Version ([0-9]+)\.([0-9]+)\.([0-9]+)]=])
|
| 741 |
+
set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}")
|
| 742 |
+
set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}")
|
| 743 |
+
set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}")
|
| 744 |
+
set(CUDAToolkit_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}")
|
| 745 |
+
endif()
|
| 746 |
+
endif()
|
| 747 |
+
endif()
|
| 748 |
+
endif()
|
| 749 |
+
|
| 750 |
+
# Find target directory when crosscompiling.
|
| 751 |
+
if(CMAKE_CROSSCOMPILING)
|
| 752 |
+
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7-a")
|
| 753 |
+
# Support for NVPACK
|
| 754 |
+
set(CUDAToolkit_TARGET_NAME "armv7-linux-androideabi")
|
| 755 |
+
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm")
|
| 756 |
+
set(CUDAToolkit_TARGET_NAME "armv7-linux-gnueabihf")
|
| 757 |
+
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
|
| 758 |
+
if(ANDROID_ARCH_NAME STREQUAL "arm64")
|
| 759 |
+
set(CUDAToolkit_TARGET_NAME "aarch64-linux-androideabi")
|
| 760 |
+
elseif(CMAKE_SYSTEM_NAME STREQUAL "QNX")
|
| 761 |
+
set(CUDAToolkit_TARGET_NAME "aarch64-qnx")
|
| 762 |
+
else()
|
| 763 |
+
set(CUDAToolkit_TARGET_NAME "aarch64-linux")
|
| 764 |
+
endif(ANDROID_ARCH_NAME STREQUAL "arm64")
|
| 765 |
+
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
|
| 766 |
+
set(CUDAToolkit_TARGET_NAME "x86_64-linux")
|
| 767 |
+
endif()
|
| 768 |
+
|
| 769 |
+
if(EXISTS "${CUDAToolkit_ROOT_DIR}/targets/${CUDAToolkit_TARGET_NAME}")
|
| 770 |
+
set(CUDAToolkit_TARGET_DIR "${CUDAToolkit_ROOT_DIR}/targets/${CUDAToolkit_TARGET_NAME}")
|
| 771 |
+
# add known CUDA target root path to the set of directories we search for programs, libraries and headers
|
| 772 |
+
list(PREPEND CMAKE_FIND_ROOT_PATH "${CUDAToolkit_TARGET_DIR}")
|
| 773 |
+
|
| 774 |
+
# Mark that we need to pop the root search path changes after we have
|
| 775 |
+
# found all cuda libraries so that searches for our cross-compilation
|
| 776 |
+
# libraries work when another cuda sdk is in CMAKE_PREFIX_PATH or
|
| 777 |
+
# PATh
|
| 778 |
+
set(_CUDAToolkit_Pop_ROOT_PATH True)
|
| 779 |
+
endif()
|
| 780 |
+
endif()
|
| 781 |
+
|
| 782 |
+
# If not already set we can simply use the toolkit root or it's a scattered installation.
|
| 783 |
+
if(NOT CUDAToolkit_TARGET_DIR)
|
| 784 |
+
# Not cross compiling
|
| 785 |
+
set(CUDAToolkit_TARGET_DIR "${CUDAToolkit_ROOT_DIR}")
|
| 786 |
+
# Now that we have the real ROOT_DIR, find components inside it.
|
| 787 |
+
list(APPEND CMAKE_PREFIX_PATH ${CUDAToolkit_ROOT_DIR})
|
| 788 |
+
|
| 789 |
+
# Mark that we need to pop the prefix path changes after we have
|
| 790 |
+
# found the cudart library.
|
| 791 |
+
set(_CUDAToolkit_Pop_Prefix True)
|
| 792 |
+
endif()
|
| 793 |
+
|
| 794 |
+
# CUDAToolkit_TARGET_DIR always points to the directory containing the include directory.
|
| 795 |
+
# On a scattered installation /usr, on a non-scattered something like /usr/local/cuda or /usr/local/cuda-10.2/targets/aarch64-linux.
|
| 796 |
+
if(EXISTS "${CUDAToolkit_TARGET_DIR}/include/cuda_runtime.h")
|
| 797 |
+
set(CUDAToolkit_INCLUDE_DIR "${CUDAToolkit_TARGET_DIR}/include")
|
| 798 |
+
elseif(NOT CUDAToolkit_FIND_QUIETLY)
|
| 799 |
+
message(STATUS "Unable to find cuda_runtime.h in \"${CUDAToolkit_TARGET_DIR}/include\" for CUDAToolkit_INCLUDE_DIR.")
|
| 800 |
+
endif()
|
| 801 |
+
|
| 802 |
+
# The NVHPC layout moves math library headers and libraries to a sibling directory.
|
| 803 |
+
# Create a separate variable so this directory can be selectively added to math targets.
|
| 804 |
+
if(NOT EXISTS "${CUDAToolkit_INCLUDE_DIR}/cublas_v2.h")
|
| 805 |
+
set(CUDAToolkit_MATH_INCLUDE_DIR "${CUDAToolkit_TARGET_DIR}/../../math_libs/include")
|
| 806 |
+
get_filename_component(CUDAToolkit_MATH_INCLUDE_DIR "${CUDAToolkit_MATH_INCLUDE_DIR}" ABSOLUTE)
|
| 807 |
+
if(NOT EXISTS "${CUDAToolkit_MATH_INCLUDE_DIR}/cublas_v2.h")
|
| 808 |
+
if(NOT CUDAToolkit_FIND_QUIETLY)
|
| 809 |
+
message(STATUS "Unable to find cublas_v2.h in either \"${CUDAToolkit_INCLUDE_DIR}\" or \"${CUDAToolkit_MATH_INCLUDE_DIR}\"")
|
| 810 |
+
endif()
|
| 811 |
+
unset(CUDAToolkit_MATH_INCLUDE_DIR)
|
| 812 |
+
endif()
|
| 813 |
+
endif()
|
| 814 |
+
|
| 815 |
+
# Find the CUDA Runtime Library libcudart
|
| 816 |
+
find_library(CUDA_CUDART
|
| 817 |
+
NAMES cudart
|
| 818 |
+
PATH_SUFFIXES lib64 lib/x64
|
| 819 |
+
)
|
| 820 |
+
find_library(CUDA_CUDART
|
| 821 |
+
NAMES cudart
|
| 822 |
+
PATH_SUFFIXES lib64/stubs lib/x64/stubs
|
| 823 |
+
)
|
| 824 |
+
|
| 825 |
+
if(NOT CUDA_CUDART AND NOT CUDAToolkit_FIND_QUIETLY)
|
| 826 |
+
message(STATUS "Unable to find cudart library.")
|
| 827 |
+
endif()
|
| 828 |
+
|
| 829 |
+
if(_CUDAToolkit_Pop_Prefix)
|
| 830 |
+
list(REMOVE_AT CMAKE_PREFIX_PATH -1)
|
| 831 |
+
unset(_CUDAToolkit_Pop_Prefix)
|
| 832 |
+
endif()
|
| 833 |
+
|
| 834 |
+
#-----------------------------------------------------------------------------
|
| 835 |
+
# Perform version comparison and validate all required variables are set.
|
| 836 |
+
include(FindPackageHandleStandardArgs)
|
| 837 |
+
find_package_handle_standard_args(CUDAToolkit
|
| 838 |
+
REQUIRED_VARS
|
| 839 |
+
CUDAToolkit_INCLUDE_DIR
|
| 840 |
+
CUDAToolkit_VERSION
|
| 841 |
+
CUDA_CUDART
|
| 842 |
+
CUDAToolkit_BIN_DIR
|
| 843 |
+
VERSION_VAR
|
| 844 |
+
CUDAToolkit_VERSION
|
| 845 |
+
)
|
| 846 |
+
|
| 847 |
+
mark_as_advanced(CUDA_CUDART
|
| 848 |
+
CUDAToolkit_INCLUDE_DIR
|
| 849 |
+
CUDAToolkit_NVCC_EXECUTABLE
|
| 850 |
+
CUDAToolkit_SENTINEL_FILE
|
| 851 |
+
)
|
| 852 |
+
|
| 853 |
+
#-----------------------------------------------------------------------------
|
| 854 |
+
# Construct result variables
|
| 855 |
+
if(CUDAToolkit_FOUND)
|
| 856 |
+
set(CUDAToolkit_INCLUDE_DIRS ${CUDAToolkit_INCLUDE_DIR})
|
| 857 |
+
get_filename_component(CUDAToolkit_LIBRARY_DIR ${CUDA_CUDART} DIRECTORY ABSOLUTE)
|
| 858 |
+
endif()
|
| 859 |
+
|
| 860 |
+
#-----------------------------------------------------------------------------
|
| 861 |
+
# Construct import targets
|
| 862 |
+
if(CUDAToolkit_FOUND)
|
| 863 |
+
|
| 864 |
+
function(_CUDAToolkit_find_and_add_import_lib lib_name)
|
| 865 |
+
cmake_parse_arguments(arg "" "" "ALT;DEPS;EXTRA_HINTS;EXTRA_PATH_SUFFIXES;EXTRA_INCLUDE_DIRS" ${ARGN})
|
| 866 |
+
|
| 867 |
+
set(search_names ${lib_name} ${arg_ALT})
|
| 868 |
+
|
| 869 |
+
find_library(CUDA_${lib_name}_LIBRARY
|
| 870 |
+
NAMES ${search_names}
|
| 871 |
+
HINTS ${CUDAToolkit_LIBRARY_DIR}
|
| 872 |
+
ENV CUDA_PATH
|
| 873 |
+
${arg_EXTRA_HINTS}
|
| 874 |
+
PATH_SUFFIXES nvidia/current lib64 lib/x64 lib
|
| 875 |
+
${arg_EXTRA_PATH_SUFFIXES}
|
| 876 |
+
)
|
| 877 |
+
# Don't try any stub directories until we have exhausted all other
|
| 878 |
+
# search locations.
|
| 879 |
+
find_library(CUDA_${lib_name}_LIBRARY
|
| 880 |
+
NAMES ${search_names}
|
| 881 |
+
HINTS ${CUDAToolkit_LIBRARY_DIR}
|
| 882 |
+
ENV CUDA_PATH
|
| 883 |
+
${arg_EXTRA_HINTS}
|
| 884 |
+
PATH_SUFFIXES lib64/stubs lib/x64/stubs lib/stubs stubs
|
| 885 |
+
# Support NVHPC splayed math library layout
|
| 886 |
+
../../math_libs/${CUDAToolkit_VERSION_MAJOR}.${CUDAToolkit_VERSION_MINOR}/lib64
|
| 887 |
+
../../math_libs/lib64
|
| 888 |
+
)
|
| 889 |
+
|
| 890 |
+
mark_as_advanced(CUDA_${lib_name}_LIBRARY)
|
| 891 |
+
|
| 892 |
+
if(NOT TARGET CUDA::${lib_name} AND CUDA_${lib_name}_LIBRARY)
|
| 893 |
+
add_library(CUDA::${lib_name} UNKNOWN IMPORTED)
|
| 894 |
+
set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
|
| 895 |
+
INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}")
|
| 896 |
+
set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
|
| 897 |
+
INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}")
|
| 898 |
+
if(DEFINED CUDAToolkit_MATH_INCLUDE_DIR)
|
| 899 |
+
string(FIND ${CUDA_${lib_name}_LIBRARY} "math_libs" math_libs)
|
| 900 |
+
if(NOT ${math_libs} EQUAL -1)
|
| 901 |
+
set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
|
| 902 |
+
INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_MATH_INCLUDE_DIRS}")
|
| 903 |
+
set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
|
| 904 |
+
INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_MATH_INCLUDE_DIRS}")
|
| 905 |
+
endif()
|
| 906 |
+
endif()
|
| 907 |
+
set_property(TARGET CUDA::${lib_name} PROPERTY IMPORTED_LOCATION "${CUDA_${lib_name}_LIBRARY}")
|
| 908 |
+
foreach(dep ${arg_DEPS})
|
| 909 |
+
if(TARGET CUDA::${dep})
|
| 910 |
+
set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
|
| 911 |
+
INTERFACE_LINK_LIBRARIES CUDA::${dep})
|
| 912 |
+
endif()
|
| 913 |
+
endforeach()
|
| 914 |
+
if(arg_EXTRA_INCLUDE_DIRS)
|
| 915 |
+
set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
|
| 916 |
+
INTERFACE_INCLUDE_DIRECTORIES "${arg_EXTRA_INCLUDE_DIRS}")
|
| 917 |
+
set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
|
| 918 |
+
INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${arg_EXTRA_INCLUDE_DIRS}")
|
| 919 |
+
endif()
|
| 920 |
+
endif()
|
| 921 |
+
endfunction()
|
| 922 |
+
|
| 923 |
+
if(NOT TARGET CUDA::toolkit)
|
| 924 |
+
add_library(CUDA::toolkit IMPORTED INTERFACE)
|
| 925 |
+
set_property(TARGET CUDA::toolkit APPEND PROPERTY
|
| 926 |
+
INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}")
|
| 927 |
+
set_property(TARGET CUDA::toolkit APPEND PROPERTY
|
| 928 |
+
INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}")
|
| 929 |
+
endif()
|
| 930 |
+
|
| 931 |
+
_CUDAToolkit_find_and_add_import_lib(cuda_driver ALT cuda)
|
| 932 |
+
|
| 933 |
+
_CUDAToolkit_find_and_add_import_lib(cudart)
|
| 934 |
+
_CUDAToolkit_find_and_add_import_lib(cudart_static)
|
| 935 |
+
|
| 936 |
+
# setup dependencies that are required for cudart_static when building
|
| 937 |
+
# on linux. These are generally only required when using the CUDA toolkit
|
| 938 |
+
# when CUDA language is disabled
|
| 939 |
+
if(NOT TARGET CUDA::cudart_static_deps
|
| 940 |
+
AND TARGET CUDA::cudart_static)
|
| 941 |
+
|
| 942 |
+
add_library(CUDA::cudart_static_deps IMPORTED INTERFACE)
|
| 943 |
+
set_property(TARGET CUDA::cudart_static APPEND PROPERTY
|
| 944 |
+
INTERFACE_LINK_LIBRARIES CUDA::cudart_static_deps)
|
| 945 |
+
|
| 946 |
+
if(UNIX AND (CMAKE_C_COMPILER OR CMAKE_CXX_COMPILER))
|
| 947 |
+
find_package(Threads REQUIRED)
|
| 948 |
+
set_property(TARGET CUDA::cudart_static_deps APPEND PROPERTY
|
| 949 |
+
INTERFACE_LINK_LIBRARIES Threads::Threads ${CMAKE_DL_LIBS})
|
| 950 |
+
endif()
|
| 951 |
+
|
| 952 |
+
if(UNIX AND NOT APPLE AND NOT (CMAKE_SYSTEM_NAME STREQUAL "QNX"))
|
| 953 |
+
# On Linux, you must link against librt when using the static cuda runtime.
|
| 954 |
+
find_library(CUDAToolkit_rt_LIBRARY rt)
|
| 955 |
+
mark_as_advanced(CUDAToolkit_rt_LIBRARY)
|
| 956 |
+
if(NOT CUDAToolkit_rt_LIBRARY)
|
| 957 |
+
message(WARNING "Could not find librt library, needed by CUDA::cudart_static")
|
| 958 |
+
else()
|
| 959 |
+
set_property(TARGET CUDA::cudart_static_deps APPEND PROPERTY
|
| 960 |
+
INTERFACE_LINK_LIBRARIES ${CUDAToolkit_rt_LIBRARY})
|
| 961 |
+
endif()
|
| 962 |
+
endif()
|
| 963 |
+
endif()
|
| 964 |
+
|
| 965 |
+
_CUDAToolkit_find_and_add_import_lib(culibos) # it's a static library
|
| 966 |
+
foreach(cuda_lib cublasLt cufft curand cusparse nppc nvjpeg)
|
| 967 |
+
_CUDAToolkit_find_and_add_import_lib(${cuda_lib})
|
| 968 |
+
_CUDAToolkit_find_and_add_import_lib(${cuda_lib}_static DEPS culibos)
|
| 969 |
+
endforeach()
|
| 970 |
+
|
| 971 |
+
if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 11.0.0)
|
| 972 |
+
# cublas depends on cublasLt
|
| 973 |
+
# https://docs.nvidia.com/cuda/archive/11.0/cublas/index.html#static-library
|
| 974 |
+
_CUDAToolkit_find_and_add_import_lib(cublas DEPS cublasLt)
|
| 975 |
+
_CUDAToolkit_find_and_add_import_lib(cublas_static DEPS cublasLt_static)
|
| 976 |
+
else()
|
| 977 |
+
_CUDAToolkit_find_and_add_import_lib(cublas)
|
| 978 |
+
_CUDAToolkit_find_and_add_import_lib(cublas_static DEPS culibos)
|
| 979 |
+
endif()
|
| 980 |
+
|
| 981 |
+
# cuFFTW depends on cuFFT
|
| 982 |
+
_CUDAToolkit_find_and_add_import_lib(cufftw DEPS cufft)
|
| 983 |
+
_CUDAToolkit_find_and_add_import_lib(cufftw_static DEPS cufft_static)
|
| 984 |
+
if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 9.2)
|
| 985 |
+
_CUDAToolkit_find_and_add_import_lib(cufft_static_nocallback DEPS culibos)
|
| 986 |
+
endif()
|
| 987 |
+
|
| 988 |
+
# cuSOLVER depends on cuBLAS, and cuSPARSE
|
| 989 |
+
_CUDAToolkit_find_and_add_import_lib(cusolver DEPS cublas cusparse)
|
| 990 |
+
_CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cublas_static cusparse_static culibos)
|
| 991 |
+
|
| 992 |
+
|
| 993 |
+
if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 10.1.2)
|
| 994 |
+
# cusolver depends on liblapack_static.a starting with CUDA 10.1 update 2,
|
| 995 |
+
# https://docs.nvidia.com/cuda/archive/11.5.0/cusolver/index.html#static-link-lapack
|
| 996 |
+
_CUDAToolkit_find_and_add_import_lib(cusolver_lapack_static ALT lapack_static) # implementation detail static lib
|
| 997 |
+
_CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cusolver_lapack_static)
|
| 998 |
+
endif()
|
| 999 |
+
|
| 1000 |
+
if(CUDAToolkit_VERSION VERSION_GREATER 11.2.1)
|
| 1001 |
+
# cusolver depends on libcusolver_metis and cublasLt
|
| 1002 |
+
# https://docs.nvidia.com/cuda/archive/11.2.2/cusolver/index.html#link-dependency
|
| 1003 |
+
_CUDAToolkit_find_and_add_import_lib(cusolver DEPS cublasLt)
|
| 1004 |
+
|
| 1005 |
+
_CUDAToolkit_find_and_add_import_lib(cusolver_metis_static ALT metis_static) # implementation detail static lib
|
| 1006 |
+
_CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cusolver_metis_static cublasLt_static)
|
| 1007 |
+
endif()
|
| 1008 |
+
|
| 1009 |
+
# nvGRAPH depends on cuRAND, and cuSOLVER.
|
| 1010 |
+
_CUDAToolkit_find_and_add_import_lib(nvgraph DEPS curand cusolver)
|
| 1011 |
+
_CUDAToolkit_find_and_add_import_lib(nvgraph_static DEPS curand_static cusolver_static)
|
| 1012 |
+
|
| 1013 |
+
# Process the majority of the NPP libraries.
|
| 1014 |
+
foreach(cuda_lib nppial nppicc nppidei nppif nppig nppim nppist nppitc npps nppicom nppisu)
|
| 1015 |
+
_CUDAToolkit_find_and_add_import_lib(${cuda_lib} DEPS nppc)
|
| 1016 |
+
_CUDAToolkit_find_and_add_import_lib(${cuda_lib}_static DEPS nppc_static)
|
| 1017 |
+
endforeach()
|
| 1018 |
+
|
| 1019 |
+
find_path(CUDAToolkit_CUPTI_INCLUDE_DIR cupti.h PATHS
|
| 1020 |
+
"${CUDAToolkit_ROOT_DIR}/extras/CUPTI/include"
|
| 1021 |
+
"${CUDAToolkit_INCLUDE_DIR}/../extras/CUPTI/include"
|
| 1022 |
+
"${CUDAToolkit_INCLUDE_DIR}"
|
| 1023 |
+
NO_DEFAULT_PATH)
|
| 1024 |
+
mark_as_advanced(CUDAToolkit_CUPTI_INCLUDE_DIR)
|
| 1025 |
+
|
| 1026 |
+
if(CUDAToolkit_CUPTI_INCLUDE_DIR)
|
| 1027 |
+
_CUDAToolkit_find_and_add_import_lib(cupti
|
| 1028 |
+
EXTRA_PATH_SUFFIXES ../extras/CUPTI/lib64/
|
| 1029 |
+
../extras/CUPTI/lib/
|
| 1030 |
+
EXTRA_INCLUDE_DIRS "${CUDAToolkit_CUPTI_INCLUDE_DIR}")
|
| 1031 |
+
_CUDAToolkit_find_and_add_import_lib(cupti_static
|
| 1032 |
+
EXTRA_PATH_SUFFIXES ../extras/CUPTI/lib64/
|
| 1033 |
+
../extras/CUPTI/lib/
|
| 1034 |
+
EXTRA_INCLUDE_DIRS "${CUDAToolkit_CUPTI_INCLUDE_DIR}")
|
| 1035 |
+
endif()
|
| 1036 |
+
|
| 1037 |
+
_CUDAToolkit_find_and_add_import_lib(nvrtc DEPS cuda_driver)
|
| 1038 |
+
|
| 1039 |
+
_CUDAToolkit_find_and_add_import_lib(nvml ALT nvidia-ml nvml)
|
| 1040 |
+
|
| 1041 |
+
# nvtools can be installed outside the CUDA toolkit directory,
|
| 1042 |
+
# so search the NVTOOLSEXT_PATH windows only environment variable
|
| 1043 |
+
set(nvToolsExt_EXTRA_PATH)
|
| 1044 |
+
if(WIN32)
|
| 1045 |
+
set(nvToolsExt_EXTRA_PATH "C:\\Program Files\\NVIDIA Corporation\\NvToolsExt")
|
| 1046 |
+
endif()
|
| 1047 |
+
|
| 1048 |
+
find_path(CUDAToolkit_nvToolsExt_INCLUDE_DIR nvToolsExt.h
|
| 1049 |
+
PATHS "${CUDAToolkit_INCLUDE_DIR}"
|
| 1050 |
+
"${CUDAToolkit_ROOT_DIR}"
|
| 1051 |
+
ENV NVTOOLSEXT_PATH
|
| 1052 |
+
"${nvToolsExt_EXTRA_PATH}"
|
| 1053 |
+
PATH_SUFFIXES include
|
| 1054 |
+
NO_DEFAULT_PATH)
|
| 1055 |
+
mark_as_advanced(CUDAToolkit_nvToolsExt_INCLUDE_DIR)
|
| 1056 |
+
|
| 1057 |
+
if(CUDAToolkit_nvToolsExt_INCLUDE_DIR)
|
| 1058 |
+
_CUDAToolkit_find_and_add_import_lib(nvToolsExt
|
| 1059 |
+
ALT nvToolsExt64 nvToolsExt64_1
|
| 1060 |
+
EXTRA_HINTS ENV NVTOOLSEXT_PATH
|
| 1061 |
+
"${nvToolsExt_EXTRA_PATH}"
|
| 1062 |
+
EXTRA_INCLUDE_DIRS "${CUDAToolkit_nvToolsExt_INCLUDE_DIR}")
|
| 1063 |
+
endif()
|
| 1064 |
+
|
| 1065 |
+
_CUDAToolkit_find_and_add_import_lib(OpenCL)
|
| 1066 |
+
endif()
|
| 1067 |
+
|
| 1068 |
+
unset(CUDAToolkit_ROOT_DIR)
|
| 1069 |
+
|
| 1070 |
+
if(_CUDAToolkit_Pop_ROOT_PATH)
|
| 1071 |
+
list(REMOVE_AT CMAKE_FIND_ROOT_PATH 0)
|
| 1072 |
+
unset(_CUDAToolkit_Pop_ROOT_PATH)
|
| 1073 |
+
endif()
|
parrot/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets-release.cmake
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#----------------------------------------------------------------
|
| 2 |
+
# Generated CMake target import file for configuration "Release".
|
| 3 |
+
#----------------------------------------------------------------
|
| 4 |
+
|
| 5 |
+
# Commands may need to know the format version.
|
| 6 |
+
set(CMAKE_IMPORT_FILE_VERSION 1)
|
| 7 |
+
|
| 8 |
+
# Import target "tensorpipe_uv" for configuration "Release"
|
| 9 |
+
set_property(TARGET tensorpipe_uv APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
|
| 10 |
+
set_target_properties(tensorpipe_uv PROPERTIES
|
| 11 |
+
IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "C"
|
| 12 |
+
IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libtensorpipe_uv.a"
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
list(APPEND _IMPORT_CHECK_TARGETS tensorpipe_uv )
|
| 16 |
+
list(APPEND _IMPORT_CHECK_FILES_FOR_tensorpipe_uv "${_IMPORT_PREFIX}/lib64/libtensorpipe_uv.a" )
|
| 17 |
+
|
| 18 |
+
# Import target "tensorpipe" for configuration "Release"
|
| 19 |
+
set_property(TARGET tensorpipe APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
|
| 20 |
+
set_target_properties(tensorpipe PROPERTIES
|
| 21 |
+
IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX"
|
| 22 |
+
IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libtensorpipe.a"
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
list(APPEND _IMPORT_CHECK_TARGETS tensorpipe )
|
| 26 |
+
list(APPEND _IMPORT_CHECK_FILES_FOR_tensorpipe "${_IMPORT_PREFIX}/lib64/libtensorpipe.a" )
|
| 27 |
+
|
| 28 |
+
# Import target "tensorpipe_cuda" for configuration "Release"
|
| 29 |
+
set_property(TARGET tensorpipe_cuda APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
|
| 30 |
+
set_target_properties(tensorpipe_cuda PROPERTIES
|
| 31 |
+
IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX"
|
| 32 |
+
IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libtensorpipe_cuda.a"
|
| 33 |
+
)
|
| 34 |
+
|
| 35 |
+
list(APPEND _IMPORT_CHECK_TARGETS tensorpipe_cuda )
|
| 36 |
+
list(APPEND _IMPORT_CHECK_FILES_FOR_tensorpipe_cuda "${_IMPORT_PREFIX}/lib64/libtensorpipe_cuda.a" )
|
| 37 |
+
|
| 38 |
+
# Commands beyond this point should not need to know the version.
|
| 39 |
+
set(CMAKE_IMPORT_FILE_VERSION)
|
parrot/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets.cmake
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by CMake
|
| 2 |
+
|
| 3 |
+
if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.5)
|
| 4 |
+
message(FATAL_ERROR "CMake >= 2.6.0 required")
|
| 5 |
+
endif()
|
| 6 |
+
cmake_policy(PUSH)
|
| 7 |
+
cmake_policy(VERSION 2.6...3.17)
|
| 8 |
+
#----------------------------------------------------------------
|
| 9 |
+
# Generated CMake target import file.
|
| 10 |
+
#----------------------------------------------------------------
|
| 11 |
+
|
| 12 |
+
# Commands may need to know the format version.
|
| 13 |
+
set(CMAKE_IMPORT_FILE_VERSION 1)
|
| 14 |
+
|
| 15 |
+
# Protect against multiple inclusion, which would fail when already imported targets are added once more.
|
| 16 |
+
set(_targetsDefined)
|
| 17 |
+
set(_targetsNotDefined)
|
| 18 |
+
set(_expectedTargets)
|
| 19 |
+
foreach(_expectedTarget tensorpipe_uv tensorpipe tensorpipe_cuda)
|
| 20 |
+
list(APPEND _expectedTargets ${_expectedTarget})
|
| 21 |
+
if(NOT TARGET ${_expectedTarget})
|
| 22 |
+
list(APPEND _targetsNotDefined ${_expectedTarget})
|
| 23 |
+
endif()
|
| 24 |
+
if(TARGET ${_expectedTarget})
|
| 25 |
+
list(APPEND _targetsDefined ${_expectedTarget})
|
| 26 |
+
endif()
|
| 27 |
+
endforeach()
|
| 28 |
+
if("${_targetsDefined}" STREQUAL "${_expectedTargets}")
|
| 29 |
+
unset(_targetsDefined)
|
| 30 |
+
unset(_targetsNotDefined)
|
| 31 |
+
unset(_expectedTargets)
|
| 32 |
+
set(CMAKE_IMPORT_FILE_VERSION)
|
| 33 |
+
cmake_policy(POP)
|
| 34 |
+
return()
|
| 35 |
+
endif()
|
| 36 |
+
if(NOT "${_targetsDefined}" STREQUAL "")
|
| 37 |
+
message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_targetsDefined}\nTargets not yet defined: ${_targetsNotDefined}\n")
|
| 38 |
+
endif()
|
| 39 |
+
unset(_targetsDefined)
|
| 40 |
+
unset(_targetsNotDefined)
|
| 41 |
+
unset(_expectedTargets)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# Compute the installation prefix relative to this file.
|
| 45 |
+
get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
|
| 46 |
+
get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
|
| 47 |
+
get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
|
| 48 |
+
get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
|
| 49 |
+
if(_IMPORT_PREFIX STREQUAL "/")
|
| 50 |
+
set(_IMPORT_PREFIX "")
|
| 51 |
+
endif()
|
| 52 |
+
|
| 53 |
+
# Create imported target tensorpipe_uv
|
| 54 |
+
add_library(tensorpipe_uv STATIC IMPORTED)
|
| 55 |
+
|
| 56 |
+
set_target_properties(tensorpipe_uv PROPERTIES
|
| 57 |
+
INTERFACE_LINK_LIBRARIES "\$<LINK_ONLY:pthread>;\$<LINK_ONLY:dl>;\$<LINK_ONLY:rt>"
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
# Create imported target tensorpipe
|
| 61 |
+
add_library(tensorpipe STATIC IMPORTED)
|
| 62 |
+
|
| 63 |
+
set_target_properties(tensorpipe PROPERTIES
|
| 64 |
+
INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include"
|
| 65 |
+
INTERFACE_LINK_LIBRARIES "\$<LINK_ONLY:tensorpipe_uv>"
|
| 66 |
+
)
|
| 67 |
+
|
| 68 |
+
# Create imported target tensorpipe_cuda
|
| 69 |
+
add_library(tensorpipe_cuda STATIC IMPORTED)
|
| 70 |
+
|
| 71 |
+
set_target_properties(tensorpipe_cuda PROPERTIES
|
| 72 |
+
INTERFACE_INCLUDE_DIRECTORIES "/usr/local/cuda/include"
|
| 73 |
+
INTERFACE_LINK_LIBRARIES "tensorpipe;/usr/local/cuda/lib64/libcudart.so"
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
if(CMAKE_VERSION VERSION_LESS 2.8.12)
|
| 77 |
+
message(FATAL_ERROR "This file relies on consumers using CMake 2.8.12 or greater.")
|
| 78 |
+
endif()
|
| 79 |
+
|
| 80 |
+
# Load information for each installed configuration.
|
| 81 |
+
get_filename_component(_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
|
| 82 |
+
file(GLOB CONFIG_FILES "${_DIR}/TensorpipeTargets-*.cmake")
|
| 83 |
+
foreach(f ${CONFIG_FILES})
|
| 84 |
+
include(${f})
|
| 85 |
+
endforeach()
|
| 86 |
+
|
| 87 |
+
# Cleanup temporary variables.
|
| 88 |
+
set(_IMPORT_PREFIX)
|
| 89 |
+
|
| 90 |
+
# Loop over all imported files and verify that they actually exist
|
| 91 |
+
foreach(target ${_IMPORT_CHECK_TARGETS} )
|
| 92 |
+
foreach(file ${_IMPORT_CHECK_FILES_FOR_${target}} )
|
| 93 |
+
if(NOT EXISTS "${file}" )
|
| 94 |
+
message(FATAL_ERROR "The imported target \"${target}\" references the file
|
| 95 |
+
\"${file}\"
|
| 96 |
+
but this file does not exist. Possible reasons include:
|
| 97 |
+
* The file was deleted, renamed, or moved to another location.
|
| 98 |
+
* An install or uninstall procedure did not complete successfully.
|
| 99 |
+
* The installation package was faulty and contained
|
| 100 |
+
\"${CMAKE_CURRENT_LIST_FILE}\"
|
| 101 |
+
but not all the files it references.
|
| 102 |
+
")
|
| 103 |
+
endif()
|
| 104 |
+
endforeach()
|
| 105 |
+
unset(_IMPORT_CHECK_FILES_FOR_${target})
|
| 106 |
+
endforeach()
|
| 107 |
+
unset(_IMPORT_CHECK_TARGETS)
|
| 108 |
+
|
| 109 |
+
# This file does not depend on other imported targets which have
|
| 110 |
+
# been exported from the same project but in a separate export set.
|
| 111 |
+
|
| 112 |
+
# Commands beyond this point should not need to know the version.
|
| 113 |
+
set(CMAKE_IMPORT_FILE_VERSION)
|
| 114 |
+
cmake_policy(POP)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tfcompile.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ae4d5f8a470c79ac912e7f51ef2a303a8cd6fa1b19cce11c9ac26333f82b2e52
|
| 3 |
+
size 155160
|
videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/central_storage_strategy.cpython-310.pyc
ADDED
|
Binary file (8.49 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/cross_device_ops.cpython-310.pyc
ADDED
|
Binary file (44.6 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/cross_device_utils.cpython-310.pyc
ADDED
|
Binary file (22.2 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/distribute_config.cpython-310.pyc
ADDED
|
Binary file (1.27 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/distribute_utils.cpython-310.pyc
ADDED
|
Binary file (13.9 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_lib.cpython-310.pyc
ADDED
|
Binary file (60.9 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_ops.cpython-310.pyc
ADDED
|
Binary file (3.56 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_util.cpython-310.pyc
ADDED
|
Binary file (4.9 kB). View file
|
|
|