Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +1 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/const_fold.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/debug.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/graph_gradual_typechecker.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/merge_matmul.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/normalize.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/optimization.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/proxy_tensor.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/refinement_types.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/schema_type_annotation.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/unify_refinements.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/validator.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_transformation.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/operation.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py +557 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_generator.py +1281 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py +1040 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/operation.py +14 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py +348 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py +52 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/z3_types.py +29 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/core.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/dispatch.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/match.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/more.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/unification_tools.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/utils.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/variable.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/match.py +121 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/more.py +117 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/conflict.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/core.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/dispatcher.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/utils.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/variadic.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/conflict.py +119 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/dispatcher.py +430 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/variadic.py +91 -0
- vlmpy310/lib/python3.10/site-packages/skimage/feature/brief.py +209 -0
- vlmpy310/lib/python3.10/site-packages/skimage/feature/censure.py +343 -0
.gitattributes
CHANGED
|
@@ -1204,3 +1204,4 @@ vlmpy310/lib/python3.10/site-packages/skimage/filters/_multiotsu.cpython-310-x86
|
|
| 1204 |
valley/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9 filter=lfs diff=lfs merge=lfs -text
|
| 1205 |
llava_next/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1206 |
llava_next/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 1204 |
valley/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9 filter=lfs diff=lfs merge=lfs -text
|
| 1205 |
llava_next/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1206 |
llava_next/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1207 |
+
vlmpy310/lib/python3.10/site-packages/skimage/filters/rank/core_cy_3d.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (177 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc
ADDED
|
Binary file (29.3 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/const_fold.cpython-310.pyc
ADDED
|
Binary file (6.76 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/debug.cpython-310.pyc
ADDED
|
Binary file (1.22 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/graph_gradual_typechecker.cpython-310.pyc
ADDED
|
Binary file (26 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/merge_matmul.cpython-310.pyc
ADDED
|
Binary file (4.51 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc
ADDED
|
Binary file (9.18 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/normalize.cpython-310.pyc
ADDED
|
Binary file (5.14 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/optimization.cpython-310.pyc
ADDED
|
Binary file (14.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc
ADDED
|
Binary file (9.35 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/proxy_tensor.cpython-310.pyc
ADDED
|
Binary file (25.6 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/refinement_types.cpython-310.pyc
ADDED
|
Binary file (920 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc
ADDED
|
Binary file (4.85 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/schema_type_annotation.cpython-310.pyc
ADDED
|
Binary file (4.06 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc
ADDED
|
Binary file (95.8 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/unify_refinements.cpython-310.pyc
ADDED
|
Binary file (2.91 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/validator.cpython-310.pyc
ADDED
|
Binary file (15.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc
ADDED
|
Binary file (17.1 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_transformation.cpython-310.pyc
ADDED
|
Binary file (26.4 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/operation.cpython-310.pyc
ADDED
|
Binary file (466 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py
ADDED
|
@@ -0,0 +1,557 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_sub, op_mul, op_div, \
|
| 2 |
+
op_mod, op_gt, op_lt, op_neq, op_eq
|
| 3 |
+
from torch.fx.tensor_type import TensorType, Dyn
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class Constraint:
|
| 7 |
+
pass
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Conj(Constraint):
|
| 11 |
+
def __init__(self, conjuncts):
|
| 12 |
+
"""
|
| 13 |
+
:param conjuncts: Conjunction of constraints
|
| 14 |
+
"""
|
| 15 |
+
self.conjucts = conjuncts
|
| 16 |
+
|
| 17 |
+
def __eq__(self, other):
|
| 18 |
+
if isinstance(other, Conj):
|
| 19 |
+
return self.conjucts == other.conjucts and self.conjucts == other.conjucts
|
| 20 |
+
else:
|
| 21 |
+
return False
|
| 22 |
+
|
| 23 |
+
def __repr__(self):
|
| 24 |
+
return f'And({self.conjucts})'
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class Disj(Constraint):
|
| 28 |
+
def __init__(self, disjuncts):
|
| 29 |
+
"""
|
| 30 |
+
:param disjuncts: Disjunction of constraints
|
| 31 |
+
"""
|
| 32 |
+
self.disjuncts = disjuncts
|
| 33 |
+
|
| 34 |
+
def __eq__(self, other):
|
| 35 |
+
if isinstance(other, Disj):
|
| 36 |
+
return self.disjuncts == other.disjuncts and self.disjuncts == other.disjuncts
|
| 37 |
+
else:
|
| 38 |
+
return False
|
| 39 |
+
|
| 40 |
+
def __repr__(self):
|
| 41 |
+
return f'Or({self.disjuncts})'
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class Prod(Constraint):
|
| 45 |
+
def __init__(self, products):
|
| 46 |
+
"""
|
| 47 |
+
:param products: lists of dimensions to multiply
|
| 48 |
+
"""
|
| 49 |
+
self.products = products
|
| 50 |
+
|
| 51 |
+
def __eq__(self, other):
|
| 52 |
+
if isinstance(other, Prod):
|
| 53 |
+
return self.products == other.products and self.products == other.products
|
| 54 |
+
else:
|
| 55 |
+
return False
|
| 56 |
+
|
| 57 |
+
def __repr__(self):
|
| 58 |
+
return f'Product({self.products})'
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class T(Constraint):
|
| 62 |
+
"""
|
| 63 |
+
True
|
| 64 |
+
"""
|
| 65 |
+
def __init__(self):
|
| 66 |
+
pass
|
| 67 |
+
|
| 68 |
+
def __eq__(self, other):
|
| 69 |
+
return isinstance(other, T)
|
| 70 |
+
|
| 71 |
+
def __repr__(self):
|
| 72 |
+
return 'True'
|
| 73 |
+
|
| 74 |
+
class F(Constraint):
|
| 75 |
+
"""
|
| 76 |
+
False
|
| 77 |
+
"""
|
| 78 |
+
def __init__(self):
|
| 79 |
+
pass
|
| 80 |
+
|
| 81 |
+
def __eq__(self, other):
|
| 82 |
+
return isinstance(other, F)
|
| 83 |
+
|
| 84 |
+
def __repr__(self):
|
| 85 |
+
return 'False'
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class BinaryConstraint(Constraint):
|
| 89 |
+
"""
|
| 90 |
+
Represents all binary operations
|
| 91 |
+
"""
|
| 92 |
+
def __init__(self, lhs, rhs, op):
|
| 93 |
+
"""
|
| 94 |
+
:param lhs: lhs of the constraint
|
| 95 |
+
:param rhs: rhs of the constraint
|
| 96 |
+
:param op: string representing the operation
|
| 97 |
+
"""
|
| 98 |
+
self.lhs = lhs
|
| 99 |
+
self.rhs = rhs
|
| 100 |
+
self.op = op
|
| 101 |
+
|
| 102 |
+
def __eq__(self, other):
|
| 103 |
+
if isinstance(other, BinaryConstraint):
|
| 104 |
+
return self.lhs == other.lhs and self.rhs == other.rhs and self.op == other.op
|
| 105 |
+
else:
|
| 106 |
+
return False
|
| 107 |
+
|
| 108 |
+
def __repr__(self):
|
| 109 |
+
return f'({self.lhs} {self.op} {self.rhs})'
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
class BinConstraintT(BinaryConstraint):
|
| 113 |
+
"""
|
| 114 |
+
Binary constraints about tensors
|
| 115 |
+
"""
|
| 116 |
+
def __init__(self, lhs, rhs, op):
|
| 117 |
+
assert (isinstance(lhs, (TVar, TensorType, int)) or lhs == Dyn) and \
|
| 118 |
+
(isinstance(rhs, (TVar, TensorType, int)) or rhs == Dyn)
|
| 119 |
+
super().__init__(lhs, rhs, op)
|
| 120 |
+
|
| 121 |
+
def __eq__(self, other):
|
| 122 |
+
return super().__eq__(other)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class BinConstraintD(BinaryConstraint):
|
| 126 |
+
"""
|
| 127 |
+
Binary constraints about dimensions
|
| 128 |
+
"""
|
| 129 |
+
def __init__(self, lhs, rhs, op):
|
| 130 |
+
assert is_algebraic_expression(lhs) or is_dim(lhs) or is_bool_expr(lhs)
|
| 131 |
+
assert is_algebraic_expression(rhs) or is_dim(rhs) or is_bool_expr(rhs)
|
| 132 |
+
|
| 133 |
+
super().__init__(lhs, rhs, op)
|
| 134 |
+
|
| 135 |
+
def __eq__(self, other):
|
| 136 |
+
return super().__eq__(other)
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
class TGreatestUpperBound(Constraint):
|
| 141 |
+
"""
|
| 142 |
+
Greatest Upper bound for tensors with dynamic type
|
| 143 |
+
"""
|
| 144 |
+
def __init__(self, res, rhs1, rhs2):
|
| 145 |
+
"""
|
| 146 |
+
:param res: tensor variable that stores the result of the outout
|
| 147 |
+
:param rhs1: tensor or tensor variable
|
| 148 |
+
:param rhs2: tensor or tensor variabke
|
| 149 |
+
"""
|
| 150 |
+
self.res = res
|
| 151 |
+
self.rhs1 = rhs1
|
| 152 |
+
self.rhs2 = rhs2
|
| 153 |
+
|
| 154 |
+
def __repr__(self):
|
| 155 |
+
return f'{self.res} = {self.rhs1}⊔*{self.rhs2}'
|
| 156 |
+
|
| 157 |
+
def __eq__(self, other):
|
| 158 |
+
if isinstance(other, TGreatestUpperBound):
|
| 159 |
+
return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
|
| 160 |
+
else:
|
| 161 |
+
return False
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
class DGreatestUpperBound(Constraint):
|
| 165 |
+
"""
|
| 166 |
+
Greatest Upper bound for dimensions
|
| 167 |
+
"""
|
| 168 |
+
def __init__(self, res, rhs1, rhs2):
|
| 169 |
+
"""
|
| 170 |
+
:param res: Dimension variable to store the result
|
| 171 |
+
:param rhs1: dimension variable 1
|
| 172 |
+
:param rhs2: dimension variable 2
|
| 173 |
+
"""
|
| 174 |
+
assert is_dim(res)
|
| 175 |
+
assert is_dim(rhs1)
|
| 176 |
+
assert is_dim(rhs2)
|
| 177 |
+
|
| 178 |
+
self.res = res
|
| 179 |
+
self.rhs1 = rhs1
|
| 180 |
+
self.rhs2 = rhs2
|
| 181 |
+
|
| 182 |
+
def __repr__(self):
|
| 183 |
+
return f'{self.res} = {self.rhs1}⊔{self.rhs2}'
|
| 184 |
+
|
| 185 |
+
def __eq__(self, other):
|
| 186 |
+
if isinstance(other, DGreatestUpperBound):
|
| 187 |
+
return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
|
| 188 |
+
else:
|
| 189 |
+
return False
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
class CanReshape(Constraint):
|
| 193 |
+
"""
|
| 194 |
+
can_reshape constraint
|
| 195 |
+
"""
|
| 196 |
+
def __init__(self, src, target):
|
| 197 |
+
"""
|
| 198 |
+
:param src: tensor variable
|
| 199 |
+
:param target: tensor
|
| 200 |
+
"""
|
| 201 |
+
self.src = src
|
| 202 |
+
self.target = target
|
| 203 |
+
|
| 204 |
+
def __repr__(self):
|
| 205 |
+
return f'can-reshape({self.src}, {self.target})'
|
| 206 |
+
|
| 207 |
+
def __eq__(self, other):
|
| 208 |
+
if isinstance(other, CanReshape):
|
| 209 |
+
return self.src == other.src and self.target == other.target
|
| 210 |
+
else:
|
| 211 |
+
return False
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
class IndexSelect(Constraint):
|
| 215 |
+
|
| 216 |
+
def __init__(self, tensor_size, input_var, dim_replace, index, output):
|
| 217 |
+
"""
|
| 218 |
+
Args:
|
| 219 |
+
input_var: input to index_select
|
| 220 |
+
tensor_size: tensor size we are considering
|
| 221 |
+
dim_replace: the dimension of the output at "index"
|
| 222 |
+
index: location of the dimensions to replace in the input
|
| 223 |
+
output: variable to store the result
|
| 224 |
+
"""
|
| 225 |
+
assert isinstance(input_var, TVar)
|
| 226 |
+
assert isinstance(output, TVar)
|
| 227 |
+
assert isinstance(dim_replace, DVar) or dim_replace == Dyn
|
| 228 |
+
assert isinstance(index, int)
|
| 229 |
+
|
| 230 |
+
self.input_var = input_var
|
| 231 |
+
self.tensor_size = tensor_size
|
| 232 |
+
self.dim_replace = dim_replace
|
| 233 |
+
self.index = index
|
| 234 |
+
self.output = output
|
| 235 |
+
|
| 236 |
+
def __repr__(self):
|
| 237 |
+
|
| 238 |
+
return f' {self.output} = ' \
|
| 239 |
+
f'IndexSelect({self.input_var}, ' \
|
| 240 |
+
f'tensor_size: {self.tensor_size}, ' \
|
| 241 |
+
f'{self.dim_replace}, ' \
|
| 242 |
+
f'{self.index})'
|
| 243 |
+
|
| 244 |
+
def __eq__(self, other):
|
| 245 |
+
if isinstance(other, IndexSelect):
|
| 246 |
+
return self.tensor_size == other.tensor_size and \
|
| 247 |
+
self.dim_replace == other.dim_replace and \
|
| 248 |
+
self.index == other.index and \
|
| 249 |
+
self.output == other.output and \
|
| 250 |
+
self.input_var == other.input_var
|
| 251 |
+
else:
|
| 252 |
+
return False
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
class Transpose(Constraint):
|
| 256 |
+
|
| 257 |
+
def __init__(self, tensor_size, input_var, index1, index2, output):
|
| 258 |
+
"""
|
| 259 |
+
Args:
|
| 260 |
+
tensor_size: current tensor size
|
| 261 |
+
input_var: variable to hold input
|
| 262 |
+
index1: dimension 1
|
| 263 |
+
index2: dimension 2
|
| 264 |
+
output: output that stores result
|
| 265 |
+
"""
|
| 266 |
+
assert isinstance(input_var, TVar)
|
| 267 |
+
assert isinstance(output, TVar)
|
| 268 |
+
assert isinstance(index1, int)
|
| 269 |
+
assert isinstance(index2, int)
|
| 270 |
+
|
| 271 |
+
self.input_var = input_var
|
| 272 |
+
self.tensor_size = tensor_size
|
| 273 |
+
self.index1 = index1
|
| 274 |
+
self.index2 = index2
|
| 275 |
+
self.output = output
|
| 276 |
+
|
| 277 |
+
def __repr__(self):
|
| 278 |
+
|
| 279 |
+
return f' {self.output} = ' \
|
| 280 |
+
f'Transpose({self.input_var}, ' \
|
| 281 |
+
f'tensor_size: {self.tensor_size}, ' \
|
| 282 |
+
f'{self.index1}, ' \
|
| 283 |
+
f'{self.index2})'
|
| 284 |
+
|
| 285 |
+
def __eq__(self, other):
|
| 286 |
+
if isinstance(other, Transpose):
|
| 287 |
+
return self.tensor_size == other.tensor_size and \
|
| 288 |
+
self.index1 == other.index1 and \
|
| 289 |
+
self.index2 == other.index2 and \
|
| 290 |
+
self.output == other.output and \
|
| 291 |
+
self.input_var == other.input_var
|
| 292 |
+
else:
|
| 293 |
+
return False
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
class GetItem(Constraint):
|
| 297 |
+
|
| 298 |
+
def __init__(self, tensor_size, index, res, input_var):
|
| 299 |
+
"""
|
| 300 |
+
Constraint for getting item given a tensor size
|
| 301 |
+
:param tensor_size: actual number
|
| 302 |
+
:param index: actual number representing the index
|
| 303 |
+
:param res: dimension variable to carry the item we get
|
| 304 |
+
:param input_var: a tensor variable from which we will get item
|
| 305 |
+
"""
|
| 306 |
+
assert isinstance(res, DVar)
|
| 307 |
+
|
| 308 |
+
self.res = res
|
| 309 |
+
self.tensor_size = tensor_size
|
| 310 |
+
self.index = index
|
| 311 |
+
self.input_var = input_var
|
| 312 |
+
|
| 313 |
+
def __repr__(self):
|
| 314 |
+
return f' {self.res} = GetItem({self.input_var}, tensor_size: {self.tensor_size}, {self.index})'
|
| 315 |
+
|
| 316 |
+
def __eq__(self, other):
|
| 317 |
+
if isinstance(other, GetItem):
|
| 318 |
+
return self.res == other.res and \
|
| 319 |
+
self.tensor_size == other.tensor_size and \
|
| 320 |
+
self.index == other.index and \
|
| 321 |
+
self.input_var == other.input_var
|
| 322 |
+
else:
|
| 323 |
+
return False
|
| 324 |
+
|
| 325 |
+
class GetItemTensor(Constraint):
|
| 326 |
+
|
| 327 |
+
def __init__(self, tensor_size, index_tuple, res, input_var):
|
| 328 |
+
"""
|
| 329 |
+
Constraint for getting item given a tensor size
|
| 330 |
+
However, when the argument is a tuple, we will
|
| 331 |
+
expect a tensor
|
| 332 |
+
:param tensor_size: actual number representing the rank
|
| 333 |
+
:param index_tuple: tuple for indexing
|
| 334 |
+
:param res: tensor variable to carry the item we get
|
| 335 |
+
:param input_var: a tensor variable from which we will get item
|
| 336 |
+
"""
|
| 337 |
+
assert isinstance(res, TVar)
|
| 338 |
+
|
| 339 |
+
self.res = res
|
| 340 |
+
self.tensor_size = tensor_size
|
| 341 |
+
self.index_tuple = index_tuple
|
| 342 |
+
self.input_var = input_var
|
| 343 |
+
|
| 344 |
+
def __repr__(self):
|
| 345 |
+
return f' {self.res} = GetItemT({self.input_var}, tensor_size: {self.tensor_size}, {self.index_tuple})'
|
| 346 |
+
|
| 347 |
+
def __eq__(self, other):
|
| 348 |
+
if isinstance(other, GetItemTensor):
|
| 349 |
+
return self.res == other.res and \
|
| 350 |
+
self.tensor_size == other.tensor_size and \
|
| 351 |
+
self.index_tuple == other.index_tuple and \
|
| 352 |
+
self.input_var == other.input_var
|
| 353 |
+
else:
|
| 354 |
+
return False
|
| 355 |
+
|
| 356 |
+
class CalcConv(Constraint):
|
| 357 |
+
|
| 358 |
+
def __init__(self, conv_result, input_var, c_out, kernel, padding, stride, dilation, matching_constraint_vars):
|
| 359 |
+
"""
|
| 360 |
+
:param conv_result: the convolution result
|
| 361 |
+
:param input_var: input to convolution
|
| 362 |
+
:param c_out: output chanel type
|
| 363 |
+
:param kernel: kernel tuple
|
| 364 |
+
"""
|
| 365 |
+
self.conv_result = conv_result
|
| 366 |
+
self.input_var = input_var
|
| 367 |
+
self.c_out = c_out
|
| 368 |
+
self.kernel = kernel
|
| 369 |
+
self.padding = padding
|
| 370 |
+
self.stride = stride
|
| 371 |
+
self.dilation = dilation
|
| 372 |
+
self.matching_constraint = matching_constraint_vars
|
| 373 |
+
|
| 374 |
+
def __repr__(self):
|
| 375 |
+
return f'{self.conv_result} =' \
|
| 376 |
+
f' calc-conv({self.input_var},' \
|
| 377 |
+
f' {self.c_out}, {self.kernel}, ' \
|
| 378 |
+
f'{self.padding}, {self.stride},' \
|
| 379 |
+
f' {self.dilation})'
|
| 380 |
+
|
| 381 |
+
def __eq__(self, other):
|
| 382 |
+
if isinstance(other, CalcConv):
|
| 383 |
+
return self.conv_result == other.conv_result and self.input_var == other.input_var and \
|
| 384 |
+
self.c_out == other.c_out and self.kernel == other.kernel and self.padding == other.padding \
|
| 385 |
+
and self.stride == other.stride and self.dilation == other.dilation \
|
| 386 |
+
and self.matching_constraint == other.matching_constraint
|
| 387 |
+
else:
|
| 388 |
+
return False
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
class CalcMaxPool(Constraint):
|
| 392 |
+
|
| 393 |
+
def __init__(self, maxpool_result, input_var, kernel, padding, stride, dilation, matching_constraint_vars):
|
| 394 |
+
"""
|
| 395 |
+
:param maxpool_result: the result of maxpool
|
| 396 |
+
:param input_var: input to convolution
|
| 397 |
+
:param kernel: kernel tuple
|
| 398 |
+
"""
|
| 399 |
+
self.maxpool_result = maxpool_result
|
| 400 |
+
self.input_var = input_var
|
| 401 |
+
self.kernel = kernel
|
| 402 |
+
self.padding = padding
|
| 403 |
+
self.stride = stride
|
| 404 |
+
self.dilation = dilation
|
| 405 |
+
self.matching_constraint = matching_constraint_vars
|
| 406 |
+
|
| 407 |
+
def __repr__(self):
|
| 408 |
+
return f'{self.maxpool_result} =' \
|
| 409 |
+
f' calc-maxpool({self.input_var},' \
|
| 410 |
+
f' {self.kernel}, ' \
|
| 411 |
+
f'{self.padding}, {self.stride},' \
|
| 412 |
+
f' {self.dilation})'
|
| 413 |
+
|
| 414 |
+
def __eq__(self, other):
|
| 415 |
+
if isinstance(other, CalcMaxPool):
|
| 416 |
+
return self.maxpool_result == other.maxpool_result and self.input_var == other.input_var \
|
| 417 |
+
and self.kernel == other.kernel and self.padding == other.padding \
|
| 418 |
+
and self.stride == other.stride and self.dilation == other.dilation \
|
| 419 |
+
and self.matching_constraint == other.matching_constraint
|
| 420 |
+
else:
|
| 421 |
+
return False
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
class ApplyBroadcasting(Constraint):
|
| 425 |
+
def __init__(self, res1, res2, input1, input2):
|
| 426 |
+
"""
|
| 427 |
+
:param res1: resulting tensor 1
|
| 428 |
+
:param res2: resulting tensor 2
|
| 429 |
+
:param input1: tensor variable 1
|
| 430 |
+
:param input2: tensor variable 2
|
| 431 |
+
"""
|
| 432 |
+
self.res1 = res1
|
| 433 |
+
self.res2 = res2
|
| 434 |
+
self.input1 = input1
|
| 435 |
+
self.input2 = input2
|
| 436 |
+
|
| 437 |
+
def __eq__(self, other):
|
| 438 |
+
if isinstance(other, ApplyBroadcasting):
|
| 439 |
+
return self.res1 == other.res1 \
|
| 440 |
+
and self.res2 == other.res2 \
|
| 441 |
+
and self.input1 == other.input1 \
|
| 442 |
+
and self.input2 == other.input2
|
| 443 |
+
else:
|
| 444 |
+
return False
|
| 445 |
+
|
| 446 |
+
def __repr__(self):
|
| 447 |
+
return f'{self.res1}, {self.res2} ='f' apply-broadcasting({self.input1},' f' {self.input2})'
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
class CalcProduct(Constraint):
|
| 451 |
+
"""
|
| 452 |
+
Given correct dimensions, calculate the product for flatten accounting for Dyn
|
| 453 |
+
"""
|
| 454 |
+
def __init__(self, start, end, flattened, dims_to_flatten):
|
| 455 |
+
"""
|
| 456 |
+
:param start: start index
|
| 457 |
+
:param end: end index
|
| 458 |
+
:param flattened: variable to store the product
|
| 459 |
+
:param dims_to_flatten: the type which we will flatten
|
| 460 |
+
"""
|
| 461 |
+
assert isinstance(dims_to_flatten, list)
|
| 462 |
+
assert isinstance(flattened, TVar)
|
| 463 |
+
assert isinstance(start, int)
|
| 464 |
+
assert isinstance(end, int)
|
| 465 |
+
|
| 466 |
+
self.start = start
|
| 467 |
+
self.end = end
|
| 468 |
+
self.dims_to_flatten = dims_to_flatten
|
| 469 |
+
self.flattened = flattened
|
| 470 |
+
|
| 471 |
+
def __eq__(self, other):
|
| 472 |
+
if isinstance(other, CalcProduct):
|
| 473 |
+
return self.start == other.start and self.end == other.end and \
|
| 474 |
+
self.dims_to_flatten == other.dims_to_flatten and self.flattened == other.flattened
|
| 475 |
+
|
| 476 |
+
else:
|
| 477 |
+
return False
|
| 478 |
+
|
| 479 |
+
def __repr__(self):
|
| 480 |
+
return f'{self.flattened} = CalcProduct({self.start}, {self.end}, {self.dims_to_flatten})'
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
class TVar:
|
| 484 |
+
"""
|
| 485 |
+
Tensor variable with no tensor constructor
|
| 486 |
+
"""
|
| 487 |
+
def __init__(self, tvar):
|
| 488 |
+
"""
|
| 489 |
+
:param tvar: tensor variable
|
| 490 |
+
"""
|
| 491 |
+
self.tvar = tvar
|
| 492 |
+
|
| 493 |
+
def __repr__(self):
|
| 494 |
+
return f'TV({self.tvar})'
|
| 495 |
+
|
| 496 |
+
def __eq__(self, other):
|
| 497 |
+
if isinstance(other, TVar):
|
| 498 |
+
return self.tvar == other.tvar
|
| 499 |
+
else:
|
| 500 |
+
return False
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
class DVar:
|
| 504 |
+
"""
|
| 505 |
+
Dimension variable
|
| 506 |
+
"""
|
| 507 |
+
def __init__(self, c):
|
| 508 |
+
"""
|
| 509 |
+
:param c: character or number
|
| 510 |
+
"""
|
| 511 |
+
self.c = c
|
| 512 |
+
|
| 513 |
+
def __repr__(self):
|
| 514 |
+
return f'DV({self.c})'
|
| 515 |
+
|
| 516 |
+
def __eq__(self, other):
|
| 517 |
+
if isinstance(other, DVar):
|
| 518 |
+
return self.c == other.c
|
| 519 |
+
else:
|
| 520 |
+
return False
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
class BVar:
    """A boolean variable."""

    def __init__(self, c):
        """
        :param c: character or number naming the variable
        """
        self.c = c

    def __repr__(self):
        return f'BV({self.c})'

    def __eq__(self, other):
        # Equal only to another BVar with the same name.
        return isinstance(other, BVar) and self.c == other.c
|
| 541 |
+
|
| 542 |
+
|
| 543 |
+
def is_algebraic_expression(constraint):
    """Return True iff *constraint* is an arithmetic dimension expression.

    That is either a dimension binary constraint whose operator is one of
    +, -, /, * or mod, or a Prod (product of dimensions).
    """
    if isinstance(constraint, BinConstraintD):
        return constraint.op in (op_add, op_sub, op_div, op_mul, op_mod)
    return isinstance(constraint, Prod)
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
def is_bool_expr(constraint):
    """Return True iff *constraint* denotes a boolean expression.

    Boolean expressions are comparison constraints over dimensions
    (>, <, !=, ==) as well as boolean variables, conjunctions and
    disjunctions.
    """
    comparison_ops = (op_gt, op_lt, op_neq, op_eq)
    if isinstance(constraint, BinConstraintD):
        return constraint.op in comparison_ops
    return isinstance(constraint, (BVar, Conj, Disj))
|
| 555 |
+
|
| 556 |
+
def is_dim(d):
    """Return True iff *d* can stand for a single dimension: a DVar, an int, or Dyn."""
    if isinstance(d, (DVar, int)):
        return True
    return d == Dyn
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_generator.py
ADDED
|
@@ -0,0 +1,1281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import operator
|
| 3 |
+
import warnings
|
| 4 |
+
from typing import Callable, Dict, Iterable
|
| 5 |
+
|
| 6 |
+
from torch.fx._symbolic_trace import _assert_is_none
|
| 7 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, CalcProduct, \
|
| 8 |
+
Disj, TGreatestUpperBound, CalcMaxPool, CalcConv, Conj, BinConstraintT, CanReshape, BinConstraintD, GetItem, T, F, \
|
| 9 |
+
TVar, DVar, GetItemTensor, IndexSelect, Transpose, DGreatestUpperBound
|
| 10 |
+
from torch.fx.experimental.migrate_gradual_types.operation import \
|
| 11 |
+
op_eq, op_matching, op_consistency, op_leq, op_precision, op_gt, op_div, op_sub, op_neq, op_lt, op_add, op_mul
|
| 12 |
+
from torch.fx.node import Target, Node
|
| 13 |
+
from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar, gen_tvar, \
|
| 14 |
+
gen_bvar
|
| 15 |
+
|
| 16 |
+
from torch.fx.tensor_type import Dyn, TensorType
|
| 17 |
+
from torch.nn.modules.conv import Conv2d
|
| 18 |
+
from torch.nn.modules.batchnorm import BatchNorm2d
|
| 19 |
+
|
| 20 |
+
_INFERENCE_RULES: Dict[Target, Callable] = {}
|
| 21 |
+
|
| 22 |
+
MAX_TENSOR_RANK = 4
|
| 23 |
+
|
| 24 |
+
def register_inference_rule(call_target):
    """Decorator factory that registers a constraint-generation rule.

    The decorated function becomes the inference rule for *call_target*
    in the module-level ``_INFERENCE_RULES`` table.

    :raises RuntimeError: if a rule for *call_target* is already registered.
    """
    def register(fn):
        # Refuse double registration so a rule cannot silently shadow another.
        if call_target not in _INFERENCE_RULES:
            _INFERENCE_RULES[call_target] = fn
            return fn
        raise RuntimeError(f'Inference rule already registered for {call_target}!')
    return register
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def generate_flatten_constraints(start_dim, end_dim, input, flattened, n, counter):
    """
    Build the constraints describing a flatten of a rank-``n`` tensor.

    :param start_dim: first dimension to flatten (-1 means the last one)
    :param end_dim: last dimension to flatten (negative counts from the end)
    :param input: tensor variable for the input of flatten
    :param flattened: tensor variable for the flattened result
    :param n: assumed rank of the input tensor
    :param counter: fresh-variable counter, threaded through and returned
    :return: a pair ``(constraint, counter)``
    """
    # Pin the input to a rank-n tensor of fresh dimension variables.
    d, counter = gen_tensor_dims(n, counter)
    c1 = BinConstraintT(input, TensorType(d), op_eq)
    # Normalize the range to positive, end-exclusive indices.
    # NOTE(review): abs(start_dim) maps e.g. -2 to 2 rather than n - 2 —
    # confirm this is the intended handling of negative start dims other than -1.
    start_dim = n if start_dim == -1 else abs(start_dim)
    end_dim = n + end_dim + 1 if end_dim < 0 else end_dim + 1
    # The selected dimensions collapse to their product in the output.
    c2 = CalcProduct(start_dim, end_dim, flattened, d)
    # All generated dimensions must be natural numbers.
    nat_constraints = gen_nat_constraints(d)
    return Conj([c1, c2, *nat_constraints]), counter
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
@register_inference_rule(getattr)
def get_attr_inference_rule(n: Node, symbols, constraints, counter):
    """
    Rule for ``getattr`` nodes.

    Only the "device" attribute is supported; for it the output tensor
    keeps exactly the type of the input tensor.
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], str)

    # Fresh tensor variable for the result of the attribute access.
    output, counter = gen_tvar(counter)
    symbols[n] = output

    input = symbols[n.args[0]]
    attr = n.args[1]

    if attr != 'device':
        raise NotImplementedError('Not yet implemented')
    # Reading .device leaves the tensor type unchanged.
    return [BinConstraintT(input, output, op_eq)], counter
|
| 60 |
+
|
| 61 |
+
@register_inference_rule(torch.bmm)
def bmm_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraints that match the input to a size 3 tensor
    and switch the dimensions according to the rules
    of batch multiplication.

    Emits a disjunction over four cases: both inputs fully dynamic,
    either one dynamic, or both ranked (the standard bmm shape rule
    with a greatest-upper-bound batch dimension).
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], Node)

    # Fresh tensor variable for the bmm result.
    bmm_output, counter = gen_tvar(counter)
    symbols[n] = bmm_output

    bmm_input1 = symbols[n.args[0]]
    bmm_input2 = symbols[n.args[1]]

    # Both operands are constrained to rank 3.
    dims_input1, counter = gen_tensor_dims(3, counter)
    dims_input2, counter = gen_tensor_dims(3, counter)

    # Case 1: everything is dynamic.
    inputs_dyn = Conj([BinConstraintT(bmm_input1, Dyn, op_eq),
                       BinConstraintT(bmm_input2, Dyn, op_eq),
                       BinConstraintT(bmm_output, Dyn, op_eq)])

    # Case 2: only the first input is dynamic; its middle dim is unknown in the output.
    input1_dyn = Conj([BinConstraintT(bmm_input1, Dyn, op_eq),
                       BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
                       BinConstraintT(bmm_output, TensorType([dims_input2[0], Dyn, dims_input2[2]]), op_eq)])

    # Case 3: only the second input is dynamic; its last dim is unknown in the output.
    input2_dyn = Conj([BinConstraintT(bmm_input2, Dyn, op_eq),
                       BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
                       BinConstraintT(bmm_output, TensorType([dims_input1[0], dims_input1[1], Dyn]), op_eq)])

    # Batch dimensions of the two operands must be consistent.
    consistency_constraints = [BinConstraintD(dims_input1[0], dims_input2[0], op_consistency)]

    batch_size, counter = gen_dvar(counter)

    # Case 4: both operands are ranked; output is (batch, rows of 1st, cols of 2nd),
    # where batch is the greatest upper bound of the two batch dims.
    inputs_are_tensors = Conj([BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
                               BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
                               BinConstraintT(bmm_output, TensorType([batch_size, dims_input1[1], dims_input2[2]]), op_eq),
                               *consistency_constraints, DGreatestUpperBound(batch_size, dims_input1[0], dims_input2[0])])

    return [Disj([inputs_dyn, input1_dyn, input2_dyn, inputs_are_tensors])], counter
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@register_inference_rule("index_select")
def index_select_inference_rule(n: Node, symbols, constraints, counter):
    """
    We constrain the second argument to a vector or Dyn.
    The output replaces the input with the shape of the vector
    at the position given by the index (first argument).

    args: (input node, dim as an int, index tensor node)
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], int)
    assert isinstance(n.args[2], Node)

    # Fresh tensor variable for the index_select result.
    index_select, counter = gen_tvar(counter)
    symbols[n] = index_select

    # A single fresh dimension for the (1-D) index tensor.
    dims, counter = gen_tensor_dims(1, counter)

    # The index argument is either a rank-1 tensor or fully dynamic.
    is_size_1 = BinConstraintT(symbols[n.args[2]], TensorType(dims), op_eq)
    is_dyn = BinConstraintT(symbols[n.args[2]], Dyn, op_eq)

    # For each possible input rank, replace the selected dim with the index length …
    c2 = Conj([is_size_1, Disj([IndexSelect(i + 1, symbols[n.args[0]], dims[0], n.args[1], index_select)
                                for i in range(MAX_TENSOR_RANK)])])
    # … or with Dyn when the index tensor itself is dynamic.
    c3 = Conj([is_dyn, Disj([IndexSelect(i + 1, symbols[n.args[0]], Dyn, n.args[1], index_select)
                             for i in range(MAX_TENSOR_RANK)])])

    return [Disj([c2, c3])], counter
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
@register_inference_rule("expand")
def expand_inference_rule(n: Node, symbols, constraints, counter):
    """
    We generate the exact constraints as we do for tensor additions but we constraint
    the rank of this expression to be equal to len(n.args[1:]) so that only
    those cases get considered for the output.

    args: (input node, *target sizes) where each size is an int or a Node
    that maps to a dimension variable.
    """
    assert isinstance(n.args[0], Node)

    # define the output for expand
    expand, counter = gen_tvar(counter)
    symbols[n] = expand

    # since we do not have two nodes here, we will construct an argument variable
    # e2 stands for the (virtual) tensor built from the requested sizes.
    e1 = symbols[n.args[0]]
    e2, counter = gen_tvar(counter)

    # Every size given as a node must be a non-negative dimension variable.
    e2_nat_constraints = []
    for arg in n.args[1:]:
        assert isinstance(arg, (Node, int))
        if isinstance(arg, Node):
            assert isinstance(symbols[arg], DVar)
            e2_nat_constraints.append(BinConstraintD(0, symbols[arg], op_leq))

    e2_constraint = BinConstraintT(e2, TensorType([arg if isinstance(arg, int) else symbols[arg] for arg in n.args[1:]]), op_eq)

    # expand behaves like broadcasting the input against the requested shape.
    constraints, counter = gen_broadcasting_constraints(e1, e2, symbols, counter, expand)

    # constraint the output size: its rank equals the number of requested sizes.
    dims, counter = gen_tensor_dims(len(n.args[1:]), counter)
    nat_constraints = gen_nat_constraints(dims)
    c = [BinConstraintT(expand, TensorType(dims), op_eq), *nat_constraints, e2_constraint, *e2_nat_constraints]
    constraints += c

    return constraints, counter
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
@register_inference_rule(torch.nn.functional.gelu)
@register_inference_rule(torch.nn.functional.dropout)
@register_inference_rule(torch.nn.functional.softmax)
@register_inference_rule("detach")
@register_inference_rule("to")
@register_inference_rule("int")
@register_inference_rule("long")
@register_inference_rule("contiguous")
@register_inference_rule(torch.ones)
@register_inference_rule(torch.zeros)
def equality_inference_rule(n: Node, symbols, constraints, counter):
    """
    We generate the constraint: input = output.

    Covers shape-preserving ops (gelu, dropout, softmax, detach, …) and
    constructors like ``torch.ones``/``torch.zeros`` whose arguments are
    the size — either dimension-variable nodes or a literal tuple.
    """
    # Fresh tensor variable for this node's result.
    output, counter = gen_tvar(counter)
    symbols[n] = output

    if isinstance(n.args[0], Node):
        input = symbols[n.args[0]]
        if isinstance(input, TVar):
            # Shape-preserving operation: output type equals input type.
            return [BinConstraintT(input, output, op_eq)], counter

        # then we have dimension variables (e.g. torch.ones(a, b)):
        # the args collectively form the output size.
        else:
            for arg in n.args:
                assert isinstance(symbols[arg], DVar)
            my_size = [symbols[arg] for arg in n.args]
            return [BinConstraintT(output, TensorType(my_size), op_eq)], counter

    elif isinstance(n.args[0], tuple):
        # then the tuple is the size (e.g. torch.ones((a, b))); ranks above
        # MAX_TENSOR_RANK (4) are not supported.
        assert len(n.args[0]) <= 4
        my_size = [symbols[arg] for arg in n.args[0]]
        return [BinConstraintT(output, TensorType(my_size), op_eq)], counter
    else:
        raise NotImplementedError('Method not yet implemented')
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
@register_inference_rule("transpose")
def transpose_inference_rule(n: Node, symbols, constraints, counter):
    """
    Can be considered as a sequence of two index selects, so we generate constraints accordingly.

    args: (input node, dim0 as an int, dim1 as an int)
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], int)
    assert isinstance(n.args[2], int)

    # Fresh tensor variable for the transposed result.
    output, counter = gen_tvar(counter)
    symbols[n] = output

    from_arg = symbols[n.args[0]]
    assert isinstance(from_arg, TVar)

    # input and output are dyn
    is_dyn = Conj([BinConstraintT(from_arg, Dyn, op_eq), BinConstraintT(output, Dyn, op_eq)])

    # or input is a tensor and we actually do the replacement,
    # trying every possible rank up to MAX_TENSOR_RANK.
    c3 = Disj([Transpose(i + 1, from_arg, n.args[1], n.args[2], output) for i in range(MAX_TENSOR_RANK)])

    return [Disj([is_dyn, c3])], counter
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
@register_inference_rule("type_as")
def type_inference_rule(n: Node, symbols, constraints, counter):
    """
    Rule for ``x.type_as(y)``: the two arguments must be consistent
    tensors, and the output takes the type of the second argument.
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], Node)

    # Fresh tensor variable for the result of type_as.
    output, counter = gen_tvar(counter)
    symbols[n] = output

    source_var = symbols[n.args[0]]
    target_var = symbols[n.args[1]]
    assert isinstance(source_var, TVar)
    assert isinstance(target_var, TVar)

    consistency = BinConstraintT(source_var, target_var, op_consistency)
    same_as_target = BinConstraintT(output, target_var, op_eq)
    return [consistency, same_as_target], counter
|
| 253 |
+
|
| 254 |
+
@register_inference_rule("masked_fill_")
def masked_fill_inference_rule(n: Node, symbols, constraints, counter):
    """
    Similar to addition. For now we implement the constraints when
    the argument is a boolean tensor. There is also a case for when
    it is a condition. We will leave this out for now.
    """

    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], Node)

    # We will retrieve the type variables from the symbol table
    # and confirm they are tensor variables

    e1 = symbols[n.args[0]]
    e2 = symbols[n.args[1]]

    if isinstance(e1, TVar) and isinstance(e2, TVar):
        # The mask broadcasts against the input, exactly like elementwise ops.
        masked_fill_tensor, counter = gen_tvar(counter)
        symbols[n] = masked_fill_tensor
        return gen_broadcasting_constraints(e1, e2, symbols, counter, masked_fill_tensor)
    else:
        raise NotImplementedError('Not yet implemented')
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
@register_inference_rule(torch.nn.functional.embedding)
def embedding_inference_rule_functional(n: Node, symbols, constraints, counter):
    """
    Rule for ``torch.nn.functional.embedding``.

    The weight argument is fixed to a rank-2 tensor; its second dimension
    is the embedding dimension appended to the input shape by
    ``gen_embedding_rules``.
    """
    assert isinstance(n.args[0], Node)

    embedding_dim_weights = symbols[n.args[1]]

    # will treat this as a static shape. So we will not use matching.
    weight_dims, counter = gen_tensor_dims(2, counter)
    equality_constraint = BinConstraintT(embedding_dim_weights, TensorType(weight_dims), op_eq)
    embedding_dim = weight_dims[1]
    constraints, counter = gen_embedding_rules(n, symbols, embedding_dim, counter)
    return [equality_constraint] + constraints, counter
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
@register_inference_rule(torch.nn.modules.sparse.Embedding)
def embedding_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    The output shape differs from the input shape in the last dimension.

    :param module_instance: the ``nn.Embedding`` module; its
        ``embedding_dim`` is appended to the input shape.
    """
    assert isinstance(n.args[0], Node)
    return gen_embedding_rules(n, symbols, module_instance.embedding_dim, counter)
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def gen_embedding_rules(n: Node, symbols, embedding_dim, counter):
    """
    Shared constraint generation for embedding lookups: the output shape
    is the input shape with ``embedding_dim`` appended, or Dyn when the
    input is Dyn.

    :param embedding_dim: the size of each embedding vector (int or DVar)
    :return: a pair ``(constraints, counter)``
    """
    # Fresh tensor variable for the embedding output.
    embedding_output, counter = gen_tvar(counter)
    symbols[n] = embedding_output
    embedding_input = symbols[n.args[0]]

    input_dyn = BinConstraintT(embedding_input, Dyn, op_eq)
    output_dyn = BinConstraintT(embedding_output, Dyn, op_eq)

    # Case 1: fully dynamic input gives a fully dynamic output.
    c1 = Conj([input_dyn, output_dyn])
    c2 = []

    # Case 2: one disjunct per possible input rank (output rank is input rank + 1,
    # so the input rank stays strictly below MAX_TENSOR_RANK).
    for i in range(1, MAX_TENSOR_RANK):
        new_dims, counter = gen_tensor_dims(i, counter)
        nat_constraints = gen_nat_constraints(new_dims)

        # we consider all tensor sizes and append embedding_dim to the end of the output dimension in all cases
        c_tensor_i = Conj([BinConstraintT(embedding_input, TensorType(new_dims), op_eq),
                           BinConstraintT(embedding_output, TensorType(new_dims + [embedding_dim]), op_eq)] +
                          nat_constraints)
        c2.append(c_tensor_i)

    return [Disj([c1, Disj(c2)])], counter
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
@register_inference_rule(torch.tensor)
def tensor_inference_rule(n: Node, symbols, constraints, counter):
    """
    Rule for ``torch.tensor``: emit no constraints at all.

    Scalar construction is not modelled yet; skipping the node keeps the
    rest of the constraint system unaffected, which has been sufficient
    for the examples handled so far. Support can be added later if needed.
    """
    return [], counter
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
@register_inference_rule("reshape")
@register_inference_rule("view")
def view_inference_rule(n: Node, symbols, constraints, counter):
    """
    Similar to reshape but with an extra condition on the strides.

    Target sizes may be ints, dimension-variable nodes, or -1 (meaning
    "infer this dimension"), which gets a fresh dimension variable.
    """
    assert isinstance(n.args[0], Node)

    # generate the new variable
    my_view, counter = gen_tvar(counter)
    symbols[n] = my_view

    src_var = symbols[n.args[0]]
    t2 = [symbols[elem] if isinstance(elem, Node) else elem for elem in n.args[1:]]  # target shape
    t2_type = []
    num_constraints = []

    for t in t2:
        if t == -1:
            # -1 becomes a fresh, necessarily-static dimension variable.
            var, counter = gen_dvar(counter)
            t2_type.append(var)
            num_constraints.append(BinConstraintD(var, Dyn, op_neq))

        else:
            # Explicit sizes must also be static (not Dyn).
            num_constraints.append(BinConstraintD(t, Dyn, op_neq))
            t2_type.append(t)

    t2_type = TensorType(t2_type)  # type: ignore[assignment]

    # Output has the target shape, and the source must be reshapeable to it.
    c1 = BinConstraintT(my_view, t2_type, op_eq)
    c2 = CanReshape(src_var, t2_type)

    # TODO: add the extra check mentioned here:
    # https://pytorch.org/docs/stable/generated/torch.Tensor.view.html#torch.Tensor.view

    return [c1, c2] + num_constraints, counter  # type: ignore[operator]
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
@register_inference_rule("size")
def size_inference_rule(n: Node, symbols, constraints, counter):
    """
    The constraint is just lhs = rhs.
    Ex: size = input_ids.size()

    With one argument the whole size is returned (a tensor variable equal
    to the input); with an int second argument a single dimension is
    extracted via GetItem.
    """


    if len(n.args) == 1:
        # generate the new variable
        size, counter = gen_tvar(counter)
        symbols[n] = size
        input = symbols[n.args[0]]
        # x.size() has exactly the type of x.
        c = BinConstraintT(input, size, op_eq)
        return [c], counter

    elif len(n.args) == 2:
        # TODO: review this rule; should input = dyn; output = dyn be included here?
        if isinstance(n.args[1], int):
            # generate the new variable
            size_index, counter = gen_dvar(counter)
            symbols[n] = size_index
            input = symbols[n.args[0]]
            # Extract the requested dimension for every possible input rank.
            c2 = [GetItem(i + 1, n.args[1], size_index, input) for i in range(MAX_TENSOR_RANK)]
            # The extracted size is a natural number.
            c3 = BinConstraintD(0, size_index, op_leq)

            # Dynamic input yields a dynamic dimension.
            input_dyn = BinConstraintT(input, Dyn, op_eq)
            output_dyn = BinConstraintD(size_index, Dyn, op_eq)
            c1 = Conj([input_dyn, output_dyn])

            return [Disj([c1, Conj([Disj(c2), c3])])], counter

        else:
            raise NotImplementedError

    else:
        raise NotImplementedError
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def range_check(i, n):
    """
    Checks if an index i is within range of a size n list.

    Valid indices for a list of size n are -n, ..., n - 1 (negative
    indices count from the end, as in Python/PyTorch indexing).

    Args:
        i: index
        n: list size

    Returns:
        T() if the index is in range, F() otherwise
    """
    if i >= 0:
        return T() if i < n else F()
    else:
        # Negative indices are valid down to -n, i.e. i >= -n.
        # (The previous comparison `i >= n` was always false for i < 0 and
        # n > 0, which rejected every valid negative index such as -1.)
        return T() if i >= -n else F()
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
@register_inference_rule(torch.cumsum)
def cumsum_inference_rule(n: Node, symbols, constraints, counter):
    """
    Input and output shapes should be equal.
    We should verify that the index is valid.
    """
    assert isinstance(n.args[0], Node)
    # The dim may be positional or a keyword argument.
    arg_1 = n.args[1] if len(n.args) > 1 else n.kwargs["dim"]
    assert isinstance(arg_1, int)

    # Fresh tensor variable for the cumsum result.
    output, counter = gen_tvar(counter)
    symbols[n] = output
    input = symbols[n.args[0]]

    # Case 1: dynamic input gives a dynamic output.
    input_dyn = BinConstraintT(input, Dyn, op_eq)
    output_dyn = BinConstraintT(output, Dyn, op_eq)
    c1 = Conj([input_dyn, output_dyn])
    c2 = []
    # Case 2: for each possible rank, input and output shapes coincide
    # and the dim argument must be in range for that rank.
    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(new_dims)

        c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims), op_eq),
                           BinConstraintT(output, TensorType(new_dims), op_eq)] +
                          [range_check(arg_1, i)] + nat_constraints)

        c2.append(c_tensor_i)
    dyn_or_tensor = Disj([c1, Disj(c2)])
    return [dyn_or_tensor], counter
|
| 460 |
+
|
| 461 |
+
|
| 462 |
+
@register_inference_rule(_assert_is_none)
def assert_inference_rule(n: Node, symbols, constraints, counter):
    """
    Rule for the tracer's ``_assert_is_none`` helper: the node must be
    dead (no users), and it contributes no constraints.
    """
    assert not n.users
    return [], counter
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
@register_inference_rule(operator.getitem)
def getitem_inference_rule(n: Node, symbols, constraints, counter):
    """
    Rule for ``operator.getitem``: an int index extracts a single
    dimension; a tuple index produces a tensor (via GetItemTensor).
    """
    assert isinstance(n.args[0], Node)

    # dimension output case
    if isinstance(n.args[1], int):
        # create and store the new dimension variable
        get_item_output, counter = gen_dvar(counter)
        symbols[n] = get_item_output

        # retrieve arg variables
        get_item_arg = symbols[n.args[0]]
        assert isinstance(get_item_arg, TVar)

        # if the input is dynamic, we accept any index and return
        # a dynamic dimension as output
        input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
        output_dyn = BinConstraintD(get_item_output, Dyn, op_eq)
        c1 = Conj([input_dyn, output_dyn])

        # if the input is a tensor,
        # generate a getItem constraint which will be expanded based on the
        # tensor dimension.

        c2 = [GetItem(i + 1, n.args[1], get_item_output, get_item_arg) for i in range(MAX_TENSOR_RANK)]

        # since the output is a dimension, we make sure it's a natural number
        # added as a conjunction to the disjunction of c2
        c3 = BinConstraintD(0, get_item_output, op_leq)
        return [Disj([c1, Conj([Disj(c2), c3])])], counter

    # tensor output case
    elif isinstance(n.args[1], tuple):
        # create and store the new tensor variable
        get_item_output, counter = gen_tvar(counter)
        symbols[n] = get_item_output

        # retrieve arg variables
        if n.args[0] in symbols:
            get_item_arg = symbols[n.args[0]]
            assert isinstance(get_item_arg, TVar)

            # Dynamic input gives a dynamic output.
            input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
            output_dyn = BinConstraintT(get_item_output, Dyn, op_eq)  # type: ignore[assignment]
            c1 = Conj([input_dyn, output_dyn])

            # Otherwise, index for every possible input rank.
            c2 = [GetItemTensor(i + 1, n.args[1], get_item_output, get_item_arg)  # type: ignore[misc]
                  for i in range(MAX_TENSOR_RANK)]
        else:
            # TODO: we should figure out why there is a key-error here.
            return [], counter

        return [Disj([c1, *c2])], counter

    else:
        raise RuntimeError('Method not yet implemented')
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
@register_inference_rule(operator.gt)
def gt_inference_rule(n: Node, symbols, constraints, counter):
    """
    Generate constraints for operator.gt.

    We make sure this node will not be used again as a tensor. We do not
    generate a constraint about the node itself, only about the operands.
    """
    assert isinstance(n.args[0], (Node, int))
    assert isinstance(n.args[1], (Node, int))

    # resolve each operand: symbol when it is a node, the literal otherwise
    e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
    e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(e1, TVar) and isinstance(e2, TVar):
            # tensor > tensor: elementwise comparison with broadcasting
            gt_tensor, counter = gen_tvar(counter)
            symbols[n] = gt_tensor
            return gen_broadcasting_constraints(e1, e2, symbols, counter, gt_tensor)

        elif isinstance(e1, DVar) and isinstance(e2, DVar):
            # This is meant to be used for flow analysis only
            gt_constraint = BinConstraintD(e1, e2, op_gt)

            my_gt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise RuntimeError('Sort Mismatch')

    elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
        if isinstance(e1, DVar):
            # This is meant to be used for flow analysis only
            gt_constraint = BinConstraintD(e1, e2, op_gt)

            my_gt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
            return [equality_constraint], counter

        elif isinstance(e1, TVar) and isinstance(e2, int):
            # then we made the wrong assumption about the argument being a tensor
            # so we should fix the assumption
            warnings.warn(f'Made the wrong assumption for node {n}. Correctness not guaranteed.')

            # re-bind the argument to a fresh dimension variable
            # (BUGFIX: removed a stray no-op expression statement that
            # re-read symbols[n.args[0]] without using the result)
            new_e1, counter = gen_dvar(counter)
            symbols[n.args[0]] = new_e1

            gt_constraint = BinConstraintD(new_e1, e2, op_gt)

            my_gt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise NotImplementedError('Method not yet implemented')

    else:
        raise NotImplementedError('Method not yet implemented')
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
@register_inference_rule(operator.eq)
def eq_inference_rule(n: Node, symbols, constraints, counter):
    """
    Generate constraints for operator.eq.

    No constraint is generated for the comparison node itself; only
    the operands are constrained.
    """
    lhs, rhs = n.args[0], n.args[1]
    assert isinstance(lhs, (Node, int))
    assert isinstance(rhs, (Node, int))

    e1 = symbols[lhs] if isinstance(lhs, Node) else lhs
    e2 = symbols[rhs] if isinstance(rhs, Node) else rhs

    if isinstance(lhs, Node) and isinstance(rhs, Node):
        if isinstance(e1, TVar) and isinstance(e2, TVar):
            # tensor == tensor: broadcastable elementwise comparison
            eq_tensor, counter = gen_tvar(counter)
            symbols[n] = eq_tensor
            return gen_broadcasting_constraints(e1, e2, symbols, counter, eq_tensor)
        if isinstance(e1, DVar) and isinstance(e2, DVar):
            # dimension == dimension: for flow analysis only
            eq_constraint = BinConstraintD(e1, e2, op_eq)
            my_eq, counter = gen_bvar(counter)
            return [BinConstraintD(my_eq, eq_constraint, op_eq)], counter
        raise RuntimeError('Sort Mismatch')

    if isinstance(lhs, Node) and not isinstance(rhs, Node):
        if isinstance(e1, DVar):
            # dimension == literal: for flow analysis only
            eq_constraint = BinConstraintD(e1, e2, op_eq)
            my_eq, counter = gen_bvar(counter)
            return [BinConstraintD(my_eq, eq_constraint, op_eq)], counter
        raise NotImplementedError('Method not yet implemented')

    raise NotImplementedError('Method not yet implemented')
|
| 624 |
+
|
| 625 |
+
@register_inference_rule(operator.ne)
def neq_inference_rule(n: Node, symbols, constraints, counter):
    """
    Translates to inconsistent in gradual types.
    To prove inequality, we should prove that
    tensors are either different sizes or
    disagree on at least one dimension

    This is a WIP (works when the condition
    is false. We are working on making this operation work
    when the condition is true as well)
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], tuple)

    # implementing for size 3 and 4
    if len(n.args[1]) == 3:

        assert isinstance(n.args[1][0], (Node, int))
        assert isinstance(n.args[1][1], (Node, int))
        assert isinstance(n.args[1][2], (Node, int))

        lhs = symbols[n.args[0]]

        # only 3 fresh dimension variables are needed for a rank-3 tensor
        # (previously 4 were generated and one was unused)
        b, counter = gen_tensor_dims(3, counter)
        input_is_size3 = BinConstraintT(lhs, TensorType([b[0], b[1], b[2]]), op_eq)

        d1 = n.args[1][0] if isinstance(n.args[1][0], int) else symbols[n.args[1][0]]
        d2 = n.args[1][1] if isinstance(n.args[1][1], int) else symbols[n.args[1][1]]
        d3 = n.args[1][2] if isinstance(n.args[1][2], int) else symbols[n.args[1][2]]

        # dimensions not equal
        neq_1 = BinConstraintD(d1, b[0], op_neq)
        neq_2 = BinConstraintD(d2, b[1], op_neq)
        neq_3 = BinConstraintD(d3, b[2], op_neq)

        # dimensions inconsistent: both sides static (not Dyn) and different
        dims_inconsistent1 = Conj([BinConstraintD(d1, Dyn, op_neq), BinConstraintD(b[0], Dyn, op_neq), neq_1])
        dims_inconsistent2 = Conj([BinConstraintD(d2, Dyn, op_neq), BinConstraintD(b[1], Dyn, op_neq), neq_2])
        dims_inconsistent3 = Conj([BinConstraintD(d3, Dyn, op_neq), BinConstraintD(b[2], Dyn, op_neq), neq_3])

        dims_inconsistent = Disj([dims_inconsistent1, dims_inconsistent2, dims_inconsistent3])

        # we are covering size 3 and 4 only for now
        ne_constraint = Conj([input_is_size3, dims_inconsistent])

        my_ne, counter = gen_bvar(counter)
        equality_constraint = BinConstraintD(my_ne, ne_constraint, op_eq)

    elif len(n.args[1]) == 4:

        assert isinstance(n.args[1][0], (Node, int))
        assert isinstance(n.args[1][1], (Node, int))
        assert isinstance(n.args[1][2], (Node, int))
        assert isinstance(n.args[1][3], (Node, int))

        lhs = symbols[n.args[0]]

        b1, counter = gen_dvar(counter)
        b2, counter = gen_dvar(counter)
        b3, counter = gen_dvar(counter)
        b4, counter = gen_dvar(counter)

        input_is_size4 = BinConstraintT(lhs, TensorType([b1, b2, b3, b4]), op_eq)

        d1 = n.args[1][0] if isinstance(n.args[1][0], int) else symbols[n.args[1][0]]
        d2 = n.args[1][1] if isinstance(n.args[1][1], int) else symbols[n.args[1][1]]
        d3 = n.args[1][2] if isinstance(n.args[1][2], int) else symbols[n.args[1][2]]
        d4 = n.args[1][3] if isinstance(n.args[1][3], int) else symbols[n.args[1][3]]

        # dimensions not equal
        neq_1 = BinConstraintD(d1, b1, op_neq)
        neq_2 = BinConstraintD(d2, b2, op_neq)
        neq_3 = BinConstraintD(d3, b3, op_neq)
        neq_4 = BinConstraintD(d4, b4, op_neq)

        # dimensions inconsistent: both sides static (not Dyn) and different
        dims_inconsistent1 = Conj([BinConstraintD(d1, Dyn, op_neq), BinConstraintD(b1, Dyn, op_neq), neq_1])
        dims_inconsistent2 = Conj([BinConstraintD(d2, Dyn, op_neq), BinConstraintD(b2, Dyn, op_neq), neq_2])
        dims_inconsistent3 = Conj([BinConstraintD(d3, Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq), neq_3])
        # BUGFIX: the Dyn guard must test b4 (was b3), otherwise a Dyn b4
        # could still be treated as inconsistent with a static d4
        dims_inconsistent4 = Conj([BinConstraintD(d4, Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq), neq_4])

        dims_inconsistent = Disj([dims_inconsistent1, dims_inconsistent2, dims_inconsistent3, dims_inconsistent4])

        ne_constraint = Conj([input_is_size4, dims_inconsistent])

        my_ne, counter = gen_bvar(counter)
        equality_constraint = BinConstraintD(my_ne, ne_constraint, op_eq)

    else:
        raise NotImplementedError('Method not yet implemented')

    return [equality_constraint], counter
|
| 721 |
+
|
| 722 |
+
|
| 723 |
+
@register_inference_rule(operator.lt)
def lt_inference_rule(n: Node, symbols, constraints, counter):
    """
    Generate constraints for operator.lt.

    No constraint is generated for the comparison node itself; only
    the operands are constrained.
    """
    lhs, rhs = n.args[0], n.args[1]
    assert isinstance(lhs, (Node, int))
    assert isinstance(rhs, (Node, int))

    e1 = symbols[lhs] if isinstance(lhs, Node) else lhs
    e2 = symbols[rhs] if isinstance(rhs, Node) else rhs

    if isinstance(lhs, Node) and isinstance(rhs, Node):
        if isinstance(e1, TVar) and isinstance(e2, TVar):
            # tensor < tensor: broadcastable elementwise comparison
            lt_tensor, counter = gen_tvar(counter)
            symbols[n] = lt_tensor
            return gen_broadcasting_constraints(e1, e2, symbols, counter, lt_tensor)
        if isinstance(e1, DVar) and isinstance(e2, DVar):
            # dimension < dimension: for flow analysis only
            lt_constraint = BinConstraintD(e1, e2, op_lt)
            my_lt, counter = gen_bvar(counter)
            return [BinConstraintD(my_lt, lt_constraint, op_eq)], counter
        raise RuntimeError('Sort Mismatch')

    if isinstance(lhs, Node) and not isinstance(rhs, Node):
        if isinstance(e1, DVar):
            # dimension < literal: for flow analysis only
            lt_constraint = BinConstraintD(e1, e2, op_lt)
            my_lt, counter = gen_bvar(counter)
            return [BinConstraintD(my_lt, lt_constraint, op_eq)], counter
        raise NotImplementedError('Method not yet implemented')

    raise NotImplementedError('Method not yet implemented')
|
| 764 |
+
|
| 765 |
+
|
| 766 |
+
@register_inference_rule(torch.full)
def full_inference_rule(n: Node, symbols, constraints, counter):
    """
    torch.full: the output tensor has exactly the requested size.
    Each size entry is either an int literal or the symbol of a node.
    """
    full, counter = gen_tvar(counter)
    symbols[n] = full

    assert isinstance(n.args[0], Iterable)
    dims = [arg if isinstance(arg, int) else symbols[arg] for arg in n.args[0]]
    c = BinConstraintT(full, TensorType(dims), op_eq)  # type: ignore[arg-type]
    return [c], counter
|
| 778 |
+
|
| 779 |
+
|
| 780 |
+
# TODO normalize index
@register_inference_rule(torch.arange)
def arange_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraint generation for torch.arange.

    Only the single-argument form arange(end) is supported here:
    start and step are fixed at 0 and 1. The output is a rank-1
    tensor whose size d1 is int((end - start) / step), or Dyn when
    any parameter is Dyn.
    """
    start = 0
    step = 1

    if len(n.args) == 1:
        # the single argument is the exclusive end of the range
        end = symbols[n.args[0]]
    else:
        raise NotImplementedError('Not yet implemented')

    # int((end - start) / step)
    d1, counter = gen_dvar(counter)
    size_constraint = BinConstraintD(d1, BinConstraintD(BinConstraintD(end, start, op_sub), step, op_div), op_eq)
    arange, counter = gen_tvar(counter)
    symbols[n] = arange

    # either the a parameter is a number or it is Dyn
    # case 1: some parameter is Dyn, so the output size is Dyn too
    c1 = Disj([BinConstraintD(end, Dyn, op_eq),
               BinConstraintD(start, Dyn, op_eq),
               BinConstraintD(step, Dyn, op_eq)])
    c2 = BinConstraintD(d1, Dyn, op_eq)
    both_dyn = Conj([c1, c2])

    # case 2: all parameters are numbers, so the size formula applies
    c11 = Conj([BinConstraintD(end, Dyn, op_neq),
                BinConstraintD(start, Dyn, op_neq),
                BinConstraintD(step, Dyn, op_neq)])
    c22 = BinConstraintD(d1, Dyn, op_neq)
    both_numbers = Conj([c11, c22, size_constraint])

    # the output is always a rank-1 tensor of size d1
    return [BinConstraintT(arange, TensorType([d1]), op_eq), Disj([both_dyn, both_numbers])], counter
|
| 811 |
+
|
| 812 |
+
def gen_broadcasting_constraints(e1, e2, symbols, counter, output_var):
    """
    Constrain output_var to be the broadcasted result of e1 and e2.

    Two auxiliary tensor variables (not tied to any expression) carry
    the broadcasted forms of the operands; they must be consistent and
    their greatest upper bound is the output.
    """
    # additional vars that don't correspond to expressions
    broadcast_lhs, counter = gen_tvar(counter)
    broadcast_rhs, counter = gen_tvar(counter)

    # generate constraints
    upper_bound = TGreatestUpperBound(output_var, broadcast_lhs, broadcast_rhs)
    broadcast = ApplyBroadcasting(broadcast_lhs, broadcast_rhs, e1, e2)
    consistency = BinConstraintT(broadcast_lhs, broadcast_rhs, op_consistency)
    return [upper_bound, broadcast, consistency], counter
|
| 822 |
+
|
| 823 |
+
|
| 824 |
+
@register_inference_rule(operator.mul)
@register_inference_rule(torch.ne)
@register_inference_rule("ne")
@register_inference_rule(torch.add)
@register_inference_rule(operator.add)
def broadcasting_inference_rule(n: Node, symbols, constraints, counter):
    """
    Shared rule for binary elementwise ops (add, mul, ne).

    Tensor/tensor operands generate broadcasting constraints;
    tensor/scalar keeps the tensor's type; dimension/scalar propagates
    the arithmetic result as a new dimension variable.
    """
    # NOTE(review): op_code stays None for torch.ne / "ne" — the
    # DVar+scalar branches below would then build a BinConstraintD with a
    # None op; presumably those targets only ever reach the tensor
    # branches — confirm against callers
    op_code = None
    if n.target == operator.add or n.target == torch.add:
        op_code = op_add
    elif n.target == operator.mul:
        op_code = op_mul

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(symbols[n.args[0]], TVar) and isinstance(symbols[n.args[1]], TVar):
            # tensor op tensor: broadcast the two operand types
            my_output, counter = gen_tvar(counter)
            symbols[n] = my_output
            e1 = symbols[n.args[0]]
            e2 = symbols[n.args[1]]

            return gen_broadcasting_constraints(e1, e2, symbols, counter, my_output)
        else:
            raise NotImplementedError('Method not yet implemented')

    elif isinstance(n.args[0], Node) and isinstance(n.args[1], (int, float)):
        if isinstance(symbols[n.args[0]], TVar):
            # tensor op scalar: the output type equals the tensor's type
            my_output, counter = gen_tvar(counter)
            symbols[n] = my_output
            e1 = symbols[n.args[0]]
            return [BinConstraintT(my_output, e1, op_eq)], counter
        elif isinstance(symbols[n.args[0]], DVar):
            my_output, counter = gen_dvar(counter)
            symbols[n] = my_output
            e1 = symbols[n.args[0]]

            # we will propagate the runtime value here since this is regular addition
            c = Conj([BinConstraintD(my_output, BinConstraintD(e1, n.args[1], op_code), op_eq),
                      BinConstraintD(0, my_output, op_leq)])
            return [c], counter

    elif isinstance(n.args[1], Node) and isinstance(n.args[0], (int, float)):
        # mirror of the branch above with the scalar on the left
        if isinstance(symbols[n.args[1]], TVar):
            my_output, counter = gen_tvar(counter)
            symbols[n] = my_output
            e2 = symbols[n.args[1]]
            return [BinConstraintT(my_output, e2, op_eq)], counter
        elif isinstance(symbols[n.args[1]], DVar):
            my_output, counter = gen_dvar(counter)
            symbols[n] = my_output
            e2 = symbols[n.args[1]]

            # we will propagate the runtime value here since this is regular addition
            c = Conj([BinConstraintD(my_output, BinConstraintD(e2, n.args[0], op_code), op_eq),
                      BinConstraintD(0, my_output, op_leq)])
            return [c], counter

        else:
            raise NotImplementedError('Method not yet implemented')

    else:
        # TODO generate add constraints for scalar addition
        raise NotImplementedError('Addition not yet implemented')
|
| 886 |
+
|
| 887 |
+
|
| 888 |
+
@register_inference_rule(torch.flatten)
def flatten_inference_rule(n: Node, symbols, constraints, counter):
    """
    torch.flatten: either input and output are both Dyn, or the output
    is a flattening of the input at one of the possible ranks.
    """
    assert isinstance(n.args[0], Node)

    # fresh variable for the flattened result
    flattened, counter = gen_tvar(counter)
    symbols[n] = flattened

    in_var = symbols[n.args[0]]

    # defaults mirror torch.flatten(input, start_dim=1, end_dim=-1)
    start_dim = 1
    end_dim = -1
    if len(n.args) > 1:
        assert isinstance(n.args[1], int)
        start_dim = n.args[1]
    if len(n.args) > 2:
        assert isinstance(n.args[2], int)
        end_dim = n.args[2]

    both_dyn = Conj([BinConstraintT(in_var, Dyn, op_eq),
                     BinConstraintT(flattened, Dyn, op_eq)])

    per_rank = []
    for rank in range(1, MAX_TENSOR_RANK + 1):
        c, counter = generate_flatten_constraints(start_dim, end_dim, in_var, flattened, rank, counter)
        per_rank.append(c)

    return [Disj([both_dyn, *per_rank])], counter
|
| 920 |
+
|
| 921 |
+
|
| 922 |
+
@register_inference_rule(torch.nn.functional.layer_norm)
def layer_norm_functional(n: Node, symbols, constraints, counter):
    """
    We generate the constraint: input = output
    """
    assert isinstance(n.args[0], Node)
    normalized_shape = n.args[1]
    return gen_layer_norm_constraints(n, normalized_shape, symbols, counter)
|
| 929 |
+
|
| 930 |
+
|
| 931 |
+
@register_inference_rule(torch.nn.LayerNorm)
def layer_norm_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    Input and output shapes should be equal.
    Input should be consistent with the normalized_shape
    """
    assert isinstance(n.args[0], Node)
    shape = module_instance.normalized_shape
    return gen_layer_norm_constraints(n, shape, symbols, counter)
|
| 939 |
+
|
| 940 |
+
|
| 941 |
+
def gen_layer_norm_constraints(n: Node, normalized_shape, symbols, counter):
    """
    Shared constraint generation for layer norm (module and functional).

    Either input and output are both Dyn, or they share a tensor type of
    some rank whose trailing dimensions are consistent with
    normalized_shape, with all dimensions natural numbers.
    """
    output, counter = gen_tvar(counter)
    symbols[n] = output
    in_var = symbols[n.args[0]]

    both_dyn = Conj([BinConstraintT(in_var, Dyn, op_eq),
                     BinConstraintT(output, Dyn, op_eq)])

    per_rank = []
    for rank in range(1, MAX_TENSOR_RANK + 1):
        dims, counter = gen_tensor_dims(rank, counter)
        nat_constraints = gen_nat_constraints(dims)

        per_rank.append(Conj([BinConstraintT(in_var, TensorType(dims), op_eq),
                              BinConstraintT(output, TensorType(dims), op_eq)] +
                             add_layer_norm_constraints(dims, list(normalized_shape)) +
                             nat_constraints))
    return [Disj([both_dyn, Disj(per_rank)])], counter
|
| 962 |
+
|
| 963 |
+
@register_inference_rule(torch.nn.Dropout)
@register_inference_rule(torch.nn.ReLU)
def relu_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    Input and output shapes should be equal.
    """
    assert isinstance(n.args[0], Node)

    out_var, counter = gen_tvar(counter)
    symbols[n] = out_var

    in_var = symbols[n.args[0]]
    assert isinstance(in_var, TVar)

    # shape-preserving op: the output type equals the input type
    return [BinConstraintT(in_var, out_var, op_eq)], counter
|
| 975 |
+
|
| 976 |
+
|
| 977 |
+
@register_inference_rule(torch.nn.Linear)
def linear_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    Input and output sizes should be the same except for the last dimension
    If the input is Dyn, then so should the output
    """
    assert isinstance(n.args[0], Node)
    in_f = module_instance.in_features
    out_f = module_instance.out_features
    return linear_constraints(n, in_f, out_f, symbols, counter)
|
| 985 |
+
|
| 986 |
+
|
| 987 |
+
@register_inference_rule("dim") # type: ignore[attr-defined]
|
| 988 |
+
def torch_dim_inference_rule(n: Node, symbols, constraints, counter):
|
| 989 |
+
assert isinstance(n.args[0], Node)
|
| 990 |
+
my_dim, counter = gen_dvar(counter)
|
| 991 |
+
symbols[n] = my_dim
|
| 992 |
+
input = symbols[n.args[0]]
|
| 993 |
+
|
| 994 |
+
input_dyn = BinConstraintT(input, Dyn, op_eq)
|
| 995 |
+
output_dyn = BinConstraintD(my_dim, Dyn, op_eq)
|
| 996 |
+
|
| 997 |
+
c1 = []
|
| 998 |
+
|
| 999 |
+
for i in range(1, MAX_TENSOR_RANK + 1):
|
| 1000 |
+
new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
|
| 1001 |
+
|
| 1002 |
+
c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims_rhs_1), op_eq),
|
| 1003 |
+
BinConstraintD(my_dim, i, op_eq)])
|
| 1004 |
+
c1.append(c_tensor_i)
|
| 1005 |
+
|
| 1006 |
+
return [Disj([Conj([input_dyn, output_dyn]), Disj(c1)])], counter
|
| 1007 |
+
|
| 1008 |
+
|
| 1009 |
+
@register_inference_rule(torch._C._nn.linear)  # type: ignore[attr-defined]
def torch_linear_inference_rule(n: Node, symbols, constraints, counter):
    """
    Functional linear (torch._C._nn.linear).

    The weight (n.args[1]) must be a rank-2 tensor
    [out_features, in_features]; the rest follows the shared
    linear_constraints logic.
    """
    assert isinstance(n.args[0], Node)
    weight_dims, counter = gen_tensor_dims(2, counter)
    equality_constraint = BinConstraintT(symbols[n.args[1]], TensorType(weight_dims), op_eq)
    # use a distinct local name so the `constraints` parameter is not shadowed
    linear_cons, counter = linear_constraints(n, weight_dims[1], weight_dims[0], symbols, counter)
    return [equality_constraint] + linear_cons, counter
|
| 1016 |
+
|
| 1017 |
+
|
| 1018 |
+
def linear_constraints(n: Node, in_features, out_features, symbols, counter):
    """
    Shared constraint generation for nn.Linear and functional linear.

    Either input and output are both Dyn, or, for some rank i, input and
    output are rank-i tensors that agree on all but the last dimension;
    the input's last dimension must be consistent with in_features and
    the output's last dimension equals out_features.
    """
    linear_output, counter = gen_tvar(counter)
    symbols[n] = linear_output
    linear_input = symbols[n.args[0]]

    input_dyn = BinConstraintT(linear_input, Dyn, op_eq)
    output_dyn = BinConstraintT(linear_output, Dyn, op_eq)

    # case 1: fully dynamic input and output
    c1 = Conj([input_dyn, output_dyn])

    # case 2: one disjunct per possible rank
    c2 = []
    for i in range(1, MAX_TENSOR_RANK + 1):
        # fresh dimensions for a rank-i input and a rank-i output
        new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
        new_dims_rhs_2, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)

        c_tensor_i = Conj([BinConstraintT(linear_input, TensorType(new_dims_rhs_1), op_eq),
                           BinConstraintT(linear_output, TensorType(new_dims_rhs_2), op_eq)] +
                          add_linear_constraints(new_dims_rhs_1, new_dims_rhs_2, in_features, out_features) +
                          nat_constraints)
        c2.append(c_tensor_i)
    return [Disj([c1, Disj(c2)])], counter
|
| 1041 |
+
|
| 1042 |
+
def add_layer_norm_constraints(input_dim, normalized_dim):
    """
    Require the input shape to end with dimensions consistent with
    normalized_dim (e.g. input [*, 1024, 1024] for normalized_dim
    [1024, 1024]).

    Args:
        input_dim: input shape of the layer norm
        normalized_dim: normalized_shape parameter of the module instance
    """
    # pattern mismatch: normalized shape is longer than the input shape
    if len(normalized_dim) > len(input_dim):
        return [F()]

    # align the two shapes from the right and require pairwise consistency
    return [BinConstraintD(dim, norm, op_consistency)
            for dim, norm in zip(reversed(input_dim), reversed(normalized_dim))]
|
| 1061 |
+
|
| 1062 |
+
|
| 1063 |
+
def add_linear_constraints(dims1, dims2, in_features, out_features):
    """
    Relate the dimensions of a linear layer's input (dims1) and output
    (dims2): every leading dimension is equal across input and output;
    the input's last dimension must be consistent with in_features and
    the output's last dimension equals out_features.
    """
    assert len(dims1) == len(dims2)
    last = len(dims1) - 1
    constraints = []
    # iterate pairwise instead of indexing with range(len(...))
    for idx, (d_in, d_out) in enumerate(zip(dims1, dims2)):
        if idx == last:
            # feature dimension
            constraints.append(BinConstraintD(d_in, in_features, op_consistency))
            constraints.append(BinConstraintD(d_out, out_features, op_eq))
        else:
            constraints.append(BinConstraintD(d_in, d_out, op_eq))

    return constraints
|
| 1074 |
+
|
| 1075 |
+
|
| 1076 |
+
@register_inference_rule(torch.reshape)
def reshape_inference_rule(n: Node, symbols, constraints, counter):
    """
    torch.reshape: the output has exactly the target type (with -1
    entries treated as Dyn) and the input must be reshapeable to it.
    """
    assert isinstance(n.args[0], Node)

    # fresh variable for the reshaped result
    my_reshape, counter = gen_tvar(counter)
    symbols[n] = my_reshape

    src_var = symbols[n.args[0]]
    target_shape = n.args[1]
    # -1 in a reshape target means "inferred", modelled here as Dyn
    target_type = TensorType([Dyn if d == -1 else d for d in target_shape])  # type: ignore[union-attr]

    output_eq = BinConstraintT(my_reshape, target_type, op_eq)  # type: ignore[union-attr]
    reshapable = CanReshape(src_var, target_type)

    return [output_eq, reshapable], counter
|
| 1091 |
+
|
| 1092 |
+
|
| 1093 |
+
@register_inference_rule(BatchNorm2d)
def batchnorm_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    BatchNorm2d: the input must match a rank-4 tensor and the output
    has the same type as the input.
    """
    assert isinstance(n.args[0], Node)

    # fresh variable for the normalized result
    batchnorm_output, counter = gen_tvar(counter)
    symbols[n] = batchnorm_output
    batchnorm_input = symbols[n.args[0]]

    # four fresh dimension variables for the rank-4 input
    d1, counter = gen_dvar(counter)
    d2, counter = gen_dvar(counter)
    d3, counter = gen_dvar(counter)
    d4, counter = gen_dvar(counter)
    dims = [d1, d2, d3, d4]

    nat_constraints = gen_nat_constraints(dims)

    matches_rank4 = BinConstraintT(batchnorm_input, TensorType(dims), op_matching)
    output_equals_input = BinConstraintT(batchnorm_input, batchnorm_output, op_eq)
    return [matches_rank4, output_equals_input, *nat_constraints], counter
|
| 1113 |
+
|
| 1114 |
+
|
| 1115 |
+
@register_inference_rule(torch.nn.AdaptiveAvgPool2d)
def adaptive_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    AdaptiveAvgPool2d: the input matches a rank-4 tensor; the output
    keeps the first two dimensions and takes the configured output size
    for the last two.
    """
    assert isinstance(n.args[0], Node)

    avg_pool, counter = gen_tvar(counter)
    symbols[n] = avg_pool
    input_var = symbols[n.args[0]]

    # four fresh dimension variables for the rank-4 input
    d1, counter = gen_dvar(counter)
    d2, counter = gen_dvar(counter)
    d3, counter = gen_dvar(counter)
    d4, counter = gen_dvar(counter)
    nat_constraints = gen_nat_constraints([d1, d2, d3, d4])

    matches_rank4 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)
    out_h = module_instance.output_size[0]
    out_w = module_instance.output_size[1]
    output_type = BinConstraintT(avg_pool, TensorType([d1, d2, out_h, out_w]), op_eq)

    return [matches_rank4, output_type, *nat_constraints], counter
|
| 1134 |
+
|
| 1135 |
+
|
| 1136 |
+
@register_inference_rule(Conv2d)
def conv2d_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    Conv2d: the input matches a rank-4 tensor whose second dimension is
    consistent with in_channels; the output type is computed by CalcConv
    from the module's convolution parameters.
    """
    assert isinstance(n.args[0], Node)

    my_conv, counter = gen_tvar(counter)
    symbols[n] = my_conv
    input_var = symbols[n.args[0]]

    # fresh dimension variables for the rank-4 input
    [d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)

    matching = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)

    # the channel dimension must be consistent with in_channels
    channels_ok = BinConstraintD(module_instance.in_channels, d2, op_consistency)

    conv_result = CalcConv(my_conv, input_var,
                           module_instance.out_channels,
                           module_instance.kernel_size,
                           module_instance.padding,
                           module_instance.stride,
                           module_instance.dilation, [d1, d2, d3, d4])

    nat_constraints = gen_nat_constraints([d1, d2, d3, d4])

    return [matching, channels_ok, conv_result, *nat_constraints], counter
|
| 1163 |
+
|
| 1164 |
+
|
| 1165 |
+
@register_inference_rule(torch.nn.MaxPool2d)
def maxpool_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    """
    MaxPool2d: the input matches a rank-4 tensor; the output type is
    computed by CalcMaxPool from the module's pooling parameters.
    """
    assert isinstance(n.args[0], Node)

    maxpool, counter = gen_tvar(counter)
    symbols[n] = maxpool
    input_var = symbols[n.args[0]]

    # fresh dimension variables for the rank-4 input
    [d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)

    matching = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)

    pooled = CalcMaxPool(maxpool, input_var, module_instance.kernel_size, module_instance.padding,
                         module_instance.stride, module_instance.dilation, [d1, d2, d3, d4])

    nat_constraints = gen_nat_constraints([d1, d2, d3, d4])

    return [matching, pooled, *nat_constraints], counter
|
| 1183 |
+
|
| 1184 |
+
|
| 1185 |
+
class ConstraintGenerator:
|
| 1186 |
+
    def __init__(self, traced, graph=None):
        """
        Args:
            traced: a traced module (or tracer root) whose graph and named
                parameters drive constraint generation
            graph: fx graph to use when `traced` has no `.graph` attribute
        """
        self.traced = traced  # traced or tracer.root
        self.traced_params = dict(self.traced.named_parameters())
        # accumulated constraints, populated during generation
        self.constraints = []
        # maps fx nodes to their type/dimension variables
        self.symbol_dict = {}
        self.graph = traced.graph if hasattr(traced, 'graph') else graph
|
| 1192 |
+
|
| 1193 |
+
|
| 1194 |
+
def generate_constraints(self, counter=0):
|
| 1195 |
+
"""
|
| 1196 |
+
Iterate through every node and generate constraints
|
| 1197 |
+
Effect: self.constraints will be populated with the final constraints
|
| 1198 |
+
"""
|
| 1199 |
+
graph = self.graph
|
| 1200 |
+
|
| 1201 |
+
all_constraints = []
|
| 1202 |
+
|
| 1203 |
+
for n in graph.nodes:
|
| 1204 |
+
(constraints, counter) = self.generate_constraints_node(n, counter)
|
| 1205 |
+
all_constraints += constraints
|
| 1206 |
+
|
| 1207 |
+
return Conj(all_constraints), counter
|
| 1208 |
+
|
| 1209 |
+
def generate_constraints_node(self, n: Node, counter):
|
| 1210 |
+
"""
|
| 1211 |
+
Generate constraints the given node:
|
| 1212 |
+
Currently supported operations:
|
| 1213 |
+
- Reshape
|
| 1214 |
+
- Add
|
| 1215 |
+
- conv2d
|
| 1216 |
+
"""
|
| 1217 |
+
|
| 1218 |
+
if n.op == 'placeholder':
|
| 1219 |
+
x, counter = gen_tvar(counter)
|
| 1220 |
+
self.symbol_dict[n] = x
|
| 1221 |
+
|
| 1222 |
+
my_type = n.type
|
| 1223 |
+
|
| 1224 |
+
if n.type != Dyn and (not isinstance(n.type, TensorType)):
|
| 1225 |
+
if n.type == torch.nn.parameter.Parameter:
|
| 1226 |
+
# since we have a parameter, the shape must be static
|
| 1227 |
+
assert 'example_value' in n.meta
|
| 1228 |
+
my_type = TensorType(n.meta['example_value'].size())
|
| 1229 |
+
else:
|
| 1230 |
+
my_type = Dyn
|
| 1231 |
+
|
| 1232 |
+
c1 = BinConstraintT(my_type, x, op_precision)
|
| 1233 |
+
c2 = BinConstraintT(x, MAX_TENSOR_RANK, op_leq)
|
| 1234 |
+
return [c1, c2], counter
|
| 1235 |
+
|
| 1236 |
+
elif n.op == 'call_function':
|
| 1237 |
+
if n.target in _INFERENCE_RULES:
|
| 1238 |
+
return _INFERENCE_RULES[n.target](n, self.symbol_dict, self.constraints, counter)
|
| 1239 |
+
else:
|
| 1240 |
+
raise RuntimeError(f'No inference rule registered for target {n.target}!')
|
| 1241 |
+
|
| 1242 |
+
elif n.op == 'call_module':
|
| 1243 |
+
|
| 1244 |
+
module_instance = self.traced.get_submodule(n.target)
|
| 1245 |
+
if type(module_instance) in _INFERENCE_RULES:
|
| 1246 |
+
return _INFERENCE_RULES[type(module_instance)](n,
|
| 1247 |
+
module_instance,
|
| 1248 |
+
self.symbol_dict,
|
| 1249 |
+
self.constraints, counter)
|
| 1250 |
+
else:
|
| 1251 |
+
raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')
|
| 1252 |
+
|
| 1253 |
+
elif n.op == 'call_method':
|
| 1254 |
+
if n.target in _INFERENCE_RULES:
|
| 1255 |
+
return _INFERENCE_RULES[n.target](n, self.symbol_dict, self.constraints, counter)
|
| 1256 |
+
else:
|
| 1257 |
+
raise RuntimeError(f'No inference rule registered for target {n.target}!')
|
| 1258 |
+
|
| 1259 |
+
elif n.op == 'get_attr':
|
| 1260 |
+
t = self.traced_params.get(n.target, None)
|
| 1261 |
+
|
| 1262 |
+
if isinstance(t, torch.Tensor):
|
| 1263 |
+
if len(t.shape) > 0:
|
| 1264 |
+
res = []
|
| 1265 |
+
for t in t.shape:
|
| 1266 |
+
res.append(t)
|
| 1267 |
+
attr_type = TensorType(res)
|
| 1268 |
+
output, counter = gen_tvar(counter)
|
| 1269 |
+
self.symbol_dict[n] = output
|
| 1270 |
+
return [BinConstraintT(output, attr_type, op_eq)], counter
|
| 1271 |
+
else:
|
| 1272 |
+
# scalar?
|
| 1273 |
+
return [], counter
|
| 1274 |
+
else:
|
| 1275 |
+
return [], counter
|
| 1276 |
+
|
| 1277 |
+
elif n.op == 'output':
|
| 1278 |
+
return [], counter
|
| 1279 |
+
|
| 1280 |
+
else:
|
| 1281 |
+
raise NotImplementedError(f"Method {n.op} not yet implemented")
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py
ADDED
|
@@ -0,0 +1,1040 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
import copy
|
| 3 |
+
import itertools
|
| 4 |
+
from torch.fx.experimental.migrate_gradual_types.constraint_generator import BinConstraintT, MAX_TENSOR_RANK
|
| 5 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import T, BinConstraintD, Conj, Constraint, DVar, TVar, \
|
| 6 |
+
Transpose
|
| 7 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import Disj, TGreatestUpperBound
|
| 8 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import DGreatestUpperBound
|
| 9 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import CalcConv, CalcMaxPool
|
| 10 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import CalcProduct, CanReshape
|
| 11 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, Prod, F, GetItem, GetItemTensor, IndexSelect
|
| 12 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_eq, op_precision, op_leq, op_matching
|
| 13 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_consistency, op_neq
|
| 14 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_mul, op_add, op_sub, op_div, op_mod
|
| 15 |
+
from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar
|
| 16 |
+
from torch.fx.tensor_type import TensorType, Dyn
|
| 17 |
+
from typing import Callable, Dict, List
|
| 18 |
+
|
| 19 |
+
_TRANSFORMATION_RULES: Dict[Constraint, Callable] = {}


def register_transformation_rule(call_target):
    """
    Decorator factory: register ``fn`` as the transformation rule for the
    constraint class ``call_target``.

    Registering the same class twice raises RuntimeError so a rule can never
    be silently replaced. The decorated function is returned unchanged.
    """
    def register(fn):
        already_present = call_target in _TRANSFORMATION_RULES
        if already_present:
            raise RuntimeError(f'Transformation rule already registered for {call_target}!')
        _TRANSFORMATION_RULES[call_target] = fn
        return fn
    return register
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def valid_index(index, dims):
    """
    Return ``T()`` when ``index`` is a valid subscript into ``dims``,
    ``F()`` otherwise.

    Validity is probed by attempting the subscript itself, so negative
    indices follow normal Python indexing semantics.
    """
    try:
        dims[index]
    except IndexError:
        return F()
    return T()
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@register_transformation_rule(Transpose)
def transform_transpose(constraint, counter):
    """
    Similar to a sequence of two index-selects: the two indexed dimensions
    of the input are exchanged in the output. Invalid indices leave the
    dimensions untouched and make the conjunction unsatisfiable via the
    validity sub-constraints.
    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    ok_first = valid_index(constraint.index1, dims)
    ok_second = valid_index(constraint.index2, dims)
    nat_constraints = gen_nat_constraints(dims)

    swapped = copy.deepcopy(dims)
    if ok_first == T() and ok_second == T():
        # exchange the two selected dimensions
        swapped[constraint.index1], swapped[constraint.index2] = \
            dims[constraint.index2], dims[constraint.index1]

    transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                                   *nat_constraints,
                                   ok_first, ok_second,
                                   BinConstraintT(constraint.output, TensorType(swapped), op_eq)])
    return transformed_constraint, counter
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@register_transformation_rule(IndexSelect)
def transform_index_select(constraint, counter):
    """
    The constraints consider the given tensor size, check if the index is valid
    and if so, generate a constraint for replacing the input dimension
    with the required dimension.

    Fix: ``new_dims`` is now always initialized. Previously it was assigned
    only inside the valid-index branch, so an out-of-range index raised
    UnboundLocalError when building the conjunction instead of producing an
    unsatisfiable constraint (the ``is_valid_index`` conjunct, ``F()``,
    already encodes the type error, mirroring ``transform_transpose``).
    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    is_valid_index = valid_index(constraint.index, dims)
    nat_constraints = gen_nat_constraints(dims)

    # if the index is valid then replace the input dimension with the new dimension
    # otherwise the dimensions are left untouched and the clause contains False
    new_dims = copy.deepcopy(dims)
    if is_valid_index == T():
        new_dims[constraint.index] = constraint.dim_replace

    transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                                   *nat_constraints,
                                   is_valid_index,
                                   BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])

    return transformed_constraint, counter
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
@register_transformation_rule(GetItem)
def transform_get_item(constraint, counter):
    """
    generate an equality of the form:
    t = [a1, ..., an]
    then generate constraints that check if the given index is valid
    given this particular tensor size.
    If the index is valid, generate a constraint to get the item
    Note that we already handled the Dyn input case in the previous
    step.
    Args:
        constraint: GetItem which assumes we are getting an item from a tensor (not Dyn)
        counter: variable tracking
    Returns: simplified constraints for GetItem
    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    index_ok = valid_index(constraint.index, dims)

    clauses = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq)]
    clauses.extend(gen_nat_constraints(dims))
    clauses.append(index_ok)

    # if the index is valid, we generate a constraint for getting an item
    # otherwise this clause will have been UNSAT due to the wrong index
    if index_ok == T():
        clauses.append(BinConstraintD(constraint.res, dims[constraint.index], op_eq))

    return Conj(clauses), counter
|
| 122 |
+
|
| 123 |
+
def valid_index_tensor(index, dims):
    """
    Return ``F()`` when the number of slice instances in ``index`` exceeds
    the number of dimensions (a type error), ``T()`` otherwise.

    ``None`` entries do not count against the rank — they add a dimension
    rather than consuming one.
    """
    slice_count = sum(1 for entry in index if isinstance(entry, slice))
    return F() if slice_count > len(dims) else T()
|
| 136 |
+
|
| 137 |
+
@register_transformation_rule(GetItemTensor)
def transform_get_item_tensor(constraint, counter):
    """
    When the index is a tuple, then the output will be a tensor
    TODO: we have to check if this is the case for all HF models

    The cases we are covering here are a tuple with one of:
     - slice with default argument
     - None

    None appends 1 to the input tensor dimensions
    so each occurrence of 'None' increases the rank by 1

    slice with default arguments does not change the rank

    (Cleanup: a redundant ``dim_index = 0`` before the first loop, which never
    used it, has been removed.)
    """
    assert isinstance(constraint.index_tuple, tuple)

    # generate a result tensor of the expected size
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    nat_constraints = gen_nat_constraints(dims)

    # generate a place-holder list of the right rank
    # where "slice" does not contribute to the rank and "None" does
    none_c = constraint.index_tuple.count(None)
    resulting_tensor_dims = (none_c + len(dims)) * [None]

    for i in range(len(constraint.index_tuple)):

        # append 1 to the right location of the resulting tensor
        if constraint.index_tuple[i] is None:
            resulting_tensor_dims[i] = 1

        elif constraint.index_tuple[i] == slice(None, None, None):
            pass

        else:
            raise NotImplementedError('Method not yet implemented')

    # append the remaining dimensions to the right location
    dim_index = 0
    for i in range(len(resulting_tensor_dims)):
        if resulting_tensor_dims[i] is None:
            resulting_tensor_dims[i] = dims[dim_index]
            dim_index += 1

    # check if the index is valid
    is_valid_index = valid_index_tensor(constraint.index_tuple, dims)

    # check if the resulting tensor is within bounds: anything beyond rank 4
    # is rejected outright
    if len(resulting_tensor_dims) > 4:
        return F(), counter

    else:
        constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
                       BinConstraintT(constraint.res, TensorType(resulting_tensor_dims), op_eq),
                       *nat_constraints,
                       is_valid_index]
        return Conj(constraints), counter
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
@register_transformation_rule(BinConstraintT)
def generate_binconstraint_t(constraint, counter):
    """
    Transform binary constraints for tensors

    Dispatches on the constraint operator (precision, matching, consistency,
    leq) and rewrites it into simpler equality/disjunction constraints over
    dimension variables. Operators with no special handling are returned
    unchanged.
    """

    # precision constraints
    if constraint.op == op_precision:
        if constraint.lhs == Dyn:
            # Dyn is less precise than everything: trivially satisfied
            return T(), counter
        elif isinstance(constraint.lhs, TensorType):
            is_fully_static = all(d != Dyn for d in constraint.lhs.__args__)
            if is_fully_static:
                # no Dyn dimensions: precision collapses to plain equality
                return BinConstraintT(constraint.lhs, constraint.rhs, op_eq), counter
            else:
                # introduce a fresh dimension variable for every lhs dimension
                new_dims = []

                for _ in range(len(constraint.lhs.__args__)):
                    dim, counter = gen_dvar(counter)
                    new_dims.append(dim)

                # per-dimension precision, rhs fixed to the fresh dims, and
                # every fresh dim is at least 1
                new_dim_constraints = [BinConstraintD(old_dim, new_dim, op_precision) for
                                       new_dim, old_dim in zip(new_dims, constraint.lhs.__args__)] + \
                    [BinConstraintT(constraint.rhs, TensorType(new_dims), op_eq)] + \
                    [BinConstraintD(1, new_dim, op_leq) for
                     new_dim in new_dims]
                return Conj(new_dim_constraints), counter
        # NOTE(review): an op_precision constraint whose lhs is neither Dyn
        # nor a TensorType falls through and returns None implicitly —
        # presumably unreachable; confirm with the constraint generator.

    # matching
    elif constraint.op == op_matching:
        assert isinstance(constraint.rhs, TensorType)
        d1 = constraint.rhs.__args__[0]
        d2 = constraint.rhs.__args__[1]
        d3 = constraint.rhs.__args__[2]
        d4 = constraint.rhs.__args__[3]

        # either the lhs is fully dynamic (and so is every rhs dimension) ...
        conj = [BinConstraintT(constraint.lhs, Dyn, op_eq),
                BinConstraintD(d1, Dyn, op_eq),
                BinConstraintD(d2, Dyn, op_eq),
                BinConstraintD(d3, Dyn, op_eq),
                BinConstraintD(d4, Dyn, op_eq)]
        # ... or it equals the rank-4 tensor exactly
        return Disj([Conj(conj),
                     BinConstraintT(constraint.lhs, TensorType([d1, d2, d3, d4]), op_eq)]), counter

    elif constraint.op == op_consistency:
        # consistent when either side is Dyn, or the ranks 1..4 cases agree
        c_dyn = Disj([BinConstraintT(constraint.lhs, Dyn, op_eq), BinConstraintT(constraint.rhs, Dyn, op_eq)])
        [c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4], counter = gen_consistency_constraints(constraint, counter)

        return Disj([c_dyn, c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4]), counter

    elif constraint.op == op_leq:
        # rank bound: lhs is Dyn or a tensor of some rank 1..rhs
        assert isinstance(constraint.rhs, int)
        disj = [BinConstraintT(constraint.lhs, Dyn, op_eq)]
        for i in range(1, constraint.rhs + 1):
            dims = []
            for j in range(1, i + 1):
                dim_var, counter = gen_dvar(counter)
                dims.append(dim_var)
            disj.append(BinConstraintT(constraint.lhs, TensorType(dims), op_eq))
        return Disj(disj), counter
    else:
        # operators without a rewrite are passed through untouched
        return constraint, counter
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
@register_transformation_rule(BinConstraintD)
def generate_binconstraint_d(constraint, counter):
    """
    Transform binary constraints for dimensions: precision collapses to
    equality (or is trivially true for Dyn), consistency becomes a
    disjunction with the Dyn cases, and everything else passes through.
    """
    op = constraint.op

    if op == op_precision:
        if isinstance(constraint.lhs, int):
            # a concrete dimension is only as precise as an equal one
            return BinConstraintD(constraint.lhs, constraint.rhs, op_eq), counter
        if constraint.lhs == Dyn:
            # Dyn is below everything in the precision order
            return T(), counter

    elif op == op_consistency:
        equal_case = BinConstraintD(constraint.lhs, constraint.rhs, op_eq)
        rhs_dyn = BinConstraintD(constraint.rhs, Dyn, op_eq)
        lhs_dyn = BinConstraintD(constraint.lhs, Dyn, op_eq)
        return Disj([equal_case, rhs_dyn, lhs_dyn]), counter

    else:
        return constraint, counter
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
@register_transformation_rule(Conj)
def generate_conj(constraint, counter):
    """
    Transform conjunctions by transforming each conjunct in turn,
    threading the variable counter through every call.
    """
    transformed = []
    for conjunct in constraint.conjucts:
        rewritten, counter = transform_constraint(conjunct, counter)
        transformed.append(rewritten)
    return Conj(transformed), counter
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
@register_transformation_rule(Disj)
def generate_disj(constraint, counter):
    """
    Transform disjunctions by transforming each disjunct in turn,
    threading the variable counter through every call.
    """
    transformed = []
    for disjunct in constraint.disjuncts:
        rewritten, counter = transform_constraint(disjunct, counter)
        transformed.append(rewritten)
    return Disj(transformed), counter
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
@register_transformation_rule(TGreatestUpperBound)
def generate_gub(constraint, counter):
    """
    Transform greatest upper bound for tensors. Results in equality and Greatest Upper Bound
    on dimensions: either one operand is Dyn (and the result is Dyn too), or
    one of the rank-specific upper-bound constraints holds.
    """
    either_operand_dyn = Disj([BinConstraintT(constraint.rhs1, Dyn, op_eq),
                               BinConstraintT(constraint.rhs2, Dyn, op_eq)])
    dyn_case = Conj([either_operand_dyn, BinConstraintT(constraint.res, Dyn, op_eq)])

    tensor_cases, counter = gen_greatest_upper_bound(constraint, counter)

    return Disj([dyn_case, *tensor_cases]), counter
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
@register_transformation_rule(DGreatestUpperBound)
def generate_d_gub(constraint, counter):
    """
    Transform greatest upper bound for dimensions into equality constraints:
    if one side is Dyn the result equals the other side; otherwise both
    sides agree and the result equals them.
    """
    rhs1, rhs2, res = constraint.rhs1, constraint.rhs2, constraint.res

    first_is_dyn = Conj([BinConstraintD(rhs1, Dyn, op_eq), BinConstraintD(res, rhs2, op_eq)])
    second_is_dyn = Conj([BinConstraintD(rhs2, Dyn, op_eq), BinConstraintD(res, rhs1, op_eq)])
    both_agree = Conj([BinConstraintD(rhs2, rhs1, op_eq), BinConstraintD(res, rhs1, op_eq)])

    return Disj([first_is_dyn, second_is_dyn, both_agree]), counter
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
@register_transformation_rule(CalcConv)
def generate_calc_conv(constraint, counter):
    """
    Transform convolution constraints: the result is a rank-4 tensor whose
    batch dimension matches the input, whose channel dimension equals the
    layer's output channels, and whose last two dimensions are computed by
    ``calc_last_two_dims``.
    """
    out_dims, counter = gen_tensor_dims(4, counter)

    # the convolution result is a tensor of size 4
    result_rank4 = BinConstraintT(constraint.conv_result,
                                  TensorType([out_dims[0], out_dims[1], out_dims[2], out_dims[3]]), op_eq)

    # the second dimension of the output is equal to the output channels
    channels_match = Conj([BinConstraintD(out_dims[1], constraint.c_out, op_eq),
                           BinConstraintD(out_dims[1], Dyn, op_neq)])

    # the input corresponds to the output in the first dimension of the convolution
    batch_match = BinConstraintD(constraint.matching_constraint[0], out_dims[0], op_eq)

    height_c, width_c = calc_last_two_dims(constraint, out_dims)

    # every output dimension is a natural number
    all_nonneg = Conj([BinConstraintD(0, dim, op_leq) for dim in out_dims])

    return Conj([result_rank4, channels_match, batch_match, height_c, width_c, all_nonneg]), counter
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
@register_transformation_rule(CalcMaxPool)
def generate_calc_maxpool(constraint, counter):
    """
    Transform maxpool constraints: the result is a rank-4 tensor whose batch
    and channel dimensions match the input, and whose last two dimensions
    are computed by ``calc_last_two_dims``.
    """
    out_dims, counter = gen_tensor_dims(4, counter)

    # the maxpool result is a tensor of size 4
    result_rank4 = BinConstraintT(constraint.maxpool_result,
                                  TensorType([out_dims[0], out_dims[1], out_dims[2], out_dims[3]]), op_eq)

    # the input corresponds to the output in the first and second dimension of maxpool
    channel_match = BinConstraintD(constraint.matching_constraint[1], out_dims[1], op_eq)
    batch_match = BinConstraintD(constraint.matching_constraint[0], out_dims[0], op_eq)
    height_c, width_c = calc_last_two_dims(constraint, out_dims)

    # every output dimension is a natural number
    all_nonneg = Conj([BinConstraintD(0, dim, op_leq) for dim in out_dims])

    return Conj([result_rank4, channel_match, batch_match, height_c, width_c, all_nonneg]), counter
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
@register_transformation_rule(CalcProduct)
def generate_calc_product(constraint, counter):
    """
    Transform flatten constraints

    The dimensions in [start, end) are collapsed into a single dimension:
    either Dyn (when any collapsed dimension may be Dyn) or the product of
    the collapsed dimensions. Results whose total rank would exceed 4 are
    rejected with F().
    """
    start = constraint.start
    end = constraint.end
    dims = constraint.dims_to_flatten
    flattened = constraint.flattened
    n = len(constraint.dims_to_flatten)

    # this will be evaluated right here
    boundary_check = (0 <= start and start < end and end <= n)

    c_boundary = T() if boundary_check else F()

    # dimensions kept on either side of the flattened range
    lhs = dims[0:start]
    rhs = dims[end:]
    mid = dims[start:end]

    # every combination of each mid dimension being Dyn or not
    all_possibilities = generate_all_int_dyn_dim_possibilities(mid)

    all_constraints = []

    for p in all_possibilities:
        p = list(p)
        # this tells us there is a dynamic variable
        # NOTE(review): the generator variable shadows the outer `constraint`
        # parameter; scoped to the genexpr, so behavior is unaffected.
        contains_dyn = not(all(constraint.op == op_neq for constraint in p))
        if contains_dyn:
            # any Dyn in the flattened range makes the collapsed dim Dyn
            mid_var = [Dyn]
            total_constraints = lhs + mid_var + rhs
            if len(total_constraints) > 4:
                all_constraints.append(F())
            else:
                all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq)] + p))
        else:
            # all static: the collapsed dim is the (non-Dyn) product of mid
            new_var, counter = gen_dvar(counter)
            mid_eq_prod = Conj([BinConstraintD(new_var, Prod(mid), op_eq), BinConstraintD(new_var, Dyn, op_neq)])
            mid_var = [new_var]
            total_constraints = lhs + mid_var + rhs
            if len(total_constraints) > 4:
                all_constraints.append(F())
            else:
                all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq), mid_eq_prod] + p))

    return Conj([Disj(all_constraints), c_boundary]), counter
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
@register_transformation_rule(CanReshape)
def generate_reshape(constraint, counter):
    """
    Transform reshape constraints.

    Emits a disjunction over the possible ranks (1-4) of the source tensor.
    For a fully static target shape, the product of the (non-Dyn) source
    dimensions must equal the product of the target; when the target
    contains Dyn, the static source dimensions must be divisible by the
    product of the static target dimensions.

    Bug fix: ``d4_eq_dyn`` / ``d4_neq_dyn`` were previously built from
    ``d3`` instead of ``d4``, which left the fourth dimension unconstrained
    (and constrained ``d3`` twice) in the rank-4 case of the non-static
    branch.
    """
    d, counter = gen_tensor_dims(4, counter)

    d1 = d[0]
    d2 = d[1]
    d3 = d[2]
    d4 = d[3]

    target = constraint.target.__args__

    is_fully_static = all(d != Dyn for d in target)

    # dynamic tensor
    c1_dyn = BinConstraintT(constraint.src, Dyn, op_eq)
    # source has rank 1..4
    c2_tensor1 = BinConstraintT(constraint.src, TensorType([d1]), op_eq)
    c2_tensor2 = BinConstraintT(constraint.src, TensorType([d1, d2]), op_eq)
    c2_tensor3 = BinConstraintT(constraint.src, TensorType([d1, d2, d3]), op_eq)
    c2_tensor4 = BinConstraintT(constraint.src, TensorType([d1, d2, d3, d4]), op_eq)

    d1_eq_dyn = BinConstraintD(d1, Dyn, op_eq)
    d1_neq_dyn = BinConstraintD(d1, Dyn, op_neq)

    d2_eq_dyn = BinConstraintD(d2, Dyn, op_eq)
    d2_neq_dyn = BinConstraintD(d2, Dyn, op_neq)

    d3_eq_dyn = BinConstraintD(d3, Dyn, op_eq)
    d3_neq_dyn = BinConstraintD(d3, Dyn, op_neq)

    # fixed: these previously referenced d3
    d4_eq_dyn = BinConstraintD(d4, Dyn, op_eq)
    d4_neq_dyn = BinConstraintD(d4, Dyn, op_neq)

    nat_d1 = BinConstraintD(0, d1, op_leq)
    nat_d2 = BinConstraintD(0, d2, op_leq)
    nat_d3 = BinConstraintD(0, d3, op_leq)
    nat_d4 = BinConstraintD(0, d4, op_leq)

    if is_fully_static:
        # size 1 tensor
        c3_tensor1 = Disj([d1_eq_dyn,
                           (Conj([d1_neq_dyn,
                                  BinConstraintD(d1, Prod(target), op_eq)]))])
        all_tensor_1 = Conj([c2_tensor1, c3_tensor1])

        # size 2 tensor
        all_tensor_2 = Conj([c2_tensor2, gen_all_reshape_possibilities([d1, d2], target)])

        # size 3 tensor
        all_tensor_3 = Conj([c2_tensor3, gen_all_reshape_possibilities([d1, d2, d3], target)])

        # size 4 tensor
        all_tensor_4 = Conj([c2_tensor4, gen_all_reshape_possibilities([d1, d2, d3, d4], target)])

        return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]),
                     nat_d1, nat_d2, nat_d3, nat_d4]), counter

    # then there must be exactly one occurrence of dyn
    else:
        # keep only the static target dimensions for the divisibility checks
        new_target = []

        for n in target:
            if n != Dyn:
                new_target.append(n)

        # tensor 1
        c3_tensor1 = Disj([d1_eq_dyn,
                           (Conj([d1_neq_dyn,
                                  is_dim_div_by_target(new_target, d1)]))])
        all_tensor_1 = Conj([c2_tensor1, c3_tensor1])

        # tensor 2
        c21 = Disj([d1_eq_dyn, d2_eq_dyn])
        c22 = Conj([d1_neq_dyn, d2_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2]))])
        all_tensor_2 = Conj([c2_tensor2, Disj([c21, c22])])

        # tensor 3
        c31 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn])
        c32 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3]))])
        all_tensor_3 = Conj([c2_tensor3, Disj([c31, c32])])

        # tensor 4
        c41 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn, d4_eq_dyn])
        c42 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, d4_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3, d4]))])
        all_tensor_4 = Conj([c2_tensor4, Disj([c41, c42])])

        return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]),
                     nat_d1, nat_d2, nat_d3, nat_d4]), counter
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
@register_transformation_rule(ApplyBroadcasting)
|
| 520 |
+
def generate_broadcasting(constraint, counter):
|
| 521 |
+
"""
|
| 522 |
+
Transform broadcasting constraints
|
| 523 |
+
"""
|
| 524 |
+
e11, e12 = constraint.res1, constraint.res2
|
| 525 |
+
e1, e2 = constraint.input1, constraint.input2
|
| 526 |
+
|
| 527 |
+
e1_dyn = BinConstraintT(e1, Dyn, op_eq)
|
| 528 |
+
e2_dyn = BinConstraintT(e2, Dyn, op_eq)
|
| 529 |
+
|
| 530 |
+
# Introduce dimensions
|
| 531 |
+
e1_equal_e11 = BinConstraintT(e1, e11, op_eq)
|
| 532 |
+
e2_equal_e12 = BinConstraintT(e2, e12, op_eq)
|
| 533 |
+
|
| 534 |
+
# dyn possibility
|
| 535 |
+
e1_dyn_constraint = Conj([e1_dyn, e1_equal_e11, e2_equal_e12])
|
| 536 |
+
e2_dyn_constraint = Conj([e2_dyn, e1_equal_e11, e2_equal_e12])
|
| 537 |
+
|
| 538 |
+
# tensor possibility
|
| 539 |
+
# generate dimensions to create tensors of size 1
|
| 540 |
+
final_tensor_1_constraint, _, _, nat_dims_1, counter = \
|
| 541 |
+
gen_broadcasting_constraints(e1, e2, e11, e12, 1, counter)
|
| 542 |
+
|
| 543 |
+
# generate dimensions to create tensors of size 2
|
| 544 |
+
final_tensor_2_constraint_no_padding, final_tensor_2_constraint_padding_arg1, \
|
| 545 |
+
final_tensor_2_constraint_padding_arg2, nat_dims_2, counter = \
|
| 546 |
+
gen_broadcasting_constraints(e1, e2, e11, e12, 2, counter)
|
| 547 |
+
|
| 548 |
+
# generate dimensions to create tensors of size 3
|
| 549 |
+
final_tensor_3_constraint_no_padding, final_tensor_3_constraint_padding_arg1, \
|
| 550 |
+
final_tensor_3_constraint_padding_arg2, nat_dims_3, counter = \
|
| 551 |
+
gen_broadcasting_constraints(e1, e2, e11, e12, 3, counter)
|
| 552 |
+
|
| 553 |
+
# generate dimensions to create tensors of size 4
|
| 554 |
+
final_tensor_4_constraint_no_padding, final_tensor_4_constraint_padding_arg1, \
|
| 555 |
+
final_tensor_4_constraint_padding_arg2, nat_dims_4, counter = \
|
| 556 |
+
gen_broadcasting_constraints(e1, e2, e11, e12, 4, counter)
|
| 557 |
+
|
| 558 |
+
final_result = Disj([
|
| 559 |
+
e1_dyn_constraint,
|
| 560 |
+
e2_dyn_constraint,
|
| 561 |
+
final_tensor_1_constraint,
|
| 562 |
+
final_tensor_2_constraint_no_padding,
|
| 563 |
+
final_tensor_2_constraint_padding_arg1,
|
| 564 |
+
final_tensor_2_constraint_padding_arg2,
|
| 565 |
+
final_tensor_3_constraint_no_padding,
|
| 566 |
+
final_tensor_3_constraint_padding_arg1,
|
| 567 |
+
final_tensor_3_constraint_padding_arg2,
|
| 568 |
+
final_tensor_4_constraint_no_padding,
|
| 569 |
+
final_tensor_4_constraint_padding_arg1,
|
| 570 |
+
final_tensor_4_constraint_padding_arg2
|
| 571 |
+
])
|
| 572 |
+
|
| 573 |
+
return Conj([final_result, *nat_dims_1, *nat_dims_2, *nat_dims_3, *nat_dims_4]), counter
|
| 574 |
+
|
| 575 |
+
|
| 576 |
+
def transform_constraint(constraint: Constraint, counter: int):
|
| 577 |
+
"""
|
| 578 |
+
Transforms a constraint into a simpler constraint.
|
| 579 |
+
Ex: precision and consistency are transformed to equality
|
| 580 |
+
Args:
|
| 581 |
+
constraint: constraint to be transformed
|
| 582 |
+
counter: for variable tracking
|
| 583 |
+
|
| 584 |
+
Returns: Constraint
|
| 585 |
+
|
| 586 |
+
"""
|
| 587 |
+
if type(constraint) in _TRANSFORMATION_RULES:
|
| 588 |
+
return _TRANSFORMATION_RULES[type(constraint)](constraint, counter)
|
| 589 |
+
|
| 590 |
+
else:
|
| 591 |
+
return constraint, counter
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
def calc_last_two_dims(constraint, d: List[DVar]):
|
| 597 |
+
"""
|
| 598 |
+
Generates constraints for the last two dimensions of a convolution or a maxpool output
|
| 599 |
+
Args:
|
| 600 |
+
constraint: CalcConv or CalcMaxPool
|
| 601 |
+
d: The list of output dimensions
|
| 602 |
+
|
| 603 |
+
Returns: Constraints for calculating the last two dimensions of the output
|
| 604 |
+
|
| 605 |
+
"""
|
| 606 |
+
|
| 607 |
+
assert isinstance(constraint, (CalcConv, CalcMaxPool))
|
| 608 |
+
|
| 609 |
+
b3 = constraint.matching_constraint[2]
|
| 610 |
+
b4 = constraint.matching_constraint[3]
|
| 611 |
+
|
| 612 |
+
b3_dyn = Conj([BinConstraintD(d[2], Dyn, op_eq), BinConstraintD(b3, Dyn, op_eq)])
|
| 613 |
+
b4_dyn = Conj([BinConstraintD(d[3], Dyn, op_eq), BinConstraintD(b4, Dyn, op_eq)])
|
| 614 |
+
|
| 615 |
+
d3_not_dyn = Conj([BinConstraintD(d[2], Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq)])
|
| 616 |
+
d4_not_dyn = Conj([BinConstraintD(d[3], Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq)])
|
| 617 |
+
|
| 618 |
+
# transform parameters into tuples incase they are not already
|
| 619 |
+
padding = (constraint.padding, constraint.padding) \
|
| 620 |
+
if isinstance(constraint.padding, int) else constraint.padding
|
| 621 |
+
kernel = (constraint.kernel, constraint.kernel) \
|
| 622 |
+
if isinstance(constraint.kernel, int) else constraint.kernel
|
| 623 |
+
stride = (constraint.stride, constraint.stride) \
|
| 624 |
+
if isinstance(constraint.stride, int) else constraint.stride
|
| 625 |
+
dilation = (constraint.dilation, constraint.dilation) \
|
| 626 |
+
if isinstance(constraint.dilation, int) else constraint.dilation
|
| 627 |
+
|
| 628 |
+
f1 = BinConstraintD(b3, BinConstraintD(2, padding[0], op_mul), op_add)
|
| 629 |
+
f2 = BinConstraintD(dilation[0], BinConstraintD(kernel[0], 1, op_sub), op_mul)
|
| 630 |
+
f3 = BinConstraintD(BinConstraintD(BinConstraintD(f1, f2, op_sub), 1, op_sub), stride[0], op_div)
|
| 631 |
+
f4 = BinConstraintD(f3, 1, op_add)
|
| 632 |
+
|
| 633 |
+
c4 = Disj([b3_dyn, Conj([d3_not_dyn, BinConstraintD(d[2], f4, op_eq)])])
|
| 634 |
+
|
| 635 |
+
f11 = BinConstraintD(b4, BinConstraintD(2, padding[1], op_mul), op_add)
|
| 636 |
+
f22 = BinConstraintD(dilation[1], BinConstraintD(kernel[1], 1, op_sub), op_mul)
|
| 637 |
+
f33 = BinConstraintD(BinConstraintD(BinConstraintD(f11, f22, op_sub), 1, op_sub), stride[1], op_div)
|
| 638 |
+
f44 = BinConstraintD(f33, 1, op_add)
|
| 639 |
+
|
| 640 |
+
c5 = Disj([b4_dyn, Conj([d4_not_dyn, BinConstraintD(d[3], f44, op_eq)])])
|
| 641 |
+
|
| 642 |
+
return c4, c5
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
def generate_all_int_dyn_dim_possibilities(my_list: List[DVar]):
|
| 646 |
+
"""
|
| 647 |
+
Generate all possibilities of being equal or not equal to dyn for my_list
|
| 648 |
+
Args:
|
| 649 |
+
my_list: List of tensor dimensions
|
| 650 |
+
|
| 651 |
+
Returns: A list of a list of constraints. Each list of constraints corresponds to
|
| 652 |
+
one possibility about the values of the dimension variables
|
| 653 |
+
"""
|
| 654 |
+
# generate all possibilities of being equal or not equal to dyn for my_list
|
| 655 |
+
eq_possibilities = [BinConstraintD(my_list[i], Dyn, op_eq) for i in range(len(my_list))]
|
| 656 |
+
neq_possibilities = [BinConstraintD(my_list[i], Dyn, op_neq) for i in range(len(my_list))]
|
| 657 |
+
d_possibilities = []
|
| 658 |
+
|
| 659 |
+
for i in zip(eq_possibilities, neq_possibilities):
|
| 660 |
+
d_possibilities.append(list(i))
|
| 661 |
+
all_possibilities = list(itertools.product(*d_possibilities))
|
| 662 |
+
return all_possibilities
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
def is_target_div_by_dim(target: List[int], dim: List[DVar]):
|
| 666 |
+
"""
|
| 667 |
+
Generate constraints to check if the target dimensions are divisible by the input dimensions
|
| 668 |
+
Args:
|
| 669 |
+
target: Target dimensions
|
| 670 |
+
dim: Input dimensions
|
| 671 |
+
|
| 672 |
+
Returns: Constraints to check divisibility
|
| 673 |
+
|
| 674 |
+
"""
|
| 675 |
+
return BinConstraintD(BinConstraintD(Prod(target), dim, op_mod), 0, op_eq)
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
def is_dim_div_by_target(target: List[int], dim: List[DVar]):
|
| 679 |
+
"""
|
| 680 |
+
Generate constraints to check if the input dimensions is divisible by the target dimensions
|
| 681 |
+
Args:
|
| 682 |
+
target: Target dimensions
|
| 683 |
+
dim: Input dimensions
|
| 684 |
+
|
| 685 |
+
Returns: Constraints to check divisibility
|
| 686 |
+
|
| 687 |
+
"""
|
| 688 |
+
return BinConstraintD(BinConstraintD(dim, Prod(target), op_mod), 0, op_eq)
|
| 689 |
+
|
| 690 |
+
|
| 691 |
+
def gen_all_reshape_possibilities(list_of_dims, target):
|
| 692 |
+
"""
|
| 693 |
+
Consider all possibilities what the input dimensions could be (number or dynamic)
|
| 694 |
+
Then generate the appropriate constraints using multiplication or mod depending on the possibility
|
| 695 |
+
The possibilities we consider here are the cross product of being equal to dyn or not equal to dyn
|
| 696 |
+
for the input. Target is fixed because at most one dimension could be dyn.
|
| 697 |
+
We have different cases for this.
|
| 698 |
+
|
| 699 |
+
Args:
|
| 700 |
+
list_of_dims: The input list of dimensions
|
| 701 |
+
target: The tensor we want to reshape to
|
| 702 |
+
|
| 703 |
+
Returns: A disjunction of transformed reshape constraints
|
| 704 |
+
|
| 705 |
+
"""
|
| 706 |
+
all_possibilities = generate_all_int_dyn_dim_possibilities(list_of_dims)
|
| 707 |
+
|
| 708 |
+
all_constraints = []
|
| 709 |
+
|
| 710 |
+
for p in all_possibilities:
|
| 711 |
+
to_multiply = []
|
| 712 |
+
|
| 713 |
+
p = list(p)
|
| 714 |
+
|
| 715 |
+
for constraint in p:
|
| 716 |
+
assert isinstance(constraint, BinConstraintD)
|
| 717 |
+
if constraint.op == op_neq:
|
| 718 |
+
to_multiply.append(constraint.lhs)
|
| 719 |
+
|
| 720 |
+
if not to_multiply:
|
| 721 |
+
all_constraints.append(Conj(p))
|
| 722 |
+
|
| 723 |
+
elif len(to_multiply) < len(list_of_dims):
|
| 724 |
+
all_constraints.append(Conj(p + [is_target_div_by_dim(target, Prod(to_multiply))]))
|
| 725 |
+
else:
|
| 726 |
+
all_constraints.append(Conj(p + [BinConstraintD(Prod(list_of_dims),
|
| 727 |
+
Prod(target), op_eq)]))
|
| 728 |
+
|
| 729 |
+
return Disj(all_constraints)
|
| 730 |
+
|
| 731 |
+
|
| 732 |
+
def broadcast_dim(tensor_input1, tensor_input2, res1, res2, index, padding=False):
|
| 733 |
+
"""
|
| 734 |
+
Apply broadcasting to the 'index' dimension of tensor_input1.
|
| 735 |
+
Args:
|
| 736 |
+
tensor_input1: should represent [d1, ..., d_index, ...] where d_index = 1
|
| 737 |
+
tensor_input2: represents the second input
|
| 738 |
+
res1: broadcasted result 1
|
| 739 |
+
res2: broadcasted result 2
|
| 740 |
+
index: the index to broadcast
|
| 741 |
+
padding: If padding was used, then tensor_input1[index] does not exist
|
| 742 |
+
|
| 743 |
+
Returns:
|
| 744 |
+
|
| 745 |
+
"""
|
| 746 |
+
if tensor_input1[index] is None:
|
| 747 |
+
assert padding
|
| 748 |
+
|
| 749 |
+
|
| 750 |
+
if not padding:
|
| 751 |
+
# then the inputs are the same length so they all have dimensions at "index"
|
| 752 |
+
return Conj([BinConstraintD(tensor_input1[index], 1, op_eq),
|
| 753 |
+
BinConstraintD(res1[index], res2[index], op_eq),
|
| 754 |
+
BinConstraintD(res2[index], tensor_input2[index], op_eq)])
|
| 755 |
+
|
| 756 |
+
else:
|
| 757 |
+
# we don't set the input dimension to 1, since it doesn't exist.
|
| 758 |
+
return Conj([BinConstraintD(res1[index], res2[index], op_eq),
|
| 759 |
+
BinConstraintD(res2[index], tensor_input2[index], op_eq)])
|
| 760 |
+
|
| 761 |
+
|
| 762 |
+
def apply_padding(e1_var: TVar,
|
| 763 |
+
e11: BinConstraintT,
|
| 764 |
+
e2: BinConstraintT,
|
| 765 |
+
e12: BinConstraintT,
|
| 766 |
+
d2: List[DVar],
|
| 767 |
+
d11: List[DVar],
|
| 768 |
+
d12: List[DVar],
|
| 769 |
+
counter: int):
|
| 770 |
+
"""
|
| 771 |
+
We are considering the possibility where one input has less dimensions than
|
| 772 |
+
another input, so we apply padding to the broadcasted results
|
| 773 |
+
|
| 774 |
+
Args:
|
| 775 |
+
e1_var: Variable representing the first input where padding will be
|
| 776 |
+
e11: constraint of the form e11 = Tensortype[d1, ..., dn]
|
| 777 |
+
e2: constraint of the form e2 = Tensortype[d1, ..., dn]
|
| 778 |
+
e12: constraint of the form e11 = Tensortype[d1, ..., dn]
|
| 779 |
+
d2: Tensor variables for the second input
|
| 780 |
+
d11: Tensor variables for the broadcasted first input
|
| 781 |
+
d12: Tensor variables for the broadcasted second input
|
| 782 |
+
counter: variable tracking
|
| 783 |
+
|
| 784 |
+
Returns: A new constraint whose goal is to apply padding to the broadcasted result
|
| 785 |
+
|
| 786 |
+
"""
|
| 787 |
+
|
| 788 |
+
res = []
|
| 789 |
+
|
| 790 |
+
# pad the shorter input with None so we can pass it to the broadcasting helper function
|
| 791 |
+
for i in range(1, len(d2)):
|
| 792 |
+
|
| 793 |
+
d1, counter = gen_tensor_dims(i, counter)
|
| 794 |
+
|
| 795 |
+
nat_constraints = gen_nat_constraints(d1 + d2 + d11 + d12)
|
| 796 |
+
|
| 797 |
+
e1 = BinConstraintT(e1_var, TensorType(d1), op_eq)
|
| 798 |
+
|
| 799 |
+
simulate_padding = [None] * (len(d2) - i)
|
| 800 |
+
|
| 801 |
+
assert len(simulate_padding + d1) == len(d2)
|
| 802 |
+
|
| 803 |
+
broadcast_padding = []
|
| 804 |
+
|
| 805 |
+
# for every padding size, we also consider broadcasting
|
| 806 |
+
for j in range(len(d2) - i):
|
| 807 |
+
broadcast_padding.append(broadcast_dim(simulate_padding, d2, d11, d12, j, True))
|
| 808 |
+
|
| 809 |
+
# we consider the possibilities for broadcasting for every dimension. Since we already
|
| 810 |
+
# padded d1, we do not consider it while broadcasting
|
| 811 |
+
all_broadcasting_possibilities = generate_all_broadcasting_possibilities_no_padding(d1,
|
| 812 |
+
d2[(len(d2) - i):],
|
| 813 |
+
d11[(len(d2) - i):],
|
| 814 |
+
d12[(len(d2) - i):])
|
| 815 |
+
# combine all constraints into a conjunction
|
| 816 |
+
c = Conj([e1, e11, e2, e12,
|
| 817 |
+
*broadcast_padding,
|
| 818 |
+
all_broadcasting_possibilities,
|
| 819 |
+
*nat_constraints
|
| 820 |
+
])
|
| 821 |
+
res.append(c)
|
| 822 |
+
|
| 823 |
+
return Disj(res), counter
|
| 824 |
+
|
| 825 |
+
|
| 826 |
+
def no_broadcast_dim_with_index(d1: List[DVar],
|
| 827 |
+
d2: List[DVar],
|
| 828 |
+
d3: List[DVar],
|
| 829 |
+
d4: List[DVar],
|
| 830 |
+
i: int):
|
| 831 |
+
"""
|
| 832 |
+
Args:
|
| 833 |
+
d1: input 1
|
| 834 |
+
d2: input 2
|
| 835 |
+
d3: simulated broadcasting for input 1
|
| 836 |
+
d4: simulated broadcasting for input 2
|
| 837 |
+
i: the rank of the resulting tensor addition
|
| 838 |
+
|
| 839 |
+
Returns: Constraints for when no broadcasting occurs
|
| 840 |
+
"""
|
| 841 |
+
return Conj([
|
| 842 |
+
Disj([
|
| 843 |
+
Conj([BinConstraintD(d1[i], 1, op_eq),
|
| 844 |
+
BinConstraintD(d2[i], 1, op_eq)]),
|
| 845 |
+
|
| 846 |
+
Conj([BinConstraintD(d1[i], 1, op_neq),
|
| 847 |
+
BinConstraintD(d2[i], 1, op_neq)])]),
|
| 848 |
+
|
| 849 |
+
BinConstraintD(d1[i], d3[i], op_eq),
|
| 850 |
+
BinConstraintD(d2[i], d4[i], op_eq)])
|
| 851 |
+
|
| 852 |
+
|
| 853 |
+
|
| 854 |
+
def gen_lists_of_dims(num_tensors: int, dim_size: int, counter: int):
|
| 855 |
+
"""
|
| 856 |
+
Generate lists of DVar to represent tensor dimensions
|
| 857 |
+
Args:
|
| 858 |
+
num_tensors: the required number of tensors
|
| 859 |
+
dim_size: the number of dimensions for each tensor
|
| 860 |
+
counter: variable tracking
|
| 861 |
+
|
| 862 |
+
Returns: A list of a list of tensor dimensions
|
| 863 |
+
|
| 864 |
+
"""
|
| 865 |
+
res = []
|
| 866 |
+
|
| 867 |
+
for _ in range(num_tensors):
|
| 868 |
+
dims, counter = gen_tensor_dims(dim_size, counter)
|
| 869 |
+
res.append(dims)
|
| 870 |
+
|
| 871 |
+
return res, counter
|
| 872 |
+
|
| 873 |
+
|
| 874 |
+
def create_equality_constraints_for_broadcasting(e1: TVar,
|
| 875 |
+
e2: TVar,
|
| 876 |
+
e11: TVar,
|
| 877 |
+
e12: TVar,
|
| 878 |
+
d1: List[DVar],
|
| 879 |
+
d2: List[DVar],
|
| 880 |
+
d11: List[DVar],
|
| 881 |
+
d12: List[DVar]):
|
| 882 |
+
"""
|
| 883 |
+
Create equality constraints for when no broadcasting occurs
|
| 884 |
+
Args:
|
| 885 |
+
e1: Input 1
|
| 886 |
+
e2: Input 2
|
| 887 |
+
e11: Broadcasted input 1
|
| 888 |
+
e12: Broadcasted input 2
|
| 889 |
+
d1: Variables that store dimensions for e1
|
| 890 |
+
d2: Variables that store dimensions for e2
|
| 891 |
+
d11: Variables that store dimensions for e11
|
| 892 |
+
d12: Variables that store dimensions for e22
|
| 893 |
+
|
| 894 |
+
Returns: Four equality constraints
|
| 895 |
+
|
| 896 |
+
"""
|
| 897 |
+
|
| 898 |
+
e1_tensor = BinConstraintT(e1, TensorType(d1), op_eq)
|
| 899 |
+
e11_tensor = BinConstraintT(e11, TensorType(d11), op_eq)
|
| 900 |
+
e2_tensor = BinConstraintT(e2, TensorType(d2), op_eq)
|
| 901 |
+
e12_tensor = BinConstraintT(e12, TensorType(d12), op_eq)
|
| 902 |
+
return [e1_tensor, e11_tensor, e2_tensor, e12_tensor]
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
def gen_consistency_constraints(constraint: Constraint, counter: int):
|
| 906 |
+
"""
|
| 907 |
+
Args:
|
| 908 |
+
constraint: Consistency constraint on tensors
|
| 909 |
+
counter: for variable tracking
|
| 910 |
+
|
| 911 |
+
Returns: Equality and consistency constraints on dimensions
|
| 912 |
+
|
| 913 |
+
"""
|
| 914 |
+
|
| 915 |
+
all_constraints = []
|
| 916 |
+
|
| 917 |
+
for i in range(1, MAX_TENSOR_RANK + 1):
|
| 918 |
+
new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
|
| 919 |
+
new_dims_rhs_2, counter = gen_tensor_dims(i, counter)
|
| 920 |
+
|
| 921 |
+
nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)
|
| 922 |
+
|
| 923 |
+
c_tensor_i = Conj([BinConstraintT(constraint.lhs, TensorType(new_dims_rhs_1), op_eq),
|
| 924 |
+
BinConstraintT(constraint.rhs, TensorType(new_dims_rhs_2), op_eq)] +
|
| 925 |
+
[BinConstraintD(d1, d2, op_consistency) for
|
| 926 |
+
d1, d2 in zip(new_dims_rhs_1, new_dims_rhs_2)] + nat_constraints)
|
| 927 |
+
|
| 928 |
+
all_constraints.append(c_tensor_i)
|
| 929 |
+
|
| 930 |
+
return all_constraints, counter
|
| 931 |
+
|
| 932 |
+
|
| 933 |
+
def gen_greatest_upper_bound(constraint: TGreatestUpperBound, counter: int):
|
| 934 |
+
"""
|
| 935 |
+
Args:
|
| 936 |
+
constraint: Greatest upper bound on tensors
|
| 937 |
+
counter: variable tracking
|
| 938 |
+
|
| 939 |
+
Returns: A set of equality constraints and DGreatestUpperBound constraints
|
| 940 |
+
|
| 941 |
+
"""
|
| 942 |
+
|
| 943 |
+
all_constraints = []
|
| 944 |
+
|
| 945 |
+
for i in range(1, MAX_TENSOR_RANK + 1):
|
| 946 |
+
c = []
|
| 947 |
+
dims1, counter = gen_tensor_dims(i, counter)
|
| 948 |
+
c1tensor = TensorType(dims1)
|
| 949 |
+
|
| 950 |
+
dims2, counter = gen_tensor_dims(i, counter)
|
| 951 |
+
c2tensor = TensorType(dims2)
|
| 952 |
+
|
| 953 |
+
dims3, counter = gen_tensor_dims(i, counter)
|
| 954 |
+
c3tensor = TensorType(dims3)
|
| 955 |
+
|
| 956 |
+
c += [BinConstraintT(constraint.rhs1, c1tensor, op_eq),
|
| 957 |
+
BinConstraintT(constraint.rhs2, c2tensor, op_eq),
|
| 958 |
+
BinConstraintT(constraint.res, c3tensor, op_eq)] + \
|
| 959 |
+
gen_nat_constraints(dims1 + dims2 + dims3)
|
| 960 |
+
|
| 961 |
+
assert len(c3tensor.__args__) == len(c1tensor.__args__) == len(c2tensor.__args__)
|
| 962 |
+
for i in range(len(c3tensor.__args__)):
|
| 963 |
+
c.append(DGreatestUpperBound(c3tensor.__args__[i],
|
| 964 |
+
c1tensor.__args__[i],
|
| 965 |
+
c2tensor.__args__[i]))
|
| 966 |
+
|
| 967 |
+
all_constraints.append(Conj(c))
|
| 968 |
+
return all_constraints, counter
|
| 969 |
+
|
| 970 |
+
|
| 971 |
+
def generate_all_broadcasting_possibilities_no_padding(d1: List[DVar], d2: List[DVar], d11: List[DVar], d12: List[DVar]):
|
| 972 |
+
"""
|
| 973 |
+
Generate broadcasting constraints assuming no padding. Broadcasting can happen at any dimension.
|
| 974 |
+
We look at all combinations for all dimensions in d1 and d2
|
| 975 |
+
Args:
|
| 976 |
+
d1: input1 dimensions
|
| 977 |
+
d2: input2 dimensions
|
| 978 |
+
d11: broadcasted input1 dimensions
|
| 979 |
+
d12: broadcasted input2 dimensions
|
| 980 |
+
|
| 981 |
+
Returns: broadcasting constraints relating the input dimensions to the broadcasted dimensions
|
| 982 |
+
|
| 983 |
+
"""
|
| 984 |
+
|
| 985 |
+
size = len(d1)
|
| 986 |
+
|
| 987 |
+
res2 = []
|
| 988 |
+
|
| 989 |
+
for i in range(size):
|
| 990 |
+
t1 = broadcast_dim(d1, d2, d11, d12, i)
|
| 991 |
+
t2 = broadcast_dim(d2, d1, d12, d11, i)
|
| 992 |
+
t3 = no_broadcast_dim_with_index(d1, d2, d11, d12, i)
|
| 993 |
+
|
| 994 |
+
res2.append(Disj([t1, t2, t3]))
|
| 995 |
+
|
| 996 |
+
return Conj(res2)
|
| 997 |
+
|
| 998 |
+
|
| 999 |
+
def gen_broadcasting_constraints(e1: TVar, e2: TVar, e11: TVar, e12: TVar, i: int, counter: int):
|
| 1000 |
+
"""
|
| 1001 |
+
Simulates broadcasting on e1 and e2 and returns the results
|
| 1002 |
+
respectively in e11 and e12. Because of gradual types,
|
| 1003 |
+
e1 and e2 may not be equal. Similarly, e11 and e12 may not
|
| 1004 |
+
be equal. e11 and e12 should be guaranteed to be consistent
|
| 1005 |
+
as they represent the shapes of the tensors to be added after
|
| 1006 |
+
broadcasting.
|
| 1007 |
+
Args:
|
| 1008 |
+
e1: TVar representing the type of input 1
|
| 1009 |
+
e2: TVar representing the type of input 2
|
| 1010 |
+
e11: TVar representing the representing broadcasted input 1
|
| 1011 |
+
e12: TVar representing the representing broadcasted input 2
|
| 1012 |
+
i: The rank of the resulting type of addition
|
| 1013 |
+
counter: for variable tracking
|
| 1014 |
+
|
| 1015 |
+
Returns: Simplified broadcasting constraints
|
| 1016 |
+
|
| 1017 |
+
"""
|
| 1018 |
+
dims, counter = gen_lists_of_dims(4, i, counter)
|
| 1019 |
+
[d1, d2, d3, d4] = dims
|
| 1020 |
+
nat_dims_i = gen_nat_constraints(list(itertools.chain(*dims)))
|
| 1021 |
+
|
| 1022 |
+
initialize_tensors_constraints = create_equality_constraints_for_broadcasting(e1, e2, e11, e12,
|
| 1023 |
+
d1, d2, d3, d4)
|
| 1024 |
+
|
| 1025 |
+
[e1_tensor, e11_tensor, e2_tensor, e12_tensor] = initialize_tensors_constraints
|
| 1026 |
+
|
| 1027 |
+
# without padding, broadcast all possibilities for tensors of size i
|
| 1028 |
+
final_tensor_constraint_no_padding = Conj([*initialize_tensors_constraints,
|
| 1029 |
+
generate_all_broadcasting_possibilities_no_padding(d1, d2, d3, d4)])
|
| 1030 |
+
|
| 1031 |
+
# with padding, broadcast all possibilities for tensors of size i
|
| 1032 |
+
final_tensor_constraint_padding_arg1, counter = \
|
| 1033 |
+
apply_padding(e1, e11_tensor, e2_tensor, e12_tensor, d2, d3, d4, counter)
|
| 1034 |
+
|
| 1035 |
+
final_tensor_constraint_padding_arg2, counter = \
|
| 1036 |
+
apply_padding(e2, e12_tensor, e1_tensor, e11_tensor, d1, d4, d3, counter)
|
| 1037 |
+
|
| 1038 |
+
return final_tensor_constraint_no_padding, \
|
| 1039 |
+
final_tensor_constraint_padding_arg1, \
|
| 1040 |
+
final_tensor_constraint_padding_arg2, nat_dims_i, counter
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/operation.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
op_add = '+'
|
| 2 |
+
op_sub = '-'
|
| 3 |
+
op_mul = '*'
|
| 4 |
+
op_div = '/'
|
| 5 |
+
op_eq = '='
|
| 6 |
+
op_neq = '!='
|
| 7 |
+
op_imp = '=>'
|
| 8 |
+
op_matching = '⊳'
|
| 9 |
+
op_consistency = '~'
|
| 10 |
+
op_precision = '⊑'
|
| 11 |
+
op_leq = '≤'
|
| 12 |
+
op_lt = '<'
|
| 13 |
+
op_gt = '>'
|
| 14 |
+
op_mod = '%'
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py
ADDED
|
@@ -0,0 +1,348 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import Conj, Disj, T, F, BinConstraintT, BVar, is_bool_expr
|
| 2 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import BinConstraintD, TVar, DVar
|
| 3 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import Prod, is_algebraic_expression, is_dim
|
| 4 |
+
from torch.fx.experimental.migrate_gradual_types.constraint_generator import ConstraintGenerator
|
| 5 |
+
from torch.fx.experimental.migrate_gradual_types.constraint_transformation import transform_constraint
|
| 6 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_eq, op_neq, op_gt, op_lt
|
| 7 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_leq, op_sub, op_div, op_mul, op_mod
|
| 8 |
+
from torch.fx.tensor_type import TensorType, Dyn
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
import z3 # type: ignore[import]
|
| 12 |
+
from torch.fx.experimental.migrate_gradual_types.z3_types import tensor_type, z3_dyn, D
|
| 13 |
+
HAS_Z3 = True
|
| 14 |
+
|
| 15 |
+
def transform_to_z3(constraint, counter, dimension_dict):
|
| 16 |
+
if isinstance(constraint, Conj):
|
| 17 |
+
conjuncts = []
|
| 18 |
+
for c in constraint.conjucts:
|
| 19 |
+
new_c, counter = transform_to_z3(c, counter, dimension_dict)
|
| 20 |
+
conjuncts.append(new_c)
|
| 21 |
+
return z3.And(conjuncts), counter
|
| 22 |
+
|
| 23 |
+
elif isinstance(constraint, Disj):
|
| 24 |
+
disjuncts = []
|
| 25 |
+
for c in constraint.disjuncts:
|
| 26 |
+
new_c, counter = transform_to_z3(c, counter, dimension_dict)
|
| 27 |
+
disjuncts.append(new_c)
|
| 28 |
+
return z3.Or(disjuncts), counter
|
| 29 |
+
|
| 30 |
+
elif isinstance(constraint, T):
|
| 31 |
+
return True, counter
|
| 32 |
+
|
| 33 |
+
elif isinstance(constraint, F):
|
| 34 |
+
return False, counter
|
| 35 |
+
|
| 36 |
+
elif isinstance(constraint, BinConstraintT):
|
| 37 |
+
if constraint.op == op_eq:
|
| 38 |
+
lhs, counter = transform_var(constraint.lhs, counter, dimension_dict)
|
| 39 |
+
rhs, counter = transform_var(constraint.rhs, counter, dimension_dict)
|
| 40 |
+
return (lhs == rhs), counter
|
| 41 |
+
|
| 42 |
+
else:
|
| 43 |
+
raise NotImplementedError('Method not yet implemented')
|
| 44 |
+
|
| 45 |
+
elif isinstance(constraint, BinConstraintD):
|
| 46 |
+
if constraint.op == op_eq:
|
| 47 |
+
|
| 48 |
+
if isinstance(constraint.lhs, BVar) and is_bool_expr(constraint.rhs):
|
| 49 |
+
transformed_rhs, counter = transform_to_z3(constraint.rhs, counter, dimension_dict)
|
| 50 |
+
transformed_lhs = z3.Bool(constraint.lhs.c)
|
| 51 |
+
return transformed_lhs == transformed_rhs, counter
|
| 52 |
+
|
| 53 |
+
elif is_dim(constraint.lhs) and is_dim(constraint.rhs):
|
| 54 |
+
# with dimension transformations we consider the encoding
|
| 55 |
+
lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict)
|
| 56 |
+
rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict)
|
| 57 |
+
return lhs == rhs, counter
|
| 58 |
+
|
| 59 |
+
else:
|
| 60 |
+
# then we have an algebraic expression which means that we disregard the
|
| 61 |
+
# first element of the encoding
|
| 62 |
+
lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
|
| 63 |
+
rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
|
| 64 |
+
return lhs == rhs, counter
|
| 65 |
+
|
| 66 |
+
# The assumption here is that the LHS and RHS must be dimensions
|
| 67 |
+
elif constraint.op == op_neq:
|
| 68 |
+
assert is_dim(constraint.lhs)
|
| 69 |
+
assert is_dim(constraint.rhs)
|
| 70 |
+
lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict)
|
| 71 |
+
rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict)
|
| 72 |
+
if constraint.rhs == Dyn or constraint.lhs == Dyn:
|
| 73 |
+
if constraint.rhs == Dyn:
|
| 74 |
+
return lhs.arg(0) == 1, counter
|
| 75 |
+
elif constraint.lhs == Dyn:
|
| 76 |
+
return rhs.arg(0) == 1, counter
|
| 77 |
+
|
| 78 |
+
# if one of the instances is a number
|
| 79 |
+
elif isinstance(constraint.lhs, int) or isinstance(constraint.rhs, int):
|
| 80 |
+
if isinstance(constraint.lhs, int):
|
| 81 |
+
return z3.Or([rhs.arg(0) == 0, z3.And([rhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter
|
| 82 |
+
|
| 83 |
+
elif isinstance(constraint.rhs, int):
|
| 84 |
+
return z3.Or([lhs.arg(0) == 0, z3.And([lhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter
|
| 85 |
+
|
| 86 |
+
else:
|
| 87 |
+
return z3.Or([z3.And([lhs.arg(0) == 0, rhs.arg(0) != 0]),
|
| 88 |
+
z3.And([lhs.arg(0) != 0, rhs.arg(0) == 0]),
|
| 89 |
+
z3.And([lhs.arg(0) != 0, rhs.arg(0) != 0, lhs.arg(1) != rhs.arg(1)])]), counter
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
elif constraint.op == op_leq:
|
| 93 |
+
# if the dimensions are not dyn, this will come into effect
|
| 94 |
+
# there would have been another constraint specifying if a given dimension
|
| 95 |
+
# is dyn or not
|
| 96 |
+
assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
|
| 97 |
+
lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
|
| 98 |
+
rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
|
| 99 |
+
return lhs <= rhs, counter
|
| 100 |
+
|
| 101 |
+
elif constraint.op == op_gt:
|
| 102 |
+
assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
|
| 103 |
+
lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
|
| 104 |
+
rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
|
| 105 |
+
return lhs > rhs, counter
|
| 106 |
+
|
| 107 |
+
elif constraint.op == op_lt:
|
| 108 |
+
assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
|
| 109 |
+
lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
|
| 110 |
+
rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
|
| 111 |
+
return lhs < rhs, counter
|
| 112 |
+
|
| 113 |
+
else:
|
| 114 |
+
raise NotImplementedError('operation not yet implemented')
|
| 115 |
+
|
| 116 |
+
else:
|
| 117 |
+
raise NotImplementedError('Operation not yet implemented')
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def transform_var(tensor, counter, dimension_dict):
|
| 121 |
+
"""
|
| 122 |
+
Transforms tensor variables to a format understood by z3
|
| 123 |
+
Args:
|
| 124 |
+
tensor: Tensor variable or a tensor type potentially with variable dimensions
|
| 125 |
+
Returns: Transformed variable to a z3 format
|
| 126 |
+
|
| 127 |
+
"""
|
| 128 |
+
if isinstance(tensor, TensorType):
|
| 129 |
+
res = []
|
| 130 |
+
for t in tensor.__args__:
|
| 131 |
+
transformed, counter = transform_dimension(t, counter, dimension_dict)
|
| 132 |
+
res.append(transformed)
|
| 133 |
+
|
| 134 |
+
assert len(res) <= 4
|
| 135 |
+
if len(tensor.__args__) == 1:
|
| 136 |
+
return tensor_type.tensor1(res[0]), counter
|
| 137 |
+
elif len(tensor.__args__) == 2:
|
| 138 |
+
return tensor_type.tensor2(res[0], res[1]), counter
|
| 139 |
+
elif len(tensor.__args__) == 3:
|
| 140 |
+
return tensor_type.tensor3(res[0], res[1], res[2]), counter
|
| 141 |
+
elif len(tensor.__args__) == 4:
|
| 142 |
+
return tensor_type.tensor4(res[0], res[1], res[2], res[3]), counter
|
| 143 |
+
|
| 144 |
+
elif tensor == Dyn:
|
| 145 |
+
return z3_dyn, counter
|
| 146 |
+
|
| 147 |
+
elif isinstance(tensor, TVar):
|
| 148 |
+
return z3.Const(tensor.tvar, tensor_type), counter
|
| 149 |
+
|
| 150 |
+
def transform_dimension(dimension, counter, dimension_dict):
|
| 151 |
+
"""
|
| 152 |
+
Takes a dimension variable or a number and transforms it to a tuple
|
| 153 |
+
according to our scheme
|
| 154 |
+
Args:
|
| 155 |
+
dimension: The dimension to be transformed
|
| 156 |
+
counter: variable tracking
|
| 157 |
+
|
| 158 |
+
Returns: tuple and the current counter
|
| 159 |
+
|
| 160 |
+
"""
|
| 161 |
+
if dimension == Dyn:
|
| 162 |
+
counter += 1
|
| 163 |
+
return D(0, z3.Int(counter)), counter
|
| 164 |
+
elif isinstance(dimension, int):
|
| 165 |
+
return D(1, dimension), counter
|
| 166 |
+
elif isinstance(dimension, DVar):
|
| 167 |
+
if dimension.c in dimension_dict:
|
| 168 |
+
return D(z3.Int(dimension_dict[dimension.c]), z3.Int(dimension.c)), counter
|
| 169 |
+
else:
|
| 170 |
+
counter += 1
|
| 171 |
+
dimension_dict[dimension.c] = counter
|
| 172 |
+
return D(z3.Int(counter), z3.Int(dimension.c)), counter
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def transform_algebraic_expression(expr, counter, dimension_dict):
|
| 176 |
+
"""
|
| 177 |
+
Transforms an algebraic expression to z3 format
|
| 178 |
+
Args:
|
| 179 |
+
expr: An expression is either a dimension variable or an algebraic-expression
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
Returns: the transformed expression
|
| 183 |
+
|
| 184 |
+
"""
|
| 185 |
+
assert is_algebraic_expression(expr) or is_dim(expr)
|
| 186 |
+
|
| 187 |
+
if is_dim(expr):
|
| 188 |
+
transformed, counter = transform_dimension(expr, counter, dimension_dict)
|
| 189 |
+
return transformed.arg(1), counter
|
| 190 |
+
|
| 191 |
+
elif isinstance(expr, Prod):
|
| 192 |
+
|
| 193 |
+
dims = []
|
| 194 |
+
for dim in expr.products:
|
| 195 |
+
assert is_dim(dim)
|
| 196 |
+
d, counter = transform_dimension(dim, counter, dimension_dict)
|
| 197 |
+
dims.append(d.arg(1))
|
| 198 |
+
return z3.Product(dims), counter
|
| 199 |
+
|
| 200 |
+
elif is_algebraic_expression(expr):
|
| 201 |
+
|
| 202 |
+
lhs, counter = transform_algebraic_expression(expr.lhs, counter, dimension_dict)
|
| 203 |
+
rhs, counter = transform_algebraic_expression(expr.rhs, counter, dimension_dict)
|
| 204 |
+
|
| 205 |
+
if expr.op == op_sub:
|
| 206 |
+
c = lhs - rhs
|
| 207 |
+
|
| 208 |
+
elif expr.op == op_add:
|
| 209 |
+
c = lhs + rhs
|
| 210 |
+
|
| 211 |
+
elif expr.op == op_div:
|
| 212 |
+
c = lhs / rhs
|
| 213 |
+
|
| 214 |
+
elif expr.op == op_mul:
|
| 215 |
+
c = lhs * rhs
|
| 216 |
+
|
| 217 |
+
elif expr.op == op_mod:
|
| 218 |
+
c = lhs % rhs
|
| 219 |
+
|
| 220 |
+
else:
|
| 221 |
+
raise NotImplementedError('operation not yet implemented')
|
| 222 |
+
|
| 223 |
+
return c, counter
|
| 224 |
+
|
| 225 |
+
else:
|
| 226 |
+
raise RuntimeError
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def transform_all_constraints(traced, counter=0):
|
| 230 |
+
"""
|
| 231 |
+
Given a trace, generates constraints and transforms them to z3 format
|
| 232 |
+
|
| 233 |
+
"""
|
| 234 |
+
dimension_dict = {} # type: ignore[var-annotated]
|
| 235 |
+
|
| 236 |
+
generator = ConstraintGenerator(traced)
|
| 237 |
+
new_constraints, counter = generator.generate_constraints(counter)
|
| 238 |
+
|
| 239 |
+
# print(new_constraints.conjucts[0])
|
| 240 |
+
# print(*new_constraints.conjucts, sep='\n')
|
| 241 |
+
|
| 242 |
+
# transform precision, matching, consistency till obtaining a fixed point
|
| 243 |
+
new_constraints, counter = iterate_till_fixed_point(new_constraints, counter)
|
| 244 |
+
# print(new_constraints)
|
| 245 |
+
# print(new_constraints.conjucts)
|
| 246 |
+
# new_constraints.conjucts = new_constraints.conjucts[:-1]
|
| 247 |
+
# print(*new_constraints.conjucts, sep='\n')
|
| 248 |
+
|
| 249 |
+
transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict)
|
| 250 |
+
# print(transformed)
|
| 251 |
+
return transformed
|
| 252 |
+
|
| 253 |
+
def iterate_till_fixed_point(constraints, counter):
|
| 254 |
+
"""
|
| 255 |
+
Transform constraints till reaching a fixed point
|
| 256 |
+
"""
|
| 257 |
+
old_c = None
|
| 258 |
+
while old_c != constraints:
|
| 259 |
+
old_c = constraints
|
| 260 |
+
constraints, counter = transform_constraint(constraints, counter)
|
| 261 |
+
return constraints, counter
|
| 262 |
+
|
| 263 |
+
def transform_all_constraints_trace_time(tracer_root, graph, node, counter=0):
|
| 264 |
+
"""
|
| 265 |
+
Takes a node and a graph and generates two sets of constraints.
|
| 266 |
+
One set constraints the node's constraints and another set
|
| 267 |
+
constraints the negation of the node's constraints
|
| 268 |
+
Args:
|
| 269 |
+
tracer_root: the root for getting the module instances
|
| 270 |
+
graph: the graph so far in the tracing process
|
| 271 |
+
node: node that represents a conditional
|
| 272 |
+
counter: variable tracking
|
| 273 |
+
|
| 274 |
+
Returns: Two sets of constraints. One with a conjunction with the
|
| 275 |
+
the conditional constraint and the other with a conjunction with
|
| 276 |
+
its negation.
|
| 277 |
+
|
| 278 |
+
"""
|
| 279 |
+
dimension_dict = {} # type: ignore[var-annotated]
|
| 280 |
+
|
| 281 |
+
generator = ConstraintGenerator(tracer_root, graph)
|
| 282 |
+
new_constraints, counter = generator.generate_constraints(counter)
|
| 283 |
+
|
| 284 |
+
condition_constraint = new_constraints.conjucts[-1]
|
| 285 |
+
|
| 286 |
+
# we know the constraint is a conjunction where the last constraint is about the conditional
|
| 287 |
+
# so remove the last constraint
|
| 288 |
+
new_constraints.conjucts = new_constraints.conjucts[:-1]
|
| 289 |
+
|
| 290 |
+
# transform precision, matching, consistency till obtaining a fixed point
|
| 291 |
+
new_constraints, counter = iterate_till_fixed_point(new_constraints, counter)
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
# since the function returns a list of one element, we get the first element
|
| 295 |
+
# we are only interested in the RHS in this case because the LHS just stores
|
| 296 |
+
# the result
|
| 297 |
+
|
| 298 |
+
# we make sure the constraint is of the form:
|
| 299 |
+
# c = b where b is a boolean expression
|
| 300 |
+
# and we consider b (constraint.rhs) for transformation
|
| 301 |
+
assert isinstance(condition_constraint.lhs, BVar)
|
| 302 |
+
assert is_bool_expr(condition_constraint.rhs)
|
| 303 |
+
condition_constraint_rhs = condition_constraint.rhs
|
| 304 |
+
|
| 305 |
+
# transform the condition constraint
|
| 306 |
+
condition_constraint_rhs, counter = iterate_till_fixed_point(condition_constraint_rhs, counter)
|
| 307 |
+
|
| 308 |
+
transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict)
|
| 309 |
+
|
| 310 |
+
transformed_condition_constraint, counter = transform_to_z3(condition_constraint_rhs, counter, dimension_dict)
|
| 311 |
+
|
| 312 |
+
negation_transformed_condition_constraint = z3.Not(transformed_condition_constraint)
|
| 313 |
+
|
| 314 |
+
return z3.And([transformed, transformed_condition_constraint]),\
|
| 315 |
+
z3.And([transformed, negation_transformed_condition_constraint])
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def evaluate_conditional_with_constraints(tracer_root, graph, node, counter=0, user_constraints=None):
|
| 319 |
+
"""
|
| 320 |
+
Given an IR and a node representing a conditional, evaluate the conditional
|
| 321 |
+
and its negation
|
| 322 |
+
Args:
|
| 323 |
+
tracer_root: Tracer root for module instances
|
| 324 |
+
node: The node to be evaluated
|
| 325 |
+
|
| 326 |
+
Returns: the results of evaluating the condition and the negation with
|
| 327 |
+
the rest of the constraints
|
| 328 |
+
|
| 329 |
+
"""
|
| 330 |
+
|
| 331 |
+
transformed_positive, transformed_negative = \
|
| 332 |
+
transform_all_constraints_trace_time(tracer_root, graph, node, counter)
|
| 333 |
+
|
| 334 |
+
s = z3.Solver()
|
| 335 |
+
s.add(transformed_positive)
|
| 336 |
+
if user_constraints is not None:
|
| 337 |
+
s.add(user_constraints)
|
| 338 |
+
condition = s.check()
|
| 339 |
+
|
| 340 |
+
s = z3.Solver()
|
| 341 |
+
s.add(transformed_negative)
|
| 342 |
+
if user_constraints is not None:
|
| 343 |
+
s.add(user_constraints)
|
| 344 |
+
negation = s.check()
|
| 345 |
+
return condition, negation
|
| 346 |
+
|
| 347 |
+
except ImportError:
|
| 348 |
+
HAS_Z3 = False
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.fx.experimental.migrate_gradual_types.constraint import TVar, DVar, BinConstraintD, \
|
| 2 |
+
BVar
|
| 3 |
+
from torch.fx.experimental.migrate_gradual_types.operation import op_leq
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def gen_tvar(curr):
|
| 7 |
+
"""
|
| 8 |
+
Generate a tensor variable
|
| 9 |
+
:param curr: The current counter
|
| 10 |
+
:return: a tensor variable and the updated counter
|
| 11 |
+
"""
|
| 12 |
+
curr += 1
|
| 13 |
+
return TVar(curr), curr
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def gen_dvar(curr):
|
| 17 |
+
"""
|
| 18 |
+
Generate a dimension variable
|
| 19 |
+
:param curr: the current counter
|
| 20 |
+
:return: a dimension variable and an updated counter
|
| 21 |
+
"""
|
| 22 |
+
curr += 1
|
| 23 |
+
return DVar(curr), curr
|
| 24 |
+
|
| 25 |
+
def gen_bvar(curr):
|
| 26 |
+
"""
|
| 27 |
+
Generate a boolean variable
|
| 28 |
+
:param curr: the current counter
|
| 29 |
+
:return: a boolean variable and an updated counter
|
| 30 |
+
"""
|
| 31 |
+
curr += 1
|
| 32 |
+
return BVar(curr), curr
|
| 33 |
+
|
| 34 |
+
def gen_tensor_dims(n, curr):
|
| 35 |
+
"""
|
| 36 |
+
Generate a list of tensor dimensions
|
| 37 |
+
:param n: the number of dimensions
|
| 38 |
+
:param curr: the current counter
|
| 39 |
+
:return: a list of dimension variables and an updated counter
|
| 40 |
+
"""
|
| 41 |
+
dims = []
|
| 42 |
+
for _ in range(n):
|
| 43 |
+
dvar, curr = gen_dvar(curr)
|
| 44 |
+
dims.append(dvar)
|
| 45 |
+
return dims, curr
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def gen_nat_constraints(list_of_dims):
|
| 49 |
+
"""
|
| 50 |
+
Generate natural number constraints for dimensions
|
| 51 |
+
"""
|
| 52 |
+
return [BinConstraintD(0, d, op_leq) for d in list_of_dims]
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/z3_types.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
try:
|
| 2 |
+
import z3 # type: ignore[import]
|
| 3 |
+
HAS_Z3 = True
|
| 4 |
+
# dynamic type
|
| 5 |
+
dyn = z3.DeclareSort('Dyn')
|
| 6 |
+
dyn_type = z3.Const('dyn', dyn)
|
| 7 |
+
|
| 8 |
+
# dimension
|
| 9 |
+
dim = z3.Datatype('dim')
|
| 10 |
+
dim.declare('dim', ('0', z3.IntSort()), ('1', z3.IntSort()))
|
| 11 |
+
dim = dim.create()
|
| 12 |
+
|
| 13 |
+
# tensors
|
| 14 |
+
tensor_type = z3.Datatype('TensorType')
|
| 15 |
+
tensor_type.declare('Dyn', ('dyn', dyn))
|
| 16 |
+
tensor_type.declare('tensor1', ('0', dim))
|
| 17 |
+
tensor_type.declare('tensor2', ('0', dim), ('1', dim))
|
| 18 |
+
tensor_type.declare('tensor3', ('0', dim), ('1', dim), ('2', dim))
|
| 19 |
+
tensor_type.declare('tensor4', ('0', dim), ('1', dim), ('2', dim), ('3', dim))
|
| 20 |
+
tensor_type = tensor_type.create()
|
| 21 |
+
|
| 22 |
+
# create dimension
|
| 23 |
+
D = dim.dim
|
| 24 |
+
|
| 25 |
+
z3_dyn = tensor_type.Dyn(dyn_type)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
except ImportError:
|
| 29 |
+
HAS_Z3 = False
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (378 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/core.cpython-310.pyc
ADDED
|
Binary file (2.45 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/dispatch.cpython-310.pyc
ADDED
|
Binary file (322 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/match.cpython-310.pyc
ADDED
|
Binary file (4.53 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/more.cpython-310.pyc
ADDED
|
Binary file (3.47 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/unification_tools.cpython-310.pyc
ADDED
|
Binary file (11 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (3.4 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/variable.cpython-310.pyc
ADDED
|
Binary file (2.89 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/match.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .core import unify, reify # type: ignore[attr-defined]
|
| 2 |
+
from .variable import isvar
|
| 3 |
+
from .utils import _toposort, freeze
|
| 4 |
+
from .unification_tools import groupby, first # type: ignore[import]
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Dispatcher:
|
| 8 |
+
def __init__(self, name):
|
| 9 |
+
self.name = name
|
| 10 |
+
self.funcs = {}
|
| 11 |
+
self.ordering = []
|
| 12 |
+
|
| 13 |
+
def add(self, signature, func):
|
| 14 |
+
self.funcs[freeze(signature)] = func
|
| 15 |
+
self.ordering = ordering(self.funcs)
|
| 16 |
+
|
| 17 |
+
def __call__(self, *args, **kwargs):
|
| 18 |
+
func, s = self.resolve(args)
|
| 19 |
+
return func(*args, **kwargs)
|
| 20 |
+
|
| 21 |
+
def resolve(self, args):
|
| 22 |
+
n = len(args)
|
| 23 |
+
for signature in self.ordering:
|
| 24 |
+
if len(signature) != n:
|
| 25 |
+
continue
|
| 26 |
+
s = unify(freeze(args), signature)
|
| 27 |
+
if s is not False:
|
| 28 |
+
result = self.funcs[signature]
|
| 29 |
+
return result, s
|
| 30 |
+
raise NotImplementedError("No match found. \nKnown matches: "
|
| 31 |
+
+ str(self.ordering) + "\nInput: " + str(args))
|
| 32 |
+
|
| 33 |
+
def register(self, *signature):
|
| 34 |
+
def _(func):
|
| 35 |
+
self.add(signature, func)
|
| 36 |
+
return self
|
| 37 |
+
return _
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class VarDispatcher(Dispatcher):
|
| 41 |
+
""" A dispatcher that calls functions with variable names
|
| 42 |
+
>>> # xdoctest: +SKIP
|
| 43 |
+
>>> d = VarDispatcher('d')
|
| 44 |
+
>>> x = var('x')
|
| 45 |
+
>>> @d.register('inc', x)
|
| 46 |
+
... def f(x):
|
| 47 |
+
... return x + 1
|
| 48 |
+
>>> @d.register('double', x)
|
| 49 |
+
... def f(x):
|
| 50 |
+
... return x * 2
|
| 51 |
+
>>> d('inc', 10)
|
| 52 |
+
11
|
| 53 |
+
>>> d('double', 10)
|
| 54 |
+
20
|
| 55 |
+
"""
|
| 56 |
+
def __call__(self, *args, **kwargs):
|
| 57 |
+
func, s = self.resolve(args)
|
| 58 |
+
d = {k.token: v for k, v in s.items()}
|
| 59 |
+
return func(**d)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
global_namespace = {} # type: ignore[var-annotated]
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def match(*signature, **kwargs):
|
| 66 |
+
namespace = kwargs.get('namespace', global_namespace)
|
| 67 |
+
dispatcher = kwargs.get('Dispatcher', Dispatcher)
|
| 68 |
+
|
| 69 |
+
def _(func):
|
| 70 |
+
name = func.__name__
|
| 71 |
+
|
| 72 |
+
if name not in namespace:
|
| 73 |
+
namespace[name] = dispatcher(name)
|
| 74 |
+
d = namespace[name]
|
| 75 |
+
|
| 76 |
+
d.add(signature, func)
|
| 77 |
+
|
| 78 |
+
return d
|
| 79 |
+
return _
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def supercedes(a, b):
|
| 83 |
+
""" ``a`` is a more specific match than ``b`` """
|
| 84 |
+
if isvar(b) and not isvar(a):
|
| 85 |
+
return True
|
| 86 |
+
s = unify(a, b)
|
| 87 |
+
if s is False:
|
| 88 |
+
return False
|
| 89 |
+
s = {k: v for k, v in s.items() if not isvar(k) or not isvar(v)}
|
| 90 |
+
if reify(a, s) == a:
|
| 91 |
+
return True
|
| 92 |
+
if reify(b, s) == b:
|
| 93 |
+
return False
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# Taken from multipledispatch
|
| 97 |
+
def edge(a, b, tie_breaker=hash):
|
| 98 |
+
""" A should be checked before B
|
| 99 |
+
Tie broken by tie_breaker, defaults to ``hash``
|
| 100 |
+
"""
|
| 101 |
+
if supercedes(a, b):
|
| 102 |
+
if supercedes(b, a):
|
| 103 |
+
return tie_breaker(a) > tie_breaker(b)
|
| 104 |
+
else:
|
| 105 |
+
return True
|
| 106 |
+
return False
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
# Taken from multipledispatch
|
| 110 |
+
def ordering(signatures):
|
| 111 |
+
""" A sane ordering of signatures to check, first to last
|
| 112 |
+
Topological sort of edges as given by ``edge`` and ``supercedes``
|
| 113 |
+
"""
|
| 114 |
+
signatures = list(map(tuple, signatures))
|
| 115 |
+
edges = [(a, b) for a in signatures for b in signatures if edge(a, b)]
|
| 116 |
+
edges = groupby(first, edges)
|
| 117 |
+
for s in signatures:
|
| 118 |
+
if s not in edges:
|
| 119 |
+
edges[s] = []
|
| 120 |
+
edges = {k: [b for a, b in v] for k, v in edges.items()} # type: ignore[attr-defined, assignment]
|
| 121 |
+
return _toposort(edges)
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/more.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .core import unify, reify # type: ignore[attr-defined]
|
| 2 |
+
from .dispatch import dispatch
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def unifiable(cls):
|
| 6 |
+
""" Register standard unify and reify operations on class
|
| 7 |
+
This uses the type and __dict__ or __slots__ attributes to define the
|
| 8 |
+
nature of the term
|
| 9 |
+
See Also:
|
| 10 |
+
>>> # xdoctest: +SKIP
|
| 11 |
+
>>> class A(object):
|
| 12 |
+
... def __init__(self, a, b):
|
| 13 |
+
... self.a = a
|
| 14 |
+
... self.b = b
|
| 15 |
+
>>> unifiable(A)
|
| 16 |
+
<class 'unification.more.A'>
|
| 17 |
+
>>> x = var('x')
|
| 18 |
+
>>> a = A(1, 2)
|
| 19 |
+
>>> b = A(1, x)
|
| 20 |
+
>>> unify(a, b, {})
|
| 21 |
+
{~x: 2}
|
| 22 |
+
"""
|
| 23 |
+
_unify.add((cls, cls, dict), unify_object)
|
| 24 |
+
_reify.add((cls, dict), reify_object)
|
| 25 |
+
|
| 26 |
+
return cls
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
#########
|
| 30 |
+
# Reify #
|
| 31 |
+
#########
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def reify_object(o, s):
|
| 35 |
+
""" Reify a Python object with a substitution
|
| 36 |
+
>>> # xdoctest: +SKIP
|
| 37 |
+
>>> class Foo(object):
|
| 38 |
+
... def __init__(self, a, b):
|
| 39 |
+
... self.a = a
|
| 40 |
+
... self.b = b
|
| 41 |
+
... def __str__(self):
|
| 42 |
+
... return "Foo(%s, %s)"%(str(self.a), str(self.b))
|
| 43 |
+
>>> x = var('x')
|
| 44 |
+
>>> f = Foo(1, x)
|
| 45 |
+
>>> print(f)
|
| 46 |
+
Foo(1, ~x)
|
| 47 |
+
>>> print(reify_object(f, {x: 2}))
|
| 48 |
+
Foo(1, 2)
|
| 49 |
+
"""
|
| 50 |
+
if hasattr(o, '__slots__'):
|
| 51 |
+
return _reify_object_slots(o, s)
|
| 52 |
+
else:
|
| 53 |
+
return _reify_object_dict(o, s)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _reify_object_dict(o, s):
|
| 57 |
+
obj = object.__new__(type(o))
|
| 58 |
+
d = reify(o.__dict__, s)
|
| 59 |
+
if d == o.__dict__:
|
| 60 |
+
return o
|
| 61 |
+
obj.__dict__.update(d)
|
| 62 |
+
return obj
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _reify_object_slots(o, s):
|
| 66 |
+
attrs = [getattr(o, attr) for attr in o.__slots__]
|
| 67 |
+
new_attrs = reify(attrs, s)
|
| 68 |
+
if attrs == new_attrs:
|
| 69 |
+
return o
|
| 70 |
+
else:
|
| 71 |
+
newobj = object.__new__(type(o))
|
| 72 |
+
for slot, attr in zip(o.__slots__, new_attrs):
|
| 73 |
+
setattr(newobj, slot, attr)
|
| 74 |
+
return newobj
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
@dispatch(slice, dict)
|
| 78 |
+
def _reify(o, s):
|
| 79 |
+
""" Reify a Python ``slice`` object """
|
| 80 |
+
return slice(*reify((o.start, o.stop, o.step), s))
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
#########
|
| 84 |
+
# Unify #
|
| 85 |
+
#########
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def unify_object(u, v, s):
|
| 89 |
+
""" Unify two Python objects
|
| 90 |
+
Unifies their type and ``__dict__`` attributes
|
| 91 |
+
>>> # xdoctest: +SKIP
|
| 92 |
+
>>> class Foo(object):
|
| 93 |
+
... def __init__(self, a, b):
|
| 94 |
+
... self.a = a
|
| 95 |
+
... self.b = b
|
| 96 |
+
... def __str__(self):
|
| 97 |
+
... return "Foo(%s, %s)"%(str(self.a), str(self.b))
|
| 98 |
+
>>> x = var('x')
|
| 99 |
+
>>> f = Foo(1, x)
|
| 100 |
+
>>> g = Foo(1, 2)
|
| 101 |
+
>>> unify_object(f, g, {})
|
| 102 |
+
{~x: 2}
|
| 103 |
+
"""
|
| 104 |
+
if type(u) != type(v):
|
| 105 |
+
return False
|
| 106 |
+
if hasattr(u, '__slots__'):
|
| 107 |
+
return unify([getattr(u, slot) for slot in u.__slots__],
|
| 108 |
+
[getattr(v, slot) for slot in v.__slots__],
|
| 109 |
+
s)
|
| 110 |
+
else:
|
| 111 |
+
return unify(u.__dict__, v.__dict__, s)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
@dispatch(slice, slice, dict)
|
| 115 |
+
def _unify(u, v, s):
|
| 116 |
+
""" Unify a Python ``slice`` object """
|
| 117 |
+
return unify((u.start, u.stop, u.step), (v.start, v.stop, v.step), s)
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (371 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/conflict.cpython-310.pyc
ADDED
|
Binary file (4.6 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/core.cpython-310.pyc
ADDED
|
Binary file (2.58 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/dispatcher.cpython-310.pyc
ADDED
|
Binary file (14.5 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (4.23 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/variadic.cpython-310.pyc
ADDED
|
Binary file (3.53 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/conflict.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .utils import _toposort, groupby
|
| 2 |
+
from .variadic import isvariadic
|
| 3 |
+
|
| 4 |
+
__all__ = ["AmbiguityWarning", "supercedes", "consistent", "ambiguous", "ambiguities", "super_signature",
|
| 5 |
+
"edge", "ordering"]
|
| 6 |
+
|
| 7 |
+
class AmbiguityWarning(Warning):
|
| 8 |
+
pass
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def supercedes(a, b):
|
| 12 |
+
""" A is consistent and strictly more specific than B """
|
| 13 |
+
if len(a) < len(b):
|
| 14 |
+
# only case is if a is empty and b is variadic
|
| 15 |
+
return not a and len(b) == 1 and isvariadic(b[-1])
|
| 16 |
+
elif len(a) == len(b):
|
| 17 |
+
return all(map(issubclass, a, b))
|
| 18 |
+
else:
|
| 19 |
+
# len(a) > len(b)
|
| 20 |
+
p1 = 0
|
| 21 |
+
p2 = 0
|
| 22 |
+
while p1 < len(a) and p2 < len(b):
|
| 23 |
+
cur_a = a[p1]
|
| 24 |
+
cur_b = b[p2]
|
| 25 |
+
if not (isvariadic(cur_a) or isvariadic(cur_b)):
|
| 26 |
+
if not issubclass(cur_a, cur_b):
|
| 27 |
+
return False
|
| 28 |
+
p1 += 1
|
| 29 |
+
p2 += 1
|
| 30 |
+
elif isvariadic(cur_a):
|
| 31 |
+
assert p1 == len(a) - 1
|
| 32 |
+
return p2 == len(b) - 1 and issubclass(cur_a, cur_b)
|
| 33 |
+
elif isvariadic(cur_b):
|
| 34 |
+
assert p2 == len(b) - 1
|
| 35 |
+
if not issubclass(cur_a, cur_b):
|
| 36 |
+
return False
|
| 37 |
+
p1 += 1
|
| 38 |
+
return p2 == len(b) - 1 and p1 == len(a)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def consistent(a, b):
|
| 42 |
+
""" It is possible for an argument list to satisfy both A and B """
|
| 43 |
+
|
| 44 |
+
# Need to check for empty args
|
| 45 |
+
if not a:
|
| 46 |
+
return not b or isvariadic(b[0])
|
| 47 |
+
if not b:
|
| 48 |
+
return not a or isvariadic(a[0])
|
| 49 |
+
|
| 50 |
+
# Non-empty args check for mutual subclasses
|
| 51 |
+
if len(a) == len(b):
|
| 52 |
+
return all(issubclass(aa, bb) or issubclass(bb, aa)
|
| 53 |
+
for aa, bb in zip(a, b))
|
| 54 |
+
else:
|
| 55 |
+
p1 = 0
|
| 56 |
+
p2 = 0
|
| 57 |
+
while p1 < len(a) and p2 < len(b):
|
| 58 |
+
cur_a = a[p1]
|
| 59 |
+
cur_b = b[p2]
|
| 60 |
+
if not issubclass(cur_b, cur_a) and not issubclass(cur_a, cur_b):
|
| 61 |
+
return False
|
| 62 |
+
if not (isvariadic(cur_a) or isvariadic(cur_b)):
|
| 63 |
+
p1 += 1
|
| 64 |
+
p2 += 1
|
| 65 |
+
elif isvariadic(cur_a):
|
| 66 |
+
p2 += 1
|
| 67 |
+
elif isvariadic(cur_b):
|
| 68 |
+
p1 += 1
|
| 69 |
+
# We only need to check for variadic ends
|
| 70 |
+
# Variadic types are guaranteed to be the last element
|
| 71 |
+
return (isvariadic(cur_a) and p2 == len(b) or
|
| 72 |
+
isvariadic(cur_b) and p1 == len(a))
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def ambiguous(a, b):
|
| 76 |
+
""" A is consistent with B but neither is strictly more specific """
|
| 77 |
+
return consistent(a, b) and not (supercedes(a, b) or supercedes(b, a))
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def ambiguities(signatures):
|
| 81 |
+
""" All signature pairs such that A is ambiguous with B """
|
| 82 |
+
signatures = list(map(tuple, signatures))
|
| 83 |
+
return {(a, b) for a in signatures for b in signatures
|
| 84 |
+
if hash(a) < hash(b)
|
| 85 |
+
and ambiguous(a, b)
|
| 86 |
+
and not any(supercedes(c, a) and supercedes(c, b)
|
| 87 |
+
for c in signatures)}
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def super_signature(signatures):
|
| 91 |
+
""" A signature that would break ambiguities """
|
| 92 |
+
n = len(signatures[0])
|
| 93 |
+
assert all(len(s) == n for s in signatures)
|
| 94 |
+
|
| 95 |
+
return [max((type.mro(sig[i]) for sig in signatures), key=len)[0]
|
| 96 |
+
for i in range(n)]
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def edge(a, b, tie_breaker=hash):
|
| 100 |
+
""" A should be checked before B
|
| 101 |
+
Tie broken by tie_breaker, defaults to ``hash``
|
| 102 |
+
"""
|
| 103 |
+
# A either supercedes B and B does not supercede A or if B does then call
|
| 104 |
+
# tie_breaker
|
| 105 |
+
return supercedes(a, b) and (not supercedes(b, a) or tie_breaker(a) > tie_breaker(b))
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def ordering(signatures):
|
| 109 |
+
""" A sane ordering of signatures to check, first to last
|
| 110 |
+
Topological sort of edges as given by ``edge`` and ``supercedes``
|
| 111 |
+
"""
|
| 112 |
+
signatures = list(map(tuple, signatures))
|
| 113 |
+
edges = [(a, b) for a in signatures for b in signatures if edge(a, b)]
|
| 114 |
+
edges = groupby(lambda x: x[0], edges)
|
| 115 |
+
for s in signatures:
|
| 116 |
+
if s not in edges:
|
| 117 |
+
edges[s] = []
|
| 118 |
+
edges = {k: [b for a, b in v] for k, v in edges.items()} # type: ignore[assignment, attr-defined]
|
| 119 |
+
return _toposort(edges)
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/dispatcher.py
ADDED
|
@@ -0,0 +1,430 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from warnings import warn
|
| 2 |
+
import inspect
|
| 3 |
+
from .conflict import ordering, ambiguities, super_signature, AmbiguityWarning
|
| 4 |
+
from .utils import expand_tuples
|
| 5 |
+
from .variadic import Variadic, isvariadic
|
| 6 |
+
import itertools as itl
|
| 7 |
+
|
| 8 |
+
__all__ = ["MDNotImplementedError", "ambiguity_warn", "halt_ordering", "restart_ordering", "variadic_signature_matches_iter",
|
| 9 |
+
"variadic_signature_matches", "Dispatcher", "source", "MethodDispatcher", "str_signature", "warning_text"]
|
| 10 |
+
|
| 11 |
+
class MDNotImplementedError(NotImplementedError):
    """Raised by an implementation to defer to the next matching signature.

    ``Dispatcher.__call__`` catches this and retries with the remaining
    candidates from ``dispatch_iter``.
    """
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def ambiguity_warn(dispatcher, ambiguities):
    """Default ambiguity handler: emit an ``AmbiguityWarning``.

    Parameters
    ----------
    dispatcher : Dispatcher
        The dispatcher on which the ambiguity was detected
    ambiguities : set
        Set of type signature pairs that are ambiguous within this dispatcher

    See Also:
        Dispatcher.add
        warning_text
    """
    message = warning_text(dispatcher.name, ambiguities)
    warn(message, AmbiguityWarning)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def halt_ordering():
    """Deprecated no-op kept for backward compatibility.

    Ordering is no longer computed eagerly, so there is nothing to halt;
    calling this only emits a ``DeprecationWarning``.
    """
    warn('halt_ordering is deprecated, you can safely remove this call.',
         DeprecationWarning)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def restart_ordering(on_ambiguity=ambiguity_warn):
    """Deprecated no-op kept for backward compatibility.

    Ordering is recomputed lazily; call ``reorder()`` on each dispatcher
    if eager ordering is wanted. Calling this only emits a
    ``DeprecationWarning``.

    Parameters
    ----------
    on_ambiguity : callable, optional
        Ignored; accepted only for backward compatibility.
    """
    # BUG FIX: the original implicit string concatenation produced
    # "...eagerly orderthe dispatchers..." (missing space between the
    # first two fragments).
    warn(
        'restart_ordering is deprecated, if you would like to eagerly order '
        'the dispatchers, you should call the ``reorder()`` method on each'
        ' dispatcher.',
        DeprecationWarning,
    )
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def variadic_signature_matches_iter(types, full_signature):
    """Lazily check a sequence of input types against a variadic signature.

    Yields one boolean per element of ``types`` (plus one trailing boolean
    describing whether the signature was fully consumed).

    Notes
    -----
    Walk ``types`` against ``full_signature`` left to right:

    * while the current signature element is non-variadic, each input type
      must match it and we advance to the next signature element;
    * a variadic element absorbs any number of matching input types;
    * after all input types are consumed, the signature must also be
      exhausted (its final element being variadic), otherwise the overall
      match fails.
    """
    sig_iter = iter(full_signature)
    expected = next(sig_iter)
    for actual in types:
        yield issubclass(actual, expected)
        if not isvariadic(expected):
            # Non-variadic element: consumed exactly once, advance.
            expected = next(sig_iter)
    # The loop has no ``break``, so this always runs after all input
    # types are consumed (equivalent to the original for/else).
    try:
        expected = next(sig_iter)
    except StopIteration:
        # Signature exhausted; the last element must have been variadic.
        assert isvariadic(expected)
        yield True
    else:
        # Signature elements remain unmatched: overall failure.
        yield False
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def variadic_signature_matches(types, full_signature):
    """Return True when ``types`` matches the variadic ``full_signature``."""
    # An empty signature is invalid here; zero arguments always match a
    # (non-empty) variadic signature.
    assert full_signature
    checks = variadic_signature_matches_iter(types, full_signature)
    return all(checks)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class Dispatcher:
    """ Dispatch methods based on type signature

    Use ``dispatch`` to add implementations

    Examples
    --------
    >>> # xdoctest: +SKIP("bad import name")
    >>> from multipledispatch import dispatch
    >>> @dispatch(int)
    ... def f(x):
    ...     return x + 1
    >>> @dispatch(float)
    ... def f(x):
    ...     return x - 1
    >>> f(3)
    4
    >>> f(3.0)
    2.0
    """
    # __slots__ keeps instances small; _cache memoizes type-tuple -> func,
    # _ordering is the resolution order (lazily rebuilt after each add()).
    __slots__ = '__name__', 'name', 'funcs', '_ordering', '_cache', 'doc'

    def __init__(self, name, doc=None):
        self.name = self.__name__ = name
        self.funcs = {}  # maps signature tuple -> implementation
        self.doc = doc

        self._cache = {}  # maps concrete argument-type tuple -> implementation

    def register(self, *types, **kwargs):
        """ register dispatcher with new implementation

        Decorator form of :meth:`add`; returns the decorated function
        unchanged so it can be stacked.

        >>> # xdoctest: +SKIP
        >>> f = Dispatcher('f')
        >>> @f.register(int)
        ... def inc(x):
        ...     return x + 1
        >>> @f.register(float)
        ... def dec(x):
        ...     return x - 1
        >>> @f.register(list)
        ... @f.register(tuple)
        ... def reverse(x):
        ...     return x[::-1]
        >>> f(1)
        2
        >>> f(1.0)
        0.0
        >>> f([1, 2, 3])
        [3, 2, 1]
        """
        def _df(func):
            self.add(types, func, **kwargs)  # type: ignore[call-arg]
            return func
        return _df

    @classmethod
    def get_func_params(cls, func):
        # Returns the parameters of ``func``; implicitly returns None on
        # (very old) Pythons lacking inspect.signature.
        if hasattr(inspect, "signature"):
            sig = inspect.signature(func)
            return sig.parameters.values()

    @classmethod
    def get_func_annotations(cls, func):
        """ get annotations of function positional parameters

        Returns the tuple of annotations only when *every* positional
        parameter is annotated; otherwise returns None.
        """
        params = cls.get_func_params(func)
        if params:
            Parameter = inspect.Parameter

            # Only positional parameters participate in dispatch.
            params = (param for param in params
                      if param.kind in
                      (Parameter.POSITIONAL_ONLY,
                       Parameter.POSITIONAL_OR_KEYWORD))

            annotations = tuple(
                param.annotation
                for param in params)

            if all(ann is not Parameter.empty for ann in annotations):
                return annotations

    def add(self, signature, func):
        """ Add new types/method pair to dispatcher

        >>> # xdoctest: +SKIP
        >>> D = Dispatcher('add')
        >>> D.add((int, int), lambda x, y: x + y)
        >>> D.add((float, float), lambda x, y: x + y)
        >>> D(1, 2)
        3
        >>> D(1, 2.0)
        Traceback (most recent call last):
        ...
        NotImplementedError: Could not find signature for add: <int, float>
        >>> # When ``add`` detects a warning it calls the ``on_ambiguity`` callback
        >>> # with a dispatcher/itself, and a set of ambiguous type signature pairs
        >>> # as inputs. See ``ambiguity_warn`` for an example.
        """
        # Handle annotations: an empty signature means "derive it from the
        # function's own type annotations".
        if not signature:
            annotations = self.get_func_annotations(func)
            if annotations:
                signature = annotations

        # Handle union types: a tuple element means "any of these types";
        # expand into one registration per combination and recurse.
        if any(isinstance(typ, tuple) for typ in signature):
            for typs in expand_tuples(signature):
                self.add(typs, func)
            return

        new_signature = []

        for index, typ in enumerate(signature, start=1):
            if not isinstance(typ, (type, list)):
                str_sig = ', '.join(c.__name__ if isinstance(c, type)
                                    else str(c) for c in signature)
                raise TypeError(f"Tried to dispatch on non-type: {typ}\n"
                                f"In signature: <{str_sig}>\n"
                                f"In function: {self.name}")

            # handle variadic signatures: a list element like [int] means
            # "zero or more ints" and must terminate the signature
            if isinstance(typ, list):
                if index != len(signature):
                    raise TypeError(
                        'Variadic signature must be the last element'
                    )

                if len(typ) != 1:
                    raise TypeError(
                        'Variadic signature must contain exactly one element. '
                        'To use a variadic union type place the desired types '
                        'inside of a tuple, e.g., [(int, str)]'
                    )
                new_signature.append(Variadic[typ[0]])
            else:
                new_signature.append(typ)

        self.funcs[tuple(new_signature)] = func
        # New registration invalidates both the memoized lookups and the
        # computed resolution order.
        self._cache.clear()

        try:
            del self._ordering
        except AttributeError:
            pass

    @property
    def ordering(self):
        # Lazily (re)computed resolution order; add() deletes _ordering to
        # force a rebuild on next access.
        try:
            return self._ordering
        except AttributeError:
            return self.reorder()

    def reorder(self, on_ambiguity=ambiguity_warn):
        # Recompute the resolution order and report any ambiguous signature
        # pairs through the callback (warning by default).
        self._ordering = od = ordering(self.funcs)
        amb = ambiguities(self.funcs)
        if amb:
            on_ambiguity(self, amb)
        return od

    def __call__(self, *args, **kwargs):
        # Dispatch on the concrete types of the positional arguments only.
        types = tuple([type(arg) for arg in args])
        try:
            func = self._cache[types]
        except KeyError as e:
            func = self.dispatch(*types)
            if not func:
                raise NotImplementedError(
                    f'Could not find signature for {self.name}: <{str_signature(types)}>') from e
            self._cache[types] = func
        try:
            return func(*args, **kwargs)

        except MDNotImplementedError as e:
            # The chosen implementation deferred; fall through to the
            # remaining candidates in resolution order.
            funcs = self.dispatch_iter(*types)
            next(funcs)  # burn first
            for func in funcs:
                try:
                    return func(*args, **kwargs)
                except MDNotImplementedError:
                    pass

            raise NotImplementedError(
                "Matching functions for "
                f"{self.name}: <{str_signature(types)}> found, but none completed successfully",) from e

    def __str__(self):
        return f"<dispatched {self.name}>"
    __repr__ = __str__

    def dispatch(self, *types):
        """Determine appropriate implementation for this type signature

        This method is internal.  Users should call this object as a function.
        Implementation resolution occurs within the ``__call__`` method.

        >>> # xdoctest: +SKIP
        >>> from multipledispatch import dispatch
        >>> @dispatch(int)
        ... def inc(x):
        ...     return x + 1
        >>> implementation = inc.dispatch(int)
        >>> implementation(3)
        4
        >>> print(inc.dispatch(float))
        None

        See Also:
            ``multipledispatch.conflict`` - module to determine resolution order
        """
        # Fast path: exact signature registered.
        if types in self.funcs:
            return self.funcs[types]

        try:
            return next(self.dispatch_iter(*types))
        except StopIteration:
            return None

    def dispatch_iter(self, *types):
        # Yield every registered implementation compatible with ``types``,
        # best match first (per self.ordering).
        n = len(types)
        for signature in self.ordering:
            if len(signature) == n and all(map(issubclass, types, signature)):
                result = self.funcs[signature]
                yield result
            elif len(signature) and isvariadic(signature[-1]):
                if variadic_signature_matches(types, signature):
                    result = self.funcs[signature]
                    yield result

    def resolve(self, types):
        """ Determine appropriate implementation for this type signature

        .. deprecated:: 0.4.4
            Use ``dispatch(*types)`` instead
        """
        warn("resolve() is deprecated, use dispatch(*types)",
             DeprecationWarning)

        return self.dispatch(*types)

    def __getstate__(self):
        # Only name and registrations are pickled; ordering and cache are
        # derived state, rebuilt in __setstate__.
        return {'name': self.name,
                'funcs': self.funcs}

    def __setstate__(self, d):
        self.name = d['name']
        self.funcs = d['funcs']
        self._ordering = ordering(self.funcs)
        self._cache = {}

    @property
    def __doc__(self):
        # Synthesize a docstring from the registered implementations.
        docs = [f"Multiply dispatched method: {self.name}"]

        if self.doc:
            docs.append(self.doc)

        other = []
        for sig in self.ordering[::-1]:
            func = self.funcs[sig]
            if func.__doc__:
                s = f'Inputs: <{str_signature(sig)}>\n'
                s += '-' * len(s) + '\n'
                s += func.__doc__.strip()
                docs.append(s)
            else:
                other.append(str_signature(sig))

        if other:
            docs.append('Other signatures:\n    ' + '\n    '.join(other))

        return '\n\n'.join(docs)

    def _help(self, *args):
        # Docstring of the implementation that would handle these arguments.
        return self.dispatch(*map(type, args)).__doc__

    def help(self, *args, **kwargs):
        """ Print docstring for the function corresponding to inputs """
        print(self._help(*args))

    def _source(self, *args):
        # Source text of the implementation that would handle these arguments.
        func = self.dispatch(*map(type, args))
        if not func:
            raise TypeError("No function found")
        return source(func)

    def source(self, *args, **kwargs):
        """ Print source code for the function corresponding to inputs """
        print(self._source(*args))
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
def source(func):
    """Return the defining file name and source text of *func* as one string."""
    header = f'File: {inspect.getsourcefile(func)}\n\n'
    body = inspect.getsource(func)
    return header + body
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
class MethodDispatcher(Dispatcher):
    """Dispatch methods of a class based on argument types.

    Behaves like :class:`Dispatcher`, but skips the implicit ``self``
    parameter when reading signatures and uses the descriptor protocol to
    remember the instance it was accessed through, forwarding it on call.

    See Also:
        Dispatcher
    """
    __slots__ = ('obj', 'cls')

    @classmethod
    def get_func_params(cls, func):
        # Drop the leading ``self`` parameter; only the remaining
        # positional parameters take part in dispatch.
        if hasattr(inspect, "signature"):
            sig = inspect.signature(func)
            return itl.islice(sig.parameters.values(), 1, None)

    def __get__(self, instance, owner):
        # Descriptor hook: record how we were accessed so __call__ can
        # pass the bound instance to the chosen implementation.
        self.obj = instance
        self.cls = owner
        return self

    def __call__(self, *args, **kwargs):
        argument_types = tuple(type(arg) for arg in args)
        func = self.dispatch(*argument_types)
        if not func:
            raise NotImplementedError(f'Could not find signature for {self.name}: <{str_signature(argument_types)}>')
        return func(self.obj, *args, **kwargs)
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
def str_signature(sig):
    """Render a type signature as a comma-separated list of class names.

    >>> str_signature((int, float))
    'int, float'
    """
    names = [cls.__name__ for cls in sig]
    return ', '.join(names)
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
def warning_text(name, amb):
    """Compose the message body for an ambiguity warning.

    Lists each ambiguous signature pair and suggests disambiguating
    ``@dispatch`` registrations using their supersignatures.
    """
    parts = [f"\nAmbiguities exist in dispatched function {name}\n\n",
             "The following signatures may result in ambiguous behavior:\n"]
    for pair in amb:
        rendered = ', '.join('[' + str_signature(s) + ']' for s in pair)
        parts.append("\t" + rendered + "\n")
    parts.append("\n\nConsider making the following additions:\n\n")
    suggestions = ['@dispatch(' + str_signature(super_signature(s))
                   + f')\ndef {name}(...)' for s in amb]
    parts.append('\n\n'.join(suggestions))
    return ''.join(parts)
|
llava_next/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/variadic.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .utils import typename
|
| 2 |
+
|
| 3 |
+
__all__ = ["VariadicSignatureType", "isvariadic", "VariadicSignatureMeta", "Variadic"]
|
| 4 |
+
|
| 5 |
+
class VariadicSignatureType(type):
    """Metaclass-built type standing for "zero or more of these types"."""

    def __subclasscheck__(cls, subclass):
        # When the candidate is itself variadic, compare its member types;
        # otherwise treat it as a single-element tuple of itself.
        if isvariadic(subclass):
            members = subclass.variadic_type
        else:
            members = (subclass,)
        return subclass is cls or all(
            issubclass(member, cls.variadic_type) for member in members  # type: ignore[attr-defined]
        )

    def __eq__(cls, other):
        """
        Return True if other has the same variadic type

        Parameters
        ----------
        other : object (type)
            The object (type) to check

        Returns
        -------
        bool
            Whether or not `other` is equal to `self`
        """
        if not isvariadic(other):
            return False
        return set(cls.variadic_type) == set(other.variadic_type)  # type: ignore[attr-defined]

    def __hash__(cls):
        # Hash must agree with __eq__: order of member types is irrelevant.
        return hash((type(cls), frozenset(cls.variadic_type)))  # type: ignore[attr-defined]
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def isvariadic(obj):
    """Check whether the type `obj` is variadic.

    Parameters
    ----------
    obj : type
        The type to check

    Returns
    -------
    bool
        Whether or not `obj` is variadic

    Examples
    --------
    >>> # xdoctest: +SKIP
    >>> isvariadic(int)
    False
    >>> isvariadic(Variadic[int])
    True
    """
    # Variadic signature types are exactly the instances of
    # VariadicSignatureType (produced via Variadic[...]).
    return isinstance(obj, VariadicSignatureType)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class VariadicSignatureMeta(type):
    """A metaclass that overrides ``__getitem__`` on the class. This is used to
    generate a new type for Variadic signatures. See the Variadic class for
    examples of how this behaves.
    """
    def __getitem__(cls, variadic_type):
        # BUG FIX: the original check was
        #   ``not (isinstance(variadic_type, (type, tuple)) or type(variadic_type))``
        # and ``type(variadic_type)`` is always truthy, so the validation
        # could never fire. Also balanced the paren in the error message.
        if not isinstance(variadic_type, (type, tuple)):
            raise ValueError("Variadic types must be type or tuple of types"
                             " (Variadic[int] or Variadic[(int, float)])")

        # Normalize a single type to a one-element tuple of member types.
        if not isinstance(variadic_type, tuple):
            variadic_type = variadic_type,
        return VariadicSignatureType(
            f'Variadic[{typename(variadic_type)}]',
            (),
            dict(variadic_type=variadic_type, __slots__=())
        )
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class Variadic(metaclass=VariadicSignatureMeta):
    """Subscription factory for variadic signature types.

    ``Variadic[T]`` (or ``Variadic[(T1, T2)]``) builds a new type that
    matches any number of arguments whose types are subclasses of the
    given member type(s).

    Examples
    --------
    >>> # xdoctest: +SKIP
    >>> Variadic[int]  # any number of int arguments
    <class 'multipledispatch.variadic.Variadic[int]'>
    >>> Variadic[(int, str)]  # any number of one of int or str arguments
    <class 'multipledispatch.variadic.Variadic[(int, str)]'>
    >>> issubclass(int, Variadic[int])
    True
    >>> issubclass(int, Variadic[(int, str)])
    True
    >>> issubclass(str, Variadic[(int, str)])
    True
    >>> issubclass(float, Variadic[(int, str)])
    False
    """
|
vlmpy310/lib/python3.10/site-packages/skimage/feature/brief.py
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from .._shared.filters import gaussian
|
| 6 |
+
from .._shared.utils import check_nD
|
| 7 |
+
from .brief_cy import _brief_loop
|
| 8 |
+
from .util import (
|
| 9 |
+
DescriptorExtractor,
|
| 10 |
+
_mask_border_keypoints,
|
| 11 |
+
_prepare_grayscale_input_2D,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class BRIEF(DescriptorExtractor):
    """BRIEF binary descriptor extractor.

    BRIEF (Binary Robust Independent Elementary Features) is an efficient
    feature point descriptor. It is highly discriminative even when using
    relatively few bits and is computed using simple intensity difference
    tests.

    For each keypoint, intensity comparisons are carried out for a specifically
    distributed number N of pixel-pairs resulting in a binary descriptor of
    length N. For binary descriptors the Hamming distance can be used for
    feature matching, which leads to lower computational cost in comparison to
    the L2 norm.

    Parameters
    ----------
    descriptor_size : int, optional
        Size of BRIEF descriptor for each keypoint. Sizes 128, 256 and 512
        recommended by the authors. Default is 256.
    patch_size : int, optional
        Length of the two dimensional square patch sampling region around
        the keypoints. Default is 49.
    mode : {'normal', 'uniform'}, optional
        Probability distribution for sampling location of decision pixel-pairs
        around keypoints.
    rng : {`numpy.random.Generator`, int}, optional
        Pseudo-random number generator (RNG).
        By default, a PCG64 generator is used (see :func:`numpy.random.default_rng`).
        If `rng` is an int, it is used to seed the generator.

        The PRNG is used for the random sampling of the decision
        pixel-pairs. From a square window with length `patch_size`,
        pixel pairs are sampled using the `mode` parameter to build
        the descriptors using intensity comparison.

        For matching across images, the same `rng` should be used to construct
        descriptors. To facilitate this:

        (a) `rng` defaults to 1
        (b) Subsequent calls of the ``extract`` method will use the same rng/seed.
    sigma : float, optional
        Standard deviation of the Gaussian low-pass filter applied to the image
        to alleviate noise sensitivity, which is strongly recommended to obtain
        discriminative and good descriptors.

    Attributes
    ----------
    descriptors : (Q, `descriptor_size`) array of dtype bool
        2D ndarray of binary descriptors of size `descriptor_size` for Q
        keypoints after filtering out border keypoints with value at an
        index ``(i, j)`` either being ``True`` or ``False`` representing
        the outcome of the intensity comparison for i-th keypoint on j-th
        decision pixel-pair. It is ``Q == np.sum(mask)``.
    mask : (N,) array of dtype bool
        Mask indicating whether a keypoint has been filtered out
        (``False``) or is described in the `descriptors` array (``True``).

    Examples
    --------
    >>> from skimage.feature import (corner_harris, corner_peaks, BRIEF,
    ...                              match_descriptors)
    >>> import numpy as np
    >>> square1 = np.zeros((8, 8), dtype=np.int32)
    >>> square1[2:6, 2:6] = 1
    >>> square1
    array([[0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)
    >>> square2 = np.zeros((9, 9), dtype=np.int32)
    >>> square2[2:7, 2:7] = 1
    >>> square2
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)
    >>> keypoints1 = corner_peaks(corner_harris(square1), min_distance=1)
    >>> keypoints2 = corner_peaks(corner_harris(square2), min_distance=1)
    >>> extractor = BRIEF(patch_size=5)
    >>> extractor.extract(square1, keypoints1)
    >>> descriptors1 = extractor.descriptors
    >>> extractor.extract(square2, keypoints2)
    >>> descriptors2 = extractor.descriptors
    >>> matches = match_descriptors(descriptors1, descriptors2)
    >>> matches
    array([[0, 0],
           [1, 1],
           [2, 2],
           [3, 3]])
    >>> keypoints1[matches[:, 0]]
    array([[2, 2],
           [2, 5],
           [5, 2],
           [5, 5]])
    >>> keypoints2[matches[:, 1]]
    array([[2, 2],
           [2, 6],
           [6, 2],
           [6, 6]])

    """

    def __init__(
        self, descriptor_size=256, patch_size=49, mode='normal', sigma=1, rng=1
    ):
        # Validate the sampling mode up front so extract() can assume it.
        mode = mode.lower()
        if mode not in ('normal', 'uniform'):
            raise ValueError("`mode` must be 'normal' or 'uniform'.")

        self.descriptor_size = descriptor_size
        self.patch_size = patch_size
        self.mode = mode
        self.sigma = sigma

        if isinstance(rng, np.random.Generator):
            # Spawn an independent RNG from parent RNG provided by the user.
            # This is necessary so that we can safely deepcopy the RNG.
            # See https://github.com/scikit-learn/scikit-learn/issues/16988#issuecomment-1518037853
            bg = rng._bit_generator
            ss = bg._seed_seq
            (child_ss,) = ss.spawn(1)
            self.rng = np.random.Generator(type(bg)(child_ss))
        elif rng is None:
            # Fresh, OS-entropy-seeded generator.
            self.rng = np.random.default_rng(np.random.SeedSequence())
        else:
            # int (or other seed-like) value; note the documented default of 1
            # makes independently constructed extractors comparable.
            self.rng = np.random.default_rng(rng)

        # Populated by extract().
        self.descriptors = None
        self.mask = None

    def extract(self, image, keypoints):
        """Extract BRIEF binary descriptors for given keypoints in image.

        Results are stored in ``self.descriptors`` and ``self.mask``
        (keypoints too close to the border are dropped).

        Parameters
        ----------
        image : 2D array
            Input image.
        keypoints : (N, 2) array
            Keypoint coordinates as ``(row, col)``.

        """
        check_nD(image, 2)

        # Copy RNG so we can repeatedly call extract with the same random values
        rng = copy.deepcopy(self.rng)

        image = _prepare_grayscale_input_2D(image)

        # Gaussian low-pass filtering to alleviate noise sensitivity
        image = np.ascontiguousarray(gaussian(image, sigma=self.sigma, mode='reflect'))

        # Sampling pairs of decision pixels in patch_size x patch_size window
        desc_size = self.descriptor_size
        patch_size = self.patch_size
        if self.mode == 'normal':
            # Gaussian sampling (sigma = patch_size / 5 per the BRIEF paper);
            # oversample (x8) then reject offsets falling outside the patch.
            samples = (patch_size / 5.0) * rng.standard_normal(desc_size * 8)
            samples = np.array(samples, dtype=np.int32)
            samples = samples[
                (samples < (patch_size // 2)) & (samples > -(patch_size - 2) // 2)
            ]

            pos1 = samples[: desc_size * 2].reshape(desc_size, 2)
            pos2 = samples[desc_size * 2 : desc_size * 4].reshape(desc_size, 2)
        elif self.mode == 'uniform':
            # Uniform sampling over the valid offset range (inclusive upper
            # bound via +1).
            samples = rng.integers(
                -(patch_size - 2) // 2, (patch_size // 2) + 1, (desc_size * 2, 2)
            )
            samples = np.array(samples, dtype=np.int32)
            pos1, pos2 = np.split(samples, 2)

        # Contiguity required by the Cython inner loop.
        pos1 = np.ascontiguousarray(pos1)
        pos2 = np.ascontiguousarray(pos2)

        # Removing keypoints that are within (patch_size / 2) distance from the
        # image border
        self.mask = _mask_border_keypoints(image.shape, keypoints, patch_size // 2)

        # NOTE(review): ``copy=False`` here relies on NumPy<2 semantics
        # (copy-if-needed); NumPy 2 raises if a copy is required — confirm
        # the pinned NumPy version.
        keypoints = np.array(
            keypoints[self.mask, :], dtype=np.int64, order='C', copy=False
        )

        self.descriptors = np.zeros(
            (keypoints.shape[0], desc_size), dtype=bool, order='C'
        )

        # Cython loop fills descriptors in place (viewed as uint8).
        _brief_loop(image, self.descriptors.view(np.uint8), keypoints, pos1, pos2)
|
vlmpy310/lib/python3.10/site-packages/skimage/feature/censure.py
ADDED
|
@@ -0,0 +1,343 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from scipy.ndimage import maximum_filter, minimum_filter, convolve
|
| 3 |
+
|
| 4 |
+
from ..transform import integral_image
|
| 5 |
+
from .corner import structure_tensor
|
| 6 |
+
from ..morphology import octagon, star
|
| 7 |
+
from .censure_cy import _censure_dob_loop
|
| 8 |
+
from ..feature.util import (
|
| 9 |
+
FeatureDetector,
|
| 10 |
+
_prepare_grayscale_input_2D,
|
| 11 |
+
_mask_border_keypoints,
|
| 12 |
+
)
|
| 13 |
+
from .._shared.utils import check_nD
|
| 14 |
+
|
# The paper(Reference [1]) mentions the sizes of the Octagon shaped filter
# kernel for the first seven scales only. The sizes of the later scales
# have been extrapolated based on the following statement in the paper.
# "These octagons scale linearly and were experimentally chosen to correspond
# to the seven DOBs described in the previous section."
# Each entry is the (mo, no) / (mi, ni) argument pair passed to
# skimage.morphology.octagon() by _octagon_kernel(); index = scale - 1.
OCTAGON_OUTER_SHAPE = [
    (5, 2),
    (5, 3),
    (7, 3),
    (9, 4),
    (9, 7),
    (13, 7),
    (15, 10),
    (15, 11),
    (15, 12),
    (17, 13),
    (17, 14),
]
OCTAGON_INNER_SHAPE = [
    (3, 0),
    (3, 1),
    (3, 2),
    (5, 2),
    (5, 3),
    (5, 4),
    (5, 5),
    (7, 5),
    (7, 6),
    (9, 6),
    (9, 7),
]

# The sizes for the STAR shaped filter kernel for different scales have been
# taken from the OpenCV implementation.
STAR_SHAPE = [1, 2, 3, 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128]
# Each entry holds a pair of indices into STAR_SHAPE giving the outer and
# inner star sizes for that scale (index = scale - 1); see _filter_image().
STAR_FILTER_SHAPE = [
    (1, 0),
    (3, 1),
    (4, 2),
    (5, 3),
    (7, 4),
    (8, 5),
    (9, 6),
    (11, 8),
    (13, 10),
    (14, 11),
    (15, 12),
    (16, 14),
]
| 65 |
+
|
def _filter_image(image, min_scale, max_scale, mode):
    """Compute bi-level filter responses of ``image`` over a range of scales.

    Parameters
    ----------
    image : 2D ndarray
        Input grayscale image.
    min_scale, max_scale : int
        One response plane is computed for every scale in
        ``[min_scale, max_scale]``.
    mode : {'dob', 'octagon', 'star'}
        Shape of the bi-level filter (already lower-cased by the caller).

    Returns
    -------
    response : (rows, cols, max_scale - min_scale + 1) ndarray of float64
        Stacked filter responses, one plane per scale.
    """
    num_scales = max_scale - min_scale + 1
    n_rows, n_cols = image.shape[0], image.shape[1]

    if mode == 'dob':
        # Allocate with the scale axis first and view it back as the last
        # axis so that every response[:, :, i] plane is a contiguous memory
        # block, as the Cython DOB loop requires.  This produces exactly the
        # strides the previous implementation obtained by assigning to
        # ``response.strides`` — an attribute write NumPy has deprecated.
        response = np.zeros(
            (num_scales, n_rows, n_cols), dtype=np.float64
        ).transpose(1, 2, 0)

        integral_img = integral_image(image)

        for i in range(num_scales):
            n = min_scale + i

            # Constant multipliers for the outer region and the inner region
            # of the bi-level filters with the constraint of keeping the
            # DC bias 0.
            inner_weight = 1.0 / (2 * n + 1) ** 2
            outer_weight = 1.0 / (12 * n**2 + 4 * n)

            _censure_dob_loop(
                n, integral_img, response[:, :, i], inner_weight, outer_weight
            )

    # NOTE : For the Octagon shaped filter, we implemented and evaluated the
    # slanted integral image based image filtering but the performance was
    # more or less equal to image filtering using
    # scipy.ndimage.filters.convolve(). Hence we have decided to use the
    # later for a much cleaner implementation.
    elif mode == 'octagon':
        response = np.zeros((n_rows, n_cols, num_scales), dtype=np.float64)
        # TODO : Decide the shapes of Octagon filters for scales > 7
        for i in range(num_scales):
            mo, no = OCTAGON_OUTER_SHAPE[min_scale + i - 1]
            mi, ni = OCTAGON_INNER_SHAPE[min_scale + i - 1]
            response[:, :, i] = convolve(image, _octagon_kernel(mo, no, mi, ni))

    elif mode == 'star':
        response = np.zeros((n_rows, n_cols, num_scales), dtype=np.float64)
        for i in range(num_scales):
            m = STAR_SHAPE[STAR_FILTER_SHAPE[min_scale + i - 1][0]]
            n = STAR_SHAPE[STAR_FILTER_SHAPE[min_scale + i - 1][1]]
            response[:, :, i] = convolve(image, _star_kernel(m, n))

    else:
        # Unreachable from CENSURE (mode is validated in __init__); kept for
        # parity with the original, which returned an all-zero response for
        # an unrecognized mode.
        response = np.zeros((n_rows, n_cols, num_scales), dtype=np.float64)

    return response
| 115 |
+
|
| 116 |
+
|
def _octagon_kernel(mo, no, mi, ni):
    """Build a zero-DC bi-level octagon kernel.

    The kernel is positive (uniform) on the ring between the outer octagon
    ``octagon(mo, no)`` and the inner octagon ``octagon(mi, ni)``, and
    negative (uniform) inside the inner octagon; the weights are chosen so
    the kernel sums to zero.
    """
    size_outer = mo + 2 * no
    size_inner = mi + 2 * ni
    # Pixel counts of the two octagons: bounding square minus cut corners.
    area_outer = size_outer**2 - 2 * no * (no + 1)
    area_inner = size_inner**2 - 2 * ni * (ni + 1)
    pos_weight = 1.0 / (area_outer - area_inner)
    neg_weight = 1.0 / area_inner
    # Center the inner octagon on an outer-sized canvas.
    pad = (size_outer - size_inner) // 2
    inner_canvas = np.zeros((size_outer, size_outer))
    inner_canvas[pad:-pad, pad:-pad] = octagon(mi, ni)
    return pos_weight * octagon(mo, no) - (pos_weight + neg_weight) * inner_canvas
| 128 |
+
|
| 129 |
+
|
def _star_kernel(m, n):
    """Build a zero-DC bi-level star kernel.

    Positive (uniform) on the region of ``star(m)`` outside ``star(n)`` and
    negative (uniform) on ``star(n)``; the weights are normalized by the
    pixel counts of the two regions so the kernel sums to zero.
    """
    big = star(m)
    small = np.zeros_like(big)
    # Offset that centers the smaller star inside the larger one.
    margin = (m + m // 2) - (n + n // 2)
    small[margin:-margin, margin:-margin] = star(n)
    w_out = 1.0 / np.sum(big - small)
    w_in = 1.0 / np.sum(small)
    return w_out * big - (w_out + w_in) * small
| 139 |
+
|
| 140 |
+
|
def _suppress_lines(feature_mask, image, sigma, line_threshold):
    """Clear mask entries that lie on line-like (edge) structures.

    Computes the structure tensor of ``image`` and, wherever the squared
    trace exceeds ``line_threshold`` times the determinant (i.e. the ratio
    of principal curvatures is too large), sets ``feature_mask`` to False.
    Modifies ``feature_mask`` in place.
    """
    t_rr, t_rc, t_cc = structure_tensor(image, sigma, order='rc')
    trace_sq = (t_rr + t_cc) ** 2
    det = t_rr * t_cc - t_rc**2
    feature_mask[trace_sq > line_threshold * det] = False
| 144 |
+
|
| 145 |
+
|
class CENSURE(FeatureDetector):
    """CENSURE keypoint detector.

    Parameters
    ----------
    min_scale : int, optional
        Minimum scale to extract keypoints from.
    max_scale : int, optional
        Maximum scale to extract keypoints from. The keypoints will be
        extracted from all the scales except the first and the last i.e.
        from the scales in the range [min_scale + 1, max_scale - 1]. The filter
        sizes for different scales is such that the two adjacent scales
        comprise of an octave.
    mode : {'DoB', 'Octagon', 'STAR'}, optional
        Type of bi-level filter used to get the scales of the input image.
        Possible values are 'DoB', 'Octagon' and 'STAR'. The three modes
        represent the shape of the bi-level filters i.e. box(square), octagon
        and star respectively. For instance, a bi-level octagon filter consists
        of a smaller inner octagon and a larger outer octagon with the filter
        weights being uniformly negative in both the inner octagon while
        uniformly positive in the difference region. Use STAR and Octagon for
        better features and DoB for better performance.
    non_max_threshold : float, optional
        Threshold value used to suppress maximas and minimas with a weak
        magnitude response obtained after Non-Maximal Suppression.
    line_threshold : float, optional
        Threshold for rejecting interest points which have ratio of principal
        curvatures greater than this value.

    Attributes
    ----------
    keypoints : (N, 2) array
        Keypoint coordinates as ``(row, col)``.
    scales : (N,) array
        Corresponding scales.

    References
    ----------
    .. [1] Motilal Agrawal, Kurt Konolige and Morten Rufus Blas
           "CENSURE: Center Surround Extremas for Realtime Feature
           Detection and Matching",
           https://link.springer.com/chapter/10.1007/978-3-540-88693-8_8
           :DOI:`10.1007/978-3-540-88693-8_8`

    .. [2] Adam Schmidt, Marek Kraft, Michal Fularz and Zuzanna Domagala
           "Comparative Assessment of Point Feature Detectors and
           Descriptors in the Context of Robot Navigation"
           http://yadda.icm.edu.pl/yadda/element/bwmeta1.element.baztech-268aaf28-0faf-4872-a4df-7e2e61cb364c/c/Schmidt_comparative.pdf
           :DOI:`10.1.1.465.1117`

    Examples
    --------
    >>> from skimage.data import astronaut
    >>> from skimage.color import rgb2gray
    >>> from skimage.feature import CENSURE
    >>> img = rgb2gray(astronaut()[100:300, 100:300])
    >>> censure = CENSURE()
    >>> censure.detect(img)
    >>> censure.keypoints
    array([[  4, 148],
           [ 12,  73],
           [ 21, 176],
           [ 91,  22],
           [ 93,  56],
           [ 94,  22],
           [ 95,  54],
           [100,  51],
           [103,  51],
           [106,  67],
           [108,  15],
           [117,  20],
           [122,  60],
           [125,  37],
           [129,  37],
           [133,  76],
           [145,  44],
           [146,  94],
           [150, 114],
           [153,  33],
           [154, 156],
           [155, 151],
           [184,  63]])
    >>> censure.scales
    array([2, 6, 6, 2, 4, 3, 2, 3, 2, 6, 3, 2, 2, 3, 2, 2, 2, 3, 2, 2, 4, 2,
           2])

    """

    def __init__(
        self,
        min_scale=1,
        max_scale=7,
        mode='DoB',
        non_max_threshold=0.15,
        line_threshold=10,
    ):
        # Mode comparisons throughout the class are done on the lower-cased
        # value ('dob', 'octagon', 'star').
        mode = mode.lower()
        if mode not in ('dob', 'octagon', 'star'):
            raise ValueError("`mode` must be one of 'DoB', 'Octagon', 'STAR'.")

        # At least 3 scales are needed: keypoints come only from the interior
        # scales [min_scale + 1, max_scale - 1].
        if min_scale < 1 or max_scale < 1 or max_scale - min_scale < 2:
            raise ValueError(
                'The scales must be >= 1 and the number of ' 'scales should be >= 3.'
            )

        self.min_scale = min_scale
        self.max_scale = max_scale
        self.mode = mode
        self.non_max_threshold = non_max_threshold
        self.line_threshold = line_threshold

        # Populated by detect().
        self.keypoints = None
        self.scales = None

    def detect(self, image):
        """Detect CENSURE keypoints along with the corresponding scale.

        Parameters
        ----------
        image : 2D ndarray
            Input image.

        """

        # (1) First we generate the required scales on the input grayscale
        # image using a bi-level filter and stack them up in `filter_response`.

        # (2) We then perform Non-Maximal suppression in 3 x 3 x 3 window on
        # the filter_response to suppress points that are neither minima or
        # maxima in 3 x 3 x 3 neighborhood. We obtain a boolean ndarray
        # `feature_mask` containing all the minimas and maximas in
        # `filter_response` as True.
        # (3) Then we suppress all the points in the `feature_mask` for which
        # the corresponding point in the image at a particular scale has the
        # ratio of principal curvatures greater than `line_threshold`.
        # (4) Finally, we remove the border keypoints and return the keypoints
        # along with its corresponding scale.

        check_nD(image, 2)

        num_scales = self.max_scale - self.min_scale

        image = np.ascontiguousarray(_prepare_grayscale_input_2D(image))

        # Generating all the scales
        filter_response = _filter_image(
            image, self.min_scale, self.max_scale, self.mode
        )

        # Suppressing points that are neither minima or maxima in their
        # 3 x 3 x 3 neighborhood to zero
        minimas = minimum_filter(filter_response, (3, 3, 3)) == filter_response
        maximas = maximum_filter(filter_response, (3, 3, 3)) == filter_response

        feature_mask = minimas | maximas
        feature_mask[filter_response < self.non_max_threshold] = False

        for i in range(1, num_scales):
            # sigma = (window_size - 1) / 6.0, so the window covers > 99% of
            # the kernel's distribution
            # window_size = 7 + 2 * (min_scale - 1 + i)
            # Hence sigma = 1 + (min_scale - 1 + i)/ 3.0
            _suppress_lines(
                feature_mask[:, :, i],
                image,
                (1 + (self.min_scale + i - 1) / 3.0),
                self.line_threshold,
            )

        # Keep only the interior scales; `scales` holds the absolute scale
        # number for each surviving keypoint.
        rows, cols, scales = np.nonzero(feature_mask[..., 1:num_scales])
        keypoints = np.column_stack([rows, cols])
        scales = scales + self.min_scale + 1

        if self.mode == 'dob':
            self.keypoints = keypoints
            self.scales = scales
            return

        cumulative_mask = np.zeros(keypoints.shape[0], dtype=bool)

        # For octagon/star modes, drop keypoints closer to the border than
        # the filter extent at their own scale.
        if self.mode == 'octagon':
            for i in range(self.min_scale + 1, self.max_scale):
                c = (OCTAGON_OUTER_SHAPE[i - 1][0] - 1) // 2 + OCTAGON_OUTER_SHAPE[
                    i - 1
                ][1]
                cumulative_mask |= _mask_border_keypoints(image.shape, keypoints, c) & (
                    scales == i
                )
        elif self.mode == 'star':
            for i in range(self.min_scale + 1, self.max_scale):
                c = (
                    STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]]
                    + STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] // 2
                )
                cumulative_mask |= _mask_border_keypoints(image.shape, keypoints, c) & (
                    scales == i
                )

        self.keypoints = keypoints[cumulative_mask]
        self.scales = scales[cumulative_mask]