Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- janus/lib/python3.10/site-packages/torch/_export/__pycache__/error.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/__pycache__/pass_base.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/__pycache__/tools.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/__pycache__/verifier.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_predicate.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_value_example.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_view.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/pytree_flatten.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/tensor_setattr.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/unsupported_operator.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/aoti_schema.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/dynamic_shapes.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema_check.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/serialize.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/union.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/dynamic_shapes.py +321 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/schema.py +381 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/schema.yaml +437 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/schema_check.py +286 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/serialize.py +0 -0
- janus/lib/python3.10/site-packages/torch/_export/serde/union.py +70 -0
- janus/lib/python3.10/site-packages/torch/jit/__init__.py +294 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/_await.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/_decomposition_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/_decompositions.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/_freeze.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/_fuser.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/_ir_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/_logging.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/_pickle.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/_recursive.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/_script.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/_serialization.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/_trace.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/annotations.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/generate_bytecode.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/__pycache__/supported_ops.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/jit/_async.py +102 -0
- janus/lib/python3.10/site-packages/torch/jit/_await.py +27 -0
- janus/lib/python3.10/site-packages/torch/jit/_builtins.py +193 -0
- janus/lib/python3.10/site-packages/torch/jit/_check.py +249 -0
- janus/lib/python3.10/site-packages/torch/jit/_dataclass_impls.py +190 -0
- janus/lib/python3.10/site-packages/torch/jit/_decomposition_utils.py +12 -0
- janus/lib/python3.10/site-packages/torch/jit/_decompositions.py +137 -0
- janus/lib/python3.10/site-packages/torch/jit/_freeze.py +228 -0
- janus/lib/python3.10/site-packages/torch/jit/_fuser.py +161 -0
janus/lib/python3.10/site-packages/torch/_export/__pycache__/error.cpython-310.pyc
ADDED
|
Binary file (2.07 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/__pycache__/pass_base.cpython-310.pyc
ADDED
|
Binary file (15 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/__pycache__/tools.cpython-310.pyc
ADDED
|
Binary file (4.34 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/__pycache__/verifier.cpython-310.pyc
ADDED
|
Binary file (13.8 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc
ADDED
|
Binary file (4.94 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_predicate.cpython-310.pyc
ADDED
|
Binary file (1.25 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_value_example.cpython-310.pyc
ADDED
|
Binary file (1.07 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_view.cpython-310.pyc
ADDED
|
Binary file (844 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/pytree_flatten.cpython-310.pyc
ADDED
|
Binary file (781 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/tensor_setattr.cpython-310.pyc
ADDED
|
Binary file (737 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/unsupported_operator.cpython-310.pyc
ADDED
|
Binary file (828 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (170 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/aoti_schema.cpython-310.pyc
ADDED
|
Binary file (725 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/dynamic_shapes.cpython-310.pyc
ADDED
|
Binary file (8.93 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema.cpython-310.pyc
ADDED
|
Binary file (10.3 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema_check.cpython-310.pyc
ADDED
|
Binary file (8.33 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/serialize.cpython-310.pyc
ADDED
|
Binary file (77.7 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/serde/__pycache__/union.cpython-310.pyc
ADDED
|
Binary file (3.38 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/_export/serde/dynamic_shapes.py
ADDED
|
@@ -0,0 +1,321 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import dataclasses
|
| 2 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch._dynamo.exc import UserError, UserErrorType
|
| 6 |
+
from torch.export.dynamic_shapes import (
|
| 7 |
+
_check_dynamic_shapes,
|
| 8 |
+
_DerivedDim,
|
| 9 |
+
_Dim,
|
| 10 |
+
_DimHint,
|
| 11 |
+
_tree_map_with_path,
|
| 12 |
+
Dim,
|
| 13 |
+
)
|
| 14 |
+
from torch.utils._pytree import tree_map
|
| 15 |
+
|
| 16 |
+
from .serialize import _dataclass_to_dict
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@dataclasses.dataclass
|
| 20 |
+
class RootDim:
|
| 21 |
+
"""
|
| 22 |
+
This represents a _Dim object.
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
min: int
|
| 26 |
+
max: Union[int, None]
|
| 27 |
+
derived: List[str]
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@dataclasses.dataclass
|
| 31 |
+
class DynamicShapesSpec:
|
| 32 |
+
"""
|
| 33 |
+
This stores a dynamic_shapes spec for de/serialization.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None]
|
| 37 |
+
dims: Dict[str, RootDim]
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _postprocess_serialized_shapes(
|
| 41 |
+
dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None],
|
| 42 |
+
dims: Dict[str, Dict[str, Union[int, List[str], None]]],
|
| 43 |
+
to_dict: Optional[bool] = False,
|
| 44 |
+
) -> Union[DynamicShapesSpec, Dict[str, Any]]:
|
| 45 |
+
"""
|
| 46 |
+
Sorts dims and dumps to dictionary format.
|
| 47 |
+
"""
|
| 48 |
+
from torch.utils._sympy.numbers import int_oo
|
| 49 |
+
|
| 50 |
+
dims = {
|
| 51 |
+
k: RootDim(
|
| 52 |
+
min=v["min"], # type: ignore[arg-type]
|
| 53 |
+
max=None if v["max"] is int_oo else v["max"], # type: ignore[arg-type]
|
| 54 |
+
derived=sorted(v["derived"]), # type: ignore[arg-type]
|
| 55 |
+
)
|
| 56 |
+
for k, v in sorted(dims.items())
|
| 57 |
+
}
|
| 58 |
+
spec = DynamicShapesSpec(dynamic_shapes=dynamic_shapes, dims=dims)
|
| 59 |
+
if to_dict:
|
| 60 |
+
return _dataclass_to_dict(spec)
|
| 61 |
+
else:
|
| 62 |
+
return spec
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _dump_dynamic_shapes(
|
| 66 |
+
dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None],
|
| 67 |
+
args: Tuple[Any],
|
| 68 |
+
kwargs: Optional[Dict[str, Any]] = None,
|
| 69 |
+
to_dict: Optional[bool] = False,
|
| 70 |
+
) -> Union[DynamicShapesSpec, Dict[str, Any]]:
|
| 71 |
+
"""
|
| 72 |
+
Utility function for dynamic shapes serialization, serializing a dynamic_shapes spec.
|
| 73 |
+
Returns a DynamicShapesSpec dataclass containing 2 fields, "dynamic_shapes" and "dims".
|
| 74 |
+
Uses args & kwargs to distinguish between tensor-level and dim-level specs (only for Nones).
|
| 75 |
+
|
| 76 |
+
dynamic_shapes: A pytree structure mirroring the dynamic_shapes input to export():
|
| 77 |
+
- Each tensor input is represented with a list of values, non-tensor inputs with None.
|
| 78 |
+
- dynamic dimensions (i.e. symbols) in tensors and Dim enums are represented with strings.
|
| 79 |
+
- static dimensions are represented with ints.
|
| 80 |
+
|
| 81 |
+
dims: A dictionary mapping each symbol name to the min/max range and derived dim names.
|
| 82 |
+
|
| 83 |
+
For example:
|
| 84 |
+
```
|
| 85 |
+
dx = Dim("dx", min=4, max=16)
|
| 86 |
+
dy = dx + 1
|
| 87 |
+
|
| 88 |
+
inputs = (
|
| 89 |
+
[
|
| 90 |
+
torch.randn(4, 4),
|
| 91 |
+
torch.randn(5, 4),
|
| 92 |
+
],
|
| 93 |
+
torch.randn(4),
|
| 94 |
+
torch.randn(4, 4),
|
| 95 |
+
"hello",
|
| 96 |
+
)
|
| 97 |
+
dynamic_shapes = {
|
| 98 |
+
"a": [
|
| 99 |
+
(dx, 4),
|
| 100 |
+
(dy, 4),
|
| 101 |
+
],
|
| 102 |
+
"b": (Dim.STATIC,),
|
| 103 |
+
"c": None,
|
| 104 |
+
"d": None,
|
| 105 |
+
}
|
| 106 |
+
out = _dump_dynamic_shapes(dynamic_shapes, inputs, to_dict=True)
|
| 107 |
+
```
|
| 108 |
+
would generate the following output:
|
| 109 |
+
```
|
| 110 |
+
{
|
| 111 |
+
'dynamic_shapes': (
|
| 112 |
+
[
|
| 113 |
+
['dx', 4],
|
| 114 |
+
['dx + 1', 4],
|
| 115 |
+
],
|
| 116 |
+
['_DimHint.STATIC'],
|
| 117 |
+
['_DimHint.STATIC', '_DimHint.STATIC'],
|
| 118 |
+
None,
|
| 119 |
+
),
|
| 120 |
+
'dims': {
|
| 121 |
+
'dx': {
|
| 122 |
+
'min': 4,
|
| 123 |
+
'max': 16,
|
| 124 |
+
'derived': ['dx + 1'],
|
| 125 |
+
},
|
| 126 |
+
},
|
| 127 |
+
}
|
| 128 |
+
```
|
| 129 |
+
"""
|
| 130 |
+
dims: Dict[str, Dict[str, Any]] = {}
|
| 131 |
+
|
| 132 |
+
def _standardize_shapes(path, tensor, shape): # type: ignore[no-untyped-def]
|
| 133 |
+
"""
|
| 134 |
+
Helps standardize the dynamic_shapes tree structure we serialize,
|
| 135 |
+
returning lists for each tensor shape, handling tensor-level Nones.
|
| 136 |
+
"""
|
| 137 |
+
if not isinstance(tensor, torch.Tensor):
|
| 138 |
+
return None
|
| 139 |
+
if shape is None:
|
| 140 |
+
return [Dim.STATIC] * len(tensor.shape) # type: ignore[attr-defined]
|
| 141 |
+
|
| 142 |
+
out = []
|
| 143 |
+
if isinstance(shape, dict):
|
| 144 |
+
for i, s in enumerate(tensor.shape):
|
| 145 |
+
out.append(s if shape.get(i) is None else shape.get(i))
|
| 146 |
+
else:
|
| 147 |
+
assert isinstance(shape, (tuple, list))
|
| 148 |
+
for i, s in enumerate(tensor.shape):
|
| 149 |
+
out.append(s if shape[i] is None else shape[i])
|
| 150 |
+
return out
|
| 151 |
+
|
| 152 |
+
def _track_dim_from_dims(
|
| 153 |
+
val: Union[None, int, _DimHint, _Dim]
|
| 154 |
+
) -> Union[None, int, str]:
|
| 155 |
+
"""
|
| 156 |
+
Tracks dims, ranges, derived dims from the standardized dynamic_shapes spec.
|
| 157 |
+
"""
|
| 158 |
+
if val is None or isinstance(val, int): # non-tensor input or static
|
| 159 |
+
return val
|
| 160 |
+
if isinstance(val, _DimHint): # store enum as string
|
| 161 |
+
return val.__class__.__name__ + "." + val.name
|
| 162 |
+
|
| 163 |
+
assert isinstance(val, _Dim)
|
| 164 |
+
|
| 165 |
+
# track root dim
|
| 166 |
+
root = val.root if isinstance(val, _DerivedDim) else val # type: ignore[attr-defined]
|
| 167 |
+
if root.__name__ not in dims:
|
| 168 |
+
dims[root.__name__] = {
|
| 169 |
+
"min": root.min,
|
| 170 |
+
"max": root.max,
|
| 171 |
+
"derived": set(),
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
# track derived dims
|
| 175 |
+
if isinstance(val, _DerivedDim):
|
| 176 |
+
dims[root.__name__]["derived"].add(val.__name__)
|
| 177 |
+
|
| 178 |
+
return val.__name__
|
| 179 |
+
|
| 180 |
+
if dynamic_shapes is None:
|
| 181 |
+
return {"dynamic_shapes": None, "dims": {}}
|
| 182 |
+
|
| 183 |
+
# convert to tuple of specs, for each arg/kwarg
|
| 184 |
+
kwargs = kwargs or {}
|
| 185 |
+
if isinstance(dynamic_shapes, dict):
|
| 186 |
+
dynamic_shapes = dynamic_shapes.values() # type: ignore[assignment]
|
| 187 |
+
dynamic_shapes = tuple(dynamic_shapes)
|
| 188 |
+
combined_args = tuple(args) + tuple(kwargs.values())
|
| 189 |
+
|
| 190 |
+
# run same check when we're processing shapes for export - is this too lazy?
|
| 191 |
+
_check_dynamic_shapes(dict(enumerate(combined_args)), dynamic_shapes) # type: ignore[arg-type]
|
| 192 |
+
|
| 193 |
+
tree_shapes = _tree_map_with_path(
|
| 194 |
+
_standardize_shapes, combined_args, dynamic_shapes, tree_name="inputs"
|
| 195 |
+
)
|
| 196 |
+
serialized_shapes = tree_map(_track_dim_from_dims, tree_shapes)
|
| 197 |
+
return _postprocess_serialized_shapes(serialized_shapes, dims, to_dict=to_dict)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def _load_dynamic_shapes(
|
| 201 |
+
spec: Union[DynamicShapesSpec, Dict[str, Any]],
|
| 202 |
+
from_dict: Optional[bool] = False,
|
| 203 |
+
) -> Union[Dict[str, Any], Tuple[Any], List[Any], None]:
|
| 204 |
+
"""
|
| 205 |
+
Utility function for dynamic shapes serialization.
|
| 206 |
+
Deserializes a DynamicShapesSpec or corresponding dictionary into a dynamic_shapes input to export().
|
| 207 |
+
"""
|
| 208 |
+
import sympy
|
| 209 |
+
|
| 210 |
+
from torch.fx.experimental.symbolic_shapes import _is_supported_equivalence
|
| 211 |
+
|
| 212 |
+
if from_dict:
|
| 213 |
+
if not isinstance(spec, dict):
|
| 214 |
+
raise UserError(
|
| 215 |
+
UserErrorType.INVALID_INPUT,
|
| 216 |
+
f"With from_dict=True, expected `spec` to be a dict, got {type(spec)}",
|
| 217 |
+
)
|
| 218 |
+
if sorted(spec.keys()) != ["dims", "dynamic_shapes"]:
|
| 219 |
+
raise UserError(
|
| 220 |
+
UserErrorType.INVALID_INPUT,
|
| 221 |
+
"With from_dict=True, expected `spec` to have keys `dims` and `dynamic_shapes`, "
|
| 222 |
+
f"instead found {spec.keys()}",
|
| 223 |
+
)
|
| 224 |
+
dims = {}
|
| 225 |
+
for k, v in spec["dims"].items():
|
| 226 |
+
if not isinstance(k, str):
|
| 227 |
+
raise UserError(
|
| 228 |
+
UserErrorType.INVALID_INPUT,
|
| 229 |
+
f"Expected `spec['dims']` keys to be strings for symbols, got key {type(k)}",
|
| 230 |
+
)
|
| 231 |
+
if sorted(v.keys()) != ["derived", "max", "min"]:
|
| 232 |
+
raise UserError(
|
| 233 |
+
UserErrorType.INVALID_INPUT,
|
| 234 |
+
f"Expected `spec['dims']` values to have keys `derived`, `max`, and `min`, "
|
| 235 |
+
f"instead found {v.keys()}",
|
| 236 |
+
)
|
| 237 |
+
if not isinstance(v["min"], int):
|
| 238 |
+
raise UserError(
|
| 239 |
+
UserErrorType.INVALID_INPUT,
|
| 240 |
+
f"Expected dims in `spec['dims']` to map `min` to an int, got {k}: {v['min']}",
|
| 241 |
+
)
|
| 242 |
+
if not isinstance(v["max"], int) or v["max"] is None:
|
| 243 |
+
raise UserError(
|
| 244 |
+
UserErrorType.INVALID_INPUT,
|
| 245 |
+
f"Expected dims in `spec['dims']` to map `max` to an int or None, got {k}: {v['max']}",
|
| 246 |
+
)
|
| 247 |
+
if not isinstance(v["derived"], list) or any(
|
| 248 |
+
not isinstance(d, str) for d in v["derived"]
|
| 249 |
+
):
|
| 250 |
+
raise UserError(
|
| 251 |
+
UserErrorType.INVALID_INPUT,
|
| 252 |
+
"Expected dims in `spec['dims']` to map `derived` to a list of derived expressions, "
|
| 253 |
+
f"got {k}: {v['derived']}",
|
| 254 |
+
)
|
| 255 |
+
dims[k] = RootDim(**v)
|
| 256 |
+
dynamic_shapes = spec["dynamic_shapes"]
|
| 257 |
+
else:
|
| 258 |
+
if not isinstance(spec, DynamicShapesSpec):
|
| 259 |
+
raise UserError(
|
| 260 |
+
UserErrorType.INVALID_INPUT,
|
| 261 |
+
f"Expected `spec` to be a DynamicShapesSpec, got {type(spec)}",
|
| 262 |
+
)
|
| 263 |
+
dims = spec.dims
|
| 264 |
+
dynamic_shapes = spec.dynamic_shapes
|
| 265 |
+
|
| 266 |
+
if dynamic_shapes is None:
|
| 267 |
+
return None
|
| 268 |
+
|
| 269 |
+
dim_cache = {}
|
| 270 |
+
for name, info in dims.items():
|
| 271 |
+
symbol = sympy.sympify(name)
|
| 272 |
+
if not isinstance(symbol, sympy.Symbol):
|
| 273 |
+
raise UserError(
|
| 274 |
+
UserErrorType.INVALID_INPUT,
|
| 275 |
+
f"Expected `spec['dims']` keys to be symbols, got {name}",
|
| 276 |
+
)
|
| 277 |
+
dim_cache[name] = Dim(name, min=info.min, max=info.max) # cache root dim
|
| 278 |
+
for _expr in info.derived:
|
| 279 |
+
expr = sympy.sympify(_expr)
|
| 280 |
+
if len(expr.free_symbols) != 1 or symbol not in expr.free_symbols:
|
| 281 |
+
raise UserError(
|
| 282 |
+
UserErrorType.INVALID_INPUT,
|
| 283 |
+
f"Expected derived expressions in to have {name} as the only free symbol, got {expr}",
|
| 284 |
+
)
|
| 285 |
+
if not _is_supported_equivalence(expr):
|
| 286 |
+
raise UserError(
|
| 287 |
+
UserErrorType.INVALID_INPUT,
|
| 288 |
+
f"Expected derived expressions to be linear expressions, got {expr}",
|
| 289 |
+
)
|
| 290 |
+
modulus, remainder = sympy.polys.polytools.div(expr, symbol)
|
| 291 |
+
ddim = dim_cache[name]
|
| 292 |
+
if modulus != 1:
|
| 293 |
+
ddim = int(modulus) * ddim
|
| 294 |
+
if remainder != 0:
|
| 295 |
+
ddim = ddim + int(remainder)
|
| 296 |
+
dim_cache[_expr] = ddim # cache derived dims
|
| 297 |
+
|
| 298 |
+
def deserialize_shape(
|
| 299 |
+
val: Union[None, int, str]
|
| 300 |
+
) -> Union[None, int, _Dim, _DimHint]:
|
| 301 |
+
if val is None or isinstance(val, int):
|
| 302 |
+
return val
|
| 303 |
+
elif val == "_DimHint.AUTO":
|
| 304 |
+
return _DimHint.AUTO
|
| 305 |
+
elif val == "_DimHint.STATIC":
|
| 306 |
+
return _DimHint.STATIC
|
| 307 |
+
if not isinstance(val, str):
|
| 308 |
+
raise UserError(
|
| 309 |
+
UserErrorType.INVALID_INPUT,
|
| 310 |
+
"Expected leaves in `spec['dynamic_shapes']` to be ints, None, Dim.AUTO/STATIC, symbols, "
|
| 311 |
+
f" or derived expressions, got {val}",
|
| 312 |
+
)
|
| 313 |
+
if val not in dim_cache:
|
| 314 |
+
raise UserError(
|
| 315 |
+
UserErrorType.INVALID_INPUT,
|
| 316 |
+
"Expected dims in `spec['dynamic_shapes']` to be tracked in `spec['dims']`, "
|
| 317 |
+
f"got {val} which is not in {dims.keys()}",
|
| 318 |
+
)
|
| 319 |
+
return dim_cache[val]
|
| 320 |
+
|
| 321 |
+
return tree_map(deserialize_shape, dynamic_shapes)
|
janus/lib/python3.10/site-packages/torch/_export/serde/schema.py
ADDED
|
@@ -0,0 +1,381 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NOTE: This is a placeholder for iterating on export serialization schema design.
|
| 2 |
+
# Anything is subject to change and no guarantee is provided at this point.
|
| 3 |
+
|
| 4 |
+
from dataclasses import dataclass, field
|
| 5 |
+
from enum import IntEnum
|
| 6 |
+
from typing import Dict, List, Optional, Tuple
|
| 7 |
+
|
| 8 |
+
from torch._export.serde.union import _Union
|
| 9 |
+
|
| 10 |
+
# NOTE: Please update this value if any modifications are made to the schema
|
| 11 |
+
SCHEMA_VERSION = (7, 3)
|
| 12 |
+
TREESPEC_VERSION = 1
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class ScalarType(IntEnum):
|
| 16 |
+
UNKNOWN = 0
|
| 17 |
+
BYTE = 1
|
| 18 |
+
CHAR = 2
|
| 19 |
+
SHORT = 3
|
| 20 |
+
INT = 4
|
| 21 |
+
LONG = 5
|
| 22 |
+
HALF = 6
|
| 23 |
+
FLOAT = 7
|
| 24 |
+
DOUBLE = 8
|
| 25 |
+
COMPLEXHALF = 9
|
| 26 |
+
COMPLEXFLOAT = 10
|
| 27 |
+
COMPLEXDOUBLE = 11
|
| 28 |
+
BOOL = 12
|
| 29 |
+
BFLOAT16 = 13
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class Layout(IntEnum):
|
| 33 |
+
Unknown = 0
|
| 34 |
+
SparseCoo = 1
|
| 35 |
+
SparseCsr = 2
|
| 36 |
+
SparseCsc = 3
|
| 37 |
+
SparseBsr = 4
|
| 38 |
+
SparseBsc = 5
|
| 39 |
+
_mkldnn = 6
|
| 40 |
+
Strided = 7
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class MemoryFormat(IntEnum):
|
| 44 |
+
Unknown = 0
|
| 45 |
+
ContiguousFormat = 1
|
| 46 |
+
ChannelsLast = 2
|
| 47 |
+
ChannelsLast3d = 3
|
| 48 |
+
PreserveFormat = 4
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@dataclass
|
| 52 |
+
class Device:
|
| 53 |
+
type: str
|
| 54 |
+
index: Optional[int] = None
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@dataclass(repr=False)
|
| 58 |
+
class SymExprHint(_Union):
|
| 59 |
+
as_int: int
|
| 60 |
+
as_float: float
|
| 61 |
+
as_bool: bool
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# This is for storing the symbolic expressions behind symints/symfloats/symbools
|
| 65 |
+
# For example, we can get something like
|
| 66 |
+
# SymExpr(expr_str="s0 + s1", hint=SymExprHint(as_int=4)
|
| 67 |
+
# if we also have the hint that s0 and s1 are both 2.
|
| 68 |
+
@dataclass
|
| 69 |
+
class SymExpr:
|
| 70 |
+
expr_str: str
|
| 71 |
+
hint: Optional[SymExprHint] = None
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@dataclass(repr=False)
|
| 75 |
+
class SymInt(_Union):
|
| 76 |
+
as_expr: SymExpr
|
| 77 |
+
as_int: int
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@dataclass(repr=False)
|
| 81 |
+
class SymBool(_Union):
|
| 82 |
+
as_expr: SymExpr
|
| 83 |
+
as_bool: bool
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@dataclass
|
| 87 |
+
class TensorMeta:
|
| 88 |
+
dtype: ScalarType
|
| 89 |
+
sizes: List[SymInt]
|
| 90 |
+
requires_grad: bool
|
| 91 |
+
device: Device
|
| 92 |
+
strides: List[SymInt]
|
| 93 |
+
storage_offset: SymInt
|
| 94 |
+
layout: Layout
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
# In most cases we will use the "as_name" field to store arguments which are
|
| 98 |
+
# SymInts.
|
| 99 |
+
# The "as_int" field is used in the case where we have a list containing a mix
|
| 100 |
+
# of SymInt and ints (ex. [1, s0, ...]). We will serialize this type of list to
|
| 101 |
+
# be List[SymIntArgument] and map the SymInts to the "as_name" field, and ints
|
| 102 |
+
# to the "as_int" field.
|
| 103 |
+
@dataclass(repr=False)
|
| 104 |
+
class SymIntArgument(_Union):
|
| 105 |
+
as_name: str
|
| 106 |
+
as_int: int
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
# In most cases we will use the "as_name" field to store arguments which are
|
| 110 |
+
# SymBools.
|
| 111 |
+
# The "as_bool" field is used in the case where we have a list containing a mix
|
| 112 |
+
# of SymBool and bools (ex. [True, i0, ...]). We will serialize this type of list to
|
| 113 |
+
# be List[SymboolArgument] and map the SymBools to the "as_name" field, and bools
|
| 114 |
+
# to the "as_bool" field.
|
| 115 |
+
@dataclass(repr=False)
|
| 116 |
+
class SymBoolArgument(_Union):
|
| 117 |
+
as_name: str
|
| 118 |
+
as_bool: bool
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
@dataclass
|
| 122 |
+
class TensorArgument:
|
| 123 |
+
name: str
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
@dataclass
|
| 127 |
+
class TokenArgument:
|
| 128 |
+
name: str
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
# This is use for storing the contents of a list which contain optional tensors
|
| 132 |
+
# (Tensor?[], ex. [Tensor, None, ...]), where the list will be serialized to the
|
| 133 |
+
# type List[OptionalTensorArgument], with tensor values seiralized to the
|
| 134 |
+
# "as_tensor" field, and None values serialized to the "as_none" field.
|
| 135 |
+
@dataclass(repr=False)
|
| 136 |
+
class OptionalTensorArgument(_Union):
|
| 137 |
+
as_tensor: TensorArgument
|
| 138 |
+
as_none: Tuple[()]
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@dataclass
|
| 142 |
+
class GraphArgument:
|
| 143 |
+
name: str
|
| 144 |
+
graph: 'Graph'
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
@dataclass
|
| 148 |
+
class CustomObjArgument:
|
| 149 |
+
name: str
|
| 150 |
+
class_fqn: str
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
# This is actually a union type
|
| 154 |
+
@dataclass(repr=False)
|
| 155 |
+
class Argument(_Union):
|
| 156 |
+
as_none: Tuple[()]
|
| 157 |
+
as_tensor: TensorArgument
|
| 158 |
+
as_tensors: List[TensorArgument]
|
| 159 |
+
as_int: int
|
| 160 |
+
as_ints: List[int]
|
| 161 |
+
as_float: float
|
| 162 |
+
as_floats: List[float]
|
| 163 |
+
as_string: str
|
| 164 |
+
as_strings: List[str]
|
| 165 |
+
as_sym_int: SymIntArgument
|
| 166 |
+
as_sym_ints: List[SymIntArgument]
|
| 167 |
+
as_scalar_type: ScalarType
|
| 168 |
+
as_memory_format: MemoryFormat
|
| 169 |
+
as_layout: Layout
|
| 170 |
+
as_device: Device
|
| 171 |
+
as_bool: bool
|
| 172 |
+
as_bools: List[bool]
|
| 173 |
+
as_sym_bool: SymBoolArgument
|
| 174 |
+
as_sym_bools: List[SymBoolArgument]
|
| 175 |
+
as_graph: GraphArgument
|
| 176 |
+
as_optional_tensors: List[OptionalTensorArgument]
|
| 177 |
+
as_custom_obj: CustomObjArgument
|
| 178 |
+
as_operator: str
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
@dataclass
|
| 182 |
+
class NamedArgument:
|
| 183 |
+
# Argument name from the operator schema
|
| 184 |
+
name: str
|
| 185 |
+
arg: Argument
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
@dataclass
|
| 189 |
+
class Node:
|
| 190 |
+
target: str
|
| 191 |
+
inputs: List[NamedArgument]
|
| 192 |
+
outputs: List[Argument]
|
| 193 |
+
metadata: Dict[str, str]
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
@dataclass
|
| 197 |
+
class Graph:
|
| 198 |
+
inputs: List[Argument]
|
| 199 |
+
outputs: List[Argument]
|
| 200 |
+
nodes: List[Node]
|
| 201 |
+
tensor_values: Dict[str, TensorMeta]
|
| 202 |
+
sym_int_values: Dict[str, SymInt]
|
| 203 |
+
sym_bool_values: Dict[str, SymBool]
|
| 204 |
+
# This is for deserializing the submodule graphs from higher order ops
|
| 205 |
+
# (ex. cond, map) where single tensor returns will just return a single
|
| 206 |
+
# tensor, rather than following export schema and returning a singleton
|
| 207 |
+
# list.
|
| 208 |
+
is_single_tensor_return: bool = False
|
| 209 |
+
custom_obj_values: Dict[str, CustomObjArgument] = field(default_factory=dict)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
@dataclass
|
| 213 |
+
class UserInputSpec:
|
| 214 |
+
# Actually, only tensors and SymInts are allowed here
|
| 215 |
+
arg: Argument
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
@dataclass(repr=False)
|
| 219 |
+
class ConstantValue(_Union):
|
| 220 |
+
as_none: Tuple[()]
|
| 221 |
+
as_int: int
|
| 222 |
+
as_float: float
|
| 223 |
+
as_string: str
|
| 224 |
+
as_bool: bool
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
@dataclass
|
| 228 |
+
class ConstantInputSpec:
|
| 229 |
+
name: str
|
| 230 |
+
value: ConstantValue
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
@dataclass
|
| 234 |
+
class InputToParameterSpec:
|
| 235 |
+
arg: TensorArgument
|
| 236 |
+
parameter_name: str
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
@dataclass
|
| 240 |
+
class InputToBufferSpec:
|
| 241 |
+
arg: TensorArgument
|
| 242 |
+
buffer_name: str
|
| 243 |
+
persistent: bool
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
@dataclass
|
| 248 |
+
class InputToTensorConstantSpec:
|
| 249 |
+
arg: TensorArgument
|
| 250 |
+
tensor_constant_name: str
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
@dataclass
|
| 254 |
+
class InputToCustomObjSpec:
|
| 255 |
+
arg: CustomObjArgument
|
| 256 |
+
custom_obj_name: str
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
@dataclass
|
| 260 |
+
class InputTokenSpec:
|
| 261 |
+
arg: TokenArgument
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
@dataclass(repr=False)
|
| 265 |
+
class InputSpec(_Union):
|
| 266 |
+
user_input: UserInputSpec
|
| 267 |
+
parameter: InputToParameterSpec
|
| 268 |
+
buffer: InputToBufferSpec
|
| 269 |
+
tensor_constant: InputToTensorConstantSpec
|
| 270 |
+
custom_obj: InputToCustomObjSpec
|
| 271 |
+
token: InputTokenSpec
|
| 272 |
+
constant_input: ConstantInputSpec
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
@dataclass
|
| 276 |
+
class UserOutputSpec:
|
| 277 |
+
arg: Argument
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
@dataclass
|
| 281 |
+
class LossOutputSpec:
|
| 282 |
+
arg: TensorArgument
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
@dataclass
|
| 286 |
+
class BufferMutationSpec:
|
| 287 |
+
arg: TensorArgument
|
| 288 |
+
buffer_name: str
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
@dataclass
|
| 292 |
+
class GradientToParameterSpec:
|
| 293 |
+
arg: TensorArgument
|
| 294 |
+
parameter_name: str
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
@dataclass
|
| 298 |
+
class GradientToUserInputSpec:
|
| 299 |
+
arg: TensorArgument
|
| 300 |
+
user_input_name: str
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
@dataclass
|
| 304 |
+
class UserInputMutationSpec:
|
| 305 |
+
arg: TensorArgument
|
| 306 |
+
user_input_name: str
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
@dataclass
|
| 310 |
+
class OutputTokenSpec:
|
| 311 |
+
arg: TokenArgument
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
@dataclass(repr=False)
|
| 315 |
+
class OutputSpec(_Union):
|
| 316 |
+
user_output: UserOutputSpec
|
| 317 |
+
loss_output: LossOutputSpec
|
| 318 |
+
buffer_mutation: BufferMutationSpec
|
| 319 |
+
gradient_to_parameter: GradientToParameterSpec
|
| 320 |
+
gradient_to_user_input: GradientToUserInputSpec
|
| 321 |
+
user_input_mutation: UserInputMutationSpec
|
| 322 |
+
token: OutputTokenSpec
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
@dataclass
|
| 326 |
+
class GraphSignature:
|
| 327 |
+
input_specs: List[InputSpec]
|
| 328 |
+
output_specs: List[OutputSpec]
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
@dataclass
|
| 332 |
+
class RangeConstraint:
|
| 333 |
+
min_val: int
|
| 334 |
+
max_val: int
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
@dataclass
|
| 338 |
+
class ModuleCallSignature:
|
| 339 |
+
inputs: List[Argument]
|
| 340 |
+
outputs: List[Argument]
|
| 341 |
+
|
| 342 |
+
# These are serialized by calling pytree.treespec_loads
|
| 343 |
+
# And deserialized by calling pytree.treespec_dumps
|
| 344 |
+
in_spec: str
|
| 345 |
+
out_spec: str
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
@dataclass
|
| 349 |
+
class ModuleCallEntry:
|
| 350 |
+
fqn: str
|
| 351 |
+
signature: Optional[ModuleCallSignature] = None
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
@dataclass
|
| 355 |
+
class GraphModule:
|
| 356 |
+
graph: Graph
|
| 357 |
+
signature: GraphSignature
|
| 358 |
+
# This is used for unflattening, by tracking the calling structure of all of
|
| 359 |
+
# the modules in order to unflatten the modules back to the eager calling
|
| 360 |
+
# conventions.
|
| 361 |
+
module_call_graph: List[ModuleCallEntry]
|
| 362 |
+
metadata: Dict[str, str] = field(default_factory=dict)
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
# Invariant: Every time a change is made to the schema, one of the versions
|
| 366 |
+
# should be upadted.
|
| 367 |
+
@dataclass
|
| 368 |
+
class SchemaVersion:
|
| 369 |
+
major: int # Major version number is bumped every time a breaking change is made.
|
| 370 |
+
minor: int # Minor version number is bumped when a compatible change is made.
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
@dataclass
|
| 374 |
+
class ExportedProgram:
|
| 375 |
+
graph_module: GraphModule
|
| 376 |
+
# Key is the opset namespace (ex. aten), and value is the version number
|
| 377 |
+
opset_version: Dict[str, int]
|
| 378 |
+
range_constraints: Dict[str, RangeConstraint]
|
| 379 |
+
schema_version: SchemaVersion
|
| 380 |
+
verifiers: List[str] = field(default_factory=list)
|
| 381 |
+
torch_version: str = "<=2.4"
|
janus/lib/python3.10/site-packages/torch/_export/serde/schema.yaml
ADDED
|
@@ -0,0 +1,437 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @generated by update_schema.py
|
| 2 |
+
# checksum<<923abf371a1f8802cacb037d409d28273867777a98f6542fba28616c2b92b639>>
|
| 3 |
+
Argument:
|
| 4 |
+
kind: union
|
| 5 |
+
fields:
|
| 6 |
+
as_none:
|
| 7 |
+
type: Tuple[()]
|
| 8 |
+
as_tensor:
|
| 9 |
+
type: TensorArgument
|
| 10 |
+
as_tensors:
|
| 11 |
+
type: List[TensorArgument]
|
| 12 |
+
as_int:
|
| 13 |
+
type: int
|
| 14 |
+
as_ints:
|
| 15 |
+
type: List[int]
|
| 16 |
+
as_float:
|
| 17 |
+
type: float
|
| 18 |
+
as_floats:
|
| 19 |
+
type: List[float]
|
| 20 |
+
as_string:
|
| 21 |
+
type: str
|
| 22 |
+
as_strings:
|
| 23 |
+
type: List[str]
|
| 24 |
+
as_sym_int:
|
| 25 |
+
type: SymIntArgument
|
| 26 |
+
as_sym_ints:
|
| 27 |
+
type: List[SymIntArgument]
|
| 28 |
+
as_scalar_type:
|
| 29 |
+
type: ScalarType
|
| 30 |
+
as_memory_format:
|
| 31 |
+
type: MemoryFormat
|
| 32 |
+
as_layout:
|
| 33 |
+
type: Layout
|
| 34 |
+
as_device:
|
| 35 |
+
type: Device
|
| 36 |
+
as_bool:
|
| 37 |
+
type: bool
|
| 38 |
+
as_bools:
|
| 39 |
+
type: List[bool]
|
| 40 |
+
as_sym_bool:
|
| 41 |
+
type: SymBoolArgument
|
| 42 |
+
as_sym_bools:
|
| 43 |
+
type: List[SymBoolArgument]
|
| 44 |
+
as_graph:
|
| 45 |
+
type: GraphArgument
|
| 46 |
+
as_optional_tensors:
|
| 47 |
+
type: List[OptionalTensorArgument]
|
| 48 |
+
as_custom_obj:
|
| 49 |
+
type: CustomObjArgument
|
| 50 |
+
as_operator:
|
| 51 |
+
type: str
|
| 52 |
+
BufferMutationSpec:
|
| 53 |
+
kind: struct
|
| 54 |
+
fields:
|
| 55 |
+
arg:
|
| 56 |
+
type: TensorArgument
|
| 57 |
+
buffer_name:
|
| 58 |
+
type: str
|
| 59 |
+
ConstantInputSpec:
|
| 60 |
+
kind: struct
|
| 61 |
+
fields:
|
| 62 |
+
name:
|
| 63 |
+
type: str
|
| 64 |
+
value:
|
| 65 |
+
type: ConstantValue
|
| 66 |
+
ConstantValue:
|
| 67 |
+
kind: union
|
| 68 |
+
fields:
|
| 69 |
+
as_none:
|
| 70 |
+
type: Tuple[()]
|
| 71 |
+
as_int:
|
| 72 |
+
type: int
|
| 73 |
+
as_float:
|
| 74 |
+
type: float
|
| 75 |
+
as_string:
|
| 76 |
+
type: str
|
| 77 |
+
as_bool:
|
| 78 |
+
type: bool
|
| 79 |
+
CustomObjArgument:
|
| 80 |
+
kind: struct
|
| 81 |
+
fields:
|
| 82 |
+
name:
|
| 83 |
+
type: str
|
| 84 |
+
class_fqn:
|
| 85 |
+
type: str
|
| 86 |
+
Device:
|
| 87 |
+
kind: struct
|
| 88 |
+
fields:
|
| 89 |
+
type:
|
| 90 |
+
type: str
|
| 91 |
+
index:
|
| 92 |
+
type: Optional[int]
|
| 93 |
+
default: None
|
| 94 |
+
ExportedProgram:
|
| 95 |
+
kind: struct
|
| 96 |
+
fields:
|
| 97 |
+
graph_module:
|
| 98 |
+
type: GraphModule
|
| 99 |
+
opset_version:
|
| 100 |
+
type: Dict[str, int]
|
| 101 |
+
range_constraints:
|
| 102 |
+
type: Dict[str, RangeConstraint]
|
| 103 |
+
schema_version:
|
| 104 |
+
type: SchemaVersion
|
| 105 |
+
verifiers:
|
| 106 |
+
type: List[str]
|
| 107 |
+
default: '[]'
|
| 108 |
+
torch_version:
|
| 109 |
+
type: str
|
| 110 |
+
default: <=2.4
|
| 111 |
+
GradientToParameterSpec:
|
| 112 |
+
kind: struct
|
| 113 |
+
fields:
|
| 114 |
+
arg:
|
| 115 |
+
type: TensorArgument
|
| 116 |
+
parameter_name:
|
| 117 |
+
type: str
|
| 118 |
+
GradientToUserInputSpec:
|
| 119 |
+
kind: struct
|
| 120 |
+
fields:
|
| 121 |
+
arg:
|
| 122 |
+
type: TensorArgument
|
| 123 |
+
user_input_name:
|
| 124 |
+
type: str
|
| 125 |
+
Graph:
|
| 126 |
+
kind: struct
|
| 127 |
+
fields:
|
| 128 |
+
inputs:
|
| 129 |
+
type: List[Argument]
|
| 130 |
+
outputs:
|
| 131 |
+
type: List[Argument]
|
| 132 |
+
nodes:
|
| 133 |
+
type: List[Node]
|
| 134 |
+
tensor_values:
|
| 135 |
+
type: Dict[str, TensorMeta]
|
| 136 |
+
sym_int_values:
|
| 137 |
+
type: Dict[str, SymInt]
|
| 138 |
+
sym_bool_values:
|
| 139 |
+
type: Dict[str, SymBool]
|
| 140 |
+
is_single_tensor_return:
|
| 141 |
+
type: bool
|
| 142 |
+
default: 'False'
|
| 143 |
+
custom_obj_values:
|
| 144 |
+
type: Dict[str, CustomObjArgument]
|
| 145 |
+
default: '{}'
|
| 146 |
+
GraphArgument:
|
| 147 |
+
kind: struct
|
| 148 |
+
fields:
|
| 149 |
+
name:
|
| 150 |
+
type: str
|
| 151 |
+
graph:
|
| 152 |
+
type: Graph
|
| 153 |
+
GraphModule:
|
| 154 |
+
kind: struct
|
| 155 |
+
fields:
|
| 156 |
+
graph:
|
| 157 |
+
type: Graph
|
| 158 |
+
signature:
|
| 159 |
+
type: GraphSignature
|
| 160 |
+
module_call_graph:
|
| 161 |
+
type: List[ModuleCallEntry]
|
| 162 |
+
metadata:
|
| 163 |
+
type: Dict[str, str]
|
| 164 |
+
default: '{}'
|
| 165 |
+
GraphSignature:
|
| 166 |
+
kind: struct
|
| 167 |
+
fields:
|
| 168 |
+
input_specs:
|
| 169 |
+
type: List[InputSpec]
|
| 170 |
+
output_specs:
|
| 171 |
+
type: List[OutputSpec]
|
| 172 |
+
InputSpec:
|
| 173 |
+
kind: union
|
| 174 |
+
fields:
|
| 175 |
+
user_input:
|
| 176 |
+
type: UserInputSpec
|
| 177 |
+
parameter:
|
| 178 |
+
type: InputToParameterSpec
|
| 179 |
+
buffer:
|
| 180 |
+
type: InputToBufferSpec
|
| 181 |
+
tensor_constant:
|
| 182 |
+
type: InputToTensorConstantSpec
|
| 183 |
+
custom_obj:
|
| 184 |
+
type: InputToCustomObjSpec
|
| 185 |
+
token:
|
| 186 |
+
type: InputTokenSpec
|
| 187 |
+
constant_input:
|
| 188 |
+
type: ConstantInputSpec
|
| 189 |
+
InputToBufferSpec:
|
| 190 |
+
kind: struct
|
| 191 |
+
fields:
|
| 192 |
+
arg:
|
| 193 |
+
type: TensorArgument
|
| 194 |
+
buffer_name:
|
| 195 |
+
type: str
|
| 196 |
+
persistent:
|
| 197 |
+
type: bool
|
| 198 |
+
InputToCustomObjSpec:
|
| 199 |
+
kind: struct
|
| 200 |
+
fields:
|
| 201 |
+
arg:
|
| 202 |
+
type: CustomObjArgument
|
| 203 |
+
custom_obj_name:
|
| 204 |
+
type: str
|
| 205 |
+
InputToParameterSpec:
|
| 206 |
+
kind: struct
|
| 207 |
+
fields:
|
| 208 |
+
arg:
|
| 209 |
+
type: TensorArgument
|
| 210 |
+
parameter_name:
|
| 211 |
+
type: str
|
| 212 |
+
InputToTensorConstantSpec:
|
| 213 |
+
kind: struct
|
| 214 |
+
fields:
|
| 215 |
+
arg:
|
| 216 |
+
type: TensorArgument
|
| 217 |
+
tensor_constant_name:
|
| 218 |
+
type: str
|
| 219 |
+
InputTokenSpec:
|
| 220 |
+
kind: struct
|
| 221 |
+
fields:
|
| 222 |
+
arg:
|
| 223 |
+
type: TokenArgument
|
| 224 |
+
Layout:
|
| 225 |
+
kind: enum
|
| 226 |
+
fields:
|
| 227 |
+
Unknown: 0
|
| 228 |
+
SparseCoo: 1
|
| 229 |
+
SparseCsr: 2
|
| 230 |
+
SparseCsc: 3
|
| 231 |
+
SparseBsr: 4
|
| 232 |
+
SparseBsc: 5
|
| 233 |
+
_mkldnn: 6
|
| 234 |
+
Strided: 7
|
| 235 |
+
LossOutputSpec:
|
| 236 |
+
kind: struct
|
| 237 |
+
fields:
|
| 238 |
+
arg:
|
| 239 |
+
type: TensorArgument
|
| 240 |
+
MemoryFormat:
|
| 241 |
+
kind: enum
|
| 242 |
+
fields:
|
| 243 |
+
Unknown: 0
|
| 244 |
+
ContiguousFormat: 1
|
| 245 |
+
ChannelsLast: 2
|
| 246 |
+
ChannelsLast3d: 3
|
| 247 |
+
PreserveFormat: 4
|
| 248 |
+
ModuleCallEntry:
|
| 249 |
+
kind: struct
|
| 250 |
+
fields:
|
| 251 |
+
fqn:
|
| 252 |
+
type: str
|
| 253 |
+
signature:
|
| 254 |
+
type: Optional[ModuleCallSignature]
|
| 255 |
+
default: None
|
| 256 |
+
ModuleCallSignature:
|
| 257 |
+
kind: struct
|
| 258 |
+
fields:
|
| 259 |
+
inputs:
|
| 260 |
+
type: List[Argument]
|
| 261 |
+
outputs:
|
| 262 |
+
type: List[Argument]
|
| 263 |
+
in_spec:
|
| 264 |
+
type: str
|
| 265 |
+
out_spec:
|
| 266 |
+
type: str
|
| 267 |
+
NamedArgument:
|
| 268 |
+
kind: struct
|
| 269 |
+
fields:
|
| 270 |
+
name:
|
| 271 |
+
type: str
|
| 272 |
+
arg:
|
| 273 |
+
type: Argument
|
| 274 |
+
Node:
|
| 275 |
+
kind: struct
|
| 276 |
+
fields:
|
| 277 |
+
target:
|
| 278 |
+
type: str
|
| 279 |
+
inputs:
|
| 280 |
+
type: List[NamedArgument]
|
| 281 |
+
outputs:
|
| 282 |
+
type: List[Argument]
|
| 283 |
+
metadata:
|
| 284 |
+
type: Dict[str, str]
|
| 285 |
+
OptionalTensorArgument:
|
| 286 |
+
kind: union
|
| 287 |
+
fields:
|
| 288 |
+
as_tensor:
|
| 289 |
+
type: TensorArgument
|
| 290 |
+
as_none:
|
| 291 |
+
type: Tuple[()]
|
| 292 |
+
OutputSpec:
|
| 293 |
+
kind: union
|
| 294 |
+
fields:
|
| 295 |
+
user_output:
|
| 296 |
+
type: UserOutputSpec
|
| 297 |
+
loss_output:
|
| 298 |
+
type: LossOutputSpec
|
| 299 |
+
buffer_mutation:
|
| 300 |
+
type: BufferMutationSpec
|
| 301 |
+
gradient_to_parameter:
|
| 302 |
+
type: GradientToParameterSpec
|
| 303 |
+
gradient_to_user_input:
|
| 304 |
+
type: GradientToUserInputSpec
|
| 305 |
+
user_input_mutation:
|
| 306 |
+
type: UserInputMutationSpec
|
| 307 |
+
token:
|
| 308 |
+
type: OutputTokenSpec
|
| 309 |
+
OutputTokenSpec:
|
| 310 |
+
kind: struct
|
| 311 |
+
fields:
|
| 312 |
+
arg:
|
| 313 |
+
type: TokenArgument
|
| 314 |
+
RangeConstraint:
|
| 315 |
+
kind: struct
|
| 316 |
+
fields:
|
| 317 |
+
min_val:
|
| 318 |
+
type: int
|
| 319 |
+
max_val:
|
| 320 |
+
type: int
|
| 321 |
+
ScalarType:
|
| 322 |
+
kind: enum
|
| 323 |
+
fields:
|
| 324 |
+
UNKNOWN: 0
|
| 325 |
+
BYTE: 1
|
| 326 |
+
CHAR: 2
|
| 327 |
+
SHORT: 3
|
| 328 |
+
INT: 4
|
| 329 |
+
LONG: 5
|
| 330 |
+
HALF: 6
|
| 331 |
+
FLOAT: 7
|
| 332 |
+
DOUBLE: 8
|
| 333 |
+
COMPLEXHALF: 9
|
| 334 |
+
COMPLEXFLOAT: 10
|
| 335 |
+
COMPLEXDOUBLE: 11
|
| 336 |
+
BOOL: 12
|
| 337 |
+
BFLOAT16: 13
|
| 338 |
+
SchemaVersion:
|
| 339 |
+
kind: struct
|
| 340 |
+
fields:
|
| 341 |
+
major:
|
| 342 |
+
type: int
|
| 343 |
+
minor:
|
| 344 |
+
type: int
|
| 345 |
+
SymBool:
|
| 346 |
+
kind: union
|
| 347 |
+
fields:
|
| 348 |
+
as_expr:
|
| 349 |
+
type: SymExpr
|
| 350 |
+
as_bool:
|
| 351 |
+
type: bool
|
| 352 |
+
SymBoolArgument:
|
| 353 |
+
kind: union
|
| 354 |
+
fields:
|
| 355 |
+
as_name:
|
| 356 |
+
type: str
|
| 357 |
+
as_bool:
|
| 358 |
+
type: bool
|
| 359 |
+
SymExpr:
|
| 360 |
+
kind: struct
|
| 361 |
+
fields:
|
| 362 |
+
expr_str:
|
| 363 |
+
type: str
|
| 364 |
+
hint:
|
| 365 |
+
type: Optional[SymExprHint]
|
| 366 |
+
default: None
|
| 367 |
+
SymExprHint:
|
| 368 |
+
kind: union
|
| 369 |
+
fields:
|
| 370 |
+
as_int:
|
| 371 |
+
type: int
|
| 372 |
+
as_float:
|
| 373 |
+
type: float
|
| 374 |
+
as_bool:
|
| 375 |
+
type: bool
|
| 376 |
+
SymInt:
|
| 377 |
+
kind: union
|
| 378 |
+
fields:
|
| 379 |
+
as_expr:
|
| 380 |
+
type: SymExpr
|
| 381 |
+
as_int:
|
| 382 |
+
type: int
|
| 383 |
+
SymIntArgument:
|
| 384 |
+
kind: union
|
| 385 |
+
fields:
|
| 386 |
+
as_name:
|
| 387 |
+
type: str
|
| 388 |
+
as_int:
|
| 389 |
+
type: int
|
| 390 |
+
TensorArgument:
|
| 391 |
+
kind: struct
|
| 392 |
+
fields:
|
| 393 |
+
name:
|
| 394 |
+
type: str
|
| 395 |
+
TensorMeta:
|
| 396 |
+
kind: struct
|
| 397 |
+
fields:
|
| 398 |
+
dtype:
|
| 399 |
+
type: ScalarType
|
| 400 |
+
sizes:
|
| 401 |
+
type: List[SymInt]
|
| 402 |
+
requires_grad:
|
| 403 |
+
type: bool
|
| 404 |
+
device:
|
| 405 |
+
type: Device
|
| 406 |
+
strides:
|
| 407 |
+
type: List[SymInt]
|
| 408 |
+
storage_offset:
|
| 409 |
+
type: SymInt
|
| 410 |
+
layout:
|
| 411 |
+
type: Layout
|
| 412 |
+
TokenArgument:
|
| 413 |
+
kind: struct
|
| 414 |
+
fields:
|
| 415 |
+
name:
|
| 416 |
+
type: str
|
| 417 |
+
UserInputMutationSpec:
|
| 418 |
+
kind: struct
|
| 419 |
+
fields:
|
| 420 |
+
arg:
|
| 421 |
+
type: TensorArgument
|
| 422 |
+
user_input_name:
|
| 423 |
+
type: str
|
| 424 |
+
UserInputSpec:
|
| 425 |
+
kind: struct
|
| 426 |
+
fields:
|
| 427 |
+
arg:
|
| 428 |
+
type: Argument
|
| 429 |
+
UserOutputSpec:
|
| 430 |
+
kind: struct
|
| 431 |
+
fields:
|
| 432 |
+
arg:
|
| 433 |
+
type: Argument
|
| 434 |
+
SCHEMA_VERSION:
|
| 435 |
+
- 7
|
| 436 |
+
- 3
|
| 437 |
+
TREESPEC_VERSION: 1
|
janus/lib/python3.10/site-packages/torch/_export/serde/schema_check.py
ADDED
|
@@ -0,0 +1,286 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import dataclasses
|
| 3 |
+
import hashlib
|
| 4 |
+
import re
|
| 5 |
+
import typing
|
| 6 |
+
from enum import IntEnum
|
| 7 |
+
from typing import Any, Dict, Optional, Union
|
| 8 |
+
|
| 9 |
+
from torch._export.serde import schema
|
| 10 |
+
from torch._export.serde.union import _Union
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class SchemaUpdateError(Exception):
|
| 14 |
+
pass
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _check(x, msg):
|
| 18 |
+
if not x:
|
| 19 |
+
raise SchemaUpdateError(msg)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def _staged_schema():
|
| 23 |
+
ret: Dict[str, Any] = {}
|
| 24 |
+
defs = {}
|
| 25 |
+
|
| 26 |
+
def _handle_aggregate(ty):
|
| 27 |
+
def dump_type(t):
|
| 28 |
+
if isinstance(t, type):
|
| 29 |
+
return t.__name__
|
| 30 |
+
elif isinstance(t, str):
|
| 31 |
+
assert t in defs
|
| 32 |
+
return t
|
| 33 |
+
elif o := typing.get_origin(t):
|
| 34 |
+
# Lemme know if there's a better way to do this.
|
| 35 |
+
if o == list:
|
| 36 |
+
head = "List"
|
| 37 |
+
elif o == dict:
|
| 38 |
+
head = "Dict"
|
| 39 |
+
elif o == tuple:
|
| 40 |
+
if typing.get_args(t) == ():
|
| 41 |
+
return "Tuple[()]"
|
| 42 |
+
head = "Tuple"
|
| 43 |
+
elif o == Union:
|
| 44 |
+
args = typing.get_args(t)
|
| 45 |
+
assert len(args) == 2 and args[1] == type(None)
|
| 46 |
+
return f"Optional[{dump_type(args[0])}]"
|
| 47 |
+
else:
|
| 48 |
+
raise AssertionError(f"Type {t} is not supported in export schema.")
|
| 49 |
+
return (
|
| 50 |
+
f"{head}[{', '.join([dump_type(x) for x in typing.get_args(t)])}]"
|
| 51 |
+
)
|
| 52 |
+
elif t == ():
|
| 53 |
+
return "()"
|
| 54 |
+
else:
|
| 55 |
+
raise AssertionError(f"Type {t} is not supported in export schema.")
|
| 56 |
+
|
| 57 |
+
def dump_field(f):
|
| 58 |
+
t = dump_type(f.type)
|
| 59 |
+
ret = {"type": t}
|
| 60 |
+
|
| 61 |
+
value = dataclasses.MISSING
|
| 62 |
+
if f.default is not dataclasses.MISSING:
|
| 63 |
+
value = f.default
|
| 64 |
+
elif f.default_factory is not dataclasses.MISSING:
|
| 65 |
+
value = f.default_factory()
|
| 66 |
+
|
| 67 |
+
if t.startswith("Optional[") and value is not None:
|
| 68 |
+
raise AssertionError(
|
| 69 |
+
f"Optional field {ty.__name__}.{f.name} must have default value to be None."
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
if value is not dataclasses.MISSING:
|
| 73 |
+
default = str(value)
|
| 74 |
+
ret["default"] = default
|
| 75 |
+
return ret
|
| 76 |
+
|
| 77 |
+
return {f.name: dump_field(f) for f in dataclasses.fields(ty)}
|
| 78 |
+
|
| 79 |
+
def _handle_int_enum(name, ty):
|
| 80 |
+
ret[name] = {"kind": "enum", "fields": {x.name: x.value for x in ty}}
|
| 81 |
+
|
| 82 |
+
def _handle_struct(name, ty):
|
| 83 |
+
ret[name] = {"kind": "struct", "fields": _handle_aggregate(ty)}
|
| 84 |
+
|
| 85 |
+
def _handle_union(name, ty):
|
| 86 |
+
ret[name] = {"kind": "union", "fields": _handle_aggregate(ty)}
|
| 87 |
+
|
| 88 |
+
for name in dir(schema):
|
| 89 |
+
if name.startswith("_"):
|
| 90 |
+
continue
|
| 91 |
+
|
| 92 |
+
value = getattr(schema, name)
|
| 93 |
+
|
| 94 |
+
if hasattr(value, "__module__") and value.__module__ != schema.__name__:
|
| 95 |
+
continue
|
| 96 |
+
|
| 97 |
+
defs[name] = value
|
| 98 |
+
|
| 99 |
+
for name, value in defs.items():
|
| 100 |
+
if isinstance(value, type):
|
| 101 |
+
if issubclass(value, IntEnum):
|
| 102 |
+
_handle_int_enum(name, value)
|
| 103 |
+
elif dataclasses.is_dataclass(value):
|
| 104 |
+
if issubclass(value, _Union):
|
| 105 |
+
_handle_union(name, value)
|
| 106 |
+
else:
|
| 107 |
+
_handle_struct(name, value)
|
| 108 |
+
else:
|
| 109 |
+
raise AssertionError(f"Unknown schema type {name}: {value}")
|
| 110 |
+
elif isinstance(value, (int, tuple)):
|
| 111 |
+
assert name in ("SCHEMA_VERSION", "TREESPEC_VERSION")
|
| 112 |
+
else:
|
| 113 |
+
raise AssertionError(f"Unknown variable {name}: {value}")
|
| 114 |
+
|
| 115 |
+
ret["SCHEMA_VERSION"] = list(defs["SCHEMA_VERSION"])
|
| 116 |
+
assert all(x > 0 for x in ret["SCHEMA_VERSION"])
|
| 117 |
+
ret["TREESPEC_VERSION"] = defs["TREESPEC_VERSION"]
|
| 118 |
+
assert ret["TREESPEC_VERSION"] > 0
|
| 119 |
+
return ret
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def _diff_schema(dst, src):
|
| 123 |
+
additions = {key: src[key] for key in src.keys() - dst.keys()}
|
| 124 |
+
subtractions = {key: dst[key] for key in dst.keys() - src.keys()}
|
| 125 |
+
|
| 126 |
+
common_keys = src.keys() & dst.keys()
|
| 127 |
+
|
| 128 |
+
versions = {"SCHEMA_VERSION", "TREESPEC_VERSION"}
|
| 129 |
+
common_keys -= versions
|
| 130 |
+
|
| 131 |
+
for key in common_keys:
|
| 132 |
+
src_kind = src[key]["kind"]
|
| 133 |
+
src_fields = src[key]["fields"]
|
| 134 |
+
dst_kind = dst[key]["kind"]
|
| 135 |
+
dst_fields = dst[key]["fields"]
|
| 136 |
+
_check(
|
| 137 |
+
src_kind == dst_kind,
|
| 138 |
+
f"Type {key} changed kind from {dst_kind} to {src_kind}",
|
| 139 |
+
)
|
| 140 |
+
assert isinstance(src_fields, dict) and isinstance(dst_fields, dict)
|
| 141 |
+
added_fields = {
|
| 142 |
+
key: src_fields[key] for key in src_fields.keys() - dst_fields.keys()
|
| 143 |
+
}
|
| 144 |
+
subtracted_fields = {
|
| 145 |
+
key: dst_fields[key] for key in dst_fields.keys() - src_fields.keys()
|
| 146 |
+
}
|
| 147 |
+
common_fields = src_fields.keys() & dst_fields.keys()
|
| 148 |
+
|
| 149 |
+
for field in common_fields:
|
| 150 |
+
src_field = src_fields[field]
|
| 151 |
+
dst_field = dst_fields[field]
|
| 152 |
+
if src_kind == "struct":
|
| 153 |
+
_check(
|
| 154 |
+
src_field["type"] == dst_field["type"],
|
| 155 |
+
f"Type of the field {key}.{field} changed from {dst_field['type']} to {src_field['type']}",
|
| 156 |
+
)
|
| 157 |
+
if "default" in src_field and "default" not in dst_field:
|
| 158 |
+
added_fields[field] = {}
|
| 159 |
+
added_fields[field]["default"] = src_field["default"]
|
| 160 |
+
if "default" not in src_field and "default" in dst_field:
|
| 161 |
+
subtracted_fields[field] = {}
|
| 162 |
+
subtracted_fields[field]["default"] = dst_field["default"]
|
| 163 |
+
elif src_kind == "enum":
|
| 164 |
+
_check(
|
| 165 |
+
src_field == dst_field,
|
| 166 |
+
f"Value of the enum field {key}.{field} changed from {dst_field} to {src_field}",
|
| 167 |
+
)
|
| 168 |
+
elif src_kind == "union":
|
| 169 |
+
_check(
|
| 170 |
+
src_field["type"] == dst_field["type"],
|
| 171 |
+
f"Type of the field {key}.{field} changed from {dst_field['type']} to {src_field['type']}",
|
| 172 |
+
)
|
| 173 |
+
else:
|
| 174 |
+
raise AssertionError(f"Unknown kind {src_kind}: {key}")
|
| 175 |
+
if len(added_fields) > 0:
|
| 176 |
+
assert key not in additions
|
| 177 |
+
additions[key] = {}
|
| 178 |
+
additions[key]["fields"] = added_fields
|
| 179 |
+
if len(subtracted_fields) > 0:
|
| 180 |
+
assert key not in subtractions
|
| 181 |
+
subtractions[key] = {}
|
| 182 |
+
subtractions[key]["fields"] = subtracted_fields
|
| 183 |
+
|
| 184 |
+
return additions, subtractions
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def _hash_schema(s):
|
| 188 |
+
return hashlib.sha256(repr(s).encode("utf-8")).hexdigest()
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
@dataclasses.dataclass
|
| 192 |
+
class _Commit:
|
| 193 |
+
result: Dict[str, Any]
|
| 194 |
+
checksum_result: str
|
| 195 |
+
path: str
|
| 196 |
+
additions: Dict[str, Any]
|
| 197 |
+
subtractions: Dict[str, Any]
|
| 198 |
+
base: Dict[str, Any]
|
| 199 |
+
checksum_base: Optional[str]
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def update_schema():
|
| 203 |
+
import importlib.resources
|
| 204 |
+
|
| 205 |
+
if importlib.resources.is_resource(__package__, "schema.yaml"):
|
| 206 |
+
content = importlib.resources.read_text(__package__, "schema.yaml")
|
| 207 |
+
match = re.search("checksum<<([A-Fa-f0-9]{64})>>", content)
|
| 208 |
+
_check(match is not None, "checksum not found in schema.yaml")
|
| 209 |
+
assert match is not None
|
| 210 |
+
checksum_base = match.group(1)
|
| 211 |
+
from yaml import load, Loader
|
| 212 |
+
|
| 213 |
+
dst = load(content, Loader=Loader)
|
| 214 |
+
assert isinstance(dst, dict)
|
| 215 |
+
else:
|
| 216 |
+
checksum_base = None
|
| 217 |
+
dst = {"SCHEMA_VERSION": None, "TREESPEC_VERSION": None}
|
| 218 |
+
|
| 219 |
+
src = _staged_schema()
|
| 220 |
+
additions, subtractions = _diff_schema(dst, src)
|
| 221 |
+
return _Commit(
|
| 222 |
+
result=src,
|
| 223 |
+
checksum_result=_hash_schema(src),
|
| 224 |
+
path=__package__.replace(".", "/") + "/schema.yaml",
|
| 225 |
+
additions=additions,
|
| 226 |
+
subtractions=subtractions,
|
| 227 |
+
base=dst,
|
| 228 |
+
checksum_base=checksum_base,
|
| 229 |
+
)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def check(commit: _Commit, force_unsafe: bool = False):
|
| 233 |
+
next_version = None
|
| 234 |
+
reason = ""
|
| 235 |
+
# Step 1: Detect major schema updates.
|
| 236 |
+
if len(commit.additions) > 0:
|
| 237 |
+
for k, v in commit.additions.items():
|
| 238 |
+
if k not in commit.base:
|
| 239 |
+
continue
|
| 240 |
+
kind = commit.result[k]["kind"]
|
| 241 |
+
fields = v["fields"]
|
| 242 |
+
for f, d in fields.items():
|
| 243 |
+
if "default" not in d and kind == "struct":
|
| 244 |
+
reason += (
|
| 245 |
+
f"Field {k}.{f} is added to schema.py without a default value as an incomparible change "
|
| 246 |
+
+ "which requires major version bump.\n"
|
| 247 |
+
)
|
| 248 |
+
next_version = [commit.base["SCHEMA_VERSION"][0] + 1, 1]
|
| 249 |
+
|
| 250 |
+
if len(commit.subtractions) > 0:
|
| 251 |
+
for k, v in commit.subtractions.items():
|
| 252 |
+
if k not in commit.result:
|
| 253 |
+
continue
|
| 254 |
+
for f in v["fields"]:
|
| 255 |
+
reason = f"Field {k}.{f} is removed from schema.py as an incompatible change which requires major version bump.\n"
|
| 256 |
+
next_version = [commit.base["SCHEMA_VERSION"][0] + 1, 1]
|
| 257 |
+
|
| 258 |
+
if force_unsafe:
|
| 259 |
+
reason += "--force-unsafe is used."
|
| 260 |
+
next_version = commit.result["SCHEMA_VERSION"]
|
| 261 |
+
else:
|
| 262 |
+
# Step 2: Detect minor schema updates.
|
| 263 |
+
if next_version is None and len(commit.additions) > 0:
|
| 264 |
+
for k, v in commit.additions.items():
|
| 265 |
+
for f in v["fields"]:
|
| 266 |
+
reason += (
|
| 267 |
+
f"Field {k}.{f} is added to schema.py as an compatible change "
|
| 268 |
+
+ "which still requires minor version bump.\n"
|
| 269 |
+
)
|
| 270 |
+
next_version = [
|
| 271 |
+
commit.base["SCHEMA_VERSION"][0],
|
| 272 |
+
commit.base["SCHEMA_VERSION"][1] + 1,
|
| 273 |
+
]
|
| 274 |
+
if next_version is None and len(commit.subtractions) > 0:
|
| 275 |
+
for k, v in commit.subtractions.items():
|
| 276 |
+
for f in v["fields"]:
|
| 277 |
+
reason += (
|
| 278 |
+
f"Field {k}.{f} is removed from schema.py as an compatible change "
|
| 279 |
+
+ "which still requires minor version bump.\n"
|
| 280 |
+
)
|
| 281 |
+
next_version = [
|
| 282 |
+
commit.base["SCHEMA_VERSION"][0],
|
| 283 |
+
commit.base["SCHEMA_VERSION"][1] + 1,
|
| 284 |
+
]
|
| 285 |
+
|
| 286 |
+
return next_version, reason
|
janus/lib/python3.10/site-packages/torch/_export/serde/serialize.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
janus/lib/python3.10/site-packages/torch/_export/serde/union.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import functools
|
| 3 |
+
from dataclasses import fields
|
| 4 |
+
from typing import Hashable, Set
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class _UnionTag(str):
|
| 8 |
+
_cls: Hashable
|
| 9 |
+
|
| 10 |
+
@staticmethod
|
| 11 |
+
def create(t, cls):
|
| 12 |
+
tag = _UnionTag(t)
|
| 13 |
+
assert not hasattr(tag, "_cls")
|
| 14 |
+
tag._cls = cls
|
| 15 |
+
return tag
|
| 16 |
+
|
| 17 |
+
def __eq__(self, cmp) -> bool:
|
| 18 |
+
assert isinstance(cmp, str)
|
| 19 |
+
other = str(cmp)
|
| 20 |
+
assert other in _get_field_names(
|
| 21 |
+
self._cls
|
| 22 |
+
), f"{other} is not a valid tag for {self._cls}. Available tags: {_get_field_names(self._cls)}"
|
| 23 |
+
return str(self) == other
|
| 24 |
+
|
| 25 |
+
def __hash__(self):
|
| 26 |
+
return hash(str(self))
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@functools.lru_cache(maxsize=None)
|
| 30 |
+
def _get_field_names(cls) -> Set[str]:
|
| 31 |
+
return {f.name for f in fields(cls)}
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class _Union:
    """Base class for tagged-union dataclasses.

    Subclasses are dataclasses in which exactly one field is "active" at a
    time. Instances must be built via ``create(field_name=value)``, which sets
    every other field to None and records the active field name in ``_type``.
    """

    # Tag naming the active field; installed only by `create`, so directly
    # constructed instances raise from `.type` (see below).
    _type: _UnionTag

    @classmethod
    def create(cls, **kwargs):
        # Exactly one field may be supplied; all other fields default to None.
        assert len(kwargs) == 1
        obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs})  # type: ignore[arg-type]
        obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls)
        return obj

    def __post_init__(self):
        # These names would collide with the union machinery defined here.
        assert not any(f.name in ("type", "_type", "create", "value") for f in fields(self))  # type: ignore[arg-type, misc]

    @property
    def type(self) -> str:
        try:
            return self._type
        except AttributeError as e:
            # `_type` exists only when `create` was used; plain construction
            # (cls(...)) lands here.
            raise RuntimeError(
                f"Please use {type(self).__name__}.create to instantiate the union type."
            ) from e

    @property
    def value(self):
        # Payload stored under the active tag.
        return getattr(self, self.type)

    def __getattribute__(self, name):
        attr = super().__getattribute__(name)
        # Reading an inactive field (left as None by `create`) is an error;
        # the active field itself may legitimately hold None.
        if attr is None and name in _get_field_names(type(self)) and name != self.type:  # type: ignore[arg-type]
            raise AttributeError(f"Field {name} is not set.")
        return attr

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return f"{type(self).__name__}({self.type}={getattr(self, self.type)})"
|
janus/lib/python3.10/site-packages/torch/jit/__init__.py
ADDED
|
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import warnings
|
| 3 |
+
from contextlib import contextmanager
|
| 4 |
+
from typing import Any, Iterator
|
| 5 |
+
|
| 6 |
+
import torch._C
|
| 7 |
+
|
| 8 |
+
# These are imported so users can access them from the `torch.jit` module
|
| 9 |
+
from torch._jit_internal import (
|
| 10 |
+
_Await,
|
| 11 |
+
_drop,
|
| 12 |
+
_IgnoreContextManager,
|
| 13 |
+
_isinstance,
|
| 14 |
+
_overload,
|
| 15 |
+
_overload_method,
|
| 16 |
+
export,
|
| 17 |
+
Final,
|
| 18 |
+
Future,
|
| 19 |
+
ignore,
|
| 20 |
+
is_scripting,
|
| 21 |
+
unused,
|
| 22 |
+
)
|
| 23 |
+
from torch.jit._async import fork, wait
|
| 24 |
+
from torch.jit._await import _awaitable, _awaitable_nowait, _awaitable_wait
|
| 25 |
+
from torch.jit._decomposition_utils import _register_decomposition
|
| 26 |
+
from torch.jit._freeze import freeze, optimize_for_inference, run_frozen_optimizations
|
| 27 |
+
from torch.jit._fuser import (
|
| 28 |
+
fuser,
|
| 29 |
+
last_executed_optimized_graph,
|
| 30 |
+
optimized_execution,
|
| 31 |
+
set_fusion_strategy,
|
| 32 |
+
)
|
| 33 |
+
from torch.jit._ir_utils import _InsertPoint
|
| 34 |
+
from torch.jit._script import (
|
| 35 |
+
_ScriptProfile,
|
| 36 |
+
_unwrap_optional,
|
| 37 |
+
Attribute,
|
| 38 |
+
CompilationUnit,
|
| 39 |
+
interface,
|
| 40 |
+
RecursiveScriptClass,
|
| 41 |
+
RecursiveScriptModule,
|
| 42 |
+
script,
|
| 43 |
+
script_method,
|
| 44 |
+
ScriptFunction,
|
| 45 |
+
ScriptModule,
|
| 46 |
+
ScriptWarning,
|
| 47 |
+
)
|
| 48 |
+
from torch.jit._serialization import (
|
| 49 |
+
jit_module_from_flatbuffer,
|
| 50 |
+
load,
|
| 51 |
+
save,
|
| 52 |
+
save_jit_module_to_flatbuffer,
|
| 53 |
+
)
|
| 54 |
+
from torch.jit._trace import (
|
| 55 |
+
_flatten,
|
| 56 |
+
_get_trace_graph,
|
| 57 |
+
_script_if_tracing,
|
| 58 |
+
_unique_state_dict,
|
| 59 |
+
is_tracing,
|
| 60 |
+
ONNXTracedModule,
|
| 61 |
+
TopLevelTracedModule,
|
| 62 |
+
trace,
|
| 63 |
+
trace_module,
|
| 64 |
+
TracedModule,
|
| 65 |
+
TracerWarning,
|
| 66 |
+
TracingCheckError,
|
| 67 |
+
)
|
| 68 |
+
from torch.utils import set_module
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# Public surface of `torch.jit`; every name here is re-exported from the
# submodules imported above.
__all__ = [
    "Attribute",
    "CompilationUnit",
    "Error",
    "Future",
    "ScriptFunction",
    "ScriptModule",
    "annotate",
    "enable_onednn_fusion",
    "export",
    "export_opnames",
    "fork",
    "freeze",
    "interface",
    "ignore",
    "isinstance",
    "load",
    "onednn_fusion_enabled",
    "optimize_for_inference",
    "save",
    "script",
    "script_if_tracing",
    "set_fusion_strategy",
    "strict_fusion",
    "trace",
    "trace_module",
    "unused",
    "wait",
]

# For backwards compatibility: old private aliases of the public functions.
_fork = fork
_wait = wait
_set_fusion_strategy = set_fusion_strategy
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def export_opnames(m):
    r"""
    Generate new bytecode for a Script module and return the operator list
    it would require, based on the current code base.

    If you have a LiteScriptModule and want the currently present list of
    ops, call ``_export_operator_list`` instead.

    Args:
        m: a ScriptModule; its underlying C++ module (``m._c``) is inspected.
    """
    opnames = torch._C._export_opnames(m._c)
    return opnames
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
# torch.jit.Error: re-export the C++ JITException under a Python-friendly
# name and module path so tracebacks read `torch.jit.Error`.
Error = torch._C.JITException
set_module(Error, "torch.jit")
# This is not perfect but works in common cases
Error.__name__ = "Error"
Error.__qualname__ = "Error"
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
# Pass-through type-hint helper, callable from eager Python code.
def annotate(the_type, the_value):
    """Hint the TorchScript compiler that `the_value` has type `the_type`.

    This is a pass-through: it returns `the_value` unchanged and is a no-op
    when running outside of TorchScript. It is useful where type inference
    would otherwise guess wrong, including:

    - empty containers like ``[]`` and ``{}``, which TorchScript assumes are
      containers of ``Tensor``
    - ``Optional[T]`` values assigned a concrete ``T``, which TorchScript
      would narrow to ``T`` instead of ``Optional[T]``

    It does not help in ``__init__`` of ``torch.nn.Module`` subclasses, which
    run eagerly; use :meth:`~torch.jit.Attribute` for module attributes.

    Example::

        import torch
        from typing import Dict

        @torch.jit.script
        def fn():
            # hint that this empty dict maps str -> int, not str -> Tensor
            d = torch.jit.annotate(Dict[str, int], {})
            d["name"] = 20

    Args:
        the_type: Python type passed to the TorchScript compiler as a hint.
        the_value: value or expression to hint the type for.

    Returns:
        `the_value`, unchanged.
    """
    return the_value
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def script_if_tracing(fn):
    """
    Compile ``fn`` the first time it is called during tracing.

    ``torch.jit.script`` has a non-negligible start-up cost on first use due
    to lazy initialization of many compiler builtins, so it should not run in
    library code at import time. Wrapping with
    ``@torch.jit.script_if_tracing`` defers compilation until the function is
    actually invoked under tracing, which lets code with control flow survive
    tracing without paying that cost up front.

    Args:
        fn: a function to compile lazily.

    Returns:
        If called during tracing, a :class:`ScriptFunction` created by
        `torch.jit.script`; otherwise, the original function `fn`.
    """
    wrapped = _script_if_tracing(fn)
    return wrapped
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
# torch.jit.isinstance — deliberately shadows the builtin within this module.
def isinstance(obj, target_type):
    """
    Provide container type refinement in TorchScript.

    Unlike the builtin, this can refine parameterized container types such as
    ``List[str]``, ``Dict[str, List[torch.Tensor]]`` or
    ``Optional[Tuple[int, str, int]]``, as well as basic TorchScript types
    like ``bool`` and ``int``.

    Args:
        obj: object whose type should be refined.
        target_type: type to try to refine `obj` to.

    Returns:
        ``bool``: True if `obj` was successfully refined to `target_type`,
        False otherwise (with no new type refinement applied).

    Example::

        import torch
        from typing import Any, Dict, List

        class MyModule(torch.nn.Module):
            def forward(self, input: Any):  # note the Any type
                if torch.jit.isinstance(input, List[torch.Tensor]):
                    for t in input:
                        y = t.clamp(0, 0.5)
                elif torch.jit.isinstance(input, Dict[str, str]):
                    for val in input.values():
                        print(val)

        m = torch.jit.script(MyModule())
        m([torch.rand(3, 3), torch.rand(4, 3)])
        m({"key1": "val1", "key2": "val2"})
    """
    return _isinstance(obj, target_type)
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
class strict_fusion:
    """
    Context manager that raises errors if not all nodes inside it have been
    fused (in inference) or symbolically differentiated (in training).

    The checking itself happens in the TorchScript runtime; in eager Python
    the manager is inert and only emits a warning on construction.

    Example::

        @torch.jit.script
        def foo(x):
            with torch.jit.strict_fusion():
                return x + x + x
    """

    def __init__(self) -> None:
        # Outside of script mode the runtime never sees this context, so the
        # fusion check cannot run — warn the user instead of silently no-oping.
        if not torch._jit_internal.is_scripting():
            warnings.warn("Only works in script mode")

    def __enter__(self):
        return None

    def __exit__(self, type: Any, value: Any, tb: Any) -> None:
        return None
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
# Context manager for globally hiding source ranges when printing graphs.
# Note that these functions are exposed to Python as static members of the
# Graph class, so mypy checks need to be skipped.
@contextmanager
def _hide_source_ranges() -> Iterator[None]:
    # Snapshot the current global flag so it can be restored even if the
    # managed block raises.
    old_enable_source_ranges = torch._C.Graph.global_print_source_ranges  # type: ignore[attr-defined]
    try:
        torch._C.Graph.set_global_print_source_ranges(False)  # type: ignore[attr-defined]
        yield
    finally:
        # Always restore the previous setting; this is process-global state.
        torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges)  # type: ignore[attr-defined]
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def enable_onednn_fusion(enabled: bool):
    """Turn onednn (LLGA) JIT fusion on or off.

    Args:
        enabled: True to enable onednn JIT fusion, False to disable it.
    """
    torch._C._jit_set_llga_enabled(enabled)
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def onednn_fusion_enabled():
    """Return True if onednn (LLGA) JIT fusion is currently enabled."""
    is_enabled = torch._C._jit_llga_enabled()
    return is_enabled
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
# `Any` was only needed for annotations above; remove it from the module
# namespace so it is not accidentally exposed via `torch.jit`.
del Any

# Initialize the JIT bindings eagerly; a failure here means torch._C's JIT
# machinery is unusable, so fail loudly at import time.
if not torch._C._jit_init():
    raise RuntimeError("JIT initialization failed")
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (8.67 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/_await.cpython-310.pyc
ADDED
|
Binary file (1.1 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/_decomposition_utils.cpython-310.pyc
ADDED
|
Binary file (627 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/_decompositions.cpython-310.pyc
ADDED
|
Binary file (3.43 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/_freeze.cpython-310.pyc
ADDED
|
Binary file (9.36 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/_fuser.cpython-310.pyc
ADDED
|
Binary file (5.28 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/_ir_utils.cpython-310.pyc
ADDED
|
Binary file (1.19 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/_logging.cpython-310.pyc
ADDED
|
Binary file (386 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/_pickle.cpython-310.pyc
ADDED
|
Binary file (856 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/_recursive.cpython-310.pyc
ADDED
|
Binary file (26.3 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/_script.cpython-310.pyc
ADDED
|
Binary file (51.5 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/_serialization.cpython-310.pyc
ADDED
|
Binary file (9.02 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/_trace.cpython-310.pyc
ADDED
|
Binary file (41.7 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/annotations.cpython-310.pyc
ADDED
|
Binary file (13.6 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/generate_bytecode.cpython-310.pyc
ADDED
|
Binary file (1.29 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/__pycache__/supported_ops.cpython-310.pyc
ADDED
|
Binary file (8.11 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/jit/_async.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
"""Async API.
|
| 3 |
+
|
| 4 |
+
This module contains the API for parallelism in TorchScript, notably:
|
| 5 |
+
* torch.jit.fork
|
| 6 |
+
* torch.jit.wait
|
| 7 |
+
|
| 8 |
+
This is not intended to be imported directly; please use the exposed
|
| 9 |
+
functionalities in `torch.jit`.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
from torch._jit_internal import Future
|
| 14 |
+
from torch.jit._builtins import _register_builtin
|
| 15 |
+
from torch.utils import set_module
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Make Future report `torch.jit` as its module for docs/repr purposes.
set_module(Future, "torch.jit")
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def fork(func, *args, **kwargs):
    r"""
    Create an asynchronous task executing `func` and return a reference
    (a ``torch.jit.Future``) to its eventual result.

    The call returns immediately, so the result of `func` may not have been
    computed yet; force completion and fetch the value with
    :func:`torch.jit.wait`. A `fork` of a `func` returning `T` is typed as
    ``torch.jit.Future[T]``. Calls may be nested arbitrarily and may use
    positional and keyword arguments. Execution is only actually
    asynchronous when run in TorchScript: in pure Python (and while tracing)
    the task does not run in parallel, although traced ``fork``/``wait``
    calls are still captured in the exported IR graph.

    .. warning::
        `fork` tasks execute non-deterministically. Only spawn parallel
        tasks for pure functions that do not modify their inputs, module
        attributes, or global state.

    Args:
        func (callable or torch.nn.Module): callable to invoke. Executes
            asynchronously under TorchScript, synchronously otherwise.
        ``*args``, ``**kwargs``: arguments to invoke `func` with.

    Returns:
        `torch.jit.Future[T]`: a reference to the execution of `func`; the
        value can only be obtained through `torch.jit.wait`.

    Example (fork a free function)::

        import torch
        from torch import Tensor

        def foo(a: Tensor, b: int) -> Tensor:
            return a + b

        def bar(a):
            fut: torch.jit.Future[Tensor] = torch.jit.fork(foo, a, b=2)
            return torch.jit.wait(fut)

        script_bar = torch.jit.script(bar)
        input = torch.tensor(2)
        # only the scripted version executes asynchronously
        assert script_bar(input) == bar(input)
        # tracing is not asynchronous, but fork is captured in the IR
        assert "fork" in str(torch.jit.trace(bar, (input,)).graph)
    """
    task = torch._C.fork(func, *args, **kwargs)
    return task
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def wait(future):
    r"""
    Force completion of a ``torch.jit.Future[T]`` task and return its result.

    See :func:`~fork` for docs and examples.

    Args:
        future (torch.jit.Future[T]): asynchronous task reference created
            through `torch.jit.fork`.

    Returns:
        `T`: the return value of the completed task.
    """
    result = torch._C.wait(future)
    return result
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
# Let TorchScript resolve calls to `wait` as the aten::wait builtin op.
_register_builtin(wait, "aten::wait")
|
janus/lib/python3.10/site-packages/torch/jit/_await.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
from torch._jit_internal import _Await
|
| 4 |
+
from torch.jit._builtins import _register_builtin
|
| 5 |
+
from torch.utils import set_module
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# Make _Await report `torch.jit` as its module for docs/repr purposes.
set_module(_Await, "torch.jit")
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _awaitable(func, *args, **kwargs):
|
| 12 |
+
r"""Create Await object that will call specified functioni with specified args, when it is requested for the result."""
|
| 13 |
+
return torch._C._awaitable(func, *args, **kwargs)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _awaitable_wait(aw):
|
| 17 |
+
r"""Request await the result of execution, if Await is not completed yet, the func will be called immediately."""
|
| 18 |
+
return torch._C._awaitable_wait(aw)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def _awaitable_nowait(o):
|
| 22 |
+
r"""Create completed Await with specified result."""
|
| 23 |
+
return torch._C._awaitable_nowait(o)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Let TorchScript resolve the Await helpers to their prim:: builtin ops.
_register_builtin(_awaitable_wait, "prim::awaitable_wait")
_register_builtin(_awaitable_nowait, "prim::awaitable_nowait")
|
janus/lib/python3.10/site-packages/torch/jit/_builtins.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import cmath
|
| 3 |
+
import math
|
| 4 |
+
import warnings
|
| 5 |
+
from collections import OrderedDict
|
| 6 |
+
from typing import Dict, Optional
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.backends.cudnn as cudnn
|
| 10 |
+
from torch.nn.modules.utils import (
|
| 11 |
+
_list_with_default,
|
| 12 |
+
_pair,
|
| 13 |
+
_quadruple,
|
| 14 |
+
_single,
|
| 15 |
+
_triple,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# Cache for the id(callable) -> "aten::op" mapping; built lazily by
# `_get_builtin_table()` below so initialization order is correct.
_builtin_table: Optional[Dict[int, str]] = None

# Modules whose callables are registered wholesale as builtin aten ops.
_modules_containing_builtins = (torch, torch._C._nn, torch._C._fft, torch._C._linalg, torch._C._nested, torch._C._sparse, torch._C._special)  # type: ignore[attr-defined] # noqa: B950

# Explicit registrations that cannot be discovered by scanning the modules
# above (math/cmath builtins, private torch helpers, warnings.warn).
# NOTE(review): math.asinh/math.atanh appear twice below; harmless because
# the table is keyed by id() (later entries overwrite), but could be pruned.
_builtin_ops = [
    # Pairs of (function, op_name)
    (_pair, "aten::_pair"),
    (_quadruple, "aten::_quadruple"),
    (_single, "aten::_single"),
    (_triple, "aten::_triple"),
    (_list_with_default, "aten::list_with_default"),
    (OrderedDict, "aten::dict"),
    (dict, "aten::dict"),
    (cudnn.is_acceptable, "aten::cudnn_is_acceptable"),
    (math.ceil, "aten::ceil"),
    (math.copysign, "aten::copysign"),
    (math.erf, "aten::erf"),
    (math.erfc, "aten::erfc"),
    (math.exp, "aten::exp"),
    (math.expm1, "aten::expm1"),
    (math.fabs, "aten::fabs"),
    (math.floor, "aten::floor"),
    (math.gamma, "aten::gamma"),
    (math.lgamma, "aten::lgamma"),
    (math.log, "aten::log"),
    (math.log10, "aten::log10"),
    (math.log1p, "aten::log1p"),
    (math.pow, "aten::pow"),
    (math.sqrt, "aten::sqrt"),
    (math.isnan, "aten::isnan"),
    (math.asinh, "aten::asinh"),
    (math.atanh, "aten::atanh"),
    (math.cosh, "aten::cosh"),
    (math.sinh, "aten::sinh"),
    (math.tanh, "aten::tanh"),
    (math.acos, "aten::acos"),
    (math.asin, "aten::asin"),
    (math.atan, "aten::atan"),
    (math.atan2, "aten::atan2"),
    (math.cos, "aten::cos"),
    (math.sin, "aten::sin"),
    (math.tan, "aten::tan"),
    (math.asinh, "aten::asinh"),
    (math.atanh, "aten::atanh"),
    (math.acosh, "aten::acosh"),
    (math.fmod, "aten::fmod"),
    (math.modf, "aten::modf"),
    (math.factorial, "aten::factorial"),
    (math.frexp, "aten::frexp"),
    (math.isinf, "aten::isinf"),
    (math.degrees, "aten::degrees"),
    (math.radians, "aten::radians"),
    (cmath.isnan, "aten::isnan"),
    (cmath.isfinite, "aten::isfinite"),
    (cmath.isinf, "aten::isinf"),
    (cmath.phase, "aten::angle"),
    (cmath.rect, "aten::polar"),
    (cmath.log, "aten::log"),
    (cmath.log10, "aten::log10"),
    (cmath.sqrt, "aten::sqrt"),
    (cmath.exp, "aten::exp"),
    (cmath.sin, "aten::sin"),
    (cmath.tan, "aten::tan"),
    (cmath.cos, "aten::cos"),
    (cmath.asin, "aten::asin"),
    (cmath.acos, "aten::acos"),
    (cmath.atan, "aten::atan"),
    (cmath.sinh, "aten::sinh"),
    (cmath.cosh, "aten::cosh"),
    (cmath.tanh, "aten::tanh"),
    (cmath.asinh, "aten::asinh"),
    (cmath.acosh, "aten::acosh"),
    (cmath.atanh, "aten::atanh"),
    (math.ldexp, "aten::ldexp"),
    (torch._assert, "aten::_assert"),
    (torch.autograd.grad, "aten::grad"),
    (torch.autograd.backward, "aten::backward"),
    (torch._C._infer_size, "aten::_infer_size"),
    (torch.nn.functional._no_grad_embedding_renorm_, "aten::_no_grad_embedding_renorm_"),  # type: ignore[attr-defined]
    (torch.nn.functional.assert_int_or_pair, "aten::_assert_int_or_pair"),
    (torch.nn.init._no_grad_fill_, "aten::_no_grad_fill_"),
    (torch.nn.init._no_grad_normal_, "aten::_no_grad_normal_"),
    (torch.nn.init._no_grad_uniform_, "aten::_no_grad_uniform_"),
    (torch.nn.init._no_grad_zero_, "aten::_no_grad_zero_"),
    (torch._C._get_tracing_state, "aten::_get_tracing_state"),
    (torch._C._get_cpu_capability, "aten::_get_cpu_capability"),
    (warnings.warn, "aten::warn"),
    (torch._VF.stft, "aten::stft"),  # type: ignore[attr-defined]
    (torch._VF.istft, "aten::istft"),  # type: ignore[attr-defined]
    (torch._VF.cdist, "aten::cdist"),  # type: ignore[attr-defined]
    (torch._VF.norm, "aten::norm"),  # type: ignore[attr-defined]
    (torch._VF.unique_dim, "aten::unique_dim"),
    (torch._VF.unique_consecutive, "aten::unique_consecutive"),  # type: ignore[attr-defined]
    (torch._VF.nuclear_norm, "aten::nuclear_norm"),
    (torch._VF.frobenius_norm, "aten::frobenius_norm"),
    (torch._VF.tensordot, "aten::tensordot"),  # type: ignore[attr-defined]
]
| 116 |
+
|
| 117 |
+
# ops in torch.functional are bound to torch
|
| 118 |
+
# in these cases, we want to resolve the function to their python implementation
|
| 119 |
+
# instead looking up a builtin "aten::" schema
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def _gen_torch_functional_registered_ops():
|
| 123 |
+
# eventually ops should encompass all of torch/functional.py, (torch.functional.__all__)
|
| 124 |
+
# but we are currently only able to compile some of the functions. additionally,
|
| 125 |
+
# some functions directly map to their aten:: implementations.
|
| 126 |
+
# TODO: add support for more ops
|
| 127 |
+
ops = [
|
| 128 |
+
"stft",
|
| 129 |
+
"istft",
|
| 130 |
+
"lu",
|
| 131 |
+
"cdist",
|
| 132 |
+
"norm",
|
| 133 |
+
"unique",
|
| 134 |
+
"unique_consecutive",
|
| 135 |
+
"tensordot",
|
| 136 |
+
]
|
| 137 |
+
return {getattr(torch.functional, name) for name in ops}
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
# Snapshot of the special torch.functional callables, computed once at
# import time via the generator above.
_functional_registered_ops = _gen_torch_functional_registered_ops()


def _is_special_functional_bound_op(fn):
    # True if `fn` is one of the torch.functional ops that should be compiled
    # from Python rather than resolved to a builtin aten schema.
    return fn in _functional_registered_ops
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
# lazily built to ensure the correct initialization order
def _get_builtin_table():
    """Build (once) and return the id(callable) -> "aten::op" builtin table."""
    global _builtin_table
    if _builtin_table is not None:
        return _builtin_table
    _builtin_table = {}

    def register_all(mod):
        # Register every callable attribute of `mod` as "aten::<name>",
        # skipping the special torch.functional ops (which must be compiled
        # from Python) and context-manager factories.
        for name in dir(mod):
            v = getattr(mod, name)
            if (
                callable(v)
                and not _is_special_functional_bound_op(v)
                and v is not torch.no_grad
                and v is not torch.autocast
            ):
                # Fixup inconsistency in segment_reduce
                if name == "_segment_reduce":
                    name = name[1:]
                _builtin_ops.append((v, "aten::" + name))

    for mod in _modules_containing_builtins:
        register_all(mod)

    # math functions with nonstandard or clashing aten names.
    _builtin_ops.append((math.gcd, "aten::gcd"))
    _builtin_ops.append((math.isfinite, "aten::isfinite"))
    _builtin_ops.append((math.remainder, "aten::mathremainder"))  # type: ignore[attr-defined]

    # Imported here (not at module top) to keep the lazy initialization order.
    import torch.distributed.autograd as dist_autograd

    if dist_autograd.is_available():
        _builtin_ops.append((dist_autograd.get_gradients, "aten::get_gradients"))
        _builtin_ops.append((dist_autograd.backward, "aten::dist_backward"))

    # populate the _builtin_table from _builtin_ops
    for builtin, aten_op in _builtin_ops:
        _builtin_table[id(builtin)] = aten_op

    return _builtin_table
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def _register_builtin(fn, op):
|
| 189 |
+
_get_builtin_table()[id(fn)] = op
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def _find_builtin(fn):
|
| 193 |
+
return _get_builtin_table().get(id(fn))
|
janus/lib/python3.10/site-packages/torch/jit/_check.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import ast
|
| 3 |
+
import inspect
|
| 4 |
+
import textwrap
|
| 5 |
+
import warnings
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class AttributeTypeIsSupportedChecker(ast.NodeVisitor):
|
| 11 |
+
"""Check the ``__init__`` method of a given ``nn.Module``.
|
| 12 |
+
|
| 13 |
+
It ensures that all instance-level attributes can be properly initialized.
|
| 14 |
+
|
| 15 |
+
Specifically, we do type inference based on attribute values...even
|
| 16 |
+
if the attribute in question has already been typed using
|
| 17 |
+
Python3-style annotations or ``torch.jit.annotate``. This means that
|
| 18 |
+
setting an instance-level attribute to ``[]`` (for ``List``),
|
| 19 |
+
``{}`` for ``Dict``), or ``None`` (for ``Optional``) isn't enough
|
| 20 |
+
information for us to properly initialize that attribute.
|
| 21 |
+
|
| 22 |
+
An object of this class can walk a given ``nn.Module``'s AST and
|
| 23 |
+
determine if it meets our requirements or not.
|
| 24 |
+
|
| 25 |
+
Known limitations
|
| 26 |
+
1. We can only check the AST nodes for certain constructs; we can't
|
| 27 |
+
``eval`` arbitrary expressions. This means that function calls,
|
| 28 |
+
class instantiations, and complex expressions that resolve to one of
|
| 29 |
+
the "empty" values specified above will NOT be flagged as
|
| 30 |
+
problematic.
|
| 31 |
+
2. We match on string literals, so if the user decides to use a
|
| 32 |
+
non-standard import (e.g. `from typing import List as foo`), we
|
| 33 |
+
won't catch it.
|
| 34 |
+
|
| 35 |
+
Example:
|
| 36 |
+
.. code-block:: python
|
| 37 |
+
|
| 38 |
+
class M(torch.nn.Module):
|
| 39 |
+
def fn(self):
|
| 40 |
+
return []
|
| 41 |
+
|
| 42 |
+
def __init__(self) -> None:
|
| 43 |
+
super().__init__()
|
| 44 |
+
self.x: List[int] = []
|
| 45 |
+
|
| 46 |
+
def forward(self, x: List[int]):
|
| 47 |
+
self.x = x
|
| 48 |
+
return 1
|
| 49 |
+
|
| 50 |
+
The above code will pass the ``AttributeTypeIsSupportedChecker``
|
| 51 |
+
check since we have a function call in ``__init__``. However,
|
| 52 |
+
it will still fail later with the ``RuntimeError`` "Tried to set
|
| 53 |
+
nonexistent attribute: x. Did you forget to initialize it in
|
| 54 |
+
__init__()?".
|
| 55 |
+
|
| 56 |
+
Args:
|
| 57 |
+
nn_module - The instance of ``torch.nn.Module`` whose
|
| 58 |
+
``__init__`` method we wish to check
|
| 59 |
+
"""
|
| 60 |
+
|
| 61 |
+
def check(self, nn_module: torch.nn.Module) -> None:
|
| 62 |
+
source_lines = inspect.getsource(nn_module.__class__.__init__)
|
| 63 |
+
|
| 64 |
+
# Ignore comments no matter the indentation
|
| 65 |
+
def is_useless_comment(line):
|
| 66 |
+
line = line.strip()
|
| 67 |
+
return line.startswith("#") and not line.startswith("# type:")
|
| 68 |
+
|
| 69 |
+
source_lines = "\n".join(
|
| 70 |
+
[l for l in source_lines.split("\n") if not is_useless_comment(l)]
|
| 71 |
+
)
|
| 72 |
+
|
| 73 |
+
# This AST only contains the `__init__` method of the nn.Module
|
| 74 |
+
init_ast = ast.parse(textwrap.dedent(source_lines))
|
| 75 |
+
|
| 76 |
+
# Get items annotated in the class body
|
| 77 |
+
self.class_level_annotations = list(nn_module.__annotations__.keys())
|
| 78 |
+
|
| 79 |
+
# Flag for later
|
| 80 |
+
self.visiting_class_level_ann = False
|
| 81 |
+
|
| 82 |
+
self.visit(init_ast)
|
| 83 |
+
|
| 84 |
+
def _is_empty_container(self, node: ast.AST, ann_type: str) -> bool:
|
| 85 |
+
if ann_type == "List":
|
| 86 |
+
# Assigning `[]` to a `List` type gives you a Node where
|
| 87 |
+
# value=List(elts=[], ctx=Load())
|
| 88 |
+
if not isinstance(node, ast.List):
|
| 89 |
+
return False
|
| 90 |
+
if node.elts:
|
| 91 |
+
return False
|
| 92 |
+
elif ann_type == "Dict":
|
| 93 |
+
# Assigning `{}` to a `Dict` type gives you a Node where
|
| 94 |
+
# value=Dict(keys=[], values=[])
|
| 95 |
+
if not isinstance(node, ast.Dict):
|
| 96 |
+
return False
|
| 97 |
+
if node.keys:
|
| 98 |
+
return False
|
| 99 |
+
elif ann_type == "Optional":
|
| 100 |
+
# Assigning `None` to an `Optional` type gives you a
|
| 101 |
+
# Node where value=Constant(value=None, kind=None)
|
| 102 |
+
if not isinstance(node, ast.Constant):
|
| 103 |
+
return False
|
| 104 |
+
if node.value: # type: ignore[attr-defined]
|
| 105 |
+
return False
|
| 106 |
+
|
| 107 |
+
return True
|
| 108 |
+
|
| 109 |
+
def visit_Assign(self, node):
|
| 110 |
+
"""Store assignment state when assigning to a Call Node.
|
| 111 |
+
|
| 112 |
+
If we're visiting a Call Node (the right-hand side of an
|
| 113 |
+
assignment statement), we won't be able to check the variable
|
| 114 |
+
that we're assigning to (the left-hand side of an assignment).
|
| 115 |
+
Because of this, we need to store this state in visitAssign.
|
| 116 |
+
(Luckily, we only have to do this if we're assigning to a Call
|
| 117 |
+
Node, i.e. ``torch.jit.annotate``. If we're using normal Python
|
| 118 |
+
annotations, we'll be visiting an AnnAssign Node, which has its
|
| 119 |
+
target built in.)
|
| 120 |
+
"""
|
| 121 |
+
try:
|
| 122 |
+
if (
|
| 123 |
+
isinstance(node.value, ast.Call)
|
| 124 |
+
and node.targets[0].attr in self.class_level_annotations
|
| 125 |
+
):
|
| 126 |
+
self.visiting_class_level_ann = True
|
| 127 |
+
except AttributeError:
|
| 128 |
+
return
|
| 129 |
+
self.generic_visit(node)
|
| 130 |
+
self.visiting_class_level_ann = False
|
| 131 |
+
|
| 132 |
+
def visit_AnnAssign(self, node):
|
| 133 |
+
"""Visit an AnnAssign node in an ``nn.Module``'s ``__init__`` method.
|
| 134 |
+
|
| 135 |
+
It checks if it conforms to our attribute annotation rules."""
|
| 136 |
+
# If we have a local variable
|
| 137 |
+
try:
|
| 138 |
+
if node.target.value.id != "self":
|
| 139 |
+
return
|
| 140 |
+
except AttributeError:
|
| 141 |
+
return
|
| 142 |
+
|
| 143 |
+
# If we have an attribute that's already been annotated at the
|
| 144 |
+
# class level
|
| 145 |
+
if node.target.attr in self.class_level_annotations:
|
| 146 |
+
return
|
| 147 |
+
|
| 148 |
+
# TODO @ansley: add `Union` once landed
|
| 149 |
+
|
| 150 |
+
# NB: Even though `Tuple` is a "container", we don't want to
|
| 151 |
+
# check for it here. `Tuple` functions as an type with an
|
| 152 |
+
# "infinite" number of subtypes, in the sense that you can have
|
| 153 |
+
# `Tuple[())]`, `Tuple[T1]`, `Tuple[T2]`, `Tuple[T1, T2]`,
|
| 154 |
+
# `Tuple[T2, T1]` and so on, and none of these subtypes can be
|
| 155 |
+
# used in place of the other. Therefore, assigning an empty
|
| 156 |
+
# tuple in `__init__` CORRECTLY means that that variable
|
| 157 |
+
# cannot be reassigned later to a non-empty tuple. Same
|
| 158 |
+
# deal with `NamedTuple`
|
| 159 |
+
|
| 160 |
+
containers = {"List", "list", "Dict", "dict", "Optional"}
|
| 161 |
+
|
| 162 |
+
# If we're not evaluating one of the specified problem types
|
| 163 |
+
try:
|
| 164 |
+
if node.annotation.value.id not in containers:
|
| 165 |
+
return
|
| 166 |
+
except AttributeError:
|
| 167 |
+
# To evaluate a base type (`str`, `int`, etc.), we would
|
| 168 |
+
# have needed to get the name through `node.annotation.id`
|
| 169 |
+
# instead of `node.annotation.value.id`. Seems that we're
|
| 170 |
+
# not evaluating one of our "containers"
|
| 171 |
+
return
|
| 172 |
+
|
| 173 |
+
# Check if the assigned variable is empty
|
| 174 |
+
ann_type = node.annotation.value.id
|
| 175 |
+
if not self._is_empty_container(node.value, ann_type):
|
| 176 |
+
return
|
| 177 |
+
|
| 178 |
+
warnings.warn(
|
| 179 |
+
"The TorchScript type system doesn't support "
|
| 180 |
+
"instance-level annotations on empty non-base "
|
| 181 |
+
"types in `__init__`. Instead, either 1) use a "
|
| 182 |
+
"type annotation in the class body, or 2) wrap "
|
| 183 |
+
"the type in `torch.jit.Attribute`."
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
def visit_Call(self, node):
|
| 187 |
+
"""Determine if a Call node is 'torch.jit.annotate' in __init__.
|
| 188 |
+
|
| 189 |
+
Visit a Call node in an ``nn.Module``'s ``__init__``
|
| 190 |
+
method and determine if it's ``torch.jit.annotate``. If so,
|
| 191 |
+
see if it conforms to our attribute annotation rules.
|
| 192 |
+
"""
|
| 193 |
+
# If we have an attribute that's already been annotated at the
|
| 194 |
+
# class level
|
| 195 |
+
if self.visiting_class_level_ann:
|
| 196 |
+
return
|
| 197 |
+
|
| 198 |
+
# If this isn't a call to `torch.jit.annotate`
|
| 199 |
+
try:
|
| 200 |
+
if (
|
| 201 |
+
node.func.value.value.id != "torch"
|
| 202 |
+
or node.func.value.attr != "jit"
|
| 203 |
+
or node.func.attr != "annotate"
|
| 204 |
+
):
|
| 205 |
+
self.generic_visit(node)
|
| 206 |
+
elif (
|
| 207 |
+
node.func.value.value.id != "jit" or node.func.value.attr != "annotate"
|
| 208 |
+
):
|
| 209 |
+
self.generic_visit(node)
|
| 210 |
+
except AttributeError:
|
| 211 |
+
# Looks like we didn't even have the right node structure
|
| 212 |
+
# to check for `torch.jit.annotate` in the first place
|
| 213 |
+
self.generic_visit(node)
|
| 214 |
+
|
| 215 |
+
# Invariant: we have a `torch.jit.annotate` or a
|
| 216 |
+
# `torch.annotate` call
|
| 217 |
+
|
| 218 |
+
# A Call Node for `torch.jit.annotate` should have an `args`
|
| 219 |
+
# list of length 2 where args[0] represents the annotation and
|
| 220 |
+
# args[1] represents the actual value
|
| 221 |
+
if len(node.args) != 2:
|
| 222 |
+
return
|
| 223 |
+
|
| 224 |
+
if not isinstance(node.args[0], ast.Subscript):
|
| 225 |
+
return
|
| 226 |
+
|
| 227 |
+
# See notes in `visit_AnnAssign` r.e. containers
|
| 228 |
+
|
| 229 |
+
containers = {"List", "Dict", "Optional"}
|
| 230 |
+
|
| 231 |
+
try:
|
| 232 |
+
ann_type = node.args[0].value.id # type: ignore[attr-defined]
|
| 233 |
+
except AttributeError:
|
| 234 |
+
return
|
| 235 |
+
|
| 236 |
+
if ann_type not in containers:
|
| 237 |
+
return
|
| 238 |
+
|
| 239 |
+
# Check if the assigned variable is empty
|
| 240 |
+
if not self._is_empty_container(node.args[1], ann_type):
|
| 241 |
+
return
|
| 242 |
+
|
| 243 |
+
warnings.warn(
|
| 244 |
+
"The TorchScript type system doesn't support "
|
| 245 |
+
"instance-level annotations on empty non-base "
|
| 246 |
+
"types in `__init__`. Instead, either 1) use a "
|
| 247 |
+
"type annotation in the class body, or 2) wrap "
|
| 248 |
+
"the type in `torch.jit.Attribute`."
|
| 249 |
+
)
|
janus/lib/python3.10/site-packages/torch/jit/_dataclass_impls.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
# Functions for synthesizing magic methods for JIT-compiled dataclasses
|
| 3 |
+
import ast
|
| 4 |
+
import dataclasses
|
| 5 |
+
import inspect
|
| 6 |
+
import os
|
| 7 |
+
from functools import partial
|
| 8 |
+
from typing import Callable, Dict, List
|
| 9 |
+
|
| 10 |
+
from torch._jit_internal import FAKE_FILENAME_PREFIX, is_optional
|
| 11 |
+
from torch._sources import ParsedDef, SourceContext
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _get_fake_filename(cls, method_name):
|
| 15 |
+
return os.path.join(FAKE_FILENAME_PREFIX, cls.__name__, method_name)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def compose_fn(cls, name: str, body_lines: List[str], signature: str) -> ParsedDef:
|
| 19 |
+
body = "\n".join(f" {b}" for b in body_lines)
|
| 20 |
+
decl = f"def {name}{signature}:\n{body}"
|
| 21 |
+
|
| 22 |
+
# Parse the function declaration
|
| 23 |
+
try:
|
| 24 |
+
py_ast = ast.parse(decl)
|
| 25 |
+
except SyntaxError as e:
|
| 26 |
+
# This should only happen if there's some unforeseeable change
|
| 27 |
+
# in the dataclasses module that makes our synthesized code fail
|
| 28 |
+
raise RuntimeError(
|
| 29 |
+
f"TorchScript failed to synthesize dataclass method '{name}' for class '{cls.__name__}'. "
|
| 30 |
+
"Please file a bug report at <https://github.com/pytorch/pytorch/issues>"
|
| 31 |
+
) from e
|
| 32 |
+
fake_filename = _get_fake_filename(cls, name)
|
| 33 |
+
# Parse the function
|
| 34 |
+
return ParsedDef(
|
| 35 |
+
py_ast,
|
| 36 |
+
ctx=SourceContext(
|
| 37 |
+
source=decl, filename=fake_filename, file_lineno=0, leading_whitespace_len=0
|
| 38 |
+
),
|
| 39 |
+
source=decl,
|
| 40 |
+
filename=fake_filename,
|
| 41 |
+
file_lineno=0,
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def synthesize__init__(cls) -> ParsedDef:
|
| 46 |
+
# Supporting default factories in the way that people expect would sort of require us to
|
| 47 |
+
# allow compiling lambda functions, which is not currently supported.
|
| 48 |
+
if any(
|
| 49 |
+
field.default_factory is not dataclasses.MISSING
|
| 50 |
+
for field in dataclasses.fields(cls)
|
| 51 |
+
):
|
| 52 |
+
raise NotImplementedError(
|
| 53 |
+
"Default factory initializers are not supported in TorchScript dataclasses"
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
# Simply read off the generated __init__ signature from CPython's implementation. It'll be
|
| 57 |
+
# almost correct except for InitVar annotations, which we need to handle specially.
|
| 58 |
+
signature = inspect.signature(cls.__init__)
|
| 59 |
+
|
| 60 |
+
# Handle InitVars if needed (only works on Python 3.8+, when a `type` attribute was added to InitVar);
|
| 61 |
+
# see CPython commit here https://github.com/python/cpython/commit/01ee12ba35a333e8a6a25c4153c4a21838e9585c
|
| 62 |
+
init_vars: List[str] = []
|
| 63 |
+
params = []
|
| 64 |
+
for name, param in signature.parameters.items():
|
| 65 |
+
ann = param.annotation
|
| 66 |
+
|
| 67 |
+
if isinstance(ann, dataclasses.InitVar):
|
| 68 |
+
# The TorchScript interpreter can't handle InitVar annotations, so we unwrap the underlying type here
|
| 69 |
+
init_vars.append(name)
|
| 70 |
+
params.append(param.replace(annotation=ann.type)) # type: ignore[attr-defined]
|
| 71 |
+
else:
|
| 72 |
+
params.append(param)
|
| 73 |
+
|
| 74 |
+
signature = signature.replace(parameters=params)
|
| 75 |
+
|
| 76 |
+
body = [
|
| 77 |
+
# Assign all attributes to self
|
| 78 |
+
f"self.{field.name} = {field.name}"
|
| 79 |
+
for field in dataclasses.fields(cls)
|
| 80 |
+
if field.init and field.name not in init_vars
|
| 81 |
+
]
|
| 82 |
+
# Call user's impl of __post_init__ if it exists
|
| 83 |
+
if hasattr(cls, "__post_init__"):
|
| 84 |
+
body.append("self.__post_init__(" + ", ".join(init_vars) + ")")
|
| 85 |
+
|
| 86 |
+
return compose_fn(cls, "__init__", body or ["pass"], signature=str(signature))
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# This is a placeholder at the moment since the TorchScript interpreter doesn't call __repr__
|
| 90 |
+
def synthesize__repr__(cls) -> ParsedDef:
|
| 91 |
+
return compose_fn(
|
| 92 |
+
cls,
|
| 93 |
+
"__repr__",
|
| 94 |
+
[
|
| 95 |
+
f"return '{cls.__name__}("
|
| 96 |
+
+ ", ".join(
|
| 97 |
+
[
|
| 98 |
+
f"{field.name}=self.{field.name}"
|
| 99 |
+
for field in dataclasses.fields(cls)
|
| 100 |
+
if field.repr
|
| 101 |
+
]
|
| 102 |
+
)
|
| 103 |
+
+ ")'"
|
| 104 |
+
],
|
| 105 |
+
signature="(self) -> str",
|
| 106 |
+
)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def synthesize__hash__(cls) -> ParsedDef:
|
| 110 |
+
return compose_fn(
|
| 111 |
+
cls,
|
| 112 |
+
"__hash__",
|
| 113 |
+
[
|
| 114 |
+
# This is just a placeholder to prevent compilation from failing; this won't even get called at
|
| 115 |
+
# all right now because the TorchScript interpreter doesn't call custom __hash__ implementations
|
| 116 |
+
"raise NotImplementedError('__hash__ is not supported for dataclasses in TorchScript')"
|
| 117 |
+
],
|
| 118 |
+
signature="(self) -> int",
|
| 119 |
+
)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
# Implementation for __eq__ and __ne__
|
| 123 |
+
def synthesize_equality(cls, name: str, converse: str) -> ParsedDef:
|
| 124 |
+
return synthesize_comparison(
|
| 125 |
+
cls,
|
| 126 |
+
name,
|
| 127 |
+
allow_eq=True,
|
| 128 |
+
raise_on_none=False,
|
| 129 |
+
inner=[f"if val1 {converse} val2: return False"],
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def synthesize_inequality(cls, name: str, op: str, allow_eq: bool) -> ParsedDef:
|
| 134 |
+
return synthesize_comparison(
|
| 135 |
+
cls,
|
| 136 |
+
name,
|
| 137 |
+
allow_eq,
|
| 138 |
+
raise_on_none=True,
|
| 139 |
+
inner=[
|
| 140 |
+
f"if val1 {op} val2: return True",
|
| 141 |
+
f"elif val2 {op} val1: return False",
|
| 142 |
+
],
|
| 143 |
+
)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def synthesize_comparison(
|
| 147 |
+
cls, name: str, allow_eq: bool, raise_on_none: bool, inner: List[str]
|
| 148 |
+
) -> ParsedDef:
|
| 149 |
+
body = []
|
| 150 |
+
for field in dataclasses.fields(cls):
|
| 151 |
+
if not field.compare:
|
| 152 |
+
continue
|
| 153 |
+
|
| 154 |
+
body.extend(
|
| 155 |
+
[
|
| 156 |
+
f"val1 = self.{field.name}",
|
| 157 |
+
f"val2 = other.{field.name}",
|
| 158 |
+
]
|
| 159 |
+
)
|
| 160 |
+
body.extend(
|
| 161 |
+
inner
|
| 162 |
+
if not is_optional(field.type)
|
| 163 |
+
else [
|
| 164 |
+
# Type refinement for optional fields; we need this to avoid type errors from the interpreter
|
| 165 |
+
"if val1 is not None and val2 is not None:",
|
| 166 |
+
*[" " + line for line in inner],
|
| 167 |
+
"elif (val1 is None) != (val2 is None):",
|
| 168 |
+
f" raise TypeError('Cannot compare {cls.__name__} with None')"
|
| 169 |
+
if raise_on_none
|
| 170 |
+
else " return False",
|
| 171 |
+
]
|
| 172 |
+
)
|
| 173 |
+
|
| 174 |
+
body.append(f"return {allow_eq}")
|
| 175 |
+
return compose_fn(
|
| 176 |
+
cls, name, body, signature=f"(self, other: {cls.__name__}) -> bool"
|
| 177 |
+
)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
DATACLASS_MAGIC_METHODS: Dict[str, Callable] = {
|
| 181 |
+
"__init__": synthesize__init__,
|
| 182 |
+
"__repr__": synthesize__repr__,
|
| 183 |
+
"__hash__": synthesize__hash__,
|
| 184 |
+
"__eq__": partial(synthesize_equality, name="__eq__", converse="!="),
|
| 185 |
+
"__ne__": partial(synthesize_equality, name="__ne__", converse="=="),
|
| 186 |
+
"__lt__": partial(synthesize_inequality, name="__lt__", op="<", allow_eq=False),
|
| 187 |
+
"__le__": partial(synthesize_inequality, name="__le__", op="<", allow_eq=True),
|
| 188 |
+
"__gt__": partial(synthesize_inequality, name="__gt__", op=">", allow_eq=False),
|
| 189 |
+
"__ge__": partial(synthesize_inequality, name="__ge__", op=">", allow_eq=True),
|
| 190 |
+
}
|
janus/lib/python3.10/site-packages/torch/jit/_decomposition_utils.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
from torch._ops import OpOverload, OpOverloadPacket
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _register_decomposition(op: OpOverload, graph: torch._C.Graph):
|
| 7 |
+
assert not isinstance(
|
| 8 |
+
op, OpOverloadPacket
|
| 9 |
+
), f"Must pass specific op overload, not overload packet, found {op}"
|
| 10 |
+
assert isinstance(op, OpOverload)
|
| 11 |
+
|
| 12 |
+
torch._C._jit_register_decomposition_for_schema(op._schema, graph)
|
janus/lib/python3.10/site-packages/torch/jit/_decompositions.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
from torch import Tensor
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
aten = torch.ops.aten
|
| 7 |
+
import inspect
|
| 8 |
+
import warnings
|
| 9 |
+
from typing import Callable, Dict, List, Optional, Set, TypeVar
|
| 10 |
+
from typing_extensions import ParamSpec
|
| 11 |
+
|
| 12 |
+
from torch.types import Number
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
decomposition_table: Dict[str, torch.jit.ScriptFunction] = {}
|
| 16 |
+
function_name_set: Set[str] = set()
|
| 17 |
+
|
| 18 |
+
_T = TypeVar("_T")
|
| 19 |
+
_P = ParamSpec("_P")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def check_decomposition_has_type_annotations(f):
|
| 23 |
+
inspect_empty = inspect._empty # type: ignore[attr-defined]
|
| 24 |
+
sig = inspect.signature(f)
|
| 25 |
+
for param in sig.parameters.values():
|
| 26 |
+
assert (
|
| 27 |
+
param.annotation != inspect_empty
|
| 28 |
+
), f"No signature on param {param.name} for function {f.name}"
|
| 29 |
+
|
| 30 |
+
assert (
|
| 31 |
+
sig.return_annotation != inspect_empty
|
| 32 |
+
), f"No return annotation for function {f.name}"
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def signatures_match(decomposition_sig, torch_op_sig):
|
| 36 |
+
decomp_params = decomposition_sig.parameters
|
| 37 |
+
op_params = torch_op_sig.parameters
|
| 38 |
+
|
| 39 |
+
if len(decomp_params) != len(op_params):
|
| 40 |
+
return False
|
| 41 |
+
|
| 42 |
+
for decomp_param, op_param in zip(decomp_params.values(), op_params.values()):
|
| 43 |
+
# can't check full equality yet because not all fields are correcly deduced
|
| 44 |
+
# in the torch_op_sig - like default value
|
| 45 |
+
# can't check 'kind' bc
|
| 46 |
+
# kwarg-only values with defaults not yet supported in TS
|
| 47 |
+
inspect_empty = inspect._empty # type: ignore[attr-defined]
|
| 48 |
+
for field in ["name", "annotation"]:
|
| 49 |
+
if field == "name" and decomp_param.name == "self":
|
| 50 |
+
warnings.warn("PyTorch uses 'input' instead of 'self' on public api")
|
| 51 |
+
|
| 52 |
+
if getattr(decomp_param, field) != getattr(op_param, field):
|
| 53 |
+
return False
|
| 54 |
+
|
| 55 |
+
decomp_default = decomp_param.default
|
| 56 |
+
op_default = op_param.default
|
| 57 |
+
# default value not always correctly inferred as being present on torch schema,
|
| 58 |
+
# but if specified on both they should be equal
|
| 59 |
+
if decomp_default != inspect_empty and op_default != inspect_empty:
|
| 60 |
+
if decomp_default != op_default:
|
| 61 |
+
return False
|
| 62 |
+
|
| 63 |
+
return decomposition_sig.return_annotation == torch_op_sig.return_annotation
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def register_decomposition(
|
| 67 |
+
aten_op: torch._ops.OpOverload,
|
| 68 |
+
registry: Optional[Dict[str, torch.jit.ScriptFunction]] = None,
|
| 69 |
+
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
|
| 70 |
+
def decomposition_decorator(f: Callable[_P, _T]) -> Callable[_P, _T]:
|
| 71 |
+
nonlocal registry
|
| 72 |
+
if registry is None:
|
| 73 |
+
registry = decomposition_table
|
| 74 |
+
|
| 75 |
+
assert isinstance(aten_op, torch._ops.OpOverload)
|
| 76 |
+
|
| 77 |
+
# Need unique name for jit function serialization
|
| 78 |
+
assert (
|
| 79 |
+
f.__name__ not in function_name_set
|
| 80 |
+
), f"Duplicated function name {f.__name__}"
|
| 81 |
+
function_name_set.add(f.__name__)
|
| 82 |
+
|
| 83 |
+
scripted_func = torch.jit.script(f)
|
| 84 |
+
torch._C._jit_pass_inline(scripted_func.graph)
|
| 85 |
+
|
| 86 |
+
for _ in range(2):
|
| 87 |
+
torch._C._jit_pass_peephole(scripted_func.graph)
|
| 88 |
+
torch._C._jit_pass_constant_propagation(scripted_func.graph)
|
| 89 |
+
|
| 90 |
+
registry[str(aten_op._schema)] = scripted_func
|
| 91 |
+
return f
|
| 92 |
+
|
| 93 |
+
return decomposition_decorator
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# TODO: replace torch.sigmoid -> aten.sigmoid
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@register_decomposition(aten.var.correction)
|
| 100 |
+
def var_decomposition(
|
| 101 |
+
input: Tensor,
|
| 102 |
+
dim: Optional[List[int]] = None,
|
| 103 |
+
correction: Optional[Number] = None,
|
| 104 |
+
keepdim: bool = False,
|
| 105 |
+
) -> Tensor:
|
| 106 |
+
if dim is None:
|
| 107 |
+
dim_i: List[int] = []
|
| 108 |
+
dim = dim_i
|
| 109 |
+
|
| 110 |
+
if isinstance(dim, (tuple, list)) and len(dim) == 0:
|
| 111 |
+
n = input.numel()
|
| 112 |
+
else:
|
| 113 |
+
n = 1
|
| 114 |
+
for dim_i in dim: # type: ignore[assignment]
|
| 115 |
+
n *= input.shape[dim_i] # type: ignore[call-overload]
|
| 116 |
+
|
| 117 |
+
mean = aten.mean(input, dim, True)
|
| 118 |
+
sub = input - mean
|
| 119 |
+
sq = sub * sub
|
| 120 |
+
sum = aten.sum(sq, dim, keepdim)
|
| 121 |
+
|
| 122 |
+
if correction is None:
|
| 123 |
+
denom = float(n - 1)
|
| 124 |
+
else:
|
| 125 |
+
if isinstance(correction, int):
|
| 126 |
+
denom = float(n - correction)
|
| 127 |
+
elif isinstance(correction, float):
|
| 128 |
+
denom = float(n) - correction
|
| 129 |
+
else:
|
| 130 |
+
raise RuntimeError("correction must be int or float")
|
| 131 |
+
|
| 132 |
+
return sum / max(0, denom)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
@register_decomposition(aten.var.default)
|
| 136 |
+
def var(input: Tensor, unbiased: bool = True) -> Tensor:
|
| 137 |
+
return var_decomposition(input, correction=(1 if unbiased else 0))
|
janus/lib/python3.10/site-packages/torch/jit/_freeze.py
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
"""Freezing.
|
| 3 |
+
|
| 4 |
+
This is not intended to be imported directly; please use the exposed
|
| 5 |
+
functionalities in `torch.jit`.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import List, Optional
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
from torch.jit._script import RecursiveScriptModule, ScriptModule
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def freeze(
    mod, preserved_attrs: Optional[List[str]] = None, optimize_numerics: bool = True
):
    r"""Freeze a :class:`ScriptModule`, inlining submodules and attributes as constants.

    Freezing clones ``mod`` and attempts to bake the clone's submodules,
    parameters, and attributes into the TorchScript IR graph as constants.
    ``forward`` is always preserved, as are the attributes and methods named in
    ``preserved_attrs``; any attribute mutated inside a preserved method is kept
    as well. Only modules in eval mode can be frozen.

    Freezing applies generic, machine-independent optimizations; run
    `optimize_for_inference` afterwards for build-specific speedups.

    Args:
        mod (:class:`ScriptModule`): the module to freeze; must be in eval mode.
        preserved_attrs (Optional[List[str]]): attribute/method names to keep in
            addition to ``forward``. Submodule attributes may be named with
            dotted paths, e.g. ``"submodule.version"``.
        optimize_numerics (bool): when ``True``, runs optimization passes that do
            not strictly preserve numerics; see
            `torch.jit.run_frozen_optimizations` for details.

    Returns:
        The frozen :class:`ScriptModule`.

    Raises:
        RuntimeError: if ``mod`` is not a :class:`ScriptModule` or is in
            training mode.

    Note:
        If an attribute is unexpectedly not inlined as a constant, run
        ``dump_alias_db`` on ``frozen_module.forward.graph`` to check whether
        freezing detected the attribute being modified.

    Note:
        Freezing removes the module hierarchy, so ``to`` and other nn.Module
        device/dtype utilities no longer work. Devices can be remapped with
        ``map_location`` in `torch.jit.load`, though device-specific logic may
        already be baked into the model.
    """
    if not isinstance(mod, ScriptModule):
        raise RuntimeError(
            "Freezing expects a ScriptModule as input. "
            "Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'."
        )

    if mod.training:
        raise RuntimeError(
            "Freezing is currently only implemented for modules in eval mode. "
            "Please call .eval() on your module before freezing."
        )

    attrs_to_keep = [] if preserved_attrs is None else preserved_attrs

    frozen = RecursiveScriptModule(torch._C._freeze_module(mod._c, attrs_to_keep))
    RecursiveScriptModule._finalize_scriptmodule(frozen)

    # Entries of `preserved_attrs` that name methods (not plain attributes)
    # also need their graphs run through the frozen-graph optimizations.
    kept_methods = [name for name in attrs_to_keep if mod._c._has_method(name)]
    run_frozen_optimizations(frozen, optimize_numerics, kept_methods)

    return frozen
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def run_frozen_optimizations(
    mod, optimize_numerics: bool = True, preserved_methods: Optional[List[str]] = None
):
    r"""Run optimization passes over the graphs of a frozen module.

    Looks for patterns that commonly occur in frozen graphs. The current set of
    optimizations includes: dropout removal, pre-transposing linear layers,
    concatenating linear layers that share an input tensor, and folding
    batchnorm / add / sub / mul / div into a preceding convolution.

    Args:
        mod (:class:`ScriptModule`): a frozen module, optimized in place.
        optimize_numerics (bool): when ``True``, also run passes that do not
            strictly preserve numerics (the Conv -> Batchnorm / Add / Sub /
            Mul / Div foldings). Each single transformation stays within the
            default rtol/atol of `torch.testing.assert_close`, but many stacked
            transformations may drift outside those tolerances.
        preserved_methods (Optional[List[str]]): preserved methods whose graphs
            should be optimized in addition to ``forward``.

    Returns:
        None

    Note:
        In rare occasions, this can result in slower execution.

    Example (Freezing a module with Conv->Batchnorm)
    .. code-block:: python
        import torch
        in_channels, out_channels = 3, 32
        conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
        bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
        mod = torch.nn.Sequential(conv, bn)
        # set optimize to False here, by default freezing runs run_frozen_optimizations
        frozen_mod = torch.jit.freeze(torch.jit.script(mod.eval()), optimize=False)
        # inspect frozen mod
        assert "batch_norm" in str(frozen_mod.graph)
        torch.jit.run_frozen_optimizations(frozen_mod)
        assert "batch_norm" not in str(frozen_mod.graph)
    """
    # Hoist the C++ pass; it is applied to forward plus each preserved method.
    optimize_graph = torch._C._jit_pass_optimize_frozen_graph

    if mod._c._has_method("forward"):
        optimize_graph(mod.graph, optimize_numerics)

    for method_name in preserved_methods or []:
        optimize_graph(mod.__getattr__(method_name).graph, optimize_numerics)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
def optimize_for_inference(
    mod: ScriptModule, other_methods: Optional[List[str]] = None
) -> ScriptModule:
    """Optimize a model for inference with both generic and build-specific passes.

    If ``mod`` is not already frozen, `torch.jit.freeze` is invoked
    automatically (after putting the module in eval mode).

    Beyond generic optimizations that speed up the model everywhere, this bakes
    in build-specific settings such as the presence of CUDNN or MKLDNN, and may
    in the future apply transformations that speed things up on one machine but
    slow them down on another. Accordingly, serialization after calling
    `optimize_for_inference` is not implemented and not guaranteed.

    This is still in prototype and may slow down some models; the primary
    targets so far have been vision models on cpu, and gpu to a lesser extent.

    Args:
        mod (:class:`ScriptModule`): the module to optimize.
        other_methods (Optional[List[str]]): additional methods (beyond
            ``forward``) to preserve and optimize.

    Returns:
        The optimized (frozen) :class:`ScriptModule`.

    Raises:
        RuntimeError: if ``mod`` is not a :class:`ScriptModule`.

    Example (optimizing a module with Conv->Batchnorm)::

        import torch
        in_channels, out_channels = 3, 32
        conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
        bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
        mod = torch.nn.Sequential(conv, bn)
        frozen_mod = torch.jit.optimize_for_inference(torch.jit.script(mod.eval()))
        assert "batch_norm" not in str(frozen_mod.graph)
        # if built with MKLDNN, convolution will be run with MKLDNN weights
        assert "MKLDNN" in frozen_mod.graph
    """
    if not isinstance(mod, ScriptModule):
        raise RuntimeError(
            "optimize_for_inference expects a ScriptModule as input. "
            "Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'."
        )

    methods = [] if other_methods is None else other_methods

    # Frozen modules have no `training` attribute; its presence means the
    # module still needs freezing.
    if hasattr(mod, "training"):
        mod = freeze(mod.eval(), preserved_attrs=methods)

    torch._C._jit_pass_optimize_for_inference(mod._c, methods)

    return mod
|
janus/lib/python3.10/site-packages/torch/jit/_fuser.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import contextlib
|
| 3 |
+
from typing import List, Tuple
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@contextlib.contextmanager
def optimized_execution(should_optimize):
    """Context manager toggling whether the JIT executor optimizes before running.

    The previous setting is restored on exit, even if the body raises.
    """
    previous = torch._C._get_graph_executor_optimize()
    torch._C._set_graph_executor_optimize(should_optimize)
    try:
        yield
    finally:
        torch._C._set_graph_executor_optimize(previous)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@contextlib.contextmanager
def fuser(name):
    """Context manager that selects which JIT fusion backend is active.

    Valid names:
    * ``fuser0`` - enables only legacy fuser
    * ``fuser1`` - enables only NNC
    * ``fuser2`` - enables only nvFuser
    * ``fuser3`` - enables oneDNN Graph
    * ``none``   - turns the PyTorch fuser off

    All fuser-related flags are restored to their prior values on exit.
    """
    # Snapshot current flag values so they can be restored afterwards.
    prev_cpu = torch._C._jit_can_fuse_on_cpu()
    prev_gpu = torch._C._jit_can_fuse_on_gpu()
    prev_texpr = torch._C._jit_texpr_fuser_enabled()
    prev_nvfuser = torch._C._jit_nvfuser_enabled()
    prev_llga = torch._C._jit_llga_enabled()

    # Per-option flag tuple: (fuse_on_cpu, fuse_on_gpu, texpr, nvfuser, llga).
    configs = {
        "fuser0": (True, True, False, False, False),  # legacy fuser
        "fuser1": (True, True, True, False, False),  # NNC
        "fuser2": (False, False, False, True, False),  # nvFuser
        "fuser3": (True, False, True, False, True),  # oneDNN Graph
        "none": (False, False, False, False, False),  # everything off
    }
    if name not in configs:
        raise Exception(f"unrecognized fuser option (name: {name})")  # noqa: TRY002

    # NNC and oneDNN Graph additionally require the profiling executor.
    needs_profiling = name in ("fuser1", "fuser3")
    if needs_profiling:
        prev_profiling_executor = torch._C._jit_set_profiling_executor(True)
        prev_profiling_mode = torch._C._get_graph_executor_optimize(True)

    on_cpu, on_gpu, texpr, nvfuser_on, llga = configs[name]
    torch._C._jit_override_can_fuse_on_cpu(on_cpu)
    torch._C._jit_override_can_fuse_on_gpu(on_gpu)
    torch._C._jit_set_texpr_fuser_enabled(texpr)
    torch._C._jit_set_nvfuser_enabled(nvfuser_on)
    torch._C._jit_set_llga_enabled(llga)
    try:
        yield
    finally:
        if needs_profiling:
            torch._C._jit_set_profiling_executor(prev_profiling_executor)  # type: ignore[possibly-undefined]
            torch._C._get_graph_executor_optimize(prev_profiling_mode)  # type: ignore[possibly-undefined]
        # recover the previous values
        torch._C._jit_override_can_fuse_on_cpu(prev_cpu)
        torch._C._jit_override_can_fuse_on_gpu(prev_gpu)
        torch._C._jit_set_texpr_fuser_enabled(prev_texpr)
        torch._C._jit_set_nvfuser_enabled(prev_nvfuser)
        torch._C._jit_set_llga_enabled(prev_llga)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
# Module-level alias for the C++ binding that returns the most recently
# executed optimized graph; used as the fallback in _script_method_graph_for.
last_executed_optimized_graph = torch._C._last_executed_optimized_graph
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _get_differentiable_graph_node(node, diff_node):
|
| 88 |
+
if node.kind() == "prim::DifferentiableGraph":
|
| 89 |
+
diff_node.append(node)
|
| 90 |
+
else:
|
| 91 |
+
for block in node.blocks():
|
| 92 |
+
for n in block.nodes():
|
| 93 |
+
_get_differentiable_graph_node(n, diff_node)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def _graph_for(self, *args, **kwargs):
    """Return the optimized graph for a callable that owns its own debug state."""
    # The callable serves as its own debug-state parent here.
    parent = self
    return _script_method_graph_for(self, parent, *args, **kwargs)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _script_method_graph_for(self, parent, *args, **kwargs):
    """Return an optimized graph for ``self``, using ``parent``'s debug state.

    Copies the single execution plan's graph and splices each differentiable
    subgraph's optimized graph into the corresponding
    ``prim::DifferentiableGraph`` node. On any failure, falls back to running
    ``self(*args, **kwargs)`` once and returning the recorded optimized graph.
    """
    try:
        debug_state = parent.get_debug_state()
        plans = list(debug_state.execution_plans.values())
        assert len(plans) == 1
        graph = plans[0].graph.copy()

        # graph_executor_states for differentiable node
        fw_states = plans[0].code.differentiable_op_executor_states()
        diff_nodes: List[torch._C.Node] = []
        for node in graph.nodes():
            _get_differentiable_graph_node(node, diff_nodes)

        assert len(fw_states) == len(diff_nodes)
        # swap each differentiable graph with optimized graph in their execution plan
        for node, state in zip(diff_nodes, fw_states):
            inner_plans = list(state.execution_plans.values())
            # we can only update the subgraph when there's a unique execution
            # plan. Avoid assert here so we would skip the ones that can't be
            # updated while try the best effort to update other nodes.
            if len(inner_plans) == 1:
                node.g_("Subgraph", inner_plans[0].graph)

        return graph
    except Exception:
        # fallback approach, we just ran the graph and return the recorded optimized
        # graph
        self(*args, **kwargs)
        return last_executed_optimized_graph()
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def set_fusion_strategy(strategy: List[Tuple[str, int]]):
    """Set the kind and number of specializations that fusion may compile.

    Usage: pass a list of ``(type, depth)`` pairs, where ``type`` is
    ``"STATIC"`` or ``"DYNAMIC"`` and ``depth`` is an integer.

    Behavior - static vs dynamic:
        STATIC fusion compiles fused ops for fixed input shapes, determined
        from initial profiling runs. DYNAMIC fusion compiles fused ops that
        admit variable input shapes. Both recompile on new striding behavior,
        device, or dtype.

    Behavior - fallback functions & depth:
        When an input does not match a specialized compiled op's format, a
        fallback function runs; fallbacks are recursively compiled and
        specialized on observed tensor shapes. Since compilation is slow,
        ``depth`` bounds how many specializations are compiled before giving
        up and falling back to a fully un-fused, un-specialized
        implementation.

    The list controls both the kinds and counts of specializations. For
    example, ``[("STATIC", 2), ("DYNAMIC", 2)]`` means the first two
    specializations use static fusion, the next two use dynamic fusion, and
    inputs matching none of the four run an unfused implementation.

    NB: more granular per-fuser APIs may be added as more fusion backends
    appear.
    """
    return torch._C._jit_set_fusion_strategy(strategy)
|