diff --git a/janus/lib/python3.10/site-packages/torch/_export/serde/dynamic_shapes.py b/janus/lib/python3.10/site-packages/torch/_export/serde/dynamic_shapes.py
new file mode 100644
index 0000000000000000000000000000000000000000..f24822d9b07d68cecfcb207b6b0c1b6829c3d3d8
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/_export/serde/dynamic_shapes.py
@@ -0,0 +1,321 @@
+import dataclasses
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+from torch._dynamo.exc import UserError, UserErrorType
+from torch.export.dynamic_shapes import (
+    _check_dynamic_shapes,
+    _DerivedDim,
+    _Dim,
+    _DimHint,
+    _tree_map_with_path,
+    Dim,
+)
+from torch.utils._pytree import tree_map
+
+from .serialize import _dataclass_to_dict
+
+
+@dataclasses.dataclass
+class RootDim:
+    """
+    This represents a _Dim object.
+    """
+
+    min: int
+    max: Union[int, None]
+    derived: List[str]
+
+
+@dataclasses.dataclass
+class DynamicShapesSpec:
+    """
+    This stores a dynamic_shapes spec for de/serialization.
+    """
+
+    dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None]
+    dims: Dict[str, RootDim]
+
+
+def _postprocess_serialized_shapes(
+    dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None],
+    dims: Dict[str, Dict[str, Union[int, List[str], None]]],
+    to_dict: Optional[bool] = False,
+) -> Union[DynamicShapesSpec, Dict[str, Any]]:
+    """
+    Sorts dims and dumps to dictionary format.
+    """
+    from torch.utils._sympy.numbers import int_oo
+
+    dims = {
+        k: RootDim(
+            min=v["min"],  # type: ignore[arg-type]
+            max=None if v["max"] is int_oo else v["max"],  # type: ignore[arg-type]
+            derived=sorted(v["derived"]),  # type: ignore[arg-type]
+        )
+        for k, v in sorted(dims.items())
+    }
+    spec = DynamicShapesSpec(dynamic_shapes=dynamic_shapes, dims=dims)
+    if to_dict:
+        return _dataclass_to_dict(spec)
+    else:
+        return spec
+
+
+def _dump_dynamic_shapes(
+    dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None],
+    args: Tuple[Any],
+    kwargs: Optional[Dict[str, Any]] = None,
+    to_dict: Optional[bool] = False,
+) -> Union[DynamicShapesSpec, Dict[str, Any]]:
+    """
+    Utility function for serializing a dynamic_shapes spec.
+    Returns a DynamicShapesSpec dataclass containing 2 fields, "dynamic_shapes" and "dims".
+    Uses args & kwargs to distinguish between tensor-level and dim-level specs (only for Nones).
+
+    dynamic_shapes: A pytree structure mirroring the dynamic_shapes input to export():
+    - Each tensor input is represented with a list of values, non-tensor inputs with None.
+    - Dynamic dimensions (i.e. symbols) in tensors and Dim enums are represented with strings.
+    - Static dimensions are represented with ints.
+
+    dims: A dictionary mapping each symbol name to the min/max range and derived dim names.
+
+    For example:
+    ```
+    dx = Dim("dx", min=4, max=16)
+    dy = dx + 1
+
+    inputs = (
+        [
+            torch.randn(4, 4),
+            torch.randn(5, 4),
+        ],
+        torch.randn(4),
+        torch.randn(4, 4),
+        "hello",
+    )
+    dynamic_shapes = {
+        "a": [
+            (dx, 4),
+            (dy, 4),
+        ],
+        "b": (Dim.STATIC,),
+        "c": None,
+        "d": None,
+    }
+    out = _dump_dynamic_shapes(dynamic_shapes, inputs, to_dict=True)
+    ```
+    would generate the following output:
+    ```
+    {
+        'dynamic_shapes': (
+            [
+                ['dx', 4],
+                ['dx + 1', 4],
+            ],
+            ['_DimHint.STATIC'],
+            ['_DimHint.STATIC', '_DimHint.STATIC'],
+            None,
+        ),
+        'dims': {
+            'dx': {
+                'min': 4,
+                'max': 16,
+                'derived': ['dx + 1'],
+            },
+        },
+    }
+    ```
+    """
+    dims: Dict[str, Dict[str, Any]] = {}
+
+    def _standardize_shapes(path, tensor, shape):  # type: ignore[no-untyped-def]
+        """
+        Helps standardize the dynamic_shapes tree structure we serialize,
+        returning lists for each tensor shape, handling tensor-level Nones.
+        """
+        if not isinstance(tensor, torch.Tensor):
+            return None
+        if shape is None:
+            return [Dim.STATIC] * len(tensor.shape)  # type: ignore[attr-defined]
+
+        out = []
+        if isinstance(shape, dict):
+            for i, s in enumerate(tensor.shape):
+                out.append(s if shape.get(i) is None else shape.get(i))
+        else:
+            assert isinstance(shape, (tuple, list))
+            for i, s in enumerate(tensor.shape):
+                out.append(s if shape[i] is None else shape[i])
+        return out
+
+    def _track_dim_from_dims(
+        val: Union[None, int, _DimHint, _Dim]
+    ) -> Union[None, int, str]:
+        """
+        Tracks dims, ranges, derived dims from the standardized dynamic_shapes spec.
+        """
+        if val is None or isinstance(val, int):  # non-tensor input or static
+            return val
+        if isinstance(val, _DimHint):  # store enum as string
+            return val.__class__.__name__ + "." + val.name
+
+        assert isinstance(val, _Dim)
+
+        # track root dim
+        root = val.root if isinstance(val, _DerivedDim) else val  # type: ignore[attr-defined]
+        if root.__name__ not in dims:
+            dims[root.__name__] = {
+                "min": root.min,
+                "max": root.max,
+                "derived": set(),
+            }
+
+        # track derived dims
+        if isinstance(val, _DerivedDim):
+            dims[root.__name__]["derived"].add(val.__name__)
+
+        return val.__name__
+
+    if dynamic_shapes is None:
+        return _postprocess_serialized_shapes(None, {}, to_dict=to_dict)
+
+    # convert to tuple of specs, for each arg/kwarg
+    kwargs = kwargs or {}
+    if isinstance(dynamic_shapes, dict):
+        dynamic_shapes = dynamic_shapes.values()  # type: ignore[assignment]
+    dynamic_shapes = tuple(dynamic_shapes)
+    combined_args = tuple(args) + tuple(kwargs.values())
+
+    # Run the same check used when processing shapes for export.
+    _check_dynamic_shapes(dict(enumerate(combined_args)), dynamic_shapes)  # type: ignore[arg-type]
+
+    tree_shapes = _tree_map_with_path(
+        _standardize_shapes, combined_args, dynamic_shapes, tree_name="inputs"
+    )
+    serialized_shapes = tree_map(_track_dim_from_dims, tree_shapes)
+    return _postprocess_serialized_shapes(serialized_shapes, dims, to_dict=to_dict)
+
+
+def _load_dynamic_shapes(
+    spec: Union[DynamicShapesSpec, Dict[str, Any]],
+    from_dict: Optional[bool] = False,
+) -> Union[Dict[str, Any], Tuple[Any], List[Any], None]:
+    """
+    Utility function for dynamic shapes deserialization.
+    Deserializes a DynamicShapesSpec or corresponding dictionary into a dynamic_shapes input to export().
+ """ + import sympy + + from torch.fx.experimental.symbolic_shapes import _is_supported_equivalence + + if from_dict: + if not isinstance(spec, dict): + raise UserError( + UserErrorType.INVALID_INPUT, + f"With from_dict=True, expected `spec` to be a dict, got {type(spec)}", + ) + if sorted(spec.keys()) != ["dims", "dynamic_shapes"]: + raise UserError( + UserErrorType.INVALID_INPUT, + "With from_dict=True, expected `spec` to have keys `dims` and `dynamic_shapes`, " + f"instead found {spec.keys()}", + ) + dims = {} + for k, v in spec["dims"].items(): + if not isinstance(k, str): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected `spec['dims']` keys to be strings for symbols, got key {type(k)}", + ) + if sorted(v.keys()) != ["derived", "max", "min"]: + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected `spec['dims']` values to have keys `derived`, `max`, and `min`, " + f"instead found {v.keys()}", + ) + if not isinstance(v["min"], int): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected dims in `spec['dims']` to map `min` to an int, got {k}: {v['min']}", + ) + if not isinstance(v["max"], int) or v["max"] is None: + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected dims in `spec['dims']` to map `max` to an int or None, got {k}: {v['max']}", + ) + if not isinstance(v["derived"], list) or any( + not isinstance(d, str) for d in v["derived"] + ): + raise UserError( + UserErrorType.INVALID_INPUT, + "Expected dims in `spec['dims']` to map `derived` to a list of derived expressions, " + f"got {k}: {v['derived']}", + ) + dims[k] = RootDim(**v) + dynamic_shapes = spec["dynamic_shapes"] + else: + if not isinstance(spec, DynamicShapesSpec): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected `spec` to be a DynamicShapesSpec, got {type(spec)}", + ) + dims = spec.dims + dynamic_shapes = spec.dynamic_shapes + + if dynamic_shapes is None: + return None + + dim_cache = {} + for name, info in dims.items(): + symbol = sympy.sympify(name) + if not isinstance(symbol, sympy.Symbol): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected `spec['dims']` keys to be symbols, got {name}", + ) + dim_cache[name] = Dim(name, min=info.min, max=info.max) # cache root dim + for _expr in info.derived: + expr = sympy.sympify(_expr) + if len(expr.free_symbols) != 1 or symbol not in expr.free_symbols: + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected derived expressions in to have {name} as the only free symbol, got {expr}", + ) + if not _is_supported_equivalence(expr): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected derived expressions to be linear expressions, got {expr}", + ) + modulus, remainder = sympy.polys.polytools.div(expr, symbol) + ddim = dim_cache[name] + if modulus != 1: + ddim = int(modulus) * ddim + if remainder != 0: + ddim = ddim + int(remainder) + dim_cache[_expr] = ddim # cache derived dims + + def deserialize_shape( + val: Union[None, int, str] + ) -> Union[None, int, _Dim, _DimHint]: + if val is None or isinstance(val, int): + return val + elif val == "_DimHint.AUTO": + return _DimHint.AUTO + elif val == "_DimHint.STATIC": + return _DimHint.STATIC + if not isinstance(val, str): + raise UserError( + UserErrorType.INVALID_INPUT, + "Expected leaves in `spec['dynamic_shapes']` to be ints, None, Dim.AUTO/STATIC, symbols, " + f" or derived expressions, got {val}", + ) + if val not in dim_cache: + raise UserError( + UserErrorType.INVALID_INPUT, + "Expected dims in `spec['dynamic_shapes']` to be tracked in `spec['dims']`, " 
+ f"got {val} which is not in {dims.keys()}", + ) + return dim_cache[val] + + return tree_map(deserialize_shape, dynamic_shapes) diff --git a/janus/lib/python3.10/site-packages/torch/_export/serde/schema.py b/janus/lib/python3.10/site-packages/torch/_export/serde/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..ce102b39367ad0178fe46236a428d1cf924dfe3c --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/serde/schema.py @@ -0,0 +1,381 @@ +# NOTE: This is a placeholder for iterating on export serialization schema design. +# Anything is subject to change and no guarantee is provided at this point. + +from dataclasses import dataclass, field +from enum import IntEnum +from typing import Dict, List, Optional, Tuple + +from torch._export.serde.union import _Union + +# NOTE: Please update this value if any modifications are made to the schema +SCHEMA_VERSION = (7, 3) +TREESPEC_VERSION = 1 + + +class ScalarType(IntEnum): + UNKNOWN = 0 + BYTE = 1 + CHAR = 2 + SHORT = 3 + INT = 4 + LONG = 5 + HALF = 6 + FLOAT = 7 + DOUBLE = 8 + COMPLEXHALF = 9 + COMPLEXFLOAT = 10 + COMPLEXDOUBLE = 11 + BOOL = 12 + BFLOAT16 = 13 + + +class Layout(IntEnum): + Unknown = 0 + SparseCoo = 1 + SparseCsr = 2 + SparseCsc = 3 + SparseBsr = 4 + SparseBsc = 5 + _mkldnn = 6 + Strided = 7 + + +class MemoryFormat(IntEnum): + Unknown = 0 + ContiguousFormat = 1 + ChannelsLast = 2 + ChannelsLast3d = 3 + PreserveFormat = 4 + + +@dataclass +class Device: + type: str + index: Optional[int] = None + + +@dataclass(repr=False) +class SymExprHint(_Union): + as_int: int + as_float: float + as_bool: bool + + +# This is for storing the symbolic expressions behind symints/symfloats/symbools +# For example, we can get something like +# SymExpr(expr_str="s0 + s1", hint=SymExprHint(as_int=4) +# if we also have the hint that s0 and s1 are both 2. +@dataclass +class SymExpr: + expr_str: str + hint: Optional[SymExprHint] = None + + +@dataclass(repr=False) +class SymInt(_Union): + as_expr: SymExpr + as_int: int + + +@dataclass(repr=False) +class SymBool(_Union): + as_expr: SymExpr + as_bool: bool + + +@dataclass +class TensorMeta: + dtype: ScalarType + sizes: List[SymInt] + requires_grad: bool + device: Device + strides: List[SymInt] + storage_offset: SymInt + layout: Layout + + +# In most cases we will use the "as_name" field to store arguments which are +# SymInts. +# The "as_int" field is used in the case where we have a list containing a mix +# of SymInt and ints (ex. [1, s0, ...]). We will serialize this type of list to +# be List[SymIntArgument] and map the SymInts to the "as_name" field, and ints +# to the "as_int" field. +@dataclass(repr=False) +class SymIntArgument(_Union): + as_name: str + as_int: int + + +# In most cases we will use the "as_name" field to store arguments which are +# SymBools. +# The "as_bool" field is used in the case where we have a list containing a mix +# of SymBool and bools (ex. [True, i0, ...]). We will serialize this type of list to +# be List[SymboolArgument] and map the SymBools to the "as_name" field, and bools +# to the "as_bool" field. +@dataclass(repr=False) +class SymBoolArgument(_Union): + as_name: str + as_bool: bool + + +@dataclass +class TensorArgument: + name: str + + +@dataclass +class TokenArgument: + name: str + + +# This is use for storing the contents of a list which contain optional tensors +# (Tensor?[], ex. 
[Tensor, None, ...]), where the list will be serialized to the +# type List[OptionalTensorArgument], with tensor values seiralized to the +# "as_tensor" field, and None values serialized to the "as_none" field. +@dataclass(repr=False) +class OptionalTensorArgument(_Union): + as_tensor: TensorArgument + as_none: Tuple[()] + + +@dataclass +class GraphArgument: + name: str + graph: 'Graph' + + +@dataclass +class CustomObjArgument: + name: str + class_fqn: str + + +# This is actually a union type +@dataclass(repr=False) +class Argument(_Union): + as_none: Tuple[()] + as_tensor: TensorArgument + as_tensors: List[TensorArgument] + as_int: int + as_ints: List[int] + as_float: float + as_floats: List[float] + as_string: str + as_strings: List[str] + as_sym_int: SymIntArgument + as_sym_ints: List[SymIntArgument] + as_scalar_type: ScalarType + as_memory_format: MemoryFormat + as_layout: Layout + as_device: Device + as_bool: bool + as_bools: List[bool] + as_sym_bool: SymBoolArgument + as_sym_bools: List[SymBoolArgument] + as_graph: GraphArgument + as_optional_tensors: List[OptionalTensorArgument] + as_custom_obj: CustomObjArgument + as_operator: str + + +@dataclass +class NamedArgument: + # Argument name from the operator schema + name: str + arg: Argument + + +@dataclass +class Node: + target: str + inputs: List[NamedArgument] + outputs: List[Argument] + metadata: Dict[str, str] + + +@dataclass +class Graph: + inputs: List[Argument] + outputs: List[Argument] + nodes: List[Node] + tensor_values: Dict[str, TensorMeta] + sym_int_values: Dict[str, SymInt] + sym_bool_values: Dict[str, SymBool] + # This is for deserializing the submodule graphs from higher order ops + # (ex. cond, map) where single tensor returns will just return a single + # tensor, rather than following export schema and returning a singleton + # list. 
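The `_Union` subclasses above all follow one convention: exactly one field is set at a time, and values are built through the `create` classmethod, as the serializer code later in this diff does. A short illustrative sketch of the pattern (these are private classes, shown here only to clarify the convention):

```python
from torch._export.serde.schema import Argument, SymExpr, SymInt, TensorArgument

# Tag a graph value as a tensor argument, referenced by name.
tensor_arg = Argument.create(as_tensor=TensorArgument(name="x"))

# A serialized symbolic size is either a concrete int or an expression string.
static_size = SymInt.create(as_int=4)
symbolic_size = SymInt.create(as_expr=SymExpr(expr_str="s0 + 1"))
```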
+@dataclass
+class Graph:
+    inputs: List[Argument]
+    outputs: List[Argument]
+    nodes: List[Node]
+    tensor_values: Dict[str, TensorMeta]
+    sym_int_values: Dict[str, SymInt]
+    sym_bool_values: Dict[str, SymBool]
+    # This is for deserializing the submodule graphs from higher order ops
+    # (ex. cond, map) where single tensor returns will just return a single
+    # tensor, rather than following export schema and returning a singleton
+    # list.
+    is_single_tensor_return: bool = False
+    custom_obj_values: Dict[str, CustomObjArgument] = field(default_factory=dict)
+
+
+@dataclass
+class UserInputSpec:
+    # Actually, only tensors and SymInts are allowed here
+    arg: Argument
+
+
+@dataclass(repr=False)
+class ConstantValue(_Union):
+    as_none: Tuple[()]
+    as_int: int
+    as_float: float
+    as_string: str
+    as_bool: bool
+
+
+@dataclass
+class ConstantInputSpec:
+    name: str
+    value: ConstantValue
+
+
+@dataclass
+class InputToParameterSpec:
+    arg: TensorArgument
+    parameter_name: str
+
+
+@dataclass
+class InputToBufferSpec:
+    arg: TensorArgument
+    buffer_name: str
+    persistent: bool
+
+
+@dataclass
+class InputToTensorConstantSpec:
+    arg: TensorArgument
+    tensor_constant_name: str
+
+
+@dataclass
+class InputToCustomObjSpec:
+    arg: CustomObjArgument
+    custom_obj_name: str
+
+
+@dataclass
+class InputTokenSpec:
+    arg: TokenArgument
+
+
+@dataclass(repr=False)
+class InputSpec(_Union):
+    user_input: UserInputSpec
+    parameter: InputToParameterSpec
+    buffer: InputToBufferSpec
+    tensor_constant: InputToTensorConstantSpec
+    custom_obj: InputToCustomObjSpec
+    token: InputTokenSpec
+    constant_input: ConstantInputSpec
+
+
+@dataclass
+class UserOutputSpec:
+    arg: Argument
+
+
+@dataclass
+class LossOutputSpec:
+    arg: TensorArgument
+
+
+@dataclass
+class BufferMutationSpec:
+    arg: TensorArgument
+    buffer_name: str
+
+
+@dataclass
+class GradientToParameterSpec:
+    arg: TensorArgument
+    parameter_name: str
+
+
+@dataclass
+class GradientToUserInputSpec:
+    arg: TensorArgument
+    user_input_name: str
+
+
+@dataclass
+class UserInputMutationSpec:
+    arg: TensorArgument
+    user_input_name: str
+
+
+@dataclass
+class OutputTokenSpec:
+    arg: TokenArgument
+
+
+@dataclass(repr=False)
+class OutputSpec(_Union):
+    user_output: UserOutputSpec
+    loss_output: LossOutputSpec
+    buffer_mutation: BufferMutationSpec
+    gradient_to_parameter: GradientToParameterSpec
+    gradient_to_user_input: GradientToUserInputSpec
+    user_input_mutation: UserInputMutationSpec
+    token: OutputTokenSpec
+
+
+@dataclass
+class GraphSignature:
+    input_specs: List[InputSpec]
+    output_specs: List[OutputSpec]
+
+
+@dataclass
+class RangeConstraint:
+    min_val: int
+    max_val: int
+
+
+@dataclass
+class ModuleCallSignature:
+    inputs: List[Argument]
+    outputs: List[Argument]
+
+    # These are serialized by calling pytree.treespec_dumps
+    # and deserialized by calling pytree.treespec_loads
+    in_spec: str
+    out_spec: str
+
+
+@dataclass
+class ModuleCallEntry:
+    fqn: str
+    signature: Optional[ModuleCallSignature] = None
+
+
+@dataclass
+class GraphModule:
+    graph: Graph
+    signature: GraphSignature
+    # This is used for unflattening, by tracking the calling structure of all of
+    # the modules in order to unflatten the modules back to the eager calling
+    # conventions.
+    module_call_graph: List[ModuleCallEntry]
+    metadata: Dict[str, str] = field(default_factory=dict)
+
+
+# Invariant: Every time a change is made to the schema, one of the versions
+# should be updated.
+@dataclass
+class SchemaVersion:
+    major: int  # Major version number is bumped every time a breaking change is made.
+    minor: int  # Minor version number is bumped when a compatible change is made.
+
+
+@dataclass
+class ExportedProgram:
+    graph_module: GraphModule
+    # Key is the opset namespace (ex.
aten), and value is the version number + opset_version: Dict[str, int] + range_constraints: Dict[str, RangeConstraint] + schema_version: SchemaVersion + verifiers: List[str] = field(default_factory=list) + torch_version: str = "<=2.4" diff --git a/janus/lib/python3.10/site-packages/torch/_export/serde/schema.yaml b/janus/lib/python3.10/site-packages/torch/_export/serde/schema.yaml new file mode 100644 index 0000000000000000000000000000000000000000..25a9a295ad0b97967885cbb715cdfbb553dfb4eb --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/serde/schema.yaml @@ -0,0 +1,437 @@ +# @generated by update_schema.py +# checksum<<923abf371a1f8802cacb037d409d28273867777a98f6542fba28616c2b92b639>> +Argument: + kind: union + fields: + as_none: + type: Tuple[()] + as_tensor: + type: TensorArgument + as_tensors: + type: List[TensorArgument] + as_int: + type: int + as_ints: + type: List[int] + as_float: + type: float + as_floats: + type: List[float] + as_string: + type: str + as_strings: + type: List[str] + as_sym_int: + type: SymIntArgument + as_sym_ints: + type: List[SymIntArgument] + as_scalar_type: + type: ScalarType + as_memory_format: + type: MemoryFormat + as_layout: + type: Layout + as_device: + type: Device + as_bool: + type: bool + as_bools: + type: List[bool] + as_sym_bool: + type: SymBoolArgument + as_sym_bools: + type: List[SymBoolArgument] + as_graph: + type: GraphArgument + as_optional_tensors: + type: List[OptionalTensorArgument] + as_custom_obj: + type: CustomObjArgument + as_operator: + type: str +BufferMutationSpec: + kind: struct + fields: + arg: + type: TensorArgument + buffer_name: + type: str +ConstantInputSpec: + kind: struct + fields: + name: + type: str + value: + type: ConstantValue +ConstantValue: + kind: union + fields: + as_none: + type: Tuple[()] + as_int: + type: int + as_float: + type: float + as_string: + type: str + as_bool: + type: bool +CustomObjArgument: + kind: struct + fields: + name: + type: str + class_fqn: + type: str +Device: + kind: struct + fields: + type: + type: str + index: + type: Optional[int] + default: None +ExportedProgram: + kind: struct + fields: + graph_module: + type: GraphModule + opset_version: + type: Dict[str, int] + range_constraints: + type: Dict[str, RangeConstraint] + schema_version: + type: SchemaVersion + verifiers: + type: List[str] + default: '[]' + torch_version: + type: str + default: <=2.4 +GradientToParameterSpec: + kind: struct + fields: + arg: + type: TensorArgument + parameter_name: + type: str +GradientToUserInputSpec: + kind: struct + fields: + arg: + type: TensorArgument + user_input_name: + type: str +Graph: + kind: struct + fields: + inputs: + type: List[Argument] + outputs: + type: List[Argument] + nodes: + type: List[Node] + tensor_values: + type: Dict[str, TensorMeta] + sym_int_values: + type: Dict[str, SymInt] + sym_bool_values: + type: Dict[str, SymBool] + is_single_tensor_return: + type: bool + default: 'False' + custom_obj_values: + type: Dict[str, CustomObjArgument] + default: '{}' +GraphArgument: + kind: struct + fields: + name: + type: str + graph: + type: Graph +GraphModule: + kind: struct + fields: + graph: + type: Graph + signature: + type: GraphSignature + module_call_graph: + type: List[ModuleCallEntry] + metadata: + type: Dict[str, str] + default: '{}' +GraphSignature: + kind: struct + fields: + input_specs: + type: List[InputSpec] + output_specs: + type: List[OutputSpec] +InputSpec: + kind: union + fields: + user_input: + type: UserInputSpec + parameter: + type: InputToParameterSpec + 
buffer: + type: InputToBufferSpec + tensor_constant: + type: InputToTensorConstantSpec + custom_obj: + type: InputToCustomObjSpec + token: + type: InputTokenSpec + constant_input: + type: ConstantInputSpec +InputToBufferSpec: + kind: struct + fields: + arg: + type: TensorArgument + buffer_name: + type: str + persistent: + type: bool +InputToCustomObjSpec: + kind: struct + fields: + arg: + type: CustomObjArgument + custom_obj_name: + type: str +InputToParameterSpec: + kind: struct + fields: + arg: + type: TensorArgument + parameter_name: + type: str +InputToTensorConstantSpec: + kind: struct + fields: + arg: + type: TensorArgument + tensor_constant_name: + type: str +InputTokenSpec: + kind: struct + fields: + arg: + type: TokenArgument +Layout: + kind: enum + fields: + Unknown: 0 + SparseCoo: 1 + SparseCsr: 2 + SparseCsc: 3 + SparseBsr: 4 + SparseBsc: 5 + _mkldnn: 6 + Strided: 7 +LossOutputSpec: + kind: struct + fields: + arg: + type: TensorArgument +MemoryFormat: + kind: enum + fields: + Unknown: 0 + ContiguousFormat: 1 + ChannelsLast: 2 + ChannelsLast3d: 3 + PreserveFormat: 4 +ModuleCallEntry: + kind: struct + fields: + fqn: + type: str + signature: + type: Optional[ModuleCallSignature] + default: None +ModuleCallSignature: + kind: struct + fields: + inputs: + type: List[Argument] + outputs: + type: List[Argument] + in_spec: + type: str + out_spec: + type: str +NamedArgument: + kind: struct + fields: + name: + type: str + arg: + type: Argument +Node: + kind: struct + fields: + target: + type: str + inputs: + type: List[NamedArgument] + outputs: + type: List[Argument] + metadata: + type: Dict[str, str] +OptionalTensorArgument: + kind: union + fields: + as_tensor: + type: TensorArgument + as_none: + type: Tuple[()] +OutputSpec: + kind: union + fields: + user_output: + type: UserOutputSpec + loss_output: + type: LossOutputSpec + buffer_mutation: + type: BufferMutationSpec + gradient_to_parameter: + type: GradientToParameterSpec + gradient_to_user_input: + type: GradientToUserInputSpec + user_input_mutation: + type: UserInputMutationSpec + token: + type: OutputTokenSpec +OutputTokenSpec: + kind: struct + fields: + arg: + type: TokenArgument +RangeConstraint: + kind: struct + fields: + min_val: + type: int + max_val: + type: int +ScalarType: + kind: enum + fields: + UNKNOWN: 0 + BYTE: 1 + CHAR: 2 + SHORT: 3 + INT: 4 + LONG: 5 + HALF: 6 + FLOAT: 7 + DOUBLE: 8 + COMPLEXHALF: 9 + COMPLEXFLOAT: 10 + COMPLEXDOUBLE: 11 + BOOL: 12 + BFLOAT16: 13 +SchemaVersion: + kind: struct + fields: + major: + type: int + minor: + type: int +SymBool: + kind: union + fields: + as_expr: + type: SymExpr + as_bool: + type: bool +SymBoolArgument: + kind: union + fields: + as_name: + type: str + as_bool: + type: bool +SymExpr: + kind: struct + fields: + expr_str: + type: str + hint: + type: Optional[SymExprHint] + default: None +SymExprHint: + kind: union + fields: + as_int: + type: int + as_float: + type: float + as_bool: + type: bool +SymInt: + kind: union + fields: + as_expr: + type: SymExpr + as_int: + type: int +SymIntArgument: + kind: union + fields: + as_name: + type: str + as_int: + type: int +TensorArgument: + kind: struct + fields: + name: + type: str +TensorMeta: + kind: struct + fields: + dtype: + type: ScalarType + sizes: + type: List[SymInt] + requires_grad: + type: bool + device: + type: Device + strides: + type: List[SymInt] + storage_offset: + type: SymInt + layout: + type: Layout +TokenArgument: + kind: struct + fields: + name: + type: str +UserInputMutationSpec: + kind: struct + fields: + arg: + type: 
TensorArgument + user_input_name: + type: str +UserInputSpec: + kind: struct + fields: + arg: + type: Argument +UserOutputSpec: + kind: struct + fields: + arg: + type: Argument +SCHEMA_VERSION: +- 7 +- 3 +TREESPEC_VERSION: 1 diff --git a/janus/lib/python3.10/site-packages/torch/_export/serde/schema_check.py b/janus/lib/python3.10/site-packages/torch/_export/serde/schema_check.py new file mode 100644 index 0000000000000000000000000000000000000000..b22b9778819e73635aa6d37a254aa4b643abd5f5 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/serde/schema_check.py @@ -0,0 +1,286 @@ +# mypy: allow-untyped-defs +import dataclasses +import hashlib +import re +import typing +from enum import IntEnum +from typing import Any, Dict, Optional, Union + +from torch._export.serde import schema +from torch._export.serde.union import _Union + + +class SchemaUpdateError(Exception): + pass + + +def _check(x, msg): + if not x: + raise SchemaUpdateError(msg) + + +def _staged_schema(): + ret: Dict[str, Any] = {} + defs = {} + + def _handle_aggregate(ty): + def dump_type(t): + if isinstance(t, type): + return t.__name__ + elif isinstance(t, str): + assert t in defs + return t + elif o := typing.get_origin(t): + # Lemme know if there's a better way to do this. + if o == list: + head = "List" + elif o == dict: + head = "Dict" + elif o == tuple: + if typing.get_args(t) == (): + return "Tuple[()]" + head = "Tuple" + elif o == Union: + args = typing.get_args(t) + assert len(args) == 2 and args[1] == type(None) + return f"Optional[{dump_type(args[0])}]" + else: + raise AssertionError(f"Type {t} is not supported in export schema.") + return ( + f"{head}[{', '.join([dump_type(x) for x in typing.get_args(t)])}]" + ) + elif t == (): + return "()" + else: + raise AssertionError(f"Type {t} is not supported in export schema.") + + def dump_field(f): + t = dump_type(f.type) + ret = {"type": t} + + value = dataclasses.MISSING + if f.default is not dataclasses.MISSING: + value = f.default + elif f.default_factory is not dataclasses.MISSING: + value = f.default_factory() + + if t.startswith("Optional[") and value is not None: + raise AssertionError( + f"Optional field {ty.__name__}.{f.name} must have default value to be None." 
+ ) + + if value is not dataclasses.MISSING: + default = str(value) + ret["default"] = default + return ret + + return {f.name: dump_field(f) for f in dataclasses.fields(ty)} + + def _handle_int_enum(name, ty): + ret[name] = {"kind": "enum", "fields": {x.name: x.value for x in ty}} + + def _handle_struct(name, ty): + ret[name] = {"kind": "struct", "fields": _handle_aggregate(ty)} + + def _handle_union(name, ty): + ret[name] = {"kind": "union", "fields": _handle_aggregate(ty)} + + for name in dir(schema): + if name.startswith("_"): + continue + + value = getattr(schema, name) + + if hasattr(value, "__module__") and value.__module__ != schema.__name__: + continue + + defs[name] = value + + for name, value in defs.items(): + if isinstance(value, type): + if issubclass(value, IntEnum): + _handle_int_enum(name, value) + elif dataclasses.is_dataclass(value): + if issubclass(value, _Union): + _handle_union(name, value) + else: + _handle_struct(name, value) + else: + raise AssertionError(f"Unknown schema type {name}: {value}") + elif isinstance(value, (int, tuple)): + assert name in ("SCHEMA_VERSION", "TREESPEC_VERSION") + else: + raise AssertionError(f"Unknown variable {name}: {value}") + + ret["SCHEMA_VERSION"] = list(defs["SCHEMA_VERSION"]) + assert all(x > 0 for x in ret["SCHEMA_VERSION"]) + ret["TREESPEC_VERSION"] = defs["TREESPEC_VERSION"] + assert ret["TREESPEC_VERSION"] > 0 + return ret + + +def _diff_schema(dst, src): + additions = {key: src[key] for key in src.keys() - dst.keys()} + subtractions = {key: dst[key] for key in dst.keys() - src.keys()} + + common_keys = src.keys() & dst.keys() + + versions = {"SCHEMA_VERSION", "TREESPEC_VERSION"} + common_keys -= versions + + for key in common_keys: + src_kind = src[key]["kind"] + src_fields = src[key]["fields"] + dst_kind = dst[key]["kind"] + dst_fields = dst[key]["fields"] + _check( + src_kind == dst_kind, + f"Type {key} changed kind from {dst_kind} to {src_kind}", + ) + assert isinstance(src_fields, dict) and isinstance(dst_fields, dict) + added_fields = { + key: src_fields[key] for key in src_fields.keys() - dst_fields.keys() + } + subtracted_fields = { + key: dst_fields[key] for key in dst_fields.keys() - src_fields.keys() + } + common_fields = src_fields.keys() & dst_fields.keys() + + for field in common_fields: + src_field = src_fields[field] + dst_field = dst_fields[field] + if src_kind == "struct": + _check( + src_field["type"] == dst_field["type"], + f"Type of the field {key}.{field} changed from {dst_field['type']} to {src_field['type']}", + ) + if "default" in src_field and "default" not in dst_field: + added_fields[field] = {} + added_fields[field]["default"] = src_field["default"] + if "default" not in src_field and "default" in dst_field: + subtracted_fields[field] = {} + subtracted_fields[field]["default"] = dst_field["default"] + elif src_kind == "enum": + _check( + src_field == dst_field, + f"Value of the enum field {key}.{field} changed from {dst_field} to {src_field}", + ) + elif src_kind == "union": + _check( + src_field["type"] == dst_field["type"], + f"Type of the field {key}.{field} changed from {dst_field['type']} to {src_field['type']}", + ) + else: + raise AssertionError(f"Unknown kind {src_kind}: {key}") + if len(added_fields) > 0: + assert key not in additions + additions[key] = {} + additions[key]["fields"] = added_fields + if len(subtracted_fields) > 0: + assert key not in subtractions + subtractions[key] = {} + subtractions[key]["fields"] = subtracted_fields + + return additions, subtractions + + +def 
_hash_schema(s):
+    return hashlib.sha256(repr(s).encode("utf-8")).hexdigest()
+
+
+@dataclasses.dataclass
+class _Commit:
+    result: Dict[str, Any]
+    checksum_result: str
+    path: str
+    additions: Dict[str, Any]
+    subtractions: Dict[str, Any]
+    base: Dict[str, Any]
+    checksum_base: Optional[str]
+
+
+def update_schema():
+    import importlib.resources
+
+    if importlib.resources.is_resource(__package__, "schema.yaml"):
+        content = importlib.resources.read_text(__package__, "schema.yaml")
+        match = re.search("checksum<<([A-Fa-f0-9]{64})>>", content)
+        _check(match is not None, "checksum not found in schema.yaml")
+        assert match is not None
+        checksum_base = match.group(1)
+        from yaml import load, Loader
+
+        dst = load(content, Loader=Loader)
+        assert isinstance(dst, dict)
+    else:
+        checksum_base = None
+        dst = {"SCHEMA_VERSION": None, "TREESPEC_VERSION": None}
+
+    src = _staged_schema()
+    additions, subtractions = _diff_schema(dst, src)
+    return _Commit(
+        result=src,
+        checksum_result=_hash_schema(src),
+        path=__package__.replace(".", "/") + "/schema.yaml",
+        additions=additions,
+        subtractions=subtractions,
+        base=dst,
+        checksum_base=checksum_base,
+    )
+
+
+def check(commit: _Commit, force_unsafe: bool = False):
+    next_version = None
+    reason = ""
+    # Step 1: Detect major schema updates.
+    if len(commit.additions) > 0:
+        for k, v in commit.additions.items():
+            if k not in commit.base:
+                continue
+            kind = commit.result[k]["kind"]
+            fields = v["fields"]
+            for f, d in fields.items():
+                if "default" not in d and kind == "struct":
+                    reason += (
+                        f"Field {k}.{f} is added to schema.py without a default value, an incompatible change "
+                        + "which requires a major version bump.\n"
+                    )
+                    next_version = [commit.base["SCHEMA_VERSION"][0] + 1, 1]
+
+    if len(commit.subtractions) > 0:
+        for k, v in commit.subtractions.items():
+            if k not in commit.result:
+                continue
+            for f in v["fields"]:
+                reason += f"Field {k}.{f} is removed from schema.py as an incompatible change which requires a major version bump.\n"
+            next_version = [commit.base["SCHEMA_VERSION"][0] + 1, 1]
+
+    if force_unsafe:
+        reason += "--force-unsafe is used."
+        next_version = commit.result["SCHEMA_VERSION"]
+    else:
+        # Step 2: Detect minor schema updates.
+        if next_version is None and len(commit.additions) > 0:
+            for k, v in commit.additions.items():
+                for f in v["fields"]:
+                    reason += (
+                        f"Field {k}.{f} is added to schema.py as a compatible change "
+                        + "which still requires a minor version bump.\n"
+                    )
+            next_version = [
+                commit.base["SCHEMA_VERSION"][0],
+                commit.base["SCHEMA_VERSION"][1] + 1,
+            ]
+        if next_version is None and len(commit.subtractions) > 0:
+            for k, v in commit.subtractions.items():
+                for f in v["fields"]:
+                    reason += (
+                        f"Field {k}.{f} is removed from schema.py as a compatible change "
+                        + "which still requires a minor version bump.\n"
+                    )
+            next_version = [
+                commit.base["SCHEMA_VERSION"][0],
+                commit.base["SCHEMA_VERSION"][1] + 1,
+            ]
+
+    return next_version, reason
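`update_schema` and `check` are meant to be driven from a maintainer script: diff the staged `schema.py` definitions against the committed `schema.yaml`, then decide whether a version bump is required. A minimal sketch of that flow using the two functions defined above (the printing is illustrative, not part of this module):

```python
from torch._export.serde.schema_check import check, update_schema

# Diff the dataclasses in schema.py against the committed schema.yaml.
commit = update_schema()

# `check` proposes the next [major, minor] version and explains why.
next_version, reason = check(commit)
if next_version is not None:
    print(f"schema change detected, bump SCHEMA_VERSION to {next_version}")
    print(reason)
```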
diff --git a/janus/lib/python3.10/site-packages/torch/_export/serde/serialize.py b/janus/lib/python3.10/site-packages/torch/_export/serde/serialize.py
new file mode 100644
index 0000000000000000000000000000000000000000..44153ccc78eb42155a155caf742da91ab4c93cc0
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/_export/serde/serialize.py
@@ -0,0 +1,2960 @@
+# mypy: allow-untyped-defs
+import base64
+import copy
+import copyreg
+import dataclasses
+import heapq
+import inspect
+import io
+import json
+import logging
+import math
+import operator
+import re
+import typing
+import traceback
+
+from contextlib import contextmanager
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import (
+    Any,
+    Callable,
+    cast,
+    Dict,
+    final,
+    Iterator,
+    List,
+    Optional,
+    Set,
+    Tuple,
+    Type,
+    Union,
+)
+
+import sympy
+
+import torch
+import torch.export.exported_program as ep
+from torch._export.serde.schema import SchemaVersion
+from torch._export.verifier import load_verifier
+from torch._library.fake_class_registry import FakeScriptObject
+from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
+from torch.fx.experimental import symbolic_shapes
+from torch.utils import _pytree as pytree
+from torch.utils._pytree import treespec_dumps, treespec_loads
+from torch.utils._sympy.numbers import int_oo
+from torch.utils._sympy.value_ranges import ValueRanges
+
+from .schema import (  # type: ignore[attr-defined]
+    Argument,
+    BufferMutationSpec,
+    ConstantInputSpec,
+    ConstantValue,
+    CustomObjArgument,
+    Device,
+    ExportedProgram,
+    GradientToParameterSpec,
+    GradientToUserInputSpec,
+    Graph,
+    GraphArgument,
+    GraphModule,
+    GraphSignature,
+    InputSpec,
+    InputToBufferSpec,
+    InputToCustomObjSpec,
+    InputTokenSpec,
+    InputToParameterSpec,
+    InputToTensorConstantSpec,
+    Layout,
+    LossOutputSpec,
+    MemoryFormat,
+    ModuleCallEntry,
+    ModuleCallSignature,
+    NamedArgument,
+    Node,
+    OptionalTensorArgument,
+    OutputSpec,
+    OutputTokenSpec,
+    RangeConstraint,
+    ScalarType,
+    SCHEMA_VERSION,
+    SymBool,
+    SymBoolArgument,
+    SymExpr,
+    SymExprHint,
+    SymInt,
+    SymIntArgument,
+    TensorArgument,
+    TensorMeta,
+    TokenArgument,
+    TREESPEC_VERSION,
+    UserInputMutationSpec,
+    UserInputSpec,
+    UserOutputSpec,
+)
+from .union import _Union
+from ..utils import remove_proxy_from_state_dict
+
+__all__ = [
+    "serialize",
+    "GraphModuleSerializer",
+    "ExportedProgramSerializer",
+    "GraphModuleDeserializer",
+    "ExportedProgramDeserializer",
+]
+
+log = logging.getLogger(__name__)
+
+
+class SerializeError(RuntimeError):
+    pass
+
+
+def _reverse_map(d: Dict[Any, Enum]):
+    return {v.value: k for k, v in d.items()}
+
+
+MetaType = Union[
+    FakeTensor, int, torch.SymInt, bool, torch.SymBool, ep.CustomObjArgument
+]
+
+
+ST_DELIMITER = ";"
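The module-level tables defined next pair each torch dtype, layout, and memory format with its serialized enum, and `_reverse_map` derives the decoding table keyed by the enum's integer value, so round-tripping is the identity. A quick illustrative check (assuming the table names defined immediately below):

```python
import torch
from torch._export.serde.serialize import (
    _SERIALIZE_TO_TORCH_DTYPE,
    _TORCH_TO_SERIALIZE_DTYPE,
)

# Encoding a dtype and decoding the resulting ScalarType value round-trips.
for dtype in (torch.float32, torch.int64, torch.bool):
    assert _SERIALIZE_TO_TORCH_DTYPE[_TORCH_TO_SERIALIZE_DTYPE[dtype].value] is dtype
```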
+_TORCH_TO_SERIALIZE_DTYPE = { + torch.uint8: ScalarType.BYTE, + torch.int8: ScalarType.CHAR, + torch.int16: ScalarType.SHORT, + torch.int32: ScalarType.INT, + torch.int64: ScalarType.LONG, + torch.float16: ScalarType.HALF, + torch.float32: ScalarType.FLOAT, + torch.float64: ScalarType.DOUBLE, + torch.complex32: ScalarType.COMPLEXHALF, + torch.complex64: ScalarType.COMPLEXFLOAT, + torch.complex128: ScalarType.COMPLEXDOUBLE, + torch.bool: ScalarType.BOOL, + torch.bfloat16: ScalarType.BFLOAT16, +} + + +_SERIALIZE_TO_TORCH_DTYPE = _reverse_map(_TORCH_TO_SERIALIZE_DTYPE) # type: ignore[arg-type] + + +_TORCH_TO_SERIALIZE_LAYOUT = { + torch.sparse_coo: Layout.SparseCoo, + torch.sparse_csr: Layout.SparseCsr, + torch.sparse_csc: Layout.SparseCsc, + torch.sparse_bsr: Layout.SparseBsr, + torch.sparse_bsc: Layout.SparseBsc, + torch._mkldnn: Layout._mkldnn, # type: ignore[attr-defined] + torch.strided: Layout.Strided, +} + + +_SERIALIZE_TO_TORCH_LAYOUT = _reverse_map(_TORCH_TO_SERIALIZE_LAYOUT) # type: ignore[arg-type] + + +_TORCH_TO_SERIALIZE_MEMORY_FORMAT = { + torch.contiguous_format: MemoryFormat.ContiguousFormat, + torch.channels_last: MemoryFormat.ChannelsLast, + torch.channels_last_3d: MemoryFormat.ChannelsLast3d, + torch.preserve_format: MemoryFormat.PreserveFormat, +} + + +_SERIALIZE_TO_TORCH_MEMORY_FORMAT = _reverse_map(_TORCH_TO_SERIALIZE_MEMORY_FORMAT) # type: ignore[arg-type] + + +_SYM_INT_OPS = { + operator.mul, + operator.add, + operator.sub, + operator.floordiv, + operator.mod, + operator.pow, + torch.sym_int, + torch.sym_float, + torch.sym_ite, + torch.sym_max, + torch.sym_min, + torch.sym_sqrt, +} + + +_SYM_BOOL_OPS = { + operator.eq, + operator.ne, + operator.le, + operator.ge, + operator.lt, + operator.gt, + torch.sym_not, +} + + +assert not any(isinstance(op, torch._ops.OpOverload) for op in _SYM_INT_OPS) +assert not any(isinstance(op, torch._ops.OpOverload) for op in _SYM_BOOL_OPS) + + +@dataclass +class SerializedArtifact: + exported_program: bytes + state_dict: bytes + constants: bytes + example_inputs: bytes + + +@dataclass +class _SerializedProgram: + exported_program: ExportedProgram + state_dict: bytes + constants: bytes + example_inputs: bytes + + +def deserialize_device(d: Device) -> torch.device: + if d.index is None: + return torch.device(type=d.type) # type: ignore[call-overload] + return torch.device(type=d.type, index=d.index) + + +def serialize_sym_int(s: Union[int, torch.SymInt]) -> SymInt: + if isinstance(s, (torch.SymInt, sympy.Symbol, int)): + if symbolic_shapes.is_concrete_int(s): + return SymInt.create(as_int=int(s)) + else: + assert isinstance(s, (torch.SymInt, sympy.Symbol)) + if s.node.hint is None: + return SymInt.create(as_expr=SymExpr(str(s))) + else: + return SymInt.create( + as_expr=SymExpr(str(s), hint=SymExprHint.create(as_int=s.node.hint)) + ) + else: + raise SerializeError( + f"SymInt should be either symbol or int, got `{s}` of type `{type(s)}`" + ) + + +def serialize_sym_bool(s: Union[bool, torch.SymBool]) -> SymBool: + if isinstance(s, (torch.SymBool, bool)): + if symbolic_shapes.is_concrete_bool(s): + return SymBool.create(as_bool=bool(s)) + else: + return SymBool.create(as_expr=SymExpr(expr_str=str(s))) + else: + raise SerializeError( + f"SymBool should be either symbol or bool, got `{s}` of type `{type(s)}`" + ) + + +def serialize_tensor_meta(t: torch.Tensor) -> TensorMeta: + """ + Extract a TensorMeta describing `t`. 
+ """ + return TensorMeta( + dtype=_TORCH_TO_SERIALIZE_DTYPE[t.dtype], + sizes=[serialize_sym_int(s) for s in t.shape], + requires_grad=t.requires_grad, + device=Device(type=t.device.type, index=t.device.index), + strides=[serialize_sym_int(s) for s in t.stride()], + storage_offset=serialize_sym_int(0), # TODO needs to be fixed. + layout=_TORCH_TO_SERIALIZE_LAYOUT[t.layout], + ) + + +_CURRENT_DESERIALIZER: Optional["GraphModuleDeserializer"] = None + + +def _reduce_fake_tensor(fake_tensor: FakeTensor): + is_parameter = isinstance(fake_tensor, torch.nn.Parameter) + tensor_meta = serialize_tensor_meta(fake_tensor) + tensor_meta_bytes = json.dumps( + _dataclass_to_dict(tensor_meta), cls=EnumEncoder + ).encode("utf-8") + return _reconstruct_fake_tensor, (tensor_meta_bytes, is_parameter) + + +def _reconstruct_fake_tensor( + serialized_tensor_meta: bytes, is_parameter: bool +) -> FakeTensor: + # Deserialize the bytes into a TensorMeta + json_tensor_meta = json.loads(serialized_tensor_meta.decode("utf-8")) + tensor_meta = _dict_to_dataclass(TensorMeta, json_tensor_meta) + # Find the current fake mode + assert ( + _CURRENT_DESERIALIZER is not None + ), "Need access to current deserializer state" + fake_tensor = _CURRENT_DESERIALIZER.deserialize_tensor_meta(tensor_meta) + if is_parameter: + fake_tensor = torch.nn.Parameter(fake_tensor) # type: ignore[assignment] + return fake_tensor + + +def serialize_torch_artifact(artifact: Optional[Any]) -> bytes: + if artifact is None: + return b"" + + assert ( + FakeTensor not in copyreg.dispatch_table + ), "Refusing to stomp on existing FakeTensor reducer" + try: + copyreg.pickle(FakeTensor, _reduce_fake_tensor) + buffer = io.BytesIO() + # This is a workaround for backend's tensor deserialization problem: + # unpickleTensor() always create a tensor on the device where it was originally saved + # This behavior is bad for multi-gpu training, as we wish to directly load the tensor + # on the designated device. + # For now, we simply move the tensor to cpu before saving. + # TODO: this should be fixed by deserialization instead. + torch.save(artifact, buffer) + return buffer.getvalue() + finally: + del copyreg.dispatch_table[FakeTensor] + + +def deserialize_torch_artifact(serialized: Union[Dict[str, Any], Tuple[Any, ...], bytes]): + if isinstance(serialized, (dict, tuple)): + return serialized + if len(serialized) == 0: + return {} + buffer = io.BytesIO(serialized) + buffer.seek(0) + # weights_only=False as we want to load custom objects here (e.g. ScriptObject) + artifact = torch.load(buffer, weights_only=False) + assert isinstance(artifact, (tuple, dict)) + return artifact + + +def _sympy_int_to_int(val: sympy.Expr, adjust: str): + # Convert simple sympy Integers into concrete int + if val in (sympy.oo, int_oo): + return math.inf + if val in (-sympy.oo, -int_oo): + return -math.inf + if isinstance(val, sympy.Integer): + return int(val) + + # TODO: Remove this adjustment when Ed gets rid of fractional ranges + log.warning( + "Export constraints cannot be non-integer expressions. Found " + "type %s, and value %s. 
We will attempt to %s " + "this value.", type(val), val, adjust + ) + + if adjust == "floor": + return math.floor(val) + elif adjust == "ceil": + return math.ceil(val) + else: + raise RuntimeError(f"Got invalid adjustment {adjust}") + + +def _int_to_sympy_int(val) -> sympy.Expr: + # Convert concrete int into simple sympy Integers + if val == math.inf: + return int_oo + if val == -math.inf: + return -int_oo + return sympy.Integer(val) + + +def serialize_range_constraints( + range_constraints: Dict[sympy.Symbol, ValueRanges] +) -> Dict[str, RangeConstraint]: + return { + str(k): RangeConstraint( + _sympy_int_to_int(v.lower, "ceil"), # type: ignore[arg-type] + _sympy_int_to_int(v.upper, "floor"), # type: ignore[arg-type] + ) + for k, v in range_constraints.items() + } + + +def _get_schema_from_target(target): + if isinstance(target, torch._ops.OpOverload): + return target._schema + elif type(target) in _serialization_registry: + return _serialization_registry[type(target)].op_schema(target) + raise RuntimeError(f"Cannot find schema for {type(target)}") + + +def _is_single_tensor_return(target) -> bool: + schema = _get_schema_from_target(target) + returns = schema.returns + return len(returns) == 1 and isinstance(returns[0].real_type, torch.TensorType) + + +def _is_single_tensor_list_return(target: Any) -> bool: + schema = _get_schema_from_target(target) + returns = schema.returns + + if len(returns) != 1: + return False + return_type = returns[0].real_type + return isinstance(return_type, torch.ListType) and isinstance( + return_type.getElementType(), torch.TensorType + ) + + +@dataclass +class GraphState: + inputs: List[Argument] = field(default_factory=list) + outputs: List[Argument] = field(default_factory=list) + nodes: List[Node] = field(default_factory=list) + tensor_values: Dict[str, TensorMeta] = field(default_factory=dict) + sym_int_values: Dict[str, SymInt] = field(default_factory=dict) + sym_bool_values: Dict[str, SymBool] = field(default_factory=dict) + is_single_tensor_return: bool = False + custom_obj_values: Dict[str, CustomObjArgument] = field(default_factory=dict) + + +class Final(type): + def __new__(metacls, name, bases, classdict): + for b in bases: + if isinstance(b, Final): + raise TypeError(f"type '{b.__name__}' is not an acceptable base type") + return type.__new__(metacls, name, bases, dict(classdict)) + + +@final +class GraphModuleSerializer(metaclass=Final): + def __init__( + self, + graph_signature: ep.ExportGraphSignature, + module_call_graph: List[ep.ModuleCallEntry], + ): + self.graph_state = GraphState() + self.graph_signature = graph_signature + self.module_call_graph = module_call_graph + self.custom_objs: Dict[str, torch._C.ScriptObject] = {} + self.duplicate_getitem_nodes: Dict[str, str] = {} + + @contextmanager + def save_graph_state(self): + saved = self.graph_state + self.graph_state = GraphState() + try: + yield + finally: + self.graph_state = saved + + def handle_placeholder(self, node: torch.fx.Node): + assert node.op == "placeholder" + if isinstance(node.meta["val"], torch.Tensor): + graph_input = Argument.create(as_tensor=TensorArgument(name=node.name)) + self.graph_state.tensor_values[node.name] = serialize_tensor_meta( + node.meta["val"] + ) + elif isinstance(node.meta["val"], torch.SymInt): + raise AssertionError("SymInt graph input is not implemented yet.") + elif isinstance(node.meta["val"], (int, bool, str, float, type(None))): + graph_input = self.serialize_input(node.meta["val"]) + elif isinstance(node.meta["val"], ep.CustomObjArgument): + 
class_fqn = node.meta["val"].class_fqn + graph_input = Argument.create( + as_custom_obj=CustomObjArgument(name=node.name, class_fqn=class_fqn) + ) + self.graph_state.custom_obj_values[node.name] = ( + self.serialize_script_obj_meta(node.meta["val"]) + ) + else: + raise AssertionError(f"Unimplemented graph input type: {node.meta['val']}") + self.graph_state.inputs.append(graph_input) + + def handle_output(self, node: torch.fx.Node): + assert node.op == "output" + assert len(node.args) == 1, "FX.Node's args should have one arg" + node_args = node.args[0] + if isinstance(node_args, torch.fx.Node): + # For singleton tensor returns + self.graph_state.is_single_tensor_return = True + self.graph_state.outputs = [self.serialize_input(node_args)] + else: + assert isinstance(node_args, (tuple, list)) + self.graph_state.outputs = [self.serialize_input(arg) for arg in node_args] + + def serialize_operator(self, target) -> str: + if isinstance(target, str): + return target + elif target.__module__.startswith("torch._ops"): + # TODO(zhxchen17) Maybe provide a function name helper in FX. + # From torch.fx.node._get_qualified_name + module = target.__module__.replace("torch._ops", "torch.ops") + return f"{module}.{target.__name__}" + else: # TODO(zhxchen17) Don't catch all here. + return f"{target.__module__}.{target.__name__}" + + def handle_call_function(self, node: torch.fx.Node): + assert node.op == "call_function" + + # getitem has been handled in the producer node, skip it here + if node.target is operator.getitem: + return + + meta_val = node.meta.get("val") + if ( + node.target in _SYM_INT_OPS + or node.target in _SYM_BOOL_OPS + or (meta_val is not None and isinstance(meta_val, (torch.SymInt, torch.SymBool))) + ): + assert len(node.kwargs) == 0 + ex_node = Node( + target=self.serialize_operator(node.target), + inputs=self.serialize_sym_op_inputs(node.target, node.args), + outputs=[ + Argument.create( + as_sym_int=self.serialize_sym_int_output(node.name, meta_val) + ) + if (node.target in _SYM_INT_OPS or isinstance(meta_val, torch.SymInt)) + else Argument.create( + as_sym_bool=self.serialize_sym_bool_output(node.name, meta_val) + ) + ], + metadata=self.serialize_metadata(node), + ) + elif isinstance(node.target, torch._ops.OpOverload): + ex_node = Node( + target=self.serialize_operator(node.target), + inputs=self.serialize_inputs(node.target, node.args, node.kwargs), + outputs=self.serialize_outputs(node), + # TODO: create a new tensor_values here, meta might have faketensor info + metadata=self.serialize_metadata(node), + ) + elif isinstance(node.target, torch._ops.HigherOrderOperator): + ex_node = Node( + target=self.serialize_operator(node.target), + inputs=self.serialize_hoo_inputs(node.args, node.kwargs), + outputs=self.serialize_hoo_outputs(node), + metadata=self.serialize_metadata(node), + ) + elif type(node.target) in _serialization_registry: + # Sanity check for unhandled serialization. + assert type(node.target) in _serialization_registry, f"{type(node.target)} is not supported in export serialization." 
+ + handler = _serialization_registry[type(node.target)] + namespace = handler.namespace() + op_name = handler.to_op_name(node.target) + assert isinstance(namespace, str) and isinstance(op_name, str) + assert ":" not in namespace and ":" not in op_name + ex_node = Node( + target=f"#{namespace}:{op_name}", + inputs=self.serialize_inputs(node.target, node.args, node.kwargs), + outputs=self.serialize_outputs(node), + metadata=self.serialize_metadata(node), + ) + else: + raise SerializeError(f"Serializing {node.target} is not supported") + + self.graph_state.nodes.append(ex_node) + + def handle_get_attr(self, node): + pass + + def _output_node_at_index(self, node, index) -> Optional[torch.fx.Node]: + user_node = None + for user in node.users: + assert user.target is operator.getitem, f"{user} is not a getitem node" + if index == user.args[1]: + if user_node is None: + user_node = user + else: + # We want to deduplicate getitem nodes that are trying to + # index to the same index + self.duplicate_getitem_nodes[user.name] = user_node.name + return user_node + + def _output_node_name_at_index(self, node, index) -> str: + user_node = self._output_node_at_index(node, index) + if user_node is None: + return f"{node.name}_unused_{index}" + else: + return user_node.name + + def serialize_metadata(self, node: torch.fx.Node) -> Dict[str, str]: + ret = {} + if stack_trace := node.meta.get("stack_trace"): + ret["stack_trace"] = stack_trace + + if nn_module_stack := node.meta.get("nn_module_stack"): + + def export_nn_module_stack(val): + assert isinstance(val, tuple) and len(val) == 2 + path, ty = val + + assert isinstance(path, str) + assert isinstance(ty, str) + + return path + "," + ty + + # Serialize to "key,orig_path,type_str" + nn_module_list = [ + f"{k},{export_nn_module_stack(v)}" for k, v in nn_module_stack.items() + ] + ret["nn_module_stack"] = ST_DELIMITER.join(nn_module_list) + + if source_fn_st := node.meta.get("source_fn_stack"): + source_fn_list = [ + f"{source_fn[0]},{self.serialize_operator(source_fn[1])}" + for source_fn in source_fn_st + ] + ret["source_fn_stack"] = ST_DELIMITER.join(source_fn_list) + + if torch_fn := node.meta.get("torch_fn"): + ret["torch_fn"] = ST_DELIMITER.join(list(torch_fn)) + + if custom := node.meta.get("custom"): + try: + ret["custom"] = json.dumps(custom) + except Exception as e: + raise SerializeError( + f"Failed to serialize custom metadata for node {node.name} with error {e}" + ) from e + + return ret + + def serialize_script_obj_meta( + self, script_obj_meta: ep.CustomObjArgument + ) -> CustomObjArgument: + return CustomObjArgument( + name=script_obj_meta.name, + class_fqn=script_obj_meta.class_fqn, + ) + + def serialize_sym_op_inputs(self, op, args) -> List[NamedArgument]: + if isinstance(op, torch._ops.OpOverload): + args_names = [arg.name for arg in op._schema.arguments] + else: + assert op in _SYM_INT_OPS or op in _SYM_BOOL_OPS + args_names = list(inspect.signature(op).parameters.keys()) + serialized_args = [] + for args_name, arg in zip(args_names, args): + serialized_args.append( + NamedArgument(name=args_name, arg=self.serialize_input(arg)) + ) + return serialized_args + + def serialize_inputs( + self, + target: Any, # torch._ops.OpOverload and other custom operator types. 
+ args, + kwargs=None + ) -> List[NamedArgument]: + assert isinstance(target, (torch._ops.OpOverload, *_registered_extension_types())) + kwargs = kwargs or {} + serialized_args = [] + + schema = _get_schema_from_target(target) + + for i, schema_arg in enumerate(schema.arguments): + if schema_arg.name in kwargs: + serialized_args.append( + NamedArgument( + name=schema_arg.name, + arg=self.serialize_input(kwargs[schema_arg.name], schema_arg.type), + ) + ) + elif not schema_arg.kwarg_only and i < len(args): + serialized_args.append( + NamedArgument( + name=schema_arg.name, + arg=self.serialize_input(args[i], schema_arg.type), + ) + ) + else: + # We intentionally don't serialize the missing arguments + # with default values + pass + + return serialized_args + + def serialize_hoo_inputs(self, args, kwargs) -> List[NamedArgument]: + """ + For serializing HOO inputs since HOOs do not have a schema. + """ + inputs = [ + NamedArgument( + name="", + arg=self.serialize_input(a), + ) + for a in args + ] + inputs.extend( + [ + NamedArgument(name=name, arg=self.serialize_input(a)) + for name, a in kwargs.items() + ] + ) + return inputs + + def is_sym_int_arg(self, arg) -> bool: + return isinstance(arg, int) or ( + isinstance(arg, torch.fx.Node) + and arg.name in self.graph_state.sym_int_values + ) + + def is_sym_bool_arg(self, arg) -> bool: + return isinstance(arg, bool) or ( + isinstance(arg, torch.fx.Node) + and arg.name in self.graph_state.sym_bool_values + ) + + def serialize_input( + self, arg, arg_type: Optional[torch._C.Argument] = None + ) -> Argument: + import torch._inductor.ir as inductor_ir + + inductor_tensor_buffers = ( + inductor_ir.Buffer, + inductor_ir.ReinterpretView, + ) + + if isinstance(arg, torch.fx.Node): + if arg.op == "get_attr": + assert isinstance(arg.target, str) + attr = getattr(arg.graph.owning_module, arg.target) + + if isinstance(attr, torch.Tensor): + raise SerializeError( + "getattr nodes containing tensors should not appear in the graph" + ) + elif isinstance(attr, torch.fx.GraphModule): + with self.save_graph_state(): + graph = self.serialize_graph(attr) + return Argument.create( + as_graph=GraphArgument(name=arg.target, graph=graph) + ) + else: + raise SerializeError( + f"Unsupported getattr attribute {arg.target} with type: {type(attr)}" + ) + elif self.is_sym_int_arg(arg): + return Argument.create( + as_sym_int=SymIntArgument.create(as_name=arg.name) + ) + elif self.is_sym_bool_arg(arg): + return Argument.create( + as_sym_bool=SymBoolArgument.create(as_name=arg.name) + ) + elif isinstance(arg.meta["val"], ep.CustomObjArgument): + return Argument.create( + as_custom_obj=CustomObjArgument( + name=arg.name, class_fqn=arg.meta["val"].class_fqn + ) + ) + elif arg.name in self.duplicate_getitem_nodes: + dedup_name = self.duplicate_getitem_nodes[arg.name] + return Argument.create(as_tensor=TensorArgument(name=dedup_name)) + else: + return Argument.create(as_tensor=TensorArgument(name=arg.name)) + elif isinstance(arg, inductor_tensor_buffers): + # Other branches are for arguments in fx node. + # This is a special branch for handling buffers (representing tensor arguments) + # for inductor's ExternalFallbackNode + # export_extern_kernel_node() is using this function to serialize arguments + arg_name = arg.get_name() + assert arg_name is not None, "Buffer must have valid name" + return Argument.create(as_tensor=TensorArgument(name=arg_name)) + elif isinstance(arg, torch.SymInt): + # This is a special branch for handling SymInt args in inductor's + # ExternalFallbackNode. 
+ # For regular FX graph, SymInt arg should be a fx.Node with + # self.is_sym_int_arg(arg) being true + return Argument.create(as_sym_int=SymIntArgument.create(as_name=str(arg))) + elif isinstance(arg, bool): + return Argument.create(as_bool=arg) + elif isinstance(arg, str): + return Argument.create(as_string=arg) + elif isinstance(arg, int): + return Argument.create(as_int=arg) + elif isinstance(arg, float): + return Argument.create(as_float=arg) + elif arg is None: + return Argument.create(as_none=()) + elif isinstance(arg, (list, tuple)): + if len(arg) == 0: + if arg_type is not None: + if isinstance(arg_type, torch.OptionalType): + arg_type = arg_type.getElementType() # type: ignore[assignment] + assert isinstance(arg_type, torch.ListType) + elem_type = arg_type.getElementType() + if isinstance(elem_type, torch.OptionalType): + elem_type = elem_type.getElementType() + + if isinstance(elem_type, torch.BoolType): + return Argument.create(as_bools=[]) + elif isinstance(elem_type, torch.IntType): + return Argument.create(as_ints=[]) + elif isinstance(elem_type, torch.FloatType): + return Argument.create(as_floats=[]) + elif isinstance(elem_type, torch.StringType): + return Argument.create(as_strings=[]) + elif isinstance(elem_type, torch.TensorType): + return Argument.create(as_tensors=[]) + else: + # I believe empty symint lists default to ints, but + # please file an issue if this is not the case + raise SerializeError(f"Empty list with type {elem_type} nyi.") + else: + # We could serialize this by default to a tensor list. This + # is needed in the HOO case + log.warning( + "Unsure how to serialize the given empty list, " + "as we don't know the type of this argument. " + "Serializing it as a tensor list by default." + ) + return Argument.create(as_tensors=[]) + + # Must check bool first, as bool is also treated as int + if all(isinstance(a, bool) for a in arg): + return Argument.create(as_bools=list(arg)) + elif all(isinstance(a, int) for a in arg): + return Argument.create(as_ints=list(arg)) + elif all(isinstance(a, float) for a in arg): + return Argument.create(as_floats=list(arg)) + elif all(isinstance(a, str) for a in arg): + return Argument.create(as_strings=list(arg)) + elif all(isinstance(a, torch.SymInt) for a in arg): + # This is a special branch for handling SymInt args in inductor's + # ExternalFallbackNode. 
# For regular FX graph, SymInt arg should be a fx.Node with + # self.is_sym_int_arg(arg) being true + return Argument.create( + as_sym_ints=[SymIntArgument.create(as_name=str(a)) for a in arg] + ) + elif all(self.is_sym_int_arg(a) for a in arg): + # list of sym_ints + values = [] + for a in arg: + if isinstance(a, torch.fx.Node): + values.append(SymIntArgument.create(as_name=a.name)) + elif isinstance(a, int): + values.append(SymIntArgument.create(as_int=a)) + return Argument.create(as_sym_ints=values) + elif all(self.is_sym_bool_arg(a) for a in arg): + # list of sym_bools + values = [] + for a in arg: + if isinstance(a, torch.fx.Node): + values.append(SymBoolArgument.create(as_name=a.name)) + elif isinstance(a, bool): + values.append(SymBoolArgument.create(as_bool=a)) + return Argument.create(as_sym_bools=values) + elif all(isinstance(a, torch.fx.Node) for a in arg): + # list of tensors + arguments = [] + for a in arg: + if a.op == "get_attr": + raise SerializeError( + "getattr nodes containing tensors should not appear in the graph" + ) + arguments.append(TensorArgument(name=a.name)) + return Argument.create(as_tensors=arguments) + elif all(isinstance(a, (torch.fx.Node, type(None))) for a in arg): + # list of optional tensors + def serialize_optional_tensor_args(a): + if a is None: + return OptionalTensorArgument.create(as_none=()) + elif isinstance(a, torch.fx.Node): + return OptionalTensorArgument.create( + as_tensor=TensorArgument(name=a.name) + ) + else: + raise SerializeError(f"Unsupported list/tuple argument: {a}") + + return Argument.create( + as_optional_tensors=list(map(serialize_optional_tensor_args, arg)) + ) + elif all(isinstance(a, inductor_tensor_buffers) for a in arg): + # list of inductor buffers + return Argument.create( + as_tensors=[TensorArgument(name=a.get_name()) for a in arg], + ) + elif all( + isinstance(a, (*inductor_tensor_buffers, type(None))) for a in arg + ): + # list of inductor buffers as optional tensors + def serialize_optional_tensor_args(a): + if a is None: + return OptionalTensorArgument.create(as_none=()) + elif isinstance(a, inductor_tensor_buffers): + return OptionalTensorArgument.create( + as_tensor=TensorArgument(name=a.get_name()) + ) + else: + raise SerializeError(f"Unsupported list/tuple argument: {a}") + + return Argument.create( + as_optional_tensors=list(map(serialize_optional_tensor_args, arg)) + ) + else: + raise SerializeError( + f"Unsupported list/tuple argument type: {[type(a) for a in arg]}" + ) + elif isinstance(arg, torch.dtype): + return Argument.create(as_scalar_type=_TORCH_TO_SERIALIZE_DTYPE[arg]) + elif isinstance(arg, torch.device): + return Argument.create(as_device=Device(type=arg.type, index=arg.index)) + elif isinstance(arg, torch.memory_format): + return Argument.create( + as_memory_format=_TORCH_TO_SERIALIZE_MEMORY_FORMAT[arg] + ) + elif isinstance(arg, torch.layout): + return Argument.create(as_layout=_TORCH_TO_SERIALIZE_LAYOUT[arg]) + elif isinstance(arg, torch._C.ScriptObject): + if not ( + arg._has_method("__getstate__") # type: ignore[attr-defined] + and arg._has_method("__setstate__") # type: ignore[attr-defined] + ): + raise SerializeError( + f"Unable to serialize custom class {arg}. Please define " + "serialization methods via def_pickle()." + ) + # Custom objects through torchbind are serializable with pickle, + # by implementing the .def_pickle function. This should result + # in the object containing a __getstate__ and __setstate__ + # serialize/deserialize function. 
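+ # Note that only the name and class FQN are stored in the serialized + # graph; the pickled object itself travels in the constants artifact (see + # ExportedProgramSerializer.serialize, which folds self.custom_objs into + # the constants table).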
custom_obj_name = f"_custom_obj_{len(self.custom_objs)}" + self.custom_objs[custom_obj_name] = arg + class_fqn = arg._type().qualified_name() # type: ignore[attr-defined] + return Argument.create( + as_custom_obj=CustomObjArgument(custom_obj_name, class_fqn) + ) + elif isinstance(arg, (torch._ops.OpOverload, torch._ops.HigherOrderOperator)): + return Argument.create(as_operator=self.serialize_operator(arg)) + else: + raise SerializeError(f"Unsupported argument type: {type(arg)} with schema arg_type {arg_type}") + + def serialize_tensor_output(self, name, meta_val) -> TensorArgument: + assert name not in self.graph_state.tensor_values + self.graph_state.tensor_values[name] = serialize_tensor_meta(meta_val) + return TensorArgument(name=name) + + def serialize_sym_int_output(self, name, meta_val) -> SymIntArgument: + assert name not in self.graph_state.sym_int_values + self.graph_state.sym_int_values[name] = serialize_sym_int(meta_val) + return SymIntArgument.create(as_name=name) + + def serialize_sym_bool_output(self, name, meta_val) -> SymBoolArgument: + assert name not in self.graph_state.sym_bool_values + self.graph_state.sym_bool_values[name] = serialize_sym_bool(meta_val) + return SymBoolArgument.create(as_name=name) + + def serialize_input_spec(self, spec: ep.InputSpec) -> InputSpec: + if spec.kind == ep.InputKind.USER_INPUT: + if isinstance(spec.arg, ep.ConstantArgument): + # Must check bool first, as bool is also treated as int + if isinstance(spec.arg.value, bool): + constant_spec = ConstantValue.create(as_bool=spec.arg.value) + elif isinstance(spec.arg.value, int): + constant_spec = ConstantValue.create(as_int=spec.arg.value) + elif isinstance(spec.arg.value, str): + constant_spec = ConstantValue.create(as_string=spec.arg.value) + elif isinstance(spec.arg.value, float): + constant_spec = ConstantValue.create(as_float=spec.arg.value) + elif spec.arg.value is None: + constant_spec = ConstantValue.create(as_none=()) + else: + raise SerializeError(f"Unhandled constant input {spec.arg.value} to serialize") + return InputSpec.create( + constant_input=ConstantInputSpec( + name=spec.arg.name, value=constant_spec + ) + ) + else: + return InputSpec.create( + user_input=UserInputSpec( + arg=self.serialize_argument_spec(spec.arg) + ) + ) + elif spec.kind == ep.InputKind.PARAMETER: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + return InputSpec.create( + parameter=InputToParameterSpec( + arg=TensorArgument(name=spec.arg.name), + parameter_name=spec.target, + ) + ) + elif spec.kind == ep.InputKind.BUFFER: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + assert spec.persistent is not None + return InputSpec.create( + buffer=InputToBufferSpec( + arg=TensorArgument(name=spec.arg.name), + buffer_name=spec.target, + persistent=spec.persistent, + ) + ) + elif spec.kind == ep.InputKind.CONSTANT_TENSOR: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + return InputSpec.create( + tensor_constant=InputToTensorConstantSpec( + arg=TensorArgument(name=spec.arg.name), + tensor_constant_name=spec.target, + ) + ) + elif spec.kind == ep.InputKind.CUSTOM_OBJ: + assert spec.target is not None + assert isinstance(spec.arg, ep.CustomObjArgument) + return InputSpec.create( + custom_obj=InputToCustomObjSpec( + arg=CustomObjArgument( + name=spec.arg.name, class_fqn=spec.arg.class_fqn + ), + custom_obj_name=spec.target, + ) + ) + elif spec.kind == ep.InputKind.TOKEN: + assert isinstance(spec.arg, ep.TokenArgument) + return InputSpec.create( + token=InputTokenSpec( + 
arg=TokenArgument(name=spec.arg.name), + ) + ) + else: + raise AssertionError(f"Unknown argument kind: {spec}") + + def serialize_output_spec(self, spec: ep.OutputSpec) -> OutputSpec: + if spec.kind == ep.OutputKind.USER_OUTPUT: + return OutputSpec.create( + user_output=UserOutputSpec(arg=self.serialize_argument_spec(spec.arg)) + ) + elif spec.kind == ep.OutputKind.LOSS_OUTPUT: + assert isinstance(spec.arg, ep.TensorArgument) + return OutputSpec.create( + loss_output=LossOutputSpec(arg=TensorArgument(name=spec.arg.name)) + ) + elif spec.kind == ep.OutputKind.BUFFER_MUTATION: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + return OutputSpec.create( + buffer_mutation=BufferMutationSpec( + arg=TensorArgument(name=spec.arg.name), + buffer_name=spec.target, + ) + ) + elif spec.kind == ep.OutputKind.GRADIENT_TO_PARAMETER: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + return OutputSpec.create( + gradient_to_parameter=GradientToParameterSpec( + arg=TensorArgument(name=spec.arg.name), + parameter_name=spec.target, + ) + ) + elif spec.kind == ep.OutputKind.GRADIENT_TO_USER_INPUT: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + return OutputSpec.create( + gradient_to_user_input=GradientToUserInputSpec( + arg=TensorArgument(name=spec.arg.name), + user_input_name=spec.target, + ) + ) + elif spec.kind == ep.OutputKind.USER_INPUT_MUTATION: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + return OutputSpec.create( + user_input_mutation=UserInputMutationSpec( + arg=TensorArgument(name=spec.arg.name), + user_input_name=spec.target, + ) + ) + elif spec.kind == ep.OutputKind.TOKEN: + assert isinstance(spec.arg, ep.TokenArgument) + return OutputSpec.create( + token=OutputTokenSpec( + arg=TokenArgument(name=spec.arg.name), + ) + ) + else: + raise AssertionError(f"Unknown argument kind: {spec}") + + def serialize_signature(self, sig: ep.ExportGraphSignature) -> GraphSignature: + return GraphSignature( + input_specs=[self.serialize_input_spec(s) for s in sig.input_specs], + output_specs=[self.serialize_output_spec(s) for s in sig.output_specs], + ) + + def serialize_argument_spec(self, x: ep.ArgumentSpec) -> Argument: + if isinstance(x, ep.TensorArgument): + return Argument.create(as_tensor=TensorArgument(name=x.name)) + elif isinstance(x, ep.SymIntArgument): + return Argument.create(as_sym_int=SymIntArgument.create(as_name=x.name)) + elif isinstance(x, ep.ConstantArgument): + return self.serialize_input(x.value) + elif isinstance(x, ep.CustomObjArgument): + return Argument.create( + as_custom_obj=CustomObjArgument(name=x.name, class_fqn=x.class_fqn) + ) + else: + raise AssertionError("TODO") + + def serialize_module_call_signature( + self, module_call_signature: ep.ModuleCallSignature + ) -> ModuleCallSignature: + return ModuleCallSignature( + inputs=[ + self.serialize_argument_spec(x) for x in module_call_signature.inputs + ], + outputs=[ + self.serialize_argument_spec(x) for x in module_call_signature.outputs + ], + in_spec=treespec_dumps(module_call_signature.in_spec, TREESPEC_VERSION), + out_spec=treespec_dumps(module_call_signature.out_spec, TREESPEC_VERSION), + ) + + def serialize_module_call_graph( + self, module_call_graph: List[ep.ModuleCallEntry] + ) -> List[ModuleCallEntry]: + return [ + ModuleCallEntry( + fqn=entry.fqn, + signature=( + self.serialize_module_call_signature(entry.signature) + if entry.signature + else None + ), + ) + for entry in 
module_call_graph + ] + + def serialize_outputs(self, node: torch.fx.Node) -> List[Argument]: + """For a given node, return the dataclass representing its output values. + + [NOTE: Multiple outputs] We handle aggregates differently than FX. For + FX, it looks like: + + x = call_function("multiple_return", ...) + element0 = call_function(getitem, x, 0) + foo = call_function("use_output", element0) + + We do not want the intermediate `getitem` call, so our serialized representation looks like: + + element0, element1, element2 = call_function("multiple_return", ...) + foo = call_function("use_output", element0) + + We want names to be consistent across these two schemes, so that we can + mostly reuse the names coming from FX. This function computes a mapping from + the FX representation to our representation, preserving the names. + """ + assert node.op == "call_function" and isinstance(node.target, (torch._ops.OpOverload, *_registered_extension_types())) + + schema = _get_schema_from_target(node.target) + returns = schema.returns + + if len(returns) == 0: + return [] + + meta_val = node.meta["val"] + + # Check single value return + if _is_single_tensor_list_return(node.target): + # e.g. "-> Tensor[]" + tensor_args = [] + for idx, meta in enumerate(meta_val): + name = self._output_node_name_at_index(node, idx) + tensor_args.append(self.serialize_tensor_output(name, meta)) + return [Argument.create(as_tensors=tensor_args)] + elif len(returns) == 1: + return [self.serialize_output(node.name, meta_val)] + + # There are two possibilities at this point: + # - This operator returns a tuple of Tensors, e.g. "-> (Tensor, Tensor)" + # - This operator returns a mixed tuple of Tensors and Tensor lists, e.g. "-> (Tensor, Tensor[])" + # + # Either way, start by gathering a list of TensorArguments with the correct names. + # For consistent naming with FX, consult the downstream `getitem` node and + # make sure our outputs have the same name. + + output_arguments = [] + for idx, (meta, return_schema) in enumerate(zip(meta_val, returns)): + if meta is None: + assert isinstance( + return_schema.real_type, (torch.OptionalType, torch.TensorType) + ) + # When the return type is annotated as Tensor type, the op can also return an + # undefined Tensor which will be implicitly converted to None in Python. 
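+ # e.g. a schema returning "-> (Tensor, Tensor?)" may produce (t, None) at + # runtime; the None slot is serialized as as_none so the output arity still + # matches the schema.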
+ output_arguments.append(Argument.create(as_none=())) + elif isinstance(meta, FakeTensor): + assert isinstance(return_schema.real_type, (torch.OptionalType, torch.TensorType)) + name = self._output_node_name_at_index(node, idx) + output_arguments.append(self.serialize_output(name, meta)) + elif isinstance(meta, list): + # for List[Tensor] return type + assert isinstance( + return_schema.real_type, torch.ListType + ) and isinstance( + return_schema.real_type.getElementType(), torch.TensorType + ) + user_node = self._output_node_at_index(node, idx) + assert user_node is not None + + args = [] + for i, m in enumerate(meta): + if m is None: + continue + sub_user_node_name = self._output_node_name_at_index(user_node, i) + args.append(self.serialize_tensor_output(sub_user_node_name, m)) + output_arguments.append(Argument.create(as_tensors=args)) + elif isinstance(meta, (int, SymInt)): + user_node_name = self._output_node_name_at_index(node, idx) + output_arguments.append(self.serialize_output(user_node_name, meta)) + else: + raise ValueError( + f"Unhandled output type {type(meta)} from node {node.format_node()}" + ) + + return output_arguments + + def serialize_hoo_outputs(self, node: torch.fx.Node) -> List[Argument]: + """ + For serializing HOO outputs since HOOs do not have a schema. + """ + meta_val = node.meta["val"] + + if isinstance(meta_val, tuple): + # Note: Since we don't have a schema, we just serialize all tuple + # outputs to be a list of values. Even if the output is supposed to + # be a tensor list (Tensor[]), we will serialize it to be a list of + # tensors (Tensor, Tensor, Tensor). An exception is that if there's + # a singleton tensor, we will serialize this to be a singleton + # tensor list so that the deserializer knows to insert getitem nodes. 
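+ # e.g. a HOO whose meta_val is (t0,) serializes its output as + # as_tensors=[t0] rather than as_tensor=t0, so deserialization still emits + # a getitem node for the single element.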
+ + if len(meta_val) == 1: + assert isinstance(meta_val[0], torch.Tensor) + name = self._output_node_name_at_index(node, 0) + return [Argument.create(as_tensors=[self.serialize_tensor_output(name, meta_val[0])])] + + outputs = [] + for i, element_meta_val in enumerate(meta_val): + user_node = self._output_node_at_index(node, i) + if isinstance(element_meta_val, list): + # e.g "-> Tensor[]" + assert user_node is not None + + tensors = [] + for j, m in enumerate(element_meta_val): + if not isinstance(m, torch.Tensor): + raise SerializeError(f"Serialize list output with type {type(m)} nyi") + + name = self._output_node_name_at_index(user_node, j) + tensors.append(self.serialize_tensor_output(name, m)) + outputs.append(Argument.create(as_tensors=tensors)) + + else: + name = ( + user_node.name + if user_node is not None + else f"{node.name}_unused_{i}" + ) + + outputs.append(self.serialize_output(name, element_meta_val)) + + return outputs + else: + return [self.serialize_output(node.name, meta_val)] + + def serialize_output(self, name: str, meta_val: Any) -> Argument: + # Check single value return + if meta_val is None: + return Argument.create(as_none=()) + if isinstance(meta_val, torch.Tensor): + # e.g "-> Tensor" + return Argument.create( + as_tensor=self.serialize_tensor_output(name, meta_val) + ) + elif isinstance(meta_val, (int, torch.SymInt)): + # e.g "-> SymInt" + return Argument.create( + as_sym_int=self.serialize_sym_int_output(name, meta_val) + ) + elif isinstance(meta_val, torch.SymBool): + # e.g "-> SymBool" + return Argument.create( + as_sym_bool=self.serialize_sym_bool_output(name, meta_val) + ) + + # list outputs should've been handled earlier + raise SerializeError(f"Unable to serialize output {meta_val}") + + def _handle_getitem_users(self, node: torch.fx.Node) -> List[TensorArgument]: + meta_val = node.meta["val"] + + idx_to_name = {} + for user in node.users: + assert ( + user.target is operator.getitem + ), f"User node {user} of {node} is incorrect" + idx_to_name[user.args[1]] = user.name + + for idx, _ in enumerate(meta_val): + # FX does not emit a getitem node for any outputs that are unused. + # However, we need a name for them so that the number of outputs will + # correctly match the schema. Just assign a dummy name. 
+ if idx not in idx_to_name: + idx_to_name[idx] = f"{node.name}_unused_{idx}" + + arg_list = [] + for i, element_meta_val in enumerate(meta_val): + arg_list.append( + self.serialize_tensor_output(idx_to_name[i], element_meta_val) + ) + + return arg_list + + def serialize_graph(self, graph_module: torch.fx.GraphModule) -> Graph: + assert isinstance(graph_module, torch.fx.GraphModule) + for node in graph_module.graph.nodes: + try: + getattr(self, f"handle_{node.op}")(node) + except Exception as e: + raise SerializeError( + f"Failed serializing node {node} in graph: {node.format_node()}\n Original exception {traceback.format_exc()}" + ) from e + + return Graph( + inputs=self.graph_state.inputs, + nodes=self.graph_state.nodes, + tensor_values=self.graph_state.tensor_values, + sym_int_values=self.graph_state.sym_int_values, + sym_bool_values=self.graph_state.sym_bool_values, + custom_obj_values=self.graph_state.custom_obj_values, + outputs=self.graph_state.outputs, + is_single_tensor_return=self.graph_state.is_single_tensor_return, + ) + + def serialize_graph_module_metadata(self, meta: Dict[str, Any]): + ret = {} + if custom := meta.get("custom"): + try: + ret["custom"] = json.dumps(custom) + except Exception as e: + raise SerializeError( + f"Failed to serialize custom metadata for graph with error {e}" + ) from e + + return ret + + def serialize(self, graph_module: torch.fx.GraphModule) -> GraphModule: + graph = self.serialize_graph(graph_module) + + return GraphModule( + graph=graph, + signature=self.serialize_signature(self.graph_signature), + module_call_graph=self.serialize_module_call_graph(self.module_call_graph), + metadata=self.serialize_graph_module_metadata(graph_module.meta) + ) + + +@final +class ExportedProgramSerializer(metaclass=Final): + def __init__(self, opset_version: Optional[Dict[str, int]] = None): + self.opset_version: Dict[str, int] = {} + if opset_version: + self.opset_version.update(opset_version) + if "aten" not in self.opset_version: + self.opset_version["aten"] = torch._C._get_max_operator_version() + + def serialize(self, exported_program: ep.ExportedProgram) -> _SerializedProgram: + """ + Args: + exported_program: Exported Program to serialize + """ + exported_program.validate() + + gm_serializer = GraphModuleSerializer( + exported_program.graph_signature, exported_program.module_call_graph + ) + serialized_graph_module = gm_serializer.serialize(exported_program.graph_module) + serialized_range_constraints = serialize_range_constraints( + exported_program.range_constraints + ) + + # TODO: Directly serialize exported_program.constants once + # CustomClassHolders get stored in the ExportedProgram rather than in + # the graph + constants = {} + for n, c in gm_serializer.custom_objs.items(): + constants[n] = c + for n, t in exported_program.constants.items(): + assert n not in constants + constants[n] = t + + serialized_ep = ExportedProgram( + graph_module=serialized_graph_module, + opset_version=self.opset_version, + range_constraints=serialized_range_constraints, + schema_version=SchemaVersion( + major=SCHEMA_VERSION[0], + minor=SCHEMA_VERSION[1], + ), + verifiers=[v.dialect for v in exported_program.verifiers], + torch_version=torch.__version__, + ) + + # Test canonical form is well defined. + canonicalize(serialized_ep) + + # Proxy cannot be dumped, so we remove them. 
+ new_state_dict = remove_proxy_from_state_dict( + exported_program.state_dict, in_place=False + ) + return _SerializedProgram( + serialized_ep, + serialize_torch_artifact(new_state_dict), + serialize_torch_artifact(constants), + serialize_torch_artifact(exported_program.example_inputs), + ) + + +@final +class GraphModuleDeserializer(metaclass=Final): + @dataclasses.dataclass + class Result: + graph_module: torch.fx.GraphModule + signature: ep.ExportGraphSignature + module_call_graph: List[ep.ModuleCallEntry] + names_to_symbols: Dict[str, sympy.Symbol] + state_dict: Dict[str, Union[torch.Tensor, torch.nn.Parameter]] + constants: Dict[str, Union[torch.Tensor, FakeScriptObject, torch.ScriptObject]] + example_inputs: Optional[Tuple[Tuple[torch.Tensor, ...], Dict[str, Any]]] + + def __init__(self) -> None: + self.serialized_name_to_node: Dict[str, torch.fx.Node] = {} + self.serialized_name_to_meta: Dict[str, MetaType] = {} + self.graph = torch.fx.Graph() + self.module = torch.nn.Module() + + @contextmanager + def save_graph_module(self) -> Iterator[None]: + saved = ( + self.graph, + self.module, + self.serialized_name_to_node, + self.serialized_name_to_meta, + ) + self.graph = torch.fx.Graph() + self.module = torch.nn.Module() + self.serialized_name_to_node = {} + self.serialized_name_to_meta = {} + try: + yield + finally: + ( + self.graph, + self.module, + self.serialized_name_to_node, + self.serialized_name_to_meta, + ) = saved + + def deserialize_extension_operator(self, serialized_target: str): + namespace, op_name = serialized_target.split(":") + namespace = namespace[1:] # starting with # + handler = _deserialization_registry[namespace] + return handler.from_op_name(op_name) + + def deserialize_operator(self, serialized_target: str): + if serialized_target.startswith( + "_operator" + ): # TODO(zhxchen17) Follow up on this. + module = operator + serialized_target_names = serialized_target.split(".")[1:] + elif serialized_target.startswith("torch"): + module = torch # type: ignore[misc] + serialized_target_names = serialized_target.split(".")[1:] + elif serialized_target.startswith("#"): + return self.deserialize_extension_operator(serialized_target) + else: # TODO(zhxchen17) Don't catch all here. + return serialized_target + + target = module + for name in serialized_target_names: + if not hasattr(target, name): + return serialized_target + else: + target = getattr(target, name) + return target + + def deserialize_sym_int(self, s: SymInt) -> Union[int, torch.SymInt]: + val = s.value + if s.type == "as_expr": + if val.hint is None: + hint = None + else: + assert val.hint.type == "as_int" + hint = val.hint.value + + if val.expr_str in self.symbol_name_to_symbol: + sym = self.symbol_name_to_symbol[val.expr_str] + else: + sym = sympy.sympify( + val.expr_str, + locals={**self.sympy_functions, **self.symbol_name_to_symbol}, + ) + # NOTE(avik): Assumptions on symbols are not explicitly serialized. + # This seems dangerous: it might cause unknown differences in shape env behavior + # on deserialization? Probably deserves a follow-up. + + # Here we force symbols corresponding to SymInts to be at least integers. + # Otherwise some expressions that the shape env would otherwise evaluate to False, + # e.g., 2*s = 9, can have rational solutions, e.g., 9/2. 
+ # TODO: This is HIGHLY SUSPICIOUS ezyang(May 2024) + sym = sym.subs( + {s: sympy.Symbol(s.name, integer=True) for s in sym.free_symbols} + ) + # We need to check if the symbol has already been allocated, + # self.symbol_name_to_symbol is not enough because the + # integer-ification of symbols can induce simplification; + # e.g., (2*s0 + 1) // 2 --> s0 when we know s0 is integral + if isinstance(sym, sympy.Symbol) and sym not in self.shape_env.var_to_val: + self.symbol_name_to_symbol[val.expr_str] = sym + if hint is not None: + self.shape_env.add_var_to_val(sym, hint) + + if vr := self.symbol_name_to_range.get(val.expr_str): + self.shape_env.constrain_symbol_range( + sym, + compiler_min=vr.lower, # type: ignore[arg-type] + compiler_max=vr.upper, # type: ignore[arg-type] + ) + else: + # Placeholders, in particular, can have shapes as symbolic expressions. + # We need to populate the shape env with the range constraints of their + # free symbols, otherwise evaluating such expressions will error. + self.symbol_name_to_symbol[val.expr_str] = sym + free_symbols = sym.free_symbols + for s in free_symbols: + if s.name not in self.symbol_name_to_symbol: + self.symbol_name_to_symbol[s.name] = s # type: ignore[assignment] + if vr := self.symbol_name_to_range.get(s.name): + self.shape_env.constrain_symbol_range( + s, + compiler_min=vr.lower, # type: ignore[arg-type] + compiler_max=vr.upper, # type: ignore[arg-type] + ) + + return self.shape_env.create_symintnode(sym, hint=hint) + elif s.type == "as_int": + assert isinstance(val, int) + return val + else: + raise SerializeError( + f"SymInt has invalid field type {s.type} with value {s.value}" + ) + + def deserialize_sym_bool(self, s: SymBool) -> Union[bool, torch.SymBool]: + val = s.value + if s.type == "as_expr": + # first we sympify this just to access any untracked symbols + expr = sympy.sympify(val.expr_str) + for sym in expr.free_symbols: + if ( + not isinstance(sym, sympy.Number) + and str(sym) not in self.symbol_name_to_symbol + ): + self.deserialize_sym_int(SymInt.create(as_expr=SymExpr(str(sym)))) + # then we sympify again using locals to correctly reify with the constructed symbols + expr = sympy.sympify(val.expr_str, locals=self.symbol_name_to_symbol) + return self.shape_env.create_symboolnode(expr) + elif s.type == "as_bool": + assert isinstance(val, bool) + return val + else: + raise SerializeError( + f"SymBool has invalid field type {s.type} with value {s.value}" + ) + + def deserialize_tensor_meta( + self, + tensor_meta: TensorMeta, + ) -> FakeTensor: + with self.fake_tensor_mode: + return cast( + FakeTensor, + torch.empty_strided( + tuple(self.deserialize_sym_int(val) for val in tensor_meta.sizes), # type: ignore[misc] + tuple(self.deserialize_sym_int(val) for val in tensor_meta.strides), # type: ignore[misc] + device=deserialize_device(tensor_meta.device), + dtype=_SERIALIZE_TO_TORCH_DTYPE[tensor_meta.dtype], + ), + ) + + def deserialize_script_obj_meta( + self, script_obj_meta: CustomObjArgument + ) -> ep.CustomObjArgument: + return ep.CustomObjArgument( + name=script_obj_meta.name, + class_fqn=script_obj_meta.class_fqn, + ) + + def deserialize_graph_output(self, output) -> Optional[Union[torch.fx.Node, int]]: + if output.type == "as_tensor": + return self.serialized_name_to_node[output.as_tensor.name] + elif output.type == "as_sym_int": + return self.serialized_name_to_node[output.as_sym_int.as_name] + elif output.type == "as_sym_bool": + return self.serialized_name_to_node[output.as_sym_bool.as_name] + elif output.type == 
"as_int": + return output.as_int + elif output.type == "as_none": + return None + else: + raise SerializeError(f"Unable to deserialize output node {output}") + + def deserialize_graph(self, serialized_graph: Graph) -> torch.fx.Graph: + # Handle the tensor metas. + for name, tensor_value in serialized_graph.tensor_values.items(): + meta_val = self.deserialize_tensor_meta(tensor_value) + self.serialized_name_to_meta[name] = meta_val + + for name, sym_int_value in serialized_graph.sym_int_values.items(): + self.serialized_name_to_meta[name] = self.deserialize_sym_int(sym_int_value) + + for name, sym_bool_value in serialized_graph.sym_bool_values.items(): + self.serialized_name_to_meta[name] = self.deserialize_sym_bool( + sym_bool_value + ) + + for name, script_obj_meta in serialized_graph.custom_obj_values.items(): + self.serialized_name_to_meta[name] = self.deserialize_script_obj_meta( + script_obj_meta + ) + + # Inputs: convert to placeholder nodes in FX. + for i, input_ in enumerate(serialized_graph.inputs): + if input_.type in ("as_tensor", "as_sym_int", "as_custom_obj"): + node_name = input_.value.name + placeholder_node = self.graph.placeholder(node_name) + # FX might declare a name illegal (e.g. some nn.Modules use "input" as forward() arguments) + # we will overwrite it + placeholder_node.name = node_name + self.sync_fx_node(node_name, placeholder_node) + elif input_.type in ( + "as_int", + "as_float", + "as_bool", + "as_none", + "as_string", + ): + node_name = self.signature.input_specs[i].arg.name + placeholder_node = self.graph.placeholder(node_name) + placeholder_node.meta["val"] = self.deserialize_input(input_) + else: + raise SerializeError(f"Invalid input type {input_}") + + # Nodes: convert to call_function nodes. + for serialized_node in serialized_graph.nodes: + try: + target = self.deserialize_operator(serialized_node.target) + self.deserialize_node(serialized_node, target) + + except Exception as e: + raise SerializeError( + f"Failed deserializing node {serialized_node}\n Original exception {traceback.format_exc()}" + ) from e + + # Outputs: convert to a single `output` node. + outputs = [] + for output in serialized_graph.outputs: + outputs.append(self.deserialize_graph_output(output)) + + if serialized_graph.is_single_tensor_return: + assert len(outputs) == 1 + outputs = outputs[0] # type: ignore[assignment] + else: + outputs = tuple(outputs) # type: ignore[assignment] + + output_node = self.graph.output(outputs) + + if serialized_graph.is_single_tensor_return: + output_node.meta["val"] = output_node.args[0].meta["val"] + else: + output_node.meta["val"] = tuple( + arg.meta["val"] if isinstance(arg, torch.fx.Node) else arg + for arg in output_node.args[0] + ) + + return self.graph + + def deserialize_node(self, serialized_node: Node, target: Callable) -> None: + if ( + target in _SYM_BOOL_OPS + or target in _SYM_INT_OPS + or target == torch.ops.aten.item.default # this can produce either SymInt or SymBool + ): + name = serialized_node.outputs[0].value.as_name + args = self.deserialize_sym_op_inputs(serialized_node.inputs) + + fx_node = self.graph.create_node("call_function", target, args, {}, name) + self.deserialize_sym_op_outputs(serialized_node, fx_node) + + elif isinstance(target, torch._ops.HigherOrderOperator): + args, kwargs = self.deserialize_hoo_inputs(serialized_node.inputs) + # If HOP returns a single tensor, name the + # newly-created node after it. This ensures that these tensor values + # have names that are consistent with serialized. 
+ # + # HOPs don't have schema yet, just check the output lengths and as_tensor attribute + name = ( + serialized_node.outputs[0].as_tensor.name + if len(serialized_node.outputs) == 1 + and hasattr(serialized_node.outputs[0], "as_tensor") + else None + ) + fx_node = self.graph.create_node( + "call_function", target, args, kwargs, name + ) + self.deserialize_outputs(serialized_node, fx_node) + fx_node.meta.update(self.deserialize_metadata(serialized_node.metadata)) + + elif isinstance(target, (torch._ops.OpOverload, *_registered_extension_types())): + # For convenience: if this node returns a single tensor, name the + # newly-created node after it. This ensures that these tensor values + # have names that are consistent with serialized. + name = ( + serialized_node.outputs[0].as_tensor.name + if _is_single_tensor_return(target) + else None # FX will generate a name for us. + ) + args, kwargs = self.deserialize_inputs(target, serialized_node) + fx_node = self.graph.create_node( + "call_function", target, args, kwargs, name + ) + self.deserialize_outputs(serialized_node, fx_node) + else: + raise SerializeError( + f"Unsupported target type for node {serialized_node}: {type(target)}" + ) + + fx_node.meta.update(self.deserialize_metadata(serialized_node.metadata)) + if fx_node.op not in ["placeholder", "output"] and "nn_module_stack" not in fx_node.meta: + fx_node.meta["nn_module_stack"] = {} # serialization throws away empty dicts + + def deserialize_input_spec(self, i: InputSpec) -> ep.InputSpec: + if i.type == "user_input": + return ep.InputSpec( + kind=ep.InputKind.USER_INPUT, + arg=self.deserialize_argument_spec(i.user_input.arg), + target=None, + ) + elif i.type == "parameter": + return ep.InputSpec( + kind=ep.InputKind.PARAMETER, + arg=ep.TensorArgument(name=i.parameter.arg.name), + target=i.parameter.parameter_name, + ) + elif i.type == "buffer": + return ep.InputSpec( + kind=ep.InputKind.BUFFER, + arg=ep.TensorArgument(name=i.buffer.arg.name), + target=i.buffer.buffer_name, + persistent=i.buffer.persistent, + ) + elif i.type == "tensor_constant": + return ep.InputSpec( + kind=ep.InputKind.CONSTANT_TENSOR, + arg=ep.TensorArgument(name=i.tensor_constant.arg.name), + target=i.tensor_constant.tensor_constant_name, + ) + elif i.type == "custom_obj": + return ep.InputSpec( + kind=ep.InputKind.CUSTOM_OBJ, + arg=ep.CustomObjArgument( + name=i.custom_obj.arg.name, class_fqn=i.custom_obj.arg.class_fqn + ), + target=i.custom_obj.custom_obj_name, + ) + elif i.type == "token": + return ep.InputSpec( + kind=ep.InputKind.TOKEN, + arg=ep.TokenArgument(name=i.token.arg.name), + target=None + ) + elif i.type == "constant_input": + return ep.InputSpec( + kind=ep.InputKind.USER_INPUT, + arg=ep.ConstantArgument( + name=i.constant_input.name, + value=self.deserialize_constant_input(i.constant_input.value) + ), + target=None, + ) + else: + raise AssertionError(f"Unknown input spec {i}") + + def deserialize_output_spec(self, o: OutputSpec) -> ep.OutputSpec: + if o.type == "user_output": + return ep.OutputSpec( + kind=ep.OutputKind.USER_OUTPUT, + arg=self.deserialize_argument_spec(o.user_output.arg), + target=None, + ) + elif o.type == "loss_output": + return ep.OutputSpec( + kind=ep.OutputKind.LOSS_OUTPUT, + arg=ep.TensorArgument(name=o.loss_output.arg.name), + target=None, + ) + elif o.type == "buffer_mutation": + return ep.OutputSpec( + kind=ep.OutputKind.BUFFER_MUTATION, + arg=ep.TensorArgument(name=o.buffer_mutation.arg.name), + target=o.buffer_mutation.buffer_name, + ) + elif o.type == 
"gradient_to_parameter": + return ep.OutputSpec( + kind=ep.OutputKind.GRADIENT_TO_PARAMETER, + arg=ep.TensorArgument(name=o.gradient_to_parameter.arg.name), + target=o.gradient_to_parameter.parameter_name, + ) + elif o.type == "gradient_to_user_input": + return ep.OutputSpec( + kind=ep.OutputKind.GRADIENT_TO_USER_INPUT, + arg=ep.TensorArgument(name=o.gradient_to_user_input.arg.name), + target=o.gradient_to_user_input.user_input_name, + ) + elif o.type == "user_input_mutation": + return ep.OutputSpec( + kind=ep.OutputKind.USER_INPUT_MUTATION, + arg=ep.TensorArgument(name=o.user_input_mutation.arg.name), + target=o.user_input_mutation.user_input_name, + ) + elif o.type == "token": + return ep.OutputSpec( + kind=ep.OutputKind.TOKEN, + arg=ep.TokenArgument(name=o.token.arg.name), + target=None + ) + else: + raise AssertionError(f"Unknown output spec {o}") + + def deserialize_signature(self, sig: GraphSignature) -> ep.ExportGraphSignature: + return ep.ExportGraphSignature( + input_specs=[self.deserialize_input_spec(i) for i in sig.input_specs], + output_specs=[self.deserialize_output_spec(o) for o in sig.output_specs], + ) + + def deserialize( + self, + serialized_graph_module: GraphModule, + serialized_state_dict: Union[Dict[str, torch.Tensor], bytes], + constants: Union[Dict[str, Any], bytes], + example_inputs: Optional[Union[Tuple[Tuple[torch.Tensor, ...], Dict[str, Any]], bytes]] = None, + symbol_name_to_range: Optional[Dict[str, symbolic_shapes.ValueRanges]] = None, + ) -> Result: + global _CURRENT_DESERIALIZER + assert _CURRENT_DESERIALIZER is None + _CURRENT_DESERIALIZER = self + try: + self.shape_env = symbolic_shapes.ShapeEnv(assume_static_by_default=True) + self.fake_tensor_mode = FakeTensorMode( + allow_fallback_kernels=False, + allow_non_fake_inputs=True, + shape_env=self.shape_env, + ) + self.sympy_functions = { + # all torch.utils._sympy.functions should go here + # TODO(avik): find a better way to keep this collection in sync; + # e.g.., `exec('from torch.utils._sympy.functions import *', ...)` + # would work as long as the public API of that module is complete + "FloorDiv": torch.utils._sympy.functions.FloorDiv, + "ModularIndexing": torch.utils._sympy.functions.ModularIndexing, + "Where": torch.utils._sympy.functions.Where, + "PythonMod": torch.utils._sympy.functions.PythonMod, + "Mod": torch.utils._sympy.functions.Mod, + "CleanDiv": torch.utils._sympy.functions.CleanDiv, + "CeilToInt": torch.utils._sympy.functions.CeilToInt, + "FloorToInt": torch.utils._sympy.functions.FloorToInt, + "CeilDiv": torch.utils._sympy.functions.CeilDiv, + "LShift": torch.utils._sympy.functions.LShift, + "RShift": torch.utils._sympy.functions.RShift, + "PowByNatural": torch.utils._sympy.functions.PowByNatural, + "FloatPow": torch.utils._sympy.functions.FloatPow, + "FloatTrueDiv": torch.utils._sympy.functions.FloatTrueDiv, + "IntTrueDiv": torch.utils._sympy.functions.IntTrueDiv, + "IsNonOverlappingAndDenseIndicator": torch.utils._sympy.functions.IsNonOverlappingAndDenseIndicator, + "TruncToFloat": torch.utils._sympy.functions.TruncToFloat, + "TruncToInt": torch.utils._sympy.functions.TruncToInt, + "RoundToInt": torch.utils._sympy.functions.RoundToInt, + "RoundDecimal": torch.utils._sympy.functions.RoundDecimal, + "ToFloat": torch.utils._sympy.functions.ToFloat, + "Identity": torch.utils._sympy.functions.Identity, + } + self.symbol_name_to_symbol: Dict[str, sympy.Symbol] = {} + self.constants = deserialize_torch_artifact(constants) + self.signature = 
self.deserialize_signature(serialized_graph_module.signature) + + # deserialization does analysis with checks on 0/1, so we create fake range constraints and + # restore the original range constraints afterwards + self.symbol_name_to_range = {} + if symbol_name_to_range: + for k, vr in symbol_name_to_range.items(): + lower = vr.lower + if vr.upper >= 2: # max is >= 2, not sym bool range + lower = max(2, lower) + self.symbol_name_to_range[k] = symbolic_shapes.ValueRanges(_int_to_sympy_int(lower), vr.upper) + + if example_inputs is not None and len(example_inputs) > 0: + self.example_inputs = deserialize_torch_artifact(example_inputs) + else: + self.example_inputs = None + self.deserialize_graph(serialized_graph_module.graph) + + module_call_graph = self.deserialize_module_call_graph( + serialized_graph_module.module_call_graph + ) + graph_module = ep._create_graph_module_for_export( + self.module, self.graph + ) + meta = {} + if custom := serialized_graph_module.metadata.get("custom"): + meta["custom"] = json.loads(custom) + graph_module.meta = meta + return GraphModuleDeserializer.Result( + graph_module=graph_module, + signature=self.signature, + module_call_graph=module_call_graph, + names_to_symbols=self.symbol_name_to_symbol, + state_dict=deserialize_torch_artifact(serialized_state_dict), + constants=self.constants, + example_inputs=self.example_inputs, + ) + finally: + _CURRENT_DESERIALIZER = None + + def sync_fx_node(self, name: str, fx_node: torch.fx.Node): + if name in self.serialized_name_to_node: + raise SerializeError(f"Node {name} has already been deserialized before.") + # overwrite name + fx_node.name = name + self.serialized_name_to_node[name] = fx_node + assert "val" not in fx_node.meta + fx_node.meta["val"] = self.serialized_name_to_meta[name] + + def deserialize_sym_op_inputs(self, inputs): + return tuple(self.deserialize_input(input.arg) for input in inputs) + + def deserialize_inputs(self, target, serialized_node: Node): + schema_args = _get_schema_from_target(target).arguments + actual_args = { + input.name: self.deserialize_input(input.arg) + for input in serialized_node.inputs + } + args = [] + kwargs = {} + for schema_arg in schema_args: + is_positional = ( + not schema_arg.has_default_value() and not schema_arg.kwarg_only + ) + if is_positional: + args.append(actual_args[schema_arg.name]) + else: + if schema_arg.name in actual_args: + kwargs[schema_arg.name] = actual_args[schema_arg.name] + return tuple(args), kwargs + + def deserialize_hoo_inputs(self, inputs: List[NamedArgument]): + """ + For deserializing HOO inputs since HOOs do not have a schema. 
+ """ + args = [] + kwargs = {} + for input_ in inputs: + if input_.name != "": + kwargs[input_.name] = self.deserialize_input(input_.arg) + else: + args.append(self.deserialize_input(input_.arg)) + return (tuple(args), kwargs) + + def deserialize_input(self, inp: Argument) -> Any: + value = inp.value + typ_ = inp.type + if typ_ == "as_none": + # None should converted as None, but is encoded as bool in serialized + # Convert serialized object to torch equivalent + return None + elif typ_ == "as_tensor": + return self.serialized_name_to_node[inp.as_tensor.name] + elif typ_ == "as_scalar_type": + return _SERIALIZE_TO_TORCH_DTYPE[inp.as_scalar_type] + elif typ_ == "as_memory_format": + return _SERIALIZE_TO_TORCH_MEMORY_FORMAT[inp.as_memory_format] + elif typ_ == "as_layout": + return _SERIALIZE_TO_TORCH_LAYOUT[inp.as_layout] + elif typ_ == "as_graph": + assert isinstance(value, GraphArgument) + with self.save_graph_module(): + self.deserialize_graph(value.graph) + submodule = ep._create_graph_module_for_export(self.module, self.graph) + self.module.register_module(value.name, submodule) + return self.graph.create_node( + "get_attr", + value.name, + name=value.name, + ) + elif typ_ == "as_device": + return deserialize_device(inp.as_device) + elif typ_ == "as_int": + return inp.as_int + elif typ_ == "as_float": + return inp.as_float + elif typ_ == "as_bool": + return inp.as_bool + elif typ_ == "as_string": + return inp.as_string + elif typ_ == "as_sym_int": + return self.deserialize_sym_argument(inp.as_sym_int) + elif typ_ == "as_sym_bool": + return self.deserialize_sym_argument(inp.as_sym_bool) + elif isinstance(value, list): + if len(value) == 0: + return [] + elif typ_ == "as_tensors": + result = [] + for arg in value: + result.append(self.serialized_name_to_node[arg.name]) + return result + elif typ_ in ("as_ints", "as_floats", "as_bools", "as_strings"): + # convert from serialized.python.types.List to python list + return list(value) + elif typ_ in ("as_sym_ints", "as_sym_bools"): + return [self.deserialize_sym_argument(arg) for arg in value] + elif typ_ == "as_optional_tensors": + + def deserialize_optional_tensor_args(a): + if a.type == "as_none": + return None + elif a.type == "as_tensor": + return self.serialized_name_to_node[a.value.name] + else: + raise SerializeError(f"Unhandled argument {inp}") + + return list(map(deserialize_optional_tensor_args, value)) + else: + raise SerializeError(f"Unhandled argument {inp}") + elif typ_ == "as_custom_obj": + if inp.as_custom_obj.name in self.serialized_name_to_node: + # Custom object has been lifted as an input + return self.serialized_name_to_node[inp.as_custom_obj.name] + return self.constants[inp.as_custom_obj.name] + elif typ_ == "as_operator": + return self.deserialize_operator(inp.as_operator) + else: + raise SerializeError(f"Unhandled argument {inp}") + + def deserialize_constant_input(self, inp: ConstantValue) -> Any: + if inp.type == "as_int": + return int(inp.as_int) + elif inp.type == "as_float": + return float(inp.as_float) + elif inp.type == "as_string": + return str(inp.as_string) + elif inp.type == "as_bool": + return bool(inp.as_bool) + elif inp.type == "as_none": + return None + else: + raise SerializeError(f"Unhandled constant argument {inp} to deserialize") + + def deserialize_sym_argument(self, sym_arg): + if isinstance(sym_arg, SymIntArgument): + if sym_arg.type == "as_int": + return sym_arg.as_int + elif sym_arg.type == "as_name": + return self.serialized_name_to_node[sym_arg.as_name] + elif isinstance(sym_arg, 
SymBoolArgument): + if sym_arg.type == "as_bool": + return sym_arg.as_bool + elif sym_arg.type == "as_name": + return self.serialized_name_to_node[sym_arg.as_name] + raise SerializeError(f"Unknown symbolic argument type: {sym_arg}") + + def deserialize_sym_op_outputs(self, serialized_node: Node, fx_node: torch.fx.Node): + self.sync_fx_node(serialized_node.outputs[0].value.as_name, fx_node) + + def deserialize_outputs(self, serialized_node: Node, fx_node: torch.fx.Node): + # Check single value return + if len(serialized_node.outputs) == 0: + return + if ( + len(serialized_node.outputs) == 1 + and serialized_node.outputs[0].type == "as_tensor" + ): + self.sync_fx_node(serialized_node.outputs[0].as_tensor.name, fx_node) + return + elif len(serialized_node.outputs) == 1 and isinstance( + serialized_node.outputs[0].value, (SymIntArgument, SymBoolArgument) + ): + self.sync_fx_node(serialized_node.outputs[0].value.as_name, fx_node) + return + + self.deserialize_multiple_outputs(serialized_node, fx_node) + + def deserialize_multiple_outputs( + self, serialized_node: Node, fx_node: torch.fx.Node + ) -> None: + deserialized_metadata = self.deserialize_metadata(serialized_node.metadata) + + def generate_getitem( + meta_val, + fx_node: torch.fx.Node, + arg: Union[TensorArgument, SymIntArgument], + idx: int, + ): + if isinstance(arg, TensorArgument): + name = arg.name + elif isinstance(arg, SymIntArgument): + name = arg.as_name + else: + raise AssertionError( + f"generate_getitem got unknown argument type {type(arg)}" + ) + individual_output = self.graph.create_node( + "call_function", + operator.getitem, + (fx_node, idx), + name=name, + ) + self.sync_fx_node(name, individual_output) + meta_val.append(self.serialized_name_to_meta[name]) + # The derived `getitem` nodes should have the same stacktrace as the + # original `fx_node` + individual_output.meta.update(deserialized_metadata) + + def generate_getitems(meta_val, fx_node: torch.fx.Node, args): + for idx, arg in enumerate(args): + if isinstance(arg, Argument): + arg = arg.value + if isinstance(arg, (TensorArgument, SymIntArgument)): + generate_getitem(meta_val, fx_node, arg, idx) + elif isinstance(arg, (list, tuple)): + list_output = self.graph.create_node( + "call_function", + operator.getitem, + (fx_node, idx), + ) + meta_val.append([]) + generate_getitems(meta_val[-1], list_output, arg) + list_output.meta.update(deserialized_metadata) + list_output.meta["val"] = meta_val[-1] + else: + raise NotImplementedError(f"Unimplemented node output type: {arg}") + + # Convert multiple return types to FX format. + # In FX, each node only returns one value. So in order to represent + # multiple return values, we have to emit a `getitem` node for each + # return value. 
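+ # e.g. a serialized node with tensor outputs (t0, t1) deserializes to: + # n = call_function(op, ...) + # t0 = call_function(operator.getitem, (n, 0)) + # t1 = call_function(operator.getitem, (n, 1))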
+ # This performs the inverse mapping of the `serialize_outputs` call in + # serialization, see [NOTE: Multiple outputs] + meta_val: List[Any] = [] + if len(serialized_node.outputs) == 1: + assert isinstance(serialized_node.outputs[0].value, list) + assert isinstance(serialized_node.outputs[0].value[0], TensorArgument) + generate_getitems(meta_val, fx_node, serialized_node.outputs[0].as_tensors) + else: + generate_getitems(meta_val, fx_node, serialized_node.outputs) + + # also update the metaval for `fx_node` to be a list(meta) + fx_node.meta["val"] = tuple(meta_val) + self.serialized_name_to_node[fx_node.name] = fx_node + + def deserialize_metadata(self, metadata: Dict[str, str]) -> Dict[str, Any]: + ret: Dict[str, Any] = {} + if stack_trace := metadata.get("stack_trace"): + ret["stack_trace"] = stack_trace + + def deserialize_meta_func(serialized_target: str): + module = None + if serialized_target.startswith("torch.nn"): + module = torch.nn + serialized_target_names = serialized_target.split(".")[2:] + elif serialized_target.startswith("torch"): + module = torch + serialized_target_names = serialized_target.split(".")[1:] + else: + return self.deserialize_operator(serialized_target) + + target = module + for name in serialized_target_names: + if not hasattr(target, name): + return serialized_target + else: + target = getattr(target, name) + return target + + if nn_module_stack_str := metadata.get("nn_module_stack"): + # Originally serialized to "key,orig_path,type_str" + def import_nn_module_stack(key, path, ty): + return key, (path, ty) + + # Helper function that splits strings by commas except for those + # encapsulated by parens, which are valid traces. + # TODO: Currently this is needed due to indexing Sequential + # layers introducing names in the form "layer.slice(1, None, None)". + # If that naming is improved, this fancier splitting can probably be + # reverted to a simple split by comma. + def metadata_split(metadata): + # Remove the parentheses and commas inside them + metadata = re.sub(r'\(.*?\)', '', metadata) + # Split the string by comma, except for those inside parentheses + return re.split(r'(?<!\()\s*,\s*(?!\()', metadata) + + nn_module_stack = dict( + import_nn_module_stack(*metadata_split(item)) + for item in nn_module_stack_str.split(ST_DELIMITER) + ) + ret["nn_module_stack"] = nn_module_stack + + if source_fn_st_str := metadata.get("source_fn_stack"): + # Originally serialized to "fx_node_name,op_str" + source_fn_st = [] + for source_fn_str in source_fn_st_str.split(ST_DELIMITER): + name, target_str = source_fn_str.split(",") + source_fn_st.append((name, deserialize_meta_func(target_str))) + ret["source_fn_stack"] = source_fn_st + + if torch_fn_str := metadata.get("torch_fn"): + ret["torch_fn"] = tuple(torch_fn_str.split(ST_DELIMITER)) + + if custom_str := metadata.get("custom"): + ret["custom"] = json.loads(custom_str) + + return ret + + def deserialize_argument_spec(self, x: Argument) -> 
ep.ArgumentSpec: + if x.type == "as_tensor": + return ep.TensorArgument(name=x.as_tensor.name) + elif x.type == "as_sym_int": + return ep.SymIntArgument(name=x.as_sym_int.as_name) + elif x.type == "as_custom_obj": + return ep.ConstantArgument(name=x.as_custom_obj.name, value=self.deserialize_input(x)) + else: + return ep.ConstantArgument(name="", value=self.deserialize_input(x)) + + def deserialize_module_call_signature( + self, module_call_signature: ModuleCallSignature + ) -> ep.ModuleCallSignature: + return ep.ModuleCallSignature( + inputs=[ + self.deserialize_argument_spec(x) for x in module_call_signature.inputs + ], + outputs=[ + self.deserialize_argument_spec(x) for x in module_call_signature.outputs + ], + in_spec=treespec_loads(module_call_signature.in_spec), + out_spec=treespec_loads(module_call_signature.out_spec), + ) + + def deserialize_module_call_graph( + self, module_call_graph: List[ModuleCallEntry] + ) -> List[ep.ModuleCallEntry]: + return [ + ep.ModuleCallEntry( + fqn=entry.fqn, + signature=( + self.deserialize_module_call_signature(entry.signature) + if entry.signature + else None + ), + ) + for entry in module_call_graph + ] + + +@final +class ExportedProgramDeserializer(metaclass=Final): + def __init__(self, expected_opset_version: Optional[Dict[str, int]] = None): + self.expected_opset_version: Dict[str, int] = {} + if expected_opset_version: + self.expected_opset_version.update(expected_opset_version) + if "aten" not in self.expected_opset_version: + self.expected_opset_version["aten"] = torch._C._get_max_operator_version() + + def deserialize_range_constraints( + self, + symbol_name_to_range: Dict[str, symbolic_shapes.ValueRanges], + symbol_name_to_symbol: Dict[str, sympy.Symbol], + ) -> Dict[sympy.Symbol, ValueRanges]: + range_constraints = {} + for k, v in symbol_name_to_range.items(): + if symbol := symbol_name_to_symbol.get(k): + range_constraints[symbol] = v # type: ignore[arg-type] + else: + log.warning(f"Symbol {k} did not appear in the graph that was deserialized") # noqa: G004 + return range_constraints + + def deserialize( + self, + exported_program: ExportedProgram, + state_dict: Union[Dict[str, torch.Tensor], bytes], + constants: Union[Dict[str, torch.Tensor], bytes], + example_inputs: Optional[Union[Tuple[Tuple[torch.Tensor, ...], Dict[str, Any]], bytes]] = None, + ) -> ep.ExportedProgram: + assert isinstance(exported_program, ExportedProgram) + version = exported_program.schema_version + + # TODO(zhxchen17) blocked on thrift schema refactor + if version.major != SCHEMA_VERSION[0] and not (version.major == 0 and version.minor == 0): + raise SerializeError( + f"Serialized schema version {exported_program.schema_version} " + f"does not match our current schema version {SCHEMA_VERSION}." 
+ ) + + symbol_name_to_range = { + k: symbolic_shapes.ValueRanges( + _int_to_sympy_int(v.min_val), _int_to_sympy_int(v.max_val) + ) + for k, v in exported_program.range_constraints.items() + } + res = ( + GraphModuleDeserializer() + .deserialize( + exported_program.graph_module, + state_dict, + constants, + example_inputs, + symbol_name_to_range, + ) + ) + range_constraints = self.deserialize_range_constraints( + symbol_name_to_range, + res.names_to_symbols, + ) + + return ep.ExportedProgram( + root=res.graph_module, + graph=res.graph_module.graph, + graph_signature=res.signature, + state_dict=res.state_dict, # type: ignore[arg-type] + range_constraints=range_constraints, + module_call_graph=res.module_call_graph, + example_inputs=res.example_inputs, + constants=res.constants, + verifiers=[load_verifier(v) for v in exported_program.verifiers], + ) + + +class EnumEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, Enum): + return obj.value + if isinstance(obj, bytes): + return base64.b64encode(obj).decode("utf-8") + return super().default(obj) + + +def _dataclass_to_dict(obj): + if isinstance(obj, _Union): + return {obj.type: _dataclass_to_dict(obj.value)} + elif dataclasses.is_dataclass(obj): + return { + f.name: _dataclass_to_dict(getattr(obj, f.name)) + for f in dataclasses.fields(obj) + if not (f.default is None and getattr(obj, f.name) is None) + } + elif isinstance(obj, list): + return [_dataclass_to_dict(x) for x in obj] + elif isinstance(obj, tuple): + return tuple(_dataclass_to_dict(x) for x in obj) + elif isinstance(obj, dict): + return {k: _dataclass_to_dict(v) for k, v in obj.items()} + else: + return obj + + +def serialize( + exported_program: ep.ExportedProgram, + opset_version: Optional[Dict[str, int]] = None, +) -> SerializedArtifact: + serialized_program = ExportedProgramSerializer(opset_version).serialize( + exported_program + ) + assert isinstance(serialized_program.exported_program, ExportedProgram) + + json_program = json.dumps( + _dataclass_to_dict(serialized_program.exported_program), cls=EnumEncoder + ) + json_bytes = json_program.encode("utf-8") + artifact = SerializedArtifact( + json_bytes, + serialized_program.state_dict, + serialized_program.constants, + serialized_program.example_inputs + ) + return artifact + + +def _dict_to_dataclass(cls, data): + assert not isinstance(cls, str), f"Unresolved class type: '{cls}'." 
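+    # Recursively rebuild the schema dataclass from plain JSON data, dispatching
+    # on the annotated type: Optional unwraps to its non-None member, _Union
+    # re-tags its single set field, dataclasses recurse field-by-field, and
+    # List/Dict recurse on their element/value types.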
+ if typing.get_origin(cls) == typing.Union and type(None) in typing.get_args(cls): + if data is None: + return None + ty_args = typing.get_args(cls) + assert len(ty_args) == 2 + return _dict_to_dataclass(ty_args[0], data) + elif isinstance(cls, type) and issubclass(cls, _Union): + assert isinstance(data, dict) + assert len(data) == 1 + _type = next(iter(data.keys())) + _value = next(iter(data.values())) + assert isinstance(_type, str) + field_type = cls.__annotations__[_type] + return cls.create(**{_type: _dict_to_dataclass(field_type, _value)}) + elif dataclasses.is_dataclass(cls): + obj = cls(**data) # type: ignore[assignment] + type_hints = typing.get_type_hints(cls) + for f in dataclasses.fields(cls): + name = f.name + new_field_obj = _dict_to_dataclass(type_hints[name], getattr(obj, name)) + setattr(obj, name, new_field_obj) + return obj + elif isinstance(data, list): + if len(data) == 0: + return data + d_type = typing.get_args(cls)[0] + return [_dict_to_dataclass(d_type, d) for d in data] + elif isinstance(data, dict): + v_type = typing.get_args(cls)[1] + return {k: _dict_to_dataclass(v_type, v) for k, v in data.items()} + return data + + +def deserialize( + artifact: SerializedArtifact, + expected_opset_version: Optional[Dict[str, int]] = None, +) -> ep.ExportedProgram: + assert isinstance(artifact.exported_program, bytes) + exported_program_str = artifact.exported_program.decode("utf-8") + exported_program_dict = json.loads(exported_program_str) + serialized_exported_program = _dict_to_dataclass(ExportedProgram, exported_program_dict) + return ( + ExportedProgramDeserializer(expected_opset_version) + .deserialize( + serialized_exported_program, + artifact.state_dict, + artifact.constants, + artifact.example_inputs, + ) + ) + + +def _canonicalize_graph( + sorted_inputs, sorted_outputs, graph +) -> Tuple[Graph, Dict[str, str]]: + def _get_argument(a: Argument): + if a.type == "as_none": + return None + elif a.type == "as_tensor": + return a.as_tensor + elif a.type == "as_tensors": + return a.as_tensors + elif a.type == "as_int": + return None + elif a.type == "as_ints": + return None + elif a.type == "as_float": + return None + elif a.type == "as_floats": + return None + elif a.type == "as_string": + return None + elif a.type == "as_strings": + return None + elif a.type == "as_sym_int": + return a.as_sym_int + elif a.type == "as_sym_ints": + return a.as_sym_ints + elif a.type == "as_scalar_type": + return None + elif a.type == "as_memory_format": + return None + elif a.type == "as_layout": + return None + elif a.type == "as_device": + return None + elif a.type == "as_bool": + return None + elif a.type == "as_bools": + return None + elif a.type == "as_sym_bool": + return a.as_sym_bool + elif a.type == "as_sym_bools": + return a.as_sym_bools + elif a.type == "as_graph": + return None + elif a.type == "as_optional_tensors": + return a.as_optional_tensors + elif a.type == "as_custom_obj": + return None + elif a.type == "as_operator": + return None + else: + raise AssertionError(f"Unknown input type to the ExportedProgram: {a}") + + # Stage 1: Reorder named items. 
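+    # The sort below is a deterministic topological sort (Kahn's algorithm with a
+    # min-heap): a node becomes a candidate once all of its producers have been
+    # emitted, and ties are broken by (target, ranks of the input args, original
+    # index) so the output order is stable across semantically identical graphs.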
+ def for_args(f, a): + assert isinstance(a, Argument) + pytree.tree_map(f, _get_argument(a)) + + def sort_nodes(nodes): + @dataclass + class Edges: + outs: List[int] + ins: int + + graph_inputs: Set[str] = set() + def_table: Dict[str, int] = {} + edges: Dict[int, Edges] = {} + candidates: List[Tuple[str, List[Tuple[str, List[int]]], int]] = [] + rank: Dict[str, int] = {} + ret: List[Node] = [] + + def get_name(a) -> Optional[str]: + if a is None: + return None + if isinstance(a, TensorArgument): + return a.name + elif isinstance(a, (SymIntArgument, SymBoolArgument)): + if a.type == "as_name": + return a.as_name + elif a.type in ("as_int", "as_bool"): + return None + else: + raise AssertionError(f"Unknown argument type: {a}") + elif isinstance(a, OptionalTensorArgument): + if a.type == "as_tensor": + return a.as_tensor.name + elif a.type == "as_none": + return None + else: + raise AssertionError(f"Unknown optional tensor type: {a}") + else: + raise AssertionError(f"Unknown argument type: {a}") + + for i in sorted_inputs: + + def add_input(a): + if s := get_name(a): + graph_inputs.add(s) + + for_args(add_input, i) + + for idx, node in enumerate(nodes): + + def add_def(a): + if s := get_name(a): + assert s not in def_table + def_table[s] = idx + + for o in node.outputs: + for_args(add_def, o) + + edges[idx] = Edges([], 0) + + for idx, user in enumerate(nodes): + + def add_edge(a): + if s := get_name(a): + if s not in def_table: + assert s in graph_inputs + return + src = def_table[s] + edges[src].outs.append(idx) + edges[idx].ins += 1 + + for i in user.inputs: + for_args(add_edge, i.arg) + + def add_rank(a): + if s := get_name(a): + assert s not in rank + rank[s] = len(rank) + + def get_rank(a): + if s := get_name(a): + return rank[s] + else: + return -1 + + for i in sorted_inputs: + for_args(add_rank, i) + + def add_candidate(idx: int): + def get_ranks(i): + ranks = [] + for_args(lambda x: ranks.append(get_rank(x)), i) + return ranks + + node = nodes[idx] + args_rank = [(a.name, get_ranks(a.arg)) for a in node.inputs] + heapq.heappush(candidates, (node.target, args_rank, idx)) + + for idx, e in edges.items(): + if e.ins == 0: + add_candidate(idx) + + while len(candidates) > 0: + _, _, idx = heapq.heappop(candidates) + node = nodes[idx] + for o in node.outputs: + for_args(add_rank, o) + ret.append(node) + assert idx in edges + for user in edges[idx].outs: + e = edges[user] + assert e.ins > 0 + e.ins -= 1 + if e.ins == 0: + add_candidate(user) + edges[idx].outs.clear() + + return ret + + sorted_nodes = sort_nodes(graph.nodes) + assert len(sorted_nodes) == len(graph.nodes) + + # Stage 2: Rename nodes. 
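+    # Renaming assigns fresh sequential names ("_0", "_1", ...) in definition
+    # order (inputs first, then node outputs), and `name_table` records the
+    # old -> new mapping so later stages and the caller can rewrite uses.
+    # E.g. an input named "arg0_1" becomes "_0", and every use of "arg0_1" in
+    # node inputs and graph outputs is rewritten to "_0" via `replace_use`.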
+    name_table: Dict[str, str] = {}
+
+    def rename_def(a):
+        def _rename(arg_name, values):
+            new_name = f"_{len(name_table)}"
+            assert arg_name not in name_table
+            name_table[arg_name] = new_name
+            assert arg_name in values
+            values[new_name] = values.pop(arg_name)
+            return new_name
+
+        if a is None:
+            return
+        if isinstance(a, TensorArgument):
+            a.name = _rename(a.name, graph.tensor_values)
+        elif isinstance(a, SymIntArgument):
+            if a.type == "as_name":
+                a.as_name = _rename(a.as_name, graph.sym_int_values)
+        elif isinstance(a, SymBoolArgument):
+            if a.type == "as_name":
+                a.as_name = _rename(a.as_name, graph.sym_bool_values)
+        else:
+            raise AssertionError(f"Unknown argument type: {a}")
+
+    def replace_use(a):
+        if a is None:
+            return
+        if isinstance(a, TensorArgument):
+            a.name = name_table.get(a.name, a.name)
+        elif isinstance(a, SymIntArgument):
+            if a.type == "as_name":
+                a.as_name = name_table.get(a.as_name, a.as_name)
+        elif isinstance(a, SymBoolArgument):
+            if a.type == "as_name":
+                a.as_name = name_table.get(a.as_name, a.as_name)
+        elif isinstance(a, OptionalTensorArgument):
+            if a.type == "as_tensor":
+                a.as_tensor.name = name_table.get(a.as_tensor.name, a.as_tensor.name)
+        else:
+            raise AssertionError(f"Unknown argument type: {a}")
+
+    for i in sorted_inputs:
+        for_args(rename_def, i)
+
+    for n in sorted_nodes:
+        for o in n.outputs:
+            for_args(rename_def, o)
+
+    for n in sorted_nodes:
+        for i in n.inputs:
+            for_args(replace_use, i.arg)
+
+    for o in sorted_outputs:
+        for_args(replace_use, o)
+
+    # Stage 3: Remove unstable fields.
+    for n in sorted_nodes:
+        n.metadata.clear()
+
+    # Stage 4: Aggregate values.
+    sorted_tensor_values = dict(sorted(graph.tensor_values.items(), key=operator.itemgetter(0)))
+    sorted_sym_int_values = dict(
+        sorted(graph.sym_int_values.items(), key=operator.itemgetter(0))
+    )
+    sorted_sym_bool_values = dict(
+        sorted(graph.sym_bool_values.items(), key=operator.itemgetter(0))
+    )
+
+    # Stage 5: Recurse in subgraphs.
+    counter = 0
+    for node in sorted_nodes:
+        for i in node.inputs:
+            a = i.arg
+            if a.type == "as_graph":
+                a.as_graph.graph, _ = _canonicalize_graph(
+                    a.as_graph.graph.inputs, a.as_graph.graph.outputs, a.as_graph.graph
+                )
+                a.as_graph.name = f"_g{counter}"
+                counter += 1
+
+    graph = Graph(
+        inputs=sorted_inputs,
+        outputs=sorted_outputs,
+        nodes=sorted_nodes,
+        tensor_values=sorted_tensor_values,
+        sym_int_values=sorted_sym_int_values,
+        sym_bool_values=sorted_sym_bool_values,
+        is_single_tensor_return=graph.is_single_tensor_return,
+    )
+    return graph, name_table
+
+
+def canonicalize(ep: ExportedProgram) -> ExportedProgram:
+    """
+    Normalize a serialized ExportedProgram, so that different eager programs that
+    share the same semantics get a single representation on disk.
+
+    This function canonicalizes an ExportedProgram by:
+
+    1. Sorting nodes in topological order.
+    2. Renaming nodes so they have unique names.
+    3. Removing unstable fields.
+    4. Aggregating the above program fields.
+    5. Recursing into subgraphs.
+
+    Args:
+        ep (ExportedProgram): The ExportedProgram to canonicalize.
+
+    Returns:
+        ExportedProgram: The canonicalized exported program.
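+
+    Example (a minimal sketch; ``serialized_a`` and ``serialized_b`` are
+    hypothetical serde ``ExportedProgram`` dataclasses obtained from two
+    semantically equivalent eager programs)::
+
+        canonical_a = canonicalize(serialized_a)
+        canonical_b = canonicalize(serialized_b)
+        # The schema classes are dataclasses, so equality is field-wise;
+        # after canonicalization the two should compare equal.
+        assert canonical_a == canonical_b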
+ """ + ep = copy.deepcopy(ep) + + opset_version = dict(sorted(ep.opset_version.items(), key=operator.itemgetter(0))) + range_constraints = dict(sorted(ep.range_constraints.items(), key=operator.itemgetter(0))) + module_call_graph = sorted(ep.graph_module.module_call_graph, key=lambda x: x.fqn) + signature = ep.graph_module.signature + graph = ep.graph_module.graph + + assert len(graph.inputs) == len(signature.input_specs) + assert len(graph.outputs) == len(signature.output_specs) + + def rank_input(inp) -> Tuple[int, Optional[str], int]: + idx, (arg, spec) = inp + assert isinstance(spec, InputSpec) + if spec.type == "user_input": + return 5, None, idx + elif spec.type == "parameter": + return 1, spec.parameter.parameter_name, idx + elif spec.type == "buffer": + return 2, spec.buffer.buffer_name, idx + elif spec.type == "tensor_constant": + return 3, spec.tensor_constant.tensor_constant_name, idx + elif spec.type == "custom_obj": + return 4, spec.custom_obj.custom_obj_name, idx + elif spec.type == "token": + return 0, None, idx + elif spec.type == "constant_input": + return 6, spec.constant_input.name, idx + else: + raise AssertionError(f"Unknown input type: {spec}") + + def rank_output(out) -> Tuple[int, Optional[str], int]: + idx, (arg, spec) = out + assert isinstance(spec, OutputSpec) + if spec.type == "user_output": + return 3, None, idx + elif spec.type == "loss_output": + return 3, None, idx + elif spec.type == "buffer_mutation": + return 1, spec.buffer_mutation.buffer_name, idx + elif spec.type == "gradient_to_parameter": + return 4, spec.gradient_to_parameter.parameter_name, idx + elif spec.type == "gradient_to_user_input": + return 5, None, idx + elif spec.type == "user_input_mutation": + return 2, None, idx + elif spec.type == "token": + return 0, None, idx + else: + raise AssertionError(f"Unknown output type: {spec}") + + sorted_ins = sorted( + enumerate(zip(graph.inputs, signature.input_specs)), key=rank_input + ) + + if len(sorted_ins) > 0: + sorted_inputs, input_specs = zip(*(i for idx, i in sorted_ins)) # type: ignore[assignment] + else: + sorted_inputs = () + input_specs = () + + sorted_outs = sorted( + enumerate(zip(graph.outputs, signature.output_specs)), key=rank_output + ) + sorted_outputs, output_specs = zip(*(i for idx, i in sorted_outs)) # type: ignore[assignment] + + sorted_graph, replace_table = _canonicalize_graph( + sorted_inputs, sorted_outputs, graph + ) + + def replace_input(inp): + assert isinstance(spec, InputSpec) + if spec.type == "user_input": + arg = spec.user_input.arg + if arg.type == "as_tensor": + t = arg.as_tensor + t.name = replace_table[t.name] + elif arg.type == "as_sym_int": + s = arg.as_sym_int + if s.type == "as_name": + s.as_name = replace_table[s.as_name] + elif s.type == "as_int": + pass + else: + raise AssertionError(f"Unknown sym_int type: {s}") + elif arg.type in ( + "as_none", + "as_bool", + "as_int", + "as_float", + "as_string", + "as_custom_obj", + ): + return + else: + raise AssertionError(f"Unknown input type: {arg}") + elif spec.type == "parameter": + t = spec.parameter.arg + t.name = replace_table[t.name] + elif spec.type == "buffer": + t = spec.buffer.arg + t.name = replace_table[t.name] + elif spec.type == "tensor_constant": + t = spec.tensor_constant.arg + t.name = replace_table[t.name] + elif spec.type == "custom_obj": + return + elif spec.type == "token": + tok = spec.token.arg + tok.name = replace_table[tok.name] + elif spec.type == "constant_input": + return + else: + raise AssertionError(f"Unknown input type: {spec}") + + 
def replace_output(out):
+        assert isinstance(spec, OutputSpec)
+        if spec.type == "user_output":
+            arg = spec.user_output.arg
+            if arg.type == "as_tensor":
+                t = arg.as_tensor
+                t.name = replace_table[t.name]
+            elif arg.type == "as_sym_int":
+                s = arg.as_sym_int
+                if s.type == "as_name":
+                    s.as_name = replace_table[s.as_name]
+                elif s.type == "as_int":
+                    pass
+                else:
+                    raise AssertionError(f"Unknown sym_int type: {s}")
+            elif arg.type in ("as_none", "as_int", "as_float", "as_string"):
+                return
+            else:
+                raise AssertionError(f"Unknown output type: {arg}")
+        elif spec.type == "loss_output":
+            t = spec.loss_output.arg
+            t.name = replace_table[t.name]
+        elif spec.type == "buffer_mutation":
+            t = spec.buffer_mutation.arg
+            t.name = replace_table[t.name]
+        elif spec.type == "gradient_to_parameter":
+            t = spec.gradient_to_parameter.arg
+            t.name = replace_table[t.name]
+        elif spec.type == "gradient_to_user_input":
+            g = spec.gradient_to_user_input
+            g.arg.name = replace_table[g.arg.name]
+            g.user_input_name = replace_table[g.user_input_name]
+        elif spec.type == "user_input_mutation":
+            u = spec.user_input_mutation
+            u.arg.name = replace_table[u.arg.name]
+            u.user_input_name = replace_table[u.user_input_name]
+        elif spec.type == "token":
+            tok = spec.token.arg
+            tok.name = replace_table[tok.name]
+        else:
+            raise AssertionError(f"Unknown output type: {spec}")
+
+    for spec in input_specs:
+        replace_input(spec)
+
+    for spec in output_specs:
+        replace_output(spec)
+
+    return ExportedProgram(
+        graph_module=GraphModule(
+            graph=sorted_graph,
+            signature=GraphSignature(
+                input_specs=list(input_specs),
+                output_specs=list(output_specs),
+            ),
+            module_call_graph=module_call_graph,
+        ),
+        opset_version=opset_version,
+        range_constraints=range_constraints,
+        schema_version=ep.schema_version,
+        verifiers=ep.verifiers,
+        torch_version=ep.torch_version,
+    )
+
+
+class ExtensionHandler:
+    """
+    Base class for handling extension operators.
+    """
+    @classmethod
+    def namespace(cls) -> str:
+        raise NotImplementedError(f"{cls.__name__}.namespace() must be implemented")
+
+    @classmethod
+    def to_op_name(cls, op) -> str:
+        raise NotImplementedError(f"{cls.__name__}.to_op_name() must be implemented")
+
+    @classmethod
+    def from_op_name(cls, name: str):
+        raise NotImplementedError(f"{cls.__name__}.from_op_name() must be implemented")
+
+    @classmethod
+    def op_schema(cls, op) -> torch.FunctionSchema:
+        raise NotImplementedError(f"{cls.__name__}.op_schema() must be implemented")
+
+
+def register_extension(
+    op_type: Type[Any],
+    extension_handler: Type[ExtensionHandler],
+):
+    """Register a custom de/serialization method for a node with a non-standard type."""
+    assert issubclass(extension_handler, ExtensionHandler), f"Expected ExtensionHandler, got {extension_handler}."
+    assert op_type not in _serialization_registry, f"{op_type} is already registered."
+    assert isinstance(op_type, type)  # Maybe a good idea to enforce this first.
+    assert not (op_type.__module__.startswith("torch") or op_type.__module__.startswith("builtins"))
+    assert extension_handler.namespace() not in _deserialization_registry
+    _serialization_registry[op_type] = extension_handler
+    _deserialization_registry[extension_handler.namespace()] = extension_handler
+
+
+def _registered_extension_types():
+    return tuple(
+        _serialization_registry.keys()
+    )
+
+
+# Registry to store all custom serialization implementations.
+# The registry maps an operation type to its serialization handler, in its own
+# namespace to avoid conflicts.
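+#
+# A hypothetical usage sketch (illustrative only; `MyCustomObj` and
+# `MyCustomObjHandler` are not part of torch):
+#
+#     class MyCustomObjHandler(ExtensionHandler):
+#         @classmethod
+#         def namespace(cls) -> str:
+#             return "my_extension"
+#
+#         # ...implement to_op_name / from_op_name / op_schema similarly
+#
+#     register_extension(MyCustomObj, MyCustomObjHandler)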
+# Serialization: Op type --> custom handler. +# De-serialization: Namespace --> custom handler. +_serialization_registry: Dict[Type[Any], Type[ExtensionHandler]] = {} +_deserialization_registry: Dict[str, Type[ExtensionHandler]] = {} diff --git a/janus/lib/python3.10/site-packages/torch/_export/serde/union.py b/janus/lib/python3.10/site-packages/torch/_export/serde/union.py new file mode 100644 index 0000000000000000000000000000000000000000..b129e8dd9a89ef4870d7ef3dc724aca8ccae3a43 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/serde/union.py @@ -0,0 +1,70 @@ +# mypy: allow-untyped-defs +import functools +from dataclasses import fields +from typing import Hashable, Set + + +class _UnionTag(str): + _cls: Hashable + + @staticmethod + def create(t, cls): + tag = _UnionTag(t) + assert not hasattr(tag, "_cls") + tag._cls = cls + return tag + + def __eq__(self, cmp) -> bool: + assert isinstance(cmp, str) + other = str(cmp) + assert other in _get_field_names( + self._cls + ), f"{other} is not a valid tag for {self._cls}. Available tags: {_get_field_names(self._cls)}" + return str(self) == other + + def __hash__(self): + return hash(str(self)) + + +@functools.lru_cache(maxsize=None) +def _get_field_names(cls) -> Set[str]: + return {f.name for f in fields(cls)} + + +class _Union: + _type: _UnionTag + + @classmethod + def create(cls, **kwargs): + assert len(kwargs) == 1 + obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs}) # type: ignore[arg-type] + obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls) + return obj + + def __post_init__(self): + assert not any(f.name in ("type", "_type", "create", "value") for f in fields(self)) # type: ignore[arg-type, misc] + + @property + def type(self) -> str: + try: + return self._type + except AttributeError as e: + raise RuntimeError( + f"Please use {type(self).__name__}.create to instantiate the union type." 
+ ) from e + + @property + def value(self): + return getattr(self, self.type) + + def __getattribute__(self, name): + attr = super().__getattribute__(name) + if attr is None and name in _get_field_names(type(self)) and name != self.type: # type: ignore[arg-type] + raise AttributeError(f"Field {name} is not set.") + return attr + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return f"{type(self).__name__}({self.type}={getattr(self, self.type)})" diff --git a/janus/lib/python3.10/site-packages/torch/jit/__init__.py b/janus/lib/python3.10/site-packages/torch/jit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ec776b9047500742154e8ab4e8e32b47e6cdfaa7 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/__init__.py @@ -0,0 +1,294 @@ +# mypy: allow-untyped-defs +import warnings +from contextlib import contextmanager +from typing import Any, Iterator + +import torch._C + +# These are imported so users can access them from the `torch.jit` module +from torch._jit_internal import ( + _Await, + _drop, + _IgnoreContextManager, + _isinstance, + _overload, + _overload_method, + export, + Final, + Future, + ignore, + is_scripting, + unused, +) +from torch.jit._async import fork, wait +from torch.jit._await import _awaitable, _awaitable_nowait, _awaitable_wait +from torch.jit._decomposition_utils import _register_decomposition +from torch.jit._freeze import freeze, optimize_for_inference, run_frozen_optimizations +from torch.jit._fuser import ( + fuser, + last_executed_optimized_graph, + optimized_execution, + set_fusion_strategy, +) +from torch.jit._ir_utils import _InsertPoint +from torch.jit._script import ( + _ScriptProfile, + _unwrap_optional, + Attribute, + CompilationUnit, + interface, + RecursiveScriptClass, + RecursiveScriptModule, + script, + script_method, + ScriptFunction, + ScriptModule, + ScriptWarning, +) +from torch.jit._serialization import ( + jit_module_from_flatbuffer, + load, + save, + save_jit_module_to_flatbuffer, +) +from torch.jit._trace import ( + _flatten, + _get_trace_graph, + _script_if_tracing, + _unique_state_dict, + is_tracing, + ONNXTracedModule, + TopLevelTracedModule, + trace, + trace_module, + TracedModule, + TracerWarning, + TracingCheckError, +) +from torch.utils import set_module + + +__all__ = [ + "Attribute", + "CompilationUnit", + "Error", + "Future", + "ScriptFunction", + "ScriptModule", + "annotate", + "enable_onednn_fusion", + "export", + "export_opnames", + "fork", + "freeze", + "interface", + "ignore", + "isinstance", + "load", + "onednn_fusion_enabled", + "optimize_for_inference", + "save", + "script", + "script_if_tracing", + "set_fusion_strategy", + "strict_fusion", + "trace", + "trace_module", + "unused", + "wait", +] + +# For backwards compatibility +_fork = fork +_wait = wait +_set_fusion_strategy = set_fusion_strategy + + +def export_opnames(m): + r""" + Generate new bytecode for a Script module. + + Returns what the op list would be for a Script Module based off the current code base. + + If you have a LiteScriptModule and want to get the currently present + list of ops call _export_operator_list instead. + """ + return torch._C._export_opnames(m._c) + + +# torch.jit.Error +Error = torch._C.JITException +set_module(Error, "torch.jit") +# This is not perfect but works in common cases +Error.__name__ = "Error" +Error.__qualname__ = "Error" + + +# for use in python if using annotate +def annotate(the_type, the_value): + """Use to give type of `the_value` in TorchScript compiler. 
+
+    This method is a pass-through function that returns `the_value`, used to hint to the
+    TorchScript compiler the type of `the_value`. It is a no-op when running outside of TorchScript.
+
+    Though TorchScript can infer correct types for most Python expressions, there are some cases where
+    type inference can be wrong, including:
+
+    - Empty containers like `[]` and `{}`, which TorchScript assumes to be a container of `Tensor`
+    - Optional types like `Optional[T]` that are assigned a valid value of type `T`; TorchScript
+      would assume it is type `T` rather than `Optional[T]`
+
+    Note that `annotate()` does not help in the `__init__` method of `torch.nn.Module` subclasses because it
+    is executed in eager mode. To annotate types of `torch.nn.Module` attributes,
+    use :meth:`~torch.jit.Attribute` instead.
+
+    Example:
+
+    .. testcode::
+
+        import torch
+        from typing import Dict
+
+        @torch.jit.script
+        def fn():
+            # Telling TorchScript that this empty dictionary is a (str -> int) dictionary
+            # instead of the default dictionary type of (str -> Tensor).
+            d = torch.jit.annotate(Dict[str, int], {})
+
+            # Without `torch.jit.annotate` above, the following statement would fail because of
+            # type mismatch.
+            d["name"] = 20
+
+    .. testcleanup::
+
+        del fn
+
+    Args:
+        the_type: Python type that should be passed to TorchScript compiler as type hint for `the_value`
+        the_value: Value or expression to hint type for.
+
+    Returns:
+        `the_value` is passed back as return value.
+    """
+    return the_value
+
+
+def script_if_tracing(fn):
+    """
+    Compiles ``fn`` when it is first called during tracing.
+
+    ``torch.jit.script`` has a non-negligible start-up time when it is first called due to
+    lazy initialization of many compiler builtins. Therefore you should not use
+    it in library code. However, you may want to have parts of your library work
+    in tracing even if they use control flow. In these cases, you should use
+    ``@torch.jit.script_if_tracing`` to substitute for
+    ``torch.jit.script``.
+
+    Args:
+        fn: A function to compile.
+
+    Returns:
+        If called during tracing, a :class:`ScriptFunction` created by `torch.jit.script` is returned.
+        Otherwise, the original function `fn` is returned.
+    """
+    return _script_if_tracing(fn)
+
+
+# for torch.jit.isinstance
+def isinstance(obj, target_type):
+    """
+    Provide container type refinement in TorchScript.
+
+    It can refine parameterized containers of the List, Dict, Tuple, and Optional types. E.g. ``List[str]``,
+    ``Dict[str, List[torch.Tensor]]``, ``Optional[Tuple[int, str, int]]``. It can also
+    refine basic types such as bools and ints that are available in TorchScript.
+
+    Args:
+        obj: object to refine the type of
+        target_type: type to try to refine obj to
+    Returns:
+        ``bool``: True if obj was successfully refined to the type of target_type,
+        False otherwise with no new type refinement
+
+    Example (using ``torch.jit.isinstance`` for type refinement):
+
+    .. testcode::
+
+        import torch
+        from typing import Any, Dict, List
+
+        class MyModule(torch.nn.Module):
+            def __init__(self) -> None:
+                super().__init__()
+
+            def forward(self, input: Any):  # note the Any type
+                if torch.jit.isinstance(input, List[torch.Tensor]):
+                    for t in input:
+                        y = t.clamp(0, 0.5)
+                elif torch.jit.isinstance(input, Dict[str, str]):
+                    for val in input.values():
+                        print(val)
+
+        m = torch.jit.script(MyModule())
+        x = [torch.rand(3, 3), torch.rand(4, 3)]
+        m(x)
+        y = {"key1": "val1", "key2": "val2"}
+        m(y)
+    """
+    return _isinstance(obj, target_type)
+
+
+class strict_fusion:
+    """
+    Give errors if not all nodes have been fused in inference, or symbolically differentiated in training.
+
+    Example:
+        Forcing fusion of additions.
+
+        .. code-block:: python
+
+            @torch.jit.script
+            def foo(x):
+                with torch.jit.strict_fusion():
+                    return x + x + x
+
+    """
+
+    def __init__(self) -> None:
+        if not torch._jit_internal.is_scripting():
+            warnings.warn("Only works in script mode")
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, type: Any, value: Any, tb: Any) -> None:
+        pass
+
+
+# Context manager for globally hiding source ranges when printing graphs.
+# Note that these functions are exposed to Python as static members of the
+# Graph class, so mypy checks need to be skipped.
+@contextmanager
+def _hide_source_ranges() -> Iterator[None]:
+    old_enable_source_ranges = torch._C.Graph.global_print_source_ranges  # type: ignore[attr-defined]
+    try:
+        torch._C.Graph.set_global_print_source_ranges(False)  # type: ignore[attr-defined]
+        yield
+    finally:
+        torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges)  # type: ignore[attr-defined]
+
+
+def enable_onednn_fusion(enabled: bool):
+    """Enable or disable onednn JIT fusion based on the parameter `enabled`."""
+    torch._C._jit_set_llga_enabled(enabled)
+
+
+def onednn_fusion_enabled():
+    """Return whether onednn JIT fusion is enabled."""
+    return torch._C._jit_llga_enabled()
+
+
+del Any
+
+if not torch._C._jit_init():
+    raise RuntimeError("JIT initialization failed")
diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc65b787713336cc8a796e385b0f060b42b6d2f2
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_await.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_await.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a12ba05960b4f794e6e7ba30da5c74f9ab74bc6
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_await.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_decomposition_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_decomposition_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7631f4e0a510817dc735ef3c85e64cfcf93c6e2
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_decomposition_utils.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_decompositions.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_decompositions.cpython-310.pyc
new file mode 100644
index
0000000000000000000000000000000000000000..501f391e7d2b036420b94db04e5a190b99d0699b Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_decompositions.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_freeze.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_freeze.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ddbc21bd9495f5ab0731933218ebbfeba641c9d Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_freeze.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_fuser.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_fuser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58bda86a4c234523c2f72867d58437971048a1a8 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_fuser.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_ir_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_ir_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d101f5e3a2dbe5b190547ee53e5e8387911e99f Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_ir_utils.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_logging.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cba859ed6c0f26d85772578537b00d06795071e5 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_logging.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_pickle.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_pickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f6322d960a77bd85cc88c1fa19fa3f1338b81cf Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_pickle.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_recursive.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_recursive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fc8938987fab2cadc45ac24ae5f6aec8088a382 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_recursive.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_script.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_script.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1012370c866a6d3ec8418c6bdfb596176fcc0f47 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_script.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_serialization.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_serialization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc33538fbc957f2625147e2bac514dc1ed82fcb4 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_serialization.cpython-310.pyc differ diff --git 
a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_trace.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_trace.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e78d4883e07884a457e5f7bdfa7f4473c27964d8
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/_trace.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/annotations.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/annotations.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e29cb2d87b9329a0fe003036d4b5fb29163734b4
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/annotations.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/generate_bytecode.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/generate_bytecode.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32cdb54c66740c74c2ca380d3ea1d75d15e9ba1a
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/generate_bytecode.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/jit/__pycache__/supported_ops.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/supported_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb06cdc51ed1961a1048c0a37257af3a698f1062
Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/__pycache__/supported_ops.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/torch/jit/_async.py b/janus/lib/python3.10/site-packages/torch/jit/_async.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ccde01d4a425632fa942df18300110620c0248a
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/jit/_async.py
@@ -0,0 +1,102 @@
+# mypy: allow-untyped-defs
+"""Async API.
+
+This module contains the API for parallelism in TorchScript, notably:
+    * torch.jit.fork
+    * torch.jit.wait
+
+This is not intended to be imported directly; please use the exposed
+functionalities in `torch.jit`.
+"""
+
+import torch
+from torch._jit_internal import Future
+from torch.jit._builtins import _register_builtin
+from torch.utils import set_module
+
+
+set_module(Future, "torch.jit")
+
+
+def fork(func, *args, **kwargs):
+    r"""
+    Create an asynchronous task executing `func` and return a reference to the value of the result of this execution.
+
+    `fork` will return immediately, so the return value of `func` may not have been computed yet. To force completion
+    of the task and access the return value, invoke `torch.jit.wait` on the Future. `fork` invoked
+    with a `func` which returns `T` is typed as `torch.jit.Future[T]`. `fork` calls can be arbitrarily
+    nested, and may be invoked with positional and keyword arguments.
+    Asynchronous execution will only occur when run in TorchScript. If run in pure Python,
+    `fork` will not execute in parallel. `fork` will also not execute in parallel when invoked
+    while tracing, however the `fork` and `wait` calls will be captured in the exported IR Graph.
+
+    .. warning::
+        `fork` tasks will execute non-deterministically. We recommend only spawning
+        parallel fork tasks for pure functions that do not modify their inputs,
+        module attributes, or global state.
+
+    Args:
+        func (callable or torch.nn.Module): A Python function or `torch.nn.Module`
+            that will be invoked. If executed in TorchScript, it will execute asynchronously,
+            otherwise it will not. Traced invocations of fork will be captured in the IR.
+        ``*args``, ``**kwargs``: arguments to invoke `func` with.
+    Returns:
+        `torch.jit.Future[T]`: a reference to the execution of `func`. The value `T`
+        can only be accessed by forcing completion of `func` through `torch.jit.wait`.
+
+    Example (fork a free function):
+
+    .. code-block:: python
+
+        import torch
+        from torch import Tensor
+
+        def foo(a: Tensor, b: int) -> Tensor:
+            return a + b
+
+        def bar(a):
+            fut: torch.jit.Future[Tensor] = torch.jit.fork(foo, a, b=2)
+            return torch.jit.wait(fut)
+
+        script_bar = torch.jit.script(bar)
+        input = torch.tensor(2)
+        # only the scripted version executes asynchronously
+        assert script_bar(input) == bar(input)
+        # trace is not run asynchronously, but fork is captured in IR
+        graph = torch.jit.trace(bar, (input,)).graph
+        assert "fork" in str(graph)
+
+    Example (fork a module method):
+
+    .. code-block:: python
+
+        import torch
+        from torch import Tensor
+
+        class AddMod(torch.nn.Module):
+            def forward(self, a: Tensor, b: int):
+                return a + b
+
+        class Mod(torch.nn.Module):
+            def __init__(self) -> None:
+                super().__init__()
+                self.mod = AddMod()
+
+            def forward(self, input):
+                fut = torch.jit.fork(self.mod, input, b=2)
+                return torch.jit.wait(fut)
+
+        input = torch.tensor(2)
+        mod = Mod()
+        assert mod(input) == torch.jit.script(mod).forward(input)
+    """
+    return torch._C.fork(func, *args, **kwargs)
+
+
+def wait(future):
+    r"""
+    Force completion of a `torch.jit.Future[T]` asynchronous task, returning the result of the task.
+
+    See :func:`~fork` for docs and examples.
+    Args:
+        future (torch.jit.Future[T]): an asynchronous task reference, created through `torch.jit.fork`
+    Returns:
+        `T`: the return value of the completed task
+    """
+    return torch._C.wait(future)
+
+
+_register_builtin(wait, "aten::wait")
diff --git a/janus/lib/python3.10/site-packages/torch/jit/_await.py b/janus/lib/python3.10/site-packages/torch/jit/_await.py
new file mode 100644
index 0000000000000000000000000000000000000000..b00da7b3384b7a263aeab613b3e613c4bd3c3667
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/jit/_await.py
@@ -0,0 +1,27 @@
+# mypy: allow-untyped-defs
+import torch
+from torch._jit_internal import _Await
+from torch.jit._builtins import _register_builtin
+from torch.utils import set_module
+
+
+set_module(_Await, "torch.jit")
+
+
+def _awaitable(func, *args, **kwargs):
+    r"""Create an Await object that will call the specified function with the specified args when the result is requested."""
+    return torch._C._awaitable(func, *args, **kwargs)
+
+
+def _awaitable_wait(aw):
+    r"""Request the result of execution; if the Await is not completed yet, the func will be called immediately."""
+    return torch._C._awaitable_wait(aw)
+
+
+def _awaitable_nowait(o):
+    r"""Create a completed Await with the specified result."""
+    return torch._C._awaitable_nowait(o)
+
+
+_register_builtin(_awaitable_wait, "prim::awaitable_wait")
+_register_builtin(_awaitable_nowait, "prim::awaitable_nowait")
diff --git a/janus/lib/python3.10/site-packages/torch/jit/_builtins.py b/janus/lib/python3.10/site-packages/torch/jit/_builtins.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3edc688d9e14b33eccc146c91f171f90af8ab26
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/jit/_builtins.py
@@ -0,0 +1,193 @@
+# mypy:
allow-untyped-defs +import cmath +import math +import warnings +from collections import OrderedDict +from typing import Dict, Optional + +import torch +import torch.backends.cudnn as cudnn +from torch.nn.modules.utils import ( + _list_with_default, + _pair, + _quadruple, + _single, + _triple, +) + + +_builtin_table: Optional[Dict[int, str]] = None + +_modules_containing_builtins = (torch, torch._C._nn, torch._C._fft, torch._C._linalg, torch._C._nested, torch._C._sparse, torch._C._special) # type: ignore[attr-defined] # noqa: B950 + +_builtin_ops = [ + # Pairs of (function, op_name) + (_pair, "aten::_pair"), + (_quadruple, "aten::_quadruple"), + (_single, "aten::_single"), + (_triple, "aten::_triple"), + (_list_with_default, "aten::list_with_default"), + (OrderedDict, "aten::dict"), + (dict, "aten::dict"), + (cudnn.is_acceptable, "aten::cudnn_is_acceptable"), + (math.ceil, "aten::ceil"), + (math.copysign, "aten::copysign"), + (math.erf, "aten::erf"), + (math.erfc, "aten::erfc"), + (math.exp, "aten::exp"), + (math.expm1, "aten::expm1"), + (math.fabs, "aten::fabs"), + (math.floor, "aten::floor"), + (math.gamma, "aten::gamma"), + (math.lgamma, "aten::lgamma"), + (math.log, "aten::log"), + (math.log10, "aten::log10"), + (math.log1p, "aten::log1p"), + (math.pow, "aten::pow"), + (math.sqrt, "aten::sqrt"), + (math.isnan, "aten::isnan"), + (math.asinh, "aten::asinh"), + (math.atanh, "aten::atanh"), + (math.cosh, "aten::cosh"), + (math.sinh, "aten::sinh"), + (math.tanh, "aten::tanh"), + (math.acos, "aten::acos"), + (math.asin, "aten::asin"), + (math.atan, "aten::atan"), + (math.atan2, "aten::atan2"), + (math.cos, "aten::cos"), + (math.sin, "aten::sin"), + (math.tan, "aten::tan"), + (math.asinh, "aten::asinh"), + (math.atanh, "aten::atanh"), + (math.acosh, "aten::acosh"), + (math.fmod, "aten::fmod"), + (math.modf, "aten::modf"), + (math.factorial, "aten::factorial"), + (math.frexp, "aten::frexp"), + (math.isinf, "aten::isinf"), + (math.degrees, "aten::degrees"), + (math.radians, "aten::radians"), + (cmath.isnan, "aten::isnan"), + (cmath.isfinite, "aten::isfinite"), + (cmath.isinf, "aten::isinf"), + (cmath.phase, "aten::angle"), + (cmath.rect, "aten::polar"), + (cmath.log, "aten::log"), + (cmath.log10, "aten::log10"), + (cmath.sqrt, "aten::sqrt"), + (cmath.exp, "aten::exp"), + (cmath.sin, "aten::sin"), + (cmath.tan, "aten::tan"), + (cmath.cos, "aten::cos"), + (cmath.asin, "aten::asin"), + (cmath.acos, "aten::acos"), + (cmath.atan, "aten::atan"), + (cmath.sinh, "aten::sinh"), + (cmath.cosh, "aten::cosh"), + (cmath.tanh, "aten::tanh"), + (cmath.asinh, "aten::asinh"), + (cmath.acosh, "aten::acosh"), + (cmath.atanh, "aten::atanh"), + (math.ldexp, "aten::ldexp"), + (torch._assert, "aten::_assert"), + (torch.autograd.grad, "aten::grad"), + (torch.autograd.backward, "aten::backward"), + (torch._C._infer_size, "aten::_infer_size"), + (torch.nn.functional._no_grad_embedding_renorm_, "aten::_no_grad_embedding_renorm_"), # type: ignore[attr-defined] + (torch.nn.functional.assert_int_or_pair, "aten::_assert_int_or_pair"), + (torch.nn.init._no_grad_fill_, "aten::_no_grad_fill_"), + (torch.nn.init._no_grad_normal_, "aten::_no_grad_normal_"), + (torch.nn.init._no_grad_uniform_, "aten::_no_grad_uniform_"), + (torch.nn.init._no_grad_zero_, "aten::_no_grad_zero_"), + (torch._C._get_tracing_state, "aten::_get_tracing_state"), + (torch._C._get_cpu_capability, "aten::_get_cpu_capability"), + (warnings.warn, "aten::warn"), + (torch._VF.stft, "aten::stft"), # type: ignore[attr-defined] + (torch._VF.istft, "aten::istft"), # 
type: ignore[attr-defined]
+    (torch._VF.cdist, "aten::cdist"),  # type: ignore[attr-defined]
+    (torch._VF.norm, "aten::norm"),  # type: ignore[attr-defined]
+    (torch._VF.unique_dim, "aten::unique_dim"),
+    (torch._VF.unique_consecutive, "aten::unique_consecutive"),  # type: ignore[attr-defined]
+    (torch._VF.nuclear_norm, "aten::nuclear_norm"),
+    (torch._VF.frobenius_norm, "aten::frobenius_norm"),
+    (torch._VF.tensordot, "aten::tensordot"),  # type: ignore[attr-defined]
+]
+
+# ops in torch.functional are bound to torch
+# in these cases, we want to resolve the functions to their Python implementations
+# instead of looking up a builtin "aten::" schema
+
+
+def _gen_torch_functional_registered_ops():
+    # eventually ops should encompass all of torch/functional.py (torch.functional.__all__),
+    # but we are currently only able to compile some of the functions. Additionally,
+    # some functions directly map to their aten:: implementations.
+    # TODO: add support for more ops
+    ops = [
+        "stft",
+        "istft",
+        "lu",
+        "cdist",
+        "norm",
+        "unique",
+        "unique_consecutive",
+        "tensordot",
+    ]
+    return {getattr(torch.functional, name) for name in ops}
+
+
+_functional_registered_ops = _gen_torch_functional_registered_ops()
+
+
+def _is_special_functional_bound_op(fn):
+    return fn in _functional_registered_ops
+
+
+# lazily built to ensure the correct initialization order
+def _get_builtin_table():
+    global _builtin_table
+    if _builtin_table is not None:
+        return _builtin_table
+    _builtin_table = {}
+
+    def register_all(mod):
+        for name in dir(mod):
+            v = getattr(mod, name)
+            if (
+                callable(v)
+                and not _is_special_functional_bound_op(v)
+                and v is not torch.no_grad
+                and v is not torch.autocast
+            ):
+                # Fixup inconsistency in segment_reduce
+                if name == "_segment_reduce":
+                    name = name[1:]
+                _builtin_ops.append((v, "aten::" + name))
+
+    for mod in _modules_containing_builtins:
+        register_all(mod)
+
+    _builtin_ops.append((math.gcd, "aten::gcd"))
+    _builtin_ops.append((math.isfinite, "aten::isfinite"))
+    _builtin_ops.append((math.remainder, "aten::mathremainder"))  # type: ignore[attr-defined]
+
+    import torch.distributed.autograd as dist_autograd
+
+    if dist_autograd.is_available():
+        _builtin_ops.append((dist_autograd.get_gradients, "aten::get_gradients"))
+        _builtin_ops.append((dist_autograd.backward, "aten::dist_backward"))
+
+    # populate the _builtin_table from _builtin_ops
+    for builtin, aten_op in _builtin_ops:
+        _builtin_table[id(builtin)] = aten_op
+
+    return _builtin_table
+
+
+def _register_builtin(fn, op):
+    _get_builtin_table()[id(fn)] = op
+
+
+def _find_builtin(fn):
+    return _get_builtin_table().get(id(fn))
diff --git a/janus/lib/python3.10/site-packages/torch/jit/_check.py b/janus/lib/python3.10/site-packages/torch/jit/_check.py
new file mode 100644
index 0000000000000000000000000000000000000000..f708ee87f3089a199232a353572cdd84258e21c2
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/jit/_check.py
@@ -0,0 +1,249 @@
+# mypy: allow-untyped-defs
+import ast
+import inspect
+import textwrap
+import warnings
+
+import torch
+
+
+class AttributeTypeIsSupportedChecker(ast.NodeVisitor):
+    """Check the ``__init__`` method of a given ``nn.Module``.
+
+    It ensures that all instance-level attributes can be properly initialized.
+
+    Specifically, we do type inference based on attribute values...even
+    if the attribute in question has already been typed using
+    Python3-style annotations or ``torch.jit.annotate``. This means that
+    setting an instance-level attribute to ``[]`` (for ``List``),
+    ``{}`` (for ``Dict``), or ``None`` (for ``Optional``) isn't enough
+    information for us to properly initialize that attribute.
+
+    An object of this class can walk a given ``nn.Module``'s AST and
+    determine if it meets our requirements or not.
+
+    Known limitations
+    1. We can only check the AST nodes for certain constructs; we can't
+    ``eval`` arbitrary expressions. This means that function calls,
+    class instantiations, and complex expressions that resolve to one of
+    the "empty" values specified above will NOT be flagged as
+    problematic.
+    2. We match on string literals, so if the user decides to use a
+    non-standard import (e.g. `from typing import List as foo`), we
+    won't catch it.
+
+    Example:
+        .. code-block:: python
+
+            class M(torch.nn.Module):
+                def fn(self):
+                    return []
+
+                def __init__(self) -> None:
+                    super().__init__()
+                    self.x: List[int] = []
+
+                def forward(self, x: List[int]):
+                    self.x = x
+                    return 1
+
+        The above code will pass the ``AttributeTypeIsSupportedChecker``
+        check since we have a function call in ``__init__``. However,
+        it will still fail later with the ``RuntimeError`` "Tried to set
+        nonexistent attribute: x. Did you forget to initialize it in
+        __init__()?".
+
+    Args:
+        nn_module - The instance of ``torch.nn.Module`` whose
+            ``__init__`` method we wish to check
+    """
+
+    def check(self, nn_module: torch.nn.Module) -> None:
+        source_lines = inspect.getsource(nn_module.__class__.__init__)
+
+        # Ignore comments no matter the indentation
+        def is_useless_comment(line):
+            line = line.strip()
+            return line.startswith("#") and not line.startswith("# type:")
+
+        source_lines = "\n".join(
+            [l for l in source_lines.split("\n") if not is_useless_comment(l)]
+        )
+
+        # This AST only contains the `__init__` method of the nn.Module
+        init_ast = ast.parse(textwrap.dedent(source_lines))
+
+        # Get items annotated in the class body
+        self.class_level_annotations = list(nn_module.__annotations__.keys())
+
+        # Flag for later
+        self.visiting_class_level_ann = False
+
+        self.visit(init_ast)
+
+    def _is_empty_container(self, node: ast.AST, ann_type: str) -> bool:
+        if ann_type == "List":
+            # Assigning `[]` to a `List` type gives you a Node where
+            # value=List(elts=[], ctx=Load())
+            if not isinstance(node, ast.List):
+                return False
+            if node.elts:
+                return False
+        elif ann_type == "Dict":
+            # Assigning `{}` to a `Dict` type gives you a Node where
+            # value=Dict(keys=[], values=[])
+            if not isinstance(node, ast.Dict):
+                return False
+            if node.keys:
+                return False
+        elif ann_type == "Optional":
+            # Assigning `None` to an `Optional` type gives you a
+            # Node where value=Constant(value=None, kind=None)
+            if not isinstance(node, ast.Constant):
+                return False
+            if node.value:  # type: ignore[attr-defined]
+                return False
+
+        return True
+
+    def visit_Assign(self, node):
+        """Store assignment state when assigning to a Call Node.
+
+        If we're visiting a Call Node (the right-hand side of an
+        assignment statement), we won't be able to check the variable
+        that we're assigning to (the left-hand side of an assignment).
+        Because of this, we need to store this state in visit_Assign.
+        (Luckily, we only have to do this if we're assigning to a Call
+        Node, i.e. ``torch.jit.annotate``. If we're using normal Python
+        annotations, we'll be visiting an AnnAssign Node, which has its
+        target built in.)
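+
+        Example of the kind of statement this handles (illustrative only)::
+
+            # `self.x` is annotated in the class body; the RHS is a Call Node
+            self.x = torch.jit.annotate(List[int], [])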
+ """ + try: + if ( + isinstance(node.value, ast.Call) + and node.targets[0].attr in self.class_level_annotations + ): + self.visiting_class_level_ann = True + except AttributeError: + return + self.generic_visit(node) + self.visiting_class_level_ann = False + + def visit_AnnAssign(self, node): + """Visit an AnnAssign node in an ``nn.Module``'s ``__init__`` method. + + It checks if it conforms to our attribute annotation rules.""" + # If we have a local variable + try: + if node.target.value.id != "self": + return + except AttributeError: + return + + # If we have an attribute that's already been annotated at the + # class level + if node.target.attr in self.class_level_annotations: + return + + # TODO @ansley: add `Union` once landed + + # NB: Even though `Tuple` is a "container", we don't want to + # check for it here. `Tuple` functions as an type with an + # "infinite" number of subtypes, in the sense that you can have + # `Tuple[())]`, `Tuple[T1]`, `Tuple[T2]`, `Tuple[T1, T2]`, + # `Tuple[T2, T1]` and so on, and none of these subtypes can be + # used in place of the other. Therefore, assigning an empty + # tuple in `__init__` CORRECTLY means that that variable + # cannot be reassigned later to a non-empty tuple. Same + # deal with `NamedTuple` + + containers = {"List", "list", "Dict", "dict", "Optional"} + + # If we're not evaluating one of the specified problem types + try: + if node.annotation.value.id not in containers: + return + except AttributeError: + # To evaluate a base type (`str`, `int`, etc.), we would + # have needed to get the name through `node.annotation.id` + # instead of `node.annotation.value.id`. Seems that we're + # not evaluating one of our "containers" + return + + # Check if the assigned variable is empty + ann_type = node.annotation.value.id + if not self._is_empty_container(node.value, ann_type): + return + + warnings.warn( + "The TorchScript type system doesn't support " + "instance-level annotations on empty non-base " + "types in `__init__`. Instead, either 1) use a " + "type annotation in the class body, or 2) wrap " + "the type in `torch.jit.Attribute`." + ) + + def visit_Call(self, node): + """Determine if a Call node is 'torch.jit.annotate' in __init__. + + Visit a Call node in an ``nn.Module``'s ``__init__`` + method and determine if it's ``torch.jit.annotate``. If so, + see if it conforms to our attribute annotation rules. + """ + # If we have an attribute that's already been annotated at the + # class level + if self.visiting_class_level_ann: + return + + # If this isn't a call to `torch.jit.annotate` + try: + if ( + node.func.value.value.id != "torch" + or node.func.value.attr != "jit" + or node.func.attr != "annotate" + ): + self.generic_visit(node) + elif ( + node.func.value.value.id != "jit" or node.func.value.attr != "annotate" + ): + self.generic_visit(node) + except AttributeError: + # Looks like we didn't even have the right node structure + # to check for `torch.jit.annotate` in the first place + self.generic_visit(node) + + # Invariant: we have a `torch.jit.annotate` or a + # `torch.annotate` call + + # A Call Node for `torch.jit.annotate` should have an `args` + # list of length 2 where args[0] represents the annotation and + # args[1] represents the actual value + if len(node.args) != 2: + return + + if not isinstance(node.args[0], ast.Subscript): + return + + # See notes in `visit_AnnAssign` r.e. 
containers + + containers = {"List", "Dict", "Optional"} + + try: + ann_type = node.args[0].value.id # type: ignore[attr-defined] + except AttributeError: + return + + if ann_type not in containers: + return + + # Check if the assigned variable is empty + if not self._is_empty_container(node.args[1], ann_type): + return + + warnings.warn( + "The TorchScript type system doesn't support " + "instance-level annotations on empty non-base " + "types in `__init__`. Instead, either 1) use a " + "type annotation in the class body, or 2) wrap " + "the type in `torch.jit.Attribute`." + ) diff --git a/janus/lib/python3.10/site-packages/torch/jit/_dataclass_impls.py b/janus/lib/python3.10/site-packages/torch/jit/_dataclass_impls.py new file mode 100644 index 0000000000000000000000000000000000000000..2dc1dfba076f254c9fb4e4ca20fb7acff3246a7d --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_dataclass_impls.py @@ -0,0 +1,190 @@ +# mypy: allow-untyped-defs +# Functions for synthesizing magic methods for JIT-compiled dataclasses +import ast +import dataclasses +import inspect +import os +from functools import partial +from typing import Callable, Dict, List + +from torch._jit_internal import FAKE_FILENAME_PREFIX, is_optional +from torch._sources import ParsedDef, SourceContext + + +def _get_fake_filename(cls, method_name): + return os.path.join(FAKE_FILENAME_PREFIX, cls.__name__, method_name) + + +def compose_fn(cls, name: str, body_lines: List[str], signature: str) -> ParsedDef: + body = "\n".join(f" {b}" for b in body_lines) + decl = f"def {name}{signature}:\n{body}" + + # Parse the function declaration + try: + py_ast = ast.parse(decl) + except SyntaxError as e: + # This should only happen if there's some unforeseeable change + # in the dataclasses module that makes our synthesized code fail + raise RuntimeError( + f"TorchScript failed to synthesize dataclass method '{name}' for class '{cls.__name__}'. " + "Please file a bug report at " + ) from e + fake_filename = _get_fake_filename(cls, name) + # Parse the function + return ParsedDef( + py_ast, + ctx=SourceContext( + source=decl, filename=fake_filename, file_lineno=0, leading_whitespace_len=0 + ), + source=decl, + filename=fake_filename, + file_lineno=0, + ) + + +def synthesize__init__(cls) -> ParsedDef: + # Supporting default factories in the way that people expect would sort of require us to + # allow compiling lambda functions, which is not currently supported. + if any( + field.default_factory is not dataclasses.MISSING + for field in dataclasses.fields(cls) + ): + raise NotImplementedError( + "Default factory initializers are not supported in TorchScript dataclasses" + ) + + # Simply read off the generated __init__ signature from CPython's implementation. It'll be + # almost correct except for InitVar annotations, which we need to handle specially. 
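+ # For example (a hypothetical dataclass, for illustration only): given
+ # @dataclass
+ # class Foo:
+ # x: int
+ # scale: dataclasses.InitVar[float] = 1.0
+ # CPython annotates the generated __init__'s `scale` parameter with
+ # InitVar[float]; the loop below unwraps it to the underlying `float`.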
+ signature = inspect.signature(cls.__init__) + + # Handle InitVars if needed (only works on Python 3.8+, when a `type` attribute was added to InitVar); + # see CPython commit here https://github.com/python/cpython/commit/01ee12ba35a333e8a6a25c4153c4a21838e9585c + init_vars: List[str] = [] + params = [] + for name, param in signature.parameters.items(): + ann = param.annotation + + if isinstance(ann, dataclasses.InitVar): + # The TorchScript interpreter can't handle InitVar annotations, so we unwrap the underlying type here + init_vars.append(name) + params.append(param.replace(annotation=ann.type)) # type: ignore[attr-defined] + else: + params.append(param) + + signature = signature.replace(parameters=params) + + body = [ + # Assign all attributes to self + f"self.{field.name} = {field.name}" + for field in dataclasses.fields(cls) + if field.init and field.name not in init_vars + ] + # Call user's impl of __post_init__ if it exists + if hasattr(cls, "__post_init__"): + body.append("self.__post_init__(" + ", ".join(init_vars) + ")") + + return compose_fn(cls, "__init__", body or ["pass"], signature=str(signature)) + + +# This is a placeholder at the moment since the TorchScript interpreter doesn't call __repr__ +def synthesize__repr__(cls) -> ParsedDef: + return compose_fn( + cls, + "__repr__", + [ + f"return '{cls.__name__}(" + + ", ".join( + [ + f"{field.name}=self.{field.name}" + for field in dataclasses.fields(cls) + if field.repr + ] + ) + + ")'" + ], + signature="(self) -> str", + ) + + +def synthesize__hash__(cls) -> ParsedDef: + return compose_fn( + cls, + "__hash__", + [ + # This is just a placeholder to prevent compilation from failing; this won't even get called at + # all right now because the TorchScript interpreter doesn't call custom __hash__ implementations + "raise NotImplementedError('__hash__ is not supported for dataclasses in TorchScript')" + ], + signature="(self) -> int", + ) + + +# Implementation for __eq__ and __ne__ +def synthesize_equality(cls, name: str, converse: str) -> ParsedDef: + return synthesize_comparison( + cls, + name, + allow_eq=True, + raise_on_none=False, + inner=[f"if val1 {converse} val2: return False"], + ) + + +def synthesize_inequality(cls, name: str, op: str, allow_eq: bool) -> ParsedDef: + return synthesize_comparison( + cls, + name, + allow_eq, + raise_on_none=True, + inner=[ + f"if val1 {op} val2: return True", + f"elif val2 {op} val1: return False", + ], + ) + + +def synthesize_comparison( + cls, name: str, allow_eq: bool, raise_on_none: bool, inner: List[str] +) -> ParsedDef: + body = [] + for field in dataclasses.fields(cls): + if not field.compare: + continue + + body.extend( + [ + f"val1 = self.{field.name}", + f"val2 = other.{field.name}", + ] + ) + body.extend( + inner + if not is_optional(field.type) + else [ + # Type refinement for optional fields; we need this to avoid type errors from the interpreter + "if val1 is not None and val2 is not None:", + *[" " + line for line in inner], + "elif (val1 is None) != (val2 is None):", + f" raise TypeError('Cannot compare {cls.__name__} with None')" + if raise_on_none + else " return False", + ] + ) + + body.append(f"return {allow_eq}") + return compose_fn( + cls, name, body, signature=f"(self, other: {cls.__name__}) -> bool" + ) + + +DATACLASS_MAGIC_METHODS: Dict[str, Callable] = { + "__init__": synthesize__init__, + "__repr__": synthesize__repr__, + "__hash__": synthesize__hash__, + "__eq__": partial(synthesize_equality, name="__eq__", converse="!="), + "__ne__": 
partial(synthesize_equality, name="__ne__", converse="=="),
+ "__lt__": partial(synthesize_inequality, name="__lt__", op="<", allow_eq=False),
+ "__le__": partial(synthesize_inequality, name="__le__", op="<", allow_eq=True),
+ "__gt__": partial(synthesize_inequality, name="__gt__", op=">", allow_eq=False),
+ "__ge__": partial(synthesize_inequality, name="__ge__", op=">", allow_eq=True),
+} diff --git a/janus/lib/python3.10/site-packages/torch/jit/_decomposition_utils.py b/janus/lib/python3.10/site-packages/torch/jit/_decomposition_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..795f9da8e073a1197f81ddf379738a7f93605d7c --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_decomposition_utils.py @@ -0,0 +1,12 @@
+# mypy: allow-untyped-defs
+import torch
+from torch._ops import OpOverload, OpOverloadPacket
+
+
+def _register_decomposition(op: OpOverload, graph: torch._C.Graph):
+ assert not isinstance(
+ op, OpOverloadPacket
+ ), f"Must pass specific op overload, not overload packet, found {op}"
+ assert isinstance(op, OpOverload)
+
+ torch._C._jit_register_decomposition_for_schema(op._schema, graph) diff --git a/janus/lib/python3.10/site-packages/torch/jit/_decompositions.py b/janus/lib/python3.10/site-packages/torch/jit/_decompositions.py new file mode 100644 index 0000000000000000000000000000000000000000..fc63ee7394a7165dded62996656c396a30579b23 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_decompositions.py @@ -0,0 +1,137 @@
+# mypy: allow-untyped-defs
+import torch
+from torch import Tensor
+
+
+aten = torch.ops.aten
+import inspect
+import warnings
+from typing import Callable, Dict, List, Optional, Set, TypeVar
+from typing_extensions import ParamSpec
+
+from torch.types import Number
+
+
+decomposition_table: Dict[str, torch.jit.ScriptFunction] = {}
+function_name_set: Set[str] = set()
+
+_T = TypeVar("_T")
+_P = ParamSpec("_P")
+
+
+def check_decomposition_has_type_annotations(f):
+ inspect_empty = inspect._empty # type: ignore[attr-defined]
+ sig = inspect.signature(f)
+ for param in sig.parameters.values():
+ assert (
+ param.annotation != inspect_empty
+ ), f"No annotation on param {param.name} for function {f.__name__}"
+
+ assert (
+ sig.return_annotation != inspect_empty
+ ), f"No return annotation for function {f.__name__}"
+
+
+def signatures_match(decomposition_sig, torch_op_sig):
+ decomp_params = decomposition_sig.parameters
+ op_params = torch_op_sig.parameters
+
+ if len(decomp_params) != len(op_params):
+ return False
+
+ for decomp_param, op_param in zip(decomp_params.values(), op_params.values()):
+ # can't check full equality yet because not all fields are correctly deduced
+ # in the torch_op_sig - like default value
+ # can't check 'kind' because kwarg-only values with defaults
+ # are not yet supported in TorchScript
+ inspect_empty = inspect._empty # type: ignore[attr-defined]
+ for field in ["name", "annotation"]:
+ if field == "name" and decomp_param.name == "self":
+ warnings.warn("PyTorch uses 'input' instead of 'self' on public api")
+
+ if getattr(decomp_param, field) != getattr(op_param, field):
+ return False
+
+ decomp_default = decomp_param.default
+ op_default = op_param.default
+ # default value not always correctly inferred as being present on torch schema,
+ # but if specified on both they should be equal
+ if decomp_default != inspect_empty and op_default != inspect_empty:
+ if decomp_default != op_default:
+ return False
+
+ return decomposition_sig.return_annotation == torch_op_sig.return_annotation
+
+
+def
register_decomposition( + aten_op: torch._ops.OpOverload, + registry: Optional[Dict[str, torch.jit.ScriptFunction]] = None, +) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]: + def decomposition_decorator(f: Callable[_P, _T]) -> Callable[_P, _T]: + nonlocal registry + if registry is None: + registry = decomposition_table + + assert isinstance(aten_op, torch._ops.OpOverload) + + # Need unique name for jit function serialization + assert ( + f.__name__ not in function_name_set + ), f"Duplicated function name {f.__name__}" + function_name_set.add(f.__name__) + + scripted_func = torch.jit.script(f) + torch._C._jit_pass_inline(scripted_func.graph) + + for _ in range(2): + torch._C._jit_pass_peephole(scripted_func.graph) + torch._C._jit_pass_constant_propagation(scripted_func.graph) + + registry[str(aten_op._schema)] = scripted_func + return f + + return decomposition_decorator + + +# TODO: replace torch.sigmoid -> aten.sigmoid + + +@register_decomposition(aten.var.correction) +def var_decomposition( + input: Tensor, + dim: Optional[List[int]] = None, + correction: Optional[Number] = None, + keepdim: bool = False, +) -> Tensor: + if dim is None: + dim_i: List[int] = [] + dim = dim_i + + if isinstance(dim, (tuple, list)) and len(dim) == 0: + n = input.numel() + else: + n = 1 + for dim_i in dim: # type: ignore[assignment] + n *= input.shape[dim_i] # type: ignore[call-overload] + + mean = aten.mean(input, dim, True) + sub = input - mean + sq = sub * sub + sum = aten.sum(sq, dim, keepdim) + + if correction is None: + denom = float(n - 1) + else: + if isinstance(correction, int): + denom = float(n - correction) + elif isinstance(correction, float): + denom = float(n) - correction + else: + raise RuntimeError("correction must be int or float") + + return sum / max(0, denom) + + +@register_decomposition(aten.var.default) +def var(input: Tensor, unbiased: bool = True) -> Tensor: + return var_decomposition(input, correction=(1 if unbiased else 0)) diff --git a/janus/lib/python3.10/site-packages/torch/jit/_freeze.py b/janus/lib/python3.10/site-packages/torch/jit/_freeze.py new file mode 100644 index 0000000000000000000000000000000000000000..e496bd74762554e4909362e8eebcb2711be04789 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_freeze.py @@ -0,0 +1,228 @@ +# mypy: allow-untyped-defs +"""Freezing. + +This is not intended to be imported directly; please use the exposed +functionalities in `torch.jit`. +""" + +from typing import List, Optional + +import torch +from torch.jit._script import RecursiveScriptModule, ScriptModule + + +def freeze( + mod, preserved_attrs: Optional[List[str]] = None, optimize_numerics: bool = True +): + r"""Freeze ScriptModule, inline submodules, and attributes as constants. + + Freezing a :class:`ScriptModule` will clone it and attempt to inline the cloned + module's submodules, parameters, and attributes as constants in the TorchScript IR Graph. + By default, `forward` will be preserved, as well as attributes & methods specified in + `preserved_attrs`. Additionally, any attribute that is modified within a preserved + method will be preserved. + + Freezing currently only accepts ScriptModules that are in eval mode. + + Freezing applies generic optimization that will speed up your model regardless of machine. + To further optimize using server-specific settings, run `optimize_for_inference` after + freezing. 
+ + Args: + mod (:class:`ScriptModule`): a module to be frozen + preserved_attrs (Optional[List[str]]): a list of attributes to preserve in addition to the forward method. + Attributes modified in preserved methods will also be preserved. + optimize_numerics (bool): If ``True``, a set of optimization passes will be run that does not strictly + preserve numerics. Full details of optimization can be found at `torch.jit.run_frozen_optimizations`. + + Returns: + Frozen :class:`ScriptModule`. + + Example (Freezing a simple module with a Parameter): + + .. testcode:: + import torch + class MyModule(torch.nn.Module): + def __init__(self, N, M): + super().__init__() + self.weight = torch.nn.Parameter(torch.rand(N, M)) + self.linear = torch.nn.Linear(N, M) + + def forward(self, input): + output = self.weight.mm(input) + output = self.linear(output) + return output + + scripted_module = torch.jit.script(MyModule(2, 3).eval()) + frozen_module = torch.jit.freeze(scripted_module) + # parameters have been removed and inlined into the Graph as constants + assert len(list(frozen_module.named_parameters())) == 0 + # See the compiled graph as Python code + print(frozen_module.code) + + Example (Freezing a module with preserved attributes) + + .. testcode:: + import torch + class MyModule2(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.modified_tensor = torch.tensor(10.) + self.version = 1 + + def forward(self, input): + self.modified_tensor += 1 + return input + self.modified_tensor + + scripted_module = torch.jit.script(MyModule2().eval()) + frozen_module = torch.jit.freeze(scripted_module, preserved_attrs=["version"]) + # we've manually preserved `version`, so it still exists on the frozen module and can be modified + assert frozen_module.version == 1 + frozen_module.version = 2 + # `modified_tensor` is detected as being mutated in the forward, so freezing preserves + # it to retain model semantics + assert frozen_module(torch.tensor(1)) == torch.tensor(12) + # now that we've run it once, the next result will be incremented by one + assert frozen_module(torch.tensor(1)) == torch.tensor(13) + + Note: + Freezing submodule attributes is also supported: + frozen_module = torch.jit.freeze(scripted_module, preserved_attrs=["submodule.version"]) + + Note: + If you're not sure why an attribute is not being inlined as a constant, you can run + `dump_alias_db` on frozen_module.forward.graph to see if freezing has detected the + attribute is being modified. + + Note: + Because freezing makes weights constants and removes module hierarchy, `to` and other + nn.Module methods to manipulate device or dtype no longer work. As a workaround, + You can remap devices by specifying `map_location` in `torch.jit.load`, however + device-specific logic may have been baked into the model. + """ + if not isinstance(mod, ScriptModule): + raise RuntimeError( + "Freezing expects a ScriptModule as input. " + "Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'." + ) + + if mod.training: + raise RuntimeError( + "Freezing is currently only implemented for modules in eval mode. " + "Please call .eval() on your module before freezing." 
+ )
+
+ preserved_attrs = preserved_attrs if preserved_attrs is not None else []
+
+ out = RecursiveScriptModule(torch._C._freeze_module(mod._c, preserved_attrs))
+ RecursiveScriptModule._finalize_scriptmodule(out)
+
+ preserved_methods = [x for x in preserved_attrs if mod._c._has_method(x)]
+ run_frozen_optimizations(out, optimize_numerics, preserved_methods)
+
+ return out
+
+
+def run_frozen_optimizations(
+ mod, optimize_numerics: bool = True, preserved_methods: Optional[List[str]] = None
+):
+ r"""
+ Run a series of optimizations looking for patterns that occur in frozen graphs.
+
+ The current set of optimizations includes:
+ - Dropout Removal
+ - Pretranspose Linear Layers
+ - Concat Linear Layers with same input Tensor
+ - Conv -> Batchnorm folding
+ - Conv -> Add/Sub folding
+ - Conv -> Mul/Div folding
+
+ Args:
+ mod (:class:`ScriptModule`): a frozen module to be optimized
+
+ optimize_numerics (bool): If ``True``, a set of optimization passes will be run that does not strictly
+ preserve numerics. These optimizations preserve default rtol and atol of `torch.testing.assert_close`
+ when applied on a single transformation, however in a module where many transformations are applied
+ the rtol or atol may no longer fall within the default `assert_close` tolerance. Conv -> Batchnorm folding,
+ Conv -> Add/Sub folding, and Conv -> Mul/Div folding all may alter numerics.
+
+ Returns:
+ None
+
+ Note:
+ On rare occasions, this can result in slower execution.
+
+ Example (Freezing a module with Conv->Batchnorm)
+ .. code-block:: python
+ import torch
+ in_channels, out_channels = 3, 32
+ conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
+ bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
+ mod = torch.nn.Sequential(conv, bn)
+ # set optimize_numerics to False here; by default, freezing runs run_frozen_optimizations
+ frozen_mod = torch.jit.freeze(torch.jit.script(mod.eval()), optimize_numerics=False)
+ # inspect frozen mod
+ assert "batch_norm" in str(frozen_mod.graph)
+ torch.jit.run_frozen_optimizations(frozen_mod)
+ assert "batch_norm" not in str(frozen_mod.graph)
+
+ """
+ if mod._c._has_method("forward"):
+ torch._C._jit_pass_optimize_frozen_graph(mod.graph, optimize_numerics)
+
+ if preserved_methods is None:
+ preserved_methods = []
+
+ for method in preserved_methods:
+ torch._C._jit_pass_optimize_frozen_graph(
+ mod.__getattr__(method).graph, optimize_numerics
+ )
+
+
+def optimize_for_inference(
+ mod: ScriptModule, other_methods: Optional[List[str]] = None
+) -> ScriptModule:
+ """
+ Perform a set of optimization passes to optimize a model for the purposes of inference.
+
+ If the model is not already frozen, optimize_for_inference
+ will invoke `torch.jit.freeze` automatically.
+
+ In addition to generic optimizations that should speed up your model regardless
+ of environment, `optimize_for_inference` will also bake in build-specific settings
+ such as the presence of CUDNN or MKLDNN, and may in the future make transformations
+ which speed things up on one machine but slow things down on another. Accordingly,
+ serialization is not implemented following invocation of `optimize_for_inference` and
+ is not guaranteed.
+
+ This is still in prototype, and may have the potential to slow down your model.
+ Primary use cases targeted so far have been vision models on CPU
+ and, to a lesser extent, GPU.
+ + Example (optimizing a module with Conv->Batchnorm):: + + import torch + in_channels, out_channels = 3, 32 + conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True) + bn = torch.nn.BatchNorm2d(out_channels, eps=.001) + mod = torch.nn.Sequential(conv, bn) + frozen_mod = torch.jit.optimize_for_inference(torch.jit.script(mod.eval())) + assert "batch_norm" not in str(frozen_mod.graph) + # if built with MKLDNN, convolution will be run with MKLDNN weights + assert "MKLDNN" in frozen_mod.graph + """ + if not isinstance(mod, ScriptModule): + raise RuntimeError( + "optimize_for_inference expects a ScriptModule as input. " + "Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'." + ) + + if other_methods is None: + other_methods = [] + + if hasattr(mod, "training"): + mod = freeze(mod.eval(), preserved_attrs=other_methods) + + torch._C._jit_pass_optimize_for_inference(mod._c, other_methods) + + return mod diff --git a/janus/lib/python3.10/site-packages/torch/jit/_fuser.py b/janus/lib/python3.10/site-packages/torch/jit/_fuser.py new file mode 100644 index 0000000000000000000000000000000000000000..7466800402d27a954be8f5631df0c9517d10189a --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_fuser.py @@ -0,0 +1,161 @@ +# mypy: allow-untyped-defs +import contextlib +from typing import List, Tuple + +import torch + + +@contextlib.contextmanager +def optimized_execution(should_optimize): + """Context manager that controls whether the JIT's executor will run optimizations before executing a function.""" + stored_flag = torch._C._get_graph_executor_optimize() + torch._C._set_graph_executor_optimize(should_optimize) + try: + yield + finally: + torch._C._set_graph_executor_optimize(stored_flag) + + +@contextlib.contextmanager +def fuser(name): + """Context manager that facilitates switching between backend fusers. 
+ + Valid names: + * ``fuser0`` - enables only legacy fuser + * ``fuser1`` - enables only NNC + * ``fuser2`` - enables only nvFuser + * ``fuser3`` - enables oneDNN Graph + """ + old_cpu_fuse = torch._C._jit_can_fuse_on_cpu() + old_gpu_fuse = torch._C._jit_can_fuse_on_gpu() + old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled() + old_nvfuser_state = torch._C._jit_nvfuser_enabled() + old_llga_state = torch._C._jit_llga_enabled() + if name == "fuser0": # legacy fuser + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_override_can_fuse_on_gpu(True) + torch._C._jit_set_texpr_fuser_enabled(False) + torch._C._jit_set_nvfuser_enabled(False) + torch._C._jit_set_llga_enabled(False) + elif name == "fuser1": # NNC + old_profiling_executor = torch._C._jit_set_profiling_executor(True) + old_profiling_mode = torch._C._get_graph_executor_optimize(True) + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_override_can_fuse_on_gpu(True) + torch._C._jit_set_texpr_fuser_enabled(True) + torch._C._jit_set_nvfuser_enabled(False) + torch._C._jit_set_llga_enabled(False) + elif name == "fuser2": # nvFuser + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_override_can_fuse_on_gpu(False) + torch._C._jit_set_texpr_fuser_enabled(False) + torch._C._jit_set_nvfuser_enabled(True) + torch._C._jit_set_llga_enabled(False) + elif name == "fuser3": # oneDNN Graph + old_profiling_executor = torch._C._jit_set_profiling_executor(True) + old_profiling_mode = torch._C._get_graph_executor_optimize(True) + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_override_can_fuse_on_gpu(False) + torch._C._jit_set_texpr_fuser_enabled(True) + torch._C._jit_set_nvfuser_enabled(False) + torch._C._jit_set_llga_enabled(True) + elif name == "none": # Turn Pytorch fuser off + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_override_can_fuse_on_gpu(False) + torch._C._jit_set_texpr_fuser_enabled(False) + torch._C._jit_set_nvfuser_enabled(False) + torch._C._jit_set_llga_enabled(False) + else: + raise Exception(f"unrecognized fuser option (name: {name})") # noqa: TRY002 + try: + yield + finally: + if name in ["fuser1", "fuser3"]: # NNC or oneDNN Graph + torch._C._jit_set_profiling_executor(old_profiling_executor) # type: ignore[possibly-undefined] + torch._C._get_graph_executor_optimize(old_profiling_mode) # type: ignore[possibly-undefined] + # recover the previous values + torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuse) + torch._C._jit_override_can_fuse_on_gpu(old_gpu_fuse) + torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state) + torch._C._jit_set_nvfuser_enabled(old_nvfuser_state) + torch._C._jit_set_llga_enabled(old_llga_state) + + +last_executed_optimized_graph = torch._C._last_executed_optimized_graph + + +def _get_differentiable_graph_node(node, diff_node): + if node.kind() == "prim::DifferentiableGraph": + diff_node.append(node) + else: + for block in node.blocks(): + for n in block.nodes(): + _get_differentiable_graph_node(n, diff_node) + + +def _graph_for(self, *args, **kwargs): + return _script_method_graph_for(self, self, *args, **kwargs) + + +def _script_method_graph_for(self, parent, *args, **kwargs): + try: + dbs = parent.get_debug_state() + eps = list(dbs.execution_plans.values()) + assert len(eps) == 1 + graph = eps[0].graph.copy() + + # graph_executor_states for differentiable node + fw_states = eps[0].code.differentiable_op_executor_states() + diff_nodes: List[torch._C.Node] = [] + for n in graph.nodes(): + _get_differentiable_graph_node(n, diff_nodes) + 
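+ # Each prim::DifferentiableGraph node found above should have a matching
+ # graph executor state; the zip below pairs them up positionally.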
+ assert len(fw_states) == len(diff_nodes)
+ # swap each differentiable graph with optimized graph in their execution plan
+ for n, state in zip(diff_nodes, fw_states):
+ fw_execution_plans = list(state.execution_plans.values())
+ # we can only update the subgraph when there's a unique execution
+ # plan. Avoid asserting here so that we skip the ones that can't be
+ # updated while making a best effort to update the other nodes.
+ if len(fw_execution_plans) == 1:
+ n.g_("Subgraph", fw_execution_plans[0].graph)
+
+ return graph
+ except Exception:
+ # fallback approach, we just run the graph and return the recorded optimized
+ # graph
+ self(*args, **kwargs)
+ return last_executed_optimized_graph()
+
+
+def set_fusion_strategy(strategy: List[Tuple[str, int]]):
+ """Set the type and number of specializations that can occur during fusion.
+
+ Usage: provide a list of pairs (type, depth) where type is one of "STATIC" or "DYNAMIC"
+ and depth is an integer.
+
+ Behavior - static vs dynamic:
+ In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined
+ based on some initial profiling runs.
+ In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple
+ shapes are possible.
+
+ In both cases, we also recompile on new striding behavior, device, or dtype.
+
+ Behavior - fallback functions & depth:
+ When an input doesn't match the format required by the specialized compiled op, it will run
+ a fallback function. Fallback functions are recursively compiled and specialized based
+ on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to
+ limit the number of specializations that can be compiled, before giving up on recompiling and
+ falling back to a completely un-fused, un-specialized implementation.
+
+ The list of (type, depth) pairs controls the type of specializations and the number of
+ specializations. For example: [("STATIC", 2), ("DYNAMIC", 2)] indicates that the first
+ two specializations will use static fusions, the following two specializations will use
+ dynamic fusion, and any inputs that satisfy none of the 4 options will run an
+ unfused implementation.
+
+ NB: in the future, as more fusion backends are added, there may be more granular
+ APIs for specific fusers.
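+
+ A minimal usage sketch, mirroring the pairs described above:
+ torch.jit.set_fusion_strategy([("STATIC", 2), ("DYNAMIC", 2)])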
+ """ + return torch._C._jit_set_fusion_strategy(strategy) diff --git a/janus/lib/python3.10/site-packages/torch/jit/_ir_utils.py b/janus/lib/python3.10/site-packages/torch/jit/_ir_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..52b953624a3a18d2fb3f06f79044ab6ac583af76 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_ir_utils.py @@ -0,0 +1,26 @@ +# mypy: allow-untyped-defs +from typing import Union + +import torch + + +class _InsertPoint: + def __init__( + self, + insert_point_graph: torch._C.Graph, + insert_point: Union[torch._C.Node, torch._C.Block], + ): + self.insert_point = insert_point + self.g = insert_point_graph + self.guard = None + + def __enter__(self): + self.prev_insert_point = self.g.insertPoint() + self.g.setInsertPoint(self.insert_point) + + def __exit__(self, *args): + self.g.setInsertPoint(self.prev_insert_point) + + +def insert_point_guard(self, insert_point: Union[torch._C.Node, torch._C.Block]): + return _InsertPoint(self, insert_point) diff --git a/janus/lib/python3.10/site-packages/torch/jit/_logging.py b/janus/lib/python3.10/site-packages/torch/jit/_logging.py new file mode 100644 index 0000000000000000000000000000000000000000..d0687023386ee7745af4e2cb0731f76528125477 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_logging.py @@ -0,0 +1,11 @@ +import torch + + +add_stat_value = torch.ops.prim.AddStatValue + +set_logger = torch._C._logging_set_logger +LockingLogger = torch._C.LockingLogger +AggregationType = torch._C.AggregationType +NoopLogger = torch._C.NoopLogger + +time_point = torch.ops.prim.TimePoint diff --git a/janus/lib/python3.10/site-packages/torch/jit/_monkeytype_config.py b/janus/lib/python3.10/site-packages/torch/jit/_monkeytype_config.py new file mode 100644 index 0000000000000000000000000000000000000000..366a58ac6afd30907d7ca73aab531d1dc02ea1f9 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_monkeytype_config.py @@ -0,0 +1,194 @@ +# mypy: allow-untyped-defs +import inspect +import sys +import typing +from collections import defaultdict +from pathlib import Path +from types import CodeType +from typing import Dict, Iterable, List, Optional + +import torch + + +_IS_MONKEYTYPE_INSTALLED = True +try: + import monkeytype # type: ignore[import] + from monkeytype import trace as monkeytype_trace + from monkeytype.config import _startswith, LIB_PATHS # type: ignore[import] + from monkeytype.db.base import ( # type: ignore[import] + CallTraceStore, + CallTraceStoreLogger, + CallTraceThunk, + ) + from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import] +except ImportError: + _IS_MONKEYTYPE_INSTALLED = False + + +# Checks whether a class is defind in `torch.*` modules +def is_torch_native_class(cls): + if not hasattr(cls, "__module__"): + return False + + parent_modules = cls.__module__.split(".") + if not parent_modules: + return False + + root_module = sys.modules.get(parent_modules[0]) + return root_module is torch + + +def get_type(type): + """Convert the given type to a torchScript acceptable format.""" + if isinstance(type, str): + return type + elif inspect.getmodule(type) == typing: + # If the type is a type imported from typing + # like Tuple, List, Dict then replace `typing.` + # with a null string. This needs to be done since + # typing.List is not accepted by TorchScript. 
+ type_to_string = str(type)
+ return type_to_string.replace(type.__module__ + ".", "")
+ elif is_torch_native_class(type):
+ # If the type is a subtype of torch module, then TorchScript expects a fully qualified name
+ # for the type which is obtained by combining the module name and type name.
+ return type.__module__ + "." + type.__name__
+ else:
+ # For all other types use the name for the type.
+ return type.__name__
+
+
+def get_optional_of_element_type(types):
+ """Extract the element type and return it as `Optional[element type]` from consolidated types.
+
+ Helper function that extracts the type of the element to be annotated as Optional
+ from the list of consolidated types and returns `Optional[element type]`.
+ TODO: To remove this check once Union support lands.
+ """
+ elem_type = types[1] if type(None) == types[0] else types[0]
+ elem_type = get_type(elem_type)
+
+ # Optional type is internally converted to Union[type, NoneType], which
+ # is not supported yet in TorchScript. Hence, representing the optional type as string.
+ return "Optional[" + elem_type + "]"
+
+
+def get_qualified_name(func):
+ return func.__qualname__
+
+
+if _IS_MONKEYTYPE_INSTALLED:
+
+ class JitTypeTraceStoreLogger(CallTraceStoreLogger):
+ """A JitTypeTraceStoreLogger that stores logged traces in a CallTraceStore."""
+
+ def __init__(self, store: CallTraceStore):
+ super().__init__(store)
+
+ def log(self, trace: CallTrace) -> None:
+ self.traces.append(trace)
+
+ class JitTypeTraceStore(CallTraceStore):
+ def __init__(self) -> None:
+ super().__init__()
+ # A dictionary keeping all collected CallTraces:
+ # key is the fully qualified name of the called function,
+ # value is the list of all its CallTraces
+ self.trace_records: Dict[str, list] = defaultdict(list)
+
+ def add(self, traces: Iterable[CallTrace]):
+ for t in traces:
+ qualified_name = get_qualified_name(t.func)
+ self.trace_records[qualified_name].append(t)
+
+ def filter(
+ self,
+ qualified_name: str,
+ qualname_prefix: Optional[str] = None,
+ limit: int = 2000,
+ ) -> List[CallTraceThunk]:
+ return self.trace_records[qualified_name]
+
+ def analyze(self, qualified_name: str) -> Dict:
+ # Analyze the types for the given function
+ # and create a dictionary of all the types
+ # for its arguments.
+ records = self.trace_records[qualified_name]
+ all_args = defaultdict(set)
+ for record in records:
+ for arg, arg_type in record.arg_types.items():
+ all_args[arg].add(arg_type)
+ return all_args
+
+ def consolidate_types(self, qualified_name: str) -> Dict:
+ all_args = self.analyze(qualified_name)
+ # If more than one type was observed for an argument, consolidate:
+ # a T-or-None pair becomes `Optional[T]`, anything else becomes `Any`.
+ for arg, types in all_args.items():
+ types = list(types)
+ type_length = len(types)
+ if type_length == 2 and type(None) in types:
+ # TODO: To remove this check once Union support in TorchScript lands.
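+ # e.g. observed types {int, NoneType} consolidate to "Optional[int]"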
+ all_args[arg] = get_optional_of_element_type(types)
+ elif type_length > 1:
+ all_args[arg] = "Any"
+ elif type_length == 1:
+ all_args[arg] = get_type(types[0])
+ return all_args
+
+ def get_args_types(self, qualified_name: str) -> Dict:
+ return self.consolidate_types(qualified_name)
+
+ class JitTypeTraceConfig(monkeytype.config.Config):
+ def __init__(self, s: JitTypeTraceStore):
+ super().__init__()
+ self.s = s
+
+ def trace_logger(self) -> JitTypeTraceStoreLogger:
+ """Return a JitTypeTraceStoreLogger that logs to the configured trace store."""
+ return JitTypeTraceStoreLogger(self.trace_store())
+
+ def trace_store(self) -> CallTraceStore:
+ return self.s
+
+ def code_filter(self) -> Optional[CodeFilter]:
+ return jit_code_filter
+
+else:
+ # When MonkeyType is not installed, we provide dummy class definitions
+ # for the below classes.
+ class JitTypeTraceStoreLogger: # type: ignore[no-redef]
+ def __init__(self) -> None:
+ pass
+
+ class JitTypeTraceStore: # type: ignore[no-redef]
+ def __init__(self) -> None:
+ self.trace_records = None
+
+ class JitTypeTraceConfig: # type: ignore[no-redef]
+ def __init__(self) -> None:
+ pass
+
+ monkeytype_trace = None # type: ignore[assignment] # noqa: F811
+
+
+def jit_code_filter(code: CodeType) -> bool:
+ """Code filter for TorchScript to trace forward calls.
+
+ A custom CodeFilter is required when scripting FX-traced forward calls.
+ FX-traced forward calls have a `code.co_filename` starting with '<', which is used
+ to exclude tracing of stdlib and site-packages in the default code filter.
+ Since we need all forward calls to be traced, this custom code filter
+ checks whether `code.co_name` is 'forward' and enables tracing for all such calls.
+ The code filter is otherwise similar to MonkeyType's default code filter and
+ excludes tracing of the stdlib and site-packages.
+ """
+ # Filter out code without a source file, except for 'forward' calls.
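+ # For example, FX-generated forward code typically has a co_filename like
+ # "<eval_with_key>.0", which the default filter would reject.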
+ if code.co_name != "forward" and ( + not code.co_filename or code.co_filename[0] == "<" + ): + return False + + filename = Path(code.co_filename).resolve() + return not any(_startswith(filename, lib_path) for lib_path in LIB_PATHS) diff --git a/janus/lib/python3.10/site-packages/torch/jit/_passes/__init__.py b/janus/lib/python3.10/site-packages/torch/jit/_passes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/janus/lib/python3.10/site-packages/torch/jit/_passes/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/_passes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67f87bc6b3233f8dc80d059d6c77432db51acbe9 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/_passes/__pycache__/__init__.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/jit/_passes/__pycache__/_property_propagation.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/_passes/__pycache__/_property_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e33a0a4faa450fce443efcff157d898e22327515 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/_passes/__pycache__/_property_propagation.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/jit/_passes/_property_propagation.py b/janus/lib/python3.10/site-packages/torch/jit/_passes/_property_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..1537f7bc4147359af3f1e0ea4229352d37303921 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_passes/_property_propagation.py @@ -0,0 +1,47 @@ +# mypy: allow-untyped-defs +""" +Tools to help with tensor property propagation. + +This is not intended to be imported directly; please use the exposed +functionalities in `torch.jit`. +""" + +from typing import Any, List + +import torch +from torch import TensorType +from torch._C import Graph + + +def apply_input_props_using_example(graph: Graph, example_input: List[Any]): + """ + Applies properties for each tensor in the graph inputs + using the example supplied. 
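+
+ For example (an illustrative sketch): given a scripted function ``fn``,
+ ``apply_input_props_using_example(fn.graph, [torch.rand(2, 2)])`` stamps the
+ first graph input with the shape, dtype, and device of the example tensor.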
+ """ + graph_inputs = list(graph.inputs()) + if len(graph_inputs) == 0: + return + + # Strip self args off for methods + in_0 = graph_inputs[0] + if isinstance(in_0.type(), torch._C.ClassType) and in_0.debugName() == "self": + graph_inputs = graph_inputs[1:] + + if not len(graph_inputs) == len(example_input): + raise RuntimeError( + "Number of inputs in graph does not match number of inputs in the example" + ) + + for i, (graph_i, example_i) in enumerate(zip(graph_inputs, example_input)): + if example_i is None: + continue # Skip the type check + + if isinstance(example_i, torch.Tensor) != isinstance( + graph_i.type(), TensorType + ): + raise RuntimeError( + f"Input {i} does not match type of example", graph_i, example_i + ) + + if isinstance(example_i, torch.Tensor): + graph_i.setType(TensorType.create_from_tensor(example_i)) # type: ignore[arg-type] diff --git a/janus/lib/python3.10/site-packages/torch/jit/_pickle.py b/janus/lib/python3.10/site-packages/torch/jit/_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..5517499e926001603cdb8f0696a7dfdc7df1a964 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_pickle.py @@ -0,0 +1,38 @@ +# mypy: allow-untyped-defs +# These functions are referenced from the pickle archives produced by +# ScriptModule.save() + + +# These (`build_*`) functions used to be used by `pickler.cpp` to specify +# the type of the list for certain special types, but now all lists get +# a type attached and restored via `restore_type_tag` below. The legacy +# functions should stick around for backwards-compatibility. + + +def build_intlist(data): + return data + + +def build_tensorlist(data): + return data + + +def build_doublelist(data): + return data + + +def build_boollist(data): + return data + + +def build_tensor_from_id(data): + if isinstance(data, int): + # just the id, can't really do anything + return data + + +def restore_type_tag(value, type_str): + # The type_ptr is used by the jit unpickler to restore the full static type + # to container types like list when they are re-loaded, but this doesn't + # matter for Python, so just return the plain value + return value diff --git a/janus/lib/python3.10/site-packages/torch/jit/_recursive.py b/janus/lib/python3.10/site-packages/torch/jit/_recursive.py new file mode 100644 index 0000000000000000000000000000000000000000..d489b51d3cd5d01610811ec39e85b18bc9790fd3 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_recursive.py @@ -0,0 +1,1071 @@ +# mypy: allow-untyped-defs +import collections +import functools +import inspect +import sys +import textwrap +import types +import warnings +from typing import Dict, List, Set, Type + +import torch +import torch._jit_internal as _jit_internal +from torch._sources import fake_range +from torch.jit._builtins import _find_builtin +from torch.jit._check import AttributeTypeIsSupportedChecker +from torch.jit._state import _add_script_class, _get_script_class, _python_cu +from torch.jit.frontend import ( + get_class_properties, + get_default_args, + get_jit_class_def, + get_jit_def, +) +from torch.nn import Module + + +ScriptMethodStub = collections.namedtuple( + "ScriptMethodStub", ("resolution_callback", "def_", "original_method") +) +PropertyStub = collections.namedtuple("PropertyStub", ("resolution_callback", "def_")) + + +# TODO: there should be a more principled way of doing this. 
+ignored_attributes = [ + "_version", + "_parameters", + "_buffers", + "_non_persistent_buffers_set", + "_backward_hooks", + "_backward_pre_hooks", + "_forward_hooks", + "_forward_hooks_with_kwargs", + "_forward_pre_hooks", + "_forward_pre_hooks_with_kwargs", + "_forward_hooks_always_called", + "_state_dict_hooks", + "_state_dict_pre_hooks", + "_load_state_dict_pre_hooks", + "_load_state_dict_post_hooks", + "_modules", + "_initializing", + "dump_patches", +] + + +def _compile_and_register_class(obj, rcb, qualified_name): + script_class = _get_script_class(obj) + + if not script_class: + ast = get_jit_class_def(obj, obj.__name__) + defaults = torch.jit.frontend.get_default_args_for_class(obj) + script_class = torch._C._jit_script_class_compile( + qualified_name, ast, defaults, rcb + ) + _add_script_class(obj, script_class) + + return script_class + + +def make_stub(func, name): + rcb = _jit_internal.createResolutionCallbackFromClosure(func) + ast = get_jit_def(func, name, self_name="RecursiveScriptModule") + return ScriptMethodStub(rcb, ast, func) + + +def make_stub_from_method(nn_module, method_name): + func = getattr(nn_module, method_name) + if isinstance(func, ScriptMethodStub): + return func + # Make sure the name present in the resulting AST will match the name + # requested here. The only time they don't match is if you do something + # like: + # def _forward(self): + # pass + # forward = _forward + # In this case, the actual function object will have the name `_forward`, + # even though we requested a stub for `forward`. + return make_stub(func, method_name) + + +def make_stubs_from_exported_methods(mod): + stubs = [] + for name in dir(mod): + item = getattr(mod, name, None) + if ( + _jit_internal.get_torchscript_modifier(item) + is _jit_internal.FunctionModifiers.EXPORT + ): + stubs.append(make_stub_from_method(mod, name)) + + return stubs + + +def jit_ignored_properties(module): + user_annotated_ignored_attributes = getattr( + module, "__jit_ignored_attributes__", [] + ) + + def get_properties_names(module): + return {k for k, v in vars(module).items() if isinstance(v, property)} + + properties = get_properties_names(type(module)) + user_annoted_ignored_properties = set() + + for ignored_attr in user_annotated_ignored_attributes: + if ignored_attr in properties: + user_annoted_ignored_properties.add(ignored_attr) + return user_annoted_ignored_properties + + +# base types that can be constants +# in addition, tuples and lists of these base types are also considered constants +# If you edit this list, then you also need to edit the handlers in +# ConstantValue in jit/script/init.cpp +_constant_types = ( + bool, + float, + int, + str, + type(None), + torch.device, + torch.layout, + torch.dtype, +) + + +def _get_valid_constant(attr, v, owner_type): + if isinstance(v, _constant_types): + return v + elif isinstance(v, (tuple, list)): + return tuple(_get_valid_constant(attr, x, owner_type) for x in v) + constants = ", ".join(torch.typename(typ) for typ in _constant_types) + raise TypeError( + textwrap.dedent( + f""" + '{torch.typename(type(v))}' object in attribute '{owner_type}.{attr}' is not a valid constant. + Valid constants are: + 1. a nn.ModuleList + 2. a value of type {{{constants}}} + 3. 
a list or tuple of (2)
+ """
+ )
+ )
+
+
+class SourceContext(torch._C._jit_tree_views.SourceRangeFactory):
+ def __init__(self, source, filename, file_lineno, leading_whitespace_len):
+ super().__init__(source, filename, file_lineno, leading_whitespace_len)
+
+
+def get_annotations(obj):
+ if sys.version_info < (3, 10):
+ return getattr(obj, "__annotations__", {})
+ # In Python-3.10+ it is recommended to use inspect.get_annotations
+ # See https://docs.python.org/3.10/howto/annotations.html
+ # But also, in 3.10 annotations from a base class are not inherited
+ # by an unannotated derived one, so they must be manually extracted
+ annotations = inspect.get_annotations(obj)
+ if annotations:
+ return annotations
+
+ def get_cls_annotations(cls):
+ cls_annotations = inspect.get_annotations(cls)
+ if cls_annotations:
+ return cls_annotations
+ for base in cls.__bases__:
+ cls_annotations = get_cls_annotations(base)
+ if cls_annotations:
+ return cls_annotations
+ return {}
+
+ cls = obj if isinstance(obj, type) else type(obj)
+ return get_cls_annotations(cls)
+
+
+def infer_concrete_type_builder(nn_module, share_types=True):
+ """
+ Build a ConcreteModuleTypeBuilder from an nn.Module.
+
+ This ConcreteModuleType doesn't have a JIT type associated with it yet; it
+ must be filled in by the caller.
+ """
+ concrete_type_builder = torch._C.ConcreteModuleTypeBuilder(type(nn_module))
+ if isinstance(nn_module, (torch.nn.ModuleDict)):
+ concrete_type_builder.set_module_dict()
+ if isinstance(nn_module, (torch.nn.ModuleList, torch.nn.Sequential)):
+ concrete_type_builder.set_module_list()
+ if isinstance(nn_module, (torch.nn.ParameterList)):
+ concrete_type_builder.set_parameter_list()
+ if isinstance(nn_module, (torch.nn.ParameterDict)):
+ concrete_type_builder.set_parameter_dict()
+
+ class_annotations = get_annotations(nn_module)
+ if isinstance(nn_module, (torch.ao.quantization.QuantWrapper)):
+ class_annotations = {}
+
+ # Get user-annotated ignored attributes.
+ user_annotated_ignored_attributes = getattr(
+ nn_module, "__jit_ignored_attributes__", []
+ )
+ concrete_type_builder.add_ignored_attributes(user_annotated_ignored_attributes)
+ ignored_properties = jit_ignored_properties(nn_module)
+
+ # try to infer the type from type annotation or from the object itself
+ def infer_type(name, item):
+ # The forward function from Module is special; never use these annotations; we
+ # need to infer the type directly using JIT. I originally wanted to write
+ # this test as isinstance(class_annotations[name], Callable) but
+ # isinstance on typing things doesn't seem to work: isinstance(list, Callable)
+ # is also true!
+ inferred = False
+ try:
+ if (
+ name in class_annotations
+ and class_annotations[name]
+ != torch.nn.Module.__annotations__["forward"]
+ ):
+ ann_to_type = torch.jit.annotations.ann_to_type(
+ class_annotations[name], fake_range()
+ )
+ attr_type = torch._C.InferredType(ann_to_type)
+ elif isinstance(item, torch.jit.Attribute):
+ ann_to_type = torch.jit.annotations.ann_to_type(item.type, fake_range())
+ attr_type = torch._C.InferredType(ann_to_type)
+ else:
+ attr_type = torch._C._jit_try_infer_type(item)
+ inferred = True
+ except RuntimeError as re:
+ raise RuntimeError(f"Error inferring type for {name}: {item}: {re}") from re
+
+ return attr_type, inferred
+
+ added_names = set()
+
+ for name, item in nn_module._parameters.items():
+ if name in user_annotated_ignored_attributes:
+ continue
+
+ assert item is None or isinstance(item, torch.Tensor)
+ attr_type, _ = infer_type(name, item)
+ # We currently have the invariant in various places in our code
+ # that parameters must be Tensors. However, the nn.Module API also
+ # allows NoneType parameters. These parameters are not returned as
+ # part of `parameters()` and its variants, but are available
+ # through direct attribute access.
+ concrete_type_builder.add_attribute(name, attr_type.type(), True, False)
+ added_names.add(name)
+
+ for name, item in nn_module._buffers.items():
+ if name in user_annotated_ignored_attributes:
+ continue
+
+ assert item is None or isinstance(item, torch.Tensor)
+ attr_type, _ = infer_type(name, item)
+ concrete_type_builder.add_attribute(name, attr_type.type(), False, True)
+ added_names.add(name)
+
+ for name, item in nn_module._modules.items():
+ if name in user_annotated_ignored_attributes:
+ continue
+
+ attr_type, _ = infer_type(name, item)
+ if item is None:
+ # Modules can be None. We don't have direct support for optional
+ # Modules, so we register it as a NoneType attribute instead.
+ concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
+ continue
+ if attr_type.success():
+ assert attr_type.type().is_interface_type()
+ # if the type can be inferred, it should be a module interface type
+ sub_concrete_type = torch._C.ConcreteModuleType.from_jit_type(
+ attr_type.type()
+ )
+ else:
+ # otherwise we get the concrete module type for item and add it to concrete_type
+ sub_concrete_type = get_module_concrete_type(item, share_types)
+ concrete_type_builder.add_module(name, sub_concrete_type)
+
+ added_names.add(name)
+
+ # populate constants_set
+ constants_set = set(getattr(nn_module, "__constants__", ()))
+
+ # Constants annotated via `Final[T]` rather than being added to `__constants__`
+ for name, ann in class_annotations.items():
+ if torch._jit_internal.is_final(ann):
+ constants_set.add(name)
+
+ for name in constants_set:
+ if name in added_names:
+ # TODO: We should really error in this case, but it's BC-breaking, so
+ # we need to warn for at least one release
+ if name in nn_module._modules:
+ hint = "submodule"
+ elif name in nn_module._buffers:
+ hint = "buffer"
+ elif name in nn_module._parameters:
+ hint = "parameter"
+ else:
+ raise AssertionError(
+ "added_names must be submodule, parameter, or buffer"
+ )
+
+ warnings.warn(
+ f"'{name}' was found in ScriptModule constants, "
+ f"but it is a non-constant {hint}. Consider removing it."
+ )
+ continue
+ if not hasattr(nn_module, name):
+ # TODO: We should really error in this case, but it's BC-breaking, so
+ # we need to warn for at least one release
+ warnings.warn(
+ f"'{name}' was found in ScriptModule constants, "
+ "but was not actually set in __init__. "
+ "Consider removing it."
+ )
+ continue
+ value = getattr(nn_module, name)
+ concrete_type_builder.add_constant(
+ name, _get_valid_constant(name, value, type(nn_module).__name__)
+ )
+ added_names.add(name)
+
+ # populate overloads
+ overloads = getattr(nn_module, "__overloads__", {})
+ # update with any annotated overloads
+ overloads.update(
+ get_overload_name_mapping(
+ get_overload_annotations(nn_module, ignored_properties)
+ )
+ )
+ for name, overloaded_names in overloads.items():
+ concrete_type_builder.add_overload(name, overloaded_names)
+
+ for name, value in nn_module.__dict__.items():
+ if name in ignored_attributes or name.startswith("__"):
+ # Python objects have lots of random attributes attached to them;
+ # PyTorch adds a few more. Prevent these from getting compiled.
+ continue
+
+ if name in user_annotated_ignored_attributes:
+ continue
+
+ if name in added_names:
+ # Don't re-add anything we already added
+ continue
+
+ isoverloadpacket = isinstance(value, torch._ops.OpOverloadPacket)
+ if isoverloadpacket:
+ value = value.op
+ # Handle Python function attributes
+ if inspect.isfunction(value):
+ try:
+ scripted_fn = torch.jit.script(value)
+ concrete_type_builder.add_function_attribute(
+ name, torch._C._jit_try_infer_type(scripted_fn).type(), value
+ )
+ except Exception as e:
+ # If we fail to script the function, it isn't a hard error.
+ # Instead, we will add it to the list of attributes we failed
+ # to convert, with the compilation error.
+ hint = (
+ "(This function exists as an attribute on the Python module, "
+ "but we failed to compile it to a TorchScript function. "
+ f"\nThe error stack is reproduced here:\n{e}"
+ )
+ concrete_type_builder.add_failed_attribute(name, hint)
+
+ continue
+
+ # Handle calls to builtin functions (either bespoke builtins from torch.jit._builtins or
+ # a call to an aten function like torch.add)
+ builtin_symbol_name = _find_builtin(value)
+ if builtin_symbol_name:
+ concrete_type_builder.add_builtin_function(name, builtin_symbol_name)
+ continue
+
+ # Handle Script function attributes
+ if isinstance(value, torch.jit.ScriptFunction):
+ concrete_type_builder.add_function_attribute(
+ name, torch._C._jit_try_infer_type(value).type(), value
+ )
+ continue
+
+ # If we got here, this is a regular "data" attribute; add it to the concrete type
+ attr_type, inferred = infer_type(name, value)
+ if attr_type.success():
+ concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
+ else:
+ # TODO: could add more detail here. For example, what the user should do
+ # when the pytype is `list` or `NoneType`
+ inferred_msg = (
+ "Its type was inferred; try adding a type annotation for the attribute."
+ if inferred
+ else ""
+ )
+ additional_info = f"{attr_type.reason()}. {inferred_msg}"
+ hint = (
+ "(This attribute exists on the Python module, "
+ f"but we failed to convert Python type: '{torch.typename(type(value))}' "
+ f"to a TorchScript type. 
{additional_info})"
+ )
+ concrete_type_builder.add_failed_attribute(name, hint)
+
+ # add hooks to concrete type
+ for hook in nn_module._forward_hooks.values():
+ concrete_type_builder.add_forward_hook(hook)
+ for pre_hook in nn_module._forward_pre_hooks.values():
+ concrete_type_builder.add_forward_pre_hook(pre_hook)
+
+ return concrete_type_builder
+
+
+class ConcreteTypeStore:
+ type_store: Dict[Type[Module], List[torch._C.ConcreteModuleType]]
+ methods_compiled: Set[torch._C.ConcreteModuleType]
+
+ def __init__(self) -> None:
+ # Python module type => List[ConcreteModuleType]
+ self.type_store = {}
+ # ConcreteTypes that have had their methods already compiled
+ self.methods_compiled = set()
+
+ def get_or_create_concrete_type(self, nn_module):
+ """Infer a ConcreteType from this `nn.Module` instance. Underlying JIT types are re-used if possible."""
+ concrete_type_builder = infer_concrete_type_builder(nn_module)
+
+ nn_module_type = type(nn_module)
+ if nn_module_type not in self.type_store:
+ self.type_store[nn_module_type] = []
+
+ # Search the type store for an already-available JIT type
+ known_types = self.type_store[nn_module_type]
+ for known_type in known_types:
+ if known_type.equals(concrete_type_builder):
+ return known_type
+
+ # We didn't find anything; generate a new JIT type from this concrete type
+ concrete_type = concrete_type_builder.build()
+ self.type_store[nn_module_type].append(concrete_type)
+ return concrete_type
+
+
+concrete_type_store = ConcreteTypeStore()
+
+
+def create_methods_and_properties_from_stubs(
+ concrete_type, method_stubs, property_stubs
+):
+ method_defs = [m.def_ for m in method_stubs]
+ method_rcbs = [m.resolution_callback for m in method_stubs]
+ method_defaults = [get_default_args(m.original_method) for m in method_stubs]
+
+ property_defs = [p.def_ for p in property_stubs]
+ property_rcbs = [p.resolution_callback for p in property_stubs]
+
+ concrete_type._create_methods_and_properties(
+ property_defs, property_rcbs, method_defs, method_rcbs, method_defaults
+ )
+
+
+def create_hooks_from_stubs(concrete_type, hook_stubs, pre_hook_stubs):
+ hook_defs = [h.def_ for h in hook_stubs]
+ hook_rcbs = [h.resolution_callback for h in hook_stubs]
+
+ pre_hook_defs = [h.def_ for h in pre_hook_stubs]
+ pre_hook_rcbs = [h.resolution_callback for h in pre_hook_stubs]
+
+ concrete_type._create_hooks(hook_defs, hook_rcbs, pre_hook_defs, pre_hook_rcbs)
+
+
+def get_module_concrete_type(nn_module, share_types=True):
+ """
+ Get a concrete type for nn_modules.
+
+ If share_types is True, the concrete type is fetched from concrete_type_store.
+ If it is False, a new concrete type is created without first searching concrete_type_store.
+
+ Args:
+ nn_module: The original Python nn.Module that we are creating a ScriptModule for.
+ share_types: Whether to share underlying JIT types between modules (if possible).
+
+ Returns:
+ A concrete type for nn_module.
+ """
+ assert isinstance(nn_module, Module)
+ if isinstance(nn_module, torch.jit.ScriptModule) and hasattr(
+ nn_module, "_concrete_type"
+ ):
+ return nn_module._concrete_type
+
+ if share_types:
+ # Look into the store of cached JIT types
+ concrete_type = concrete_type_store.get_or_create_concrete_type(nn_module)
+ else:
+ # Get a concrete type directly, without trying to re-use an existing JIT
+ # type from the type store.
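+ # The builder is marked poisoned below so this one-off type is never
+ # considered equal to a cached type; traced modules can produce different
+ # methods per trace, so sharing would be unsafe (see create_script_module).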
+        concrete_type_builder = infer_concrete_type_builder(nn_module, share_types)
+        concrete_type_builder.set_poisoned()
+        concrete_type = concrete_type_builder.build()
+
+    return concrete_type
+
+
+def create_script_class(obj):
+    """
+    Create and return a RecursiveScriptClass instance from a Python object.
+
+    Args:
+        obj: A Python object.
+    """
+    qualified_class_name = _jit_internal._qualified_name(type(obj))
+    rcb = _jit_internal.createResolutionCallbackForClassMethods(type(obj))
+    # Script the type of obj if it hasn't already been scripted.
+    _compile_and_register_class(type(obj), rcb, qualified_class_name)
+    class_ty = _python_cu.get_class(qualified_class_name)
+    # Create an empty torch._C.ScriptObject with the scripted type.
+    cpp_object = torch._C._create_object_with_type(class_ty)
+    # Copy all of the attributes over to the torch._C.ScriptObject.
+    for name, value in obj.__dict__.items():
+        cpp_object.setattr(name, value)
+
+    # Wrap the torch._C.ScriptObject in a RecursiveScriptClass instance.
+    return wrap_cpp_class(cpp_object)
+
+
+def create_script_module(nn_module, stubs_fn, share_types=True, is_tracing=False):
+    """
+    Create a new ScriptModule from an nn.Module.
+
+    Args:
+        nn_module: The original Python nn.Module that we are creating a ScriptModule for.
+        stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile.
+        share_types: Whether to share underlying JIT types between modules (if possible).
+            NOTE: Only set this to False when we cannot guarantee type sharing will work
+                correctly. This only happens today for traced modules, where the same
+                module can produce different traced methods depending on the inputs.
+        is_tracing: Whether this function is called during tracing or scripting. If tracing,
+                we don't need to do AttributeTypeIsSupportedChecker because all the unsupported
+                attributes will be baked as constants in the tracing graph. In addition,
+                this check significantly slows down traced modules when the module size is big.
+    """
+    assert not isinstance(nn_module, torch.jit.RecursiveScriptModule)
+    check_module_initialized(nn_module)
+    concrete_type = get_module_concrete_type(nn_module, share_types)
+    if not is_tracing:
+        AttributeTypeIsSupportedChecker().check(nn_module)
+    return create_script_module_impl(nn_module, concrete_type, stubs_fn)
+
+
+def create_script_module_impl(nn_module, concrete_type, stubs_fn):
+    """
+    Convert an nn.Module to a RecursiveScriptModule.
+
+    Args:
+        nn_module: The original Python nn.Module that we are creating a ScriptModule for.
+        concrete_type: The fully initialized ConcreteType of the module.
+        stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile.
+    """
+    cpp_module = torch._C._create_module_with_type(concrete_type.jit_type)
+    method_stubs = stubs_fn(nn_module)
+    property_stubs = get_property_stubs(nn_module)
+    hook_stubs, pre_hook_stubs = get_hook_stubs(nn_module)
+
+    user_annotated_ignored_attributes = getattr(
+        nn_module, "__jit_ignored_attributes__", []
+    )
+    ignored_properties = jit_ignored_properties(nn_module)
+
+    def init_fn(script_module):
+        # Initialize the ScriptModule:
+        # 1. Copy the attributes/parameters/buffers from the original `nn_module` to the new ScriptModule.
+        for name in concrete_type.get_attributes().keys():
+            orig_value = getattr(nn_module, name)
+            orig_value = (
+                orig_value.value
+                if isinstance(orig_value, torch.jit.Attribute)
+                else orig_value
+            )
+            cpp_module.setattr(name, orig_value)
+
+        # 2.
Copy the submodules from the original `nn_module` to the new ScriptModule, + # recursively scripting them. + for name, sub_concrete_type in concrete_type.get_modules(): + orig_value = getattr(nn_module, name) + assert isinstance( + orig_value, Module + ), f"Expected Module but got {type(orig_value)}" + module_type = sub_concrete_type.jit_type + if isinstance(module_type, torch._C.InterfaceType): + # use the interface inference rule to compile the module + scripted = interface_script(module_type, orig_value) + elif isinstance(orig_value, torch.jit.ScriptModule): + scripted = orig_value + else: + # always reuse the provided stubs_fn to infer the methods to compile + scripted = create_script_module_impl( + orig_value, sub_concrete_type, stubs_fn + ) + + cpp_module.setattr(name, scripted) + script_module._modules[name] = scripted + + # 3. Copy @ignored/@unused methods and attrs from the original `nn_module` to the new ScriptModule. + # This ensures we can access these Python methods on the ScriptModule. + for name in dir(nn_module): + if name in ignored_properties: + continue + item = getattr(nn_module, name, None) + if inspect.ismethod(item) and _jit_internal.is_ignored_fn(item): + unbound_function = getattr(nn_module, name).__func__ + bound_method = unbound_function.__get__(script_module) + setattr(script_module, name, bound_method) + elif concrete_type.is_ignored_attribute(name): + setattr(script_module, name, item) + + # For convenience, attach the concrete type to the new ScriptModule + script_module._concrete_type = concrete_type + + # Actually create the ScriptModule, initializing it with the function we just defined + script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn) + + # Compile methods if necessary + if concrete_type not in concrete_type_store.methods_compiled: + create_methods_and_properties_from_stubs( + concrete_type, method_stubs, property_stubs + ) + # Create hooks after methods to ensure no name collisions between hooks and methods. + # If done before, hooks can overshadow methods that aren't exported. + create_hooks_from_stubs(concrete_type, hook_stubs, pre_hook_stubs) + torch._C._run_emit_module_hook(cpp_module) + concrete_type_store.methods_compiled.add(concrete_type) + + # Copy the forward hooks and pre-hooks to the new ScriptModule + # to allow the hooks to be run from eager as ScriptFunctions + for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()): + script_module._forward_pre_hooks[idx] = fn + for idx, fn in enumerate(script_module._c._get_forward_hooks()): + script_module._forward_hooks[idx] = fn + + # Special handling so methods like __len__ work in script methods on classes derived from containers + if ( + isinstance( + nn_module, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict) + ) + and "__len__" not in cpp_module._method_names() + ): + script_module.define(f"def __len__(self):\n return {len(nn_module)}\n") + if ( + isinstance(nn_module, torch.nn.ModuleDict) + and "__contains__" not in cpp_module._method_names() + ): + if len(nn_module.keys()): + keys = repr(list(nn_module.keys())) + script_module.define( + f"def __contains__(self, key: str):\n return key in {keys}\n" + ) + else: + script_module.define("def __contains__(self, key: str):\n return False\n") + + # Make the compiled methods available to the Python ScriptModule class. 
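+    # Illustrative effect (hypothetical `MyModule` with a docstring on forward):
+    #
+    #     sm = torch.jit.script(MyModule())
+    #     sm.forward            # the compiled ScriptMethod, found via __dict__
+    #     sm.forward.__doc__    # preserved from MyModule.forward by functools.wraps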
+    for method_stub in method_stubs:
+        if method_stub.original_method is None:
+            # define()'d methods don't have a Python original_method, so we
+            # don't need to do any Python re-wrapping stuff
+            continue
+
+        name = method_stub.original_method.__name__
+        if name != method_stub.def_.name().name:
+            # TODO: Why skip this? Because @torch.jit._overload_method will
+            # mangle the name of the function.
+            continue
+        script_method = cpp_module._get_method(name)
+
+        # Wrap the original to propagate docstrings and such.
+        # TODO: we don't currently do this for functions that are recursively
+        # compiled; we should.
+        wrapped_script_method = functools.wraps(method_stub.original_method)(
+            script_method
+        )
+
+        # Add the methods to the script_module directly. This ensures they will
+        # be found first when `name` is looked up (as opposed to the stubs or
+        # nn.Module.forward)
+        script_module.__dict__[name] = wrapped_script_method
+
+    # Make module properties available on the Python ScriptModule class.
+    for property_stub in property_stubs:
+        property_name = property_stub.def_.name().name
+        fget = cpp_module._get_method(property_stub.def_.getter_name().name)
+        # Setter is optional, so it may not exist.
+        setter_name = property_stub.def_.setter_name()
+        fset = cpp_module._get_method(setter_name.name) if setter_name else None
+        # Note: `property` takes (fget, fset); passing the name as the first
+        # positional argument would install it as the getter.
+        script_module.__dict__[property_name] = property(fget, fset)  # type: ignore[arg-type]
+
+    # copy over python methods to script module if they aren't defined on the script module
+    # this is currently an internal api used only on module containers
+    for name in dir(nn_module):
+        if name in ignored_properties:
+            continue
+        item = getattr(nn_module, name, None)
+        if (
+            _jit_internal.get_torchscript_modifier(item)
+            is _jit_internal.FunctionModifiers.COPY_TO_SCRIPT_WRAPPER
+        ):
+            add_python_attr_to_scripted_model(script_module, nn_module, name)
+
+    return script_module
+
+
+# We define shims of certain attributes on the RecursiveScriptModule to support
+# magic methods.
To check if a script model defines an attribute we need +# to also check that the attribute is not the shim +def script_model_defines_attr(script_model, attr): + script_attr = getattr(script_model, attr, None) + if script_attr is None: + return False + default_attr = getattr(torch.jit.RecursiveScriptModule, attr, None) + if default_attr is None: + return False + return script_attr != default_attr + + +def add_python_attr_to_scripted_model(script_model, orig, attr): + if hasattr(orig, attr) and script_model_defines_attr(script_model, attr): + setattr(script_model, attr, getattr(orig, attr)) + + +def get_overload_annotations(mod, jit_ignored_properties): + # original function => [(mangled overload name, overload function)] + overloads = {} + + for name in dir(type(mod)): + if name in jit_ignored_properties: + continue + item = getattr(mod, name, None) + if not callable(item): + continue + + # builtin functions like repr() in python 2 do not have __module__ defined + if hasattr(item, "__module__") and item.__module__ is not None: + method_overloads = _jit_internal._get_overloaded_methods( + item, mod.__class__ + ) + if method_overloads is None: + continue + + if item.__func__ in method_overloads: + raise RuntimeError( + _jit_internal.get_overload_no_implementation_error_message( + "method", item.__func__ + ) + ) + + names = [name + "__" + str(i) for i in range(len(method_overloads))] + overloads[item] = list(zip(names, method_overloads)) + + return overloads + + +def get_overload_name_mapping(overload_info): + # Same format as __overloads__ + # original function => [overload names] + overload_name_mappings: Dict[str, List[str]] = {} + for orig_fn, overloads in overload_info.items(): + original_name = orig_fn.__name__ + if original_name not in overload_name_mappings: + overload_name_mappings[original_name] = [] + + for overload_name, _ in overloads: + overload_name_mappings[original_name].append(overload_name) + return overload_name_mappings + + +def _check_no_signature(func): + signature = torch.jit.annotations.get_signature( + func, None, fake_range(), inspect.ismethod(func) + ) + if signature is None: + qual_name = _jit_internal._qualified_name(func) + raise RuntimeError( + f"Must explicitly add type annotations to overloaded functions: {qual_name}" + ) + + +def make_stubs_for_overloads(overload_info): + overload_stubs = [] + for orig_fn, overloads in overload_info.items(): + orig_ast = get_jit_def( + orig_fn, orig_fn.__name__, self_name="RecursiveScriptModule" + ) + for overload_name, overload_fn in overloads: + _check_no_signature(overload_fn) + over_ast = get_jit_def( + overload_fn, overload_fn.__name__, self_name="RecursiveScriptModule" + ) + new_ast = torch._C._replace_overloaded_method_decl( + over_ast.decl(), orig_ast, overload_name + ) + _rcb = _jit_internal.createResolutionCallbackFromClosure(orig_fn) + overload_stubs.append(ScriptMethodStub(_rcb, new_ast, overload_fn)) + return overload_stubs + + +def check_module_initialized(mod): + assert isinstance(mod, torch.nn.Module) + if not hasattr(mod, "_parameters"): + raise RuntimeError( + f"'{torch.typename(type(mod))}' has not been initialized, did you forget to call 'super()'?" + ) + + # This is to avoid importing torch.distributed.nn + if not hasattr(mod, "remote_parameters"): + for name, param in mod._parameters.items(): + if param is not None and torch.nn.parameter.is_lazy(param): + raise RuntimeError( + f"'{torch.typename(type(mod))}' has uninitialized parameters {name}. Did you forget to run a forward pass?" 
+ ) + for name, buf in mod._buffers.items(): + if buf is not None and torch.nn.parameter.is_lazy(buf): + raise RuntimeError( + f"'{torch.typename(type(mod))}' has uninitialized buffers {name}. Did you forget to run a forward pass?" + ) + + +def infer_methods_to_compile(nn_module): + """Implement the default rules for which methods should act as starting points for compilation. + + (TODO add a link when the rules are published). + """ + check_module_initialized(nn_module) + user_annotated_ignored_attributes = getattr( + nn_module, "__jit_ignored_attributes__", [] + ) + ignored_properties = jit_ignored_properties(nn_module) + + methods: List[str] = [] + if hasattr(nn_module, "forward") and not _jit_internal.is_ignored_fn( + nn_module.forward + ): + forward_func = getattr(nn_module.forward, "__func__", None) + module_forward = getattr(torch.nn.Module, "forward", None) + if forward_func != module_forward: + methods = ["forward"] + + exported = [] + for name in dir(nn_module): + if name in ignored_properties: + continue + item = getattr(nn_module, name, None) + if ( + _jit_internal.get_torchscript_modifier(item) + is _jit_internal.FunctionModifiers.EXPORT + ): + exported.append(name) + + methods = methods + exported + + overload_name_mappings = dict(getattr(nn_module, "__overloads__", {})) + overload_info = get_overload_annotations(nn_module, ignored_properties) + overload_name_mappings.update(get_overload_name_mapping(overload_info)) + overload_stubs = make_stubs_for_overloads(overload_info) + + nn_module.__overloads__ = overload_name_mappings + + # we shouldn't directly compile overloaded methods, just its overloads + def ignore_overloaded(method_name): + return method_name not in overload_name_mappings + + filtered_methods = filter(ignore_overloaded, methods) + + # Unique the methods. We don't want to use a set to store the methods because it + # introduces non-determinism to compile order. + uniquer: Set[str] = set() + uniqued_methods = [] + for name in filtered_methods: + if name in uniquer: + continue + uniqued_methods.append(name) + uniquer.add(name) + + stubs = [] + for method in uniqued_methods: + stubs.append(make_stub_from_method(nn_module, method)) + return overload_stubs + stubs + + +def get_hook_stubs(nn_module): + """Return forward hook and pre_hook ScriptModuleStubs.""" + check_module_initialized(nn_module) + hook_map: Dict = {} + + hook_stubs = [] + for hook in nn_module._forward_hooks.values(): + if hook.__name__ in hook_map: + if id(hook) != id(hook_map[hook.__name__]): + raise RuntimeError( + f"Hook '{hook.__name__}' on {type(nn_module).__name__} " + "has at least two different python definitions." + " Please use unique names for all hooks." + ) + else: + hook_map[hook.__name__] = hook + hook_stubs.append(make_stub(hook, hook.__name__)) + + pre_hook_stubs = [] + for pre_hook in nn_module._forward_pre_hooks.values(): + if pre_hook.__name__ in hook_map: + if id(pre_hook) != id(hook_map[pre_hook.__name__]): + raise RuntimeError( + f"Pre-hook '{pre_hook.__name__}' on {type(nn_module).__name__} " + "has at least two different python definitions." + " Please use unique names for all hooks." 
+                )
+        else:
+            hook_map[pre_hook.__name__] = pre_hook
+        pre_hook_stubs.append(make_stub(pre_hook, pre_hook.__name__))
+
+    return hook_stubs, pre_hook_stubs
+
+
+def get_property_stubs(nn_module):
+    """Create property stubs for the properties of the module by creating method stubs for the getter and setter."""
+    module_ty = type(nn_module)
+    properties_asts = get_class_properties(module_ty, self_name="RecursiveScriptModule")
+    rcbs = {}
+
+    for name in dir(module_ty):
+        item = getattr(module_ty, name, None)
+        if isinstance(item, property):
+            if not item.fget:
+                raise RuntimeError(
+                    f"Property {name} of {module_ty.__name__} must have a getter"
+                )
+
+            rcbs[name] = _jit_internal.createResolutionCallbackFromClosure(item.fget)
+
+    stubs = [PropertyStub(rcbs[ast.name().name], ast) for ast in properties_asts]
+    return stubs
+
+
+def interface_script(mod_interface, nn_module):
+    """
+    Make a ScriptModule from an nn.Module, using the interface methods rule for determining which methods to compile.
+
+    Args:
+        mod_interface: the interface type that the module has
+        nn_module: The original Python nn.Module that we are creating a ScriptModule for.
+    """
+    if isinstance(nn_module, torch.jit.ScriptModule):
+        return nn_module
+
+    check_module_initialized(nn_module)
+
+    def infer_interface_methods_to_compile(nn_module):
+        """Rule to infer the methods from the interface type.
+
+        It is used to know which methods need to act as starting points for compilation.
+        """
+        stubs = []
+        for method in mod_interface.getMethodNames():
+            stubs.append(make_stub_from_method(nn_module, method))
+        return stubs
+
+    return create_script_module(nn_module, infer_interface_methods_to_compile)
+
+
+def try_compile_fn(fn, loc):
+    if _jit_internal.is_ignored_fn(fn):
+        # Don't do anything for @ignore'd functions
+        return None
+
+    if isinstance(fn, torch.nn.Module):
+        # Since modules are callable, pybind recognizes them as functions; don't
+        # do anything for them either.
+        return None
+
+    if not inspect.isfunction(fn) and not inspect.ismethod(fn):
+        raise RuntimeError(
+            f"`{fn}` is not a function. Recursive scripting only supports "
+            "Python functions or methods currently.\n"
+            f"Consider manually annotating `{fn}` with @torch.jit.script."
+        )
+
+    # The object returned by __prepare_scriptable__ might have a different closure.
+    # Resolve it here to get the right resolution callback.
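+    # For example (hypothetical), a function that closes over a helper can be
+    # resolved from its closure alone, without its original defining scope:
+    #
+    #     def helper(x): return x + 1
+    #     def fn(x): return helper(x)   # `helper` is found via fn's closure/globals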
+ fn = fn.__prepare_scriptable__() if hasattr(fn, "__prepare_scriptable__") else fn # type: ignore[operator] + + # We don't have the actual scope where the function was defined, but we can + # extract the necessary info from the closed over variables on the function + # object + rcb = _jit_internal.createResolutionCallbackFromClosure(fn) + return torch.jit.script(fn, _rcb=rcb) + + +def wrap_cpp_class(cpp_class): + """Wrap this torch._C.Object in a Python RecursiveScriptClass.""" + return torch.jit.RecursiveScriptClass(cpp_class) + + +def wrap_cpp_module(cpp_module): + """Wrap this torch._C.ScriptModule in a Python ScriptModule, recursively for all submodules.""" + + def init_fn(script_module): + for name, cpp_module in torch._C.ModuleDict(script_module._c).items(): + setattr(script_module, name, wrap_cpp_module(cpp_module)) + script_module._concrete_type = torch._C.ConcreteModuleType.from_jit_type( + script_module._c._type() + ) + + for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()): + script_module._forward_pre_hooks[idx] = fn + for idx, fn in enumerate(script_module._c._get_forward_hooks()): + script_module._forward_hooks[idx] = fn + + return torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn) + + +def compile_unbound_method(concrete_type, fn): + if _jit_internal.is_ignored_fn(fn): + return None + stub = make_stub(fn, fn.__name__) + with torch._jit_internal._disable_emit_hooks(): + # We don't want to call the hooks here since the graph that is calling + # this function is not yet complete + create_methods_and_properties_from_stubs(concrete_type, (stub,), ()) + return stub + + +def lazy_bind(concrete_type, unbound_method): + """ + Return a function that lazily binds `unbound_method` to a provided Module IValue, then invokes the method. + + We do this so that any Python shenanigans that + will poison type sharing are impossible at compile time. + """ + + def lazy_binding_method(cpp_module, *args): + def init_fn(script_module): + orig_class = concrete_type.py_class + + # Copy @ignored/@unused methods from the original module to the new one. + # This ensures they are available during execution. + for name in dir(orig_class): + item = getattr(orig_class, name, None) + if _jit_internal.is_ignored_fn(item): + setattr(script_module, name, item) + + # Copy constants over so they are available during execution. + for name, value in concrete_type.get_constants().items(): + setattr(script_module, name, value) + + script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn) + method = types.MethodType(unbound_method, script_module) + return method(*args) + + # make the lazy binding method "look like" the original method + lazy_binding_method.original_fn = unbound_method # type: ignore[attr-defined] + lazy_binding_method.__name__ = unbound_method.__name__ + torch._jit_internal.copy_torchscript_modifier(unbound_method, lazy_binding_method) + + return lazy_binding_method diff --git a/janus/lib/python3.10/site-packages/torch/jit/_script.py b/janus/lib/python3.10/site-packages/torch/jit/_script.py new file mode 100644 index 0000000000000000000000000000000000000000..1f90e5a6d84d2e0762e5111017c0286cefa60793 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_script.py @@ -0,0 +1,1727 @@ +"""TorchScript. + +This module contains functionality to support the JIT's scripting frontend, notably: + - torch.jit.script + +This is not intended to be imported directly; please use the exposed +functionalities in `torch.jit`. 
+""" +import collections +import copy +import enum +import functools +import inspect +import pickle +import warnings +from typing import Any, Callable, Dict, List, Set, Tuple, Union + +import torch +import torch._jit_internal as _jit_internal +from torch._classes import classes +from torch._jit_internal import _get_model_id, _qualified_name +from torch._utils_internal import log_torchscript_usage +from torch.jit._builtins import _register_builtin +from torch.jit._fuser import _graph_for, _script_method_graph_for +from torch.jit._monkeytype_config import ( + JitTypeTraceConfig, + JitTypeTraceStore, + monkeytype_trace, +) +from torch.jit._recursive import ( + _compile_and_register_class, + infer_methods_to_compile, + ScriptMethodStub, + wrap_cpp_module, +) +from torch.jit._state import ( + _enabled, + _set_jit_function_cache, + _set_jit_overload_cache, + _try_get_jit_cached_function, + _try_get_jit_cached_overloads, +) +from torch.jit.frontend import get_default_args, get_jit_class_def, get_jit_def +from torch.nn import Module +from torch.overrides import ( + has_torch_function, + has_torch_function_unary, + has_torch_function_variadic, +) +from torch.package import PackageExporter, PackageImporter +from torch.utils import set_module + +from ._serialization import validate_map_location + + +type_trace_db = JitTypeTraceStore() # DB to hold all call traces from MonkeyType + +torch._C.ScriptMethod.graph_for = _script_method_graph_for # type: ignore[attr-defined] +torch._C.ScriptFunction.graph_for = _graph_for # type: ignore[attr-defined] +ScriptFunction = torch._C.ScriptFunction +ScriptFunction.__doc__ = """ +Functionally equivalent to a :class:`ScriptModule`, but represents a single +function and does not have any attributes or Parameters. +""" +set_module(ScriptFunction, "torch.jit") + + +# Throws an error if a jit function is pickled. +# Helps to avoid Python crashes for Python versions 3.9.5 + when protocol 0 or 1 is given as an argument. +def _reduce(cls): + raise pickle.PickleError("ScriptFunction cannot be pickled") + + +ScriptFunction.__reduce__ = _reduce # type: ignore[assignment] + + +if _enabled: + Attribute = collections.namedtuple("Attribute", ["value", "type"]) +else: + + def Attribute(value, type): # type: ignore[no-redef] + return value + + +Attribute.__doc__ = """ + This method is a pass-through function that returns `value`, mostly + used to indicate to the TorchScript compiler that the left-hand side + expression is a class instance attribute with type of `type`. Note that + `torch.jit.Attribute` should only be used in `__init__` method of `jit.ScriptModule` + subclasses. + + Though TorchScript can infer correct type for most Python expressions, there are some cases where + type inference can be wrong, including: + + - Empty containers like `[]` and `{}`, which TorchScript assumes to be container of `Tensor` + - Optional types like `Optional[T]` but assigned a valid value of type `T`, TorchScript would assume + it is type `T` rather than `Optional[T]` + + In eager mode, it is simply a pass-through function that returns `value` + without other implications. + + Example: + + .. 
testcode::
+
+        import torch
+        from typing import Dict
+
+        class AttributeModule(torch.jit.ScriptModule):
+            def __init__(self) -> None:
+                super().__init__()
+                self.foo = torch.jit.Attribute(0.1, float)
+
+                # we should be able to use self.foo as a float here
+                assert 0.0 < self.foo
+
+                self.names_ages = torch.jit.Attribute({}, Dict[str, int])
+                self.names_ages["someone"] = 20
+                assert isinstance(self.names_ages["someone"], int)
+
+        m = AttributeModule()
+        # m will contain two attributes
+        # 1. foo of type float
+        # 2. names_ages of type Dict[str, int]
+
+    .. testcleanup::
+
+        del AttributeModule
+        del m
+
+    Note: it's now preferred to use type annotations instead of `torch.jit.Attribute`:
+
+    .. testcode::
+
+        import torch
+        from typing import Dict
+
+        class AttributeModule(torch.nn.Module):
+            names: Dict[str, int]
+
+            def __init__(self) -> None:
+                super().__init__()
+                self.names = {}
+
+        m = AttributeModule()
+
+    .. testcleanup::
+
+        del AttributeModule
+        del m
+
+    Args:
+        value: An initial value to be assigned to attribute.
+        type: A Python type
+
+    Returns:
+        Returns `value`
+"""
+
+
+def _get_type_trace_db():
+    # This is a private API. Use of this for external purposes is discouraged.
+    return type_trace_db
+
+
+# Gets a function from the name of a method on a type
+def _get_function_from_type(cls, name):
+    return getattr(cls, name, None)
+
+
+# ScriptClasses must be new-style classes because we construct them using their
+# __new__ method.
+def _is_new_style_class(cls):
+    if hasattr(cls, "__class__"):
+        return "__dict__" in dir(cls) or hasattr(cls, "__slots__")
+
+
+# These OrderedDictWrapper classes replace the actual OrderedDicts in
+# module with versions that get/set properties inside of Module.
+# This allows us to reuse most of nn.Module while still storing the
+# data in C++.
+# Each OrderedDict needs to support:
+#  x not in view
+#  x in view
+#  view[name] = ...
+#  view.values()
+#  del view[name]
+#  view.items()
+#  view.keys()
+#  len(view)
+
+
+class OrderedDictWrapper:
+    def __init__(self, _c):
+        self._c = _c
+
+    def keys(self):
+        return [k for k, v in self.items()]
+
+    def values(self):
+        return [v for k, v in self.items()]
+
+    def __len__(self):
+        return len(self.values())
+
+    def __delitem__(self, k):
+        raise RuntimeError("cannot delete methods or parameters of a script module")
+
+    def items(self):
+        return self._c.items()
+
+    def __setitem__(self, k, v):
+        if k not in self:
+            raise RuntimeError(
+                f"Can't add a new parameter after ScriptModule construction. Tried to add '{k}'"
+            )
+        self._c.setattr(k, v)
+
+    def __contains__(self, k):
+        return self._c.contains(k)
+
+    def __getitem__(self, k):
+        if k not in self:
+            raise KeyError(k)
+        return self._c.getattr(k)
+
+
+class OrderedModuleDict(OrderedDictWrapper):
+    def __init__(self, module, python_dict):
+        super().__init__(torch._C.ModuleDict(module))
+        # contains _both_ script modules and non-script python-only modules
+
+        # because script modules are subclassed in python and the
+        # C++ Module class will not hold references to them,
+        # to ensure that you always get the same python value here
+        # we store it in the python dict as well
+        self._python_modules = python_dict
+
+    def items(self):
+        r = self._python_modules.items()
+        return r
+
+    def __contains__(self, k):
+        return k in self._python_modules
+
+    def __setitem__(self, k, v):
+        # Cases where sub-module can be re-assigned after ScriptModule construction
+        # 1.
If the attr is a module interface type, it's guaranteed that the module is
+        #    not inlined in the graph, so it's safe to swap a new ScriptModule in.
+        # 2. if the new value is a ScriptModule with the same JIT type, the IR won't change
+        #    and it's legit to swap a new module in.
+        # In these two cases we allow swapping in a new scripted module and update the
+        # corresponding python module dict to keep them in sync.
+        # Note: the value to be swapped in has to be a ScriptModule instead of an nn.Module,
+        # otherwise it's illegal and we throw an error.
+        if isinstance(v, ScriptModule):
+            self._c.setattr(k, v)
+            self._python_modules[k] = v
+        else:
+            raise RuntimeError(
+                "Cannot re-assign modules in a ScriptModule with non-scripted "
+                f"module, tried to replace existing module '{k}': {v}"
+            )
+
+    def __getitem__(self, k):
+        return self._python_modules[k]
+
+
+# For each user-defined class that subclasses ScriptModule, this meta-class:
+# (1) finds all the methods annotated with @script_method in a ScriptModule and
+#     removes them from the class attributes
+# (2) puts a wrapper around the class's __init__ method to recursively compile
+#     all of the script_methods with the module after the original __init__ has
+#     run. This has to occur after the user-defined __init__ so that submodules and
+#     parameters are initialized _before_ the script compiler resolves references to
+#     `self.param` or `self.module`.
+class ScriptMeta(type):
+    def __init__(cls, name, bases, attrs):  # noqa: B902
+        # Aggregate all the ScriptMethods and constants from superclasses
+        cls._methods: Dict[str, Any] = {}
+        cls._constants_set = set(getattr(cls, "__constants__", ()))
+        for base in reversed(bases):
+            for k, v in getattr(base, "_methods", {}).items():
+                cls._methods[k] = v
+            base_constants: Set = getattr(base, "_constants_set", set())
+            cls._constants_set = cls._constants_set.union(base_constants)
+
+        # find all the script methods of the current class
+        for k, v in sorted(attrs.items()):
+            if isinstance(v, ScriptMethodStub):
+                delattr(cls, k)
+                cls._methods[v.original_method.__name__] = v
+
+        if getattr(cls, "_disable_script_meta", False):
+            # We leave built-in ScriptModule types alone, since this metaclass
+            # is only for compiling user classes that inherit from
+            # ScriptModule.
+            super().__init__(name, bases, attrs)
+            return
+
+        original_init = getattr(cls, "__init__", lambda self: None)
+
+        @functools.wraps(original_init)
+        def init_then_script(self, *args, **kwargs):
+            num_methods = len(cls._methods)
+            original_init(self, *args, **kwargs)
+            added_methods_in_init = len(cls._methods) > num_methods
+
+            if type(self) == cls:
+
+                def make_stubs(module):
+                    cls = type(module)
+                    if hasattr(cls, "_methods"):
+                        return [v for k, v in sorted(cls._methods.items())]
+                    else:
+                        return infer_methods_to_compile(module)
+
+                self.__dict__[
+                    "_actual_script_module"
+                ] = torch.jit._recursive.create_script_module(
+                    self, make_stubs, share_types=not added_methods_in_init
+                )
+
+                # Delete the Python attributes that now shadow the ScriptModule
+                # ones, so that __getattr__ and __setattr__ will properly find
+                # the scripted versions.
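+                # For example, if the user's __init__ did `self.weight = torch.ones(2)`,
+                # the eager `weight` would otherwise shadow the compiled copy held
+                # by _actual_script_module during attribute lookup.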
+                concrete_type = self._actual_script_module._concrete_type
+                for name in concrete_type.get_attributes():
+                    delattr(self, name)
+                for name, _ in concrete_type.get_modules():
+                    delattr(self, name)
+                for name in ("_parameters", "_buffers", "_modules"):
+                    delattr(self, name)
+
+        cls.__init__ = init_then_script  # type: ignore[misc]
+        super().__init__(name, bases, attrs)
+
+
+class _CachedForward:
+    def __get__(self, obj, cls):
+        return self.__getattr__("forward")  # type: ignore[attr-defined]
+
+
+class ScriptWarning(Warning):
+    pass
+
+
+def script_method(fn):
+    if not _enabled:
+        return fn
+    # NOTE: we need to traverse two frames here because the meta-class frame
+    # for ScriptModule will be present, as opposed to invoking @script on
+    # a function or invoking define() on a CompilationUnit.
+    # The stack will look like:
+    #
+    # 0. createResolutionCallback()
+    # 1. script_method()
+    # 2. ScriptModule metaclass frame
+    # 3. Surrounding scope
+    #
+    # createResolutionCallback internally adds 1 to get us to the scope of this
+    # function (the calling function). Adding 2 gets us to the proper surrounding scope.
+    _rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=2)
+    ast = get_jit_def(fn, fn.__name__, self_name="ScriptModule")
+    return ScriptMethodStub(_rcb, ast, fn)
+
+
+class ConstMap:
+    def __init__(self, const_mapping):
+        self.const_mapping = const_mapping
+
+    def __getattr__(self, attr):
+        return self.const_mapping[attr]
+
+
+def unpackage_script_module(
+    importer: PackageImporter, script_module_id: str
+) -> torch.nn.Module:
+    """
+    Called by ``torch.package.PackageImporter``'s Pickler's ``persistent_load`` function.
+
+    Performs the work of loading and returning a ScriptModule from a ``torch.package`` archive.
+    """
+    if not isinstance(importer.zip_reader, torch._C.PyTorchFileReader):
+        raise RuntimeError(
+            "Loading ScriptObjects from a PackageImporter created from a "
+            "directory is not supported. Use a package archive file instead."
+        )
+    cu = torch._C.CompilationUnit()
+    cpp_module = torch._C._import_ir_module_from_package(
+        cu,
+        importer.zip_reader,
+        importer.storage_context,
+        validate_map_location(importer.last_map_location),
+        script_module_id,
+    )
+    return wrap_cpp_module(cpp_module)
+
+
+if _enabled:
+    _magic_methods = [
+        "__iter__",
+        "__len__",
+        "__neg__",
+        "__mul__",
+        "__contains__",
+        "__add__",
+        "__sub__",
+        "__pow__",
+        "__truediv__",
+        "__mod__",
+        "__ne__",
+        "__eq__",
+        "__lt__",
+        "__gt__",
+        "__le__",
+        "__ge__",
+        "__and__",
+        "__or__",
+        "__xor__",
+        "__getitem__",
+        "__setitem__",
+        "__call__",
+        "__int__",
+        "__float__",
+        "__bool__",
+        "__str__",
+        "__enter__",
+        "__exit__",
+    ]
+
+    class RecursiveScriptClass:
+        """Wrapper for a TorchScript class instance for use in Python.
+
+        An analogue of RecursiveScriptModule for regular objects that are not modules.
+        This class is a wrapper around a torch._C.ScriptObject that represents an instance
+        of a TorchScript class and allows it to be used in Python.
+
+        Attributes:
+            _c [torch._C.ScriptObject]: The C++ object to which attribute lookups and method
+                calls are forwarded.
+            _props [Dict[str, property]]: A dictionary of properties fetched from self._c and
+                exposed on this wrapper.
+        """
+
+        def __init__(self, cpp_class):
+            super().__init__()
+            self.__dict__["_initializing"] = True
+            self._c = cpp_class
+
+            # Add wrapped object's properties to this class instance.
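+            # E.g. a scripted class with `@property def x(self): ...` surfaces
+            # here as a (name, getter, setter) record on self._c; re-wrapping it
+            # in `property` keeps Python's attribute protocol working.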
+ self._props = { + prop.name: property(prop.getter, prop.setter) + for prop in self._c._properties() + } + + self.__dict__["_initializing"] = False + + def __getattr__(self, attr): + if self.__dict__.get("_initializing"): + return super().__getattr__(attr) # type: ignore[misc] + + if attr in self._props: + return self._props[attr].fget() # type: ignore[call-arg, misc] + + return getattr(self._c, attr) + + def __setattr__(self, attr, value): + if self.__dict__.get("_initializing"): + return super().__setattr__(attr, value) + + if attr in self._props: + return self._props[attr].fset(value) # type: ignore[call-arg, misc] + + setattr(self._c, attr, value) + + # Delegate calls to magic methods like __len__ to the C++ module backing the + # RecursiveScriptClass. + def forward_magic_method(self, method_name, *args, **kwargs): + if not self._c._has_method(method_name): + raise TypeError + + self_method = self.__getattr__(method_name) + return self_method(*args, **kwargs) + + def __getstate__(self): + raise pickle.PickleError("ScriptClasses cannot be pickled") + + def __iadd__(self, other): + if self._c._has_method("__iadd__"): + return self.forward_magic_method("__iadd__", other) + else: + return self.forward_magic_method("__add__", other) + + for method_name in _magic_methods: + + def method_template(self, *args, **kwargs): + return self.forward_magic_method(method_name, *args, **kwargs) + + setattr(RecursiveScriptClass, method_name, method_template) + + # this is a Python 'non-data descriptor' that causes the first access + # to ScriptModule's forward to look up the forward method and stash + # it in the objects dict. Due to the standard rules for attribute lookup, + # subsequent lookups will just directly return the previously looked up method. + # This is necessary because nn.Module defines forward as a method. If we + # did nothing, __getattr__ would not be called. Instead we'd get nn.Module.forward + # which always throws an exception. + + class ScriptModule(Module, metaclass=ScriptMeta): + r"""Wrapper for C++ torch::jit::Module with methods, attributes, and parameters. + + A wrapper around C++ ``torch::jit::Module``. ``ScriptModule``\s + contain methods, attributes, parameters, and + constants. These can be accessed the same way as on a normal ``nn.Module``. + """ + + __jit_unused_properties__ = [ + "code", + "code_with_constants", + "graph", + "inlined_graph", + "original_name", + ] + + def __init__(self) -> None: + super().__init__() + + forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] + + def __getattr__(self, attr): + if "_actual_script_module" not in self.__dict__: + return super().__getattr__(attr) + return getattr(self._actual_script_module, attr) + + def __setattr__(self, attr, value): + if "_actual_script_module" not in self.__dict__: + # Unwrap torch.jit.Attribute into a regular setattr + record + # the provided type in __annotations__. + # + # This ensures that if we use the attr again in `__init__`, it + # will look like the actual value, not an instance of Attribute. + if isinstance(value, Attribute): + # NB: Ensure that we set __annotations__ on the specific + # class in question, and not on a superclass (which would + # be wrong wrong wrong!). 
+ # See also https://github.com/pytorch/pytorch/issues/39463 + if "__annotations__" not in self.__class__.__dict__: + self.__class__.__annotations__ = {} + self.__annotations__[attr] = value.type + value = value.value + return super().__setattr__(attr, value) + + setattr(self._actual_script_module, attr, value) + + def define(self, src): + if "_actual_script_module" in self.__dict__: + # If we have completed initialization, just defer to the + # backing RecursiveScriptModule to eagerly compile the provided + # source. + return self._actual_script_module.define(src) + + # Otherwise, we are still in the object's __init__. + # In that case, add `src` as a stub to be compiled. + # + # We use frames_up=1 to get to the proper surrounding scope. The stack + # will look like: + # 0. createResolutionCallback + # 1. define() + # 2. surrounding scope. + # + # createResolutionCallback internally adds 1 to get us to our frame, then + # we add 1 to get to the proper surrounding scope. + rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1) + ast = torch._C._parse_source_def(src) + self._methods[ast.name().name] = ScriptMethodStub(rcb, ast, None) + + def _replicate_for_data_parallel(self): + return self._actual_script_module._replicate_for_data_parallel() + + def __reduce_package__(self, exporter: PackageExporter): + """Save a ScriptModule inside of a ``torch.package`` archive. + + Called by ``torch.package.PackageExporter``'s Pickler's ``persistent_id`` when + saving TorchScript objects. Performs act of saving a ScriptModule inside of + a ``torch.package`` archive. + + Returns method to load the ScriptModule from a ``torch.package.PackageImporter``'s + Pickler's ``persistent_load`` function. + """ + script_module_id = exporter.get_unique_id() + exporter.script_module_serializer.serialize(self._c, int(script_module_id)) + return (unpackage_script_module, (script_module_id,)) + + class RecursiveScriptModule(ScriptModule): + # XXX: RecursiveScriptModule inherits from ScriptModule for the sole + # reason that it retains the existing isinstance(ScriptModule) + # behavior. + r"""Retain the existing isinstance(ScriptModule) behavior. + + The core data structure in TorchScript is the ``ScriptModule``. It is an + analogue of torch's ``nn.Module`` and represents an entire model as a tree of + submodules. Like normal modules, each individual module in a ``ScriptModule`` can + have submodules, parameters, and methods. In ``nn.Module``\s methods are implemented + as Python functions, but in ``ScriptModule``\s methods are implemented as + TorchScript functions, a statically-typed subset of Python that contains all + of PyTorch's built-in Tensor operations. This difference allows your + ``ScriptModule``\s code to run without the need for a Python interpreter. + + ``ScriptModule``\s should not be created manually, instead use + either :func:`tracing ` or :func:`scripting `. + Tracing and scripting can be applied incrementally and :ref:`composed as necessary `. + + * Tracing records the tensor operations as executed with a set of example inputs and uses these + operations to construct a computation graph. You can use the full dynamic behavior of Python with tracing, + but values other than Tensors and control flow aren't captured in the graph. + + * Scripting inspects the Python code of the model + and compiles it to TorchScript. Scripting allows the use of many `types`_ of values and supports dynamic control flow. 
+        Many, but not all, features of Python are supported by the compiler, so changes to the source code may be necessary.
+        """
+
+        _disable_script_meta = True
+
+        def __init__(self, cpp_module):
+            self.__dict__["_initializing"] = True
+            self._c = cpp_module
+            super().__init__()
+            # Delete the 'training' attribute set up by `Module.__init__`. It
+            # will get set on the underlying cpp module, so we delete it here
+            # to avoid this version shadowing the cpp module version.
+            delattr(self, "training")
+
+        @staticmethod
+        def _construct(cpp_module, init_fn):
+            """
+            Construct a RecursiveScriptModule that's ready for use.
+
+            PyTorch code should use this to construct a RecursiveScriptModule instead
+            of calling `__init__` directly, as it makes sure the
+            object is properly finalized (and in the future, we may take
+            control of how the RecursiveScriptModule instance is created).
+
+            Args:
+                cpp_module: The C++ Module that will hold the actual state of
+                    this RecursiveScriptModule instance.
+                init_fn: Lambda that initializes the RecursiveScriptModule passed to it.
+            """
+            script_module = RecursiveScriptModule(cpp_module)
+            init_fn(script_module)
+
+            # Finalize the ScriptModule: replace the nn.Module state with our
+            # custom implementations and flip the _initializing bit.
+            RecursiveScriptModule._finalize_scriptmodule(script_module)
+            return script_module
+
+        @staticmethod
+        def _finalize_scriptmodule(script_module):
+            script_module._parameters = OrderedDictWrapper(
+                torch._C.ParameterDict(script_module._c)
+            )
+            script_module._buffers = OrderedDictWrapper(
+                torch._C.BufferDict(script_module._c)
+            )
+            script_module._modules = OrderedModuleDict(
+                script_module._c, script_module._modules
+            )
+            script_module._initializing = False
+
+        def _reconstruct(self, cpp_module):
+            """
+            Re-construct an instance of RecursiveScriptModule using an instance of a C++ module.
+
+            Args:
+                cpp_module: The C++ module that this RecursiveScriptModule will be rebuilt around.
+            """
+            self.__init__(cpp_module)  # type: ignore[misc]
+
+            # Copy the concrete type from the C++ module to this ScriptModule.
+            self._concrete_type = torch._C.ConcreteModuleType.from_jit_type(
+                self._c._type()
+            )
+
+            # Copy submodules from the C++ module to this ScriptModule.
+            modules = {}
+            for name, cpp_module in torch._C.ModuleDict(self._c).items():
+                modules[name] = wrap_cpp_module(cpp_module)
+            self._modules = OrderedModuleDict(self._c, modules)  # type: ignore[assignment]
+
+            # Copy parameters and buffers.
+            self._parameters = OrderedDictWrapper(torch._C.ParameterDict(self._c))  # type: ignore[assignment]
+            self._buffers = OrderedDictWrapper(torch._C.BufferDict(self._c))  # type: ignore[assignment]
+
+            # Get rid of the functions from the old C++ module.
+            self.__dict__ = {
+                k: v
+                for k, v in self.__dict__.items()
+                if not isinstance(v, torch._C.ScriptMethod)
+            }
+            self.__dict__["_initializing"] = False
+
+        @property
+        def graph(self):
+            r"""Return a string representation of the internal graph for the ``forward`` method.
+
+            See :ref:`interpreting-graphs` for details.
+            """
+            return self._c._get_method("forward").graph
+
+        @property
+        def inlined_graph(self):
+            r"""
+            Return a string representation of the internal graph for the ``forward`` method.
+
+            This graph will be preprocessed to inline all function and method calls.
+            See :ref:`interpreting-graphs` for details.
+ """ + return self.forward.inlined_graph # type: ignore[attr-defined] + + @property + def code(self): + r""" + Return a pretty-printed representation (as valid Python syntax) of the internal graph for the ``forward`` method. + + See :ref:`inspecting-code` for details. + """ + return self.forward.code # type: ignore[attr-defined] + + @property + def code_with_constants(self): + r"""Return a tuple. + + Returns a tuple of: + + [0] a pretty-printed representation (as valid Python syntax) of + the internal graph for the ``forward`` method. See `code`. + [1] a ConstMap following the CONSTANT.cN format of the output in [0]. + The indices in the [0] output are keys to the underlying constant's values. + + See :ref:`inspecting-code` for details. + """ + r = self.forward.code_with_constants # type: ignore[attr-defined] + return (r[0], ConstMap(r[1])) + + def save(self, f, **kwargs): + r"""Save with a file-like object. + + save(f, _extra_files={}) + + See :func:`torch.jit.save ` which accepts a file-like object. + This function, torch.save(), converts the object to a string, treating it as a path. + DO NOT confuse these two functions when it comes to the 'f' parameter functionality. + """ + return self._c.save(str(f), **kwargs) + + def _save_for_lite_interpreter(self, *args, **kwargs): + r"""Add (or update) the bytecode session to the script model. + + _save_for_lite_interpreter(f) + + The updated model is used + in lite interpreter for mobile applications. + + Args: + f: a string containing a file name. + _extra_files: Map from filename to contents which will be stored as part of 'f'. + + """ + return self._c._save_for_mobile(*args, **kwargs) + + def _save_to_buffer_for_lite_interpreter(self, *args, **kwargs): + return self._c._save_to_buffer_for_mobile(*args, **kwargs) + + def save_to_buffer(self, *args, **kwargs): + return self._c.save_to_buffer(*args, **kwargs) + + def get_debug_state(self, *args, **kwargs): + return self._c.get_debug_state() + + def extra_repr(self): + return f"original_name={self.original_name}" + + def graph_for(self, *args, **kwargs): + return self.forward.graph_for(self, *args, **kwargs) # type: ignore[attr-defined] + + @property + def original_name(self): + if type(self) == str(self._c._type().name()): + return "" + return str(self._c._type().name()) + + def define(self, src): + # We use frames_up=1 to get to the proper surrounding scope. The stack + # will look like: + # 0. createResolutionCallback + # 1. define() + # 2. surrounding scope. + # + # createResolutionCallback internally adds 1 to get us to our frame, then + # we add 1 to get to the proper surrounding scope. + rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1) + self._c._define(self._concrete_type, src, rcb) + + def __getattr__(self, attr): + if "_initializing" not in self.__dict__: + raise RuntimeError( + "ScriptModule has not been initialized, did you forget to call super's init?" + ) + + if self._initializing: + return super().__getattr__(attr) + + # _modules check is before hasattr since modules are included as attributes in _c, + # but we want to get the python wrapper from _modules instead of the raw _c object. 
+ if attr in self._modules: + return self._modules[attr] + elif self._c.hasattr(attr): + return self._c.getattr(attr) + elif self._c._has_method(attr): + script_method = self._c._get_method(attr) + # cache method so future calls do not go through __getattr__ + # to improve invocation performance + self.__dict__[attr] = script_method + return script_method + + return super().__getattr__(attr) + + def __setattr__(self, attr, value): + if self._initializing: + return super().__setattr__(attr, value) + + if attr in self._modules: + self._modules[attr] = value + elif self._c.hasattr(attr): + self._c.setattr(attr, value) + elif ( + hasattr(self, "_concrete_type") + and attr in self._concrete_type.get_constants().keys() + ): + # TODO: we don't have _concrete_type set after load(), and in general we lose constant information. + # We should encode constants as class type attributes (or something) so it persists across save/load. + raise AttributeError( + f"Cannot mutate TorchScript constant value: '{attr}'. Value: '{value}'" + ) + else: + # We allow setting Python attributes on the ScriptModule, for + # when people want to stash some convenience info on it. + # TODO: it's possible that the following is confusing: + # s = torch.jit.script(...) + # s.python_attr = ... + # s.save() <--- this doesn't have `python_attr` + # It's fairly trivial to save enough info to warn in this case. + return super().__setattr__(attr, value) + + def __copy__(self): + return torch.jit._recursive.wrap_cpp_module(copy.copy(self._c)) + + def __deepcopy__(self, memo): + return torch.jit._recursive.wrap_cpp_module(copy.deepcopy(self._c, memo)) + + # Python magic methods do method lookups on an object's class type, instead of looking up + # the method defines on the class instance. In order to continue to expose the magic methods + # of builtin-containers (ModuleList, Sequential, ModuleDict) to Python, we + # define magic methods here as a shim to the correct attribute. + def forward_magic_method(self, method_name, *args, **kwargs): + self_method = getattr(self, method_name) + if getattr(self_method, "__func__", None) == getattr( + RecursiveScriptModule, method_name + ): + raise NotImplementedError + return self_method(*args, **kwargs) + + def __iter__(self): + return self.forward_magic_method("__iter__") + + def __getitem__(self, idx): + return self.forward_magic_method("__getitem__", idx) + + def __len__(self): + return self.forward_magic_method("__len__") + + def __contains__(self, key): + return self.forward_magic_method("__contains__", key) + + # dir is defined by the base nn.Module, so instead of throwing if + # it is not overridden, we call into the nn.Module __dir__ method + def __dir__(self): + self_method = self.__dir__ + if ( + self_method.__func__ # type: ignore[attr-defined] + == _get_function_from_type(RecursiveScriptModule, "__dir__") + ): + return super().__dir__() + return self_method() + + # to resolve bool(value), Python looks if __bool__ is defined then __iter__ + # is defined then returns true for classes. 
Since __iter__() on this + # class throws if it isn't overridden, we define __bool__ to preserve default behavior + def __bool__(self): + self_method = self.__bool__ + if ( + self_method.__func__ # type: ignore[attr-defined] + == _get_function_from_type(RecursiveScriptModule, "__bool__") + ): + return True + return self_method() + + def _replicate_for_data_parallel(self): + # we have to initialize ScriptModule properly so that + # it works with pybind11 + def init_fn(script_module): + # Don't do anything here, we'll initialize the ScriptModule below + return + + return RecursiveScriptModule._construct( + self._c._replicate_for_data_parallel(), init_fn + ) + + # Need to copy all RecursiveScriptModule methods to ScriptModule. + # + # This is because `super().foo()` does not use + # `__getattr__` to look up `foo`. So we need to make each method available on + # the ScriptModule manually. + for name, item in RecursiveScriptModule.__dict__.items(): + if not callable(item) and not isinstance(item, property): + continue + if name.startswith("__") or hasattr(ScriptModule, name): + continue + # We can copy over the implementation wholesale because besides the + # `super()` thing above, ScriptModule behaves exactly like + # RecursiveScriptModule + setattr(ScriptModule, name, item) + + def _get_methods(cls): + import inspect + + # In Python 3 unbound methods are functions, but in Python 2 they are methods + return inspect.getmembers( + cls, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x) + ) + + _compiled_methods_allowlist = { + "forward", + "register_buffer", + "register_parameter", + "register_module", + "add_module", + "_apply", + "apply", + "cuda", + "cpu", + "to", + "type", + "float", + "double", + "half", + "state_dict", + "_save_to_state_dict", + "load_state_dict", + "_load_from_state_dict", + "_named_members", + "parameters", + "named_parameters", + "buffers", + "named_buffers", + "children", + "named_children", + "modules", + "named_modules", + "zero_grad", + "share_memory", + "_get_name", + "extra_repr", + "_slow_forward", + "_tracing_name", + "eval", + "train", + "get_extra_state", + "set_extra_state", + } + + def _make_fail(name): + def fail(self, *args, **kwargs): + raise RuntimeError(name + " is not supported on ScriptModules") + + return fail + + for name, method in _get_methods(torch.nn.Module): + if name.startswith("__") or name.endswith("_call_impl"): + continue + if ( + name not in RecursiveScriptModule.__dict__ + and name not in _compiled_methods_allowlist + ): + setattr(RecursiveScriptModule, method.__name__, _make_fail(name)) + + +else: + # TODO MAKE SURE THAT DISABLING WORKS + class RecursiveScriptClass: # type: ignore[no-redef] + pass + + class ScriptModule(torch.nn.Module): # type: ignore[no-redef] + def __init__(self, arg=None): + super().__init__() + + class RecursiveScriptModule(ScriptModule): # type: ignore[no-redef] + def __init__(self, arg=None): + super().__init__() + + +def call_prepare_scriptable_func_impl(obj, memo): + if not isinstance(obj, torch.nn.Module): + return obj + + obj_id = id(obj) + + # If obj_id is in memo, obj has already been prepared or is being + # prepared in another call up the stack. + if obj_id in memo: + return memo[id(obj)] + + obj = obj.__prepare_scriptable__() if hasattr(obj, "__prepare_scriptable__") else obj # type: ignore[operator] + # Record obj in memo to avoid infinite recursion in the case of cycles in the module + # hierarchy when recursing below. 
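+    # A minimal cycle the memo guards against (hypothetical):
+    #
+    #     a, b = Parent(), Child()
+    #     a.child, b.parent = b, a   # without the memo, the walk below would never terminate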
+    memo[obj_id] = obj
+
+    new_obj_dict = {}
+
+    for name, sub_module in obj.__dict__.items():
+        if name == "_modules":
+            for k, v in sub_module.items():
+                sub_module[k] = call_prepare_scriptable_func_impl(v, memo)
+            new_obj_dict[name] = sub_module
+        elif isinstance(sub_module, torch.nn.Module) and not isinstance(
+            sub_module, ScriptModule
+        ):
+            new_obj_dict[name] = call_prepare_scriptable_func_impl(sub_module, memo)
+        else:
+            new_obj_dict[name] = sub_module
+
+    for k, v in new_obj_dict.items():
+        obj.__dict__[k] = v
+
+    return obj
+
+
+def call_prepare_scriptable_func(obj):
+    memo: Dict[int, torch.nn.Module] = {}
+    return call_prepare_scriptable_func_impl(obj, memo)
+
+
+def create_script_dict(obj):
+    """
+    Create a ``torch._C.ScriptDict`` instance with the data from ``obj``.
+
+    Args:
+        obj (dict): The Python dictionary that is used to initialize the ``ScriptDict``
+            returned by this function.
+
+    Returns:
+        An instance of ``torch._C.ScriptDict`` that has the same data as ``obj``
+        and can be passed between Python and TorchScript with reference semantics and
+        zero copy overhead.
+    """
+    return torch._C.ScriptDict(obj)  # type: ignore[attr-defined]
+
+
+def create_script_list(obj, type_hint=None):
+    """
+    Create a ``torch._C.ScriptList`` instance with the data from ``obj``.
+
+    Args:
+        obj (list): The Python list that is used to initialize the ``ScriptList``
+            returned by this function.
+
+    Returns:
+        An instance of ``torch._C.ScriptList`` that has the same data as ``obj``
+        and can be passed between Python and TorchScript with reference semantics and
+        zero copy overhead.
+    """
+    return torch._C.ScriptList(obj)  # type: ignore[attr-defined]
+
+
+_TOPLEVEL: bool = True
+
+
+def _script_impl(
+    obj,
+    optimize=None,
+    _frames_up=0,
+    _rcb=None,
+    example_inputs: Union[List[Tuple], Dict[Callable, List[Tuple]], None] = None,
+):
+    global type_trace_db
+
+    if optimize is not None:
+        warnings.warn(
+            "`optimize` is deprecated and has no effect. "
+            "Use `with torch.jit.optimized_execution()` instead",
+            FutureWarning,
+            stacklevel=3,
+        )
+
+    # No-op for modules, functions, and class instances that are already scripted
+    if isinstance(obj, RecursiveScriptClass):
+        return obj
+    if isinstance(obj, ScriptModule):
+        return obj
+    if isinstance(obj, ScriptFunction):
+        return obj
+
+    if example_inputs:
+        # If MonkeyType is installed, enable profile-directed type annotation.
+        # Check if example_inputs are defined and generate call traces
+        # for the method by running the eager mode version of the method with
+        # the provided example inputs. This logs all the traces in type_trace_db.
+        type_trace_db = JitTypeTraceStore()
+        if monkeytype_trace:
+            monkeytype_config = JitTypeTraceConfig(type_trace_db)
+            with monkeytype_trace(monkeytype_config):
+                if isinstance(example_inputs, Dict):
+                    # If the obj is an nn.Module or a class, then each method is
+                    # executed with the arguments provided in the example inputs.
+                    # example inputs here will be of type Dict(class.method, (arguments))
+                    # This is used to infer type annotations for those methods
+                    # which are not called directly under the hood of monkeytype.
+                    for module, example_input in example_inputs.items():
+                        for example in example_input:
+                            module(*example)
+                elif isinstance(example_inputs, List):
+                    for examples in example_inputs:
+                        obj(*examples)
+                else:
+                    raise ValueError(
+                        "Error: Unable to infer types. Please format the inputs to type `List[Tuple]`"
+                        " or `Dict[Callable, List[Tuple]]` to be run with MonkeyType."
+                    )
+        else:
+            warnings.warn(
+                "monkeytype is not installed. Please install https://github.com/Instagram/MonkeyType "
+                "to enable profile-directed typing in TorchScript. See "
+                "https://github.com/Instagram/MonkeyType/blob/master/README.rst for installation instructions."
+            )
+
+    if isinstance(obj, torch.nn.Module):
+        obj = call_prepare_scriptable_func(obj)
+        return torch.jit._recursive.create_script_module(
+            obj, torch.jit._recursive.infer_methods_to_compile
+        )
+    else:
+        obj = obj.__prepare_scriptable__() if hasattr(obj, "__prepare_scriptable__") else obj  # type: ignore[operator]
+
+    if isinstance(obj, dict):
+        return create_script_dict(obj)
+    if isinstance(obj, list):
+        return create_script_list(obj)
+
+    if inspect.isclass(obj):
+        qualified_name = _qualified_name(obj)
+        # If this type is an `nn.Module` subclass, they probably meant to pass
+        # an instance instead of the class itself
+        if issubclass(obj, torch.nn.Module):
+            raise RuntimeError(
+                f"Type '{obj}' cannot be compiled since it inherits from nn.Module; pass an instance instead"
+            )
+
+        # Enums are automatically usable in TorchScript; explicitly scripting them
+        # is not necessary, but it is not harmful either.
+        if issubclass(obj, enum.Enum):
+            return obj
+
+        if not _is_new_style_class(obj):
+            raise RuntimeError(
+                "TorchScript classes must be new-style classes. "
+                "Please inherit from 'object'."
+            )
+        if len(obj.mro()) > 2:
+            raise RuntimeError(
+                "TorchScript classes do not support inheritance yet. "
+                "Please directly inherit from 'object'."
+            )
+        if _rcb is None:
+            _rcb = _jit_internal.createResolutionCallbackFromFrame(_frames_up + 1)
+        _compile_and_register_class(obj, _rcb, qualified_name)
+        return obj
+    elif inspect.isfunction(obj) or inspect.ismethod(obj):
+        qualified_name = _qualified_name(obj)
+        # this is a decorated fn, and we need the underlying fn and its rcb
+        if hasattr(obj, "__script_if_tracing_wrapper"):
+            obj = obj.__original_fn  # type: ignore[union-attr]
+            _rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
+
+        # some functions are explicitly marked as not supported in script mode
+        if hasattr(obj, "__script_unsupported"):
+            raise RuntimeError("TorchScript error: " + obj.__script_unsupported)
+
+        _check_directly_compile_overloaded(obj)
+        maybe_already_compiled_fn = _try_get_jit_cached_function(obj)
+        if maybe_already_compiled_fn:
+            maybe_already_compiled_fn._torchdynamo_inline = obj  # type: ignore[attr-defined]
+            return maybe_already_compiled_fn
+        ast = get_jit_def(obj, obj.__name__)
+        if _rcb is None:
+            _rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
+        fn = torch._C._jit_script_compile(
+            qualified_name, ast, _rcb, get_default_args(obj)
+        )
+        # Forward docstrings
+        fn.__doc__ = obj.__doc__
+        # Allow torch.compile() to inline
+        fn._torchdynamo_inline = obj  # type: ignore[attr-defined]
+        _set_jit_function_cache(obj, fn)
+        return fn
+    else:
+        return torch.jit._recursive.create_script_class(obj)
+
+
+def script(
+    obj,
+    optimize=None,
+    _frames_up=0,
+    _rcb=None,
+    example_inputs: Union[List[Tuple], Dict[Callable, List[Tuple]], None] = None,
+):
+    r"""Script the function.
+
+    Scripting a function or ``nn.Module`` will inspect the source code, compile
+    it as TorchScript code using the TorchScript compiler, and return a :class:`ScriptModule` or
+    :class:`ScriptFunction`. TorchScript itself is a subset of the Python language, so not all
+    features in Python work, but we provide enough functionality to compute on
+    tensors and do control-dependent operations.
+    For a complete guide, see the :ref:`language-reference`.
+
+    Scripting a dictionary or list copies the data inside it into a TorchScript instance that can be
+    subsequently passed by reference between Python and TorchScript with zero copy overhead.
+
+    ``torch.jit.script`` can be used as a function for modules, functions, dictionaries, and lists
+    and as a decorator ``@torch.jit.script`` for :ref:`torchscript-classes` and functions.
+
+    Args:
+        obj (Callable, class, or nn.Module): The ``nn.Module``, function, class type,
+            dictionary, or list to compile.
+        example_inputs (Union[List[Tuple], Dict[Callable, List[Tuple]], None]): Provide example inputs
+            to annotate the arguments for a function or ``nn.Module``.
+
+    Returns:
+        If ``obj`` is an ``nn.Module``, ``script`` returns
+        a :class:`ScriptModule` object. The returned :class:`ScriptModule` will
+        have the same set of sub-modules and parameters as the
+        original ``nn.Module``. If ``obj`` is a standalone function,
+        a :class:`ScriptFunction` will be returned. If ``obj`` is a ``dict``, then
+        ``script`` returns an instance of `torch._C.ScriptDict`. If ``obj`` is a ``list``,
+        then ``script`` returns an instance of `torch._C.ScriptList`.
+
+    **Scripting a function**
+        The ``@torch.jit.script`` decorator will construct a :class:`ScriptFunction`
+        by compiling the body of the function.
+
+        Example (scripting a function):
+
+        .. testcode::
+
+            import torch
+
+            @torch.jit.script
+            def foo(x, y):
+                if x.max() > y.max():
+                    r = x
+                else:
+                    r = y
+                return r
+
+            print(type(foo))  # torch.jit.ScriptFunction
+
+            # See the compiled graph as Python code
+            print(foo.code)
+
+            # Call the function using the TorchScript interpreter
+            foo(torch.ones(2, 2), torch.ones(2, 2))
+
+        .. testoutput::
+            :hide:
+
+            ...
+
+    **Scripting a function using example_inputs**
+        Example inputs can be used to annotate the arguments of a function.
+
+        Example (annotating a function before scripting):
+
+        .. testcode::
+
+            import torch
+
+            def test_sum(a, b):
+                return a + b
+
+            # Annotate the arguments to be int
+            scripted_fn = torch.jit.script(test_sum, example_inputs=[(3, 4)])
+
+            print(type(scripted_fn))  # torch.jit.ScriptFunction
+
+            # See the compiled graph as Python code
+            print(scripted_fn.code)
+
+            # Call the function using the TorchScript interpreter
+            scripted_fn(20, 100)
+
+        .. testoutput::
+            :hide:
+
+            ...
+
+    **Scripting an nn.Module**
+        Scripting an ``nn.Module`` by default will compile the ``forward`` method and recursively
+        compile any methods, submodules, and functions called by ``forward``. If an ``nn.Module`` only uses
+        features supported in TorchScript, no changes to the original module code should be necessary. ``script``
+        will construct a :class:`ScriptModule` that has copies of the attributes, parameters, and methods of
+        the original module.
+
+        Example (scripting a simple module with a Parameter):
+
+        .. testcode::
+
+            import torch
+
+            class MyModule(torch.nn.Module):
+                def __init__(self, N, M):
+                    super().__init__()
+                    # This parameter will be copied to the new ScriptModule
+                    self.weight = torch.nn.Parameter(torch.rand(N, M))
+
+                    # When this submodule is used, it will be compiled
+                    self.linear = torch.nn.Linear(N, M)
+
+                def forward(self, input):
+                    output = self.weight.mv(input)
+
+                    # This calls the `forward` method of the `nn.Linear` module, which will
+                    # cause the `self.linear` submodule to be compiled to a `ScriptModule` here
+                    output = self.linear(output)
+                    return output
+
+            scripted_module = torch.jit.script(MyModule(2, 3))
+
+        Example (scripting a module with traced submodules):
+
+        .. testcode::
+
+            import torch
+            import torch.nn as nn
+            import torch.nn.functional as F
+
+            class MyModule(nn.Module):
+                def __init__(self) -> None:
+                    super().__init__()
+                    # torch.jit.trace produces ScriptModules for conv1 and conv2
+                    self.conv1 = torch.jit.trace(nn.Conv2d(1, 20, 5), torch.rand(1, 1, 16, 16))
+                    self.conv2 = torch.jit.trace(nn.Conv2d(20, 20, 5), torch.rand(1, 20, 16, 16))
+
+                def forward(self, input):
+                    input = F.relu(self.conv1(input))
+                    input = F.relu(self.conv2(input))
+                    return input
+
+            scripted_module = torch.jit.script(MyModule())
+
+        To compile a method other than ``forward`` (and recursively compile anything it calls), add
+        the :func:`@torch.jit.export <torch.jit.export>` decorator to the method. To opt out of compilation
+        use :func:`@torch.jit.ignore <torch.jit.ignore>` or :func:`@torch.jit.unused <torch.jit.unused>`.
+
+        Example (an exported and ignored method in a module)::
+
+            import torch
+            import torch.nn as nn
+
+            class MyModule(nn.Module):
+                def __init__(self) -> None:
+                    super().__init__()
+
+                @torch.jit.export
+                def some_entry_point(self, input):
+                    return input + 10
+
+                @torch.jit.ignore
+                def python_only_fn(self, input):
+                    # This function won't be compiled, so any
+                    # Python APIs can be used
+                    import pdb
+                    pdb.set_trace()
+
+                def forward(self, input):
+                    if self.training:
+                        self.python_only_fn(input)
+                    return input * 99
+
+            scripted_module = torch.jit.script(MyModule())
+            print(scripted_module.some_entry_point(torch.randn(2, 2)))
+            print(scripted_module(torch.randn(2, 2)))
+
+        Example (annotating forward of nn.Module using example_inputs)::
+
+            import torch
+            import torch.nn as nn
+            from typing import List, NamedTuple
+
+            class MyModule(NamedTuple):
+                result: List[int]
+
+            class TestNNModule(torch.nn.Module):
+                def forward(self, a) -> MyModule:
+                    result = MyModule(result=a)
+                    return result
+
+            pdt_model = TestNNModule()
+
+            # Runs the pdt_model in eager mode with the inputs provided and annotates the arguments of forward
+            scripted_model = torch.jit.script(pdt_model, example_inputs={pdt_model: [([10, 20],)]})
+
+            # Run the scripted_model with actual inputs
+            print(scripted_model([20]))
+    """
+    if not _enabled:
+        return obj
+    try:
+        global _TOPLEVEL
+        prev = _TOPLEVEL
+        _TOPLEVEL = False
+        ret = _script_impl(
+            obj=obj,
+            optimize=optimize,
+            _frames_up=_frames_up + 1,
+            _rcb=_rcb,
+            example_inputs=example_inputs,
+        )
+
+        if prev:
+            log_torchscript_usage("script", model_id=_get_model_id(ret))
+
+        return ret
+    finally:
+        _TOPLEVEL = prev
+
+
+# overloads are registered in _jit_internal and compiled here so that _overload
+# can be used in nn/functional.py without an import cycle
+
+
+def _check_overload_defaults(impl_defaults, overload_defaults, loc):
+    for name, overload_value in overload_defaults.items():
+        if name not in impl_defaults or impl_defaults[name] != overload_value:
+            raise torch.jit.frontend.FrontendError(
+                loc,
+                "Default parameters on overloads do not affect the runtime so they "
+                "must be equal to the default parameters on the implementation function. Found on "
+                f"parameter {name}",
+            )
+
+
+def _compile_function_with_overload(overload_fn, qual_name, impl_fn):
+    overload_decl = get_jit_def(overload_fn, overload_fn.__name__).decl()
+    overload_signature = torch.jit.annotations.get_signature(
+        overload_fn, None, None, inspect.ismethod(overload_fn)
+    )
+    impl_ast = get_jit_def(impl_fn, impl_fn.__name__)
+    overload_defaults = get_default_args(overload_fn)
+    implementation_defaults = get_default_args(impl_fn)
+    _rcb = _jit_internal.createResolutionCallbackFromClosure(impl_fn)
+    _check_overload_defaults(
+        implementation_defaults, overload_defaults, overload_decl.range()
+    )
+    fn = torch._C._jit_script_compile_overload(
+        qual_name,
+        overload_decl,
+        impl_ast,
+        _rcb,
+        implementation_defaults,
+        overload_signature,
+    )
+    return fn
+
+
+def _get_overloads(obj):
+    # check for cached compiled fns
+    existing_compiled_fns = _try_get_jit_cached_overloads(obj)
+    qual_name = _qualified_name(obj)
+    uncompiled_overloads = _jit_internal._get_fn_overloads(qual_name)
+    if uncompiled_overloads is None:
+        return existing_compiled_fns
+
+    if obj in uncompiled_overloads:
+        raise RuntimeError(
+            _jit_internal.get_overload_no_implementation_error_message("function", obj)
+        )
+
+    compiled_fns = []
+    for overload_fn in uncompiled_overloads:
+        compiled_fns.append(
+            _compile_function_with_overload(overload_fn, qual_name, obj)
+        )
+
+    if existing_compiled_fns:
+        compiled_fns = existing_compiled_fns + compiled_fns
+
+    # cache compilation, remove information stored to do compilation
+    _set_jit_overload_cache(obj, compiled_fns)
+    _jit_internal._clear_fn_overloads(qual_name)
+    return compiled_fns
+
+
+def _check_directly_compile_overloaded(obj):
+    qual_name = _qualified_name(obj)
+    if _jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj):
+        raise RuntimeError(
+            f"Function {qual_name} cannot be directly compiled because it"
+            " is overloaded. It must be used in the context of a function"
+            " where its inputs can determine which overload to call."
+        )
+
+
+def interface(obj):
+    r"""Decorate to annotate classes or modules of different types.
+
+    This decorator can be used to define an interface that annotates
+    classes or modules of different types. It can be used to annotate a submodule
+    or attribute class that could have different types implementing the same
+    interface, or that could be swapped at runtime; or to store a list of modules or
+    classes of varying types.
+
+    It is sometimes used to implement "Callables" - functions or modules that implement
+    an interface but whose implementations differ and which can be swapped out.
+
+    Example:
+        ..
testcode:: + + import torch + from typing import List + + @torch.jit.interface + class InterfaceType: + def run(self, x: torch.Tensor) -> torch.Tensor: + pass + + # implements InterfaceType + @torch.jit.script + class Impl1: + def run(self, x: torch.Tensor) -> torch.Tensor: + return x.relu() + + class Impl2(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.val = torch.rand(()) + + @torch.jit.export + def run(self, x: torch.Tensor) -> torch.Tensor: + return x + self.val + + def user_fn(impls: List[InterfaceType], idx: int, val: torch.Tensor) -> torch.Tensor: + return impls[idx].run(val) + + user_fn_jit = torch.jit.script(user_fn) + + impls = [Impl1(), torch.jit.script(Impl2())] + val = torch.rand(4, 4) + user_fn_jit(impls, 0, val) + user_fn_jit(impls, 1, val) + """ + if not inspect.isclass(obj): + raise RuntimeError("interface must be applied to a class") + if not _is_new_style_class(obj): + raise RuntimeError("TorchScript interfaces must inherit from 'object'") + + # Expected MRO is: + # User module + # torch.nn.modules.module.Module + # object + is_module_interface = issubclass(obj, torch.nn.Module) and len(obj.mro()) == 3 + + if not is_module_interface and len(obj.mro()) > 2: + raise RuntimeError( + "TorchScript interface does not support inheritance yet. " + "Please directly inherit from 'object' or 'nn.Module'." + ) + + qualified_name = _qualified_name(obj) + rcb = _jit_internal.createResolutionCallbackFromFrame(1) + # if this type is a `nn.Module` subclass, generate a module interface type + # instead of a class interface type; a module interface type only compiles + # the user provided methods as part of the interface + ast = get_jit_class_def(obj, obj.__name__) + mangled_classname = torch._C._jit_script_interface_compile( + qualified_name, ast, rcb, is_module_interface + ) + obj.__torch_script_interface__ = mangled_classname + return obj + + +def _recursive_compile_class(obj, loc): + _qual_name = _qualified_name(obj) + # We're starting a new compilation, so update the error call stack in + # case it fails + error_stack = torch._C.CallStack(_qual_name, loc) + rcb = _jit_internal.createResolutionCallbackForClassMethods(obj) + return _compile_and_register_class(obj, rcb, _qual_name) + + +CompilationUnit = torch._C.CompilationUnit +set_module(CompilationUnit, "torch.jit") + + +def pad(s: str, padding: int, offset: int = 0, char: str = " "): + if padding >= len(s): + padding -= len(s) + return "".join([char for _ in range(padding + offset)]) + s + + +class _ScriptProfileColumn: + def __init__(self, header: str, alignment: int = 4, offset: int = 0): + self.header = header + self.alignment = alignment + self.offset = offset + self.rows: Dict[int, Any] = {} + + def add_row(self, lineno: int, value: Any): + self.rows[lineno] = value + + def materialize(self): + max_length = len(self.header) + rows: List[Tuple[int, str]] = [] + for key, value in self.rows.items(): + cell = str(value) + rows.append((key, cell)) + max_length = max(len(cell), max_length) + + if self.alignment > 0: + padding = max_length + self.alignment + padding -= padding % self.alignment + else: + padding = 0 + + rows = [(key, pad(cell, padding, self.offset)) for key, cell in rows] + return pad(self.header, padding, self.offset), rows + + +class _ScriptProfileTable: + def __init__(self, cols: List[_ScriptProfileColumn], source_range: List[int]): + self.cols = cols + self.source_range = source_range + + def dump_string(self): + outputs: List[str] = [] + cells: List[Tuple[str, Dict[int, str]]] = [] + 
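+        # Materialize every column once up front: `materialize()` returns the
+        # padded header plus (line number, padded cell) rows, which are stitched
+        # together into per-source-line rows below.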
header_buffer = "" + for col in self.cols: + header, rows = col.materialize() + header_buffer += header + cells.append((header, dict(rows))) + + outputs.append(header_buffer) + outputs.append(pad("", len(header_buffer), 0, "=")) + for line in self.source_range: + row_buffer = "" + for header, rows in cells: + cell = rows.get(line) + if cell is None: + row_buffer += pad("", len(header)) + else: + row_buffer += cell + outputs.append(row_buffer) + return "\n".join(outputs) + + +class _ScriptProfile: + def __init__(self) -> None: + self.profile = classes.profiling._ScriptProfile() + + def enable(self): + self.profile.enable() + + def disable(self): + self.profile.disable() + + def dump_string(self) -> str: + outputs: List[str] = [] + for source_stats in self.profile._dump_stats(): + source_ref = source_stats.source() + source_lines = source_ref.text().splitlines() + dedent = min(len(line) - len(line.lstrip(" ")) for line in source_lines) + source_lines = [line[dedent:] for line in source_lines] + + start_line = source_ref.starting_lineno() + end_line = start_line + len(source_lines) + source_range = range(start_line, end_line) + lineno = _ScriptProfileColumn("Line #") + hits = _ScriptProfileColumn("Hits") + time_ns = _ScriptProfileColumn("Time (ns)") + line_contents = _ScriptProfileColumn("Line Contents", 0, 1) + stats = source_stats.line_map() + for line in source_range: + lineno.add_row(line, line) + line_contents.add_row(line, source_lines[line - start_line]) + stat = stats.get(line) + if stat is not None: + hits.add_row(line, stat.count()) + time_ns.add_row(line, stat.duration_ns()) + + table = _ScriptProfileTable( + [lineno, hits, time_ns, line_contents], list(source_range) + ) + outputs.append(table.dump_string()) + return "\n\n".join(outputs) + + def dump(self): + print(self.dump_string()) + + +def _unwrap_optional(x): + assert x is not None, "Unwrapping null optional" + return x + + +_register_builtin(_unwrap_optional, "aten::_unwrap_optional") +_register_builtin(_jit_internal.is_scripting, "aten::is_scripting") +_register_builtin(has_torch_function, "aten::has_torch_function") +_register_builtin(has_torch_function_unary, "aten::has_torch_function") +_register_builtin(has_torch_function_variadic, "aten::has_torch_function") diff --git a/janus/lib/python3.10/site-packages/torch/jit/_script.pyi b/janus/lib/python3.10/site-packages/torch/jit/_script.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bafbb63340ec7d2e9c2625b288a709f7b239eb49 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_script.pyi @@ -0,0 +1,296 @@ +# mypy: allow-untyped-defs +# mypy: disable-error-code="type-arg" +from typing import Any, Callable, NamedTuple, overload, TypeVar +from typing_extensions import Never, TypeAlias + +from _typeshed import Incomplete + +import torch +from torch._classes import classes as classes +from torch._jit_internal import _qualified_name as _qualified_name +from torch.jit._builtins import _register_builtin as _register_builtin +from torch.jit._fuser import ( + _graph_for as _graph_for, + _script_method_graph_for as _script_method_graph_for, +) +from torch.jit._monkeytype_config import ( + JitTypeTraceConfig as JitTypeTraceConfig, + JitTypeTraceStore as JitTypeTraceStore, + monkeytype_trace as monkeytype_trace, +) +from torch.jit._recursive import ( + _compile_and_register_class as _compile_and_register_class, + infer_methods_to_compile as infer_methods_to_compile, + ScriptMethodStub as ScriptMethodStub, + wrap_cpp_module as wrap_cpp_module, +) +from 
torch.jit._serialization import validate_map_location as validate_map_location +from torch.jit._state import ( + _enabled as _enabled, + _set_jit_function_cache as _set_jit_function_cache, + _set_jit_overload_cache as _set_jit_overload_cache, + _try_get_jit_cached_function as _try_get_jit_cached_function, + _try_get_jit_cached_overloads as _try_get_jit_cached_overloads, +) +from torch.jit.frontend import ( + get_default_args as get_default_args, + get_jit_class_def as get_jit_class_def, + get_jit_def as get_jit_def, +) +from torch.nn import Module as Module +from torch.overrides import ( + has_torch_function as has_torch_function, + has_torch_function_unary as has_torch_function_unary, + has_torch_function_variadic as has_torch_function_variadic, +) +from torch.package import ( + PackageExporter as PackageExporter, + PackageImporter as PackageImporter, +) +from torch.utils import set_module as set_module + +ScriptFunction = torch._C.ScriptFunction + +type_trace_db: JitTypeTraceStore + +# Defined in torch/csrc/jit/python/script_init.cpp +ResolutionCallback: TypeAlias = Callable[[str], Callable[..., Any]] +_ClassVar = TypeVar("_ClassVar", bound=type) + +def _reduce(cls) -> None: ... + +class Attribute(NamedTuple): + value: Incomplete + type: Incomplete + +def _get_type_trace_db(): ... +def _get_function_from_type(cls, name): ... +def _is_new_style_class(cls): ... + +class OrderedDictWrapper: + _c: Incomplete + def __init__(self, _c) -> None: ... + def keys(self): ... + def values(self): ... + def __len__(self) -> int: ... + def __delitem__(self, k) -> None: ... + def items(self): ... + def __setitem__(self, k, v) -> None: ... + def __contains__(self, k) -> bool: ... + def __getitem__(self, k): ... + +class OrderedModuleDict(OrderedDictWrapper): + _python_modules: Incomplete + def __init__(self, module, python_dict) -> None: ... + def items(self): ... + def __contains__(self, k) -> bool: ... + def __setitem__(self, k, v) -> None: ... + def __getitem__(self, k): ... + +class ScriptMeta(type): + def __init__(cls, name, bases, attrs) -> None: ... + +class _CachedForward: + def __get__(self, obj, cls): ... + +class ScriptWarning(Warning): ... + +def script_method(fn): ... + +class ConstMap: + const_mapping: Incomplete + def __init__(self, const_mapping) -> None: ... + def __getattr__(self, attr): ... + +def unpackage_script_module( + importer: PackageImporter, + script_module_id: str, +) -> torch.nn.Module: ... + +_magic_methods: Incomplete + +class RecursiveScriptClass: + _c: Incomplete + _props: Incomplete + def __init__(self, cpp_class) -> None: ... + def __getattr__(self, attr): ... + def __setattr__(self, attr, value) -> None: ... + def forward_magic_method(self, method_name, *args, **kwargs): ... + def __getstate__(self) -> None: ... + def __iadd__(self, other): ... + +def method_template(self, *args, **kwargs): ... + +class ScriptModule(Module, metaclass=ScriptMeta): + __jit_unused_properties__: Incomplete + def __init__(self) -> None: ... + forward: Callable[..., Any] + def __getattr__(self, attr): ... + def __setattr__(self, attr, value) -> None: ... + def define(self, src): ... + def _replicate_for_data_parallel(self): ... + def __reduce_package__(self, exporter: PackageExporter): ... + # add __jit_unused_properties__ + @property + def code(self) -> str: ... + @property + def code_with_constants(self) -> tuple[str, ConstMap]: ... + @property + def graph(self) -> torch.Graph: ... + @property + def inlined_graph(self) -> torch.Graph: ... + @property + def original_name(self) -> str: ... 
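+# A minimal usage sketch for the stubs above (illustrative only; `MyModule` is a
+# hypothetical nn.Module subclass, not part of this file):
+#
+#     scripted = torch.jit.script(MyModule())  # -> RecursiveScriptModule (below)
+#     print(scripted.code)   # TorchScript source of the compiled forward
+#     print(scripted.graph)  # the underlying torch.Graph IR
+#     scripted.save("module.pt")  # can be reloaded with torch.jit.load
+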
+ +class RecursiveScriptModule(ScriptModule): + _disable_script_meta: bool + _c: Incomplete + def __init__(self, cpp_module) -> None: ... + @staticmethod + def _construct(cpp_module, init_fn): ... + @staticmethod + def _finalize_scriptmodule(script_module) -> None: ... + _concrete_type: Incomplete + _modules: Incomplete + _parameters: Incomplete + _buffers: Incomplete + __dict__: Incomplete + def _reconstruct(self, cpp_module) -> None: ... + def save(self, f, **kwargs): ... + def _save_for_lite_interpreter(self, *args, **kwargs): ... + def _save_to_buffer_for_lite_interpreter(self, *args, **kwargs): ... + def save_to_buffer(self, *args, **kwargs): ... + def get_debug_state(self, *args, **kwargs): ... + def extra_repr(self): ... + def graph_for(self, *args, **kwargs): ... + def define(self, src) -> None: ... + def __getattr__(self, attr): ... + def __setattr__(self, attr, value) -> None: ... + def __copy__(self): ... + def __deepcopy__(self, memo): ... + def forward_magic_method(self, method_name, *args, **kwargs): ... + def __iter__(self): ... + def __getitem__(self, idx): ... + def __len__(self) -> int: ... + def __contains__(self, key) -> bool: ... + def __dir__(self): ... + def __bool__(self) -> bool: ... + def _replicate_for_data_parallel(self): ... + +def _get_methods(cls): ... + +_compiled_methods_allowlist: Incomplete + +def _make_fail(name): ... +def call_prepare_scriptable_func_impl(obj, memo): ... +def call_prepare_scriptable_func(obj): ... +def create_script_dict(obj): ... +def create_script_list(obj, type_hint: Incomplete | None = ...): ... +@overload +def script( + obj: type[Module], + optimize: bool | None = None, + _frames_up: int = 0, + _rcb: ResolutionCallback | None = None, + example_inputs: list[tuple] | dict[Callable, list[tuple]] | None = None, +) -> Never: ... +@overload +def script( # type: ignore[misc] + obj: dict, + optimize: bool | None = None, + _frames_up: int = 0, + _rcb: ResolutionCallback | None = None, + example_inputs: list[tuple] | dict[Callable, list[tuple]] | None = None, +) -> torch.ScriptDict: ... +@overload +def script( # type: ignore[misc] + obj: list, + optimize: bool | None = None, + _frames_up: int = 0, + _rcb: ResolutionCallback | None = None, + example_inputs: list[tuple] | dict[Callable, list[tuple]] | None = None, +) -> torch.ScriptList: ... +@overload +def script( # type: ignore[misc] + obj: Module, + optimize: bool | None = None, + _frames_up: int = 0, + _rcb: ResolutionCallback | None = None, + example_inputs: list[tuple] | dict[Callable, list[tuple]] | None = None, +) -> RecursiveScriptModule: ... +@overload +def script( # type: ignore[misc] + obj: _ClassVar, + optimize: bool | None = None, + _frames_up: int = 0, + _rcb: ResolutionCallback | None = None, + example_inputs: list[tuple] | dict[Callable, list[tuple]] | None = None, +) -> _ClassVar: ... +@overload +def script( # type: ignore[misc] + obj: Callable, + optimize: bool | None = None, + _frames_up: int = 0, + _rcb: ResolutionCallback | None = None, + example_inputs: list[tuple] | dict[Callable, list[tuple]] | None = None, +) -> ScriptFunction: ... +@overload +def script( + obj: Any, + optimize: bool | None = None, + _frames_up: int = 0, + _rcb: ResolutionCallback | None = None, + example_inputs: list[tuple] | dict[Callable, list[tuple]] | None = None, +) -> RecursiveScriptClass: ... 
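+# How a type checker resolves the overloads above (summary, not normative):
+# an nn.Module *type* is rejected (Never), a dict/list becomes torch.ScriptDict /
+# torch.ScriptList, an nn.Module *instance* becomes RecursiveScriptModule, a class
+# is returned as-is, a plain callable becomes ScriptFunction, and any other object
+# becomes RecursiveScriptClass. The final catch-all overload below covers untyped calls.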
+@overload +def script( + obj, + optimize: Incomplete | None = ..., + _frames_up: int = ..., + _rcb: Incomplete | None = ..., + example_inputs: list[tuple] | dict[Callable, list[tuple]] | None = ..., +): ... +def _check_overload_defaults(impl_defaults, overload_defaults, loc) -> None: ... +def _compile_function_with_overload(overload_fn, qual_name, impl_fn): ... +def _get_overloads(obj): ... +def _check_directly_compile_overloaded(obj) -> None: ... +def interface(obj): ... +def _recursive_compile_class(obj, loc): ... + +CompilationUnit: Incomplete + +def pad(s: str, padding: int, offset: int = ..., char: str = ...): ... + +class _ScriptProfileColumn: + header: Incomplete + alignment: Incomplete + offset: Incomplete + rows: Incomplete + def __init__( + self, + header: str, + alignment: int = ..., + offset: int = ..., + ) -> None: ... + def add_row(self, lineno: int, value: Any): ... + def materialize(self): ... + +class _ScriptProfileTable: + cols: Incomplete + source_range: Incomplete + def __init__( + self, + cols: list[_ScriptProfileColumn], + source_range: list[int], + ) -> None: ... + def dump_string(self): ... + +class _ScriptProfile: + profile: Incomplete + def __init__(self) -> None: ... + def enable(self) -> None: ... + def disable(self) -> None: ... + def dump_string(self) -> str: ... + def dump(self) -> None: ... + +def _unwrap_optional(x): ... diff --git a/janus/lib/python3.10/site-packages/torch/jit/_serialization.py b/janus/lib/python3.10/site-packages/torch/jit/_serialization.py new file mode 100644 index 0000000000000000000000000000000000000000..3f1be34bbb3dd50be4ddf39140d47021873dbd79 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_serialization.py @@ -0,0 +1,273 @@ +# mypy: allow-untyped-defs +"""Serialization. + +This module contains functionality for serializing TorchScript modules, notably: + * torch.jit.save + * torch.jit.load + +This is not intended to be imported directly; please use the exposed +functionalities in `torch.jit`. +""" + +import os + +import torch +from torch._jit_internal import _get_model_id +from torch._utils_internal import log_torchscript_usage +from torch.jit._recursive import wrap_cpp_module +from torch.serialization import validate_cuda_device + + +def save(m, f, _extra_files=None): + r""" + Save an offline version of this module for use in a separate process. + + The saved module serializes all of the methods, submodules, parameters, and + attributes of this module. It can be loaded into the C++ API using + ``torch::jit::load(filename)`` or into the Python API with + :func:`torch.jit.load `. + + To be able to save a module, it must not make any calls to native Python + functions. This means that all submodules must be subclasses of + :class:`ScriptModule` as well. + + .. DANGER:: + All modules, no matter their device, are always loaded onto the CPU + during loading. This is different from :func:`torch.load`'s semantics + and may change in the future. + + Args: + m: A :class:`ScriptModule` to save. + f: A file-like object (has to implement write and flush) or a string + containing a file name. + _extra_files: Map from filename to contents which will be stored as part of `f`. + + .. note:: + torch.jit.save attempts to preserve the behavior of some operators + across versions. For example, dividing two integer tensors in + PyTorch 1.5 performed floor division, and if the module + containing that code is saved in PyTorch 1.5 and loaded in PyTorch 1.6 + its division behavior will be preserved. 
+        The same module saved in
+        PyTorch 1.6 will fail to load in PyTorch 1.5, however, since the
+        behavior of division changed in 1.6, and 1.5 does not know how to
+        replicate the 1.6 behavior.
+
+    Example:
+    .. testcode::
+
+        import torch
+        import io
+
+        class MyModule(torch.nn.Module):
+            def forward(self, x):
+                return x + 10
+
+        m = torch.jit.script(MyModule())
+
+        # Save to file
+        torch.jit.save(m, 'scriptmodule.pt')
+        # This line is equivalent to the previous
+        m.save("scriptmodule.pt")
+
+        # Save to io.BytesIO buffer
+        buffer = io.BytesIO()
+        torch.jit.save(m, buffer)
+
+        # Save with extra files
+        extra_files = {'foo.txt': b'bar'}
+        torch.jit.save(m, 'scriptmodule.pt', _extra_files=extra_files)
+    """
+    log_torchscript_usage("save", model_id=_get_model_id(m))
+    if _extra_files is None:
+        _extra_files = {}
+    if isinstance(f, (str, os.PathLike)):
+        m.save(f, _extra_files=_extra_files)
+    else:
+        ret = m.save_to_buffer(_extra_files=_extra_files)
+        f.write(ret)
+
+
+def load(f, map_location=None, _extra_files=None, _restore_shapes=False):
+    r"""
+    Load a :class:`ScriptModule` or :class:`ScriptFunction` previously saved with :func:`torch.jit.save <torch.jit.save>`.
+
+    All previously saved modules, no matter their device, are first loaded onto CPU,
+    and then are moved to the devices they were saved from. If this fails (e.g.
+    because the runtime system doesn't have certain devices), an exception is
+    raised.
+
+    Args:
+        f: a file-like object (has to implement read, readline, tell, and seek),
+            or a string containing a file name
+        map_location (string or torch.device): A simplified version of
+            ``map_location`` in ``torch.load`` used to dynamically remap
+            storages to an alternative set of devices.
+        _extra_files (dictionary of filename to content): The extra
+            filenames given in the map will be loaded and their content
+            stored in the provided map.
+        _restore_shapes (bool): Whether or not to retrace the module on load using stored inputs
+
+    Returns:
+        A :class:`ScriptModule` object.
+
+    Example:
+    .. testcode::
+
+        import torch
+        import io
+
+        torch.jit.load('scriptmodule.pt')
+
+        # Load ScriptModule from io.BytesIO object
+        with open('scriptmodule.pt', 'rb') as f:
+            buffer = io.BytesIO(f.read())
+
+        # Load all tensors to the original device
+        torch.jit.load(buffer)
+
+        # Load all tensors onto CPU, using a device
+        buffer.seek(0)
+        torch.jit.load(buffer, map_location=torch.device('cpu'))
+
+        # Load all tensors onto CPU, using a string
+        buffer.seek(0)
+        torch.jit.load(buffer, map_location='cpu')
+
+        # Load with extra files.
+        extra_files = {'foo.txt': ''}  # values will be replaced with data
+        torch.jit.load('scriptmodule.pt', _extra_files=extra_files)
+        print(extra_files['foo.txt'])
+
+    .. testoutput::
+        :hide:
+
+        ...
+
+    ..
testcleanup:: + + import os + os.remove("scriptmodule.pt") + """ + if isinstance(f, (str, os.PathLike)): + if not os.path.exists(f): # type: ignore[type-var] + raise ValueError(f"The provided filename {f} does not exist") # type: ignore[str-bytes-safe] + if os.path.isdir(f): + raise ValueError(f"The provided filename {f} is a directory") # type: ignore[str-bytes-safe] + + map_location = validate_map_location(map_location) + if _extra_files is None: + _extra_files = {} + + cu = torch._C.CompilationUnit() + if isinstance(f, (str, os.PathLike)): + cpp_module = torch._C.import_ir_module(cu, os.fspath(f), map_location, _extra_files, _restore_shapes) # type: ignore[call-arg] + else: + cpp_module = torch._C.import_ir_module_from_buffer( + cu, f.read(), map_location, _extra_files, _restore_shapes + ) # type: ignore[call-arg] + + # TODO: Pretty sure this approach loses ConstSequential status and such + ret = wrap_cpp_module(cpp_module) + log_torchscript_usage("load", model_id=_get_model_id(ret)) + return ret + + +def validate_map_location(map_location=None): + if isinstance(map_location, str): + map_location = torch.device(map_location) + elif not (map_location is None or isinstance(map_location, torch.device)): + raise ValueError( + "map_location should be either None, string or torch.device, " + "but got type: " + str(type(map_location)) + ) + + if str(map_location).startswith("cuda"): + validate_cuda_device(map_location) + + return map_location + + +def jit_module_from_flatbuffer(f): + if isinstance(f, (str, os.PathLike)): + f = os.fspath(f) + return wrap_cpp_module(torch._C._load_jit_module_from_file(f)) + else: + return wrap_cpp_module(torch._C._load_jit_module_from_bytes(f.read())) + + +def save_jit_module_to_flatbuffer(m, f, _extra_files=None): + r""" + Save an offline version of this module for use in a separate process. + + The saved module serializes all of the methods, submodules, parameters, and + attributes of this module. It can be loaded into the C++ API using + ``torch::jit::load_jit_module_from_file(filename)`` or into the Python API with + :func:`torch.jit.jit_module_from_flatbuffer`. + + To be able to save a module, it must not make any calls to native Python + functions. This means that all submodules must be subclasses of + :class:`ScriptModule` as well. + + .. DANGER:: + All modules, no matter their device, are always loaded onto the CPU + during loading. This is different from :func:`torch.load`'s semantics + and may change in the future. + + Args: + m: A :class:`ScriptModule` to save. + f: A string for file path + + + Example: + .. testcode:: + + import torch + import io + + class MyModule(torch.nn.Module): + def forward(self, x): + return x + 10 + + m = torch.jit.script(MyModule()) + + # Save to file + torch.jit.save_jit_module_to_flatbuffer(m, 'scriptmodule.ff') + """ + extra_files = _extra_files + if extra_files is None: + extra_files = {} + + if isinstance(f, (str, os.PathLike)): + f = os.fspath(f) + torch._C._save_jit_module(m._c, f, extra_files) + else: + s = torch._C._save_jit_module_to_bytes(m._c, extra_files) + f.write(s) + + +def get_flatbuffer_module_info(path_or_file): + r"""Get some information regarding a model file in flatbuffer format. + + Args: + path_or_file: Either str, Path or file like object (BytesIO OK). + If it's str or Path, we will read the file referenced by that + path as Bytes. 
+
+    Returns:
+        A dict with metadata on what that file contains, currently looks like
+        this:
+        {
+            'bytecode_version': 4,  # int
+            'operator_version': 4,  # int
+            'function_names': {
+                '__torch__.___torch_mangle_0.Foo.forward'},  # set
+            'type_names': set(),  # set
+            'opname_to_num_args': {'aten::linear': 3}  # Dict[str, int]
+        }
+    """
+    if isinstance(path_or_file, (str, os.PathLike)):
+        with open(path_or_file, "rb") as f:
+            all_bytes = f.read()
+    else:
+        all_bytes = path_or_file.read()
+    return torch._C._get_module_info_from_flatbuffer(all_bytes)
diff --git a/janus/lib/python3.10/site-packages/torch/jit/_shape_functions.py b/janus/lib/python3.10/site-packages/torch/jit/_shape_functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..56c4a8cb36e3a922589563cc160845f1bc94eb22
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/jit/_shape_functions.py
@@ -0,0 +1,1477 @@
+# mypy: allow-untyped-defs
+import math
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+
+number = Union[int, float]
+# flake8: noqa
+
+###
+# There are generated files that depend on this file
+# To re-generate, please run from the root of the repo:
+# python torchgen/shape_functions/gen_jit_shape_functions.py
+
+# How to test:
+# After regenerating files, compile PyTorch.
+# Then run: ./build/bin/test_jit --gtest_filter=TestShapeGraphLinting.Basic
+# If you have enabled opinfo testing for the op, also run:
+# python test/test_ops_jit.py TestJitCPU.test_variant_consistency_jit_[FAILING_OP]_cpu_float32
+# to reproduce errors from opinfo tests.
+
+# Example PR: https://github.com/pytorch/pytorch/pull/80860/files
+####
+
+import torch
+
+
+def broadcast(a: List[int], b: List[int]):
+    dimsA = len(a)
+    dimsB = len(b)
+    ndim = max(dimsA, dimsB)
+    expandedSizes: List[int] = []
+
+    for i in range(ndim):
+        offset = ndim - 1 - i
+        dimA = dimsA - 1 - offset
+        dimB = dimsB - 1 - offset
+        sizeA = a[dimA] if (dimA >= 0) else 1
+        sizeB = b[dimB] if (dimB >= 0) else 1
+
+        if sizeA != sizeB and sizeA != 1 and sizeB != 1:
+            # TODO: only assertion error is bound in C++ compilation right now
+            raise AssertionError(
+                f"The size of tensor a ({sizeA}) must match the size of tensor b ({sizeB}) at non-singleton dimension {i}"
+            )
+
+        expandedSizes.append(sizeB if sizeA == 1 else sizeA)
+
+    return expandedSizes
+
+
+def broadcast_three(a: List[int], b: List[int], c: List[int]):
+    return broadcast(broadcast(a, b), c)
+
+
+def broadcast_one_three(a: List[int], b: Any, c: List[int]):
+    return broadcast(a, c)
+
+
+def adaptive_avg_pool2d(self: List[int], out: List[int]):
+    assert len(out) == 2
+    assert len(self) == 3 or len(self) == 4
+    for i in range(1, len(self)):
+        assert self[i] != 0
+
+    shape: List[int] = []
+    for i in range(0, len(self) - 2):
+        shape.append(self[i])
+    for elem in out:
+        shape.append(elem)
+    return shape
+
+
+def _copy(self: List[int]):
+    out: List[int] = []
+    for elem in self:
+        out.append(elem)
+    return out
+
+
+def unary(self: List[int]):
+    return _copy(self)
+
+
+def broadcast_inplace(a: List[int], b: List[int]):
+    dimsA = len(a)
+    dimsB = len(b)
+    if dimsB > dimsA:
+        raise AssertionError(
+            f"The dims of tensor b ({dimsB}) must be less than or equal to the dims of tensor a ({dimsA}) "
+        )
+    for dimA in range(dimsA):
+        dimB = dimsB - dimsA + dimA
+        sizeA = a[dimA]
+        sizeB = b[dimB] if (dimB >= 0) else 1
+        if sizeA != sizeB and sizeB != 1:
+            # TODO: only assertion error is bound in C++ compilation right now
+            raise AssertionError(
+                "The size of tensor a ({}) must match the size of tensor b ("
+                "{}) at non-singleton dimension {}".format(sizeA, sizeB, dimA)
+            )
+    return _copy(a)
+
+
+def expand(self: List[int], sizes: List[int]):
+    assert len(sizes) >= len(self)
+    ndim = len(sizes)
+    tensor_dim = len(self)
+    if ndim == 0:
+        return _copy(sizes)
+    out: List[int] = []
+    for i in range(ndim):
+        offset = ndim - 1 - i
+        dim = tensor_dim - 1 - offset
+        size = self[dim] if dim >= 0 else 1
+        targetSize = sizes[i]
+        if targetSize == -1:
+            assert dim >= 0
+            targetSize = size
+        if size != targetSize:
+            assert size == 1
+            size = targetSize
+        out.append(size)
+    return out
+
+
+def expand_one_unused(self: List[int], sizes: List[int], inp0: Any):
+    return expand(self, sizes)
+
+
+def infer_size_impl(shape: List[int], numel: int) -> List[int]:
+    newsize = 1
+    infer_dim: Optional[int] = None
+    for dim in range(len(shape)):
+        if shape[dim] == -1:
+            if infer_dim is not None:
+                raise AssertionError("only one dimension can be inferred")
+            infer_dim = dim
+        elif shape[dim] >= 0:
+            newsize *= shape[dim]
+        else:
+            raise AssertionError("invalid shape dimensions")
+    if not (
+        numel == newsize
+        or (infer_dim is not None and newsize > 0 and numel % newsize == 0)
+    ):
+        raise AssertionError("invalid shape")
+    out = _copy(shape)
+    if infer_dim is not None:
+        out[infer_dim] = numel // newsize
+    return out
+
+
+def numel(sizes: List[int]):
+    numel = 1
+    for elem in sizes:
+        numel *= elem
+    return numel
+
+
+def view(self: List[int], sizes: List[int]):
+    return infer_size_impl(sizes, numel(self))
+
+
+def view_one_unused(self: List[int], sizes: List[int], *, implicit: bool = False):
+    return view(self, sizes)
+
+
+def sum_mean_dim(
+    self: List[int], opt_dims: Optional[List[int]], keep_dim: bool, dt: Any
+):
+    out: List[int] = []
+    if opt_dims is None or len(opt_dims) == 0:
+        dims: List[int] = list(range(len(self)))
+    else:
+        dims = opt_dims
+
+    for idx in range(len(self)):
+        is_mean_dim: bool = False
+        for reduce_dim in dims:
+            if idx == maybe_wrap_dim(reduce_dim, len(self)):
+                is_mean_dim = True
+        if is_mean_dim:
+            if keep_dim:
+                out.append(1)
+        else:
+            out.append(self[idx])
+    return out
+
+
+def max_dim(self: List[int], dim: int, keep_dim: bool):
+    out = sum_mean_dim(self, [dim], keep_dim, None)
+    return out, out
+
+
+# note: python already rounds down towards negative infinity on integer division, special arithmetic not needed
+def div_rtn(x: int, y: int):
+    return x // y
+
+
+def pooling_output_shape_pad_lr(
+    inputSize: int,
+    kernelSize: int,
+    pad_l: int,
+    pad_r: int,
+    stride: int,
+    dilation: int,
+    ceil_mode: bool,
+):
+    outputSize = (
+        div_rtn(
+            inputSize
+            + pad_l
+            + pad_r
+            - dilation * (kernelSize - 1)
+            - 1
+            + (stride - 1 if ceil_mode else 0),
+            stride,
+        )
+        + 1
+    )
+    if ceil_mode:
+        if (outputSize - 1) * stride >= inputSize + pad_l:
+            outputSize = outputSize - 1
+    return outputSize
+
+
+def pooling_output_shape(
+    inputSize: int,
+    kernelSize: int,
+    pad_l: int,
+    stride: int,
+    dilation: int,
+    ceil_mode: bool,
+):
+    assert stride != 0, "stride should not be zero"
+    return pooling_output_shape_pad_lr(
+        inputSize, kernelSize, pad_l, pad_l, stride, dilation, ceil_mode
+    )
+
+
+def pool2d_shape_check(
+    input: List[int],
+    kH: int,
+    kW: int,
+    dH: int,
+    dW: int,
+    padH: int,
+    padW: int,
+    dilationH: int,
+    dilationW: int,
+    nInputPlane: int,
+    inputHeight: int,
+    inputWidth: int,
+    outputHeight: int,
+    outputWidth: int,
+):
+    ndim = len(input)
+    nOutputPlane = nInputPlane
+
+    assert kW > 0 and kH > 0
+    assert dW > 0 and dH > 0
+    assert dilationH > 0 and dilationW > 0
+
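+    # Input must be a non-empty 3D (C, H, W) or 4D (N, C, H, W) shape. Note the
+    # precedence below: `and` binds tighter than `or`, so this accepts either a
+    # 3D shape with dims 0-2 nonzero or a 4D shape with dims 1-3 nonzero.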
+    valid_dims = input[1] != 0 and input[2] != 0
+    assert (
+        ndim == 3
+        and input[0] != 0
+        and valid_dims
+        or (ndim == 4 and valid_dims and input[3] != 0)
+    )
+
+    assert kW // 2 >= padW and kH // 2 >= padH
+    assert outputWidth >= 1 and outputHeight >= 1
+
+
+def max_pool2d(
+    input: List[int],
+    kernel_size: List[int],
+    stride: List[int],
+    padding: List[int],
+    dilation: List[int],
+    ceil_mode: bool,
+):
+    assert (
+        len(kernel_size) == 1 or len(kernel_size) == 2
+    ), "max_pool2d: kernel_size must either be a single int, or a tuple of two ints"
+    kH = kernel_size[0]
+    kW = kH if len(kernel_size) == 1 else kernel_size[1]
+
+    assert (
+        len(stride) == 0 or len(stride) == 1 or len(stride) == 2
+    ), "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints"
+    dH = kH if len(stride) == 0 else stride[0]
+    if len(stride) == 0:
+        dW = kW
+    elif len(stride) == 1:
+        dW = dH
+    else:
+        dW = stride[1]
+
+    assert (
+        len(padding) == 1 or len(padding) == 2
+    ), "max_pool2d: padding must either be a single int, or a tuple of two ints"
+    padH = padding[0]
+    padW = padH if len(padding) == 1 else padding[1]
+
+    assert (
+        len(dilation) == 1 or len(dilation) == 2
+    ), "max_pool2d: dilation must be either a single int, or a tuple of two ints"
+    dilationH = dilation[0]
+    dilationW = dilationH if len(dilation) == 1 else dilation[1]
+
+    assert len(input) == 3 or len(input) == 4
+
+    nbatch = input[-4] if len(input) == 4 else 1
+    nInputPlane = input[-3]
+    inputHeight = input[-2]
+    inputWidth = input[-1]
+
+    outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, dilationH, ceil_mode)
+    outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, dilationW, ceil_mode)
+
+    pool2d_shape_check(
+        input,
+        kH,
+        kW,
+        dH,
+        dW,
+        padH,
+        padW,
+        dilationH,
+        dilationW,
+        nInputPlane,
+        inputHeight,
+        inputWidth,
+        outputHeight,
+        outputWidth,
+    )
+
+    if len(input) == 3:
+        return [nInputPlane, outputHeight, outputWidth]
+    else:
+        return [nbatch, nInputPlane, outputHeight, outputWidth]
+
+
+def max_pool2d_with_indices(
+    input: List[int],
+    kernel_size: List[int],
+    stride: List[int],
+    padding: List[int],
+    dilation: List[int],
+    ceil_mode: bool,
+):
+    out = max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
+    return (out, out)
+
+
+def upsample_nearest2d(
+    input: List[int],
+    output_size: Optional[List[int]],
+    scale_factors: Optional[List[float]],
+):
+    out: List[int] = []
+    out.append(input[0])
+    out.append(input[1])
+
+    if scale_factors is None and output_size is None:
+        assert 0, "Either output_size or scale_factors must be specified"
+
+    if output_size is not None:
+        assert (
+            scale_factors is None
+        ), "Must specify exactly one of output_size and scale_factors"
+        assert len(output_size) == 2
+        out.append(output_size[0])
+        out.append(output_size[1])
+
+    if scale_factors is not None:
+        assert (
+            output_size is None
+        ), "Must specify exactly one of output_size and scale_factors"
+        assert len(scale_factors) == 2
+        out.append(int(input[2] * scale_factors[0]))
+        out.append(int(input[3] * scale_factors[1]))
+
+    return out
+
+
+def mm(self: List[int], mat2: List[int]):
+    assert len(self) == 2, "self must be a matrix"
+    assert len(mat2) == 2, "mat2 must be a matrix"
+
+    assert self[1] == mat2[0]
+    return [self[0], mat2[1]]
+
+
+def dot(self: List[int], tensor: List[int]):
+    assert len(self) == 1 and len(tensor) == 1
+    assert self[0] == tensor[0]
+    out: List[int] = []
+    return out
+
+
+def mv(self: List[int], vec: List[int]):
+    assert len(self) == 2 and len(vec) == 1
assert self[1] == vec[0] + # TODO: return self + return [self[0]] + + +def unsqueeze(li: List[int], dim: int): + dim = maybe_wrap_dim(dim, len(li) + 1) + out = _copy(li) + out.insert(dim, 1) + return out + + +def squeeze_nodim(li: List[int]): + out: List[int] = [] + for i in range(len(li)): + if li[i] != 1: + out.append(li[i]) + return out + + +def squeeze(li: List[int], dim: int): + out: List[int] = [] + wrapped_dim = maybe_wrap_dim(dim, len(li)) + for i in range(len(li)): + if i == wrapped_dim: + if li[i] != 1: + out.append(li[i]) + else: + out.append(li[i]) + return out + + +def squeeze_dims(li: List[int], dims: List[int]): + if len(dims) == 0: + return li + wrapped_dims = _copy(dims) + for i in range(len(dims)): + wrapped_dims[i] = maybe_wrap_dim(wrapped_dims[i], len(li)) + result: List[int] = [] + for i in range(len(li)): + if li[i] == 1: + if i not in wrapped_dims: + result.append(li[i]) + else: + result.append(li[i]) + return result + + +def index_select(self: List[int], dim: int, index: List[int]): + dim = maybe_wrap_dim(dim, len(self)) + numel = multiply_integers(index) + assert len(index) <= 1 + assert dim == 0 or dim < len(self) + result_size: List[int] = [] + for i in range(len(self)): + if dim == i: + result_size.append(numel) + else: + result_size.append(self[i]) + return result_size + + +def embedding( + weight: List[int], + indices: List[int], + padding_idx: int = -1, + scale_grad_by_freq: bool = False, + sparse: bool = False, +): + assert len(weight) == 2 + if len(indices) == 1: + return index_select(weight, 0, indices) + size = _copy(indices) + size.append(weight[1]) + return size + + +def max_int(): + return 9223372036854775807 + + +def slice( + self: List[int], dim: int, start: Optional[int], end: Optional[int], step: int +): + ndim = len(self) + assert ndim != 0 + dim = maybe_wrap_dim(dim, ndim) + start_val = start if start is not None else 0 + end_val = end if end is not None else max_int() + assert step > 0 + if start_val == max_int(): + start_val = 0 + if start_val < 0: + start_val += self[dim] + if end_val < 0: + end_val += self[dim] + if start_val < 0: + start_val = 0 + elif start_val > self[dim]: + start_val = self[dim] + if end_val < start_val: + end_val = start_val + elif end_val >= self[dim]: + end_val = self[dim] + slice_len = end_val - start_val + out = _copy(self) + out[dim] = (slice_len + step - 1) // step + return out + + +def check_cat_no_zero_dim(tensors: List[List[int]]): + for tensor in tensors: + assert len(tensor) > 0 + + +def legacy_cat_wrap_dim(dim: int, tensor_sizes: List[List[int]]): + out_dim: Optional[int] = None + for size in tensor_sizes: + if not (len(size) == 1 and size[0] == 0): + if out_dim is None: + out_dim = maybe_wrap_dim(dim, len(size)) + if out_dim is None: + out_dim = dim + return out_dim + + +def should_skip(tensor: List[int]): + return numel(tensor) == 0 and len(tensor) == 1 + + +def check_cat_shape_except_dim( + first: List[int], second: List[int], dimension: int, index: int +): + first_dims = len(first) + second_dims = len(second) + assert first_dims == second_dims, "Tensors must have same number of dimensions" + for dim in range(0, first_dims): + if dim != dimension: + assert ( + first[dim] == second[dim] + ), "Sizes of tensors must match except in dimension" + + +def cat(tensors: List[List[int]], dim: int): + check_cat_no_zero_dim(tensors) + dim = legacy_cat_wrap_dim(dim, tensors) + assert len(tensors) > 0 + not_skipped_tensor: Optional[List[int]] = None + for tensor in tensors: + if not should_skip(tensor): + 
not_skipped_tensor = tensor + if not_skipped_tensor is None: + return [0] + + cat_dim_size = 0 + + for i in range(len(tensors)): + tensor = tensors[i] + if not should_skip(tensor): + check_cat_shape_except_dim(not_skipped_tensor, tensor, dim, i) + cat_dim_size = cat_dim_size + tensor[dim] + + result_size = _copy(not_skipped_tensor) + result_size[dim] = cat_dim_size + return result_size + + +def stack(tensors: List[List[int]], dim: int): + unsqueezed_tensors: List[List[int]] = [] + for tensor in tensors: + unsqueezed = unsqueeze(tensor, dim) + unsqueezed_tensors.append(unsqueezed) + return cat(unsqueezed_tensors, dim) + + +def select(self: List[int], dim: int, index: int): + ndim = len(self) + assert ndim != 0 + dim = maybe_wrap_dim(dim, ndim) + size = self[dim] + assert not (index < -size or index >= size) + if index < 0: + index += size + out: List[int] = [] + for i in range(ndim): + if i != dim: + out.append(self[i]) + return out + + +def matmul(tensor1: List[int], tensor2: List[int]): + dim_tensor1 = len(tensor1) + dim_tensor2 = len(tensor2) + if dim_tensor1 == 1 and dim_tensor2 == 1: + return dot(tensor1, tensor2) + elif dim_tensor1 == 2 and dim_tensor2 == 1: + return mv(tensor1, tensor2) + elif dim_tensor1 == 1 and dim_tensor2 == 2: + return squeeze(mm(unsqueeze(tensor1, 0), tensor2), 0) + elif dim_tensor1 == 2 and dim_tensor2 == 2: + return mm(tensor1, tensor2) + elif dim_tensor1 >= 1 and dim_tensor2 >= 1: + # We are multiplying b1 x n x m1 by x2 x m2 x p (where b1 can be a list); + # we track m1 vs m2 separately even though they must match for nicer error messages + n = tensor1[-2] if dim_tensor1 > 1 else 1 + m1 = tensor1[-1] + batch_tensor1: List[int] = [] + # TODO: handling of slice + for i in range(dim_tensor1 - 2): + batch_tensor1.append(tensor1[i]) + m2 = tensor2[-1] if dim_tensor2 > 1 else 1 + p = tensor2[-1] + batch_tensor2: List[int] = [] + # TODO: handling of slice + for i in range(dim_tensor2 - 2): + batch_tensor2.append(tensor2[i]) + + # expand the batch portion (i.e. cut off matrix dimensions and expand rest) + expand_batch_portion = broadcast(batch_tensor1, batch_tensor2) + + # todo: copy ? 
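+        # The result is the broadcast batch dims followed by n (kept only when
+        # tensor1 is at least 2-D) and p (kept only when tensor2 is at least 2-D),
+        # mirroring how torch.matmul squeezes away the matrix dims of 1-D operands.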
+ output_shape = expand_batch_portion + if dim_tensor1 > 1: + output_shape.append(n) + + if dim_tensor2 > 1: + output_shape.append(p) + + return output_shape + else: + assert False, "both arguments to matmul need to be at least 1D" + + +def t(self: List[int]): + assert len(self) <= 2 + self_len = len(self) + if self_len == 0: + out: List[int] = [] + return out + elif self_len == 1: + return [self[0]] + else: + return [self[1], self[0]] + + +def transpose(self: List[int], dim0: int, dim1: int): + ndims = len(self) + dim0 = maybe_wrap_dim(dim0, ndims) + dim1 = maybe_wrap_dim(dim1, ndims) + if dim0 == dim1: + return _copy(self) + out: List[int] = [] + for i in range(ndims): + if i == dim0: + out.append(self[dim1]) + elif i == dim1: + out.append(self[dim0]) + else: + out.append(self[i]) + return out + + +def linear(input: List[int], weight: List[int], bias: Optional[List[int]]): + out = matmul(input, t(weight)) + if bias is not None: + assert broadcast(bias, out) == out + return out + + +def addmm(self: List[int], mat1: List[int], mat2: List[int], beta: Any, alpha: Any): + return broadcast(self, mm(mat1, mat2)) + + +def check_non_negative(array: List[int]) -> bool: + # TODO: look into rewriting with early return and getting loop unrolling to fire + non_negative = False + for val in array: + if val < 0: + non_negative = True + return non_negative + + +def check_shape_forward( + input: List[int], + weight_sizes: List[int], + bias: Optional[List[int]], + stride: List[int], + padding: List[int], + dilation: List[int], + groups: int, +): + k = len(input) + weight_dim = len(weight_sizes) + + # TODO: assertions could be expanded with the error messages + assert not check_non_negative(padding) + assert not check_non_negative(stride) + + assert weight_dim == k + assert weight_sizes[0] >= groups + assert (weight_sizes[0] % groups) == 0 + # only handling not transposed + assert input[1] == weight_sizes[1] * groups + assert bias is None or (len(bias) == 1 and bias[0] == weight_sizes[0]) + + for i in range(2, k): + assert (input[i] + 2 * padding[i - 2]) >= ( + dilation[i - 2] * (weight_sizes[i] - 1) + 1 + ) + + # this is not handling transposed convolution yet + + +def conv_output_size( + input_size: List[int], + weight_size: List[int], + bias: Optional[List[int]], + stride: List[int], + padding: List[int], + dilation: List[int], + groups: int, +): + check_shape_forward( + input_size, weight_size, bias, stride, padding, dilation, groups + ) + + has_dilation = len(dilation) > 0 + dim = len(input_size) + output_size: List[int] = [] + input_batch_size_dim = 0 + weight_output_channels_dim = 0 + output_size.append(input_size[input_batch_size_dim]) + output_size.append(weight_size[weight_output_channels_dim]) + + for d in range(2, dim): + dilation_ = dilation[d - 2] if has_dilation else 1 + kernel = dilation_ * (weight_size[d] - 1) + 1 + output_size.append( + (input_size[d] + (2 * padding[d - 2]) - kernel) // stride[d - 2] + 1 + ) + return output_size + + +def conv1d( + input: List[int], + weight: List[int], + bias: Optional[List[int]], + stride: List[int], + padding: List[int], + dilation: List[int], + groups: int, +): + assert len(weight) == 3 + assert len(input) == 3 + return conv_output_size(input, weight, bias, stride, padding, dilation, groups) + + +def conv2d( + input: List[int], + weight: List[int], + bias: Optional[List[int]], + stride: List[int], + padding: List[int], + dilation: List[int], + groups: int, +): + assert len(weight) == 4 + assert len(input) == 4 + return conv_output_size(input, weight, 
bias, stride, padding, dilation, groups)
+
+
+def conv_backwards(
+    grad_output: List[int],
+    input: List[int],
+    weight: List[int],
+    biases: Optional[List[int]],
+):
+    # The bias gradient is always generated regardless of whether biases is supplied
+    return _copy(input), _copy(weight), [grad_output[1]]
+
+
+def conv_transpose2d_input(
+    input: List[int],
+    weight: List[int],
+    bias: Optional[List[int]] = None,
+    stride: Optional[List[int]] = None,
+    padding: Optional[List[int]] = None,
+    output_padding: Optional[List[int]] = None,
+    groups: int = 1,
+    dilation: Optional[List[int]] = None,
+) -> List[int]:
+    if stride is None:
+        stride = [1, 1]
+    if padding is None:
+        padding = [0, 0]
+    if output_padding is None:
+        output_padding = [0, 0]
+    if dilation is None:
+        dilation = [1, 1]
+    has_dilation = len(dilation) > 0
+    dim = len(input)
+    output_size: List[int] = []
+    input_batch_size_dim = 0
+    weight_output_channels_dim = 1
+    output_size.append(input[input_batch_size_dim])
+    output_size.append(weight[weight_output_channels_dim] * groups)
+
+    for d in range(2, dim):
+        dilation_ = dilation[d - 2] if has_dilation else 1
+        kernel = dilation_ * (weight[d] - 1)
+        output_size.append(
+            (input[d] - 1) * stride[d - 2]
+            - 2 * padding[d - 2]
+            + kernel
+            + output_padding[d - 2]
+            + 1
+        )
+    return output_size
+
+
+def conv_forwards(
+    input: List[int],
+    weight: List[int],
+    bias: Optional[List[int]],
+    stride: List[int],
+    padding: List[int],
+    dilation: List[int],
+    transposed: bool,
+    output_padding: List[int],
+    groups: int,
+) -> List[int]:
+    has_dilation = len(dilation) > 0
+    has_output_padding = len(output_padding) > 0
+    dim = len(input)
+    output_size: List[int] = []
+    input_batch_size_dim = 0
+    weight_output_channels_dim = 1 if transposed else 0
+    output_size.append(input[input_batch_size_dim])
+    if transposed:
+        output_size.append(weight[weight_output_channels_dim] * groups)
+    else:
+        output_size.append(weight[weight_output_channels_dim])
+
+    for d in range(2, dim):
+        dilation_ = dilation[d - 2] if has_dilation else 1
+        output_padding_ = output_padding[d - 2] if has_output_padding else 0
+        if transposed:
+            kernel = dilation_ * (weight[d] - 1)
+            output_size.append(
+                (input[d] - 1) * stride[d - 2]
+                - 2 * padding[d - 2]
+                + kernel
+                + output_padding_
+                + 1
+            )
+        else:
+            kernel = dilation_ * (weight[d] - 1) + 1
+            output_size.append(
+                (input[d] + (2 * padding[d - 2]) - kernel) // stride[d - 2] + 1
+            )
+    return output_size
+
+
+def _conv_forwards(
+    input: List[int],
+    weight: List[int],
+    bias: Optional[List[int]],
+    stride: List[int],
+    padding: List[int],
+    dilation: List[int],
+    transposed: bool,
+    output_padding: List[int],
+    groups: int,
+    benchmark: bool,
+    deterministic: bool,
+    cudnn_enabled: bool,
+    allow_tf32: bool,
+) -> List[int]:
+    return conv_forwards(
+        input,
+        weight,
+        bias,
+        stride,
+        padding,
+        dilation,
+        transposed,
+        output_padding,
+        groups,
+    )
+
+
+def batch_norm(
+    input: List[int],
+    weight: Optional[List[int]],
+    bias: Optional[List[int]],
+    running_mean: Optional[List[int]],
+    running_var: Optional[List[int]],
+    training: bool,
+    momentum: float,
+    eps: float,
+    cudnn_enabled: bool,
+):
+    out: List[int] = []
+    for elem in input:
+        out.append(elem)
+    return out
+
+
+def conv3d(
+    input: List[int],
+    weight: List[int],
+    bias: Optional[List[int]],
+    stride: List[int],
+    padding: List[int],
+    dilation: List[int],
+    groups: int,
+):
+    assert len(weight) == 5
+    assert len(input) == 5
+    return conv_output_size(input, weight, bias, stride, padding, dilation, groups)
+
+
+def
maybe_wrap_dim(dim: int, dim_post_expr: int, wrap_scalar: bool = True): + if dim_post_expr <= 0: + assert wrap_scalar + dim_post_expr = 1 + min = -dim_post_expr + max = dim_post_expr - 1 + assert not (dim < min or dim > max) + if dim < 0: + dim += dim_post_expr + return dim + + +def zero_dim_tensor(input: Any): + out: List[int] = [] + return out + + +def multiply_integers(li: List[int]): + out = 1 + for elem in li: + out = out * elem + return out + + +def arange_end(end: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any): + assert end >= 0 + return [int(math.ceil(end))] + + +def arange_start( + start: number, end: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any +): + assert end >= 0 + assert end >= start + return [int(math.ceil(end - start))] + + +def arange_start_step( + start: number, end: number, step: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any +): + assert step != 0 + if step < 0: + assert start >= end + else: + assert end >= start + return [int(math.ceil((end - start) / step))] + + +def permute(input: List[int], dims: List[int]): + assert len(input) == len(dims) + ndim = len(dims) + seen_dims: List[int] = [] + newSizes: List[int] = [] + for i in range(ndim): + dim = maybe_wrap_dim(dims[i], ndim) + seen_dims.append(dim) + newSizes.append(input[dim]) + for i in range(1, ndim): + for j in range(i): + assert seen_dims[i] != seen_dims[j] + return newSizes + + +def movedim(self: List[int], source: List[int], destination: List[int]) -> List[int]: + self_dim = len(self) + if self_dim <= 1: + return self + normalized_src: List[int] = [] + normalized_dst: List[int] = [] + for i in range(len(source)): + normalized_src.append(maybe_wrap_dim(source[i], self_dim)) + normalized_dst.append(maybe_wrap_dim(destination[i], self_dim)) + order = [-1 for i in range(self_dim)] + src_dims = [i for i in range(self_dim)] + dst_dims = [i for i in range(self_dim)] + + for i in range(len(source)): + order[normalized_dst[i]] = normalized_src[i] + src_dims[normalized_src[i]] = -1 + dst_dims[normalized_dst[i]] = -1 + + source_dims: List[int] = [] + destination_dims: List[int] = [] + for ele in src_dims: + if ele != -1: + source_dims.append(ele) + for ele in dst_dims: + if ele != -1: + destination_dims.append(ele) + + rest_dim = self_dim - len(source) + for i in range(rest_dim): + order[destination_dims[i]] = source_dims[i] + return permute(self, order) + + +def flatten(input: List[int], start_dim: int, end_dim: int): + start_dim = maybe_wrap_dim(start_dim, len(input)) + end_dim = maybe_wrap_dim(end_dim, len(input)) + assert start_dim <= end_dim + if len(input) == 0: + return [1] + if start_dim == end_dim: + # TODO: return self + out: List[int] = [] + for elem in input: + out.append(elem) + return out + slice_numel = 1 + for i in range(start_dim, end_dim + 1): + slice_numel *= input[i] + # TODO: use slicing when slice optimization has landed + # slice_numel = multiply_integers(input[start_dim:end_dim - start_dim + 1]) + shape: List[int] = [] + for i in range(start_dim): + shape.append(input[i]) + shape.append(slice_numel) + for i in range(end_dim + 1, len(input)): + shape.append(input[i]) + return shape + + +def nonzero_lower_bound(input: List[int]): + return [0, len(input)] + + +def nonzero_upper_bound(input: List[int]): + return [numel(input), len(input)] + + +def _reduce_along_dim(self: List[int], dim: int, keepdim: bool): + dim = maybe_wrap_dim(dim, len(self)) + out: List[int] = [] + for i, self_dim in enumerate(self): + if i == dim: + if keepdim: + out.append(1) + else: + out.append(self_dim) + 
return out
+
+
+def argmax(
+    self: List[int], dim: Optional[int] = None, keepdim: bool = False
+) -> List[int]:
+    if dim is None:
+        return []
+    return _reduce_along_dim(self, dim, keepdim)
+
+
+def bmm(self: List[int], mat2: List[int]) -> List[int]:
+    assert len(self) == 3, "bmm only supports 3D tensors"
+    assert len(mat2) == 3, "bmm only supports 3D tensors"
+    assert self[0] == mat2[0], "mismatching batch dimension"
+    assert self[2] == mat2[1], "mismatching contracting dimension"
+    return [self[0], self[1], mat2[2]]
+
+
+def _shape_as_tensor(self: List[int]) -> List[int]:
+    return [len(self)]
+
+
+def topk(self: List[int], k: int, dim: int = -1) -> Tuple[List[int], List[int]]:
+    if len(self) == 0:
+        result: List[int] = []
+    else:
+        assert (
+            k <= self[dim]
+        ), f"k ({k}) is too big for dimension {dim} of size {self[dim]}"
+        result = _copy(self)
+        result[dim] = k
+    return result, result
+
+
+def nll_loss_forward(
+    self: List[int], target: List[int], weight: Optional[List[int]], reduction: int
+) -> Tuple[List[int], List[int]]:
+    # This is taken shamelessly from the meta function in LossNLL.cpp
+    self_dim = len(self)
+    target_dim = len(target)
+    assert 0 < self_dim <= 2
+    assert target_dim <= 1
+    no_batch_dim = self_dim == 1 and target_dim == 0
+    assert no_batch_dim or (self[0] == target[0])
+    n_classes = self[-1]
+    scalar_shape: List[int] = []
+    assert weight is None or (len(weight) == 1 and weight[0] == n_classes)
+    if reduction == 0 and self_dim == 2:
+        reduction_shape = [self[0]]
+    else:
+        reduction_shape = scalar_shape
+    return reduction_shape, scalar_shape
+
+
+def native_layer_norm(
+    input: List[int], normalized_shape: List[int]
+) -> Tuple[List[int], List[int], List[int]]:
+    reduction_shape: List[int] = []
+    num_unreduced_dimensions = len(input) - len(normalized_shape)
+    assert num_unreduced_dimensions >= 0
+    for i in range(num_unreduced_dimensions):
+        reduction_shape.append(input[i])
+    for i in range(num_unreduced_dimensions, len(input)):
+        reduction_shape.append(1)
+    return _copy(input), reduction_shape, reduction_shape
+
+
+def native_batch_norm(
+    input: List[int],
+    weight: Optional[List[int]],
+    bias: Optional[List[int]],
+    running_mean: Optional[List[int]],
+    running_var: Optional[List[int]],
+    training: bool,
+) -> Tuple[List[int], List[int], List[int]]:
+    if training:
+        _size = [input[1]]
+    else:
+        _size = [0]
+    return _copy(input), _size, _size
+
+
+def _batch_norm_with_update(
+    input: List[int],
+    weight: Optional[List[int]],
+    bias: Optional[List[int]],
+    running_mean: Optional[List[int]],
+    running_var: Optional[List[int]],
+) -> Tuple[List[int], List[int], List[int], List[int]]:
+    _size = [input[1]]
+    return _copy(input), _size, _size, [0]
+
+
+def cross_entropy_loss(
+    self: List[int],
+    target: List[int],
+    weight: Optional[List[int]] = None,
+    reduction: int = 1,
+    ignore_index: int = -100,
+    label_smoothing: float = 0.0,
+) -> List[int]:
+    result_shape = nll_loss_forward(self, target, weight, reduction)[0]
+    return result_shape
+
+
+"""
+Currently deferring the enabling of this, as part of the proposal to suspend
+adding ops.
+There are currently cases in the SSA opinfo tests where this is called with
+unexpected values (e.g., a list of two ints; see the first opinfo test). The
+behavior of index is significantly dependent on the inputs.
+
+This could be an error with how we are matching up shape functions, or that this
+function needs to just implement everything. 
+ +def index_Tensor(self: List[int], indices: List[Optional[List[int]]]) -> List[int]: + assert len(indices) <= len(self), "More indices than dimensions to index" + broadcasted_shape: List[int] = [] + for index_tensor_shape in indices: + if index_tensor_shape is not None: + broadcasted_shape = broadcast(broadcasted_shape, index_tensor_shape) + return broadcasted_shape +""" + +ScriptFn = torch._C.ScriptFunction +shape_compute_graph_mapping: Dict[str, ScriptFn] = {} +bounded_compute_graph_mapping: Dict[str, Tuple[ScriptFn, ScriptFn]] = {} +script_func_map: Dict[Callable, ScriptFn] = {} + + +def process_func(func: Callable): + if func not in script_func_map: + scripted_func = torch.jit.script(func) + + torch._C._jit_pass_inline(scripted_func.graph) + + for _ in range(2): + torch._C._jit_pass_peephole(scripted_func.graph) + torch._C._jit_pass_constant_propagation(scripted_func.graph) + + script_func_map[func] = scripted_func + return script_func_map[func] + + +def add_shape_compute_mapping(operator_schema: str, func: Callable): + global shape_compute_graph_mapping + + shape_compute_graph_mapping[operator_schema] = process_func(func) + + +def add_bounded_compute_mapping( + operator_schema: str, lower_bound_func: Callable, upper_bound_func: Callable +): + # Adds a shape compute function for both upper and lower bounds + fns = (process_func(lower_bound_func), process_func(upper_bound_func)) + bounded_compute_graph_mapping[operator_schema] = fns + + +add_shape_compute_mapping( + "aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)", + unary, +) +add_shape_compute_mapping( + "aten::rsub.Tensor(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", unary +) +add_shape_compute_mapping( + "aten::dropout(Tensor input, float p, bool train) -> Tensor", unary +) +add_shape_compute_mapping( + "aten::adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor", + adaptive_avg_pool2d, +) +add_shape_compute_mapping( + "prim::NumToTensor.Scalar(Scalar a) -> Tensor", zero_dim_tensor +) +add_shape_compute_mapping("prim::NumToTensor.bool(bool a) -> Tensor", zero_dim_tensor) +add_shape_compute_mapping( + "aten::zeros(int[] size, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor)", + unary, +) +add_shape_compute_mapping( + "aten::to.dtype(Tensor(a) self, int dtype, bool non_blocking=False, bool copy=False, int? memory_format=None) -> (Tensor(a))", + unary, +) +add_shape_compute_mapping( + "aten::arange(Scalar end, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor)", + arange_end, +) +add_shape_compute_mapping( + "aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", + arange_start, +) +add_shape_compute_mapping( + "aten::arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", + arange_start_step, +) +add_shape_compute_mapping("aten::squeeze(Tensor(a) self) -> Tensor(a)", squeeze_nodim) +add_shape_compute_mapping( + "aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)", squeeze +) +add_shape_compute_mapping( + "aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)", squeeze_dims +) +add_shape_compute_mapping( + "aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)", unsqueeze +) +add_shape_compute_mapping( + "aten::slice.Tensor(Tensor(a) self, int dim=0, int? start=None, int? 
end=None, int step=1) -> Tensor(a)", + slice, +) +add_shape_compute_mapping( + "aten::select.int(Tensor(a) self, int dim, int index) -> Tensor(a)", select +) +add_shape_compute_mapping( + "aten::index_select(Tensor self, int dim, Tensor index) -> Tensor", index_select +) +add_shape_compute_mapping( + "aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, " + "float eps=1e-05, bool cudnn_enable=True) -> Tensor", + unary, +) +add_shape_compute_mapping( + "aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", unary +) +add_shape_compute_mapping( + "aten::_no_grad_embedding_renorm_(Tensor weight, Tensor input, float max_norm, float norm_type) -> Tensor", + unary, +) +add_shape_compute_mapping( + "aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)", + unary, +) +add_shape_compute_mapping( + "aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor", + embedding, +) +add_shape_compute_mapping("aten::mm(Tensor self, Tensor mat2) -> Tensor", mm) +add_shape_compute_mapping("aten::dot(Tensor self, Tensor tensor) -> Tensor", dot) +add_shape_compute_mapping("aten::mv(Tensor self, Tensor vec) -> Tensor", mv) +add_shape_compute_mapping("aten::matmul(Tensor self, Tensor other) -> Tensor", matmul) +add_shape_compute_mapping( + "aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor", linear +) +add_shape_compute_mapping( + "aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", + max_pool2d, +) +add_shape_compute_mapping( + "aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", + max_pool2d_with_indices, +) +add_shape_compute_mapping("aten::t(Tensor(a) self) -> Tensor(a)", t) +add_shape_compute_mapping( + "aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", transpose +) +add_shape_compute_mapping( + "aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor", + conv1d, +) +add_shape_compute_mapping( + "aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor", + conv2d, +) +add_shape_compute_mapping( + "aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor", + batch_norm, +) +add_shape_compute_mapping( + "aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor", + conv3d, +) +add_shape_compute_mapping( + "aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, int[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", + conv_backwards, +) +add_shape_compute_mapping( + "aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor", + conv_forwards, +) +add_shape_compute_mapping( + "aten::_convolution(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor", + _conv_forwards, +) +add_shape_compute_mapping( + "aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor", + conv_transpose2d_input, +) +add_shape_compute_mapping( + "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)", + flatten, +) +add_shape_compute_mapping("aten::cat(Tensor[] tensors, int dim=0) -> Tensor", cat) +add_shape_compute_mapping("aten::stack(Tensor[] tensors, int dim=0) -> Tensor", stack) +add_shape_compute_mapping( + "aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)", permute +) +add_shape_compute_mapping( + "aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)", + movedim, +) +add_shape_compute_mapping("aten::view(Tensor(a) self, int[] size) -> Tensor(a)", view) +add_shape_compute_mapping( + "aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)", expand +) +add_shape_compute_mapping( + "aten::expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a)", + expand_one_unused, +) +add_shape_compute_mapping( + "aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", + sum_mean_dim, +) +add_shape_compute_mapping( + "aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", + sum_mean_dim, +) +add_shape_compute_mapping( + "aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", + max_dim, +) +add_shape_compute_mapping( + "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor", zero_dim_tensor +) +add_shape_compute_mapping( + "aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor", zero_dim_tensor +) +add_shape_compute_mapping( + "aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", + addmm, +) +add_shape_compute_mapping( + "aten::upsample_nearest2d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> (Tensor)", + upsample_nearest2d, +) +add_shape_compute_mapping( + "aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor", + unary, +) +add_shape_compute_mapping( + "aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor", + unary, +) +add_shape_compute_mapping("aten::dequantize(Tensor self) -> Tensor", unary) +add_shape_compute_mapping( + "quantized::add(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc", + broadcast, +) +add_shape_compute_mapping( + "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", argmax +) +add_shape_compute_mapping("aten::bmm(Tensor self, Tensor mat2) -> Tensor", bmm) +add_shape_compute_mapping( + "aten::_shape_as_tensor(Tensor self) -> Tensor", _shape_as_tensor +) +add_shape_compute_mapping( + "aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)", + topk, +) +add_shape_compute_mapping( + "aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)", + nll_loss_forward, +) +add_shape_compute_mapping( + "aten::native_layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? 
bias, float eps) -> (Tensor, Tensor, Tensor)",
+    native_layer_norm,
+)
+add_shape_compute_mapping(
+    "aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
+    native_batch_norm,
+)
+add_shape_compute_mapping(
+    "aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
+    native_batch_norm,
+)
+add_shape_compute_mapping(
+    "aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
+    native_batch_norm,
+)
+add_shape_compute_mapping(
+    "_batch_norm_with_update(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)",
+    _batch_norm_with_update,
+)
+
+add_shape_compute_mapping(
+    "aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor",
+    cross_entropy_loss,
+)
+# add_shape_compute_mapping("aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor", index_Tensor)
+
+# TODO: migrate over all of symbolic_shape_registry_util.cpp
+# These are duplicated here so that the functions will be serialized
+add_shape_compute_mapping(
+    "aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor",
+    broadcast_three,
+)
+add_shape_compute_mapping(
+    "aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor",
+    broadcast_one_three,
+)
+add_shape_compute_mapping(
+    "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)",
+    broadcast_inplace,
+)
+
+# quantized_conv_prepack TODO
+
+# Shape Compute Fn with upper and lower bounds
+add_bounded_compute_mapping(
+    "aten::nonzero(Tensor self) -> (Tensor)", nonzero_lower_bound, nonzero_upper_bound
+)
diff --git a/janus/lib/python3.10/site-packages/torch/jit/_state.py b/janus/lib/python3.10/site-packages/torch/jit/_state.py
new file mode 100644
index 0000000000000000000000000000000000000000..18456ebd38687c32ef68f947ac512a74acfef59d
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/jit/_state.py
@@ -0,0 +1,127 @@
+# mypy: allow-untyped-defs
+"""JIT-related state.
+
+This module stores various pieces of Python-global state relating to the JIT.
+
+This is not intended to be imported directly; please use the exposed
+functionalities in `torch.jit`.
+"""
+import os
+import weakref
+from typing import Any, Dict, Type
+
+import torch
+
+
+class EnabledProxy:
+    """Stores whether the JIT is enabled or not.
+
+    This is just a wrapper for a bool, so that we get reference semantics
+    """
+
+    def __init__(self) -> None:
+        self.enabled = self.parse_env(
+            "PYTORCH_JIT", True, "> Using PyTorch JIT", "> PyTorch JIT DISABLED"
+        )
+
+    def parse_env(self, name, default, true_message, false_message):
+        value = os.environ.get(name)
+        if value is None:
+            return default
+        if value.lower() in {"1", "true", "yes"}:
+            return True
+        elif value.lower() in {"0", "false", "no"}:
+            return False
+        if value == "1v":
+            print(true_message)
+            return True
+        elif value == "0v":
+            print(false_message)
+            return False
+        raise ValueError(f"Unknown setting of {name}. 
Try using 0 or 1.") + + def __bool__(self): + return self.enabled + + +_enabled = EnabledProxy() + + +def disable(): + _enabled.enabled = False + + +def enable(): + _enabled.enabled = True + + +# The Python CompilationUnit. All functions and modules defined in Python will +# live in here. It's defined in Python because doing in cpp creates static +# destruction order issues. +_python_cu = torch._C.CompilationUnit() + + +# python class => ScriptClass mapping +_script_classes: Dict[Type[Any], Type[Any]] = {} +_name_to_pyclass: Dict[str, Type[Any]] = {} + + +def _add_script_class(python_class, script_class): + _script_classes[python_class] = script_class + _name_to_pyclass[script_class.qualified_name()] = python_class + + +def _get_script_class(python_class): + override = getattr(python_class, "_jit_override_qualname", None) + if override is not None: + python_class = _get_python_class(override) + return _script_classes.get(python_class, None) + + +def _get_python_class(qualified_name): + return _name_to_pyclass.get(qualified_name, None) + + +def _clear_class_state(): + _script_classes.clear() + _name_to_pyclass.clear() + + +# Caching: we currently cache compilation of free functions and overloaded functions. +# To cache free functions we hold a weak ref to the function object and +# map to the compiled fn's qualified name. +# To cache overloaded functions we hold a weak ref to the function obj and +# map to all of its overloaded compiled fns. +# In the future we could consider caching more types of objects so that +# aliasing is preserved across separate compilations of the same object. + +_jit_caching_layer: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary() +_jit_function_overload_caching: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary() + + +def _try_get_jit_cached_overloads(key): + qual_names = _jit_function_overload_caching.get(key, None) + if qual_names: + return [_python_cu.find_function(qual_name) for qual_name in qual_names] + else: + return None + + +def _set_jit_overload_cache(key, compiled_fns): + _jit_function_overload_caching[key] = [fn.qualified_name for fn in compiled_fns] + + +def _try_get_jit_cached_function(key): + if getattr(key, "__disable_jit_function_caching__", False) is True: + return None + qual_name = _jit_caching_layer.get(key, None) + if qual_name: + return _python_cu.find_function(qual_name) + else: + return None + + +def _set_jit_function_cache(key, value): + # only free functions currently supported + assert isinstance(value, torch.jit.ScriptFunction) + _jit_caching_layer[key] = value.qualified_name diff --git a/janus/lib/python3.10/site-packages/torch/jit/_trace.py b/janus/lib/python3.10/site-packages/torch/jit/_trace.py new file mode 100644 index 0000000000000000000000000000000000000000..1dbcdb6a3ca2a86644696f05857b00156427f274 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/_trace.py @@ -0,0 +1,1503 @@ +# mypy: allow-untyped-defs +"""Tracing. + +This module contains functionality to support the JIT's tracing frontend, notably: + * torch.jit.trace + * torch.jit.trace_module + +This is not intended to be imported directly; please use the exposed +functionalities in `torch.jit`. 
+""" + +import contextlib +import copy +import functools +import inspect +import os +import re +import warnings +from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Set, TypeVar +from typing_extensions import ParamSpec + +import torch +from torch._jit_internal import ( + _get_model_id, + _qualified_name, + get_callable_argument_names, + is_scripting, +) +from torch.autograd import function +from torch.jit._script import _CachedForward, script, ScriptModule +from torch.jit._state import _enabled, _python_cu +from torch.nn import Module +from torch.testing._comparison import default_tolerances + + +_flatten = torch._C._jit_flatten +_unflatten = torch._C._jit_unflatten + +R = TypeVar("R", covariant=True) # return type (always covariant) +P = ParamSpec("P") + + +def _create_interpreter_name_lookup_fn(frames_up=1): + def _get_interpreter_name_for_var(var): + frame = inspect.currentframe() + if not frame: + raise RuntimeError("failed to inspect frame") + + i = 0 + while i < frames_up + 1: + frame = frame.f_back + if not frame: + raise RuntimeError("failed to get frame") + i += 1 + + f_locals = frame.f_locals + f_globals = frame.f_globals + + for k, v in f_locals.items(): + if isinstance(v, torch.Tensor) and var is v: + return k if k != "self" else "" + return "" + + return _get_interpreter_name_for_var + + +def _unique_state_dict(module, keep_vars=False): + # since Parameter.detach() always creates a new torch.Tensor instance, + # id(v) doesn't work with it. So we always get the Parameter or Buffer + # as values, and deduplicate the params using Parameters and Buffers + state_dict = module.state_dict(keep_vars=True) + filtered_dict = type(state_dict)() + seen_ids: Set[int] = set() + for k, v in state_dict.items(): + if id(v) in seen_ids: + continue + seen_ids.add(id(v)) + if keep_vars: + filtered_dict[k] = v + else: + filtered_dict[k] = v.detach() + return filtered_dict + + +class ONNXTracedModule(torch.nn.Module): + def __init__( + self, + inner, + strict=True, + force_outplace=False, + return_inputs=False, + return_inputs_states=False, + ): + super().__init__() + # inner may be a Module, or it may be an arbitrary callable + # If it's a Module, we get its parameters automatically, which lets + # us avoid a special casing functions versus modules. + self.inner = inner + self.strict = strict + self._force_outplace = force_outplace + self._return_inputs = return_inputs + self._return_inputs_states = return_inputs_states + + def forward(self, *args: torch.Tensor): + in_vars, in_desc = _flatten(args) + # NOTE: use full state, because we need it for BatchNorm export + # This differs from the compiler path, which doesn't support it at the moment. 
+ module_state = list(_unique_state_dict(self, keep_vars=True).values()) + + ret_inputs = [] + inputs_states = [] + outs = [] + + def wrapper(*args): + in_args: List[torch.Tensor] = [] + for i in range(len(in_vars)): + if not isinstance(args[i], torch.Tensor): + raise RuntimeError("Expected Tensor argument") + in_args.append(args[i]) + + trace_inputs = _unflatten(in_args, in_desc) + + if self._return_inputs: + ret_inputs.append( + tuple(x.clone(memory_format=torch.preserve_format) for x in args) + ) + if self._return_inputs_states: + inputs_states.append(_unflatten(in_args, in_desc)) + outs.append(self.inner(*trace_inputs)) + if self._return_inputs_states: + inputs_states[0] = (inputs_states[0], trace_inputs) + out_vars, _ = _flatten(outs) + if len(out_vars) == 1: + return out_vars[0] + else: + return tuple(out_vars) + + graph, out = torch._C._create_graph_by_tracing( + wrapper, + in_vars + module_state, + _create_interpreter_name_lookup_fn(), + self.strict, + self._force_outplace, + ) + + if self._return_inputs: + return graph, outs[0], ret_inputs[0] + if self._return_inputs_states: + return graph, outs[0], inputs_states[0] + else: + return graph, outs[0] + + +def _clone_inputs(args): + def clone_input(a): + if a is None: + return None + elif isinstance(a, torch.Tensor): + # TODO: figure out one liner to .clone() and set requires_grad + v = ( + a.detach() + .clone(memory_format=None if a.is_mkldnn else torch.preserve_format) + .requires_grad_(a.requires_grad) + ) + if a.grad is not None: + v.grad = clone_input(v.grad) + return v + else: + return a.clone(memory_format=torch.preserve_format) + + return function._nested_map( + lambda x: isinstance(x, torch.Tensor), clone_input, condition_msg="tensors" + )(args) + + +# This is purely for developer debugging. We are not going to advertise it. +_JIT_TIME = os.environ.get("PYTORCH_JIT_TIME", False) # CUDA-only timing +_JIT_DISABLE = os.environ.get("PYTORCH_JIT_DISABLE", False) +_JIT_STATS = os.environ.get("PYTORCH_JIT_STATS", False) + + +@contextlib.contextmanager +def _time(trace_name, name, time=True): + if (not _JIT_TIME and not time) or not torch.cuda.is_available(): + yield + return + stream = torch.cuda.current_stream() + start = torch.cuda.Event(enable_timing=True) + end = torch.cuda.Event(enable_timing=True) + stream.record_event(start) + try: + yield + finally: + stream.record_event(end) + end.synchronize() + print(f"{trace_name} {name} time: {start.elapsed_time(end)} ms") + + +def verify(model, args, loss_fn=torch.sum, devices=None): + """ + Verify that a JIT compiled model has the same behavior as its uncompiled version along with its backwards pass. + + If your model returns multiple outputs, + you must also specify a `loss_fn` to produce a loss for which + the backwards will be computed. + + This function has side-effects (e.g., it executes your model / saves and loads + parameters), so don't expect the model to come out exactly the same as what + you passed in. + + Args: + model (compiled torch.nn.Module or function): the module/function to be + verified. The module/function definition MUST have been decorated with + `@torch.jit.compile`. + args (tuple or Tensor): the positional arguments to pass to the + compiled function/module to be verified. A non-tuple is assumed to + be a single positional argument to be passed to the model. + loss_fn (function, optional): the loss function to be applied to + the output of the model, before backwards is invoked. 
By default, + we assume that a model returns a single result, and we :func:`torch.sum` + before calling backwards; if this is inappropriate, you can pass your + own loss function. Note that if a model returns a tuple of results, + these are passed as separate positional arguments to `loss_fn`. + devices (iterable of device IDs, optional): the GPU devices which the + compiled module will be run on. This determines the RNG state we + must save when running both compiled and uncompiled versions of the model. + """ + # TODO: In principle, we track device information in our trace, so it + # should be possible to check if our execution actually obeyed the 'devices' + # the user provided. + + # TODO: Consider adding a utility function to torch.jit to test + # for this case + if not isinstance(model, torch._C.CompiledFunction): # type: ignore[attr-defined] + raise TypeError( + "Cannot verify an uncompiled module. Add @torch.jit.compile to compile it" + ) + is_module = isinstance(model, Module) + + if not isinstance(args, tuple): + args = (args,) + + saved_args = _clone_inputs(args) + if is_module: + saved_state = copy.deepcopy(model.state_dict()) + + def run_fwd_bwd(args, force_trace=False, assert_compiled=False): + params = list(model.parameters()) if is_module else [] + in_vars, _ = _flatten((args, params)) + # We use a special API to reset the trace and compile it from scratch. + compiled_fn = model + if force_trace: + compiled_fn.clear_cache() + if assert_compiled: + hits = compiled_fn.hits + out = model(*args) + if assert_compiled and compiled_fn.hits == hits: # type: ignore[possibly-undefined] + raise RuntimeError("failed to use the compiled function") + if not isinstance(out, tuple): + out = (out,) + if loss_fn == torch.sum and len(out) != 1: + raise ValueError( + f"Model returns {len(out)} outputs, but default loss function " + "(torch.sum) can only handle a single output" + ) + out_vars, _ = _flatten(out) + saved_outs = [ + v.detach().clone(memory_format=torch.preserve_format) for v in out_vars + ] + loss = loss_fn(*out) + grads = torch.autograd.grad([loss], in_vars) + # TODO: I'm not sure if the clone here is necessary but it is safer + saved_grads = [ + v.detach().clone(memory_format=torch.preserve_format) for v in grads + ] + return (saved_outs, saved_grads) + + with torch.random.fork_rng(devices, _caller="torch.jit.verify"): + uncompiled_outs, uncompiled_grads = run_fwd_bwd(args, force_trace=True) + assert model.has_trace_for(*args) + + if is_module: + model.load_state_dict(saved_state) # type: ignore[possibly-undefined] + compiled_outs, compiled_grads = run_fwd_bwd(args, assert_compiled=True) + + _verify_equal(uncompiled_outs, compiled_outs) + _verify_equal(uncompiled_grads, compiled_grads) + + +def _verify_equal(xs, ys): + for x, y in zip(xs, ys): + if x.sub(y).abs().max() > 1e-6: + raise RuntimeError("JIT and real computation mismatch") + + +def indent(s): + return "\n".join(["\t" + line for line in s.splitlines()]) + + +class TracingCheckError(Exception): + def __init__(self, graph_diff_error, tensor_compare_error, extra_msg=None): + self.message = "Tracing failed sanity checks!\n" + if extra_msg is not None: + self.message += extra_msg + "\n" + if graph_diff_error is not None: + self.message += "ERROR: Graphs differed across invocations!\n" + self.message += indent(graph_diff_error) + "\n" + if tensor_compare_error is not None: + self.message += ( + "ERROR: Tensor-valued Constant nodes differed in value " + "across invocations. 
This often indicates that the tracer has" + " encountered untraceable code.\n" + ) + self.message += indent(tensor_compare_error) + "\n" + super().__init__(self.message) + + +# Check the traced module against a set of user-provided validation inputs +@torch.no_grad() +def _check_trace( + check_inputs, + func, + traced_func, + check_tolerance, + strict, + force_outplace, + is_trace_module, + _module_class, + example_inputs_is_kwarg=False, +): + # Note: tracing is independent of optimizations, which consume the trace + for inputs in check_inputs: + if isinstance(inputs, torch.Tensor): + inputs = (inputs,) + + if is_trace_module: + copied_dict = {} + for name, data in inputs.items(): + copied_dict[name] = _clone_inputs(data) + check_mod = torch.jit.trace_module( + getattr(func, "__self__", func), + copied_dict, + check_trace=False, + strict=strict, + _force_outplace=force_outplace, + _module_class=_module_class, + _compilation_unit=torch._C.CompilationUnit(), + example_inputs_is_kwarg=example_inputs_is_kwarg, + _store_inputs=False, + ) + check_mod_func = check_mod._c._get_method(traced_func.name) + inputs = inputs[traced_func.name] + if ( + isinstance(inputs, (torch.Tensor)) + or isinstance(inputs, dict) + and not example_inputs_is_kwarg + ): + inputs = (inputs,) + else: + if example_inputs_is_kwarg: + check_mod = torch.jit.trace( + func, + check_trace=False, + strict=strict, + _force_outplace=force_outplace, + _module_class=_module_class, + example_kwarg_inputs=_clone_inputs(inputs), + _store_inputs=False, + ) + else: + check_mod = torch.jit.trace( + func, + _clone_inputs(inputs), + check_trace=False, + strict=strict, + _force_outplace=force_outplace, + _module_class=_module_class, + _store_inputs=False, + ) + check_mod_func = check_mod + + def graph_diagnostic_info(): + mod_canonicalized = torch._C._jit_pass_canonicalize(traced_func.graph) + torch._C._jit_pass_inline(mod_canonicalized) + torch._C._jit_pass_erase_shape_information(mod_canonicalized) + mod_str = str(mod_canonicalized) + mod_str = re.sub(r"___torch_mangle_[0-9]+\.", "", mod_str) + check_canonicalized = torch._C._jit_pass_canonicalize(check_mod_func.graph) + torch._C._jit_pass_inline(check_canonicalized) + torch._C._jit_pass_erase_shape_information(check_canonicalized) + check_str = str(check_canonicalized) + check_str = re.sub(r"___torch_mangle_[0-9]+\.", "", check_str) + + graph_diff_errors = None + if mod_str != check_str: + import difflib + + graph_diff = difflib.ndiff( + mod_str.splitlines(True), check_str.splitlines(True) + ) + graph_diff_errors = "Graph diff:\n" + indent("".join(graph_diff)) + "\n" + + for n_mod, n_check in zip( + mod_canonicalized.nodes(), check_canonicalized.nodes() + ): + if str(n_mod) != str(n_check): + graph_diff_errors += "First diverging operator:\n" + node_diff = difflib.ndiff( + str(n_mod).splitlines(True), str(n_check).splitlines(True) + ) + source_printout = ( + "Node diff:\n" + indent("".join(node_diff)) + "\n" + ) + mod_stack = n_mod.sourceRange() + if mod_stack: + source_printout += ( + "Trace source location:\n" + indent(mod_stack) + "\n" + ) + check_stack = n_check.sourceRange() + if check_stack: + source_printout += ( + "Check source location:\n" + indent(check_stack) + "\n" + ) + graph_diff_errors += source_printout + + break # For now, only print out the first pair of nodes that diverges + + tensor_compare_errors = None + # Check Tensor-valued constant nodes + for n_mod, n_check in zip( + mod_canonicalized.nodes(), check_canonicalized.nodes() + ): + if n_mod.kind() != n_check.kind(): + 
break  # Graphs have already diverged
+
+            if n_mod.kind() == "prim::Constant" and not (
+                n_mod.mustBeNone() or n_check.mustBeNone()
+            ):
+                if not n_mod.hasAttribute("value"):
+                    continue
+                if n_mod.kindOf("value") != "t" or n_check.kindOf("value") != "t":
+                    continue
+
+                mod_tensor_val = n_mod.t("value")
+                check_tensor_val = n_check.t("value")
+
+                try:
+                    torch.testing.assert_close(
+                        mod_tensor_val, check_tensor_val, equal_nan=True
+                    )
+                except (RuntimeError, AssertionError) as e:
+                    if tensor_compare_errors is None:
+                        tensor_compare_errors = ""
+                    tensor_compare_errors += "Node:\n" + indent(str(n_mod)) + "\n"
+                    compare_stack = n_mod.sourceRange()
+                    if compare_stack:
+                        tensor_compare_errors += (
+                            "Source Location:\n" + indent(compare_stack) + "\n"
+                        )
+                    tensor_compare_errors += "Comparison exception: " + indent(
+                        str(e)
+                    )
+
+                    break  # For now, only print the first diverging pair
+
+        return graph_diff_errors, tensor_compare_errors
+
+    def wrap_retval(x):
+        return x if isinstance(x, tuple) else (x,)
+
+    def run_mod_and_filter_tensor_outputs(mod, inputs, running_what):
+        try:
+            if isinstance(inputs, dict) and example_inputs_is_kwarg:
+                outs = wrap_retval(mod(**inputs))
+            else:
+                outs = wrap_retval(mod(*_clone_inputs(inputs)))
+            outs = [out for out in outs if isinstance(out, torch.Tensor)]
+            return outs
+        except Exception as e:
+            graph_diff_errors, tensor_compare_errors = graph_diagnostic_info()
+            msg = f"encountered an exception while running the {running_what} with test inputs.\nException:\n{indent(str(e))}"
+            raise TracingCheckError(
+                graph_diff_errors,
+                tensor_compare_errors,
+                extra_msg=msg,
+            ) from e
+
+    has_warned = [False]
+
+    def maybe_warn_nondeterministic():
+        if has_warned[0]:
+            return
+        has_warned[0] = True
+        nondeterm_ops = [
+            op for op in traced_func.graph.nodes() if op.isNondeterministic()
+        ]
+        if len(nondeterm_ops) > 0:
+            nondeterministic_ops_warning = "Trace had nondeterministic nodes. "
+            nondeterministic_ops_warning += (
+                "Did you forget to call .eval() on your model? Nodes:\n"
+            )
+            nondeterministic_ops_warning += "\n".join(
+                [indent(str(op)) for op in nondeterm_ops][:20]
+            )
+            nondeterministic_ops_warning += (
+                "\nThis may cause errors in trace checking. 
To disable trace checking,"
+                " pass check_trace=False to torch.jit.trace()"
+            )
+            warnings.warn(
+                nondeterministic_ops_warning, category=TracerWarning, stacklevel=5
+            )
+
+    def compare_outputs(original, reference, match_what):
+        all_ok = True
+        for i, (orig, ref) in enumerate(zip(original, reference)):
+            try:
+                if orig.is_quantized:
+                    orig = orig.dequantize()
+                if ref.is_quantized:
+                    ref = ref.dequantize()
+                if orig.is_mkldnn:
+                    orig = orig.to_dense()
+                if ref.is_mkldnn:
+                    ref = ref.to_dense()
+                if ref.is_complex() or orig.is_complex():
+                    torch.testing.assert_close(
+                        orig.to(torch.cdouble),
+                        ref.to(torch.cdouble),
+                        rtol=check_tolerance,
+                        atol=default_tolerances(orig, ref)[1],
+                        equal_nan=True,
+                    )
+                else:
+                    if orig.is_mps or ref.is_mps:
+                        torch.testing.assert_close(
+                            orig.float(),
+                            ref.float(),
+                            rtol=check_tolerance,
+                            atol=default_tolerances(orig, ref)[1],
+                            equal_nan=True,
+                        )
+                    elif getattr(orig, "is_nested", None) or getattr(
+                        ref, "is_nested", None
+                    ):
+                        assert getattr(orig, "is_nested", None) == getattr(
+                            ref, "is_nested", None
+                        )
+                        for t_orig, t_ref in zip(orig.unbind(), ref.unbind()):
+                            torch.testing.assert_close(
+                                t_orig.double(),
+                                t_ref.double(),
+                                rtol=check_tolerance,
+                                atol=default_tolerances(t_orig, t_ref)[1],
+                                equal_nan=True,
+                            )
+                    else:
+                        torch.testing.assert_close(
+                            orig.double(),
+                            ref.double(),
+                            rtol=check_tolerance,
+                            atol=default_tolerances(orig, ref)[1],
+                            equal_nan=True,
+                        )
+            except AssertionError as e:
+                maybe_warn_nondeterministic()
+                warnings.warn(
+                    "Output nr "
+                    + str(i + 1)
+                    + ". of the traced function does not match "
+                    "the corresponding output of the "
+                    + match_what
+                    + ". Detailed error:\n"
+                    + str(e),
+                    category=TracerWarning,
+                    stacklevel=4,
+                )
+                all_ok = False
+
+        return all_ok
+
+    traced_outs = run_mod_and_filter_tensor_outputs(traced_func, inputs, "trace")
+    fn_outs = run_mod_and_filter_tensor_outputs(func, inputs, "Python function")
+    if compare_outputs(traced_outs, fn_outs, "Python function"):
+        check_outs = run_mod_and_filter_tensor_outputs(
+            check_mod_func, inputs, "repeated trace"
+        )
+        compare_outputs(traced_outs, check_outs, "repeated trace")
+
+    diag_info = graph_diagnostic_info()
+    if any(info is not None for info in diag_info):
+        raise TracingCheckError(*diag_info)
+
+
+class TracerWarning(Warning):
+    @staticmethod
+    def ignore_lib_warnings():
+        # We ignore warnings from all submodules excluding the JIT, because we need them e.g. for _check_trace
+        warnings.filterwarnings(
+            "ignore", category=TracerWarning, module="torch.(?!jit)"
+        )
+        warnings.filterwarnings("ignore", "torch::jit::fuser::cuda")
+
+
+# We ignore the tracer warnings coming from inside the library, because all our shape
+# checks in nn will trigger them.
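The `ignore_lib_warnings()` call just below installs that filter at import time, so TracerWarnings originating from torch's own modules are suppressed while warnings attributed to user code still surface. As a minimal, hypothetical sketch (not part of this file) of how calling code could opt back in to every TracerWarning while debugging a suspect trace:

    import warnings

    import torch

    def trace_with_all_warnings(fn, example_inputs):
        # Temporarily lift the module-based filter so even library-internal
        # TracerWarnings are shown while the trace is recorded.
        with warnings.catch_warnings():
            warnings.simplefilter("always", torch.jit.TracerWarning)
            return torch.jit.trace(fn, example_inputs)

Here `trace_with_all_warnings` is an illustrative helper name, not an API of this module.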
+TracerWarning.ignore_lib_warnings() +torch._C._tracer_warn_use_python() + + +def make_tuple(example_inputs): + if isinstance(example_inputs, (torch.Tensor, dict)): + return (example_inputs,) + # done primarily so that weird iterables fail here and not pybind11 code + if not isinstance(example_inputs, tuple): + return tuple(example_inputs) + return example_inputs + + +def make_module(mod, _module_class, _compilation_unit): + if isinstance(mod, ScriptModule): + return mod + elif torch._jit_internal.module_has_exports(mod): + infer_methods_stubs_fn = torch.jit._recursive.make_stubs_from_exported_methods + return torch.jit._recursive.create_script_module( + mod, infer_methods_stubs_fn, share_types=False, is_tracing=True + ) + else: + if _module_class is None: + _module_class = TopLevelTracedModule + return _module_class(mod, _compilation_unit=_compilation_unit) + + +def wrap_check_inputs(check_inputs): + if check_inputs is None: + return None + + return [{"forward": c} for c in check_inputs] + + +def analyze_ts_result_with_export_result(export, trace): + import torch.utils._pytree as pytree + + flat_export = pytree.tree_leaves(export) + flat_trace = pytree.tree_leaves(trace) + + for orig, loaded in zip(flat_export, flat_trace): + if orig.layout != loaded.layout: + return False + # mkldnn is not supported for torch.allclose + if orig.layout == torch._mkldnn: # type: ignore[attr-defined] + return True + if type(orig) != type(loaded): + return False + + if isinstance(orig, torch._subclasses.FakeTensor): + # Skip for FakeTensor. + return True + elif isinstance(orig, torch.Tensor): + if orig.dtype != loaded.dtype: + return False + if not torch.allclose(orig, loaded): + return False + else: + if orig != loaded: + return False + return True + + +def _trace_impl( + func, + example_inputs=None, + optimize=None, + check_trace=True, + check_inputs=None, + check_tolerance=1e-5, + strict=True, + _force_outplace=False, + _module_class=None, + _compilation_unit=_python_cu, + example_kwarg_inputs=None, + _store_inputs=True, +): + if isinstance(func, torch.jit.ScriptModule): + # it is hard to trace it because the forward method on ScriptModule is already defined, so it + # would result in an error. + warnings.warn( + "The input to trace is already a ScriptModule, tracing it is a no-op. Returning the object as is." 
+ ) + return func + + if isinstance(func, torch.nn.Module): + if example_inputs is None: + if isinstance(example_kwarg_inputs, dict): + example_inputs = example_kwarg_inputs + else: + raise RuntimeError("example_kwarg_inputs should be a dict") + return trace_module( + func, + {"forward": example_inputs}, + None, + check_trace, + wrap_check_inputs(check_inputs), + check_tolerance, + strict, + _force_outplace, + _module_class, + example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict), + _store_inputs=_store_inputs, + ) + if ( + hasattr(func, "__self__") + and isinstance(func.__self__, torch.nn.Module) + and func.__name__ == "forward" + ): + if example_inputs is None: + if isinstance(example_kwarg_inputs, dict): + example_inputs = example_kwarg_inputs + else: + raise RuntimeError("example_kwarg_inputs should be a dict") + return trace_module( + func.__self__, + {"forward": example_inputs}, + None, + check_trace, + wrap_check_inputs(check_inputs), + check_tolerance, + strict, + _force_outplace, + _module_class, + example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict), + _store_inputs=_store_inputs, + ) + + # Special case for common case of passing a single Tensor + if ( + isinstance(example_inputs, (torch.Tensor, dict)) + and example_kwarg_inputs is None + ): + example_inputs = (example_inputs,) + # done primarily so that weird iterables fail here and not pybind11 code + elif example_kwarg_inputs is None and not isinstance(example_inputs, tuple): + example_inputs = tuple(example_inputs) + + var_lookup_fn = _create_interpreter_name_lookup_fn(0) + + if hasattr(func, "__self__") and isinstance(func.__self__, torch.nn.Module): + raise AttributeError( + "trace doesn't support compiling individual module's functions.\n" + "Please use trace_module" + ) + + name = _qualified_name(func) + if isinstance(example_kwarg_inputs, dict): + example_inputs = example_kwarg_inputs + traced = torch._C._create_function_from_trace_with_dict( + name, + func, + example_kwarg_inputs, + var_lookup_fn, + strict, + _force_outplace, + get_callable_argument_names(func), + ) + else: + traced = torch._C._create_function_from_trace( + name, + func, + example_inputs, + var_lookup_fn, + strict, + _force_outplace, + get_callable_argument_names(func), + ) + + # Check the trace against new traces created from user-specified inputs + if check_trace: + if check_inputs is not None: + _check_trace( + check_inputs, + func, + traced, + check_tolerance, + strict, + _force_outplace, + False, + _module_class, + example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict), + ) + else: + _check_trace( + [example_inputs], + func, + traced, + check_tolerance, + strict, + _force_outplace, + False, + _module_class, + example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict), + ) + + # Allow torch.compile() to inline + traced._torchdynamo_inline = func # type: ignore[attr-defined] + return traced + + +class _ExportType(str, Enum): + DIRECT_EXPORT = "DIRECT_EXPORT" + TRACE_AND_EXPORT = "TRACE_AND_EXPORT" + SOURCE_TO_SOURCE = "SOURCE_TO_SOURCE" + + def __str__(self) -> str: + return self.value + + +class _ExportOutcome(str, Enum): + SUCCESS = "SUCCESS" + FAILED_TO_EXPORT = "FAILED_TO_EXPORT" + FAILED_TO_RUN = "FAILED_TO_RUN" + ACCURACY_ERROR = "ACCURACY_ERROR" + + def __str__(self) -> str: + return self.value + + +def trace( + func, + example_inputs=None, + optimize=None, + check_trace=True, + check_inputs=None, + check_tolerance=1e-5, + strict=True, + _force_outplace=False, + _module_class=None, + _compilation_unit=_python_cu, 
+    example_kwarg_inputs=None,
+    _store_inputs=True,
+):
+    r"""
+    Trace a function and return an executable or :class:`ScriptFunction` that will be optimized using just-in-time compilation.
+
+    Tracing is ideal for code that operates only on
+    ``Tensor``\\s and lists, dictionaries, and
+    tuples of ``Tensor``\\s.
+
+    Using `torch.jit.trace` and `torch.jit.trace_module`, you can turn an
+    existing module or Python function into a TorchScript
+    :class:`ScriptFunction` or :class:`ScriptModule`. You must provide example
+    inputs, and we run the function, recording the operations performed on all
+    the tensors.
+
+    * The resulting recording of a standalone function produces `ScriptFunction`.
+    * The resulting recording of `nn.Module.forward` or `nn.Module` produces
+      `ScriptModule`.
+
+    This module also contains any parameters that the original
+    module had as well.
+
+    Warning:
+        Tracing only correctly records functions and modules which are not data
+        dependent (e.g., do not have conditionals on data in tensors) and do not have
+        any untracked external dependencies (e.g., perform input/output or
+        access global variables). Tracing only records operations done when the given
+        function is run on the given tensors. Therefore, the returned
+        `ScriptModule` will always run the same traced graph on any input. This
+        has some important implications when your module is expected to run
+        different sets of operations, depending on the input and/or the module
+        state. For example,
+
+        * Tracing will not record any control-flow like if-statements or loops.
+          When this control-flow is constant across your module, this is fine
+          and it often inlines the control-flow decisions. But sometimes the
+          control-flow is actually part of the model itself. For instance, a
+          recurrent network is a loop over the (possibly dynamic) length of an
+          input sequence.
+        * In the returned :class:`ScriptModule`, operations that have different
+          behaviors in ``training`` and ``eval`` modes will always behave as if
+          it is in the mode it was in during tracing, no matter which mode the
+          `ScriptModule` is in.
+
+        In cases like these, tracing would not be appropriate and
+        :func:`scripting <torch.jit.script>` is a better choice. If you trace
+        such models, you may silently get incorrect results on subsequent
+        invocations of the model. The tracer will try to emit warnings when
+        doing something that may cause an incorrect trace to be produced.
+
+    Args:
+        func (callable or torch.nn.Module): A Python function or `torch.nn.Module`
+            that will be run with `example_inputs`. `func` arguments and return
+            values must be tensors or (possibly nested) tuples that contain
+            tensors. When a module is passed `torch.jit.trace`, only the
+            ``forward`` method is run and traced (see :func:`torch.jit.trace
+            <torch.jit.trace_module>` for details).
+
+    Keyword arguments:
+        example_inputs (tuple or torch.Tensor or None, optional): A tuple of example
+            inputs that will be passed to the function while tracing.
+            Default: ``None``. Either this argument or ``example_kwarg_inputs``
+            should be specified. The resulting trace can be run with inputs of
+            different types and shapes assuming the traced operations support those
+            types and shapes. `example_inputs` may also be a single Tensor in which
+            case it is automatically wrapped in a tuple. When the value is None,
+            ``example_kwarg_inputs`` should be specified.
+
+        check_trace (``bool``, optional): Check if the same inputs run through
+            traced code produce the same outputs. Default: ``True``. 
You might want
+            to disable this if, for example, your network contains non-
+            deterministic ops or if you are sure that the network is correct despite
+            a checker failure.
+
+        check_inputs (list of tuples, optional): A list of tuples of input
+            arguments that should be used to check the trace against what is
+            expected. Each tuple is equivalent to a set of input arguments that
+            would be specified in ``example_inputs``. For best results, pass in
+            a set of checking inputs representative of the space of shapes and
+            types of inputs you expect the network to see. If not specified,
+            the original ``example_inputs`` are used for checking.
+        check_tolerance (float, optional): Floating-point comparison tolerance
+            to use in the checker procedure. This can be used to relax the
+            checker strictness in the event that results diverge numerically
+            for a known reason, such as operator fusion.
+        strict (``bool``, optional): run the tracer in a strict mode or not
+            (default: ``True``). Only turn this off when you want the tracer to
+            record your mutable container types (currently ``list``/``dict``)
+            and you are sure that the container you are using in your
+            problem is a ``constant`` structure and does not get used as
+            control flow (if, for) conditions.
+        example_kwarg_inputs (dict, optional): This parameter is a pack of keyword
+            arguments of example inputs that will be passed to the function while
+            tracing. Default: ``None``. Either this argument or ``example_inputs``
+            should be specified. The dict will be unpacked using the argument names
+            of the traced function. If the keys of the dict don't match the
+            traced function's argument names, a runtime exception will be raised.
+
+    Returns:
+        If `func` is `nn.Module` or ``forward`` of `nn.Module`, `trace` returns
+        a :class:`ScriptModule` object with a single ``forward`` method
+        containing the traced code. The returned `ScriptModule` will
+        have the same set of sub-modules and parameters as the original
+        ``nn.Module``. If ``func`` is a standalone function, ``trace``
+        returns `ScriptFunction`.
+
+    Example (tracing a function):
+
+    .. testcode::
+
+        import torch
+
+        def foo(x, y):
+            return 2 * x + y
+
+        # Run `foo` with the provided inputs and record the tensor operations
+        traced_foo = torch.jit.trace(foo, (torch.rand(3), torch.rand(3)))
+
+        # `traced_foo` can now be run with the TorchScript interpreter or saved
+        # and loaded in a Python-free environment
+
+    Example (tracing an existing module)::
+
+        import torch
+        import torch.nn as nn
+
+        class Net(nn.Module):
+            def __init__(self) -> None:
+                super().__init__()
+                self.conv = nn.Conv2d(1, 1, 3)
+
+            def forward(self, x):
+                return self.conv(x)
+
+        n = Net()
+        example_weight = torch.rand(1, 1, 3, 3)
+        example_forward_input = torch.rand(1, 1, 3, 3)
+
+        # Trace a specific method and construct `ScriptModule` with
+        # a single `forward` method
+        module = torch.jit.trace(n.forward, example_forward_input)
+
+        # Trace a module (implicitly traces `forward`) and construct a
+        # `ScriptModule` with a single `forward` method
+        module = torch.jit.trace(n, example_forward_input)
+
+    """
+    if not _enabled:
+        return func
+    if optimize is not None:
+        warnings.warn(
+            "`optimize` is deprecated and has no effect. 
" + "Use `with torch.jit.optimized_execution()` instead", + FutureWarning, + stacklevel=2, + ) + + from torch._utils_internal import ( + check_if_torch_exportable, + log_torch_jit_trace_exportability, + log_torchscript_usage, + ) + + traced_func = _trace_impl( + func, + example_inputs, + optimize, + check_trace, + check_inputs, + check_tolerance, + strict, + _force_outplace, + _module_class, + _compilation_unit, + example_kwarg_inputs, + _store_inputs, + ) + log_torchscript_usage("trace", model_id=_get_model_id(traced_func)) + + if check_if_torch_exportable(): + from torch._export.converter import TS2EPConverter + from torch.export._trace import ( + _convert_ts_to_export_experimental, + _process_jit_trace_inputs_for_export, + ) + + traced_func_for_export = _trace_impl( + func, + example_inputs=example_inputs, + optimize=optimize, + check_trace=False, + check_inputs=check_inputs, + check_tolerance=check_tolerance, + strict=strict, + _force_outplace=_force_outplace, + _module_class=_module_class, + _compilation_unit=_compilation_unit, + example_kwarg_inputs=example_kwarg_inputs, + _store_inputs=_store_inputs, + ) + + export_args, _ = _process_jit_trace_inputs_for_export( + example_inputs, example_kwarg_inputs + ) + + def _log_exportability(func_to_export, export_func, export_args, export_type): + try: + traced_result = func_to_export(*export_args) + except Exception as e: + _ = e + log_torch_jit_trace_exportability( + "trace", str(export_type), str(_ExportOutcome.SUCCESS), "succeeded" + ) + return + + try: + ep_module = export_func(func_to_export, export_args) + except Exception as e: + log_torch_jit_trace_exportability( + "trace", + str(export_type), + str(_ExportOutcome.FAILED_TO_EXPORT), + str(e), + ) + return + + try: + export = ep_module(*export_args) + except Exception as e: + log_torch_jit_trace_exportability( + "trace", str(export_type), str(_ExportOutcome.FAILED_TO_RUN), str(e) + ) + return + + if not analyze_ts_result_with_export_result(export, traced_result): + log_torch_jit_trace_exportability( + "trace", + str(export_type), + str(_ExportOutcome.ACCURACY_ERROR), + "accuracy error", + ) + return + + log_torch_jit_trace_exportability( + "trace", str(export_type), str(_ExportOutcome.SUCCESS), "succeeded" + ) + + def _direct_export_and_lower(func, export_args): + return torch.export.export(func, export_args, strict=False).module() + + def _convert_ts_to_export_source_to_source(func, export_args): + return TS2EPConverter(func, export_args).convert().module() + + # torch.jit.trace is noop when the original module is torch.jit.ScriptModule + if not isinstance(traced_func_for_export, torch.jit.ScriptModule): + _log_exportability( + traced_func_for_export, + _direct_export_and_lower, + export_args, + _ExportType.DIRECT_EXPORT, + ) + + _log_exportability( + traced_func_for_export, + _convert_ts_to_export_experimental, + export_args, + _ExportType.TRACE_AND_EXPORT, + ) + _log_exportability( + traced_func_for_export, + _convert_ts_to_export_source_to_source, + export_args, + _ExportType.SOURCE_TO_SOURCE, + ) + + return traced_func + + +_trace_module_map: Optional[Dict[Any, Any]] = None + + +def trace_module( + mod, + inputs, + optimize=None, + check_trace=True, + check_inputs=None, + check_tolerance=1e-5, + strict=True, + _force_outplace=False, + _module_class=None, + _compilation_unit=_python_cu, + example_inputs_is_kwarg=False, + _store_inputs=True, +): + """ + Trace a module and return an executable :class:`ScriptModule` that will be optimized using just-in-time compilation. 
+
+    When a module is passed to :func:`torch.jit.trace <torch.jit.trace>`, only
+    the ``forward`` method is run and traced. With ``trace_module``, you can specify a dictionary of
+    method names to example inputs to trace (see the ``inputs`` argument below).
+
+    See :func:`torch.jit.trace <torch.jit.trace>` for more information on tracing.
+
+    Args:
+        mod (torch.nn.Module): A ``torch.nn.Module`` containing methods whose names are
+            specified in ``inputs``. The given methods will be compiled
+            as a part of a single `ScriptModule`.
+        inputs (dict): A dict containing sample inputs indexed by method names in ``mod``.
+            The inputs will be passed to methods whose names correspond to inputs'
+            keys while tracing.
+            ``{ 'forward' : example_forward_input, 'method2': example_method2_input}``
+    Keyword arguments:
+        check_trace (``bool``, optional): Check if the same inputs run through
+            traced code produce the same outputs. Default: ``True``. You might want
+            to disable this if, for example, your network contains
+            non-deterministic ops or if you are sure that the network is correct
+            despite a checker failure.
+
+        check_inputs (list of dicts, optional): A list of dicts of input arguments that should be used
+            to check the trace against what is expected. Each dict
+            is equivalent to a set of input arguments that would
+            be specified in ``inputs``. For best results, pass in a
+            set of checking inputs representative of the space of
+            shapes and types of inputs you expect the network to see.
+            If not specified, the original ``inputs`` are used for checking.
+        check_tolerance (float, optional): Floating-point comparison tolerance to use in the checker procedure.
+            This can be used to relax the checker strictness in the event that
+            results diverge numerically for a known reason, such as operator fusion.
+        example_inputs_is_kwarg (``bool``, optional): This parameter indicates whether the example
+            inputs are a pack of keyword arguments. Default: ``False``.
+
+    Returns:
+        A :class:`ScriptModule` object with a single ``forward`` method containing the traced code.
+        When ``mod`` is a ``torch.nn.Module``, the returned :class:`ScriptModule` will have the same set of
+        sub-modules and parameters as ``mod``.
+
+    Example (tracing a module with multiple methods)::
+
+        import torch
+        import torch.nn as nn
+
+        class Net(nn.Module):
+            def __init__(self) -> None:
+                super().__init__()
+                self.conv = nn.Conv2d(1, 1, 3)
+
+            def forward(self, x):
+                return self.conv(x)
+
+            def weighted_kernel_sum(self, weight):
+                return weight * self.conv.weight
+
+
+        n = Net()
+        example_weight = torch.rand(1, 1, 3, 3)
+        example_forward_input = torch.rand(1, 1, 3, 3)
+
+        # Trace a specific method and construct `ScriptModule` with
+        # a single `forward` method
+        module = torch.jit.trace(n.forward, example_forward_input)
+
+        # Trace a module (implicitly traces `forward`) and construct a
+        # `ScriptModule` with a single `forward` method
+        module = torch.jit.trace(n, example_forward_input)
+
+        # Trace specific methods on a module (specified in `inputs`) and construct
+        # a `ScriptModule` with `forward` and `weighted_kernel_sum` methods
+        inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight}
+        module = torch.jit.trace_module(n, inputs)
+
+    """
+    if not _enabled:
+        return mod
+    if optimize is not None:
+        warnings.warn(
+            "`optimize` is deprecated and has no effect. 
" + "Use `with torch.jit.optimized_execution()` instead", + FutureWarning, + stacklevel=2, + ) + + var_lookup_fn = _create_interpreter_name_lookup_fn(0) + + if not isinstance(mod, torch.nn.Module): + raise AttributeError("expected torch.nn.Module as the first argument") + + if not isinstance(inputs, dict): + raise AttributeError("expected a dictionary of (method_name, input) pairs") + + old_module_map = torch.jit._trace._trace_module_map + try: + trace_module_map: Dict[Any, Any] = {} + + def register_submods(mod, prefix): + for name, child in mod.named_children(): + submod_qualname = prefix + "." + name + trace_module_map[child] = submod_qualname + register_submods(child, submod_qualname) + + trace_module_map["__module"] = mod + torch.jit._trace._trace_module_map = trace_module_map + register_submods(mod, "__module") + + module = make_module(mod, _module_class, _compilation_unit) + + for method_name, example_inputs in inputs.items(): + if method_name == "forward": + # "forward" is a special case because we need to trace + # `Module.__call__`, which sets up some extra tracing, but uses + # argument names of the real `Module.forward` method. + func = mod + forward_method = getattr(mod, method_name) + argument_names = get_callable_argument_names(forward_method) + else: + func = getattr(mod, method_name) + argument_names = get_callable_argument_names(func) + + if isinstance(example_inputs, dict) and example_inputs_is_kwarg: + # Raise exception when the user provided key names are not aligned with forward() method's arguments' name/ + for key in example_inputs: + if key not in argument_names: + valid_arguments = "[" + ",".join(argument_names) + "]" + raise NameError( + f"""'{key}' is not in forward() method's arguments, + valid arguments name are {valid_arguments}""" + ) + module._c._create_method_from_trace_with_dict( + method_name, + func, + example_inputs, + var_lookup_fn, + strict, + _force_outplace, + argument_names, + _store_inputs, + ) + else: + example_inputs = make_tuple(example_inputs) + module._c._create_method_from_trace( + method_name, + func, + example_inputs, + var_lookup_fn, + strict, + _force_outplace, + argument_names, + _store_inputs, + ) + + check_trace_method = module._c._get_method(method_name) + + # Check the trace against new traces created from user-specified inputs + if check_trace: + if check_inputs is not None: + _check_trace( + check_inputs, + func, + check_trace_method, + check_tolerance, + strict, + _force_outplace, + True, + _module_class, + example_inputs_is_kwarg=example_inputs_is_kwarg, + ) + else: + _check_trace( + [inputs], + func, + check_trace_method, + check_tolerance, + strict, + _force_outplace, + True, + _module_class, + example_inputs_is_kwarg=example_inputs_is_kwarg, + ) + finally: + torch.jit._trace._trace_module_map = old_module_map + + return module + + +def is_tracing(): + """Return a boolean value. + + Returns ``True`` in tracing (if a function is called during the + tracing of code with ``torch.jit.trace``) and ``False`` otherwise. + """ + if is_scripting(): + return False + return torch._C._is_tracing() + + +class TracedModule(ScriptModule): + _disable_script_meta = True + + def __init__(self, orig, id_set=None, _compilation_unit=None): + # XXX: orig can be a nn.Module or a function! + super().__init__() + assert isinstance(orig, torch.nn.Module) + + # Copy a subset of `orig` to a temporary nn.Module. 
+ # This is a way to customize what will actually get compiled by create_script_module + id_set = set() + + # This allows us to preserve the original module's qualified name by defining a new + # type with the attribute _jit_override_qualname. In torch._jit_internal._qualified_name + # we have a special case that will look up this attribute to override whatever qualname + # we would get from the python type system + class QualnameWrapper(torch.nn.Module): + pass + + QualnameWrapper._jit_override_qualname = torch._jit_internal._qualified_name( # type: ignore[attr-defined] + type(orig) + ) + + tmp_module = QualnameWrapper() + + def check_unique(param): + if param in id_set: + raise ValueError( + "TracedModules don't support parameter sharing between modules" + ) + id_set.add(param) + + tmp_module.training = orig.training + + for name, param in orig._parameters.items(): + if param is not None: + tmp_module._parameters[name] = param + check_unique(param) + for name, buf in orig._buffers.items(): + if buf is not None: + tmp_module._buffers[name] = buf + check_unique(buf) + for name, val in orig.__dict__.items(): + if ( + torch._C._jit_is_script_object(val) + and name not in orig._parameters + and name not in orig._buffers + ): + setattr(tmp_module, name, val) + + if orig._backward_hooks: + raise ValueError( + "Modules that have backward hooks assigned can't be compiled: " + + str(orig) + ) + + for name, submodule in orig._modules.items(): + if submodule is None: + continue + tmp_module._modules[name] = make_module( + submodule, TracedModule, _compilation_unit=None + ) + + script_module = torch.jit._recursive.create_script_module( + tmp_module, lambda module: (), share_types=False, is_tracing=True + ) + + self.__dict__["_name"] = type(orig).__name__ + self.__dict__["_actual_script_module"] = script_module + for name in ("_parameters", "_buffers", "_modules", "training"): + delattr(self, name) + + def forward(self, *args, **kwargs): + raise RuntimeError("Trace submodules cannot be called.") + + def __getattr__(self, attr): + if "_actual_script_module" not in self.__dict__: + return super().__getattr__(attr) + return getattr(self._actual_script_module, attr) + + def __setattr__(self, attr, value): + if "_actual_script_module" not in self.__dict__: + return super().__setattr__(attr, value) + setattr(self._actual_script_module, attr, value) + + def _get_name(self): + return self._name + + def extra_repr(self): + return f"original_name={self._name}" + + +class TopLevelTracedModule(TracedModule): + forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] + + def _reconstruct(self, cpp_module): + """ + Re-construct an instance of TopLevelTracedModule using an instance of a C++ module. + + Args: + cpp_module: The C++ module that this TopLevelTracedModule will be rebuilt around. 
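+
+        Example (illustrative sketch, not from the original source; assumes
+        ``traced`` is an existing ``TopLevelTracedModule`` and ``cpp_module``
+        is a compatible C++ module, e.g. one produced while deserializing a
+        traced module)::
+
+            traced._reconstruct(cpp_module)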
+ """ + self.__dict__["_actual_script_module"]._reconstruct(cpp_module) + + +def _script_if_tracing(fn: Callable[P, R]) -> Callable[P, R]: + @functools.wraps(fn) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + if not is_tracing(): + # Not tracing, don't do anything + return fn(*args, **kwargs) + + compiled_fn: Callable[P, R] = script(wrapper.__original_fn) # type: ignore[attr-defined] + return compiled_fn(*args, **kwargs) + + wrapper.__original_fn = fn # type: ignore[attr-defined] + wrapper.__script_if_tracing_wrapper = True # type: ignore[attr-defined] + + return wrapper + + +def _get_trace_graph( + f, + args=(), + kwargs=None, + strict=True, + _force_outplace=False, + return_inputs=False, + _return_inputs_states=False, +): + """Return a tuple on tracing a function or model. + + .. warning:: + This function is internal-only and should only be used by the ONNX + exporter. If you are trying to get a graph through tracing, please go + through the public API instead:: + + trace = torch.jit.trace(nn.LSTMCell(), (input, hidden)) + trace_graph = trace.graph + + Trace a function or model, returning a tuple consisting of the both the + *trace* of an execution, as well as the original return value. If return_inputs, + also returns the trace inputs as part of the tuple + + Tracing is guaranteed not to change the semantics of the function/module + that is traced. + + Args: + f (torch.nn.Module or function): the function or module + to be traced. + args (tuple or Tensor): the positional arguments to pass to the + function/module to be traced. A non-tuple is assumed to + be a single positional argument to be passed to the model. + kwargs (dict): the keyword arguments to pass to the function/module + to be traced. + + Example (trace a cell): + + .. testcode:: + + trace = torch.jit.trace(nn.LSTMCell(), (input, hidden)) + """ + if kwargs is None: + kwargs = {} + if not isinstance(args, tuple): + args = (args,) + outs = ONNXTracedModule( + f, strict, _force_outplace, return_inputs, _return_inputs_states + )(*args, **kwargs) + return outs diff --git a/janus/lib/python3.10/site-packages/torch/jit/annotations.py b/janus/lib/python3.10/site-packages/torch/jit/annotations.py new file mode 100644 index 0000000000000000000000000000000000000000..0337342edaf97be421b037dafdde199a3d29af42 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/annotations.py @@ -0,0 +1,551 @@ +# mypy: allow-untyped-defs +import ast +import builtins +import dis +import enum +import inspect +import re +import typing +import warnings +from textwrap import dedent +from typing import Type + +import torch +from torch._C import ( + _GeneratorType, + AnyType, + AwaitType, + BoolType, + ComplexType, + DeviceObjType, + DictType, + EnumType, + FloatType, + FutureType, + InterfaceType, + IntType, + ListType, + NoneType, + NumberType, + OptionalType, + StreamObjType, + StringType, + TensorType, + TupleType, + UnionType, +) +from torch._jit_internal import ( # type: ignore[attr-defined] + _Await, + _qualified_name, + Any, + BroadcastingList1, + BroadcastingList2, + BroadcastingList3, + Dict, + Future, + is_await, + is_dict, + is_future, + is_ignored_fn, + is_list, + is_optional, + is_tuple, + is_union, + List, + Optional, + Tuple, + Union, +) +from torch._sources import get_source_lines_and_file + +from ._state import _get_script_class + + +if torch.distributed.rpc.is_available(): + from torch._C import RRefType + from torch._jit_internal import is_rref, RRef + +from torch._ops import OpOverloadPacket + + +class Module: + def 
__init__(self, name, members): + self.name = name + self.members = members + + def __getattr__(self, name): + try: + return self.members[name] + except KeyError: + raise RuntimeError( + f"Module {self.name} has no member called {name}" + ) from None + + +class EvalEnv: + env = { + "torch": Module("torch", {"Tensor": torch.Tensor}), + "Tensor": torch.Tensor, + "typing": Module("typing", {"Tuple": Tuple}), + "Tuple": Tuple, + "List": List, + "Dict": Dict, + "Optional": Optional, + "Union": Union, + "Future": Future, + "Await": _Await, + } + + def __init__(self, rcb): + self.rcb = rcb + if torch.distributed.rpc.is_available(): + self.env["RRef"] = RRef + + def __getitem__(self, name): + if name in self.env: + return self.env[name] + if self.rcb is not None: + return self.rcb(name) + return getattr(builtins, name, None) + + +def get_signature(fn, rcb, loc, is_method): + if isinstance(fn, OpOverloadPacket): + signature = try_real_annotations(fn.op, loc) + else: + signature = try_real_annotations(fn, loc) + if signature is not None and is_method: + # If this is a method, then the signature will include a type for + # `self`, but type comments do not contain a `self`. So strip it + # away here so everything is consistent (`inspect.ismethod` does + # not work here since `fn` is unbound at this point) + param_types, return_type = signature + param_types = param_types[1:] + signature = (param_types, return_type) + + if signature is None: + type_line, source = None, None + try: + source = dedent("".join(get_source_lines_and_file(fn)[0])) + type_line = get_type_line(source) + except TypeError: + pass + # This might happen both because we failed to get the source of fn, or + # because it didn't have any annotations. + if type_line is not None: + signature = parse_type_line(type_line, rcb, loc) + + return signature + + +def is_function_or_method(the_callable): + # A stricter version of `inspect.isroutine` that does not pass for built-in + # functions + return inspect.isfunction(the_callable) or inspect.ismethod(the_callable) + + +def is_vararg(the_callable): + if not is_function_or_method(the_callable) and callable(the_callable): # noqa: B004 + # If `the_callable` is a class, de-sugar the call so we can still get + # the signature + the_callable = the_callable.__call__ + + if is_function_or_method(the_callable): + return inspect.getfullargspec(the_callable).varargs is not None + else: + return False + + +def get_param_names(fn, n_args): + if isinstance(fn, OpOverloadPacket): + fn = fn.op + + if ( + not is_function_or_method(fn) + and callable(fn) + and is_function_or_method(fn.__call__) + ): # noqa: B004 + # De-sugar calls to classes + fn = fn.__call__ + + if is_function_or_method(fn): + if is_ignored_fn(fn): + fn = inspect.unwrap(fn) + return inspect.getfullargspec(fn).args + else: + # The `fn` was not a method or function (maybe a class with a __call__ + # method, so use a default param name list) + return [str(i) for i in range(n_args)] + + +def check_fn(fn, loc): + # Make sure the function definition is not a class instantiation + try: + source = dedent("".join(get_source_lines_and_file(fn)[0])) + except (OSError, TypeError): + return + if source is None: + return + + py_ast = ast.parse(source) + if len(py_ast.body) == 1 and isinstance(py_ast.body[0], ast.ClassDef): + raise torch.jit.frontend.FrontendError( + loc, + f"Cannot instantiate class '{py_ast.body[0].name}' in a script function", + ) + if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef): + raise 
torch.jit.frontend.FrontendError(
+            loc, "Expected a single top-level function"
+        )
+
+
+def _eval_no_call(stmt, glob, loc):
+    """Evaluate statement as long as it does not contain any method/function calls."""
+    bytecode = compile(stmt, "<string>", mode="eval")
+    for insn in dis.get_instructions(bytecode):
+        if "CALL" in insn.opname:
+            raise RuntimeError(
+                f"Type annotation should not contain calls, but '{stmt}' does"
+            )
+    return eval(bytecode, glob, loc)  # type: ignore[arg-type] # noqa: P204
+
+
+def parse_type_line(type_line, rcb, loc):
+    """Parse a type annotation specified as a comment.
+
+    Example inputs:
+        # type: (Tensor, torch.Tensor) -> Tuple[Tensor]
+        # type: (Tensor, Tuple[Tensor, Tensor]) -> Tensor
+    """
+    arg_ann_str, ret_ann_str = split_type_line(type_line)
+
+    try:
+        arg_ann = _eval_no_call(arg_ann_str, {}, EvalEnv(rcb))
+    except (NameError, SyntaxError) as e:
+        raise RuntimeError(
+            "Failed to parse the argument list of a type annotation"
+        ) from e
+
+    if not isinstance(arg_ann, tuple):
+        arg_ann = (arg_ann,)
+
+    try:
+        ret_ann = _eval_no_call(ret_ann_str, {}, EvalEnv(rcb))
+    except (NameError, SyntaxError) as e:
+        raise RuntimeError(
+            "Failed to parse the return type of a type annotation"
+        ) from e
+
+    arg_types = [ann_to_type(ann, loc) for ann in arg_ann]
+    return arg_types, ann_to_type(ret_ann, loc)
+
+
+def get_type_line(source):
+    """Try to find the line containing a comment with the type annotation."""
+    type_comment = "# type:"
+
+    lines = source.split("\n")
+    lines = list(enumerate(lines))
+    type_lines = list(filter(lambda line: type_comment in line[1], lines))
+    # `type: ignore` comments may be needed in JIT'ed functions for mypy, due
+    # to the hack in torch/_VF.py.
+
+    # An ignore type comment can be of following format:
+    #   1) type: ignore
+    #   2) type: ignore[rule-code]
+    # This ignore statement must be at the end of the line
+
+    # adding an extra backslash before the space, to avoid triggering
+    # one of the checks in .github/workflows/lint.yml
+    type_pattern = re.compile("# type:\\ ignore(\\[[a-zA-Z-]+\\])?$")
+    type_lines = list(filter(lambda line: not type_pattern.search(line[1]), type_lines))
+
+    if len(type_lines) == 0:
+        # Catch common typo patterns like extra spaces, typo in 'ignore', etc.
+        wrong_type_pattern = re.compile("#[\t ]*type[\t ]*(?!: ignore(\\[.*\\])?$):")
+        wrong_type_lines = list(
+            filter(lambda line: wrong_type_pattern.search(line[1]), lines)
+        )
+        if len(wrong_type_lines) > 0:
+            raise RuntimeError(
+                "The annotation prefix in line "
+                + str(wrong_type_lines[0][0])
+                + " is probably invalid.\nIt must be '# type:'"
+                + "\nSee PEP 484 (https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code)"  # noqa: B950
+                + "\nfor examples"
+            )
+        return None
+    elif len(type_lines) == 1:
+        # Only 1 type line, quit now
+        return type_lines[0][1].strip()
+
+    # Parse split up argument types according to PEP 484
+    # https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code
+    return_line = None
+    parameter_type_lines = []
+    for line_num, line in type_lines:
+        if "# type: (...) -> " in line:
+            return_line = (line_num, line)
+            break
+        elif type_comment in line:
+            parameter_type_lines.append(line)
+    if return_line is None:
+        raise RuntimeError(
+            "Return type line '# type: (...) -> ...' 
not found on multiline " + "type annotation\nfor type lines:\n" + + "\n".join([line[1] for line in type_lines]) + + "\n(See PEP 484 https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code)" + ) + + def get_parameter_type(line): + item_type = line[line.find(type_comment) + len(type_comment) :] + return item_type.strip() + + types = map(get_parameter_type, parameter_type_lines) + parameter_types = ", ".join(types) + + return return_line[1].replace("...", parameter_types) + + +def split_type_line(type_line): + """Split the comment with the type annotation into parts for argument and return types. + + For example, for an input of: + # type: (Tensor, torch.Tensor) -> Tuple[Tensor, Tensor] + + This function will return: + ("(Tensor, torch.Tensor)", "Tuple[Tensor, Tensor]") + + """ + start_offset = len("# type:") + try: + arrow_pos = type_line.index("->") + except ValueError: + raise RuntimeError( + "Syntax error in type annotation (couldn't find `->`)" + ) from None + return type_line[start_offset:arrow_pos].strip(), type_line[arrow_pos + 2 :].strip() + + +def try_real_annotations(fn, loc): + """Try to use the Py3.5+ annotation syntax to get the type.""" + try: + # Note: anything annotated as `Optional[T]` will automatically + # be returned as `Union[T, None]` per + # https://github.com/python/typing/blob/master/src/typing.py#L850 + sig = inspect.signature(fn) + except ValueError: + return None + + all_annots = [sig.return_annotation] + [ + p.annotation for p in sig.parameters.values() + ] + if all(ann is sig.empty for ann in all_annots): + return None + + arg_types = [ann_to_type(p.annotation, loc) for p in sig.parameters.values()] + return_type = ann_to_type(sig.return_annotation, loc) + return arg_types, return_type + + +# Finds common type for enum values belonging to an Enum class. If not all +# values have the same type, AnyType is returned. +def get_enum_value_type(e: Type[enum.Enum], loc): + enum_values: List[enum.Enum] = list(e) + if not enum_values: + raise ValueError(f"No enum values defined for: '{e.__class__}'") + + types = {type(v.value) for v in enum_values} + ir_types = [try_ann_to_type(t, loc) for t in types] + + # If Enum values are of different types, an exception will be raised here. + # Even though Python supports this case, we chose to not implement it to + # avoid overcomplicate logic here for a rare use case. Please report a + # feature request if you find it necessary. + res = torch._C.unify_type_list(ir_types) + if not res: + return AnyType.get() + return res + + +def is_tensor(ann): + if issubclass(ann, torch.Tensor): + return True + + if issubclass( + ann, + ( + torch.LongTensor, + torch.DoubleTensor, + torch.FloatTensor, + torch.IntTensor, + torch.ShortTensor, + torch.HalfTensor, + torch.CharTensor, + torch.ByteTensor, + torch.BoolTensor, + ), + ): + warnings.warn( + "TorchScript will treat type annotations of Tensor " + "dtype-specific subtypes as if they are normal Tensors. " + "dtype constraints are not enforced in compilation either." + ) + return True + + return False + + +def _fake_rcb(inp): + return None + + +def try_ann_to_type(ann, loc, rcb=None): + ann_args = typing.get_args(ann) # always returns a tuple! 
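+    # Illustration of standard typing.get_args behavior, which the branches
+    # below rely on (shown here for reference, not part of the original source):
+    #   typing.get_args(List[int])      == (int,)
+    #   typing.get_args(Dict[str, int]) == (str, int)
+    #   typing.get_args(Optional[str])  == (str, type(None))
+    # A bare, unsubscripted annotation yields an empty tuple ().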
+ + if ann is inspect.Signature.empty: + return TensorType.getInferred() + if ann is None: + return NoneType.get() + if inspect.isclass(ann) and is_tensor(ann): + return TensorType.get() + if is_tuple(ann): + # Special case for the empty Tuple type annotation `Tuple[()]` + if len(ann_args) == 1 and ann_args[0] == (): + return TupleType([]) + return TupleType([try_ann_to_type(a, loc) for a in ann_args]) + if is_list(ann): + elem_type = try_ann_to_type(ann_args[0], loc) + if elem_type: + return ListType(elem_type) + if is_dict(ann): + key = try_ann_to_type(ann_args[0], loc) + value = try_ann_to_type(ann_args[1], loc) + # Raise error if key or value is None + if key is None: + raise ValueError( + f"Unknown type annotation: '{ann_args[0]}' at {loc.highlight()}" + ) + if value is None: + raise ValueError( + f"Unknown type annotation: '{ann_args[1]}' at {loc.highlight()}" + ) + return DictType(key, value) + if is_optional(ann): + if issubclass(ann_args[1], type(None)): + contained = ann_args[0] + else: + contained = ann_args[1] + valid_type = try_ann_to_type(contained, loc) + msg = "Unsupported annotation {} could not be resolved because {} could not be resolved. At\n{}" + assert valid_type, msg.format(repr(ann), repr(contained), repr(loc)) + return OptionalType(valid_type) + if is_union(ann): + # TODO: this is hack to recognize NumberType + if set(ann_args) == {int, float, complex}: + return NumberType.get() + inner: List = [] + # We need these extra checks because both `None` and invalid + # values will return `None` + # TODO: Determine if the other cases need to be fixed as well + for a in typing.get_args(ann): + if a is None: + inner.append(NoneType.get()) + maybe_type = try_ann_to_type(a, loc) + msg = "Unsupported annotation {} could not be resolved because {} could not be resolved. 
At\n{}" + assert maybe_type, msg.format(repr(ann), repr(maybe_type), repr(loc)) + inner.append(maybe_type) + return UnionType(inner) # type: ignore[arg-type] + if torch.distributed.rpc.is_available() and is_rref(ann): + return RRefType(try_ann_to_type(ann_args[0], loc)) + if is_future(ann): + return FutureType(try_ann_to_type(ann_args[0], loc)) + if is_await(ann): + elementType = try_ann_to_type(ann_args[0], loc) if ann_args else AnyType.get() + return AwaitType(elementType) + if ann is float: + return FloatType.get() + if ann is complex: + return ComplexType.get() + if ann is int or ann is torch.SymInt: + return IntType.get() + if ann is str: + return StringType.get() + if ann is bool: + return BoolType.get() + if ann is Any: + return AnyType.get() + if ann is type(None): + return NoneType.get() + if inspect.isclass(ann) and hasattr(ann, "__torch_script_interface__"): + return InterfaceType(ann.__torch_script_interface__) + if ann is torch.device: + return DeviceObjType.get() + if ann is torch.Generator: + return _GeneratorType.get() + if ann is torch.Stream: + return StreamObjType.get() + if ann is torch.dtype: + return IntType.get() # dtype not yet bound in as its own type + if inspect.isclass(ann) and issubclass(ann, enum.Enum): + if _get_script_class(ann) is None: + scripted_class = torch.jit._script._recursive_compile_class(ann, loc) + name = scripted_class.qualified_name() + else: + name = _qualified_name(ann) + return EnumType(name, get_enum_value_type(ann, loc), list(ann)) + if inspect.isclass(ann): + maybe_script_class = _get_script_class(ann) + if maybe_script_class is not None: + return maybe_script_class + if torch._jit_internal.can_compile_class(ann): + return torch.jit._script._recursive_compile_class(ann, loc) + + # Maybe resolve a NamedTuple to a Tuple Type + if rcb is None: + rcb = _fake_rcb + return torch._C._resolve_type_from_object(ann, loc, rcb) + + +def ann_to_type(ann, loc, rcb=None): + the_type = try_ann_to_type(ann, loc, rcb) + if the_type is not None: + return the_type + raise ValueError(f"Unknown type annotation: '{ann}' at {loc.highlight()}") + + +__all__ = [ + "Any", + "List", + "BroadcastingList1", + "BroadcastingList2", + "BroadcastingList3", + "Tuple", + "is_tuple", + "is_list", + "Dict", + "is_dict", + "is_optional", + "is_union", + "TensorType", + "TupleType", + "FloatType", + "ComplexType", + "IntType", + "ListType", + "StringType", + "DictType", + "AnyType", + "Module", + # TODO: Consider not exporting these during wildcard import (reserve + # that for the types; for idiomatic typing code.) 
+ "get_signature", + "check_fn", + "get_param_names", + "parse_type_line", + "get_type_line", + "split_type_line", + "try_real_annotations", + "try_ann_to_type", + "ann_to_type", +] diff --git a/janus/lib/python3.10/site-packages/torch/jit/frontend.py b/janus/lib/python3.10/site-packages/torch/jit/frontend.py new file mode 100644 index 0000000000000000000000000000000000000000..7eb2d768c5259b5328b1fe121d0e49a3b61b67d3 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/frontend.py @@ -0,0 +1,1286 @@ +# mypy: allow-untyped-defs +import ast +import dataclasses +import inspect +import re +import string +import sys +from collections import namedtuple +from textwrap import dedent +from typing import List, Tuple # noqa: F401 + +import torch +import torch.jit.annotations +from torch import _jit_internal +from torch._C._jit_tree_views import ( + Apply, + Assert, + Assign, + Attribute, + AugAssign, + BinOp, + Break, + ClassDef, + Const, + Continue, + Decl, + Def, + Delete, + DictComp, + DictLiteral, + Dots, + EmptyTypeAnnotation, + ExprStmt, + FalseLiteral, + For, + Ident, + If, + ListComp, + ListLiteral, + NoneLiteral, + Param, + Pass, + Property, + Raise, + Return, + Select, + SliceExpr, + Starred, + Stmt, + StringLiteral, + Subscript, + TernaryIf, + TrueLiteral, + TupleLiteral, + UnaryOp, + Var, + While, + With, + WithItem, +) +from torch._jit_internal import ( # noqa: F401 + _is_drop_fn, + FunctionModifiers, + is_static_fn, + should_drop, +) +from torch._sources import ( + get_source_lines_and_file, + make_source_context, + parse_def, + ParsedDef as _ParsedDef, +) +from torch.jit._dataclass_impls import DATACLASS_MAGIC_METHODS +from torch.jit._monkeytype_config import get_qualified_name, monkeytype_trace + + +_IS_ASTUNPARSE_INSTALLED = False +try: + import astunparse # type: ignore[import] + + _IS_ASTUNPARSE_INSTALLED = True +except ImportError: + pass + +# Borrowed from cPython implementation +# https://github.com/python/cpython/blob/561612d8456cfab5672c9b445521113b847bd6b3/Lib/textwrap.py#L411# + +_reserved_prefix = "__jit" +_reserved_names = {"print"} +_identifier_chars = set(string.ascii_lowercase + string.ascii_uppercase + string.digits) + + +def is_reserved_name(name): + return name.startswith(_reserved_prefix) or name in _reserved_names + + +pretty_node_names = { + ast.FunctionDef: "function definitions", + ast.For: "for loops", + ast.Delete: "del statements", + ast.ClassDef: "class definitions", + ast.With: "with statements", + ast.Raise: "raise statements", + ast.Assert: "assertions", + ast.Import: "import statements", + ast.ImportFrom: "import statements", + ast.Global: "global variables", + ast.Break: "break statements", + ast.Continue: "continue statements", +} + +node_start_tokens = { + ast.FunctionDef: "def", + ast.For: "for", + ast.Delete: "del", + ast.ClassDef: "class", + ast.With: "with", + ast.Raise: "raise", + ast.Assert: "assert", + ast.Import: "import", + ast.ImportFrom: "from", + ast.Global: "global", + ast.Break: "break", + ast.Continue: "continue", +} + +pretty_node_names.update( + { + ast.AsyncFunctionDef: "async function definitions", + ast.AsyncFor: "async for loops", + ast.AsyncWith: "async with statements", + ast.Try: "try blocks", + ast.Nonlocal: "nonlocal variables", + } +) + +node_start_tokens.update( + { + ast.AsyncFunctionDef: "async def", + ast.AsyncFor: "async for", + ast.AsyncWith: "async with", + ast.Try: "try", + ast.Nonlocal: "nonlocal", + } +) + +pretty_node_names.update( + { + ast.AnnAssign: "annotated assignments", + } +) +# NB: no specific 
token for AnnAssign
+
+
+class FrontendError(Exception):
+    def __init__(self, source_range, msg):
+        self.source_range = source_range
+        self.msg = msg
+
+        # This has to be instantiated here so the ErrorReport is accurate to the
+        # call stack when the FrontendError was raised
+        self.error_report = torch._C.ErrorReport(self.source_range)
+
+    def __str__(self):
+        return self.msg + self.error_report.what().lstrip()
+
+
+class NotSupportedError(FrontendError):
+    pass
+
+
+class UnsupportedNodeError(NotSupportedError):
+    def __init__(self, ctx, offending_node, reason=""):
+        # If we don't have a specific token, we default to length of 1
+        node_type = type(offending_node)
+        range_len = len(node_start_tokens.get(node_type, " "))
+        source_range = ctx.make_range(
+            offending_node.lineno,
+            offending_node.col_offset,
+            offending_node.col_offset + range_len,
+        )
+        feature_name = pretty_node_names.get(node_type, node_type.__name__)
+        msg = f"{feature_name} {reason + ' ' if reason else ''}aren't supported"
+        super().__init__(source_range, msg)
+
+
+class FrontendTypeError(FrontendError):
+    pass
+
+
+def build_withitems(ctx, items):
+    items = [build_withitem(ctx, i) for i in items]
+    return list(items)
+
+
+def build_stmts(ctx, stmts):
+    stmts = [build_stmt(ctx, s) for s in stmts]
+    return list(filter(None, stmts))
+
+
+def get_class_properties(cls, self_name):
+    """
+    Get a list of Property objects representing the properties of a class.
+
+    Args:
+        cls:  The class to get properties of.
+        self_name: The name of the class that the properties should belong to.
+    Returns:
+        A list of Property objects corresponding to the properties of cls. Property
+        here refers to the subclass of TreeView.
+    """
+    props = inspect.getmembers(cls, predicate=lambda m: isinstance(m, property))
+    # Any property that should not be compiled must be in this list on the Module.
+    unused_properties = getattr(cls, "__jit_unused_properties__", [])
+
+    # Create Property TreeView objects from inspected property objects.
+    properties = []
+    for prop in props:
+        if prop[0] not in unused_properties and not should_drop(prop[1].fget):
+            getter = get_jit_def(
+                prop[1].fget, f"__{prop[0]}_getter", self_name=self_name
+            )
+            setter = (
+                get_jit_def(prop[1].fset, f"__{prop[0]}_setter", self_name=self_name)
+                if prop[1].fset
+                else None
+            )
+            properties.append(
+                Property(getter.range(), Ident(getter.range(), prop[0]), getter, setter)
+            )
+
+    return properties
+
+
+def get_class_assigns(ctx, cls_ast):
+    assigns = []
+
+    def maybe_build_assign(builder, entry):
+        nonlocal assigns
+        try:
+            assigns.append(builder(ctx, entry))
+        except NotSupportedError:
+            pass
+
+    for entry in cls_ast.body:
+        if isinstance(entry, ast.Assign):
+            maybe_build_assign(StmtBuilder.build_Assign, entry)
+        elif isinstance(entry, ast.AnnAssign):
+            maybe_build_assign(StmtBuilder.build_AnnAssign, entry)
+    return assigns
+
+
+def get_jit_class_def(cls, self_name):
+    """Get definitions for each method within the current class independently.
+
+    Args:
+        cls: The class to get the definition of.
+        self_name: The name of the class that the properties should belong to.
+
+    Returns:
+        torch._C._jit_tree_views.ClassDef: A representation of the class,
+            the methods in the class and their definition as a tree.
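+
+    Example (illustrative sketch, not part of the original source;
+    ``MyScriptableClass`` is a hypothetical user-defined class)::
+
+        class_def = get_jit_class_def(MyScriptableClass, "MyScriptableClass")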
+ """ + # TODO: proper overriding analysis when implementing class inheritance + methods = inspect.getmembers( + cls, + predicate=lambda m: (inspect.ismethod(m) or inspect.isfunction(m)) + and not is_static_fn(cls, m.__name__) + and m.__name__ in cls.__dict__ + and not _is_drop_fn(m), + ) + + def is_classmethod(fn): + return inspect.ismethod(fn) and getattr(fn, "__self__", None) == cls + + # Get and parse the source code for this class + sourcelines, file_lineno, filename = get_source_lines_and_file( + cls, torch._C.ErrorReport.call_stack() + ) + source = "".join(sourcelines) + + dedent_src = dedent(source) + py_ast = ast.parse(dedent_src) + + class_ast = py_ast.body[0] + assert isinstance(class_ast, ast.ClassDef) + + # Special case for dataclasses. In general we need access to the source code for + # an object in order to JIT compile it. But the dataclasses module dynamically synthesizes + # magic methods for classes, and we can't get the source code for these methods. As a + # workaround, we synthesize TorchScript-friendly implementations ourselves. + if dataclasses.is_dataclass(cls): + # Detect whether the user manually implemented any of the magic methods. If they did, + # we don't want to synthesize/override them. + overrides = { + method.name + for method in class_ast.body + if isinstance(method, ast.FunctionDef) + and method.name in DATACLASS_MAGIC_METHODS + } + for i, (name, _) in enumerate(methods): + # Is this a magic method we can synthesize? + synthesizer_fn = DATACLASS_MAGIC_METHODS.get(name) + if synthesizer_fn and name not in overrides: + parsed_def = synthesizer_fn(cls) + methods[i] = name, parsed_def + func = getattr(cls, name) + _jit_internal.loader.cache(func, parsed_def.source) + + method_defs = [ + get_jit_def(obj, name, self_name=self_name, is_classmethod=is_classmethod(obj)) + for (name, obj) in methods + ] + properties = get_class_properties(cls, self_name) + + leading_whitespace_len = len(source.split("\n", 1)[0]) - len( + dedent_src.split("\n", 1)[0] + ) + ctx = make_source_context( + source, filename, file_lineno, leading_whitespace_len, False + ) + assigns = get_class_assigns(ctx, class_ast) + + return build_class_def(ctx, class_ast, method_defs, properties, self_name, assigns) + + +def get_jit_def(fn, def_name, self_name=None, is_classmethod=False): + """ + Build a JIT AST (TreeView) from the given function. + + Args: + fn: A function object to compile or a pre-parsed ParsedDef object + def_name: The name to give to the resulting AST object. This is not + always the same as `fn.__name__`, for example: + def _forward(self): + ... + forward = _forward + In this case, the `__name__` attribute of the function object is "_forward", + but we want the result AST to have the name "forward". + self_name: If this function is a method, what the type name of `self` is. 
+ """ + parsed_def = parse_def(fn) if not isinstance(fn, _ParsedDef) else fn + type_line = torch.jit.annotations.get_type_line(parsed_def.source) + fn_def = parsed_def.ast.body[0] + + if is_classmethod: + arg_name = fn_def.args.args[0].arg + # Insert a statement that assigns the first argument to the class + assign_stmt = ast.parse(f"{arg_name} = {self_name}").body[0] + fn_def.body.insert(0, assign_stmt) + + # Swap out the function signature and body if it is unused + if should_drop(fn): + unused_fn_def = ast.parse( + 'def unused_fn(self: Any):\n\traise RuntimeError("Cannot call @unused methods")' + ) + if len(unused_fn_def.body) != 1 or not isinstance( + unused_fn_def.body[0], ast.FunctionDef + ): + raise RuntimeError( + f"Expected a single top-level function: {parsed_def.filename}:{parsed_def.file_lineno}" + ) + unused_def = unused_fn_def.body[0] + fn_def.body = unused_def.body + # kwarg/vararg not supported by `build_def` + fn_def.args.kwarg = fn_def.args.vararg = None + for arg in fn_def.args.args + fn_def.args.kwonlyargs: + # Replace potentially unsupported type annotations by "Any" + arg.annotation = unused_def.args.args[0].annotation + if _is_drop_fn(fn): + # Dropping potentially unsupported return type annotation for jit._drop + fn_def.returns = None + fn_def.type_comment = None + + # If MonkeyType is installed, get all the consolidated type traces + # for the arguments from type_trace_db + type_trace_db = torch.jit._script._get_type_trace_db() + pdt_arg_types = None + if monkeytype_trace and not isinstance(fn, _ParsedDef): # type: ignore[truthy-function] + qualname = get_qualified_name(fn) + pdt_arg_types = type_trace_db.get_args_types(qualname) + + return build_def( + parsed_def.ctx, + fn_def, + type_line, + def_name, + self_name=self_name, + pdt_arg_types=pdt_arg_types, + ) + + +# TODO: more robust handling of recognizing ignore context manager +def is_torch_jit_ignore_context_manager(stmt): + # checks if the statement is torch.jit.ignore context manager + if isinstance(stmt.items[0].context_expr, ast.Call): + # extract torch part + function = stmt.items[0].context_expr.func + if isinstance(function, ast.Attribute): + attr_name = function.attr + attr_value = function.value + if attr_name == "_IgnoreContextManager" and isinstance( + attr_value, ast.Attribute + ): + # there should be at most two nested attributes (e.g torch.jit._IgnoreContextManager) + if attr_value.attr == "jit" and isinstance(attr_value.value, ast.Name): + if attr_value.value.id == "torch": + return True + return False + + +class Builder: + def __call__(self, ctx, node): + method = getattr(self, "build_" + node.__class__.__name__, None) + if method is None: + raise UnsupportedNodeError(ctx, node) + return method(ctx, node) + + +def build_class_def(ctx, py_def, methods, properties, self_name, assigns): + r = ctx.make_range( + py_def.lineno, py_def.col_offset, py_def.col_offset + len("class") + ) + return ClassDef( + Ident(r, self_name), [Stmt(method) for method in methods], properties, assigns + ) + + +def build_def(ctx, py_def, type_line, def_name, self_name=None, pdt_arg_types=None): + body = py_def.body + r = ctx.make_range(py_def.lineno, py_def.col_offset, py_def.col_offset + len("def")) + + param_list = build_param_list(ctx, py_def.args, self_name, pdt_arg_types) + return_type = None + if getattr(py_def, "returns", None) is not None: + return_type = build_expr(ctx, py_def.returns) + + decl = Decl(r, param_list, return_type) + is_method = self_name is not None + if type_line is not None: + type_comment_decl = 
torch._C.parse_type_comment(type_line) + decl = torch._C.merge_type_from_type_comment(decl, type_comment_decl, is_method) + + return Def(Ident(r, def_name), decl, build_stmts(ctx, body)) + + +_vararg_kwarg_err = ( + "Compiled functions can't take variable number of arguments " + "or use keyword-only arguments with defaults" +) + + +def build_param_list(ctx, py_args, self_name, pdt_arg_types=None): + if py_args.kwarg is not None: + expr = py_args.kwarg + ctx_range = ctx.make_range( + expr.lineno, expr.col_offset - 1, expr.col_offset + len(expr.arg) + ) + raise NotSupportedError(ctx_range, _vararg_kwarg_err) + if py_args.vararg is not None: + expr = py_args.vararg + ctx_range = ctx.make_range( + expr.lineno, expr.col_offset - 1, expr.col_offset + len(expr.arg) + ) + raise NotSupportedError(ctx_range, _vararg_kwarg_err) + if len(py_args.kw_defaults) > 0: + # kw_defaults is a list of the values for the kwargs (which default to None), + # so they don't actually have line numbers. + for arg in py_args.kw_defaults: + if arg is not None: + ctx_range = build_expr(ctx, arg).range() + raise NotSupportedError(ctx_range, _vararg_kwarg_err) + + # List of Tuple of args and type as inferred by profile directed typing + arg_and_types = [ + ( + arg, + pdt_arg_types[arg.arg] + if pdt_arg_types and bool(pdt_arg_types[arg.arg]) + else None, + ) + for arg in py_args.args + ] + arg_and_types_kwonlyargs = [ + ( + arg, + pdt_arg_types[arg.arg] + if pdt_arg_types and bool(pdt_arg_types[arg.arg]) + else None, + ) + for arg in py_args.kwonlyargs + ] + + result = [ + build_param(ctx, arg, self_name, kwarg_only=False, pdt_arg_type=arg_type) + for arg, arg_type in arg_and_types + ] + result += [ + build_param(ctx, arg, self_name, kwarg_only=True, pdt_arg_type=arg_type) + for arg, arg_type in arg_and_types_kwonlyargs + ] + return result + + +def build_param(ctx, py_arg, self_name, kwarg_only, pdt_arg_type=None): + # NB: In Python3 py_arg is a pair of (str arg, expr? 
annotation)
+    name = py_arg.arg
+    r = ctx.make_range(py_arg.lineno, py_arg.col_offset, py_arg.col_offset + len(name))
+    if getattr(py_arg, "annotation", None) is not None:
+        annotation_expr = build_expr(ctx, py_arg.annotation)
+    elif pdt_arg_type:
+        annotation_expr = Var(Ident(r, pdt_arg_type))
+    elif self_name is not None and name == "self":
+        annotation_expr = Var(Ident(r, self_name))
+    else:
+        annotation_expr = EmptyTypeAnnotation(r)
+    return Param(annotation_expr, Ident(r, name), kwarg_only)
+
+
+def build_ignore_context_manager(ctx, stmt):
+    InputType = namedtuple("InputType", ["name", "ann"])
+    OutputType = namedtuple("OutputType", ["name", "ann"])
+
+    def process_ins_outs(args):
+        # parse the context manager to figure out inputs and outputs
+        # with their annotated types
+        # TODO: add input, output validator
+        inputs = []
+        outputs = []
+        for arg in args:
+            var_name = arg.arg
+            var_ann = arg.value.value
+            var_decl_type, var_ann = var_ann.split(":")
+            if var_decl_type == "inp":
+                inputs.append(InputType(var_name, var_ann))
+            if var_decl_type == "out":
+                outputs.append(OutputType(var_name, var_ann))
+        return inputs, outputs
+
+    def create_unique_name_ext(ctx, stmt):
+        # extension will be based on the full path filename plus
+        # the line number of original context manager
+        fn = re.sub(r"[^a-zA-Z0-9_]", "_", ctx.filename)
+        return f"{fn}_{stmt.lineno}"
+
+    def build_return_ann_stmt(outputs):
+        return_type_ann = ""
+        return_statement_str = "return "
+        if len(outputs) == 0:
+            return_type_ann += " -> None"
+        if len(outputs) == 1:
+            return_type_ann = " -> " + outputs[0].ann
+            return_statement_str += outputs[0].name
+        if len(outputs) > 1:
+            return_type_ann = " -> Tuple"
+            return_type_ann += "[" + ", ".join([var.ann for var in outputs]) + "]"
+            return_statement_str += ", ".join([var.name for var in outputs])
+        return return_type_ann, return_statement_str
+
+    def build_args(args):
+        return ", ".join([arg.name for arg in args])
+
+    inputs, outputs = process_ins_outs(stmt.items[0].context_expr.keywords)
+
+    # build the replacement function str with given inputs and outputs
+    ignore_function_name = "func_ignore_" + create_unique_name_ext(ctx, stmt)
+    ignore_function_str = "\ndef " + ignore_function_name
+    ignore_function_str += (
+        "(" + ", ".join([var.name + " :" + var.ann for var in inputs]) + ")"
+    )
+
+    return_ann, return_stmt = build_return_ann_stmt(outputs)
+    ignore_function_str += return_ann + ": pass"
+
+    # first create the functionDef object from just declaration
+    ignore_function = ast.parse(ignore_function_str).body[0]
+
+    # dump the body of context manager to dummy function
+    ignore_function.body = stmt.body  # type: ignore[attr-defined]
+
+    # insert return statement to the function
+    return_stmt = ast.parse(return_stmt).body[0]
+    ignore_function.body.append(return_stmt)  # type: ignore[attr-defined]
+
+    # registers the custom function in the global context
+    ignore_func_str = "@torch.jit.ignore\n" + astunparse.unparse(ignore_function)
+    ignore_func_str += f'\nglobals()["{ignore_function_name}"] = {ignore_function_name}'
+    exec(ignore_func_str)  # noqa: P204
+
+    # build the statements as:
+    # <out_1>, <out_2>, ... = torch.jit.frontend.<func>(<in_1>, <in_2>)
+    assign_str_lhs = build_args(outputs)
+    # this function will be registered in torch.jit.frontend module by default
+    assign_str_rhs = (
+        f"torch.jit.frontend.{ignore_function_name}(" + build_args(inputs) + ")"
+    )
+
+    if len(outputs) > 0:
+        assign_str = assign_str_lhs + " = " + assign_str_rhs
+    else:
+        assign_str = assign_str_rhs
+    assign_ast = ast.parse(assign_str).body[0]
+    return assign_ast
+
+
+def get_default_args(fn):
+    """
+    Get a dictionary of default arguments for a function.
+
+    Args:
+        fn: Callable - The function to inspect for default arguments.
+    Returns:
+        (Dict[str, Any]): mapping argument names to their default values if
+        :attr:`fn` is not None, else empty dictionary.
+    """
+    if fn is None:
+        return {}
+
+    signature = inspect.signature(fn)
+
+    return {
+        k: v.default
+        for k, v in signature.parameters.items()
+        if v.default is not inspect.Parameter.empty
+    }
+
+
+def get_default_args_for_class(cls):
+    """
+    Get default arguments for all methods in a class (except for static methods).
+
+    Args:
+        cls: type - The class type to inspect for default arguments.
+    Returns:
+        A Dict[str, Dict[str, Any]] which maps each method name to a Dict[str, Any]
+        that maps each argument name to its default value.
+    """
+    # Get methods (except static methods because those are compiled separately as
+    # if they were independent script functions).
+    methods = inspect.getmembers(
+        cls,
+        predicate=lambda m: (inspect.ismethod(m) or inspect.isfunction(m))
+        and not is_static_fn(cls, m.__name__)
+        and m.__name__ in cls.__dict__,
+    )
+
+    # Get method defaults. Property defaults do not need to be considered
+    # because setters cannot be invoked without a value.
+    defaults = {
+        method_name: get_default_args(method_impl)
+        for method_name, method_impl in methods
+    }
+
+    return defaults
+
+
+class WithItemBuilder(Builder):
+    @staticmethod
+    def build_withitem(ctx, item):
+        lineno = item.context_expr.lineno
+        start = item.context_expr.col_offset
+        end = start + len(pretty_node_names[ast.With])
+        op_vars = item.optional_vars
+        r = ctx.make_range(lineno, start, end)
+
+        return WithItem(
+            r,
+            build_expr(ctx, item.context_expr),
+            build_expr(ctx, op_vars) if op_vars else None,
+        )
+
+
+class StmtBuilder(Builder):
+    augassign_map = {
+        ast.Add: "+",
+        ast.Sub: "-",
+        ast.Mult: "*",
+        ast.Div: "/",
+        ast.Mod: "%",
+        ast.BitOr: "|",
+        ast.BitAnd: "&",
+        ast.BitXor: "^",
+        ast.LShift: "<<",
+        ast.RShift: ">>",
+        ast.Pow: "**",
+    }
+
+    @staticmethod
+    def build_Expr(ctx, stmt):
+        value = stmt.value
+        if value.__class__.__name__ == "Str":
+            # If a statement is a string literal expression,
+            # then it is a docstring. Just ignore it.
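+            # When None is returned here, build_stmts above drops the
+            # statement via `filter(None, stmts)`, so e.g. in
+            #     def f(x):
+            #         "docstring"
+            #         return x
+            # the bare string statement contributes nothing to the output AST.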
+ return None + else: + return ExprStmt(build_expr(ctx, value)) + + @staticmethod + def build_Assign(ctx, stmt): + rhs = build_expr(ctx, stmt.value) + lhs = [build_expr(ctx, x) for x in stmt.targets] + return Assign(lhs, rhs) + + @staticmethod + def build_AnnAssign(ctx, stmt): + if stmt.value is None: + raise UnsupportedNodeError(ctx, stmt, reason="without assigned value") + + # Disallow type annotations on instance attributes outside of __init__ + if ( + type(stmt.target) == ast.Attribute + and stmt.target.value.id == "self" # type: ignore[attr-defined] + and ctx.funcname != "__init__" + ): + start = stmt.col_offset + end = start + len(f"self.{stmt.target.attr}") + if hasattr(stmt.annotation, "id"): + end += len(f": {stmt.annotation.id}") + sr = ctx.make_range(stmt.lineno, start, end) + raise ValueError( + "Type annotations on instance attributes must be declared in " + f"__init__, not '{ctx.funcname}': {sr}" + ) + + rhs = build_expr(ctx, stmt.value) + lhs = build_expr(ctx, stmt.target) + the_type = build_expr(ctx, stmt.annotation) + return Assign([lhs], rhs, the_type) + + @staticmethod + def build_Delete(ctx, stmt): + r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("del")) + + return Delete(r, [build_expr(ctx, target) for target in stmt.targets]) + + @staticmethod + def build_Return(ctx, stmt): + r = ctx.make_range( + stmt.lineno, stmt.col_offset, stmt.col_offset + len("return") + ) + return Return(r, None if stmt.value is None else build_expr(ctx, stmt.value)) + + @staticmethod + def build_Raise(ctx, stmt): + r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("raise")) + expr = build_expr(ctx, stmt.exc) + return Raise(r, expr) + + @staticmethod + def build_Assert(ctx, stmt): + r = ctx.make_range( + stmt.lineno, stmt.col_offset, stmt.col_offset + len("assert") + ) + test = build_expr(ctx, stmt.test) + msg = build_expr(ctx, stmt.msg) if stmt.msg is not None else None + return Assert(r, test, msg) + + @staticmethod + def build_AugAssign(ctx, stmt): + lhs = build_expr(ctx, stmt.target) + rhs = build_expr(ctx, stmt.value) + op = type(stmt.op) + if op in StmtBuilder.augassign_map: + op_token = StmtBuilder.augassign_map[op] + else: + raise NotSupportedError( + find_before(ctx, rhs.range().start, "=", offsets=(-1, 0)), + "unsupported kind of augmented assignment: " + op.__name__, + ) + return AugAssign(lhs, op_token, rhs) + + @staticmethod + def build_While(ctx, stmt): + if stmt.orelse: + # TODO: try to recover the location of else:? 
Python doesn't give us useful + # annotations in this case + raise NotSupportedError( + None, "else branches of while loops aren't supported" + ) + r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("while")) + return While(r, build_expr(ctx, stmt.test), build_stmts(ctx, stmt.body)) + + @staticmethod + def build_For(ctx, stmt): + r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("for")) + if stmt.orelse: + raise NotSupportedError(r, "else branches of for loops aren't supported") + + return For( + r, + [build_expr(ctx, stmt.target)], + [build_expr(ctx, stmt.iter)], + build_stmts(ctx, stmt.body), + ) + + @staticmethod + def build_If(ctx, stmt): + r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("if")) + return If( + r, + build_expr(ctx, stmt.test), + build_stmts(ctx, stmt.body), + build_stmts(ctx, stmt.orelse), + ) + + @staticmethod + def build_Print(ctx, stmt): + r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("print")) + if stmt.dest: + raise NotSupportedError( + r, "print statements with non-default destinations aren't supported" + ) + args = [build_expr(ctx, val) for val in stmt.values] + return ExprStmt(Apply(Var(Ident(r, "print")), args, [])) + + @staticmethod + def build_Pass(ctx, stmt): + r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("pass")) + return Pass(r) + + @staticmethod + def build_Break(ctx, stmt): + r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("break")) + return Break(r) + + @staticmethod + def build_Continue(ctx, stmt): + r = ctx.make_range( + stmt.lineno, stmt.col_offset, stmt.col_offset + len("continue") + ) + return Continue(r) + + @staticmethod + def build_With(ctx, stmt): + r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("with")) + # Handle ignore context manager + if is_torch_jit_ignore_context_manager(stmt): + if not _IS_ASTUNPARSE_INSTALLED: + raise RuntimeError( + "torch.jit._IgnoreContextManager requires installing Python library `astunparse`, \ + please install it in your Python environment" + ) + assign_ast = build_ignore_context_manager(ctx, stmt) + return build_stmt(ctx, assign_ast) + return With(r, build_withitems(ctx, stmt.items), build_stmts(ctx, stmt.body)) + + +class ExprBuilder(Builder): + binop_map = { + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.Pow: "**", + ast.Mod: "%", + ast.FloorDiv: "//", + ast.BitAnd: "&", + ast.BitXor: "^", + ast.BitOr: "|", + ast.LShift: "<<", + ast.RShift: ">>", + } + + binop_map[ast.MatMult] = "@" + + unop_map = { + ast.Not: "not", + ast.USub: "-", + ast.Invert: "~", + } + + boolop_map = { + ast.And: "and", + ast.Or: "or", + } + + cmpop_map = { + ast.Eq: "==", + ast.NotEq: "!=", + ast.LtE: "<=", + ast.Lt: "<", + ast.GtE: ">=", + ast.Gt: ">", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in", + } + + @staticmethod + def build_Attribute(ctx, expr): + base = build_expr(ctx, expr.value) + # expr.attr is just a string, so it's not annotated in any way, so we have + # to build the range manually + source = ctx.source.encode("utf-8") + + def get_char(index): + return chr(source[index]) + + start_pos = base.range().end + 1 + while get_char(start_pos) in string.whitespace: # Skip whitespace + start_pos += 1 + end_pos = start_pos + len(expr.attr) + name_range = ctx.make_raw_range(start_pos, end_pos) + return Select(base, Ident(name_range, expr.attr)) + + @staticmethod + def build_Call(ctx, expr): + func = build_expr(ctx, 
expr.func) + args = [build_expr(ctx, py_arg) for py_arg in expr.args] + if hasattr(expr, "starargs") and expr.starargs: + stararg_expr = build_expr(ctx, expr.starargs) + args += [Starred(stararg_expr.range(), stararg_expr)] + kwargs = [] + for kw in expr.keywords: + kw_expr = build_expr(ctx, kw.value) + # XXX: we could do a better job at figuring out the range for the name here + if not kw.arg: + raise NotSupportedError( + kw_expr.range(), "keyword-arg expansion is not supported" + ) + kwargs.append(Attribute(Ident(kw_expr.range(), kw.arg), kw_expr)) + return Apply(func, args, kwargs) + + @staticmethod + def build_Ellipsis(ctx, expr): + r = ctx.make_range( + expr.lineno, expr.col_offset, expr.col_offset + 3 + ) # len("...") == 3 + return Dots(r) + + @staticmethod + def build_Name(ctx, expr): + r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(expr.id)) + if expr.id.startswith(_reserved_prefix): + raise NotSupportedError( + r, + "names of variables used in JIT-ed functions " + "can't start with " + _reserved_prefix, + ) + if expr.id == "True": + return TrueLiteral(r) + elif expr.id == "False": + return FalseLiteral(r) + elif expr.id == "None": + return NoneLiteral(r) + elif expr.id == "Ellipsis": + return Dots(r) + return Var(Ident(r, expr.id)) + + @staticmethod + def build_NameConstant(ctx, expr): + r = ctx.make_range( + expr.lineno, expr.col_offset, expr.col_offset + len(str(expr.value)) + ) + if expr.value is True: + return TrueLiteral(r) + elif expr.value is False: + return FalseLiteral(r) + elif expr.value is None: + return NoneLiteral(r) + elif expr.value == Ellipsis: + return Dots(r) + else: + raise ValueError("Name constant value unsupported: " + str(expr.value)) + + @staticmethod + def build_BinOp(ctx, expr): + lhs = build_expr(ctx, expr.left) + rhs = build_expr(ctx, expr.right) + op = type(expr.op) + + if op == ast.Div and not ctx.uses_true_division: + err_range = ctx.make_raw_range(lhs.range().end, rhs.range().start) + raise FrontendError( + err_range, + "Division of ints in TorchScript uses Python 3 true " + "division semantics. 
Please put `from __future__ " + "import division` at the top of your file", + ) + op_token = ExprBuilder.binop_map.get(op) + if op_token is None: + err_range = ctx.make_raw_range(lhs.range().end, rhs.range().start) + raise NotSupportedError( + err_range, "unsupported binary operator: " + op.__name__ + ) + return BinOp(op_token, lhs, rhs) + + @staticmethod + def build_UnaryOp(ctx, expr): + sub_expr = build_expr(ctx, expr.operand) + op = type(expr.op) + op_token = ExprBuilder.unop_map.get(op) + if op_token is None: + raise NotSupportedError( + expr.range(), "unsupported unary operator: " + op.__name__ + ) + r = ctx.make_range( + expr.lineno, expr.col_offset, expr.col_offset + len(op_token) + ) + return UnaryOp(r, op_token, sub_expr) + + @staticmethod + def build_BoolOp(ctx, expr): + if len(expr.values) < 2: + raise AssertionError( + "expected at least 2 values in BoolOp, but got " + str(len(expr.values)) + ) + sub_exprs = [build_expr(ctx, sub_expr) for sub_expr in expr.values] + op = type(expr.op) + op_token = ExprBuilder.boolop_map.get(op) + if op_token is None: + err_range = ctx.make_raw_range( + sub_exprs[0].range().end, sub_exprs[1].range().start + ) + raise NotSupportedError( + err_range, "unsupported boolean operator: " + op.__name__ + ) + lhs = sub_exprs[0] + for rhs in sub_exprs[1:]: + lhs = BinOp(op_token, lhs, rhs) + return lhs + + @staticmethod + def build_IfExp(ctx, expr): + return TernaryIf( + build_expr(ctx, expr.test), + build_expr(ctx, expr.body), + build_expr(ctx, expr.orelse), + ) + + @staticmethod + def build_Compare(ctx, expr): + operands = [build_expr(ctx, e) for e in [expr.left] + list(expr.comparators)] + result = None + for lhs, op_, rhs in zip(operands, expr.ops, operands[1:]): + op = type(op_) + op_token = ExprBuilder.cmpop_map.get(op) + r = ctx.make_raw_range(lhs.range().end, rhs.range().start) + if op_token is None: + raise NotSupportedError( + r, "unsupported comparison operator: " + op.__name__ + ) + + if op == ast.NotIn: + # NB: `not in` is just `not( in )`, so we don't introduce new tree view + # but just make it a nested call in our tree view structure + in_expr = BinOp("in", lhs, rhs) + cmp_expr = UnaryOp(r, "not", in_expr) + else: + cmp_expr = BinOp(op_token, lhs, rhs) + + if result is None: + result = cmp_expr + else: + result = BinOp("and", result, cmp_expr) + return result + + @staticmethod + def build_Subscript(ctx, expr): + def build_SliceExpr(ctx, base, slice_expr): + lower = ( + build_expr(ctx, slice_expr.lower) + if slice_expr.lower is not None + else None + ) + upper = ( + build_expr(ctx, slice_expr.upper) + if slice_expr.upper is not None + else None + ) + step = ( + build_expr(ctx, slice_expr.step) + if slice_expr.step is not None + else None + ) + return SliceExpr(base.range(), lower, upper, step) + + def build_Index(ctx, base, index_expr): + if isinstance(index_expr.value, ast.Tuple): + raise NotSupportedError( + base.range(), + "slicing multiple dimensions with tuples not supported yet", + ) + return build_expr(ctx, index_expr.value) + + def build_ExtSlice(ctx, base, extslice): + sub_exprs = [] + for expr in extslice.dims: + sub_type = type(expr) + if sub_type is ast.Index: + sub_exprs.append(build_Index(ctx, base, expr)) + elif sub_type is ast.Slice: + sub_exprs.append(build_SliceExpr(ctx, base, expr)) + elif sub_type is ast.Constant and expr.value is Ellipsis: + sub_exprs.append(Dots(base.range())) + else: + raise NotSupportedError( + base.range(), + f"slicing multiple dimensions with {sub_type} not supported", + ) + return sub_exprs + + 
+ + @staticmethod + def build_Subscript(ctx, expr): + def build_SliceExpr(ctx, base, slice_expr): + lower = ( + build_expr(ctx, slice_expr.lower) + if slice_expr.lower is not None + else None + ) + upper = ( + build_expr(ctx, slice_expr.upper) + if slice_expr.upper is not None + else None + ) + step = ( + build_expr(ctx, slice_expr.step) + if slice_expr.step is not None + else None + ) + return SliceExpr(base.range(), lower, upper, step) + + def build_Index(ctx, base, index_expr): + if isinstance(index_expr.value, ast.Tuple): + raise NotSupportedError( + base.range(), + "slicing multiple dimensions with tuples not supported yet", + ) + return build_expr(ctx, index_expr.value) + + def build_ExtSlice(ctx, base, extslice): + sub_exprs = [] + for expr in extslice.dims: + sub_type = type(expr) + if sub_type is ast.Index: + sub_exprs.append(build_Index(ctx, base, expr)) + elif sub_type is ast.Slice: + sub_exprs.append(build_SliceExpr(ctx, base, expr)) + elif sub_type is ast.Constant and expr.value is Ellipsis: + sub_exprs.append(Dots(base.range())) + else: + raise NotSupportedError( + base.range(), + f"slicing multiple dimensions with {sub_type} not supported", + ) + return sub_exprs + + base = build_expr(ctx, expr.value) + sub_type = type(expr.slice) + if sub_type is ast.Index: + if isinstance(expr.slice.value, ast.Tuple): + # N-dimensional indexing using Tuple: x[(i, j, k)] is equivalent to x[i, j, k] + # XXX: Indexing using a list is **different**! It triggers advanced indexing. + indices = [ + build_expr(ctx, index_expr) for index_expr in expr.slice.value.elts + ] + if not indices: + # `col_offset` is an int, but `end_col_offset` is + # `Optional[int]`. The magic number is here to make + # sure we can parse `()` on any machine + r = ctx.make_range( + expr.lineno, + expr.slice.value.col_offset, + expr.slice.value.col_offset + 2, + ) + tup = TupleLiteral(r, []) + indices.append(tup) + return Subscript(base, indices) + else: + return Subscript(base, [build_expr(ctx, expr.slice.value)]) + elif sub_type is ast.Slice: + return Subscript(base, [build_SliceExpr(ctx, base, expr.slice)]) + elif sub_type is ast.ExtSlice: + return Subscript(base, build_ExtSlice(ctx, base, expr.slice)) + elif sys.version_info >= ( + 3, + 9, + ): # In Python 3.9, subscript indices are no longer wrapped in ast.Index + if sub_type is ast.Tuple: + # N-dimensional indexing using Tuple: x[(i, j, k)] is equivalent to x[i, j, k] + indices = [] + for index_expr in expr.slice.elts: + if isinstance(index_expr, ast.Slice): + indices.append(build_SliceExpr(ctx, base, index_expr)) + else: + indices.append(build_expr(ctx, index_expr)) + # Special-case logic for `typing.Tuple[()]` + if not indices: + # See the note above about the magic number + r = ctx.make_range( + expr.lineno, expr.slice.col_offset, expr.slice.col_offset + 2 + ) + tup = TupleLiteral(r, []) + indices.append(tup) + return Subscript(base, indices) + return Subscript(base, [build_expr(ctx, expr.slice)]) + else: # Ellipsis (can only happen in Python 2) + raise NotSupportedError(base.range(), "ellipsis is not supported")
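The version branch above exists because Python 3.9 stopped wrapping subscript indices in ast.Index. A quick way to see the shape build_Subscript receives on a modern interpreter (assumes Python >= 3.9):

    import ast

    tree = ast.parse("x[1:2, 3]", mode="eval").body
    print(type(tree.slice).__name__)                    # Tuple (no ast.Index wrapper)
    print([type(e).__name__ for e in tree.slice.elts])  # ['Slice', 'Constant']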
+ + @staticmethod + def build_List(ctx, expr): + return ListLiteral( + ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1), + [build_expr(ctx, e) for e in expr.elts], + ) + + @staticmethod + def build_Tuple(ctx, expr): + return TupleLiteral( + ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1), + [build_expr(ctx, e) for e in expr.elts], + ) + + @staticmethod + def build_Dict(ctx, expr): + range = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1) + if expr.keys and not expr.keys[0]: + raise NotSupportedError( + range, "Dict expansion (e.g. `{**dict}`) is not supported" + ) + return DictLiteral( + range, + [build_expr(ctx, e) for e in expr.keys], + [build_expr(ctx, e) for e in expr.values], + ) + + @staticmethod + def build_Num(ctx, expr): + value = str(expr.value) + r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(value)) + return Const(r, value) + + @staticmethod + def build_Constant(ctx, expr): + value = expr.value + if value is None or isinstance(value, bool): + # NB: this check has to happen before the int check because bool is + # a subclass of int + return ExprBuilder.build_NameConstant(ctx, expr) + if isinstance(value, (int, float, complex)): + return ExprBuilder.build_Num(ctx, expr) + elif isinstance(value, str): + return ExprBuilder.build_Str(ctx, expr) + elif isinstance(value, type(Ellipsis)): + return ExprBuilder.build_Ellipsis(ctx, expr) + else: + error_range = ctx.make_range( + expr.lineno, expr.col_offset, expr.col_offset + len(str(value)) + ) + raise FrontendError(error_range, "Unknown Constant expression type") + + @staticmethod + def build_Str(ctx, expr): + value = str(expr.value) + r = ctx.make_range( + expr.lineno, expr.col_offset, expr.col_offset + len(value) + 1 + ) + return StringLiteral(r, value) + + @staticmethod + def build_JoinedStr(ctx, expr): + s = "" + args = [] + for value in expr.values: + r = ctx.make_range(value.lineno, value.col_offset, value.col_offset + 1) + if isinstance(value, ast.FormattedValue): + if value.conversion != -1: + raise NotSupportedError(r, "Conversion specifiers are not supported in JoinedStr") + if value.format_spec is not None: + raise NotSupportedError(r, "Format specs are not supported in JoinedStr") + s += "{}" + args.append(build_expr(ctx, value.value)) + elif isinstance(value, ast.Constant): + s += value.value + else: + raise NotSupportedError(r, "Unsupported value in JoinedStr") + + r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1) + return Apply(Select(StringLiteral(r, s), Ident(r, "format")), args, [])
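build_JoinedStr lowers an f-string to a "{}" template plus a call to str.format, which is why conversion specifiers (!r) and format specs (:.2f) are rejected above. The equivalence the lowering relies on, as plain Python:

    # f-strings without conversions or format specs are equivalent to a
    # positional str.format call, which is exactly what the builder emits.
    a, b = 2, 3
    assert f"sum={a + b}" == "sum={}".format(a + b)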
+ + @staticmethod + def build_ListComp(ctx, stmt): + r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset) + if len(stmt.generators) != 1: + raise NotSupportedError(r, "Only a single generator is currently supported") + + if len(stmt.generators[0].ifs) != 0: + raise NotSupportedError(r, "Comprehension ifs are not supported yet") + + elt_expr = build_expr(ctx, stmt.elt) + target_expr = build_expr(ctx, stmt.generators[0].target) + iter_expr = build_expr(ctx, stmt.generators[0].iter) + + return ListComp(r, elt_expr, target_expr, iter_expr) + + @staticmethod + def build_GeneratorExp(ctx, stmt): + # Convert Generator expression to ListComp + return ExprBuilder.build_ListComp(ctx, stmt) + + @staticmethod + def build_DictComp(ctx, stmt): + r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset) + if len(stmt.generators) != 1: + raise NotSupportedError(r, "Only a single generator is currently supported") + + if len(stmt.generators[0].ifs) != 0: + raise NotSupportedError(r, "Comprehension ifs are not supported yet") + + key_expr = build_expr(ctx, stmt.key) + value_expr = build_expr(ctx, stmt.value) + target_expr = build_expr(ctx, stmt.generators[0].target) + iter_expr = build_expr(ctx, stmt.generators[0].iter) + + return DictComp(r, key_expr, value_expr, target_expr, iter_expr) + + @staticmethod + def build_Starred(ctx, expr): + r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1) + return Starred(r, build_expr(ctx, expr.value)) + + +build_expr = ExprBuilder() +build_stmt = StmtBuilder() +build_withitem = WithItemBuilder() + + +def find_before(ctx, pos, substr, offsets=(0, 0)): + new_pos = ctx.source[:pos].rindex(substr) + return ctx.make_raw_range(new_pos + offsets[0], new_pos + len(substr) + offsets[1]) diff --git a/janus/lib/python3.10/site-packages/torch/jit/generate_bytecode.py b/janus/lib/python3.10/site-packages/torch/jit/generate_bytecode.py new file mode 100644 index 0000000000000000000000000000000000000000..f66bf7bfc4c1b25a35b342ab6792af025266a3a8 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/generate_bytecode.py @@ -0,0 +1,34 @@ +# mypy: allow-untyped-defs +from typing import List + +from torch._C import _compile_graph_to_code_table, _generate_upgraders_graph + + +def format_bytecode(table): + # Given a nested tuple, convert it to a nested list + def listify(content): + if not isinstance(content, tuple): + return content + return [listify(i) for i in content] + + formatted_table = {} + for entry in table: + identifier = entry[0] + content = entry[1] + content = listify(content) + formatted_table[identifier] = content + return formatted_table + + +def generate_upgraders_bytecode() -> List: + yaml_content = [] + upgraders_graph_map = _generate_upgraders_graph() + for upgrader_name, upgrader_graph in upgraders_graph_map.items(): + bytecode_table = _compile_graph_to_code_table(upgrader_name, upgrader_graph) + entry = {upgrader_name: format_bytecode(bytecode_table)} + yaml_content.append(entry) + return yaml_content + + +if __name__ == "__main__": + raise RuntimeError("This file is not meant to be run directly")
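generate_upgraders_bytecode returns a list of one-entry dicts mapping each upgrader name to its formatted bytecode table. A hypothetical dump of that structure to YAML (assumes a PyTorch build exposing these private torch._C hooks and that PyYAML is installed; neither is guaranteed by this file):

    import yaml  # assumption: PyYAML is available in the environment

    from torch.jit.generate_bytecode import generate_upgraders_bytecode

    entries = generate_upgraders_bytecode()  # [{upgrader_name: {identifier: [...]}}, ...]
    print(yaml.safe_dump(entries, sort_keys=False))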
diff --git a/janus/lib/python3.10/site-packages/torch/jit/mobile/__init__.py b/janus/lib/python3.10/site-packages/torch/jit/mobile/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..593dabf2fc43ca075d604977a0d254ea02f3ba55 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/mobile/__init__.py @@ -0,0 +1,232 @@ +# mypy: allow-untyped-defs +import os + +import torch +from torch.jit._serialization import validate_map_location + + +def _load_for_lite_interpreter(f, map_location=None): + r""" + Load a :class:`LiteScriptModule` saved with :func:`torch.jit._save_for_lite_interpreter`. + + Args: + f: a file-like object (has to implement read, readline, tell, and seek), + or a string containing a file name + map_location: a string or torch.device used to dynamically remap + storages to an alternative set of devices. + + Returns: + A :class:`LiteScriptModule` object. + + Example: + + .. testcode:: + + import torch + import io + + # Load LiteScriptModule from a saved file path + torch.jit.mobile._load_for_lite_interpreter('lite_script_module.pt') + + # Load LiteScriptModule from an io.BytesIO object + with open('lite_script_module.pt', 'rb') as f: + buffer = io.BytesIO(f.read()) + + torch.jit.mobile._load_for_lite_interpreter(buffer) + """ + if isinstance(f, (str, os.PathLike)): + if not os.path.exists(f): + raise ValueError(f"The provided filename {f} does not exist") + if os.path.isdir(f): + raise ValueError(f"The provided filename {f} is a directory") + + map_location = validate_map_location(map_location) + + if isinstance(f, (str, os.PathLike)): + cpp_module = torch._C._load_for_lite_interpreter(os.fspath(f), map_location) + else: + cpp_module = torch._C._load_for_lite_interpreter_from_buffer( + f.read(), map_location + ) + + return LiteScriptModule(cpp_module) + + +class LiteScriptModule: + def __init__(self, cpp_module): + self._c = cpp_module + super().__init__() + + def __call__(self, *input): + return self._c.forward(input) + + def find_method(self, method_name): + return self._c.find_method(method_name) + + def forward(self, *input): + return self._c.forward(input) + + def run_method(self, method_name, *input): + return self._c.run_method(method_name, input) + + +def _export_operator_list(module: LiteScriptModule): + r"""Return a set of root operator names (with overload name) that are used by any method in this mobile module.""" + return torch._C._export_operator_list(module._c)
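A minimal end-to-end sketch of the loader and wrapper above (the model file name and input shape are hypothetical; the file must have been produced with torch.jit._save_for_lite_interpreter):

    import torch
    from torch.jit.mobile import _export_operator_list, _load_for_lite_interpreter

    lite = _load_for_lite_interpreter("lite_script_module.pt")
    out = lite(torch.randn(1, 3))        # __call__ forwards to the C++ module
    print(_export_operator_list(lite))   # root operator names used by the module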
+ + +def _get_model_bytecode_version(f_input) -> int: + r"""Take a file-like object or a file name and return the bytecode version of the model. + + Args: + f_input: a file-like object (has to implement read, readline, tell, and seek), + or a string containing a file name + + Returns: + version: An integer. If the integer is -1, the version is invalid. A warning + will show in the log. + + Example: + .. testcode:: + + from torch.jit.mobile import _get_model_bytecode_version + + # Get bytecode version from a saved file path + version = _get_model_bytecode_version("path/to/model.ptl") + + """ + if isinstance(f_input, (str, os.PathLike)): + if not os.path.exists(f_input): + raise ValueError(f"The provided filename {f_input} does not exist") + if os.path.isdir(f_input): + raise ValueError(f"The provided filename {f_input} is a directory") + + if isinstance(f_input, (str, os.PathLike)): + return torch._C._get_model_bytecode_version(os.fspath(f_input)) + else: + return torch._C._get_model_bytecode_version_from_buffer(f_input.read()) + + +def _get_mobile_model_contained_types(f_input) -> set: + r"""Take a file-like object and return a set of strings, like {"int", "Optional"}. + + Args: + f_input: a file-like object (has to implement read, readline, tell, and seek), + or a string containing a file name + + Returns: + type_list: A set of strings, like {"int", "Optional"}. These are types used in bytecode. + + Example: + + .. testcode:: + + from torch.jit.mobile import _get_mobile_model_contained_types + + # Get type list from a saved file path + type_list = _get_mobile_model_contained_types("path/to/model.ptl") + + """ + if isinstance(f_input, (str, os.PathLike)): + if not os.path.exists(f_input): + raise ValueError(f"The provided filename {f_input} does not exist") + if os.path.isdir(f_input): + raise ValueError(f"The provided filename {f_input} is a directory") + + if isinstance(f_input, (str, os.PathLike)): + return torch._C._get_mobile_model_contained_types(os.fspath(f_input)) + else: + return torch._C._get_mobile_model_contained_types_from_buffer(f_input.read()) + + +def _backport_for_mobile(f_input, f_output, to_version): + r"""Take an input model (a file name or file-like object) and a destination path, backport the model, and return a boolean. + + Args: + f_input: a file-like object (has to implement read, readline, tell, and seek), + or a string containing a file name + f_output: path to new model destination + to_version: the expected output model bytecode version + Returns: + success: A boolean. True if the backport succeeds, otherwise False. + """ + if isinstance(f_input, (str, os.PathLike)): + if not os.path.exists(f_input): + raise ValueError(f"The provided filename {f_input} does not exist") + if os.path.isdir(f_input): + raise ValueError(f"The provided filename {f_input} is a directory") + + if (isinstance(f_input, (str, os.PathLike))) and ( + isinstance(f_output, (str, os.PathLike)) + ): + return torch._C._backport_for_mobile( + os.fspath(f_input), os.fspath(f_output), to_version + ) + else: + return torch._C._backport_for_mobile_from_buffer( + f_input.read(), str(f_output), to_version + )
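A sketch combining the two helpers above: downgrade a model only when its bytecode version is newer than the target (the file names and target version are hypothetical):

    from torch.jit.mobile import _backport_for_mobile, _get_model_bytecode_version

    TARGET_VERSION = 5
    if _get_model_bytecode_version("model.ptl") > TARGET_VERSION:
        ok = _backport_for_mobile("model.ptl", "model_v5.ptl", TARGET_VERSION)
        assert ok, "backport failed"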
+ + +def _backport_for_mobile_to_buffer(f_input, to_version): + r"""Take an input model (a file name or file-like object) and return the backported model as a buffer. + + Args: + f_input: a file-like object (has to implement read, readline, tell, and seek), + or a string containing a file name + to_version: the expected output model bytecode version + + """ + if isinstance(f_input, (str, os.PathLike)): + if not os.path.exists(f_input): + raise ValueError(f"The provided filename {f_input} does not exist") + if os.path.isdir(f_input): + raise ValueError(f"The provided filename {f_input} is a directory") + + if isinstance(f_input, (str, os.PathLike)): + return torch._C._backport_for_mobile_to_buffer(os.fspath(f_input), to_version) + else: + return torch._C._backport_for_mobile_from_buffer_to_buffer( + f_input.read(), to_version + ) + + +def _get_model_ops_and_info(f_input): + r"""Retrieve the root (top level) operators of a model and their corresponding compatibility info. + + These root operators can call other operators within them (traced ops), and + a root op can call many different traced ops depending on internal code paths in the root op. + These traced ops are not returned by this function. They are abstracted into the + runtime as an implementation detail (and the traced ops themselves can also call other operators), + which makes them difficult to retrieve and of negligible value from this API, since they + differ between runtime versions. Because of this, there is one false positive this API + cannot prevent in a compatibility use case: all the root ops of a model may be present in a + target runtime while some of the traced ops are not, which still prevents the model from running. + + Args: + f_input: a file-like object (has to implement read, readline, tell, and seek), + or a string containing a file name + + Returns: + Operators and info: A dictionary mapping strings (the qualified names of the root operators) + of the model to their OperatorInfo structs. + + Example: + + .. testcode:: + + from torch.jit.mobile import _get_model_ops_and_info + + # Get root operators and their compatibility info from a saved file path + ops_and_info = _get_model_ops_and_info("path/to/model.ptl") + + """ + if isinstance(f_input, (str, os.PathLike)): + if not os.path.exists(f_input): + raise ValueError(f"The provided filename {f_input} does not exist") + if os.path.isdir(f_input): + raise ValueError(f"The provided filename {f_input} is a directory") + + if isinstance(f_input, (str, os.PathLike)): + return torch._C._get_model_ops_and_info(os.fspath(f_input)) + else: + return torch._C._get_model_ops_and_info(f_input.read())
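The compatibility check described in the docstring reduces to set membership over the returned dictionary's keys. A sketch, with a stand-in runtime op set (real runtimes expose far more ops):

    from torch.jit.mobile import _get_model_ops_and_info

    runtime_ops = {"aten::add.Tensor", "aten::linear"}  # hypothetical target runtime
    ops_and_info = _get_model_ops_and_info("model.ptl")
    missing = [op for op in ops_and_info if op not in runtime_ops]
    if missing:
        print("model may not run; unsupported root ops:", missing)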
diff --git a/janus/lib/python3.10/site-packages/torch/jit/mobile/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/jit/mobile/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be8c294ac808ce37aa27a07f1a904bcf6636cca8 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/jit/mobile/__pycache__/__init__.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/jit/quantized.py b/janus/lib/python3.10/site-packages/torch/jit/quantized.py new file mode 100644 index 0000000000000000000000000000000000000000..a2500c1f1b9fed481ab713953746294dc52668af --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/quantized.py @@ -0,0 +1,100 @@ +# mypy: allow-untyped-defs +import torch + + +class QuantizedLinear(torch.jit.ScriptModule): + def __init__(self, other): + raise RuntimeError( + "torch.jit.QuantizedLinear is no longer supported. Please use " + "torch.ao.nn.quantized.dynamic.Linear instead." + ) + + +# FP16 weights +class QuantizedLinearFP16(torch.jit.ScriptModule): + def __init__(self, other): + super().__init__() + raise RuntimeError( + "torch.jit.QuantizedLinearFP16 is no longer supported. " + "Please use torch.ao.nn.quantized.dynamic.Linear instead." + ) + + +# Quantized RNN cell implementations +class QuantizedRNNCellBase(torch.jit.ScriptModule): + def __init__(self, other): + raise RuntimeError( + "torch.jit.QuantizedRNNCellBase is no longer supported. " + "Please use torch.ao.nn.quantized.dynamic.RNNCell instead." + ) + + +class QuantizedRNNCell(QuantizedRNNCellBase): + def __init__(self, other): + raise RuntimeError( + "torch.jit.QuantizedRNNCell is no longer supported. " + "Please use torch.ao.nn.quantized.dynamic.RNNCell instead." + ) + + +class QuantizedLSTMCell(QuantizedRNNCellBase): + def __init__(self, other): + super().__init__(other) + raise RuntimeError( + "torch.jit.QuantizedLSTMCell is no longer supported. " + "Please use torch.ao.nn.quantized.dynamic.LSTMCell instead." + ) + + +class QuantizedGRUCell(QuantizedRNNCellBase): + def __init__(self, other): + super().__init__(other) + raise RuntimeError( + "torch.jit.QuantizedGRUCell is no longer supported. " + "Please use torch.ao.nn.quantized.dynamic.GRUCell instead." + ) + + +class QuantizedRNNBase(torch.jit.ScriptModule): + def __init__(self, other, dtype=torch.int8): + raise RuntimeError( + "torch.jit.QuantizedRNNBase is no longer supported. " + "Please use torch.ao.nn.quantized.dynamic instead." + ) + + +class QuantizedLSTM(QuantizedRNNBase): + def __init__(self, other, dtype): + raise RuntimeError( + "torch.jit.QuantizedLSTM is no longer supported. " + "Please use torch.ao.nn.quantized.dynamic.LSTM instead." + ) + + +class QuantizedGRU(QuantizedRNNBase): + def __init__(self, *args, **kwargs): + raise RuntimeError( + "torch.jit.QuantizedGRU is no longer supported. " + "Please use torch.ao.nn.quantized.dynamic.GRU instead." + ) + + +def quantize_rnn_cell_modules(module): + raise RuntimeError( + "quantize_rnn_cell_modules function is no longer supported. " + "Please use the torch.ao.quantization.quantize_dynamic API instead." + ) + + +def quantize_linear_modules(module, dtype=torch.int8): + raise RuntimeError( + "quantize_linear_modules function is no longer supported. " + "Please use the torch.ao.quantization.quantize_dynamic API instead." + ) + + +def quantize_rnn_modules(module, dtype=torch.int8): + raise RuntimeError( + "quantize_rnn_modules function is no longer supported. " + "Please use the torch.ao.quantization.quantize_dynamic API instead." + ) diff --git a/janus/lib/python3.10/site-packages/torch/jit/supported_ops.py b/janus/lib/python3.10/site-packages/torch/jit/supported_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..791a11a9b3aa7dcac62cc3a2c129985abaf85bc1 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/supported_ops.py @@ -0,0 +1,344 @@ +# mypy: allow-untyped-defs +import inspect +import textwrap + +import torch.jit +from torch.jit._builtins import _find_builtin + + +# this file is for generating documentation using sphinx autodoc +# > help(torch.jit.supported_ops) will also give a nice list of the +# supported ops programmatically + + +def _hidden(name): + return name.startswith("_") and not name.startswith("__") + + +def _emit_type(type): + return str(type) + + +def _emit_arg(indent, i, arg): + v = f"{arg.name} : {_emit_type(arg.type)}" + default = arg.default_value + if default is not None: + v = f"{v}={str(default)}" + if i > 0: + v = f"\n{' ' * indent}{v}" + return v + + +def _emit_args(indent, arguments): + return ",".join(_emit_arg(indent, i, arg) for i, arg in enumerate(arguments)) + + +def _emit_ret(ret): + return _emit_type(ret.type) + + +def _emit_rets(returns): + if len(returns) == 1: + return _emit_ret(returns[0]) + return f"Tuple[{', '.join(_emit_ret(r) for r in returns)}]" + + +def _emit_schema(mod, name, schema, arg_start=0, padding=4): + if mod is None: + qualified_name = name + else: + qualified_name = f"{mod}.{name}" + schema_str = ( + f"{qualified_name}" + f"({_emit_args(len(qualified_name) + 1 + padding, schema.arguments[arg_start:])}) " + f"-> {_emit_rets(schema.returns)}" + ) + return schema_str + + +def _get_tensor_ops(): + def is_tensor_method(schema): + if len(schema.arguments) == 0: + return False + self = schema.arguments[0] + if self.name != "self": + return False + if not self.type.isSubtypeOf(torch._C.TensorType.get()): + return False + return True + + methods = [] + # discover methods + for elem in dir(torch.Tensor): + if not _hidden(elem): + schemas = torch._C._jit_get_schemas_for_operator("aten::" + elem) + for schema in schemas: + if is_tensor_method(schema): + methods.append(_emit_schema("Tensor", elem, schema, arg_start=1)) + + return "Supported Tensor Methods", methods
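All of the gatherers in this file lean on torch._C._jit_get_schemas_for_operator, which maps a qualified operator name to its registered schemas. A standalone sketch (the printed output is representative, not verbatim):

    import torch

    for schema in torch._C._jit_get_schemas_for_operator("aten::add"):
        # each schema stringifies like:
        # aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
        print(schema)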
+ + +def _get_nn_functional_ops(): + functions = [] + + # Iterate over torch.nn.functional + mod = torch.nn.functional + name = mod.__name__ + for elem in dir(torch.nn.functional): + attr = getattr(mod, elem) + if not inspect.isfunction(attr) or _hidden(elem[0]): + # Ignore non-functions and internal methods + continue + + attr_module = inspect.getmodule(attr) + if not attr_module: + raise RuntimeError(f"Module for {attr} not found") + + if "torch.nn.functional" not in attr_module.__name__: + # Ignore functions from outside torch.nn.functional + continue + + try: + # compile fn, get schema + scripted = torch.jit.script(attr) + scripted_schema = scripted.schema + functions.append(_emit_schema(name, elem, scripted_schema)) + except: # noqa: B001,E722 + # Skip interpolate / boolean dispatched things + pass + + # Iterate over modules that we know contain a lot of builtins + for mod in torch.jit._builtins._modules_containing_builtins: + name = mod.__name__ + for elem in dir(mod): + builtin = _find_builtin(getattr(mod, elem)) + if builtin is not None: + schemas = torch._C._jit_get_schemas_for_operator(builtin) + for schema in schemas: + # remove _tan but not __and__ + if not _hidden(elem): + functions.append(_emit_schema(name, elem, schema)) + return "Supported PyTorch Functions", functions + + +def _get_builtins_helper(): + builtins = [] + for fn, _builtin_name in torch.jit._builtins._builtin_ops: + mod = inspect.getmodule(fn) + + if not hasattr(fn, "__name__"): + # typing classes + continue + if not mod: + continue + if _hidden(fn.__name__) or _hidden(fn.__qualname__) or _hidden(mod.__name__): + # skip internal-only methods + continue + + if "torch._C" in mod.__name__: + continue + + builtins.append((fn, _builtin_name)) + + return builtins + + +def _is_math_fn(fn): + mod = inspect.getmodule(fn) + if not mod: + raise RuntimeError(f"Module for {fn} not found") + + return mod.__name__ == "math" + + +def _get_torchscript_builtins(): + functions = [] + builtins = filter(lambda fn: not _is_math_fn(fn[0]), _get_builtins_helper()) + builtins_list = list(builtins) + # Iterate over the specially added builtins + for fn, _builtin_name in builtins_list: + mod = inspect.getmodule(fn) + if not mod: + raise RuntimeError(f"Module for {fn} not found") + builtin = _find_builtin(fn) + if builtin is not None: + schemas = torch._C._jit_get_schemas_for_operator(builtin) + for schema in schemas: + functions.append(_emit_schema(mod.__name__, fn.__name__, schema)) + + return "TorchScript Builtin Functions", functions + + +def _get_math_builtins(): + functions = [] + builtins = filter(lambda fn: _is_math_fn(fn[0]), _get_builtins_helper()) + builtins_list = list(builtins) + # Iterate over the specially added builtins + for fn, _builtin_name in builtins_list: + mod = inspect.getmodule(fn) + if not mod: + raise RuntimeError(f"Module for {fn} not found") + builtin = _find_builtin(fn) + if builtin is not None: + schemas = torch._C._jit_get_schemas_for_operator(builtin) + for schema in schemas: + schema_str = _emit_schema(mod.__name__, fn.__name__, schema) + if "Tensor" in schema_str: + # Skip Tensor ops that have the same name as math functions + # (they will show up in the tensor methods section) + continue + # append the formatted string, not the raw schema object + functions.append(schema_str) + + return "``math`` Module", functions + + +def _get_global_builtins(): + # Taken from the 'globals' map in torch/csrc/jit/frontend/ir_emitter.cpp + supported_builtins = [ + "print", + "tuple", + "float", + "complex", + "int", + "bool", + "str", + "getattr", + "hasattr", + "isinstance", + "len", + "hex", + "oct", + "round", + "hash", + "min", + "max", + "abs", + "all", + "divmod", + "list", + "ord", + "chr", + "bin", + "range", + "zip", + "enumerate", + "sorted", + ] + + op_renames = { + "bool": 
"aten::Bool", + "int": "aten::Int", + "float": "aten::Float", + "complex": "aten::Complex", + "abs": "prim::abs", + "max": "prim::max", + "min": "prim::min", + "range": "fake::does_not_exist", + } + + schemaless_op_explanations = { + "print": "Print any value", + "tuple": "Lists cannot be converted to tuples with this method since their size is not statically known", + "getattr": "Attribute name must be a literal string", + "hasattr": "Attribute name must be a literal string", + "isinstance": "Result is static", + "zip": "Arguments must be iterable. See :ref:`Iterables ` for details.", + "enumerate": "Arguments must be iterable. See :ref:`Iterables ` for details.", + "range": "Can only be used as an iterator in a for loop", + } + + magic_methods = [ + ("complex", "__complex__"), + ("float", "__float__"), + ("int", "__int__"), + ("bool", "__bool__"), + ("str", "__str__"), + ("len", "__len__"), + ("hex", "__hex__"), + ("oct", "__oct__"), + ] + + magic_methods_rows = [] + for fn, magic_method in magic_methods: + magic_methods_rows.append(f'"{fn}", "``{magic_method}``"') + + schematized_ops = [] + schemaless_ops = [] + + for fn in supported_builtins: + op_name = f"aten::{fn}" + if fn in op_renames: + op_name = op_renames[fn] + schemas = torch._C._jit_get_schemas_for_operator(op_name) + for s in schemas: + schematized_ops.append(_emit_schema(None, fn, s, padding=0)) + if len(schemas) > 0: + schematized_ops.append("") + else: + table_row = ( + f'":external+python:py:obj:`{fn}`", "{schemaless_op_explanations[fn]}"' + ) + schemaless_ops.append(table_row) + + schematized_ops_str = "\n".join(schematized_ops) + schemaless_ops_str = "\n".join(schemaless_ops) + magic_methods_rows_str = "\n".join(magic_methods_rows) + schematized_ops_str = textwrap.indent(schematized_ops_str, "\t") + schemaless_ops_str = textwrap.indent(schemaless_ops_str, "\t") + magic_methods_rows_str = textwrap.indent(magic_methods_rows_str, "\t") + section = f""" +The functions in the following table are supported but do not have a static schema + +.. csv-table:: + :header: "Function", "Note" + +{schemaless_ops_str} + +The following functions will use the corresponding magic method on :any:`TorchScript classes` + +.. csv-table:: + :header: "Function", "Magic Method" + +{magic_methods_rows_str} + +These built-in functions use the schema + +.. rst-class:: codeblock-height-limiter + +:: + +{schematized_ops_str} + """ + + return "Python Built-in Functions", section + + +def _list_supported_ops(): + def emit_block(decls): + return "\n.. rst-class:: codeblock-height-limiter\n\n::\n\n{}\n".format( + "".join(f" {d}\n\n" for d in decls) + ) + + body = "" + op_gathering_fns = ( + _get_tensor_ops, + _get_nn_functional_ops, + _get_torchscript_builtins, + _get_global_builtins, + _get_math_builtins, + ) + for fn in op_gathering_fns: + header, items = fn() + link_target = header.replace("`", "").replace("-", "").lower().replace(" ", "-") + if isinstance(items, str): + section = f"{header}\n{'~' * len(header)}\n{items}\n" + else: + section = f"{header}\n{'~' * len(header)}\n{emit_block(items)}" + section = f".. 
_{link_target}:" + "\n\n" + section + body += section + + return body + + +__doc__ = _list_supported_ops() diff --git a/janus/lib/python3.10/site-packages/torch/jit/unsupported_tensor_ops.py b/janus/lib/python3.10/site-packages/torch/jit/unsupported_tensor_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..46b0a000bd618c8bb9ba0b9a345d8a061a69a1fc --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/jit/unsupported_tensor_ops.py @@ -0,0 +1,78 @@ +# mypy: allow-untyped-defs +from textwrap import dedent +from typing import Any, Dict + +import torch.jit + + +def execWrapper(code, glob, loc): + exec(code, glob, loc) + + +def _gen_unsupported_methods_properties(): + tensor_attrs = set(filter(lambda x: x[0] != "_", dir(torch.Tensor))) + tensor = torch.tensor([2]) + funcs_template = dedent( + """ + def func(x): + return x.{op}() + """ + ) + + deprecated_apis = { + "volatile", + "resize", + "reinforce", + "new", + "name", + "map2_", + "has_names", + "grad_fn", + "resize_as", + } + tensor_attrs = tensor_attrs - deprecated_apis + + properties = [] + methods = [] + sorted_tensor_attrs = sorted(tensor_attrs, key=lambda x: x.lower()) + for attr in sorted_tensor_attrs: + funcs_str = funcs_template.format(op=attr) + scope: Dict[str, Any] = {} + execWrapper(funcs_str, globals(), scope) + try: + cu = torch.jit.CompilationUnit(funcs_str) + except Exception as e: + if "nonexistent attribute" not in repr(e): + continue + attr_repr = repr(getattr(tensor, attr)) + if "bound method" in attr_repr or "built-in method" in attr_repr: + methods.append(attr) + else: + properties.append(attr) + + mapped_methods = ("\t* :meth:`~torch.Tensor." + x + r"`" for x in methods) + mapped_properties = ("\t* :attr:`~torch.Tensor." + x + r"`" for x in properties) + return "\n".join(mapped_methods), "\n".join(mapped_properties) + + +def _list_unsupported_tensor_ops(): + header = """\n\n +Unsupported Tensor Methods +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + """ + methods, properties = _gen_unsupported_methods_properties() + return ( + header + + "\n" + + methods + + """ + +Unsupported Tensor Properties +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + """ + + "\n" + + properties + ) + + +__doc__ = _list_unsupported_tensor_ops() diff --git a/janus/lib/python3.10/site-packages/torch/lib/libcaffe2_nvrtc.so b/janus/lib/python3.10/site-packages/torch/lib/libcaffe2_nvrtc.so new file mode 100644 index 0000000000000000000000000000000000000000..3519bfa004c29dfb79ac4fcbfaa47caedae9feda Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/lib/libcaffe2_nvrtc.so differ diff --git a/janus/lib/python3.10/site-packages/torch/lib/libshm.so b/janus/lib/python3.10/site-packages/torch/lib/libshm.so new file mode 100644 index 0000000000000000000000000000000000000000..b0af7c66061e84734143787864a4f59e2a9197a8 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/lib/libshm.so differ
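Both torch.jit.supported_ops and torch.jit.unsupported_tensor_ops (added above) build their documentation at import time by assigning the generated text to __doc__. A quick sanity check of the result (output truncated for brevity):

    import torch.jit.supported_ops
    import torch.jit.unsupported_tensor_ops

    print(torch.jit.supported_ops.__doc__[:400])
    print(torch.jit.unsupported_tensor_ops.__doc__[:400])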