diff --git a/infer_4_33_0/lib/python3.10/site-packages/fifty/utilities/models/4096_2.h5 b/infer_4_33_0/lib/python3.10/site-packages/fifty/utilities/models/4096_2.h5 new file mode 100644 index 0000000000000000000000000000000000000000..f6d2a7469a2b5d18d9fe7cf0351f94f7d591b70f --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/fifty/utilities/models/4096_2.h5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7f9ae009b40fbd2181f7788f963a693da5c64d443d879e752ee48bd82f5d48e +size 4815072 diff --git a/janus/lib/python3.10/site-packages/torch/_export/__init__.py b/janus/lib/python3.10/site-packages/torch/_export/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..91d893e05cb89c65a9bf2a07b5ed973d20f9a2c6 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/__init__.py @@ -0,0 +1,317 @@ +# mypy: allow-untyped-defs +import copy +import dataclasses +import functools +import io +import json +import logging +import os +import re +import sys +import types +import warnings +import weakref +import zipfile +from collections import OrderedDict +from contextlib import contextmanager +from functools import lru_cache + +from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from unittest.mock import patch + +import torch +import torch.fx +import torch.utils._pytree as pytree + +from torch._dispatch.python import enable_python_dispatcher +from torch._utils_internal import log_export_usage +from torch.export._tree_utils import reorder_kwargs +from torch.export.graph_signature import ( + ArgumentSpec, + ConstantArgument, + ExportGraphSignature, + InputKind, + InputSpec, + OutputKind, + OutputSpec, + SymIntArgument, + TensorArgument, +) +from torch.fx import traceback as fx_traceback +from torch.fx._compatibility import compatibility +from torch.fx.experimental.proxy_tensor import make_fx +from torch._subclasses.fake_tensor import unset_fake_temporarily +from torch.fx.graph import _PyTreeCodeGen, 
_PyTreeInfo + +from .wrappers import _wrap_submodules + +log = logging.getLogger(__name__) + +@dataclasses.dataclass +class ExportDynamoConfig: + """ + Manage Export-specific configurations of Dynamo. + """ + allow_rnn: bool = True + + +# We only want to print this once to avoid flooding logs in workflows where capture_pre_autograd_graph +# is called multiple times. +@lru_cache +def capture_pre_autograd_graph_warning(): + from torch._inductor import config + + log.warning("+============================+") + log.warning("| !!! WARNING !!! |") + log.warning("+============================+") + log.warning("capture_pre_autograd_graph() is deprecated and doesn't provide any function guarantee moving forward.") + log.warning("Please switch to use torch.export.export_for_training instead.") + if config.is_fbcode(): + log.warning("Unless the unittest is in the blocklist, capture_pre_autograd_graph() will fallback to torch.export.export_for_training.") # noqa: B950 + + +@compatibility(is_backward_compatible=False) +def capture_pre_autograd_graph( + f: torch.nn.Module, + args: Tuple[Any], + kwargs: Optional[Dict[str, Any]] = None, + dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any]]] = None, +) -> torch.nn.Module: + """ + A helper function that is intended to trace a module before any pre-autograd + decomposition is run. The produced module will be "non-functional" and + composed of aten operators. Later this API will be deleted in favor of more general + torch.export API. + + Args: + f: nn.Module to be traced + + args: example positional inputs. + + kwargs: optional example keyword inputs. + + dynamic_shapes: Should either be: + 1) a dict from argument names of ``f`` to their dynamic shape specifications, + 2) a tuple that specifies dynamic shape specifications for each input in original order. + If you are specifying dynamism on keyword args, you will need to pass them in the order that + is defined in the original function signature. 
+ + The dynamic shape of a tensor argument can be specified as either + (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is + not required to include static dimension indices in this dict, but when they are, + they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None, + where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions + are denoted by None. Arguments that are dicts or tuples / lists of tensors are + recursively specified by using mappings or sequences of contained specifications. + + Returns: + An nn.Module containing the traced method. + + """ + from torch.export._trace import _extract_fake_inputs, DEFAULT_EXPORT_DYNAMO_CONFIG, _ignore_backend_decomps + from torch._utils_internal import capture_pre_autograd_graph_using_training_ir + from torch._export.non_strict_utils import make_constraints + from torch._subclasses.functional_tensor import FunctionalTensor + from torch.export._unlift import _create_stateful_graph_module + from torch.export.dynamic_shapes import _combine_args + + capture_pre_autograd_graph_warning() + + if sys.platform == "win32": + raise RuntimeError("capture_pre_autograd_graph not yet supported on Windows") + + assert isinstance(f, torch.nn.Module), "Expected an nn.Module instance." + + if kwargs is None: + kwargs = {} + + if capture_pre_autograd_graph_using_training_ir(): + @lru_cache + def print_export_warning(): + log.warning("Using torch.export.export_for_training(...,strict=True)") + print_export_warning() + module = torch.export.export_for_training(f, args, kwargs, dynamic_shapes=dynamic_shapes, strict=True).module() + else: + log_export_usage(event="export.private_api", flags={"capture_pre_autograd_graph"}) + + # Do not decompose dropout for exported models, because in eval mode the dropout + # op disappears from the graph, which makes it difficult to switch to train mode. + # See https://github.com/pytorch/pytorch/pull/115258#issuecomment-1900755832. 
+ decomp_table = { + op: op.decompose + for op in FunctionalTensor.maybe_aliasing_or_mutating_ops + if op != torch.ops.aten.dropout.default + } + with torch._dynamo.config.patch(dataclasses.asdict(DEFAULT_EXPORT_DYNAMO_CONFIG)), _ignore_backend_decomps(): + m = torch._dynamo.export( + f, + dynamic_shapes=dynamic_shapes, + assume_static_by_default=True, + tracing_mode="symbolic", + decomposition_table=decomp_table, + pre_dispatch=True, + aten_graph=True, + _log_export_usage=False, + )( + *args, + **kwargs, + )[0] + + _, _, fake_mode = _extract_fake_inputs(m, args, kwargs) + + m.meta["inline_constraints"] = { + k: v + for k, v in fake_mode.shape_env.var_to_range.items() + if re.match(r"^[if]\d+$", str(k)) + } + + if isinstance(f, torch.nn.Module): + from torch.export._trace import _restore_state_dict + _restore_state_dict(f, m) + + flat_args, _ = pytree.tree_flatten((args, kwargs or {})) + combined_args = _combine_args(f, args, kwargs) + range_constraints = make_constraints( + fake_mode, + m, + combined_args, + dynamic_shapes, + 0, + ) + + module = _create_stateful_graph_module( + m, + range_constraints=range_constraints, + ) + + error_message = \ + """ + Calling train() or eval() is not supported for exported models. + Alternatively, you may override these methods to do custom user behavior as follows: + + def _my_train(self, mode: bool = True): + ... + + def _my_eval(self): + ... + + model.train = types.MethodType(_my_train, model) + model.eval = types.MethodType(_my_eval, model) + """ + + def _train(self, mode: bool = True): + raise NotImplementedError(error_message) + + def _eval(self, mode: bool = True): + raise NotImplementedError(error_message) + + module.train = types.MethodType(_train, module) # type: ignore[method-assign] + module.eval = types.MethodType(_eval, module) # type: ignore[method-assign] + + # Remove Proxy because they cannot be deepcopied or pickled. 
+ if hasattr(module, "_buffers"): + torch._export.utils.remove_proxy_from_state_dict( + module._buffers, in_place=True + ) + return module + + +def aot_compile( + f: Callable, + args: Tuple[Any], + kwargs: Optional[Dict[str, Any]] = None, + *, + dynamic_shapes: Optional[Dict[str, Any]] = None, + options: Optional[Dict[str, Any]] = None, + remove_runtime_assertions: bool = False, + disable_constraint_solver: bool = False, + same_signature: bool = True, +) -> str: + """ + Note: this function is not stable yet + + Traces either an nn.Module's forward function or just a callable with PyTorch + operations inside, generates executable cpp code from the program, and returns + the path to the generated shared library + + Args: + f: the `nn.Module` or callable to trace. + + args: example positional inputs. + + kwargs: optional example keyword inputs. + + dynamic_shapes: Should either be: + 1) a dict from argument names of ``f`` to their dynamic shape specifications, + 2) a tuple that specifies dynamic shape specifications for each input in original order. + If you are specifying dynamism on keyword args, you will need to pass them in the order that + is defined in the original function signature. + + The dynamic shape of a tensor argument can be specified as either + (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is + not required to include static dimension indices in this dict, but when they are, + they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None, + where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions + are denoted by None. Arguments that are dicts or tuples / lists of tensors are + recursively specified by using mappings or sequences of contained specifications. + + options: A dictionary of options to control inductor + + disable_constraint_solver: Whether the dim constraint solver must be disabled. 
+ + Returns: + Path to the generated shared library + """ + from torch.export._trace import _export_to_torch_ir + from torch._inductor.decomposition import select_decomp_table + from torch._inductor import config + + if config.is_predispatch: + gm = torch.export._trace._export(f, args, kwargs, dynamic_shapes, pre_dispatch=True).module() + else: + # We want to export to Torch IR here to utilize the pre_grad passes in + # inductor, which run on Torch IR. + gm = _export_to_torch_ir( + f, + args, + kwargs, + dynamic_shapes, + disable_constraint_solver=disable_constraint_solver, + same_signature=same_signature, + # Disabling this flag, because instead we can rely on the mapping + # dynamo_flat_name_to_original_fqn which is coming from Dynamo. + restore_fqn=False, + ) + + with torch.no_grad(): + so_path = torch._inductor.aot_compile(gm, args, kwargs, options=options) # type: ignore[arg-type] + + return so_path + +def aot_load(so_path: str, device: str) -> Callable: + """ + Loads a shared library generated by aot_compile and returns a callable + + Args: + so_path: Path to the shared library + + Returns: + A callable + """ + if device == "cpu": + runner = torch._C._aoti.AOTIModelContainerRunnerCpu(so_path, 1) # type: ignore[call-arg] + elif device == "cuda" or device.startswith("cuda:"): + runner = torch._C._aoti.AOTIModelContainerRunnerCuda(so_path, 1, device) # type: ignore[assignment, call-arg] + else: + raise RuntimeError("Unsupported device " + device) + + def optimized(*args, **kwargs): + call_spec = runner.get_call_spec() # type: ignore[attr-defined] + in_spec = pytree.treespec_loads(call_spec[0]) + out_spec = pytree.treespec_loads(call_spec[1]) + flat_inputs = pytree.tree_flatten((args, reorder_kwargs(kwargs, in_spec)))[0] + flat_inputs = [x for x in flat_inputs if isinstance(x, torch.Tensor)] + flat_outputs = runner.run(flat_inputs) # type: ignore[attr-defined] + return pytree.tree_unflatten(flat_outputs, out_spec) + + return optimized diff --git 
a/janus/lib/python3.10/site-packages/torch/_export/converter.py b/janus/lib/python3.10/site-packages/torch/_export/converter.py new file mode 100644 index 0000000000000000000000000000000000000000..b45d7849b29ae04ff1e77a812b0ccf86a90a4b0d --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/converter.py @@ -0,0 +1,1584 @@ +# mypy: allow-untyped-defs +import builtins +import logging +import operator +import typing +import warnings +from contextlib import contextmanager +from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union + +import torch +import torch.export._trace +from torch import _C +from torch._export.passes.replace_quantized_ops_with_standard_ops_pass import ( + replace_quantized_ops_with_standard_ops, +) +from torch.export.exported_program import ExportedProgram +from torch.export.graph_signature import ( + ConstantArgument, + CustomObjArgument, + InputKind, + InputSpec, + OutputKind, + OutputSpec, + TensorArgument, +) +from torch.fx import subgraph_rewriter + + +log = logging.getLogger(__name__) + + +def _get_param_count_list(method_graph, args_params): + param_count_list = [] + for input_, arg_params_ in zip(method_graph.inputs(), args_params): + if "PackedParams" in str(input_.type()): + in_vars, _ = torch.jit._flatten(arg_params_) + param_count_list.append(len(in_vars)) + else: + param_count_list.append(arg_params_ is not None) + + return param_count_list + + +def _trace_and_get_graph_from_model(model, args): + # A basic sanity check: make sure the state_dict keys are the same + # before and after running the model. Fail fast! + orig_state_dict_keys = torch.jit._unique_state_dict(model).keys() + + # Disable Autocast cache because it replaces kernel's weight and bias + # by (undesired) constants. 
+ # No perf impact for when there are reused weights since https://github.com/pytorch/pytorch/pull/85665 + prev_autocast_cache_enabled = torch.is_autocast_cache_enabled() + torch.set_autocast_cache_enabled(False) + trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph( + model, + args, + strict=False, + _force_outplace=False, + _return_inputs_states=True, + ) + torch.set_autocast_cache_enabled(prev_autocast_cache_enabled) + + if orig_state_dict_keys != torch.jit._unique_state_dict(model).keys(): + raise RuntimeError( + "state_dict changed after running the tracer; " + "something weird is happening in your model!" + ) + + return trace_graph, torch_out + + +def _create_jit_graph( + model: Union[torch.nn.Module, torch.jit.ScriptFunction], args: Sequence[Any] +) -> Tuple[torch.Graph, List["_C.IValue"], Any, Optional[torch.ScriptModule]]: + if isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule)): + flattened_args = tuple(torch.jit._flatten(tuple(args))[0]) + torch_out = None + + if isinstance(model, torch.jit.ScriptModule): + try: + graph = model.forward.graph # type: ignore[attr-defined] + except AttributeError as e: + raise RuntimeError("'forward' method must be a script method") from e + _C._jit_pass_onnx_function_substitution(graph) + freezed_module = _C._freeze_module( + typing.cast(_C.ScriptModule, model._c), preserveParameters=True + ) + module, params = _C._jit_onnx_list_model_parameters(freezed_module) + method_graph = module._get_method("forward").graph + args_params = tuple(args) + tuple(params) + param_count_list = _get_param_count_list(method_graph, args_params) + in_vars, _ = torch.jit._flatten(args_params) + graph = _C._propagate_and_assign_input_shapes( + method_graph, tuple(in_vars), param_count_list, False, False + ) + return graph, params, torch_out, module + + # torch.jit.ScriptFunction + params = [] + graph = model.graph + _C._jit_pass_onnx_function_substitution(graph) + param_count_list = _get_param_count_list(graph, 
args) + graph = _C._propagate_and_assign_input_shapes( + graph, flattened_args, param_count_list, False, False + ) + return graph, params, torch_out, None + + graph, torch_out = _trace_and_get_graph_from_model(model, args) + _C._jit_pass_onnx_lint(graph) + state_dict = torch.jit._unique_state_dict(model) + params = list(state_dict.values()) + graph_inputs = list(graph.inputs()) + user_input_num = len(graph_inputs) - len(state_dict) + param_names = list(state_dict.keys()) + for i, inp in enumerate(graph_inputs): + if i >= user_input_num: + inp.setDebugName(param_names[i - user_input_num]) + _C._jit_pass_onnx_function_substitution(graph) + return graph, params, torch_out, None + + +def list_add(a, b): + return a + b + + +def list_append(container, element): + return container + [element] + + +def execute_subgraph_from_prim_loop( + subgraph, iter_idx, len_loop_local_arguments, *args, **kwargs +): + """ + subgraph: GraphModule from sub-block. + iter_idx: The index of interation. + len_loop_local_arguments: The number of loop local arguments in args. + """ + + # Loop local variables. TS graph create those as inputs because their values + # are updated inside the loop. + loop_local_args = args[:len_loop_local_arguments] + # Global variables that are not passed in as inputs to the loop sub-blocks + # but are directly used. Most of time, their values are not updated, but + # the only exception is when there are some operations that perform inplace + # updates. 
+ global_args = args[len_loop_local_arguments:] + return subgraph(*global_args, iter_idx, *loop_local_args, **kwargs) + + +def inplace_optimize_sym_size_div(gm: torch.fx.GraphModule): + def pattern(im, dim, scale): + sym_size_int = torch.ops.aten.sym_size.int(im, dim) + scalar_tensor = torch.ops.aten.scalar_tensor(sym_size_int) + div_scalar_mode = torch.ops.aten.div.Scalar_mode( + scalar_tensor, scale, rounding_mode="trunc" + ) + int_tensor = torch.ops.aten.Int.Tensor(div_scalar_mode) + return int_tensor + + def replacement(im, dim, scale): + sym_size_int = torch.ops.aten.sym_size.int(im, dim) + return sym_size_int // scale + + replaced_patterns = subgraph_rewriter.replace_pattern(gm, pattern, replacement) + + +def is_valid_for_codegen(name): + if len(name) == 0: + raise RuntimeError("Empty argument name for codegen") + if name[0].isdigit(): + return False + return True + + +def normalize_name(name: str, prefix: str = "rename") -> str: + name = name.replace(".", "_") + if is_valid_for_codegen(name): + return name + return f"{prefix}_{name}" + + +def ir_name_to_func_name(name: str) -> str: + """prim::If -> convert_prim_If""" + name_list = name.split("::") + return "convert_" + "_".join(name_list) + + +def get_node_as_placeholder_or_get_attr(fx_graph, name, is_top_level_graph): + if is_top_level_graph: + return fx_graph.get_attr(name) + return fx_graph.placeholder(name) + + +_TORCH_DTYPE_TO_ENUM = { + torch.uint8: 0, + torch.int8: 1, + torch.int16: 2, + torch.int32: 3, + torch.int64: 4, + torch.float16: 5, + torch.float32: 6, + torch.float64: 7, + torch.complex32: 8, + torch.complex64: 9, + torch.complex128: 10, + torch.bool: 11, + torch.qint8: 12, + torch.quint8: 13, + torch.bfloat16: 15, +} + +_TORCH_ENUM_TO_DTYPE = {value: key for key, value in _TORCH_DTYPE_TO_ENUM.items()} + + +def get_dtype_as_int(tensor): + """ + prim::dtype has the signature "Tensor a) -> int", where it gets the dtype of + the tensor and returns the integer corresponding to this dtype based on 
the + enum in ScalarType.h + """ + dtype = tensor.dtype + if dtype not in _TORCH_DTYPE_TO_ENUM: + raise RuntimeError(f"Unsupported dtype {dtype}") + return _TORCH_DTYPE_TO_ENUM[dtype] + + +# Those operators will be automatically populated to a instance method +# of TS2FXGraphConverter with name convert__(). +# Please check __init__ for method population implementations. +kind_to_standard_operators = { + "prim::max": builtins.max, + "prim::min": builtins.min, + "prim::TupleIndex": operator.getitem, + "aten::__is__": operator.is_, + "aten::__isnot__": operator.is_not, + "aten::__not__": operator.not_, + "aten::__contains__": operator.contains, + "prim::dtype": get_dtype_as_int, + "aten::len": len, + # Mapping from specialized op to its symbolic counterpart. + # They currently do not have any other overrides. + "aten::numel": torch.ops.aten.sym_numel, + "aten::size": torch.ops.aten.sym_size, + "aten::storage_offset": torch.ops.aten.sym_storage_offset, + "aten::stride": torch.ops.aten.sym_stride, +} + + +def get_ir_value_parent_name_and_attr_name(node): + irv_parent_name, irv_name = node.input().debugName(), node.output().debugName() + attr_name = node.s("name") + return irv_name, irv_parent_name, attr_name + + +def construct_fqn(ir, ref_map, name_map): + name_list = [] + while ir in ref_map: + name_list.append(name_map[ir]) + ir = ref_map[ir] + return ".".join(reversed(name_list)) + + +def get_block_to_lifted_attrs(graph: torch._C.Graph) -> Dict[torch._C.Block, Set[str]]: + """ + Perform two passes to get a mapping of blocks to a set of FQNs of its lifted attributes. + When a graph has control flow, the graph will be divided into multiple blocks. We want to convert + each block to a graph which will be passed into torch.cond. A restriction for torch.cond is that model + parameters/buffers are expected to be lifted as inputs to the subgraphs. Before converting the model, + we will run this pass which will: + 1. 
Figure out which params/buffers are used within blocks through tracing the GetAttr calls. + 2. Process the graph bottom up to find the lifted attributes of each block by taking the union + of the attributes used in the current block, and the lifted attributes of all its child blocks. + + Returns: + A mapping of blocks to a set of FQNs of its lifted attributes. + """ + + # A map from a block to its expected to be lifted arguments. + blocks_to_lifted_attrs: Dict[torch._C.Block, Set[str]] = {} + + # Reference map stores the input (i.e., src) and output (i.e., dest) IR of a + # GetAttr node. By traversing this reference map, we can figure out the + # full IR aliasing pass and figure out the FQN of an attribute. + # E.g., %2 = GetAttr(linear)[%1] --> node_to_parent_map["%2"] = "%1" + node_to_parent_map: Dict[str, str] = {} + + # Used for reconstructing the FQN of an attribute based on the reference map. + # In nutshell, for each GetAttr call, GetAttr(input IR, attribute name) -> output IR + # This name map stores which attribute name is called for a src IR --> dest IR action. + # E.g., %2 = GetAttr(linear)[%1] --> node_to_attr_name["%2"] = "linear" + node_to_attr_name: Dict[str, str] = {} + + def _dfs_get_attr_dependency(entry): + """ + First DFS path to construct reference map and name map. + """ + for node in entry.nodes(): + if node.kind() == "prim::GetAttr": + ( + irv_name, + irv_parent_name, + attr_name, + ) = get_ir_value_parent_name_and_attr_name(node) + node_to_parent_map[irv_name] = irv_parent_name + node_to_attr_name[irv_name] = attr_name + for block in node.blocks(): + _dfs_get_attr_dependency(block) + + def _map_blocks_to_lifted_attrs(entry): + """ + Walk the graph in a bottom-up fashion to build the expected to be + lifted arguments for each block. + """ + arguments: Set[str] = set() + for node in entry.nodes(): + for block in node.blocks(): + # Recursively build. 
+ arguments = arguments.union(_map_blocks_to_lifted_attrs(block)) + if node.kind() == "prim::GetAttr": + irv_name = node.output().debugName() + # Skip for intermediate GetAttr, which will anyway not result a FQN. + # E.g., node_to_parent_name: {"%3": "%2", "%2": "%1"} + # node_to_attr_name: {"%3": "weight", "%2": "linear", "%1": "self"} + # There is only one FQN %3-->%2-->%1: self.linear.weight + # %2-->%1 is not a FQN: self.linear + if irv_name not in set(node_to_parent_map.values()): + arguments.add( + construct_fqn(irv_name, node_to_parent_map, node_to_attr_name) + ) + if not isinstance(entry, torch._C.Graph): # Skip the top level. + blocks_to_lifted_attrs[entry] = arguments + return arguments + + _dfs_get_attr_dependency(graph) + _map_blocks_to_lifted_attrs(graph) + + return blocks_to_lifted_attrs + + +def get_attribute_fqn_from_ts_node( + name_to_attribute_fqn: Dict[str, str], node: torch._C.Node +) -> str: + def get_attr(name: str): + if name in name_to_attribute_fqn: + return name_to_attribute_fqn[name] + else: + raise ValueError(f"Attribute {name} not found") + + if node.kind() == "prim::SetAttr": + input_name = next(node.inputs()).debugName() + elif node.kind() == "prim::GetAttr": + input_name = node.input().debugName() + else: + raise RuntimeError( + f"Unexpected node kind when getting attribute fqn. 
node: {node} " + ) + + attr_name = node.s("name") + root_attr_name = get_attr(input_name) + attr_fqn = f"{root_attr_name}.{attr_name}" if root_attr_name else attr_name + + return attr_fqn + + +def get_op_overload(node: torch._C.Node): + schema_str = node.schema() + assert schema_str != "(no schema)", f"got empty schema for {node}" + schema: torch._C.FunctionSchema = torch._C.parse_schema(schema_str) + ns, op_name = str(schema.name).split("::") + override = schema.overload_name + + try: + op_overload_mod = getattr(torch.ops, ns) + op_overload_packet = getattr(op_overload_mod, op_name) + if override: + op_overload = getattr(op_overload_packet, override) + else: + op_overload = op_overload_packet.default + except Exception as e: + raise RuntimeError( + f"Unable to find operator {node.kind()} with schema {node.schema()}" + ) from e + + return op_overload + + +class TS2FXGraphConverter: + def __init__( + self, + ts_graph: Union[torch._C.Graph, torch._C.Block], + name_to_param: Dict[str, torch.Tensor], + name_to_buffer: Dict[str, torch.Tensor], + blocks_to_lifted_attrs: Dict[torch._C.Block, Set[str]], + name_to_non_tensor_attribute: Dict[str, Any], + name_to_constant: Dict[str, Any], + ): + self.ts_graph = ts_graph + self.name_to_param = name_to_param + self.name_to_buffer = name_to_buffer + + self.fx_graph: torch.fx.Graph = torch.fx.Graph() + self.input_specs: List[InputSpec] = [] + self.output_specs: List[OutputSpec] = [] + + self.name_to_node: Dict[ + str, Union[torch.fx.Node, List[torch.fx.Node], Dict[Any, torch.fx.Node]] + ] = {} + self.name_to_constant: Dict[str, Any] = name_to_constant + + # Mapping from torchscript node output name to attribute fully qualified name + self.name_to_attribute_fqn: Dict[str, str] = {} + + # Mapping from fully qualified name to real values or a fx graph node + # During convert, this represents the current value of a non-tensor attribute + # One use case is: + # def forward(self, x): + # c1 = self.count + # self.count += 1 + # c2 = 
self.count + # return x + c1 + c2 + self.name_to_non_tensor_attribute_node: Dict[str, Any] = {} + + # Mapping from fully qualified name to initial real values inputs + # We separate it from self.name_to_non_tensor_attribute_node since + # we need initial real value input when we construct fx.GraphModule + self.name_to_non_tensor_attribute: Dict[str, Any] = name_to_non_tensor_attribute + + self.subgraphs: Dict[str, torch.fx.GraphModule] = {} + + self.blocks_to_lifted_attrs = blocks_to_lifted_attrs + + # Populate methods for the standard operators. + for k in kind_to_standard_operators.keys(): + handler_func_name = ir_name_to_func_name(k) + # Create an indirect function call: + # convert__ --> lambda node: _convert_standard_operator(node) + setattr( + self, + handler_func_name, + lambda node: self._convert_standard_operators(node), + ) + + # This stores a list of return results that do not appear in the original TS + # graph's outputs. The reason we maintain this is because some operations in the sub-block + # might have inplace updates to the variable defined in the parent fx graph. After + # the execution of that sub-block, the variable defined in the parent fx graph also + # needs to be updated. 
+ self.name_update_from_subblock_to_parent: Set[str] = set() + + def _is_get_attr_node(self, fqn): + return ( + fqn in self.name_to_buffer + or fqn in self.name_to_param + or ( + fqn in self.name_to_constant + and isinstance(self.name_to_constant[fqn], torch.ScriptObject) + ) + ) + + def _convert_block_to_subgraph(self, node: torch._C.Node, arguments: List[str]): + subgraph_nodes, subgraph_converters = [], [] + for block in node.blocks(): + subgraph_converter = TS2FXGraphConverter( + block, + self.name_to_param, + self.name_to_buffer, + self.blocks_to_lifted_attrs, + {}, + self.name_to_constant, + ) + subgraph_converter.name_to_attribute_fqn = self.name_to_attribute_fqn + + for block_arg in arguments: + normalized_block_arg_name = normalize_name(block_arg) + placeholder_node = subgraph_converter.fx_graph.placeholder( + normalized_block_arg_name + ) + subgraph_converter.name_to_node[block_arg] = placeholder_node + + subgraph = subgraph_converter.convert() + subgraph_name = self.add_subgraph(subgraph) + subgraph_nodes.append(self.fx_graph.get_attr(subgraph_name)) + subgraph_converters.append(subgraph_converter) + return subgraph_nodes, subgraph_converters + + def _identify_inputs_as_arguments(self, entry): + """ + Identify inputs from the innermost sub-block. This is needed + for nested sub-blocks when the input is hidden in the nested sub-block. + E.g., example IR of input is hidden in the nested sub-block. + Graph[x.1] + %1 = ... + Block[] + Block[x.1] + %2 = x.1 ... 
+ """ + arguments: Set[str] = set() + for block in entry.blocks(): + for block_node in block.nodes(): + for block_node_in in block_node.inputs(): + if ( + block_node_in.debugName() in self.name_to_node + and block_node_in.debugName() not in self.name_to_attribute_fqn + ): + arguments.add(block_node_in.debugName()) + arguments = arguments.union( + self._identify_inputs_as_arguments(block_node) + ) + return arguments + + def is_top_level_graph(self): + return isinstance(self.ts_graph, torch._C.Graph) + + def add_subgraph(self, subgraph) -> str: + name = f"subgraph_{len(self.subgraphs)}" + self.subgraphs[name] = subgraph + return name + + def get_args_kwargs(self, node: torch._C.Node, schema): + args = [] + kwargs = {} + for input, schema_arg in zip(node.inputs(), schema.arguments): + if schema_arg.kwarg_only: + kwargs[schema_arg.name] = self.get_fx_value_by_ir_value(input) + else: + args.append(self.get_fx_value_by_ir_value(input)) + + return tuple(args), kwargs + + def get_fx_value_by_ir_value(self, value: torch._C.Value): + value_name = value.debugName() + + if value_name in self.name_to_node: + input_node = self.name_to_node[value_name] + return input_node + elif value_name in self.name_to_constant: + if isinstance(self.name_to_constant[value_name], torch.ScriptObject): + return self.fx_graph.get_attr(value_name) + return self.name_to_constant[value_name] + else: + raise ValueError(f"Input {value_name} not found") + + def get_fx_value_by_fqn(self, name): + if name in self.name_to_node: + fx_node = self.name_to_node[name] + elif name in self.name_to_constant: + fx_node = self.name_to_constant[name] + elif name in self.name_to_non_tensor_attribute_node: + fx_node = self.name_to_non_tensor_attribute_node[name] + elif name in self.name_to_non_tensor_attribute: + fx_node = self.name_to_non_tensor_attribute[name] + else: + raise ValueError(f"Attribute {name} not found") + return fx_node + + def convert(self) -> torch.fx.GraphModule: + self.convert_graph_inputs() + + for 
node in self.ts_graph.nodes(): + self.convert_node(node) + + self.convert_graph_outputs() + + # Pass parameter and buffer to the root for lookup. + gm = torch.fx.GraphModule( + { + **self.subgraphs, + **self.name_to_param, + **self.name_to_buffer, + **self.name_to_non_tensor_attribute, + **self.name_to_constant, + }, + self.fx_graph, + ) + + inplace_optimize_sym_size_div(gm) + + gm.graph.lint() + + return gm + + def convert_graph_inputs(self): + for graph_input in self.ts_graph.inputs(): + name = graph_input.debugName() + + if name in self.name_to_param: + normalized_name = normalize_name(name) + self.input_specs.append( + InputSpec( + InputKind.PARAMETER, + arg=TensorArgument(name=normalized_name), + target=name, + ) + ) + fx_node = get_node_as_placeholder_or_get_attr( + self.fx_graph, name, self.is_top_level_graph() + ) + elif name in self.name_to_buffer: + normalized_name = normalize_name(name) + self.input_specs.append( + InputSpec( + InputKind.BUFFER, + arg=TensorArgument(name=normalized_name), + target=name, + persistent=True, + ) + ) + fx_node = get_node_as_placeholder_or_get_attr( + self.fx_graph, name, self.is_top_level_graph() + ) + elif name in self.name_to_constant: + assert isinstance( + self.name_to_constant[name], torch.ScriptObject + ), "Input conversion only handles ScriptObject" + normalized_name = normalize_name(name) + self.input_specs.append( + InputSpec( + InputKind.CUSTOM_OBJ, + arg=CustomObjArgument( + name=normalized_name, class_fqn=normalized_name + ), + target=name, + persistent=False, + ) + ) + fx_node = get_node_as_placeholder_or_get_attr( + self.fx_graph, name, self.is_top_level_graph() + ) + elif isinstance(graph_input.type(), torch.ClassType): + # Directly skip inputs that are ScriptObject but not used in the graph. 
+ continue + else: + normalized_name = normalize_name(name, prefix="input") + self.input_specs.append( + InputSpec( + InputKind.USER_INPUT, + arg=TensorArgument(name=normalized_name), + target=name, + ) + ) + fx_node = self.fx_graph.placeholder(normalized_name) + + self.name_to_node[name] = fx_node + + def convert_aten_Float(self, node: torch._C.Node): + def to_float_tensor(t): + return t.to(dtype=torch.float).item() + + inp_list = [ + self.get_fx_value_by_ir_value(inp) for inp in node.inputs() + ] # noqa: C416 + fx_node = self.fx_graph.call_function( + to_float_tensor, + tuple(inp_list), + ) + self.name_to_node[node.output().debugName()] = fx_node + + def convert_aten_tensor(self, node: torch._C.Node): + """aten::tensor creates a constant tensor ad-hoc --> GetAttr""" + args, kwargs = self.get_args_kwargs(node, torch.ops.aten.tensor.default._schema) + + for k in kwargs: + if k == "requires_grad": + kwargs[k] = bool(kwargs[k]) # 0 -> False, 1 -> True + + to_tensor = ( + torch.tensor + if all(isinstance(a, int) for a in args) + else torch._refs.tensor + ) + + def target(*args, **kwargs): + if "dtype" in kwargs and kwargs["dtype"] is not None: + kwargs["dtype"] = _TORCH_ENUM_TO_DTYPE[kwargs["dtype"]] + return to_tensor(*args, **kwargs) + + # def to_dynamic_tensor(*args, **kwargs): + # if "dtype" in kwargs and kwargs["dtype"] is not None: + # kwargs["dtype"] = _TORCH_ENUM_TO_DTYPE[kwargs["dtype"]] + # return torch._refs.tensor(*args, **kwargs) + + output_name = node.output().debugName() + fx_node = self.fx_graph.call_function(target, args, kwargs) + self.name_to_node[output_name] = fx_node + + def convert_aten_append(self, node: torch._C.Node): + # special handle python list append: "aten::append.t(t[](a!) self, t(c -> *) el) -> t[](a!)" + + # inplace append to the list!! 
This is kinda crazy, as we are inplace mutating the list + # This makes the converter "non-functional", and the result depends on the order of the nodes being converter + # In a sense, the converter now becomes an stateful interpreter + warnings.warn( + "Converting aten::append.t, which is a inplace mutation of the list. " + "This makes the converter non-functional: the result depends on the order of the append nodes being converter!" + ) + + args = tuple(self.get_fx_value_by_ir_value(inp) for inp in node.inputs()) + fx_node = self.fx_graph.call_function(list_append, args) + self.name_to_node[node.output().debugName()] = fx_node + + # inplace mutate arg[0], which is the python list + self.name_to_node[node.inputsAt(0).debugName()] = fx_node + + # Variables that need to be updated to parent module. + if not self.is_top_level_graph() and args[0].op == "placeholder": + self.name_update_from_subblock_to_parent.add(node.inputsAt(0).debugName()) + + def convert_prim_Constant(self, node: torch._C.Node): + name = node.output().debugName() + + value: Any = None + if node.hasAttribute("value"): + constant_kind = node.kindOf("value") + if constant_kind == "i": + value = node.i("value") + elif constant_kind == "f": + value = node.f("value") + elif constant_kind == "s": + value = node.s("value") + elif constant_kind == "t": + alias_name = ( + f"lifted_tensor_{name}" # Follow naming convention from EP tracing. 
+ ) + fx_node = self.fx_graph.get_attr(alias_name) + self.name_to_node[name] = fx_node + name, value = alias_name, node.t("value") + elif constant_kind == "ival": + value = node.ival("value") + else: + raise ValueError(f"Unsupported constant type: {node.kindOf('value')}") + else: + value = None + + self.name_to_constant[name] = value + + def convert_prim_CallMethod(self, node: torch._C.Node): + inp_list = [ + self.get_fx_value_by_ir_value(inp) for inp in node.inputs() + ] # noqa: C416 + fx_node = self.fx_graph.call_method( + node.s("name"), + tuple(inp_list), + ) + self.name_to_node[node.output().debugName()] = fx_node + + def convert_prim_device(self, node: torch._C.Node): + input_type = node.input().type() + if input_type.isSubtypeOf(torch._C.TensorType.get()): + device = input_type.device() # type: ignore[attr-defined] + output_name = node.output().debugName() + self.name_to_constant[output_name] = device + else: + raise ValueError(f"Unsupported JitType ({input_type}) when get device") + + def convert_prim_GetAttr(self, node: torch._C.Node): + # Build fully qulified name + attr_fqn = get_attribute_fqn_from_ts_node(self.name_to_attribute_fqn, node) + output_name = node.output().debugName() + self.name_to_attribute_fqn[output_name] = attr_fqn + + if self.is_top_level_graph(): + if self._is_get_attr_node(attr_fqn): + # We insert a get_attr node due to two reasons. + # First, ts graph does not lift tensor constants as input nodes. So + # tensor constants may be ignored by in convert_graph_inputs(). + # Second, attr_fqn may have been written to via SetAttr. Two + # GetAttr may give different values. 
+ self.name_to_node[output_name] = self.fx_graph.get_attr(attr_fqn) + else: + if attr_fqn not in self.name_to_non_tensor_attribute_node: + self.name_to_non_tensor_attribute_node[ + attr_fqn + ] = self.name_to_non_tensor_attribute[attr_fqn] + self.name_to_node[output_name] = self.name_to_non_tensor_attribute_node[ + attr_fqn + ] + else: + # Special support for if blocks which do not allow SetAttr TorchScript + # node and get_attr FX Graph Node. + if self._is_get_attr_node(attr_fqn): + self.name_to_node[output_name] = self.name_to_node[attr_fqn] + + def convert_prim_SetAttr(self, node: torch._C.Node): + attr_fqn = get_attribute_fqn_from_ts_node(self.name_to_attribute_fqn, node) + attr_value = tuple(node.inputs())[1] + ts_graph_tensor_input = self.get_fx_value_by_ir_value(attr_value) + if self._is_get_attr_node(attr_fqn): + fx_attr_node = self.fx_graph.get_attr(attr_fqn) + self.fx_graph.call_function( + torch.Tensor.copy_, (fx_attr_node, ts_graph_tensor_input) + ) + else: + self.name_to_non_tensor_attribute_node[attr_fqn] = ts_graph_tensor_input + + def convert_call_function_op(self, node: torch._C.Node): + target = get_op_overload(node) + + args, kwargs = self.get_args_kwargs(node, target._schema) + + fx_node = self.fx_graph.call_function(target, args, kwargs) + + # TODO: covnert sourceRange() into stack_trace + # fx_node.meta["stack_trace"] = node.sourceRange() + + if node.outputsSize() == 1: + output_name = node.output().debugName() + self.name_to_node[output_name] = fx_node + else: + for i, outp in enumerate(node.outputs()): + output_name = outp.debugName() + next_fx_node = self.fx_graph.call_function( + operator.getitem, (fx_node, i) + ) + self.name_to_node[output_name] = next_fx_node + + def convert_prim_TupleConstruct(self, node: torch._C.Node): + self._convert_prim_iterator(node) + + def convert_prim_ListConstruct(self, node: torch._C.Node): + self._convert_prim_iterator(node) + + def _convert_prim_iterator(self, node: torch._C.Node): + output_list = [] + for 
inp in node.inputs(): + output_list.append(self.get_fx_value_by_ir_value(inp)) + + output_name = node.output().debugName() + self.name_to_node[output_name] = output_list + + def convert_prim_DictConstruct(self, node: torch._C.Node): + output_dict = {} + k, v = None, None + for i, inp in enumerate(node.inputs()): + # We assume key value are stored in pair in the DictConstruct. + # The first element is the key and the following is the value. + if i % 2 == 0: + k = self.get_fx_value_by_ir_value(inp) + else: + v = self.get_fx_value_by_ir_value(inp) + assert ( + k is not None and v is not None + ), "DictConstruct has an empty key value pair." + output_dict[k] = v + k, v = None, None + + assert ( + k is None and v is None + ), "DictConstruct has an odd number of elements (violating our assumption)." + + output_name = node.output().debugName() + self.name_to_node[output_name] = output_dict + + def convert_prim_ListUnpack(self, node: torch._C.Node): + self._convert_prim_unpack_iterator(node) + + def convert_prim_TupleUnpack(self, node: torch._C.Node): + self._convert_prim_unpack_iterator(node) + + def _convert_prim_unpack_iterator(self, node: torch._C.Node): + # Single input and multiple outputs for unpacking. 
+ for i, outp in enumerate(node.outputs()): + outp_name = outp.debugName() + inp = self.get_fx_value_by_ir_value(node.input()) + fx_node = self.fx_graph.call_function(operator.getitem, (inp, i)) + self.name_to_node[outp_name] = fx_node + + def convert_aten_Int(self, node: torch._C.Node): + # converts aten::Int as aten._to_copy + aten::_local_scalar_dense + target = torch.ops.aten._to_copy.default + args = tuple(self.get_fx_value_by_ir_value(input) for input in node.inputs()) + to_copy_node = self.fx_graph.call_function(target, args, {"dtype": torch.int32}) + + fx_node = self.fx_graph.call_function( + torch.ops.aten._local_scalar_dense.default, (to_copy_node,) + ) + + # TODO: covnert sourceRange() into stack_trace + # fx_node.meta["stack_trace"] = node.sourceRange() + + output_name = node.output().debugName() + self.name_to_node[output_name] = fx_node + + def convert_prim_NumToTensor(self, node: torch._C.Node): + # Converts prim::NumToTensor as aten.scalar_tensor. + # prim::NumToTensor IRs are currently triggered by: + # .size() https://github.com/pytorch/pytorch/blob/main/torch/csrc/jit/frontend/tracer.cpp#L950 + # .numel() https://github.com/pytorch/pytorch/blob/main/torch/csrc/jit/frontend/tracer.cpp#L971 + # For both of those APIs, torch.jit.trace implicitly sets the output tensor type + # to be LongTensor. 
+ target = torch.ops.aten.scalar_tensor + args = tuple(self.get_fx_value_by_ir_value(input) for input in node.inputs()) + + fx_node = self.fx_graph.call_function(target, args, {"dtype": torch.long}) + output_name = node.output().debugName() + self.name_to_node[output_name] = fx_node + + def convert_prim_CreateObject(self, node: torch._C.Node): + output_name = node.output().debugName() + self.name_to_attribute_fqn[output_name] = "" + + def convert_aten__convolution(self, node: torch._C.Node): + # converts aten::_convolution as aten.convolution, since aten::_convolution + # doesn't have a meta function + target = torch.ops.aten.convolution.default + args, kwargs = self.get_args_kwargs(node, target._schema) + + fx_node = self.fx_graph.call_function(target, args, kwargs) + + output_name = node.output().debugName() + self.name_to_node[output_name] = fx_node + + def convert_aten_div(self, node: torch._C.Node): + target = get_op_overload(node) + schema = target._schema + + args, kwargs = self.get_args_kwargs(node, schema) + + # converts aten::div.Tensor_mode(x, tensor_constant) + # as aten.div.Scalar_mode(x, tensor_constant.item()) + if schema.overload_name == "Tensor_mode": + arg1_name = args[1].name + if arg1_name in self.name_to_constant and isinstance( + self.name_to_constant[arg1_name], torch.Tensor + ): + tensor_constant = self.name_to_constant[arg1_name] + if tensor_constant.numel() == 1: + updated_args = list(args) + updated_args[1] = self.name_to_constant[arg1_name].item() + + fx_node = self.fx_graph.call_function( + torch.ops.aten.div.Scalar_mode, + tuple(updated_args), + kwargs, + ) + + # TODO: covnert sourceRange() into stack_trace + # fx_node.meta["stack_trace"] = node.sourceRange() + + output_name = node.output().debugName() + self.name_to_node[output_name] = fx_node + return + + self.convert_call_function_op(node) + + def convert_aten___getitem__(self, node: torch._C.Node): + input_container, index = tuple( + self.get_fx_value_by_ir_value(input) for input 
in node.inputs() + ) + fx_node = self.fx_graph.call_function( + operator.getitem, (input_container, index) + ) + output_name = node.output().debugName() + self.name_to_node[output_name] = fx_node + + def convert_aten_to(self, node: torch._C.Node): + target = get_op_overload(node) + args, kwargs = self.get_args_kwargs(node, target._schema) + + # special handle aten.to.dtype and aten.to.prim_dtype followed by inplace_mutation_op + # coz aten.to + inplace_mutation_op pattern would trigger + # "cannot mutate tensors with frozen storage" functionalization error. + # To work around the issue, we override the copy to be True, so that the output + # is for sure not an alias of input + if target == torch.ops.aten.to.dtype or target == torch.ops.aten.to.prim_dtype: + user_nodes = [use.user for use in node.output().uses()] + user_targets = [ + get_op_overload(user_node) + for user_node in user_nodes + if user_node.schema() != "(no schema)" + ] + has_mutable_target = any( + target._schema.is_mutable for target in user_targets + ) + + if has_mutable_target: + assert len(args) >= 4 + new_args = list(args) + new_args[3] = True # copy, override to True + fx_node = self.fx_graph.call_function( + torch.ops.aten.to.dtype, tuple(new_args) + ) + # temp hack to work around the issue https://github.com/pytorch/pytorch/issues/131679 + # When this issue is fixed, the clone node would be no longer needed + clone_node = self.fx_graph.call_function( + torch.ops.aten.clone.default, (fx_node,) + ) + output_name = node.output().debugName() + self.name_to_node[output_name] = clone_node + return + + self.convert_call_function_op(node) + + def convert_aten_add(self, node: torch._C.Node): + if node.schema() == "(no schema)": + if isinstance(node.inputsAt(0).type(), torch.ListType) and isinstance( + node.inputsAt(1).type(), torch.ListType + ): + target = torch.ops.aten.add.t + else: + raise RuntimeError(f"unable to determind the target for {node}") + else: + target = get_op_overload(node) + + if 
target == torch.ops.aten.add.t: + # special handle python list/tuple add: "aten::add.t(t[] a, t[] b) -> t[]" for + # RuntimeError: aten::add() Expected a value of type 'List[t]' for argument 'a' but instead found type 'immutable_list'. + args, kwargs = self.get_args_kwargs(node, target._schema) + output_name = node.output().debugName() + self.name_to_node[output_name] = self.fx_graph.call_function(list_add, args) + else: + self.convert_call_function_op(node) + + def _check_prim_loop_support(self, node): + inputs = list(node.inputs()) + + # TODO: (1/N) stage. + if inputs[0].debugName() not in self.name_to_constant: + raise RuntimeError( + "prim::Loop currently cannot run with dynamic value of number of iterations." + ) + + # Make sure the condition is not updated in the subblock. + subblock = next(node.blocks()) + condition_output_name = next(subblock.outputs()).debugName() + for node in subblock.nodes(): + if ( + node.outputsSize() == 1 + and node.output().debugName() == condition_output_name + ): + raise RuntimeError( + "prim::Loop currently cannot run with dynamic value of condition." + ) + if node.outputsSize() >= 2: + for outp in node.outputs(): + if outp.debugName() == condition_output_name: + raise RuntimeError( + "prim::Loop currently cannot run with dynamic value of condition." + ) + + def convert_prim_Loop(self, node: torch._C.Node): + inputs = list(node.inputs()) + self._check_prim_loop_support(node) + + num_iterations = self.get_fx_value_by_ir_value(inputs[0]) + + # Find inputs. + loop_local_arguments = [inp.debugName() for inp in inputs[2:]] + + global_arguments = self._identify_inputs_as_arguments(node) + + # Lift parameters as inputs. 
+ for block in node.blocks(): + global_arguments = global_arguments.union( + self.blocks_to_lifted_attrs[block] + ) + + global_arguments = list(global_arguments) + + subgraph_nodes, subgraph_converters = self._convert_block_to_subgraph( + node, global_arguments + ) + + assert len(subgraph_nodes) == 1 + subgraph_converter = subgraph_converters[0] + if not self.is_top_level_graph(): + self.name_update_from_subblock_to_parent = ( + self.name_update_from_subblock_to_parent.union( + subgraph_converter.name_update_from_subblock_to_parent + ) + ) + + fx_block_args = [ + self.get_fx_value_by_fqn(name) + for name in loop_local_arguments + global_arguments + ] + for iter_idx in range(num_iterations): + loop_node = self.fx_graph.call_function( + execute_subgraph_from_prim_loop, + # Check execute_node function for the expected arguments order. + ( + subgraph_nodes[0], + iter_idx, + len(loop_local_arguments), + *fx_block_args, + ), + {}, + ) + + # Update the value of loop local variables. + if node.outputsSize() >= 1: + for i, outp in enumerate(node.outputs()): + output_name = outp.debugName() + self.name_to_node[output_name] = self.fx_graph.call_function( + operator.getitem, + ( + loop_node, + i + 1, + ), # + 1 because the 0th element is the condition. + ) + fx_block_args[i] = self.name_to_node[output_name] + + # Update the value of global variables, whose values are modified inplace. + for i, name in enumerate( + subgraph_converter.name_update_from_subblock_to_parent + ): + self.name_to_node[name] = self.fx_graph.call_function( + operator.getitem, + ( + loop_node, + i + node.outputsSize() + 1, + ), # + 1 because the 0th element is the condition. 
+ ) + global_argument_index = global_arguments.index(name) + fx_block_args[ + i + node.outputsSize() + global_argument_index + ] = self.name_to_node[name] + + def _check_set_attr_in_if_block(self, if_node: torch._C.Node): + for block in if_node.blocks(): + for node in block.nodes(): + if node.kind() == "prim::SetAttr": + raise RuntimeError( + "During converting prim::If to torch.cond, found prim::SetAttr op" + " which is not supported yet. Please file an issue if you come " + "across this error." + ) + + def convert_prim_If(self, node: torch._C.Node): + self._check_set_attr_in_if_block(node) + + inputs = list(node.inputs()) + assert len(inputs) == 1 + predicate = self.get_fx_value_by_ir_value(inputs[0]) + + # Find inputs. + arguments = self._identify_inputs_as_arguments(node) + + # Lift parameters as inputs. + for block in node.blocks(): + arguments = arguments.union(self.blocks_to_lifted_attrs[block]) + + arguments = list(arguments) + subgraph_nodes, _ = self._convert_block_to_subgraph(node, arguments) + + assert len(subgraph_nodes) == 2 + + fx_block_args = [self.get_fx_value_by_fqn(name) for name in arguments] + + args = ( + predicate, + subgraph_nodes[0], + subgraph_nodes[1], + tuple(fx_block_args), + ) + + cond_node = self.fx_graph.call_function(torch.cond, args, {}) + + # prim::If can also have zero output. + if node.outputsSize() == 1: + output_name = node.output().debugName() + self.name_to_node[output_name] = cond_node + elif node.outputsSize() > 1: + for i, output in enumerate(node.outputs()): + output_name = output.debugName() + getitem = self.fx_graph.call_function(operator.getitem, (cond_node, i)) + self.name_to_node[output_name] = getitem + + def convert_aten_Bool(self, node: torch._C.Node): + self._convert_as_noop(node) + + def convert_prim_Enter(self, node: torch._C.Node): + # export generally treats prim::Enter as noop + # The only context manager export supports is aten::enable_grad. 
+ # Unfortunately, TorchScript does not support aten::enable_grad yet. + # TODO: support aten::enable_grad in both TorchScript and Converter. + return + + def convert_prim_Exit(self, node: torch._C.Node): + # export treats prim::Exit as noop + return + + def _convert_as_noop(self, node: torch._C.Node): + # Converts the node as a no-op by mapping its output node as arg[0] + + target = get_op_overload(node) + schema = target._schema + + args, kwargs = self.get_args_kwargs(node, schema) + + output_name = node.output().debugName() + self.name_to_node[output_name] = args[0] + + def convert_profiler__record_function_exit(self, node: torch._C.Node): + # _record_function_exit has side effect so we keep it in fx.graph + # currently, _record_function_enter_new and _record_function_exit are + # discarded during `retrace_as_exported_program`. + target = torch.ops.profiler._record_function_exit + args = tuple(self.get_fx_value_by_ir_value(input) for input in node.inputs()) + self.fx_graph.call_function(target, args) + + def convert_prim_tolist(self, node: torch._C.Node): + # prim::tolist cannot be supported by `_convert_standard_operators` + # since it requires call_method instead of call_function. + target = "tolist" + args = (self.get_fx_value_by_ir_value(next(node.inputs())),) + fx_node = self.fx_graph.call_method(target, args) + output_name = node.output().debugName() + self.name_to_node[output_name] = fx_node + + def convert_prim_Uninitialized(self, node: torch._C.Node): + # `prim::Uninitialized` is inserted by the compiler when it can prove + # the value will never be used. It can be introduced by exceptions, + # breaks, continues, and returns. + # So we add a dummy constant to the graph. 
+ output_name = node.output().debugName() + self.name_to_constant[output_name] = torch.Tensor() + + def _convert_standard_operators(self, node: torch._C.Node): + target = kind_to_standard_operators[node.kind()] + args = tuple(self.get_fx_value_by_ir_value(input) for input in node.inputs()) + fx_node = self.fx_graph.call_function(target, args) + output_name = node.output().debugName() + self.name_to_node[output_name] = fx_node + + def convert_node(self, node: torch._C.Node): + node_kind = node.kind() + + # Get handler based on namespace and operator name. + # Provide a default node handler as well in case we don't find + # matching converter for that. + handler_func_name = ir_name_to_func_name(node_kind) + handler_func = getattr(self, handler_func_name, self.convert_call_function_op) + + # str calls print function implemented in CPP. To avoid repeating + # the entire logic here, we simply keep first line from node string (getting rid + # of sub-blocks IR prints). + node_str = "".join(str(node).split("\n")[:1]) + log.debug("[%s] converts [%s]", handler_func.__name__, node_str) + try: + handler_func(node) + except Exception as e: + raise RuntimeError(f"TS2EPConverter failed for node {node_kind}") from e + + def convert_graph_outputs(self): + args = [] + outp_name_list = [outp.debugName() for outp in self.ts_graph.outputs()] + list( + self.name_update_from_subblock_to_parent + ) + for output_name in outp_name_list: + if output_name in self.name_to_node: + fx_node = self.name_to_node[output_name] + # TODO: Revisit this later after HigherOrderOp design changes. + # Currently, we cannot directly return input as output. 
+ if ( + not self.is_top_level_graph() + and isinstance(fx_node, torch.fx.Node) + and fx_node.op == "placeholder" + ): + fx_node = self.fx_graph.call_function(torch.clone, (fx_node,)) + args.append(fx_node) + self.output_specs.append( + OutputSpec( + OutputKind.USER_OUTPUT, + arg=TensorArgument(name=output_name), + target=output_name, + ) + ) + elif output_name in self.name_to_constant: + args.append(self.name_to_constant[output_name]) + self.output_specs.append( + OutputSpec( + OutputKind.USER_OUTPUT, + arg=ConstantArgument( + name=output_name, value=self.name_to_constant[output_name] + ), + target=output_name, + ) + ) + else: + raise ValueError(f"Output {output_name} not found") + + if len(args) == 0: + # Sub-block of prim::If can have zero output. + self.fx_graph.output([]) + elif len(args) == 1: + self.fx_graph.output( + args[0] + ) # Get rid of an extra list wrapped around final output. + elif len(args) > 1: + self.fx_graph.output( + args + ) # For prim::Loop and prim::If with multiple outputs. + else: + # Sub-block of prim::Loop can have multiple outputs. + self.fx_graph.output(args) + + +class ExplainTS2FXGraphConverter(TS2FXGraphConverter): + """ + Run TS2FXGraphConverter in an explain mode. It collects all failed operators conversions + and provide that information to users. In order to collect all failed conversions, it + also mocks some internal attributes (e.g., name_to_node). + """ + + class _DictMock(dict): + def __init__(self, dict_data, mock_value): + super().__init__(dict_data) + self.mock_value = mock_value + + def __getitem__(self, key): + # If the original dictionary has the key, return its value. + # Otherwise, return the mock value. 
+ if not super().__contains__(key): + return self.mock_value + return super().__getitem__(key) + + def __contains__(self, key): + return True + + def __init__( + self, + ts_graph: Union[torch._C.Graph, torch._C.Block], + name_to_param: Dict[str, torch.Tensor], + name_to_buffer: Dict[str, torch.Tensor], + blocks_to_lifted_attrs: Dict[torch._C.Block, Set[str]], + name_to_non_tensor_attribute: Dict[str, Any], + name_to_constant: Dict[str, Any], + ): + super().__init__( + ts_graph, + name_to_param, + name_to_buffer, + blocks_to_lifted_attrs, + name_to_non_tensor_attribute, + name_to_constant, + ) + + # Data to keep track of unsupported nodes. + self.unsupported_node_list: List[torch._C.Node] = [] + + # Add mock to needed attributes. + self.name_to_node = ExplainTS2FXGraphConverter._DictMock( + self.name_to_node, + # Dummy node. + torch.fx.Node( + None, # type: ignore[arg-type] + "mock", + "call_function", + lambda: None, + (), + {}, + ), + ) + + def explain(self): + self.convert_graph_inputs() + for node in self.ts_graph.nodes(): + self.convert_node(node) + self.convert_graph_outputs() + + def convert_node(self, node): + try: + super().convert_node(node) + except Exception as e: + self.unsupported_node_list.append(node) + + +@contextmanager +def disable_logging(log): + disabled = log.disabled + log.disabled = True + try: + yield + finally: + log.disabled = disabled + + +class TS2EPConverter: + # TorchScript model to ExportedProgram converter + def __init__( + self, + ts_model: Union[torch.jit.ScriptModule, torch.jit.ScriptFunction], + sample_args: Tuple[Any, ...], + sample_kwargs: Optional[Dict[str, Any]] = None, + ): + self.ts_model = ts_model + self.ts_graph, self.params, _, _ = _create_jit_graph(ts_model, sample_args) + + self.sample_args = sample_args + self.sample_kwargs = sample_kwargs + + self.name_to_param: Dict[str, torch.Tensor] = {} + self.name_to_buffer: Dict[str, torch.Tensor] = {} + param_list = ( + list(self.ts_model.parameters()) + if not 
isinstance(self.ts_model, torch._C.ScriptFunction) + else [] + ) + if not isinstance(self.ts_model, torch._C.ScriptFunction): + for k, tensor in self.ts_model.state_dict().items(): # type: ignore[union-attr] + # Check if tensor belongs to any parameter. + if any( + (tensor == param).all() + for param in param_list + if tensor.shape == param.shape + ): + self.name_to_param[k] = tensor + else: + self.name_to_buffer[k] = tensor + + self.name_to_non_tensor_attributes: Dict[str, Any] = {} + self.name_to_constant: Dict[str, Any] = {} + + self.lift_get_attr() + + def convert(self) -> ExportedProgram: + log.info( + """ +TS2EPConverter logging starts from here. + +INFO: (TORCH_LOGS="export" ) + * Log TorchScript IR. + +DEBUG: (TORCH_LOGS="+export" ), additionally + * Log conversion IR by IR in a format of [] converts []. + """ + ) + log.info("TorchScript graph\n\n%s\n", self.ts_graph) + + blocks_to_lifted_attrs = get_block_to_lifted_attrs(self.ts_graph) + + graph_converter = TS2FXGraphConverter( + self.ts_graph, + self.name_to_param, + self.name_to_buffer, + blocks_to_lifted_attrs, + self.name_to_non_tensor_attributes, + self.name_to_constant, + ) + gm = graph_converter.convert() + + # Post-proccessing step to deal with quantized operators. + replace_quantized_ops_with_standard_ops(gm) + log.info("GraphModule: %s", gm.print_readable(print_output=False)) + + ep = self.retrace_as_exported_program( + gm, + graph_converter.name_to_constant, + ) + log.info("%s", ep) + + # Post-processing step to ensure ExportedProgram has the same state_dict as + # the original TorchScript model. Throw warnings for additionally populated + # state_dict entries. + if not isinstance(self.ts_model, torch._C.ScriptFunction): + for k, tensor in self.ts_model.state_dict().items(): # type: ignore[union-attr] + if k not in ep.state_dict: + warnings.warn( + f"Manually populate {k} into state_dict ExportedProgram, but it is never used by the ExportedProgram." 
+ ) + ep.state_dict[k] = tensor + + return ep + + @disable_logging(log) + def explain(self, print_output=True): + blocks_to_lifted_attrs = get_block_to_lifted_attrs(self.ts_graph) + + graph_converter = ExplainTS2FXGraphConverter( + self.ts_graph, + self.name_to_param, + self.name_to_buffer, + blocks_to_lifted_attrs, + self.name_to_non_tensor_attributes, + self.name_to_constant, + ) + graph_converter.explain() + if len(graph_converter.unsupported_node_list) > 0: + explain_str = "Unsupported nodes are found in the following list:" + for i, n in enumerate(graph_converter.unsupported_node_list): + node_str = "".join(str(n).split("\n")[:1]) + explain_str += f"\n\n {i}. {n.kind()} [{node_str}]" + else: + explain_str = "Success!" + if print_output: + print(explain_str) + return explain_str + + def retrace_as_exported_program( + self, + gm: torch.fx.GraphModule, + name_to_constant: Dict[str, Any], + ): + # TODO: adjust input orders to match GraphSignature convention + ep = torch.export._trace._export( + gm, + self.sample_args, + strict=False, + pre_dispatch=True, + ) + + # Post-processing to make sure the ExportedProgram states are correct. + # Because during conversion, we set tensor constants as GetAttr, + # retracing cannot recognize them as tensor constants but instead + # treat them as buffers. We need to set them again here. + ep._constants.update( + { + k: v + for k, v in name_to_constant.items() + if isinstance(v, (torch.Tensor, torch.ScriptObject)) + } + ) + for k in name_to_constant: + ep.state_dict.pop(k, None) + + for i, spec in enumerate(ep.graph_signature.input_specs): + # Mark as constant tensors for erroneously traced buffers. 
+ if spec.kind == InputKind.BUFFER and spec.target in name_to_constant: + assert isinstance( + name_to_constant[spec.target], torch.Tensor + ), f"{type(name_to_constant[spec.target])} has been erroneously marked as buffer" + spec.kind = InputKind.CONSTANT_TENSOR + ep.verifier().check(ep) + + return ep + + def lift_get_attr(self): + # This function lifts multiple data types. + + # 1. Tensor constants attributes (e.g., self.data = torch.tensor([2,3])) + # to buffers. Currently, when there are tensor constants, export + # would error and ask users to register tensor constants as buffers. + # Since it is hard to manually do so for TorchScript models + # (e.g., source code is missing), this function automatically + # lifts tensor constants to be buffers. + + # 2. ScriptObbject to constant. It will then be converted to getattr in + # in the fx graph. + # + # This function should happen in TS2EPConverter instead of + # TS2FXGraphConverter since it gets attributes from self.ts_model + # which is not accessable in TS2FXGraphConverter. It is similar to where + # we collect self.name_to_param and self.name_to_buffer. 
+ name_to_attribute_fqn: Dict[str, str] = {} + + def get_attr(fqn: str): + name = fqn.split(".") + v = self.ts_model + for n in name: + v = getattr(v, n) + return v + + def get_fqn(node: torch._C.Node): + attr_name = node.s("name") + input_name = node.input().debugName() + root_attr_name = name_to_attribute_fqn[input_name] + attr_fqn = f"{root_attr_name}.{attr_name}" if root_attr_name else attr_name + return attr_fqn + + def _dfs_get_attr(block): + for node in block.nodes(): + if node.kind() == "prim::CreateObject": + output_name = node.output().debugName() + name_to_attribute_fqn[output_name] = "" + + if node.kind() == "prim::GetAttr": + attr_fqn = get_fqn(node) + value = get_attr(attr_fqn) + output_name = node.output().debugName() + name_to_attribute_fqn[output_name] = attr_fqn + if isinstance(value, torch.Tensor): + if attr_fqn not in self.name_to_buffer: + # Lift tensor constants to be a buffer + self.name_to_buffer[attr_fqn] = value + elif isinstance(value, torch.ScriptObject): + if attr_fqn not in self.name_to_constant: + self.name_to_constant[attr_fqn] = value + else: + self.name_to_non_tensor_attributes[attr_fqn] = value + + for subblock in node.blocks(): + _dfs_get_attr(subblock) + + _dfs_get_attr(self.ts_graph) diff --git a/janus/lib/python3.10/site-packages/torch/_export/db/__init__.py b/janus/lib/python3.10/site-packages/torch/_export/db/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..10a55772ab58b21573a6eba0356ddd3080164ac7 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/db/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
diff --git a/janus/lib/python3.10/site-packages/torch/_export/db/case.py b/janus/lib/python3.10/site-packages/torch/_export/db/case.py new file mode 100644 index 0000000000000000000000000000000000000000..b228f6c2c33773b07e5cb7e82abc1adcad6422d0 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/db/case.py @@ -0,0 +1,174 @@ +# mypy: allow-untyped-defs +import inspect +import re +import string +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Dict, List, Optional, Set, Tuple +from types import ModuleType + +import torch + +_TAGS: Dict[str, Dict[str, Any]] = { + "torch": { + "cond": {}, + "dynamic-shape": {}, + "escape-hatch": {}, + "map": {}, + "dynamic-value": {}, + "operator": {}, + "mutation": {}, + }, + "python": { + "assert": {}, + "builtin": {}, + "closure": {}, + "context-manager": {}, + "control-flow": {}, + "data-structure": {}, + "standard-library": {}, + "object-model": {}, + }, +} + + +class SupportLevel(Enum): + """ + Indicates at what stage the feature + used in the example is handled in export. + """ + + SUPPORTED = 1 + NOT_SUPPORTED_YET = 0 + + +ArgsType = Tuple[Any, ...] + + +def check_inputs_type(args, kwargs): + if not isinstance(args, tuple): + raise ValueError( + f"Expecting args type to be a tuple, got: {type(args)}" + ) + if not isinstance(kwargs, dict): + raise ValueError( + f"Expecting kwargs type to be a dict, got: {type(kwargs)}" + ) + for key in kwargs: + if not isinstance(key, str): + raise ValueError( + f"Expecting kwargs keys to be a string, got: {type(key)}" + ) + +def _validate_tag(tag: str): + parts = tag.split(".") + t = _TAGS + for part in parts: + assert set(part) <= set( + string.ascii_lowercase + "-" + ), f"Tag contains invalid characters: {part}" + if part in t: + t = t[part] + else: + raise ValueError(f"Tag {tag} is not found in registered tags.") + + +@dataclass(frozen=True) +class ExportCase: + example_args: ArgsType + description: str # A description of the use case. 
+ model: torch.nn.Module + name: str + example_kwargs: Dict[str, Any] = field(default_factory=dict) + extra_args: Optional[ArgsType] = None # For testing graph generalization. + # Tags associated with the use case. (e.g dynamic-shape, escape-hatch) + tags: Set[str] = field(default_factory=set) + support_level: SupportLevel = SupportLevel.SUPPORTED + dynamic_shapes: Optional[Dict[str, Any]] = None + + def __post_init__(self): + check_inputs_type(self.example_args, self.example_kwargs) + if self.extra_args is not None: + check_inputs_type(self.extra_args, {}) + + for tag in self.tags: + _validate_tag(tag) + + if not isinstance(self.description, str) or len(self.description) == 0: + raise ValueError(f'Invalid description: "{self.description}"') + + +_EXAMPLE_CASES: Dict[str, ExportCase] = {} +_MODULES: Set[ModuleType] = set() +_EXAMPLE_CONFLICT_CASES: Dict[str, List[ExportCase]] = {} +_EXAMPLE_REWRITE_CASES: Dict[str, List[ExportCase]] = {} + + +def register_db_case(case: ExportCase) -> None: + """ + Registers a user provided ExportCase into example bank. + """ + if case.name in _EXAMPLE_CASES: + if case.name not in _EXAMPLE_CONFLICT_CASES: + _EXAMPLE_CONFLICT_CASES[case.name] = [_EXAMPLE_CASES[case.name]] + _EXAMPLE_CONFLICT_CASES[case.name].append(case) + return + + _EXAMPLE_CASES[case.name] = case + + +def to_snake_case(name): + name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower() + + +def _make_export_case(m, name, configs): + if not isinstance(m, torch.nn.Module): + raise TypeError("Export case class should be a torch.nn.Module.") + + if "description" not in configs: + # Fallback to docstring if description is missing. 
+ assert ( + m.__doc__ is not None + ), f"Could not find description or docstring for export case: {m}" + configs = {**configs, "description": m.__doc__} + return ExportCase(**{**configs, "model": m, "name": name}) + + +def export_case(**kwargs): + """ + Decorator for registering a user provided case into example bank. + """ + + def wrapper(m): + configs = kwargs + module = inspect.getmodule(m) + if module in _MODULES: + raise RuntimeError("export_case should only be used once per example file.") + + assert module is not None + _MODULES.add(module) + module_name = module.__name__.split(".")[-1] + case = _make_export_case(m, module_name, configs) + register_db_case(case) + return case + + return wrapper + + +def export_rewrite_case(**kwargs): + def wrapper(m): + configs = kwargs + + parent = configs.pop("parent") + assert isinstance(parent, ExportCase) + key = parent.name + if key not in _EXAMPLE_REWRITE_CASES: + _EXAMPLE_REWRITE_CASES[key] = [] + + configs["example_args"] = parent.example_args + case = _make_export_case(m, to_snake_case(m.__name__), configs) + _EXAMPLE_REWRITE_CASES[key].append(case) + return case + + return wrapper diff --git a/janus/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py b/janus/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py new file mode 100644 index 0000000000000000000000000000000000000000..f701f54d4f4ea1cb5816292cd60bb4df3d03c5e8 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py @@ -0,0 +1,22 @@ +# mypy: allow-untyped-defs +import torch + +class ClassMethod(torch.nn.Module): + """ + Class methods are inlined during tracing. 
+ """ + + @classmethod + def method(cls, x): + return x + 1 + + def __init__(self) -> None: + super().__init__() + self.linear = torch.nn.Linear(4, 2) + + def forward(self, x): + x = self.linear(x) + return self.method(x) * self.__class__.method(x) * type(self).method(x) + +example_args = (torch.randn(3, 4),) +model = ClassMethod() diff --git a/janus/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py b/janus/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py new file mode 100644 index 0000000000000000000000000000000000000000..49e688bc0ac1f09567e3b877aaca29a1d02b4121 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py @@ -0,0 +1,17 @@ +# mypy: allow-untyped-defs +import torch + +class Dictionary(torch.nn.Module): + """ + Dictionary structures are inlined and flattened along tracing. + """ + + def forward(self, x, y): + elements = {} + elements["x2"] = x * x + y = y * elements["x2"] + return {"y": y} + +example_args = (torch.randn(3, 2), torch.tensor(4)) +tags = {"python.data-structure"} +model = Dictionary() diff --git a/janus/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py b/janus/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py new file mode 100644 index 0000000000000000000000000000000000000000..4aa623c7dc39efd94fecb8eb32caac3f7420f05d --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py @@ -0,0 +1,26 @@ +# mypy: allow-untyped-defs +import torch +from torch._export.db.case import SupportLevel + + +class ModelAttrMutation(torch.nn.Module): + """ + Attribute mutation is not supported. 
+ """ + + def __init__(self) -> None: + super().__init__() + self.attr_list = [torch.randn(3, 2), torch.randn(3, 2)] + + def recreate_list(self): + return [torch.zeros(3, 2), torch.zeros(3, 2)] + + def forward(self, x): + self.attr_list = self.recreate_list() + return x.sum() + self.attr_list[0].sum() + + +example_args = (torch.randn(3, 2),) +tags = {"python.object-model"} +support_level = SupportLevel.NOT_SUPPORTED_YET +model = ModelAttrMutation() diff --git a/janus/lib/python3.10/site-packages/torch/_export/db/logging.py b/janus/lib/python3.10/site-packages/torch/_export/db/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..2078113fef157e38a465c78156c3b22ff4c235c7 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/db/logging.py @@ -0,0 +1,47 @@ +# mypy: allow-untyped-defs + + +def exportdb_error_message(case_name: str): + from .examples import all_examples + from torch._utils_internal import log_export_usage + + ALL_EXAMPLES = all_examples() + # Detect whether case_name is really registered in exportdb. + if case_name in ALL_EXAMPLES: + url_case_name = case_name.replace("_", "-") + return f"See {case_name} in exportdb for unsupported case. \ + https://pytorch.org/docs/main/generated/exportdb/index.html#{url_case_name}" + else: + log_export_usage( + event="export.error.casenotregistered", + message=case_name, + ) + return f"{case_name} is unsupported." + + +def get_class_if_classified_error(e): + """ + Returns a string case name if the export error e is classified. + Returns None otherwise. + """ + + from torch._dynamo.exc import TorchRuntimeError, Unsupported, UserError + + ALWAYS_CLASSIFIED = "always_classified" + DEFAULT_CLASS_SIGIL = "case_name" + + # add error types that should be classified, along with any attribute name + # whose presence acts like a sigil to further distinguish which errors of + # that type should be classified. If the attribute name is None, then the + # error type is always classified. 
+ _ALLOW_LIST = { + Unsupported: DEFAULT_CLASS_SIGIL, + UserError: DEFAULT_CLASS_SIGIL, + TorchRuntimeError: None, + } + if type(e) in _ALLOW_LIST: + attr_name = _ALLOW_LIST[type(e)] + if attr_name is None: + return ALWAYS_CLASSIFIED + return getattr(e, attr_name, None) + return None diff --git a/janus/lib/python3.10/site-packages/torch/_export/error.py b/janus/lib/python3.10/site-packages/torch/_export/error.py new file mode 100644 index 0000000000000000000000000000000000000000..03b7f52fb9de435b9e58fa4a0bb141cc191e84c5 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/error.py @@ -0,0 +1,56 @@ +from enum import Enum + + +class ExportErrorType(Enum): + # User providing invalid inputs to either tracer, or other public facing APIs + INVALID_INPUT_TYPE = 1 + + # User returning values from their models that we don't support. + INVALID_OUTPUT_TYPE = 2 + + # Generated IR does not conform to Export IR Specification. + VIOLATION_OF_SPEC = 3 + + # User's code contains types and functionalities we don't support. + NOT_SUPPORTED = 4 + + # User's code didn't provide necessary details for us to successfully trace and export. + # For example, we use a lot of decorators and ask users to annotate their model. + MISSING_PROPERTY = 5 + + # User is using an API without proper initialization step. + UNINITIALIZED = 6 + + +def internal_assert(pred: bool, assert_msg: str) -> None: + """ + This is exir's custom assert method. It internally just throws InternalError. + Note that the sole purpose is to throw our own error while maintaining similar syntax + as python assert. + """ + + if not pred: + raise InternalError(assert_msg) + + +class InternalError(Exception): + """ + Raised when an internal invariance is violated in EXIR stack. + Should hint users to report a bug to dev and expose the original + error message. 
+ """ + + def __init__(self, message: str) -> None: + super().__init__(message) + + +class ExportError(Exception): + """ + This type of exception is raised for errors that are directly caused by the user + code. In general, user errors happen during model authoring, tracing, using our public + facing APIs, and writing graph passes. + """ + + def __init__(self, error_code: ExportErrorType, message: str) -> None: + prefix = f"[{error_code}]: " + super().__init__(prefix + message) diff --git a/janus/lib/python3.10/site-packages/torch/_export/non_strict_utils.py b/janus/lib/python3.10/site-packages/torch/_export/non_strict_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5c7331659d26a00dc68e0d169a70328cec251c2b --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/non_strict_utils.py @@ -0,0 +1,523 @@ +# mypy: allow-untyped-defs +import contextlib +import inspect +import logging +from collections import defaultdict +from typing import Any, Callable, Dict, List, Tuple, TYPE_CHECKING, Union + +import torch +import torch.utils._pytree as pytree +from torch._dynamo.source import ( + AttrSource, + GetItemSource, + LocalSource, + TensorProperty, + TensorPropertySource, +) +from torch._dynamo.variables.builder import TrackedFake +from torch._export.passes.add_runtime_assertions_for_constraints_pass import InputDim +from torch._export.passes.lift_constants_pass import ConstantAttrMap +from torch._guards import Source +from torch._library.fake_class_registry import FakeScriptObject +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.export import Constraint +from torch.export.dynamic_shapes import ( + _check_dynamic_shapes, + _combine_args, + _DimHint, + _process_dynamic_shapes, + _transform_shapes_for_default_dynamic, + _tree_map_with_path, +) +from torch.export.graph_signature import CustomObjArgument +from torch.fx.experimental import _config as config +from torch.fx.experimental.symbolic_shapes import ( + 
_find_user_code_frame, + _suggest_fixes_for_data_dependent_error_non_strict, + ConstraintViolationError, + DimDynamic, + EqualityConstraint, + GuardOnDataDependentSymNode, + ShapeEnv, + StatelessSymbolicContext, + ValueRanges, +) +from torch.utils._pytree import ( + GetAttrKey, + KeyPath, + MappingKey, + SequenceKey, + tree_map_with_path, +) + + +if TYPE_CHECKING: + from sympy import Symbol + + +log = logging.getLogger(__name__) + + +def key_path_to_source(kp: KeyPath) -> Source: + """ + Given a key path, return the source for the key path. + """ + source: Source = LocalSource("args") + for k in kp: + if isinstance(k, SequenceKey): + source = GetItemSource(source, k.idx) + elif isinstance(k, MappingKey): + source = GetItemSource(source, k.key) + elif isinstance(k, GetAttrKey): + source = AttrSource(source, k.name) + else: + raise ValueError(f"Unknown KeyEntry {k}") + + return source + + +def _is_constant_argument(t): + return t is None or isinstance(t, (int, float, bool, str)) + + +def fakify( + mode: FakeTensorMode, + kp: KeyPath, + t: Any, + t_constraints: Dict[int, Dict[int, Constraint]], + sources: Dict[Tuple[int, int], List[Source]], +): + source = key_path_to_source(kp) + if _is_constant_argument(t) or isinstance(t, torch.ScriptObject): + return t + + if not isinstance(t, torch.Tensor): + raise ValueError(f"Unsupported input type {type(t)}") + n_dims = len(t.shape) + symbolic_context = StatelessSymbolicContext( + dynamic_sizes=[DimDynamic.DYNAMIC] * n_dims, + constraint_sizes=[None] * n_dims, + ) + t_id = id(t) + assert mode.shape_env is not None + if t_id in t_constraints: + for i, constraint in t_constraints[t_id].items(): + symbolic_context.constraint_sizes[i] = constraint.constraint_range + src = TensorPropertySource(base=source, prop=TensorProperty.SIZE, idx=i) + sources[(t_id, i)].append(src) + mode.shape_env.source_name_to_debug_name[src.name()] = constraint.name # type: ignore[assignment] + fake = mode.from_tensor(t, source=source, 
symbolic_context=symbolic_context) + mode.shape_env.tracked_fakes.append(TrackedFake(fake, source, symbolic_context)) # type: ignore[union-attr] + return fake + + +def make_fake_inputs( + nn_module, + args, + kwargs, + dynamic_shapes, + _is_torch_jit_trace=False, + allow_complex_guards_as_runtime_asserts=False, +): + """ + Given an nn module, example inputs, and constraints, return a new fake mode, + fake inputs created in that mode whose dynamic shape dimensions are constrained + by the given ranges, and sources for pairs of dynamic shape dimensions that are + constrained to be equal. + """ + # TODO(avik): refactor Dynamo to avoid duplication of the following code + # between non-strict and strict. + # Specifically, here (non-strict) we do the following pre-tracing steps: + # - Fakify inputs. + # - Process input shape equalities. + # In strict, these steps are spread across multiple files: + # - output_graph.py fakifies inputs. + # - [post-tracing] guards.py processes input shape equalities. + + combined_args = _combine_args(nn_module, args, kwargs) + _check_dynamic_shapes(combined_args, dynamic_shapes) + transformed_dynamic_shapes = _transform_shapes_for_default_dynamic( + combined_args, dynamic_shapes + ) + constraints = _process_dynamic_shapes(combined_args, transformed_dynamic_shapes) + t_constraints: Dict[int, Dict[int, Constraint]] = defaultdict(dict) + for constraint in constraints: + t_constraints[constraint.t_id][constraint.dim] = constraint + + context = torch._guards.TracingContext.try_get() + if context is not None: + # This occurs when we are exporting within dynamo. There already exists + # a toplevel TracingContext with a fake mode, so we do not want to + # create another fake mode. 
+ fake_mode = context.fake_mode + elif not _is_torch_jit_trace: + code = nn_module.forward.__code__ + co_fields = { + "co_name": code.co_name, + "co_filename": code.co_filename, + "co_firstlineno": code.co_firstlineno, + } + fake_mode = FakeTensorMode( + shape_env=ShapeEnv( + tracked_fakes=[], + co_fields=co_fields, + prefer_deferred_runtime_asserts_over_guards=True, + allow_complex_guards_as_runtime_asserts=allow_complex_guards_as_runtime_asserts, + ), + allow_non_fake_inputs=True, + export=True, + ) + else: + fake_mode = FakeTensorMode( + shape_env=ShapeEnv( + tracked_fakes=[], + prefer_deferred_runtime_asserts_over_guards=True, + allow_complex_guards_as_runtime_asserts=allow_complex_guards_as_runtime_asserts, + ), + allow_non_fake_inputs=True, + ) + if fake_mode.shape_env is None or fake_mode.shape_env.tracked_fakes is None: + raise ValueError( + "Detected fake_mode does not have a shape_env with tracked fakes. " + "If you constructed the module under a FakeTensorMode, " + "please initialize it like: FakeTensorMode(shape_env=ShapeEnv(tracked_fakes=[]))" + ) + + with fake_mode: + # FIXME(ycao) ScriptMethod doesn't have signature, I am using an empty one to unblock + if not _is_torch_jit_trace: + original_signature = inspect.signature(nn_module.forward) + else: + original_signature = None + sources: Dict[Tuple[int, int], List[Source]] = defaultdict(list) + fake_args, fake_kwargs = tree_map_with_path( + lambda kp, val: fakify(fake_mode, kp, val, t_constraints, sources), + (args, kwargs), + ) + + names: Dict[str, Tuple[int, int]] = {} + source_pairs: List[Tuple[Source, Source]] = [] + derived_equalities: List[Tuple[Source, Union[Source, Symbol], Callable]] = [] + phantom_symbols: Dict[str, Symbol] = {} + for constraint in constraints: + torch.export.dynamic_shapes._process_equalities( + constraint, + lambda t_id, dim: sources[(t_id, dim)], + fake_mode.shape_env, + names, + source_pairs, + derived_equalities, + phantom_symbols, + ) + + equalities_inputs = 
EqualityConstraint( + source_pairs=source_pairs, + derived_equalities=derived_equalities, + phantom_symbols=list(phantom_symbols.values()), + warn_only=False, + ) + return ( + fake_mode, + fake_args, + fake_kwargs, + equalities_inputs, + original_signature, + transformed_dynamic_shapes, + ) + + +def _flatten_dynamic_shapes( + combined_args: Dict[str, Any], + dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any]], +) -> List[Any]: + flat_shapes = [] + + def _tree_map_helper(path, t, shape): + nonlocal flat_shapes + flat_shapes.append(shape) + + _tree_map_with_path(_tree_map_helper, combined_args, dynamic_shapes) + return flat_shapes + + +def produce_guards_and_solve_constraints( + fake_mode: FakeTensorMode, + gm: torch.fx.GraphModule, + dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None], + equalities_inputs: EqualityConstraint, + original_signature: inspect.Signature, + _is_torch_jit_trace=False, +): + """ + Given a fake mode, sources pairs corresponding to equal dynamic shape dimensions, + and a graph module, produce guards on the fake mode's shape env (raising constraint + violations if any), solve (to suggest simplifications or fixes). + Dynamo already performs this, so this is for non-strict mode. 
+ + Additional inputs: + equalities_inputs: the equality constraints to use for guards + original_signature: the signature of the forward method + """ + shape_env = fake_mode.shape_env + assert shape_env is not None + assert shape_env.tracked_fakes is not None + + placeholders = [tf.fake for tf in shape_env.tracked_fakes] + sources = [tf.source for tf in shape_env.tracked_fakes] + input_contexts = [tf.symbolic_context for tf in shape_env.tracked_fakes] + constraint_violation_error = None + try: + shape_env.produce_guards( + placeholders, + sources, + input_contexts=input_contexts, + equalities_inputs=equalities_inputs, + ignore_static=False, + ) + except ConstraintViolationError as e: + constraint_violation_error = e + + shape_env.frozen = True + dim_constraints = shape_env.dim_constraints + if dim_constraints is None: + # Expected when shape_env.produce_guards throws an early constraint violation error. + # There is nothing to solve for in this case. + # TODO(avik): Maybe record the constraint violation error instead and replay later? 
+ assert constraint_violation_error + raise constraint_violation_error + dim_constraints.solve() + forced_specializations = dim_constraints.forced_specializations() + if not _is_torch_jit_trace: + msg = dim_constraints.prettify_results( + original_signature, + dynamic_shapes, + constraint_violation_error, + forced_specializations, + ) + else: + # FIXME(ycao): This is a hack to get around missing signature from ScriptMethod + msg = "dummy constraint violation message" + if constraint_violation_error: + constraint_violation_error.args = (constraint_violation_error.args[0] + msg,) + elif forced_specializations: + constraint_violation_error = ConstraintViolationError(msg) + if constraint_violation_error: + raise constraint_violation_error + + +def make_constraints( + fake_mode: FakeTensorMode, + gm: torch.fx.GraphModule, + combined_args: Dict[str, Any], + dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None], + num_lifted_inputs: int, +): + """ + Given a fake mode's shape env and user-specified dynamic shapes, + return the resulting range constraints and equality constraints. + + Additional args: + num_lifted_inputs: the number of non-user-input placeholder nodes in the graph + (used only to enumerate the user-input nodes) + """ + + shape_env = fake_mode.shape_env + assert shape_env is not None + inline_constraints = gm.meta.get("inline_constraints", []) + range_constraints = { + symbol: inline_constraints[symbol] for symbol in inline_constraints + } + if not dynamic_shapes: + return range_constraints + + # get individual dynamic shapes spec for each input + if not isinstance(dynamic_shapes, dict): + assert isinstance(dynamic_shapes, (tuple, list)) + combined_args = type(dynamic_shapes)(combined_args.values()) # type: ignore[assignment, misc] + flat_dynamic_shapes = _flatten_dynamic_shapes(combined_args, dynamic_shapes) + + # check number of shapes vs. 
number of inputs + num_placeholders = [node.op == "placeholder" for node in gm.graph.nodes].count(True) + assert len(flat_dynamic_shapes) == num_placeholders - num_lifted_inputs + + input_dims = defaultdict(list) + free_symbols = set() + for input_index, node in enumerate(gm.graph.nodes): + if input_index < num_lifted_inputs or node.op != "placeholder": + continue + if _is_constant_argument(node.meta["val"]) or isinstance( + node.meta["val"], CustomObjArgument + ): + continue + shape_spec = flat_dynamic_shapes[input_index - num_lifted_inputs] + for i, d in enumerate(node.meta["val"].shape): + if isinstance(d, torch.SymInt) and not d.node.expr.is_number: + # Look up the range constraint for the symbol corresponding to this shape dimension + # and store it indexed by the symbolic expression corresponding to it. + # NOTE(avik): Use node._expr instead of node.expr for the lookup here because + # we want the symbol, not its replacement, which could be an expression. Maybe + # there's a better way to do this, e.g., by (re)computing value ranges for expressions? + dim = shape_spec[i] if shape_spec else None + if dim is None or isinstance(dim, _DimHint): + range_constraints[d.node.expr] = shape_env.var_to_range[ + d.node._expr + ] + else: + range_constraints[d.node.expr] = ValueRanges( + lower=dim.min, upper=dim.max + ) + input_dims[d.node.expr].append(InputDim(input_name=node.name, dim=i)) + free_symbols.update(d.node.expr.free_symbols) + + for symbol in free_symbols: + if symbol not in range_constraints: + # Placeholders can have symbolic shapes that are derived expressions. + # The above code will record direct range constraints for them + # so that we can do runtime assertions. In addition, for serde checks + # we want to record range constraints for their root symbols. 
+ range_constraints[symbol] = shape_env.var_to_range[symbol] + + return range_constraints + + +def _gather_constant_attrs(m: torch.nn.Module) -> ConstantAttrMap: + """Search the module hierarchy, gathering up all tensor and ScriptObject constants. + + Returns a dictionary mapping hash(value) to the name of the constant. We + have to abuse `hash` here unfortunately, see: [ScriptObject hash]. + """ + constants = ConstantAttrMap() + buffers_parameters = set(m.buffers()) + buffers_parameters.update(m.parameters()) + + def inner(m: torch.nn.Module, prefix_atoms: List[str], constants): + for k, v in m.__dict__.items(): + if isinstance( + v, + ( + torch.Tensor, + torch.ScriptObject, + FakeScriptObject, + ), + ): + if v in buffers_parameters: + # filter out buffers and parameters, leaving only constants + continue + + fqn = ".".join(prefix_atoms + [k]) + constants.add(v, fqn) + for k, v in m.named_children(): + inner(v, prefix_atoms + [k], constants) + + inner(m, [], constants) + return constants + + +@contextlib.contextmanager +def _fakify_script_objects( + mod: torch.nn.Module, + args: Tuple[Any], + kwargs: Dict[Any, Any], + fake_mode: torch._subclasses.fake_tensor.FakeTensorMode, +): + # This context manager is used to fakify script objects into FakeScriptObject. + # Inputs: + # mod: the module to be exported, it (and its recursive submodules)'s script object attrs haven't been fakified. + # args, kwargs: the args and kwargs inputs for mod, script object inputs haven't been fakified. + # fake_mode: the fake mode to be used for fakifying script objects. It's the same mode that fakify input tensors. + # + # Returns: + # mod: the patched module, its (and its recursive submodules) script object attrs have been fakified. + # fake_args, fake_kwargs: new fakified args and kwargs. + # Script object inputs have been fakified. Don't touch the tensors. + # fake_constant_attrs: a new map from FakeScriptObject to the fqn of the original script object. 
+ # fake_to_real: a mapping between FakeScriptObject and the original script object in order to un-do the patching. + + constant_attrs: ConstantAttrMap = _gather_constant_attrs(mod) + assert not any( + isinstance(obj, FakeScriptObject) for obj in constant_attrs.values() + ), "Mod shouldn't contain any FakeScriptObject." + assert not pytree.tree_any( + lambda obj: isinstance(obj, FakeScriptObject), (args, kwargs) + ), "args and kwargs shouldn't contain any FakeScriptObject." + + patched_attr = {} + fake_constant_attrs = ConstantAttrMap() + fake_to_real = {} + + def _maybe_fakify_obj(obj): + fake_obj = torch._library.fake_class_registry.maybe_to_fake_obj(fake_mode, obj) + fake_to_real[fake_obj] = obj + return fake_obj + + def _leaf_mod_and_attr( + mod: torch.nn.Module, attr_fqn: str + ) -> Tuple[torch.nn.Module, str]: + *prefix_attr, last_attr = attr_fqn.split(".") + cur_mod = mod + for attr in prefix_attr: + cur_mod = getattr(cur_mod, attr) + return cur_mod, last_attr + + try: + for obj, fqns in constant_attrs.items(): + if isinstance(obj, torch.ScriptObject): + fake_script_obj = _maybe_fakify_obj(obj) + for fqn in fqns: + cur_mod, attr = _leaf_mod_and_attr(mod, fqn) + assert obj is getattr(cur_mod, attr) + setattr(cur_mod, attr, fake_script_obj) + fake_constant_attrs.add(fake_script_obj, fqn) + patched_attr[fqn] = obj + else: + for fqn in fqns: + fake_constant_attrs.add(obj, fqn) + + fake_args, fake_kwargs = pytree.tree_map_only( + torch.ScriptObject, _maybe_fakify_obj, (args, kwargs) + ) + yield (mod, fake_args, fake_kwargs, fake_constant_attrs, fake_to_real) + finally: + for fqn, orig_obj in patched_attr.items(): + cur_mod, attr = _leaf_mod_and_attr(mod, fqn) + setattr(cur_mod, attr, orig_obj) + + +class _NonStrictTorchFunctionHandler(torch.overrides.TorchFunctionMode): + """ + 1. Handles data-dependent errors raised by torch function calls in non-strict. + + Any data-dependent error is due to some condition on unbacked symints + that cannot be resolved. 
A mechanical way of fixing the error is to use + a torch._check() call to assert either that condition or its negation. + The handler suggests these options as code and points to the location + of the torch function call that raised the error as part of the error + message shown to the user, who can then simply select and copy-paste + a suggested fix at that location. + + NOTE: Not all data-dependent errors are raised by torch function calls. + In particular, conditions on unbacked symints can appear outside such + calls, and as such are not handled here. + + 2. Handles line-of-code logging for each torch function call in non-strict. + + Usage: TORCHEXPORT_EXTENDED_DEBUG_CURRENT_LOC=1 TORCH_LOGS="+export" ... + """ + + def __torch_function__(self, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + if log.isEnabledFor(logging.DEBUG) and config.extended_debug_current_loc: + frame = _find_user_code_frame() + if frame is not None: + log.debug( + "%s called at %s:%s in %s", + func.__qualname__, + frame.f_code.co_filename, + frame.f_lineno, + frame.f_code.co_name, + ) + try: + return func(*args, **kwargs) + except GuardOnDataDependentSymNode as e: + _suggest_fixes_for_data_dependent_error_non_strict(e) + raise diff --git a/janus/lib/python3.10/site-packages/torch/_export/pass_base.py b/janus/lib/python3.10/site-packages/torch/_export/pass_base.py new file mode 100644 index 0000000000000000000000000000000000000000..55612c98ce8d51d95999f0f4e124f3479070deb1 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/pass_base.py @@ -0,0 +1,441 @@ +# mypy: allow-untyped-defs +import operator +import traceback +import typing +from contextlib import nullcontext +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union + +import torch +from functorch.experimental.control_flow import _unstack_pytree +from torch import fx +from torch._dispatch.python import enable_python_dispatcher +from torch._export.pass_infra.node_metadata import 
from torch._export.pass_infra.node_metadata import NodeMetadata
from torch._export.pass_infra.proxy_value import ProxyValue
from torch._subclasses import FakeTensor, UnsupportedFakeTensorException
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx import traceback as fx_traceback
from torch.fx.experimental.proxy_tensor import PythonKeyTracer
from torch.fx.graph import CodeGen
from torch.fx.passes.infra.pass_base import PassBase, PassResult
from torch.fx.passes.shape_prop import _extract_tensor_metadata, TensorMetadata
from torch.utils import _pytree as pytree
from torch.fx.experimental.symbolic_shapes import PropagateUnbackedSymInts, compute_unbacked_bindings


__all__ = ["_ExportPassBaseDeprecatedDoNotUse"]


# Loose type aliases used throughout this module; everything flowing through
# the interpreter is duck-typed (FakeTensor, SymInt, ProxyValue, ...).
Argument = Any
Value = Any
Fn = Callable[..., Any]
PassType = Callable[[torch.fx.GraphModule], Optional[PassResult]]


# torch.sym_* free functions that must be routed through call_sym() rather
# than call_operator() (they are not OpOverloads).
_TORCH_SYM_OPS: Set[Callable] = {
    torch.sym_int,
    torch.sym_float,
    torch.sym_ite,
    torch.sym_max,
    torch.sym_min,
    torch.sym_not,
    torch.sym_sqrt,
}


class ExportPassBaseError(RuntimeError):
    """Raised when a pass built on _ExportPassBaseDeprecatedDoNotUse hits an unsupported construct."""
    pass


class _ExportPassBaseDeprecatedDoNotUse(PassBase):
    """
    Interpreter-based pass class to help users maintain the IR spec while writing
    transformations.

    The pass re-interprets the input GraphModule node by node (ExportInterpreter)
    while simultaneously re-tracing an equivalent new graph (ExportTracer).
    Subclasses override the call_* hooks (call_operator, call_sym, call_cond,
    call_map, call_getitem, placeholder, output) to transform individual nodes;
    the default hooks simply re-emit each node unchanged via _fx().
    """

    @staticmethod
    def _create_dummy_node_metadata():
        # Minimal metadata carrying only a stack trace, for nodes synthesized
        # by a pass that have no original node to copy metadata from.
        return NodeMetadata({"stack_trace": "".join(traceback.format_stack(limit=1))})


    class ExportTracer(PythonKeyTracer):
        """Tracer that builds the output graph as the interpreter walks the input graph."""

        def __init__(self, callback: "_ExportPassBaseDeprecatedDoNotUse", codegen: CodeGen) -> None:
            super().__init__()
            self.callback = callback
            self.root = torch.nn.Module()
            self.graph = torch.fx.Graph()
            # Preserve the input graph's codegen (e.g. pytree flatten/unflatten).
            self.graph.set_codegen(codegen)
            self.tensor_attrs: Dict[str, torch.Tensor] = {}  # type: ignore[assignment]
            self.fake_tensor_mode: Optional[FakeTensorMode] = None
            self.submodules: Dict[torch.nn.Module, str] = {}

        def trace(self) -> None:  # type: ignore[override]
            # This tracer is only driven indirectly through create_proxy/create_arg.
            raise ExportPassBaseError("ExportTracer doesn't support trace().")

        def create_arg(self, a: Argument) -> torch.fx.Node:
            """Convert a value into a graph argument, registering submodules and constant tensors."""
            if isinstance(a, torch.nn.Module):
                if a not in self.submodules:
                    name_submodule = f"submodule_{len(self.submodules)}"
                    self.root.add_module(name_submodule, a)
                    self.submodules[a] = name_submodule
            elif isinstance(a, FakeTensor):
                # Only FakeTensors backed by a real constant can become graph attrs.
                if not hasattr(a, "constant") or a.constant is None:
                    raise ExportPassBaseError(f"Cannot add {a} to graph.")
                a = a.constant
            node = super().create_arg(a)
            if (
                isinstance(a, torch.Tensor)
                and isinstance(node, torch.fx.Node)
                and node.op == "get_attr"
            ):
                # A tensor lifted to a get_attr node: attach metadata and
                # notify the pass via the on_attr() hook.
                self.set_metadata(node, a)
                self.callback.on_attr(ProxyValue(a, node))
            return node

        def set_metadata(
            self, node: torch.fx.Node, value: Argument,
        ) -> None:
            """Populate node.meta["val"] / node.meta["tensor_meta"] from the computed value."""
            # propagate the fake tensor or sym nodes
            def make_val(
                x: Argument,
            ) -> Union[FakeTensor, torch.SymInt, torch.SymFloat, torch.SymBool, int, float, bool, str, None]:
                if isinstance(x, FakeTensor):
                    return x
                elif isinstance(x, torch.Tensor):
                    if x.is_quantized:
                        # TODO (tmanlaibaatar) properly support Quantized FakeTensor
                        x = torch.dequantize(x)

                    try:
                        assert self.fake_tensor_mode is not None
                        # TODO we should allocate static shapes
                        # for param/buffer values
                        if isinstance(x, torch.nn.Parameter):
                            fake_tensor = self.fake_tensor_mode.from_tensor(
                                x, static_shapes=True
                            )
                        else:
                            fake_tensor = self.fake_tensor_mode.from_tensor(x)
                    except UnsupportedFakeTensorException:
                        # TODO: This is just a workaround to get over the
                        # x.as_subclass error
                        print(
                            "Fakeifying a Tensor subclass is not supported \
                            right now. Instead a TensorMetadata is used."
                        )
                        fake_tensor = None
                    return fake_tensor
                elif isinstance(x, (torch.SymInt, torch.SymFloat, torch.SymBool, int, float, bool, str)):
                    return x
                else:
                    return None

            node.meta["val"] = pytree.tree_map(make_val, value)

            # Set the tensor_metadata for values that do not have a corresponding FakeTensor
            def make_tensor_meta(x: Argument) -> Optional[TensorMetadata]:
                if not isinstance(x, FakeTensor) and isinstance(x, torch.Tensor):
                    if x.is_quantized:
                        # TODO (tmanlaibaatar) properly support Quantized FakeTensor
                        x = torch.dequantize(x)

                    try:
                        assert self.fake_tensor_mode is not None
                        _ = self.fake_tensor_mode.from_tensor(x)
                        tensor_meta = None
                    except UnsupportedFakeTensorException:
                        # TODO: This is just a workaround to get over the
                        # x.as_subclass error
                        tensor_meta = _extract_tensor_metadata(x)
                    return tensor_meta
                else:
                    return None

            node.meta["tensor_meta"] = pytree.tree_map(make_tensor_meta, value)

    class ExportInterpreter(fx.Interpreter):
        """Interpreter that walks the input graph and dispatches each node to the pass's hooks."""

        def __init__(self, callback: "_ExportPassBaseDeprecatedDoNotUse", gm: fx.GraphModule) -> None:
            super().__init__(gm)
            self.callback = callback
            # Currently-executing node; updated by run_node() so hooks can read its meta.
            self.node: torch.fx.Node = next(iter(gm.graph.nodes))

        def placeholder(
            self,
            target: str,  # type: ignore[override]
            args: Tuple[Argument, ...],
            kwargs: Dict[str, Argument],
        ) -> ProxyValue:
            arg = super().placeholder(target, args, kwargs)
            return self.callback.placeholder(target, arg, NodeMetadata(self.node.meta))

        def output(
            self,
            target: torch.fx.node.Target,
            args: Tuple[Argument, ...],
            kwargs: Dict[str, Argument],
        ) -> ProxyValue:
            # NOTE: returns the underlying .data of the ProxyValue so
            # fx.Interpreter.run() hands back plain values, not ProxyValues.
            return self.callback.output(args[0], NodeMetadata(self.node.meta)).data

        def call_function(
            self,
            target: torch.fx.node.Target,
            args: Tuple[Argument, ...],
            kwargs: Dict[str, Argument],
        ) -> ProxyValue:
            """Route a call_function node to the matching pass hook by target kind."""
            meta = NodeMetadata(self.node.meta)

            if target == operator.getitem:
                value, key = args
                return self.callback.call_getitem(value, key, meta)
            elif getattr(target, "__module__", None) in {"_operator", "math"}:
                # Python-level operator/math functions on symbolic values.
                assert callable(target)
                return self.callback.call_sym(target, args, meta)
            elif target in _TORCH_SYM_OPS:
                assert callable(target)
                return self.callback.call_sym(target, args, meta)
            elif isinstance(target, (torch._ops.OpOverload, torch._ops.OpOverloadPacket)):
                return self.callback.call_operator(
                    target,
                    args,
                    kwargs,
                    meta,
                )
            elif target == torch.ops.higher_order.cond:
                pred, true_fn, false_fn, inputs = args
                return self.callback.call_cond(pred, true_fn, false_fn, inputs, meta)
            elif target == torch.ops.higher_order.map_impl:
                f, mapped_args, operands = args  # type: ignore[assignment]
                return self.callback.call_map(f, mapped_args, operands, meta)
            # For other unregistered HigherOrderOps, just interpret them blindly
            elif isinstance(target, torch._ops.HigherOrderOperator):
                return self.callback._fx(
                    "call_function",
                    target,
                    args,
                    kwargs,
                    meta,
                )
            else:
                raise ExportPassBaseError(f"Unsupported target type: {target}")

        def get_attr(
            self, target: str, args: Tuple[Argument, ...], kwargs: Dict[str, Argument]  # type: ignore[override]
        ) -> Argument:
            return super().get_attr(target, args, kwargs)

        def call_module(
            self,
            target: torch.fx.node.Target,
            args: Tuple[Argument, ...],
            kwargs: Dict[str, Argument],
        ) -> None:
            # Export IR is flat; module calls are not part of the spec.
            raise ExportPassBaseError("call_module is not supported.")

        def call_method(
            self, target: str, args: Tuple[Argument, ...], kwargs: Dict[str, Argument]  # type: ignore[override]
        ) -> None:
            raise ExportPassBaseError("call_method is not supported.")

        def run_node(self, n: torch.fx.Node) -> Argument:
            # Record the current node (and a debug string) before dispatching.
            self.node = n
            self.callback.node_debug_str = n.format_node()
            return super().run_node(n)

    def __init__(self) -> None:
        # Interpreter used by _fx() to actually execute each op on real/fake data;
        # PropagateUnbackedSymInts also renames unbacked symbols consistently.
        self.interpreter = PropagateUnbackedSymInts(
            torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph())
        )
        self.tracer = self.ExportTracer(self, CodeGen())
        self.fake_tensor_mode: Optional[FakeTensorMode] = None
        # Guard checked in call(); subclasses must invoke super().__init__().
        self._initialized = True
        self.node_debug_str: typing.Optional[str] = None

    def _fx(
        self,
        kind: str,
        target: torch.fx.node.Target,
        args: Tuple[Argument, ...],
        kwargs: Dict[str, Argument],
        meta: NodeMetadata,
    ) -> ProxyValue:
        """Execute (kind, target) on the unwrapped data AND record it in the new graph."""
        args_data, kwargs_data = pytree.tree_map_only(
            ProxyValue, lambda x: x.data, (args, kwargs)
        )
        res_data = getattr(self.interpreter, kind)(target, args_data, kwargs_data)
        args_proxy, kwargs_proxy = pytree.tree_map_only(
            ProxyValue, lambda x: x.proxy, (args, kwargs)
        )

        name = None
        if isinstance(target, torch._ops.OpOverload):
            # Name the node after the op packet for readable graphs.
            name = self.tracer.graph._target_to_str(target.overloadpacket.__name__)

        res_proxy = self.tracer.create_proxy(kind, target, args_proxy, kwargs_proxy, name=name)
        res_proxy.node.meta.update(meta.data)
        if self.fake_tensor_mode and (shape_env := self.fake_tensor_mode.shape_env):
            # Record where freshly-created unbacked symbols live in the output.
            if symbol_to_path := compute_unbacked_bindings(shape_env, res_data):
                res_proxy.node.meta["unbacked_bindings"] = symbol_to_path
        self.tracer.set_metadata(res_proxy.node, res_data)
        return ProxyValue(res_data, res_proxy)

    def inputs(self, graph_module: torch.fx.GraphModule) -> List[Argument]:
        """Reconstruct example inputs for the graph from placeholder metadata."""
        # TODO(angelayi): Update this with what we decide to do for metadata in
        # the exported graph module
        if (args := graph_module.meta.get("args", None)) is not None:
            return list(args)

        def extract_input(node: torch.fx.Node) -> Optional[FakeTensor]:
            if "val" in node.meta:
                fake = node.meta["val"]
                if hasattr(fake, "constant") and fake.constant is not None:
                    return fake.constant
                return fake
            elif tensor_meta := node.meta.get("tensor_meta"):
                assert self.fake_tensor_mode is not None
                # Rebuild a FakeTensor from the recorded TensorMetadata.
                return FakeTensor(
                    self.fake_tensor_mode,
                    torch.empty(
                        tensor_meta.shape,
                        dtype=tensor_meta.dtype,
                        device="meta",
                        requires_grad=tensor_meta.requires_grad,
                        memory_format=tensor_meta.memory_format,
                    ),
                    torch.device("cpu"),
                )
            elif len(node.users) == 0:
                # Unused placeholder: no value needed.
                return None
            raise ExportPassBaseError(
                f"Cannot construct an input for graph module: {graph_module}.",
            )

        return [
            extract_input(node)
            for node in graph_module.graph.nodes
            if node.op == "placeholder"
        ]

    def on_attr(self, attr: ProxyValue) -> None:
        # Hook invoked when a constant tensor is lifted to a get_attr node;
        # default is a no-op for subclasses to override.
        pass

    def placeholder(self, name: str, arg: Argument, meta: NodeMetadata) -> ProxyValue:
        # Re-emit a placeholder in the new graph, carrying over metadata.
        arg_proxy = self.tracer.create_proxy("placeholder", name, (), {})
        arg_proxy.node.meta = meta.data
        self.tracer.set_metadata(arg_proxy.node, arg)
        return ProxyValue(arg, arg_proxy)

    def call_operator(
        self,
        op,
        args: Tuple[Argument, ...],
        kwargs: Dict[str, Argument],
        meta: NodeMetadata,
    ) -> ProxyValue:
        # Default: re-emit the aten op unchanged.
        return self._fx("call_function", op, args, kwargs, meta)

    def call_sym(
        self,
        target: Fn,
        args: Tuple[Argument, ...],
        meta: NodeMetadata,
    ) -> ProxyValue:
        # Default: re-emit the symbolic (operator/math/torch.sym_*) call unchanged.
        return self._fx("call_function", target, args, {}, meta)

    def call_cond(
        self,
        pred: ProxyValue,
        true_fn: torch.fx.GraphModule,
        false_fn: torch.fx.GraphModule,
        inputs: List[Argument],
        meta: NodeMetadata,
    ) -> ProxyValue:
        # Recursively run the pass over both branches, then re-emit cond.
        true_branch = self.call_submodule(true_fn, tuple(inputs))
        false_branch = self.call_submodule(false_fn, tuple(inputs))
        assert true_branch is not None
        assert false_branch is not None
        return self._fx(
            "call_function",
            torch.ops.higher_order.cond,
            (pred, true_branch.graph_module, false_branch.graph_module, list(inputs)),
            {},
            meta,
        )

    def call_map(
        self,
        f: torch.fx.GraphModule,
        mapped_args: List[ProxyValue],
        operands: List[ProxyValue],
        meta: NodeMetadata,
    ) -> ProxyValue:
        # Run the pass over the mapped body using the first slice of the
        # mapped args as example inputs, then re-emit map_impl.
        xs = _unstack_pytree([arg.data for arg in mapped_args])[0]
        f_branch = self.call_submodule(f, tuple(xs + [arg.data for arg in operands]))
        assert f_branch is not None
        return self._fx(
            "call_function",
            torch.ops.higher_order.map_impl,
            (f_branch.graph_module, mapped_args, operands),
            {},
            meta,
        )

    def call_getitem(
        self, value: ProxyValue, key: int, meta: NodeMetadata
    ) -> ProxyValue:
        return self._fx("call_function", operator.getitem, (value, key), {}, meta)

    def output(self, results: List[Argument], meta: NodeMetadata) -> ProxyValue:
        return self._fx("output", "output", (results,), {}, meta)

    def call_submodule(
        self, graph_module: fx.GraphModule, inputs: Tuple[Argument, ...]
    ) -> PassResult:
        """Re-interpret one GraphModule, swapping in fresh tracer/interpreter state and restoring it after."""
        prev_tracer, self.tracer = self.tracer, self.ExportTracer(
            self, graph_module.graph._codegen
        )
        self.tracer.fake_tensor_mode = prev_tracer.fake_tensor_mode
        interpreter = self.ExportInterpreter(self, graph_module)
        prev_interpreter, self.interpreter = self.interpreter, torch.fx.Interpreter(  # type: ignore[assignment]
            torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph())
        )
        inputs_data = pytree.tree_map_only(ProxyValue, lambda x: x.data, inputs)
        with fx_traceback.preserve_node_meta():
            interpreter.run(*inputs_data)

        new_graph_module = torch.fx.GraphModule(self.tracer.root, self.tracer.graph)

        # Restore outer state (supports nested call_submodule for cond/map bodies).
        self.tracer = prev_tracer
        self.interpreter = prev_interpreter
        return PassResult(
            new_graph_module,
            True,
        )

    def call(self, graph_module: fx.GraphModule) -> PassResult:
        """PassBase entry point: set up fake-tensor/dispatcher modes and rerun the graph."""
        if not getattr(self, "_initialized", False):
            raise ExportPassBaseError(
                "ExportPass is not initialized with __init__().",
            )

        inputs = self.inputs(graph_module)

        # All FakeTensor inputs must share a single FakeTensorMode.
        fake_tensor_mode = None
        for i in inputs:
            if isinstance(i, FakeTensor):
                assert (
                    fake_tensor_mode is None or fake_tensor_mode is i.fake_mode
                ), "Multiple fake tensor mode detected."
                fake_tensor_mode = i.fake_mode
        if fake_tensor_mode is None:
            # No fake inputs: create a mode for metadata, but don't enter it.
            self.tracer.fake_tensor_mode = FakeTensorMode(allow_non_fake_inputs=True)
            fake_tensor_mode = nullcontext()  # type: ignore[assignment]
            dispatcher_mode = nullcontext()  # type: ignore[assignment]
        else:
            fake_tensor_mode.allow_non_fake_inputs = True
            self.tracer.fake_tensor_mode = fake_tensor_mode
            dispatcher_mode = enable_python_dispatcher()  # type: ignore[assignment]
        self.fake_tensor_mode = self.tracer.fake_tensor_mode

        with fake_tensor_mode, dispatcher_mode:  # type: ignore[assignment, union-attr]
            result = self.call_submodule(graph_module, tuple(inputs))

        return result
# mypy: allow-untyped-defs
# pyre-strict
from typing import Union

import torch


class ProxyValue:
    """Pairs a concrete runtime value with the fx Proxy or Node that produced it."""

    # pyre-ignore
    def __init__(self, data, proxy: Union[torch.fx.Proxy, torch.fx.Node]):
        # pyre-ignore
        self.data = data
        self.proxy_or_node = proxy

    @property
    def node(self) -> torch.fx.Node:
        """The underlying fx.Node, unwrapping a Proxy if that is what was attached."""
        backing = self.proxy_or_node
        if isinstance(backing, torch.fx.Node):
            return backing
        assert isinstance(backing, torch.fx.Proxy)
        return backing.node

    @property
    def proxy(self) -> torch.fx.Proxy:
        """The attached fx.Proxy; raises if only a bare Node is available."""
        if isinstance(self.proxy_or_node, torch.fx.Proxy):
            return self.proxy_or_node
        raise RuntimeError(
            f"ProxyValue doesn't have attached Proxy object. Node: {self.proxy_or_node.format_node()}"
        )

    def to_tensor(self) -> torch.Tensor:
        """Return the wrapped value, asserting it is a Tensor."""
        assert isinstance(self.data, torch.Tensor)
        return self.data

    def is_tensor(self) -> bool:
        """True when the wrapped value is a Tensor."""
        return isinstance(self.data, torch.Tensor)

    # pyre-ignore
    def __iter__(self):
        # Delegate iteration to the wrapped value.
        yield from self.data

    def __bool__(self) -> bool:
        # Delegate truthiness to the wrapped value.
        return bool(self.data)
b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6a623f3851ef746555ed74fcb55bbc7540df4fe Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f69980de89e4c5f64000c4f121c29616b6138267 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/constant_folding.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/constant_folding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aff2dd70293d8e6c7f3e58436d578e80be046313 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/constant_folding.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74c4279daca43c80e47375d89e52468c7c26b516 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constants_pass.cpython-310.pyc 
b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constants_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..405706b3f47672b4d74d7f69f5213b5356ead339 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constants_pass.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..318c2b59e9001fc64aae8857ae0da6f19e9fc1a4 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_autocast_with_hop_pass.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_autocast_with_hop_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec55ee647ffe139758c48e36db9480eb374b1484 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_autocast_with_hop_pass.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_quantized_ops_with_standard_ops_pass.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_quantized_ops_with_standard_ops_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e82636683b9395728ff18640b179e3390068d77a Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_quantized_ops_with_standard_ops_pass.cpython-310.pyc differ diff --git 
a/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_set_grad_with_hop_pass.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_set_grad_with_hop_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb66856b614559a054df6ef22c40d1ccf8e4762c Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_set_grad_with_hop_pass.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52bd15ccf14aa99c985129d664ef3ac5357efb76 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_with_hop_pass_util.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_with_hop_pass_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81cc7e5b8600a8b48a944b9d87d124113e38edda Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_with_hop_pass_util.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/_node_metadata_hook.py b/janus/lib/python3.10/site-packages/torch/_export/passes/_node_metadata_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..3dd87b546da8df08522f1c237bab44e9668b4b47 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/passes/_node_metadata_hook.py @@ -0,0 +1,80 @@ +# mypy: allow-untyped-defs +import contextlib + +import torch +from 
torch.fx.graph_module import GraphModule + + +_EMPTY_NN_MODULE_STACK_KEY = "_empty_nn_module_stack_from_metadata_hook" + + +def _node_metadata_hook(node: torch.fx.Node, stack_trace: str) -> None: + """ + Hook for adding the appropriate metadata to nodes that are created during a + pass using graph.create_node. An example of how to use it: + + ``` + with _set_node_metadata_hook(gm, + functools.partial(_node_metadata_hook, stack_trace="file") + ): + pass(gm) + ``` + + This hook should not work for all generic cases -- specifically it assumes + that nodes being added are only call_function nodes, and copies over the + first argument node's nn_module_stack. + """ + assert node.op == "call_function" and callable(node.target) + + arg_meta = [arg.meta for arg in node.args if isinstance(arg, torch.fx.Node)] + assert len(arg_meta) >= 1 + arg_meta = arg_meta[0] + + if ( + isinstance(node.target, torch._ops.OpOverload) + and len(node.target._schema.returns) == 0 + ): + node.meta["val"] = None + else: + fake_args = [ + arg.meta["val"] if isinstance(arg, torch.fx.Node) else arg + for arg in node.args + ] + fake_res = node.target(*fake_args) + node.meta["val"] = fake_res + + node.meta["stack_trace"] = stack_trace + node.meta["nn_module_stack"] = arg_meta.get( + "nn_module_stack", + { + _EMPTY_NN_MODULE_STACK_KEY: ( + _EMPTY_NN_MODULE_STACK_KEY, + _EMPTY_NN_MODULE_STACK_KEY, + ) + }, + ) + node.meta["torch_fn"] = ( + f"{node.target.__name__}_0", + f"{node.target.__class__.__name__}.{node.target.__name__}", + ) + + +@contextlib.contextmanager +def _set_node_metadata_hook(gm: torch.fx.GraphModule, f): + """ + Takes a callable which will be called after we create a new node. The + callable takes the newly created node as input and returns None. + """ + assert callable(f), "node_metadata_hook must be a callable." 
    # Add the hook to all submodules
    for m in gm.modules():
        if isinstance(m, GraphModule):
            m._register_create_node_hook(f)
    try:
        yield
    finally:
        # Restore hook for all submodules
        for m in gm.modules():
            if isinstance(m, GraphModule):
                m._unregister_create_node_hook(f)


# mypy: allow-untyped-defs
import math
import operator
import traceback
from functools import partial
from typing import Callable, Dict, List, NamedTuple, Set

import sympy

import torch
import torch.fx
from torch.utils._sympy.value_ranges import ValueRanges
from torch.utils._sympy.numbers import int_oo
from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
from torch.fx.passes.infra.pass_base import PassBase, PassResult

__all__ = ["InputDim"]


class InputDim(NamedTuple):
    # Identifies one dimension of one named graph input.
    input_name: str
    dim: int


def _convert_to_int(val):
    # Convert simple sympy Integers into concrete int
    # (sympy/int_oo infinities map to math.inf so they compare with floats).
    if val in (sympy.oo, int_oo):
        return math.inf
    if val in (-sympy.oo, -int_oo):
        return -math.inf
    if isinstance(val, sympy.Integer):
        return int(val)
    raise RuntimeError(
        "Export constraints cannot be non-integer expressions"
    )


def _convert_range_to_int(range: ValueRanges):
    """Convert a sympy ValueRanges into a concrete (min, max) pair of int/inf."""
    assert isinstance(range, ValueRanges)
    min_val = _convert_to_int(range.lower)
    max_val = _convert_to_int(range.upper)
    return min_val, max_val


class _AddRuntimeAssertionsForInlineConstraintsPass(PassBase):
    """Insert aten._assert_async checks for unbacked-symint inline range constraints."""

    def __init__(
        self,
        range_constraints: Dict[sympy.Symbol, ValueRanges],
    ):
        super().__init__()
        self.range_constraints: Dict[sympy.Symbol, ValueRanges] = range_constraints
        # Symbols already asserted, so each unbacked symbol is checked once.
        self._asserts_generated_unbacked_symbols: Set[sympy.Symbol] = set()
        # Number of assertions inserted; call() bails out when it stays 0.
        self.counter = 0

    def _assert_range_constraint(self, node, lower, upper, assert_msg):
        # Emit one assert per finite bound of [lower, upper] on `node`'s value.
        last_node = node
        if lower > -math.inf:
            last_node = self._insert_assert_async(last_node, operator.ge, node, lower, assert_msg)

        if upper < math.inf:
            last_node = self._insert_assert_async(last_node, operator.le, node, upper, assert_msg)

    def _insert_assert_async(self, last_node, op, lower, upper, assert_msg):
        """
        Inserts assert_async call_function nodes in the graph. This function is
        called **during** the interpreter-based pass.

        NOTE(review): despite the names, `lower`/`upper` here are really the two
        operands of `op` — callers pass (value_node, bound); consider renaming.
        """
        self.counter += 1
        graph = last_node.graph
        # cmp -> scalar_tensor(cmp) -> _assert_async.msg, chained after last_node.
        with graph.inserting_after(last_node):
            cmp = graph.call_function(op, (lower, upper), {})
        with graph.inserting_after(cmp):
            cmp_tensor = graph.call_function(torch.ops.aten.scalar_tensor.default, (cmp,), {})
        with graph.inserting_after(cmp_tensor):
            assert_async = graph.call_function(
                torch.ops.aten._assert_async.msg,
                (cmp_tensor, assert_msg),
                {},
            )
        return assert_async

    def call(self, graph_module) -> PassResult:
        """Walk every submodule graph and attach range asserts to unbacked symint values."""
        self.existing_inline_assertions = _get_existing_inline_assertions(
            graph_module, self.range_constraints
        )

        for module in graph_module.modules():
            if not isinstance(module, torch.fx.GraphModule):
                continue
            for node in module.graph.nodes:
                if node.op != "call_function":
                    continue
                if "val" not in node.meta:
                    continue

                val = node.meta["val"]
                # In general, we may have to deal the case such as: ret[1].shape[0].
                # We need first find out what symbols require assertion, then we need to follow the path
                # from ret to the symbol, construct the proxies along the way and construct the messages
                # piece-wise at the same time.
                #
                # We use post-order traversal to collect all the proxies callbacks needed, construct
                # the error message callbacks, and at the top-level traversal tree we execute all the callbacks.
                # We need the callbacks because, in order to call the function to create a proxy for shape[0], we
                # need the proxy for shape, which further requires the proxy for ret[1], etc.

                def add_assertions(val):
                    call_backs: List[Callable] = []
                    messages: List[str] = []
                    if isinstance(val, (torch.SymInt, torch.SymFloat, torch.SymBool)):
                        symbol = val.node.expr
                        if symbol in self.existing_inline_assertions:
                            return call_backs, messages
                        if isinstance(symbol, sympy.Symbol) and free_unbacked_symbols(symbol):
                            if symbol in self._asserts_generated_unbacked_symbols:
                                return call_backs, messages
                            # We only care about unbacked symints for these inline
                            # constraints, which are prefixed with 'u'
                            constraint = self.range_constraints[symbol]
                            min_val, max_val = _convert_range_to_int(constraint)
                            assert_msg = f" is outside of inline constraint [{min_val}, {max_val}]."
                            call_backs.append(
                                partial(self._assert_range_constraint, lower=min_val, upper=max_val)
                            )
                            messages.append(assert_msg)
                            self._asserts_generated_unbacked_symbols.add(symbol)

                    elif isinstance(val, torch.Tensor):
                        # Recurse into each dimension; wrap the dim's callback so
                        # it first materializes a sym_size.int node for that dim.
                        for i, sym in enumerate(val.shape):
                            cbs, msgs = add_assertions(sym)
                            for cb, msg in zip(cbs, msgs):
                                def sym_size_cb(node, assert_msg, dim):
                                    with node.graph.inserting_after(node):
                                        dim_node = module.graph.call_function(
                                            torch.ops.aten.sym_size.int,
                                            (node, dim),
                                            {},
                                        )
                                    cb(node=dim_node, assert_msg=assert_msg)
                                call_backs.append(partial(sym_size_cb, dim=i))
                                messages.append(f".shape[{i}]" + msg)
                    return call_backs, messages

                callbacks, messages = add_assertions(val)
                for cb, msg in zip(callbacks, messages):
                    cb(node=node, assert_msg=f"{node}" + msg)

            module.recompile()

        # Sometimes this pass would return a wrong graph where we have mismatched
        # node names in signature. Before we fix it, let's just skip it.
        if self.counter == 0 and type(self) is _AddRuntimeAssertionsForInlineConstraintsPass:
            return PassResult(graph_module, False)

        # Populate the stack trace with dummy vals to respect IR
        for node in graph_module.graph.nodes:
            if not node.meta.get("stack_trace", None) and node.op not in ["placeholder", "output"]:
                node.meta["stack_trace"] = "".join(traceback.format_stack(limit=1))
        return PassResult(graph_module, True)


def _get_existing_inline_assertions(
    graph_module: torch.fx.GraphModule,
    range_constraints: Dict[sympy.Symbol, ValueRanges],
) -> Dict[sympy.Symbol, ValueRanges]:
    """Scan for already-present _assert_scalar checks and return the ranges they enforce."""
    existing_inline_assertions: Dict[sympy.Symbol, ValueRanges] = {}

    for module in graph_module.modules():
        if not isinstance(module, torch.fx.GraphModule):
            continue

        # Find all the existing inline assertions. They will look something like:
        # %_local_scalar_dense = call_function[target=torch.ops.aten._local_scalar_dense.default](args = (%arg1_1,), kwargs = {})
        # %ge = call_function[target=operator.ge](args = (%_local_scalar_dense, 0), kwargs = {})
        # %_assert_scalar = call_function[target=torch.ops.aten._assert_scalar.default](args = (%scalar_tensor, "..."), kwargs = {})
        for node in module.graph.nodes:
            if node.target != torch.ops.aten._assert_scalar.default:
                continue

            compare_arg = node.args[0]
            if not (
                isinstance(compare_arg, torch.fx.Node) and
                compare_arg.op == "call_function" and
                compare_arg.target in (operator.le, operator.ge) and
                len(compare_arg.args) == 2
            ):
                continue

            compare_op = compare_arg.target
            lhs, rhs = compare_arg.args

            def maybe_get_symint(x):
                # Map an fx.Node carrying a SymInt "val" to its sympy expression.
                if (
                    isinstance(x, torch.fx.Node) and
                    "val" in x.meta and
                    isinstance(x.meta["val"], torch.SymInt)
                ):
                    return x.meta["val"].node.expr
                return x

            lhs = maybe_get_symint(lhs)
            rhs = maybe_get_symint(rhs)

            # Normalize ge into le so only one comparison shape remains.
            if compare_op == operator.ge:
                lhs, rhs = rhs, lhs

            if isinstance(lhs, sympy.Symbol) and isinstance(rhs, int):
                symint = lhs
                scalar = rhs
            elif isinstance(rhs, sympy.Symbol) and isinstance(lhs, int):
                symint = rhs
                scalar = lhs
            else:
                continue

            if symint not in range_constraints:
                raise RuntimeError(f"Unable to find symint {symint} in {range_constraints}")

            previous_range = existing_inline_assertions.get(symint, ValueRanges(-math.inf, math.inf))

            # symint <= scalar gives an upper bound; scalar <= symint a lower one.
            if symint is lhs:
                bounds = ValueRanges(-math.inf, scalar)
            else:
                bounds = ValueRanges(scalar, math.inf)
            existing_inline_assertions[symint] = previous_range & bounds

    return existing_inline_assertions
# mypy: allow-untyped-defs
import operator

import torch
from torch.export.exported_program import ConstantArgument, TensorArgument
from torch.fx.passes.infra.pass_base import PassBase, PassResult


__all__ = ["CollectTracepointsPass"]


class CollectTracepointsPass(PassBase):
    """
    Collects "_export_tracepoint" markers into module-call signatures: records
    each marked submodule call's input/output specs into ``self.specs`` and then
    removes the tracepoint nodes from the graph.
    """

    def __init__(self, specs, sig) -> None:
        super().__init__()
        # specs: mapping from module-call path to a signature object with
        #   .inputs / .outputs lists to append ArgumentSpecs to.
        # sig: graph signature supporting replace_all_uses(old_name, new_name).
        self.specs = specs
        self.sig = sig

    def call(self, gm):
        def get_arg_spec(arg):
            # Build the ArgumentSpec for one tracepoint argument.
            if isinstance(arg, torch.fx.Node):
                if isinstance(arg.meta.get("val"), torch.Tensor):
                    return TensorArgument(name=arg.name)
                else:
                    raise AssertionError(
                        "Symint input is not implemented yet for submodule call signature."
                    )
            else:
                return ConstantArgument(name="", value=arg)

        # First sweep (forward): between a "module_call_outputs" tracepoint and
        # the next "module_call_inputs" one, pop the innermost nn_module_stack
        # entry from nodes that still carry the finished submodule's stack.
        for module in gm.modules():
            if not isinstance(module, torch.fx.GraphModule):
                continue
            nn_module_stack = None
            for node in module.graph.nodes:
                if node.op != "call_function":
                    continue
                if node.target == torch.ops.higher_order._export_tracepoint:
                    kind = node.kwargs["kind"]
                    if kind == "module_call_outputs":
                        nn_module_stack = node.meta["nn_module_stack"]
                    elif kind == "module_call_inputs":
                        nn_module_stack = None
                    else:
                        raise AssertionError(f"Unknown tracepoint kind: {kind}")
                elif node.meta["nn_module_stack"] == nn_module_stack:
                    node.meta["nn_module_stack"].popitem()
                else:
                    nn_module_stack = None
            # Second sweep (reverse): symmetric fix-up before each
            # "module_call_inputs" tracepoint.
            nn_module_stack = None
            for node in reversed(module.graph.nodes):
                if node.op != "call_function":
                    continue
                if node.target == torch.ops.higher_order._export_tracepoint:
                    kind = node.kwargs["kind"]
                    if kind == "module_call_inputs":
                        nn_module_stack = node.meta["nn_module_stack"]
                    elif kind == "module_call_outputs":
                        nn_module_stack = None
                    else:
                        raise AssertionError(f"Unknown tracepoint kind: {kind}")
                elif node.meta["nn_module_stack"] == nn_module_stack:
                    node.meta["nn_module_stack"].popitem()
                else:
                    nn_module_stack = None
        # Final sweep: record specs, reroute getitem users around each
        # tracepoint, and erase the tracepoint nodes.
        for module in gm.modules():
            if not isinstance(module, torch.fx.GraphModule):
                continue
            for node in module.graph.nodes:
                if node.op != "call_function":
                    continue
                if node.target == torch.ops.higher_order._export_tracepoint:
                    for i, arg in enumerate(node.args):
                        kind = node.kwargs["kind"]
                        if kind == "module_call_inputs":
                            self.specs[node.kwargs["path"]].inputs.append(
                                get_arg_spec(arg)
                            )
                        elif kind == "module_call_outputs":
                            self.specs[node.kwargs["path"]].outputs.append(
                                get_arg_spec(arg)
                            )
                        else:
                            raise AssertionError(f"Unknown tracepoint kind: {kind}")
                        if isinstance(arg, torch.fx.Node):
                            # Replace `tracepoint[i]` getitems with the original arg.
                            for user in node.users:
                                assert user.op == "call_function"
                                assert user.target == operator.getitem
                                assert isinstance(user.args[1], int)
                                if user.args[1] == i:
                                    user.replace_all_uses_with(arg)
                                    self.sig.replace_all_uses(user.name, arg.name)
                                    break
                    # All remaining users must now be dead getitems; drop them
                    # and the tracepoint itself.
                    users = list(node.users)
                    for user in users:
                        assert len(user.users) == 0
                        gm.graph.erase_node(user)
                    gm.graph.erase_node(node)
        return PassResult(gm, True)


# mypy: allow-untyped-defs
import collections
from collections import defaultdict
from typing import Any, Callable, Dict, Optional

import torch
import torch.utils._pytree as pytree


aten = torch.ops.aten

# We would like to split modules into two subgraphs for runtime weight updates to work correctly.
# The use case and more information could be found at:
# https://docs.google.com/document/d/1inZC-8KarJ6gKB7G9egmYLx1V_dKX_apxon0w4zPC0Q/edit?usp=sharing

# node.meta key used to tag each node with the submodule it belongs to.
META_TAG = "MODULE_TYPE"
# Tag for nodes that stay in the main (non-foldable) module.
MODULE_TAG = "_MAIN_MODULE"
# Tag for nodes that can move into the constant (foldable) submodule.
CONST_MODULE_TAG = "_CONST_MODULE"


def replace_node_with_constant(gm, node, constant, name=None):
    """Replace ``node`` in ``gm`` with a get_attr node referencing ``constant``.

    If ``name`` is not given, a fresh ``_frozen_param<i>`` attribute name is
    generated, skipping names already present on ``gm``.
    """
    g = gm.graph

    if name:
        qualname = name
    else:
        if not hasattr(gm, "_frozen_param_count"):
            gm._frozen_param_count = 0
        i = gm._frozen_param_count

        while True:
            qualname = f"_frozen_param{i}"
            if not hasattr(gm, qualname):
                break
            i += 1

        gm._frozen_param_count = i + 1

    with g.inserting_before(node):
        new_input_node = g.create_node("get_attr", qualname, (), {})
        node.replace_all_uses_with(new_input_node)
        new_input_node.meta.update(node.meta)
        g.erase_node(node)

    # needed to suppress `does not reference an nn.Module, nn.Parameter, or buffer` warning
    gm.register_buffer(qualname, constant)
    setattr(gm, qualname, constant)


class ConstantFolder(torch.fx.Interpreter):
    """FX interpreter that evaluates the constant-computable portion of a
    graph. Foldable node -> computed tensor pairs are accumulated in
    ``self.node_replacements``; non-constant values propagate as the sentinel
    ``self.unknown_value``."""

    def __init__(
        self,
        gm,
        skip_constructors=False,
    ):
        super().__init__(gm)
        # Nodes whose value was fully computed, mapped to the result tensor.
        self.node_replacements: Dict[torch.fx.Node, Any] = {}
        # How many uses of each input node have been absorbed by replacements.
        self.replaced_uses: Dict[torch.fx.Node, int] = collections.Counter()
        # Sentinel marking "not a compile-time constant".
        self.unknown_value = object()
        self.skip_constructors: bool = skip_constructors

        # overwrite this to deallocate env values if their only remaining use
        # is the output
        self.user_to_last_uses = self.node_to_last_non_output_use()

    def is_impure(self, node: torch.fx.node.Node):
        # "Impure" here means: do not fold this node, keep it in the graph
        # (used to preserve quantization patterns for later fusion).
        if (
            node.target == torch.ops.prims.convert_element_type.default
            and node.args[0].op == "get_attr"  # type: ignore[union-attr]
            and node.args[0].meta["val"].dtype == torch.int8  # type: ignore[union-attr]
            and node.args[1] == torch.bfloat16
        ):
            # For int8_weight -> dq -> bf16_weight
            return True
        if node.target in [
            torch.ops.quantized_decomposed.dequantize_per_channel.default,
            torch.ops.quantized_decomposed.dequantize_per_tensor.default,
            torch.ops.quantized_decomposed.dequantize_per_tensor.tensor,
        ]:
            # For the pattern fp32_weight -> q -> dq: only fold
            # fp32_weight -> q into int8_weight, and leave dq in the graph
            # to be fused later.
            return True
        return False

    def node_to_last_non_output_use(self):
        """Map each node to the inputs for which it is the last non-output
        use, so their env entries can be freed eagerly."""
        last_non_output_use = collections.defaultdict(list)
        seen_uses = set()
        output_node = next(iter(reversed(self.module.graph.nodes)))

        for node in reversed(self.module.graph.nodes):
            if node.target == "output":
                continue

            def add_use(inp):
                if inp in seen_uses:
                    return

                seen_uses.add(inp)
                last_non_output_use[node].append(inp)

            # In-place is fine since we don't mutate
            pytree.tree_map_only_(torch.fx.Node, add_use, (node.args, node.kwargs))

            # if this node is only used in output, we want to gc it right away
            if len(node.users) == 1 and output_node in node.users:
                last_non_output_use[node].append(node)

        return last_non_output_use

    def run_node(self, node):
        if node.target == "output":
            # because we remove nodes from env on last non output use,
            # re-define them now or we'll get error in interpreter
            def set_env(arg):
                self.env[arg] = self.unknown_value

            # In-place is fine since we don't mutate
            pytree.tree_map_only_(torch.fx.Node, set_env, node.args)
            return super().run_node(node)

        args, kwargs = self.fetch_args_kwargs_from_env(node)
        flattened_inputs = pytree.arg_tree_leaves(*args, **kwargs)

        # We need to do this weird thing because in cases where flattened_inputs
        # contains a ScriptObject, equality checking results in a type error if
        # the types are different.
        if any(
            type(self.unknown_value) == type(input_) and self.unknown_value == input_
            for input_ in flattened_inputs
        ):
            return self.unknown_value

        # TODO - fix errors with this
        if (
            node.op == "call_function"
            and node.target == aten._efficientzerotensor.default
        ):
            return self.unknown_value

        # TODO - constant folding triton kernel returns the inputs -- fix this
        if (
            node.op == "call_function"
            and node.name == "triton_kernel_wrapper_functional_proxy"
        ):
            return self.unknown_value

        # skip constructors, since inductor generates optimal code for them already
        # and turning into tensor would result in an additional global memory read
        # TODO - more complicated strategy
        if (
            self.skip_constructors
            and node.op != "get_attr"
            and not any(isinstance(e, torch.Tensor) for e in flattened_inputs)
        ):
            return self.unknown_value

        # All mutations should either be removed or on inputs which we did not make constant
        if (
            isinstance(node.target, torch._ops.OpOverload)
            and torch.Tag.nondeterministic_seeded in node.target.tags
        ):
            return self.unknown_value

        out = super().run_node(node)

        if node.op != "get_attr" and isinstance(out, torch.Tensor):
            if out.device.type == "meta":
                return out

            if not self.insertable_tensor_check(out):
                return out

            if self.is_impure(node):
                return self.unknown_value

            self.add_node_replacement(node, out)

            flattened_node_inps = pytree.arg_tree_leaves(*node.args, **node.kwargs)

            for n in flattened_node_inps:
                if not isinstance(n, torch.fx.Node):
                    continue

                self.replaced_uses[n] += 1

            for to_delete in self.user_to_last_uses.get(node, []):
                # If every use of an input was replaced, it no longer needs
                # its own replacement entry.
                if self.replaced_uses[to_delete] == len(to_delete.users):
                    self.node_replacements.pop(to_delete, None)

        return out

    def insertable_tensor_check(self, tensor: torch.Tensor) -> bool:
        # Hook for subclasses to veto folding particular tensors.
        return True

    def add_node_replacement(self, node: torch.fx.Node, tensor: torch.Tensor) -> None:
        self.node_replacements[node] = tensor

    def run(self):
        # Placeholders are runtime inputs, so they start as unknown.
        env = {}
        for n in self.module.graph.find_nodes(op="placeholder"):
            env[n] = self.unknown_value
        return super().run(initial_env=env)


def constant_fold(gm, constraint_fn: Optional[Callable[[torch.fx.Node], bool]] = None):
    """Run :class:`ConstantFolder` over ``gm`` (in place) and bake the
    computed constants into the module as frozen attributes.

    Args:
        gm: graph module to fold.
        constraint_fn: optional predicate; nodes for which it returns False
            are left unfolded.
    """
    # Disable active dispatch modes so real values are computed.
    with torch.utils._python_dispatch._disable_current_modes():
        cf = ConstantFolder(gm, skip_constructors=True)
        cf.run()

        for node, constant in cf.node_replacements.items():
            if constraint_fn is not None and not constraint_fn(node):
                continue
            replace_node_with_constant(gm, node, constant)

        erased_params = []
        # Get all attr users by looking up the graph instead from node.users, because in this case
        # _tensor_constant0 and _tensor_constant0_1 are actually referring to the same tensor.

        # opcode         name                 target            args                        kwargs
        # -------------  -------------------  ----------------  --------------------------  --------
        # placeholder    arg0_1               arg0              ()                          {}
        # get_attr       _tensor_constant0    state             ()                          {}
        # call_function  add                  aten.add.Tensor   (arg0_1, _tensor_constant0) {}
        # get_attr       _tensor_constant0_1  state             ()                          {}
        # call_function  add_                 aten.add_.Tensor  (_tensor_constant0_1, 1)    {}
        # output         output               output            ([add],)                    {}

        get_attr_node_users = defaultdict(list)
        for node in gm.graph.nodes:
            if node.op == "get_attr":
                get_attr_node_users[node.target].extend(node.users.keys())
        for node in gm.graph.find_nodes(op="get_attr"):
            if node.op == "get_attr" and len(get_attr_node_users[node.target]) == 0:
                if hasattr(gm, node.target):
                    delattr(gm, node.target)
                erased_params.append(node)
        for node in erased_params:
            gm.graph.erase_node(node)

        gm.graph.eliminate_dead_code()
        gm.graph.lint()
        gm.recompile()


def constant_graph_tag(gm: torch.fx.GraphModule):
    """Tag every node's ``meta[META_TAG]`` as either foldable
    (CONST_MODULE_TAG) or main-module (MODULE_TAG)."""
    with torch.utils._python_dispatch._disable_current_modes():
        cf = ConstantFolder(gm, skip_constructors=True)
        cf.run()

        for node in gm.graph.nodes:
            if (
                node.op == "get_attr"
                or node in cf.node_replacements
                or node in cf.replaced_uses
            ):
                node.meta[META_TAG] = CONST_MODULE_TAG
            else:
                node.meta[META_TAG] = MODULE_TAG


def run_and_get_constant_graph(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """
    Construct a GraphModule which corresponds to the part which could be
    constant folded in provided gm.
    """

    constant_graph_tag(gm)
    # We rewrite the tags, if it's a constant being directly consumed, without
    # any folding opportunity, we keep it in main gm.
    for node in gm.graph.find_nodes(op="get_attr"):
        used_to_fold = False
        for u in node.users:
            if u.meta[META_TAG] == CONST_MODULE_TAG:
                used_to_fold = True
                break
        if not used_to_fold:
            node.meta[META_TAG] = MODULE_TAG

    new_graph = torch.fx.Graph()

    # Copy the constant-tagged nodes into a fresh graph; any value consumed
    # by the main module becomes an output of the constant graph.
    node_remapping: Dict[torch.fx.Node, torch.fx.Node] = {}
    output_nodes = []
    for node in gm.graph.nodes:
        if node.meta[META_TAG] == MODULE_TAG:
            continue

        new_node = new_graph.node_copy(node, lambda x: node_remapping[x])
        node_remapping[node] = new_node

        for user in node.users:
            if user.meta[META_TAG] == MODULE_TAG:
                output_nodes.append(new_node)
                break

    new_graph.output(tuple(output_nodes))
    new_graph.lint()
    new_gm = torch.fx.GraphModule(gm, new_graph)

    return new_gm
aten = torch.ops.aten

# Non-functional side-effectful op -> functional variant that threads an
# explicit dep_token argument to preserve execution ordering.
_NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS: Dict[OpOverload, OpOverload] = {
    aten.sym_constrain_range.default: aten._functional_sym_constrain_range,
    aten._assert_async.msg: aten._functional_assert_async.msg,
}


class _FunctionalizeSideEffectfulOpsPass(_ExportPassBaseDeprecatedDoNotUse):
    """
    Functionalize ops with side effect in graph module by replacing the op with
    functional version of it. A new dependency token (`dep_token`) will be
    created and propagated through functional ops to output.
    For example:
    ```
    def f(x):
        sym_constrain_range(x.shape[0], min=1, max=3)
        return x.add(3)
    ```
    Will be transformed to:
    ```
    def f(x):
        dep_token0 = _make_dep_token()
        dep_token1 = _functional_sym_constrain_range(
            x.shape[0], min=1, max=3, dep_token=dep_token0
        )

        return x.add(3), dep_token1
    ```
    """

    def __init__(self) -> None:
        super().__init__()
        # Most recently produced dep token proxy; each functionalized op
        # consumes the previous token and produces the next one.
        self._dep_token: Optional[ProxyValue] = None
        # Index used to name the next dep token node (dep_token1, dep_token2, ...).
        self._next_dep_token_index: Optional[int] = None

    def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
        # Early return if no non-functional assertions.
        if not any(
            n.target in _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS
            for n in graph_module.graph.nodes
        ):
            return PassResult(graph_module=graph_module, modified=False)

        # Work on a copy so the caller's graph module is left untouched;
        # reset token state in case this pass instance is reused.
        gm = copy.deepcopy(graph_module)
        self._dep_token = None
        self._next_dep_token_index = None
        return super().call(gm)

    def call_operator(
        self,
        op: OpOverload,
        args: Tuple[Argument, ...],
        kwargs: Dict[str, Argument],
        meta: NodeMetadata,
    ) -> ProxyValue:
        if op not in _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS:
            return super().call_operator(op, args, kwargs, meta)

        # Lazily create the initial dep token on the first side-effectful op.
        if self._dep_token is None:
            self._dep_token = super().call_operator(
                aten._make_dep_token,
                args=(),
                kwargs={},
                meta=self._create_dummy_node_metadata(),
            )
            self._dep_token.node.name = "dep_token0"
            self._next_dep_token_index = 1

        # Emit the functional variant, chaining the current dep token through.
        self._dep_token = super().call_operator(
            _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS[op],
            args=args,
            kwargs={**kwargs, "dep_token": self._dep_token},
            meta=meta,
        )
        assert self._next_dep_token_index is not None
        self._dep_token.node.name = f"dep_token{self._next_dep_token_index}"
        self._next_dep_token_index += 1

        return self._dep_token

    def output(self, results: List[Argument], meta: NodeMetadata) -> ProxyValue:
        # Append the final dep token to the outputs so the whole chain of
        # side-effectful ops stays alive through DCE.
        assert self._dep_token is not None

        return super().output(results=(*results, self._dep_token), meta=meta)  # type: ignore[arg-type]
torch._guards import detect_fake_mode +from torch._library.fake_class_registry import FakeScriptObject +from torch._subclasses.fake_tensor import unset_fake_temporarily +from torch.export.exported_program import ( + ArgumentSpec, + CustomObjArgument, + ExportGraphSignature, + InputKind, + InputSpec, + TensorArgument, +) + + +class ConstantAttrMap(collections.abc.MutableMapping): + """A mapping class that understands how to use module constants (tensors, + ScriptObjects, FakeScriptObjects) as keys. We store tensors and FakeScriptObjects normally, + but ScriptObjects are stored by hash, because different torch.ScriptObjects can point to + the same underlying value (but we guarantee that they will `hash()` to the same value + if that's the case). + """ + + def __init__(self) -> None: + # Underlying dict that we use to implement this mapping. + self._constant_attrs: Dict[ + Union[int, torch.Tensor, FakeScriptObject], List[Any] + ] = {} + # Map from the hash(ScriptObject) to the ScriptObject itself. Used for + # APIs like `__iter__` that should look like they're returning the + # original ScriptObjects. + self._script_object_map: Dict[int, torch.ScriptObject] = {} + + def __getitem__( + self, key: Union[torch.Tensor, torch.ScriptObject, FakeScriptObject] + ) -> Any: + real_key = hash(key) if isinstance(key, torch.ScriptObject) else key + assert isinstance(real_key, (int, torch.Tensor, FakeScriptObject)) + return self._constant_attrs[real_key] + + def __setitem__(self, key: Union[torch.Tensor, torch.ScriptObject], value): + # we shouldn't actually call this, should go to add() instead to handle aliasing + raise NotImplementedError( + """Directly setting values for ConstantAttrMap is not supported, please use add(key, value) instead. 
+The same key can be mapped to multiple values, for handling constant aliasing.""" + ) + + def add( + self, key: Union[torch.Tensor, torch.ScriptObject, FakeScriptObject], value: Any + ) -> None: + if isinstance(key, torch.ScriptObject): + if hash(key) not in self._constant_attrs: + self._constant_attrs[hash(key)] = [] + self._constant_attrs[hash(key)].append(value) + self._script_object_map[hash(key)] = key + elif isinstance(key, (torch.Tensor, FakeScriptObject)): + if key not in self._constant_attrs: + self._constant_attrs[key] = [] + self._constant_attrs[key].append(value) + else: + raise TypeError( + f"Expected key to be a tensor or ScriptObject, got {type(key)}" + ) + + def __delitem__(self, key): + real_key = hash(key) if isinstance(key, torch.ScriptObject) else key + + del self._constant_attrs[real_key] + + def __iter__(self): + for key in self._constant_attrs: + if isinstance(key, int): + yield self._script_object_map[key] + else: + yield key + + def __len__(self): + return len(self._constant_attrs) + + def __contains__(self, key: object) -> bool: + real_key = hash(key) if isinstance(key, torch.ScriptObject) else key + return real_key in self._constant_attrs + + +def get_constant_fqn(node: torch.fx.Node, constant_name: str) -> str: + # The FQN of the constant tensor in the state dict should + # correspond to the module where the constant tensor was + # originally used. 
def get_constant_fqn(node: torch.fx.Node, constant_name: str) -> str:
    # The FQN of the constant tensor in the state dict should
    # correspond to the module where the constant tensor was
    # originally used.
    if len(node.meta["nn_module_stack"]) == 0:
        return constant_name
    # Innermost entry of nn_module_stack holds the owning module's FQN.
    parent_fqn = list(node.meta["nn_module_stack"].values())[-1][0]
    if len(parent_fqn) > 0:
        return f"{parent_fqn}.{constant_name}"
    else:
        return constant_name


def _get_first_fqn(
    const_attrs: ConstantAttrMap,
    key: Union[torch.Tensor, torch.ScriptObject, FakeScriptObject],
) -> Any:
    # A constant may alias several attributes; the first recorded FQN wins.
    fqns = const_attrs.get(key)
    return fqns[0] if fqns else None


def lift_constants_pass(
    gm: torch.fx.GraphModule,
    graph_signature: ExportGraphSignature,
    constant_attrs: ConstantAttrMap,
) -> Dict[str, Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]]:
    """
    Takes a graph module, graph signature, and modifies them in place to lift any
    constants (tensors or custom classes) as inputs to the graph. Returns a
    dictionary of names to constants.

    Arguments:
        gm (torch.fx.GraphModule): The graph module containing the graph and constants to lift.
        graph_signature (ExportGraphSignature): This graph signature will be
            mutated to add additional CONSTANT_TENSOR and CUSTOM_OBJ inputs.
        constant_attrs (ConstantAttr): A mapping from a constant value to its
            fully-qualified path in `gm`. This is used to maintain consistent
            location of constants between the original module and the exported
            version.

    Returns:
        A dictionary of fqn => constant value.
    """
    all_constants: Dict[
        str, Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]
    ] = {}

    inputs = graph_signature.input_specs
    # Counters continue from any constants already present in the signature,
    # so generated names stay unique.
    num_custom_obj = sum(
        input_specs.kind == InputKind.CUSTOM_OBJ for input_specs in inputs
    )
    num_tensor_constants = sum(
        input_specs.kind == InputKind.CONSTANT_TENSOR for input_specs in inputs
    )

    fake_mode = detect_fake_mode(
        tuple(node.meta["val"] for node in gm.graph.nodes if node.op == "placeholder")
    )

    # Find the first user-input placeholder: lifted constants are inserted
    # just before it, and the signature index tracks the same position.
    first_user_input_loc, first_user_input = 0, None
    for node in gm.graph.nodes:
        if node.op == "placeholder" and node.name in graph_signature.user_inputs:
            first_user_input = node
            break
        first_user_input_loc += 1

    lifted_objs = ConstantAttrMap()
    for node in gm.graph.nodes:
        if node.op == "get_attr":
            constant_val = getattr(gm, node.target)
            if constant_val in lifted_objs:
                # We already lifted this constant elsewhere. Just rewrite uses
                # of this get_attr to point to the already-existing placeholder
                # node.
                const_placeholder_node = _get_first_fqn(lifted_objs, constant_val)
                node.replace_all_uses_with(const_placeholder_node)
                gm.graph.erase_node(node)
                continue

            # For ScriptObject, Tensor and FakeScriptObject constants:
            # First check if the constant was an attribute on some module by
            # consulting `constant_attrs` map. If it is, use the fqn that keeps
            # its location consistent with the eager module.
            #
            # If it's not in the `constant_attrs` map, that means it's an inline
            # constant (e.g. x + torch.tensor(0)), and thus did not have a
            # specific location in the eager module. In that case, just generate
            # some name and attach it to the module in which it was used.
            if isinstance(constant_val, (torch.ScriptObject, FakeScriptObject)):
                constant_kind = InputKind.CUSTOM_OBJ
                constant_fqn = _get_first_fqn(constant_attrs, constant_val)
                if constant_fqn is not None:
                    constant_name = constant_fqn.replace(".", "_")
                else:
                    constant_name = f"lifted_custom_{num_custom_obj}"
                    constant_fqn = get_constant_fqn(node, constant_name)
                    num_custom_obj += 1
            elif isinstance(constant_val, torch.Tensor):
                # Remove the parameterness of constant_val
                if isinstance(constant_val, torch.nn.Parameter):
                    # NOTE(review): this f-string concatenation emits
                    # "...is a parameter. Butit's not registered..." — a space
                    # is missing between the two fragments.
                    warnings.warn(
                        f"{node.target} created when tracing {node.meta['stack_trace']} is a parameter. But"
                        f"it's not registered with register_parameter(). export will treat it as a constant tensor"
                    )
                    # We get the real data out of the parameter by disabling the surrounding fake mode.
                    with unset_fake_temporarily():
                        constant_val = constant_val.data
                constant_kind = InputKind.CONSTANT_TENSOR
                constant_fqn = _get_first_fqn(constant_attrs, constant_val)
                if constant_fqn is not None:
                    constant_name = constant_fqn.replace(".", "_")
                else:
                    constant_name = f"lifted_tensor_{num_tensor_constants}"
                    constant_fqn = get_constant_fqn(node, constant_name)
                    num_tensor_constants += 1
            elif isinstance(constant_val, torch.fx.GraphModule):
                # Submodules are not constants; leave them alone.
                continue
            elif "LoweredBackendModule" in type(constant_val).__name__:
                continue
            else:
                raise SpecViolationError(
                    f"getattr node {node} referencing unsupported type {type(constant_val)}"
                )

            with gm.graph.inserting_before(first_user_input):
                # Insert the constant node before the first user input
                const_placeholder_node = gm.graph.placeholder(constant_name)
                # match target name with its node name in case there is name collision
                # and suffix is added to node name in fx
                const_placeholder_node.target = const_placeholder_node.name

                for k, v in node.meta.items():
                    const_placeholder_node.meta[k] = v

                # Once the FQN has been used, remove nn_module_stack, stack_trace
                const_placeholder_node.meta.pop("nn_module_stack")
                const_placeholder_node.meta.pop("stack_trace", None)

                input_spec_arg: ArgumentSpec
                if isinstance(constant_val, torch.Tensor):
                    if fake_mode is not None:
                        const_placeholder_node.meta["val"] = fake_mode.from_tensor(
                            constant_val, static_shapes=True
                        )
                        const_placeholder_node.meta["val"].constant = constant_val
                    else:
                        const_placeholder_node.meta["val"] = constant_val
                    input_spec_arg = TensorArgument(name=const_placeholder_node.name)
                elif isinstance(constant_val, torch._C.ScriptObject):
                    class_fqn = constant_val._type().qualified_name()  # type: ignore[attr-defined]
                    const_placeholder_node.meta["val"] = CustomObjArgument(
                        constant_fqn, class_fqn
                    )
                    input_spec_arg = CustomObjArgument(
                        name=const_placeholder_node.name, class_fqn=class_fqn
                    )
                elif isinstance(constant_val, FakeScriptObject):
                    class_fqn = constant_val.script_class_name
                    const_placeholder_node.meta["val"] = CustomObjArgument(
                        constant_fqn, class_fqn, constant_val
                    )
                    input_spec_arg = CustomObjArgument(
                        name=const_placeholder_node.name,
                        class_fqn=class_fqn,
                        fake_val=constant_val,
                    )
                else:
                    raise SpecViolationError(
                        f"tried to lift unsupported type {type(constant_val)} from node {node.format_node()}"
                    )

                lifted_objs.add(constant_val, const_placeholder_node)
                node.replace_all_uses_with(const_placeholder_node)
                gm.graph.erase_node(node)

                # Add the constant as a buffer to the graph signature
                graph_signature.input_specs.insert(
                    first_user_input_loc,
                    InputSpec(
                        kind=constant_kind,
                        arg=input_spec_arg,
                        target=constant_fqn,
                    ),
                )
                if constant_val in constant_attrs:
                    # Record the constant under every FQN it aliases.
                    for fqn in constant_attrs[constant_val]:
                        all_constants[fqn] = constant_val
                else:
                    all_constants[constant_fqn] = constant_val
                first_user_input_loc += 1

    return all_constants
tracing, we produce a graph with FakeScriptObject in the + meta["val"]. + + For now, we rewrie meta["val"] to be a placeholder CustomObjArgument + """ + constants: Dict[ + str, + Union[ + torch.Tensor, + torch.ScriptObject, + FakeScriptObject, + ], + ] = {} + for node in gm.graph.nodes: + if "val" not in node.meta: + continue + + old_meta = node.meta["val"] + + if isinstance(old_meta, torch.ScriptObject): + class_fqn = old_meta._type().qualified_name() # type: ignore[attr-defined] + new_meta = CustomObjArgument(node.name, class_fqn) + constants[node.name] = old_meta + node.meta["val"] = new_meta + + elif isinstance(old_meta, FakeScriptObject): + class_fqn = old_meta.script_class_name # type: ignore[attr-defined] + new_meta = CustomObjArgument(node.name, class_fqn, old_meta) + constants[node.name] = old_meta + node.meta["val"] = new_meta + + return constants diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py b/janus/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py new file mode 100644 index 0000000000000000000000000000000000000000..a80b62d2765a87b0e20dc7614c6a353c86225d81 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py @@ -0,0 +1,27 @@ +# mypy: allow-untyped-defs +import torch +from torch.fx.passes.infra.pass_base import PassBase, PassResult + + +class _RemoveRuntimeAssertionsPass(PassBase): + """ + Remove runtime assertions inserted by the + _AddRuntimeAssertionsForInlineConstraintsPass. 
+ """ + + def call(self, graph_module) -> PassResult: + modified = False + for module in graph_module.modules(): + if not isinstance(module, torch.fx.GraphModule): + continue + for node in module.graph.nodes: + if node.target == torch.ops.aten._assert_async.msg: + assert_async_node = node + if len(assert_async_node.users) > 0: + continue + module.graph.erase_node(assert_async_node) + # the upstream scalar_tensor <- {le, ge} <- sym_size + # linear chain of nodes of nodes is removed by the + # downstream dead code elimination + modified = True + return PassResult(graph_module, modified) diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/replace_autocast_with_hop_pass.py b/janus/lib/python3.10/site-packages/torch/_export/passes/replace_autocast_with_hop_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..a15a3ef9c3b3f7465f4dc9cf8f6a2f32ea1b0884 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/passes/replace_autocast_with_hop_pass.py @@ -0,0 +1,179 @@ +# mypy: allow-untyped-defs +from typing import List + +import torch +from torch._higher_order_ops.wrap import wrap_with_autocast + +from ..utils import node_inline_, nodes_filter, nodes_first, sequential_split +from .replace_with_hop_pass_util import ( + _replace_with_hop_helper, + _replace_with_hop_pass_helper, + _sequential_split_and_maybe_inline_subgraphs_helper, +) + + +def _is_autocast_node(node: torch.fx.Node): + return ( + node + and node.op == "call_function" + and node.target + in [ + torch.amp.autocast_mode._enter_autocast, + torch.amp.autocast_mode._exit_autocast, + ] + ) + + +def _is_enter_autocast_node(node: torch.fx.Node): + return ( + node + and node.op == "call_function" + and node.target == torch.amp.autocast_mode._enter_autocast + ) + + +def _is_exit_autocast_node(node: torch.fx.Node): + return ( + node + and node.op == "call_function" + and node.target == torch.amp.autocast_mode._exit_autocast + ) + + +def _is_autocast_sub_mod(node: 
torch.fx.Node): + """ + Check if the first non-placeholder node is `torch.amp.autocast_mode._enter_autocast`. + """ + if node.op == "call_module": + assert isinstance(node.target, str) + subgm = getattr(node.graph.owning_module, node.target) + first_non_ph = nodes_first( + subgm.graph.nodes, lambda node: node.op != "placeholder" + ) + if ( + first_non_ph + and first_non_ph.op == "call_function" + and first_non_ph.target == torch.amp.autocast_mode._enter_autocast + ): + # TODO: check if current auto-cast type is the same as the args of + # _enter_autocast. If so, return False, i.e. do not create a submodule. + return True + return False + + +def _check_valid_autocast_block(enter_autocast_node, exit_autocast_node): + assert _is_enter_autocast_node(enter_autocast_node) + assert _is_exit_autocast_node(exit_autocast_node) + assert exit_autocast_node.args[0] == enter_autocast_node + + +def _replace_with_hop(node: torch.fx.Node): + assert node.op == "call_module" + graph: torch.fx.Graph = node.graph + gm: torch.fx.GraphModule = graph.owning_module + assert isinstance(node.target, str) + sub_gm = getattr(gm, node.target) + sub_graph = sub_gm.graph + autocast_nodes = nodes_filter(sub_graph.nodes, _is_autocast_node) + if len(autocast_nodes) > 0: + assert len(autocast_nodes) > 1 # need at least an enter node and an exist node + enter_autocast_node = autocast_nodes[0] + exit_autocast_node = autocast_nodes[-1] + _check_valid_autocast_block(enter_autocast_node, exit_autocast_node) + + _replace_with_hop_helper( + node, enter_autocast_node, _is_autocast_node, wrap_with_autocast + ) + sub_graph.erase_node(exit_autocast_node) + sub_graph.erase_node(enter_autocast_node) + + +def _split_autocast(gm: torch.fx.GraphModule) -> torch.fx.GraphModule: + """ + split_autocast creates a new graph module that splits the input graph module into multiple submodules + based on the `_enter_autocast` and `_exit_autocast` nodes. It doesn't mutate the input graph module. 
+ + Nodes between the **outer-most** `_enter_autocast` and `_exit_autocast(_enter_autocast)` are splitted + into a submodule. Nested autocast regions are not splitted. + `_enter_autocast` and `_exit_autocast(_enter_autocast)` nodes are in the submodule as well. + + Below is an example of splitting. A, B, C, D, E are blocks of non-autocast nodes in the original graph + module. Nodes marked with the same number are grouped into the same submodule. + A # 0 + enter_autocast # 1 + B # 1 + exit_autocast # 1 + C # 2 + enter_autocast # 3 + D # 3 + exit_autocast # 3 + E # 4 + """ + enter_autocast_node_stack: List[torch.fx.Node] = [] + first_node_after_outer_most_exit: bool = False + + def node_call_back(node: torch.fx.Node): + nonlocal enter_autocast_node_stack, first_node_after_outer_most_exit + if first_node_after_outer_most_exit or ( + len(enter_autocast_node_stack) == 0 and _is_enter_autocast_node(node) + ): + assert len(enter_autocast_node_stack) == 0 + first_node_after_outer_most_exit = False + if _is_enter_autocast_node(node): + enter_autocast_node_stack.append(node) + return True + if _is_exit_autocast_node(node): + assert len(enter_autocast_node_stack) > 0 + last_enter_autocast_node = enter_autocast_node_stack.pop() + assert node.args[0] == last_enter_autocast_node + if len(enter_autocast_node_stack) == 0: + # next node should be in the next submodule since + # autocast block ends + first_node_after_outer_most_exit = True + return False + + return sequential_split(gm, node_call_back) + + +def _sequential_split_and_maybe_inline_subgraphs( + gm: torch.fx.GraphModule, graph_signature +): + """ + Helper function for replace_autocast_with_hop_pass(). + Split the graph module into multiple subgraphs based on the autocast nodes. + For each subgraph, decides whether to construct a HOO subgraph, or inline the calls + back into the parent graph module. + Nodes between `_enter_autocast` and `_exit_autocast(_enter_autocast)` are considered + as a subgraph. 
+ """ + need_replacing = any(_is_autocast_node(node) for node in gm.graph.nodes) + if not need_replacing: + return gm, graph_signature + + # split_autocast returns a new graph module that could have different output + # args names. We need to fix the graph signature in `_sequential_split_and_maybe_inline_subgraphs_helper`. + new_gm = _split_autocast(gm) + + def _maybe_inline_or_replace_with_hop(node: torch.fx.Node): + if _is_autocast_sub_mod(node): + _replace_with_hop(node) + else: + assert node.op == "call_module" + assert isinstance(node.target, str) + node_inline_(node) + + return _sequential_split_and_maybe_inline_subgraphs_helper( + new_gm, graph_signature, _maybe_inline_or_replace_with_hop + ) + + +def replace_autocast_with_hop_pass(gm: torch.fx.GraphModule, graph_signature): + """ + Split gm into sub-graph-modules using `sequential_split_and_maybe_inline_subgraphs`, and + then recursively call itself on each of the submodules. + """ + return _replace_with_hop_pass_helper( + gm, + graph_signature, + _sequential_split_and_maybe_inline_subgraphs, + ) diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/replace_quantized_ops_with_standard_ops_pass.py b/janus/lib/python3.10/site-packages/torch/_export/passes/replace_quantized_ops_with_standard_ops_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..47a93a035aa5389ce347a49d99ca6230484d3b16 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/passes/replace_quantized_ops_with_standard_ops_pass.py @@ -0,0 +1,673 @@ +# mypy: allow-untyped-defs +import logging +import operator +from typing import List, Optional, Tuple, Union + +import torch +import torch.export._trace +from torch._ops import OpOverload +from torch.ao.quantization.fx._decomposed import ( + dequantize_per_channel, + dequantize_per_tensor, + quantize_per_tensor, +) +from torch.ao.quantization.utils import calculate_qmin_qmax +from torch.fx.graph_module import _assign_attr + + +log = 
logging.getLogger(__name__) + +# Those values will need to be carried over multiple operators. +_INPUT_Q_DTYPE: Optional[Union[torch.dtype, torch.fx.Node]] = None +_SCALE: Optional[Union[float, torch.fx.Node]] = None +_ZERO_POINT: Optional[Union[float, torch.fx.Node]] = None + + +def int_to_valid_dtype(val: int) -> torch.dtype: + from torch._export.converter import _TORCH_ENUM_TO_DTYPE # No circular import. + + if isinstance(val, torch.dtype): + return val + dtype = _TORCH_ENUM_TO_DTYPE[val] + if dtype == torch.quint8: + return torch.uint8 + elif dtype == torch.qint8: + return torch.int8 + return dtype + + +def fx_enum_to_dtype(gm: torch.fx.GraphModule, val: int) -> torch.fx.Node: + return gm.graph.call_function(int_to_valid_dtype, (val,)) + + +def insert_quantized_node( + gm: torch.fx.GraphModule, + val_node: torch.fx.Node, + scale_node: Union[float, torch.fx.Node], + zero_point_node: Union[float, torch.fx.Node], + qmin_node: Union[float, int, torch.fx.Node], + qmax_node: Union[float, int, torch.fx.Node], + dtype_node: Union[torch.dtype, torch.fx.Node], + qscheme: Optional[torch.qscheme], +) -> torch.fx.Node: + return gm.graph.call_function( + quantize_per_tensor, + ( + val_node, + scale_node, + zero_point_node, + qmin_node, + qmax_node, + dtype_node, + ), + ) + + +def get_dequantized( + val: torch.Tensor, + scale: Union[float, torch.Tensor], + zero_point: Union[float, torch.Tensor], + qmin: Union[float, int], + qmax: Union[float, int], + dtype: torch.dtype, + axis: Optional[int], + qscheme: Optional[torch.qscheme], +) -> torch.Tensor: + if qscheme is torch.per_tensor_affine: + return dequantize_per_tensor( + val, + scale, + zero_point, + qmin, + qmax, + dtype, + ) + elif qscheme is torch.per_channel_affine: + return dequantize_per_channel( + val, + scale, + zero_point, + axis, + qmin, + qmax, + dtype, + ) + else: + raise RuntimeError(f"Unsupported dequantization scheme: {qscheme}") + + +def insert_dequantized_node( + gm: torch.fx.GraphModule, + val_node: 
torch.fx.Node, + scale_node: Union[float, torch.fx.Node], + zero_point_node: Union[float, torch.fx.Node], + qmin_node: Union[float, int, torch.fx.Node], + qmax_node: Union[float, int, torch.fx.Node], + dtype_node: Union[torch.dtype, torch.fx.Node], + axis_node: Optional[Union[int, torch.fx.Node]], + qscheme: Optional[torch.qscheme], +) -> torch.fx.Node: + if qscheme is torch.per_tensor_affine: + return gm.graph.call_function( + dequantize_per_tensor, + ( + val_node, + scale_node, + zero_point_node, + qmin_node, + qmax_node, + dtype_node, + ), + ) + elif qscheme is torch.per_channel_affine: + return gm.graph.call_function( + dequantize_per_channel, + ( + val_node, + scale_node, + zero_point_node, + axis_node, + qmin_node, + qmax_node, + dtype_node, + ), + ) + else: + raise RuntimeError(f"Unsupported dequantization scheme: {qscheme}") + + +def get_qmin_qmax(dtype: torch.dtype) -> Tuple[Union[int, float], Union[int, float]]: + return calculate_qmin_qmax(None, None, False, dtype, False) # type: ignore[arg-type] + + +def insert_qmin_qmax_node( + gm: torch.fx.GraphModule, dtype_node: Union[torch.dtype, torch.fx.Node] +) -> Tuple[torch.fx.Node, torch.fx.Node]: + q_min_max_node = gm.graph.call_function( + calculate_qmin_qmax, (None, None, False, dtype_node, False) + ) + qmin_node = gm.graph.call_function(operator.getitem, (q_min_max_node, 0)) + qmax_node = gm.graph.call_function(operator.getitem, (q_min_max_node, 1)) + return qmin_node, qmax_node + + +def get_script_object( + gm: torch.nn.Module, node: torch.fx.Node +) -> torch._C.ScriptObject: + assert isinstance(node, torch.fx.Node) + assert node.op == "get_attr" + attr_name = node.target + assert isinstance(attr_name, str) + + mod = gm + for attr in attr_name.split("."): + mod = getattr(mod, attr) + assert isinstance(mod, torch._C.ScriptObject) + return mod + + +def insert_weight_and_bias_get_attr_node_from_get_attr_to_scriptobject( + gm: torch.fx.GraphModule, + param_node: torch.fx.Node, +) -> Tuple[torch.fx.Node, 
Optional[torch.fx.Node]]: + """Directly inline tensor from a get_attr fx node.""" + mod = get_script_object(gm, param_node) + w_qtensor, b_qtensor = mod.unpack() # type: ignore[attr-defined] + w_attr_name, b_attr_name = ( + f"dequantized_{param_node.target}_w", + f"dequantized_{param_node.target}_b", + ) + return insert_weight_and_bias_get_attr_node( + gm, w_qtensor, b_qtensor, w_attr_name, b_attr_name + ) + + +def insert_weight_and_bias_get_attr_node_from_get_attr_to_qtensor( + gm: torch.fx.GraphModule, + get_attr_to_weight_node: torch.fx.Node, + get_attr_to_bias_node: Optional[torch.fx.Node], +) -> Tuple[torch.fx.Node, Optional[torch.fx.Node]]: + assert isinstance(get_attr_to_weight_node.target, str) + w_qtensor = getattr(gm, get_attr_to_weight_node.target) + w_attr_name = f"dequantized_{get_attr_to_weight_node.target}_w" + + if get_attr_to_bias_node is not None: + assert isinstance(get_attr_to_bias_node.target, str) + b_qtensor = getattr(gm, get_attr_to_bias_node.target) + b_attr_name = f"dequantized_{get_attr_to_bias_node.target}_b" + else: + b_qtensor, b_attr_name = None, "" + + return insert_weight_and_bias_get_attr_node( + gm, w_qtensor, b_qtensor, w_attr_name, b_attr_name + ) + + +def insert_weight_and_bias_get_attr_node( + gm: torch.fx.GraphModule, + w_qtensor: torch.Tensor, + b_qtensor: Optional[torch.Tensor], + w_attr_name: str, + b_attr_name: str, +) -> Tuple[torch.fx.Node, Optional[torch.fx.Node]]: + w_tensor = get_tensor_from_qtensor(w_qtensor) + _assign_attr(w_tensor, gm, w_attr_name) + w_tensor_attr = gm.graph.get_attr(w_attr_name) + + if b_qtensor is not None: + b_tensor = get_tensor_from_qtensor(b_qtensor, dequant=False) + _assign_attr(b_tensor, gm, b_attr_name) + b_tensor_attr = gm.graph.get_attr(b_attr_name) + else: + b_tensor_attr = None + + return w_tensor_attr, b_tensor_attr + + +def get_tensor_from_qtensor( + qtensor: torch.Tensor, dequant: bool = True +) -> torch.Tensor: + # Manual conversion because qint8 is not used anymore. 
+ if qtensor.dtype in [torch.qint8, torch.quint8]: + tensor = qtensor.int_repr() + else: + tensor = qtensor + + # Weights need dequantization with scaling and zero_point adjustment, but + # bias does not need that. + if dequant: + qscheme = qtensor.qscheme() + if qscheme == torch.per_channel_affine: + scale, zero_point, axis = ( + qtensor.q_per_channel_scales(), + qtensor.q_per_channel_zero_points(), + qtensor.q_per_channel_axis(), + ) + else: + scale, zero_point, axis = ( + qtensor.q_scale(), # type: ignore[assignment] + qtensor.q_zero_point(), # type: ignore[assignment] + None, + ) + dtype = tensor.dtype + qmin, qmax = get_qmin_qmax(dtype) + return get_dequantized( + tensor, scale, zero_point, qmin, qmax, dtype, axis, qscheme + ) + return tensor + + +def insert_fused_activation_node( + gm: torch.fx.GraphModule, opname: str, fx_node: torch.fx.Node +) -> torch.fx.Node: + if opname in ["conv1d_relu", "conv2d_relu", "linear_relu", "add_relu", "mul_relu"]: + fx_node = gm.graph.call_function(torch.ops.aten.relu, (fx_node,)) + return fx_node + + +def _conv1d_op_with_squeeze( + inp: torch.Tensor, + weight: torch.Tensor, + bias: Optional[torch.Tensor], + stride: List[int], + padding: List[int], + dilation: List[int], + groups: int, +) -> torch.Tensor: + # In quantized version, conv1d is emulated using conv2d with squeeze and unsqueeze + # operations before and after the conv2d operation to match the dimension of weights. 
+ # Reference: https://github.com/pytorch/pytorch/blob/eca0cb0fbe84bb0a34fa94afe261bceecd52c436/aten/src/ATen/native/quantized/cpu/qconv.cpp#L1827 # noqa: B950 + s_inp = torch.ops.aten.unsqueeze(inp, 2) + conv1d_res = torch.ops.aten.conv2d( + s_inp, + weight, + bias, + stride, + padding, + dilation, + groups, + ) + uns_conv1d_res = torch.ops.aten.squeeze(conv1d_res, 2) + return uns_conv1d_res + + +def _transform_conv_with_packedparam(gm: torch.fx.GraphModule, node: torch.fx.Node): + """Conv specfic transformation function.""" + assert isinstance(node.target, torch._ops.OpOverload) + opname = node.target._opname + scale_node, zero_point_node = node.args[2], node.args[3] + + op_f = ( + torch.ops.aten.conv2d + if opname in ["conv2d", "conv2d_relu"] + else _conv1d_op_with_squeeze + ) + + inp_node, param_node = node.args[0], node.args[1] + assert isinstance(inp_node, torch.fx.Node) + assert isinstance(param_node, torch.fx.Node) + + if param_node.op == "call_function": + # Using Conv2dPrepackParam from conv_prepack. + # We directly skip the packing call and inline weights and bias. + w_node, b_node = param_node.args[0], param_node.args[1] + assert isinstance(w_node, torch.fx.Node) + assert b_node is None or isinstance(b_node, torch.fx.Node) + ( + param_0, + param_1, + ) = insert_weight_and_bias_get_attr_node_from_get_attr_to_qtensor( + gm, w_node, b_node + ) + op_res_node = gm.graph.call_function( + op_f, (inp_node, param_0, param_1, *param_node.args[2:]) + ) + else: + # Using ConvPrepackedParam. 
+ param = get_script_object(gm, param_node) + ( + param_0, + param_1, + ) = insert_weight_and_bias_get_attr_node_from_get_attr_to_scriptobject( + gm, param_node + ) # type: ignore[assignment] + op_res_node = gm.graph.call_function( + op_f, + ( + inp_node, + param_0, + param_1, + param.stride(), # type: ignore[attr-defined] + param.padding(), # type: ignore[attr-defined] + param.dilation(), # type: ignore[attr-defined] + param.groups(), # type: ignore[attr-defined] + ), + ) + return op_res_node, scale_node, zero_point_node + + +def _transform_linear_with_packedparam(gm: torch.fx.GraphModule, node: torch.fx.Node): + """Linear specfic transformation function.""" + scale_node, zero_point_node = node.args[2], node.args[3] + + inp_node, param_node = node.args[0], node.args[1] + assert isinstance(inp_node, torch.fx.Node) + assert isinstance(param_node, torch.fx.Node) + + if param_node.op == "call_function": + # Using LinearPrepackParam from linear_prepack. + # We directly skip the packing call and inline weights and bias. + w_node, b_node = param_node.args[0], param_node.args[1] + assert isinstance(w_node, torch.fx.Node) + assert b_node is None or isinstance(b_node, torch.fx.Node) + ( + param_0, + param_1, + ) = insert_weight_and_bias_get_attr_node_from_get_attr_to_qtensor( + gm, w_node, b_node + ) + op_res_node = gm.graph.call_function( + torch.ops.aten.linear, (inp_node, param_0, param_1, *param_node.args[2:]) + ) + else: + # Using LinearPackedParams. 
+ ( + param_0, + param_1, + ) = insert_weight_and_bias_get_attr_node_from_get_attr_to_scriptobject( + gm, param_node + ) # type: ignore[assignment] + op_res_node = gm.graph.call_function( + torch.ops.aten.linear, (inp_node, param_0, param_1) + ) + return op_res_node, scale_node, zero_point_node + + +def _transform_op_where_last_two_arguments_are_scale_and_zero_point( + gm: torch.fx.GraphModule, node: torch.fx.Node +): + """ + This transformation function can be used for function where the last two + parameters are scale and zero point. Additionally, the function's parameters + do not need any unpacking. + """ + to_standard_op = { + "mul": torch.ops.aten.mul, + "mul_relu": torch.ops.aten.mul, + "add": torch.ops.aten.add, + "add_relu": torch.ops.aten.add, + "softmax": torch.ops.aten.softmax, + "cat": torch.ops.aten.cat, + "hardswish": torch.ops.aten.hardswish, + } + + assert isinstance(node.target, torch._ops.OpOverload) + opname, args = node.target._opname, node.args + scale_node, zero_point_node = args[-2], args[-1] + op_res_node = gm.graph.call_function(to_standard_op[opname], tuple(args[:-2])) + return op_res_node, scale_node, zero_point_node + + +def _transform_scalar_arithmetic(gm: torch.fx.GraphModule, node: torch.fx.Node): + """Transform scalar overload for basic arithmetic.""" + to_standard_op = { + "mul": torch.ops.aten.mul.Scalar, + "add": torch.ops.aten.add.Scalar, + } + assert isinstance(node.target, torch._ops.OpOverload) + opname, args = node.target._opname, node.args + op_res_node = gm.graph.call_function(to_standard_op[opname], args) + return op_res_node, _SCALE, _ZERO_POINT + + +def _transform_prepacked_op(gm: torch.fx.GraphModule, node: torch.fx.Node): + """ + Transformation for functions under prepacked namespace, where they share + the same handling logic that [...]OpContext contains all parameters. 
+ """ + assert isinstance(node.target, torch._ops.OpOverload) + opname, args = node.target._opname, node.args + op_f = None + if opname == "conv2d_clamp_run": + op_f = torch.ops.aten.conv2d + elif opname == "linear_clamp_run": + op_f = torch.ops.aten.linear + else: + raise RuntimeError(f"Invalid operator {opname}") + + assert isinstance(args[1], torch.fx.Node) + so = get_script_object(gm, args[1]) + + func_args = [] + func_args += [args[0]] + func_args += so.unpack()[:2] # type: ignore[attr-defined] + if opname == "conv2d_clamp_run": + func_args += torch.ops.prepacked.unpack_prepacked_sizes_conv2d(so)[2:] + + op_res_node = gm.graph.call_function(op_f, tuple(func_args)) + return op_res_node + + +def _transform_batch_norm(gm: torch.fx.GraphModule, node: torch.fx.Node): + args = node.args + scale_node, zero_point_node = args[-2], args[-1] + op_res_node = gm.graph.call_function( + torch.ops.aten.native_batch_norm, (*args[:-3], False, 0.1, args[-3]) + ) + op_res_node = gm.graph.call_function(operator.getitem, (op_res_node, 0)) + return op_res_node, scale_node, zero_point_node + + +def fx_transform_quantized_op_to_standard_op( + gm: torch.fx.GraphModule, node: torch.fx.Node +) -> torch.fx.Node: + global _SCALE, _ZERO_POINT, _INPUT_Q_DTYPE + + assert isinstance(node.target, torch._ops.OpOverload) + opname, overload = node.target._opname, node.target._overloadname + + key = f"{opname}.{overload}" + opname_to_transform_f = { + "conv1d.new": _transform_conv_with_packedparam, + "conv1d_relu.new": _transform_conv_with_packedparam, + "conv1d.default": _transform_conv_with_packedparam, + "conv1d_relu.default": _transform_conv_with_packedparam, + "conv2d.new": _transform_conv_with_packedparam, + "conv2d_relu.new": _transform_conv_with_packedparam, + "conv2d.default": _transform_conv_with_packedparam, + "conv2d_relu.default": _transform_conv_with_packedparam, + "linear.default": _transform_linear_with_packedparam, + "linear_relu.default": _transform_linear_with_packedparam, + 
"add.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point, + "add_relu.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point, + "mul.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point, + "mul_relu.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point, + "softmax.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point, + "cat.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point, + "hardswish.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point, + "batch_norm2d.default": _transform_batch_norm, + "mul.Scalar": _transform_scalar_arithmetic, + "add.Scalar": _transform_scalar_arithmetic, + } + + if f"{key}" not in opname_to_transform_f: + raise RuntimeError(f"Unsupported quantized op during transformation: {key}") + + op_res_node, scale_node, zero_point_node = opname_to_transform_f[f"{key}"](gm, node) + + # Add fused activation layer. + op_res_node = insert_fused_activation_node(gm, opname, op_res_node) + _SCALE, _ZERO_POINT = scale_node, zero_point_node + + assert _INPUT_Q_DTYPE is not None + qmin_node, qmax_node = insert_qmin_qmax_node(gm, _INPUT_Q_DTYPE) + q_fx_node = insert_quantized_node( + gm, + op_res_node, + scale_node, + zero_point_node, + qmin_node, + qmax_node, + _INPUT_Q_DTYPE, + torch.per_tensor_affine, + ) + dq_fx_node = insert_dequantized_node( + gm, + q_fx_node, + scale_node, + zero_point_node, + qmin_node, + qmax_node, + _INPUT_Q_DTYPE, + None, + torch.per_tensor_affine, + ) + return dq_fx_node + + +def replace_quantized_ops_with_standard_ops(gm: torch.fx.GraphModule): + """ + Replace legacy quantized ops (aten.quantize_per_tensor, quantized.conv) with + PT2 ops (quantize_decomposed.quantize_per_tensor, aten.conv). 
+ + Before: x || -> aten.q || -> quantized.conv2d || -> quantized.linear || -> aten.dq || -> y + + After: x || -> qd.q -> qd.dq || -> aten.conv2d -> qd.q -> qd.dq || aten.linear -> qd.q -> qd.dq || -> y + + (qd == quantized_decomposed library, q = quantize, dq = dequantize) + ^ + | + getattr(w), getattr(b) from Conv2dParamPrepack + + During each iteration, the transformation spits out the transformed operator, its quantized output, + and its dequantized value together. We did this because dequantization need to use the + scale and zero point parameters from the quantization to recover the approximate original value. After each + iteration, the new dequantization node will be used as the input to the next node (e.g., dq2 -> linear). + + For operators like conv2d and linear, their weights and bias are packed in a quantized format in the ScriptObject. + During the transformation, we unpack those objects, get their dequantized tensor, populate those + as attributes to the module, and use getattr to access them. + + One exception in the transformation is conv_prepack and linear_prepack. Those calls pack + weight and bias constant tensors into ScriptObject, which are then used by subsequent conv2d or linear calls. + During transformation, we directly skip transforming conv_prepack or linear_prepack. We check whether ScriptObject to the + quantized::conv2d or linear is from conv_prepack or linear_prepack. If it is, we then inline those parameters + to the operator by converting them to a getattr fx.node. + + For prepacked::conv2d_clamp_run and prepacked::linear_clamp_run, we directly convert them to aten.conv2d and aten.linear + without the need of doing de/quantization. + + Three global variables defined are _INPUT_Q_DTYPE, _SCALE, _ZERO_POINT. _INPUT_Q_DTYPE determines the de/quantization + data type, which is the same across the entire program, but it only shows up in the very first quantization + call. 
_SCALE and _ZERO_POINT are used only when operators do not have those specified. E.g., mul.Scalar. + """ + + global _INPUT_Q_DTYPE + + quantized = False + + last_quantized_node = None + for node in gm.graph.nodes: + if isinstance(node.target, OpOverload): + with gm.graph.inserting_before(node): + namespace, opname = node.target.namespace, node.target._opname + if namespace == "quantized" and opname not in [ + "conv_prepack", + "linear_prepack", + ]: + quantized = True + fx_node = fx_transform_quantized_op_to_standard_op(gm, node) + node.replace_all_uses_with(fx_node) + last_quantized_node = fx_node + elif namespace == "prepacked": + quantized = True + fx_node = _transform_prepacked_op(gm, node) + node.replace_all_uses_with(fx_node) + last_quantized_node = fx_node + elif namespace == "aten" and opname == "quantize_per_tensor": + inp_node, scale_node, zero_point_node, dtype_node = node.args + dtype_node = fx_enum_to_dtype(gm, dtype_node) + _INPUT_Q_DTYPE = dtype_node + qmin_node, qmax_node = insert_qmin_qmax_node(gm, dtype_node) + q_fx_node = insert_quantized_node( + gm, + inp_node, + scale_node, + zero_point_node, + qmin_node, + qmax_node, + dtype_node, + torch.per_tensor_affine, + ) + dq_fx_node = insert_dequantized_node( + gm, + q_fx_node, + scale_node, + zero_point_node, + qmin_node, + qmax_node, + dtype_node, + None, + torch.per_tensor_affine, + ) + node.replace_all_uses_with(dq_fx_node) + last_quantized_node = dq_fx_node + elif namespace == "aten" and opname == "dequantize": + assert last_quantized_node is not None + node.replace_all_uses_with(last_quantized_node) + else: + last_quantized_node = node + + # Post-processing again to remove legacy ScriptObjects and quantizated tensors + # stored as attributes or in the buffer. This is used to clean up the GraphModule + # to not trigger tracing errors like missing __obj_flatten__ functions. 
+ def _clean_attr(mod: torch.nn.Module): + for submod in mod.modules(): + attr_names_to_clean = set() + for k, v in submod.__dict__.items(): + if isinstance(v, torch.ScriptObject): + attr_names_to_clean.add(k) + if k == "_buffers": + buffer_name_to_clean = set() + for b_name, b_value in v.items(): + if isinstance(b_value, torch.Tensor) and b_value.dtype in [ + torch.qint8, + torch.quint8, + ]: + buffer_name_to_clean.add(b_name) + for b_name in buffer_name_to_clean: + v.pop(b_name, None) + for attr_name in attr_names_to_clean: + delattr(submod, attr_name) + + if quantized: + """ + TODO: SetAttr + quantized ops will result incorrect program. This flag is used to temporarily + bypass test cases. + + The deadcode elimination pass is needed to remove legacy quantized ops. Otherwise, retracing + will throw errors. However, the current way of SetAttr does inplace update to attributes, so + this pass regard them as dead code and remove them. Below is an example of GraphModule before + and after the dead code elimination pass. 
+ + class GraphModule(torch.nn.Module): + def forward(self, x_1): + # No stacktrace found for following nodes + data = self.data; data = None + data_1 = self.data + add_tensor = torch.ops.aten.add.Tensor(data_1, x_1, alpha = 1); data_1 = None + data_2 = self.data + copy_ = torch_Tensor_copy_(data_2, add_tensor); data_2 = add_tensor = copy_ = None + data_3 = self.data + add_tensor_1 = torch.ops.aten.add.Tensor(x_1, data_3, alpha = 1); x_1 = data_3 = None + return add_tensor_1 + + class GraphModule(torch.nn.Module): + def forward(self, x_1): + # No stacktrace found for following nodes + data_3 = self.data + add_tensor_1 = torch.ops.aten.add.Tensor(x_1, data_3, alpha = 1); x_1 = data_3 = None + return add_tensor_1 + """ + gm.graph.eliminate_dead_code() + _clean_attr(gm) diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/replace_set_grad_with_hop_pass.py b/janus/lib/python3.10/site-packages/torch/_export/passes/replace_set_grad_with_hop_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..96104a83ce8b67b62d5aac1fca11cea395ccf2d5 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/passes/replace_set_grad_with_hop_pass.py @@ -0,0 +1,110 @@ +# mypy: allow-untyped-defs + +import torch +from torch._higher_order_ops.wrap import wrap_with_set_grad_enabled + +from ..utils import node_inline_, nodes_filter, nodes_first, nodes_map, sequential_split +from .replace_with_hop_pass_util import ( + _replace_with_hop_helper, + _replace_with_hop_pass_helper, + _sequential_split_and_maybe_inline_subgraphs_helper, +) + + +def _is_set_grad_enabled_node(node: torch.fx.Node): + return ( + node + and node.op == "call_function" + and node.target == torch._C._set_grad_enabled + ) + + +def _is_set_grad_enabled_sub_mod(node: torch.fx.Node, omit_if_same_with_ambient=False): + if node.op == "call_module": + assert isinstance(node.target, str) + subgm = getattr(node.graph.owning_module, node.target) + first_non_ph = nodes_first( + 
subgm.graph.nodes, lambda node: node.op != "placeholder" + ) + if ( + first_non_ph + and first_non_ph.op == "call_function" + and first_non_ph.target == torch._C._set_grad_enabled + ): + return ( + first_non_ph.args[0] != torch.is_grad_enabled() + if omit_if_same_with_ambient + else True + ) + return False + + +def _replace_with_hop(node: torch.fx.Node): + assert node.op == "call_module" + graph: torch.fx.Graph = node.graph + gm: torch.fx.GraphModule = graph.owning_module + assert isinstance(node.target, str) + sub_gm = getattr(gm, node.target) + sub_graph = sub_gm.graph + set_grad_nodes = nodes_filter(sub_graph.nodes, _is_set_grad_enabled_node) + if len(set_grad_nodes) > 0: + assert len(set_grad_nodes) == 1 + set_grad_node = set_grad_nodes[0] + _replace_with_hop_helper( + node, set_grad_node, _is_set_grad_enabled_node, wrap_with_set_grad_enabled + ) + sub_graph.erase_node(set_grad_node) + + +def _remove_set_grad_and_inline(node: torch.fx.Node): + assert node.op == "call_module" + graph: torch.fx.Graph = node.graph + gm: torch.fx.GraphModule = graph.owning_module + assert isinstance(node.target, str) + sub_gm = getattr(gm, node.target) + sub_graph = sub_gm.graph + nodes_map( + sub_graph.nodes, + lambda n: sub_graph.erase_node(n) if _is_set_grad_enabled_node(n) else n, + ) + node_inline_(node) + + +def _sequential_split_and_maybe_inline_subgraphs( + gm: torch.fx.GraphModule, graph_signature +): + """ + Helper function for replace_set_grad_with_hop_pass(). + Split the graph module into multiple subgraphs based on the set_grad_enabled nodes. + For each subgraph, decides whether to construct a HOO subgraph, or inline the calls + back into the parent graph module. + """ + need_replacing = any(_is_set_grad_enabled_node(node) for node in gm.graph.nodes) + if not need_replacing: + return gm, graph_signature + + # sequential_split returns a new graph module that could have different output + # args names. We need to fix the graph signature. 
+ new_gm = sequential_split(gm, _is_set_grad_enabled_node) + + def _maybe_inline_or_replace_with_hop(node: torch.fx.Node): + if _is_set_grad_enabled_sub_mod(node, omit_if_same_with_ambient=True): + _replace_with_hop(node) + else: + _remove_set_grad_and_inline(node) + + return _sequential_split_and_maybe_inline_subgraphs_helper( + new_gm, graph_signature, _maybe_inline_or_replace_with_hop + ) + + +def replace_set_grad_with_hop_pass(gm: torch.fx.GraphModule, graph_signature): + """ + Split gm into sub-graph-modules using `sequential_split_and_maybe_inline_subgraphs`, and + then recursively call itself on each of the submodules. + """ + return _replace_with_hop_pass_helper( + gm, + graph_signature, + _sequential_split_and_maybe_inline_subgraphs, + ) diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py b/janus/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..6723ac5f86a6cbf703e886318ee44d5ebfc2e13f --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py @@ -0,0 +1,65 @@ +# mypy: allow-untyped-defs +from typing import Dict, Optional +import torch +from torch._ops import OpOverload, HigherOrderOperator +from torch._export.error import InternalError +from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse + + +__all__ = ["ReplaceViewOpsWithViewCopyOpsPass"] + + +_NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS: Dict[OpOverload, OpOverload] = { + torch.ops.aten._unsafe_view.default: torch.ops.aten.view_copy.default, +} + + +def is_view_op(schema: torch._C.FunctionSchema) -> bool: + if len(schema.arguments) == 0: + return False + alias_info = schema.arguments[0].alias_info + return (alias_info is not None) and (not alias_info.is_write) + + +def get_view_copy_of_view_op(schema: torch._C.FunctionSchema) -> Optional[OpOverload]: + if 
is_view_op(schema) and schema.name.startswith("aten::"): + view_op_name = schema.name.split("::")[1] + view_op_overload = ( + schema.overload_name + if schema.overload_name != "" + else "default" + ) + view_copy_op_name = view_op_name + "_copy" + if not hasattr(torch.ops.aten, view_copy_op_name): + raise InternalError(f"{schema.name} is missing a view_copy variant") + + view_copy_op_overload_packet = getattr(torch.ops.aten, view_copy_op_name) + + if not hasattr(view_copy_op_overload_packet, view_op_overload): + raise InternalError(f"{schema.name} is missing a view_copy variant") + + return getattr(view_copy_op_overload_packet, view_op_overload) + + return None + + +class ReplaceViewOpsWithViewCopyOpsPass(_ExportPassBaseDeprecatedDoNotUse): + """ + Our backend expects pure functional operators. For efficiency + purposes, we keep view ops around while functionalizing the exported + program. This pass replaces view ops with view copy ops for backends that + need AOT memory planning. + """ + def call_operator(self, op, args, kwargs, meta): + if op in _NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS: + return super().call_operator( + (_NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS[op]), args, kwargs, meta + ) + + if isinstance(op, HigherOrderOperator): + return super().call_operator(op, args, kwargs, meta) + + if view_copy_op := get_view_copy_of_view_op(op._schema): + return super().call_operator(view_copy_op, args, kwargs, meta) + + return super().call_operator(op, args, kwargs, meta) diff --git a/janus/lib/python3.10/site-packages/torch/_export/passes/replace_with_hop_pass_util.py b/janus/lib/python3.10/site-packages/torch/_export/passes/replace_with_hop_pass_util.py new file mode 100644 index 0000000000000000000000000000000000000000..b2ca55025bd0a8e0abbe59b28012131bddc1e51f --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/passes/replace_with_hop_pass_util.py @@ -0,0 +1,178 @@ +# mypy: allow-untyped-defs + +import contextlib +import copy +import operator +from typing 
import Callable + +import torch +from torch._ops import HigherOrderOperator + +from ..utils import node_replace_, nodes_map + + +def _replace_with_hop_helper( + node: torch.fx.Node, + enter_block_node: torch.fx.Node, + node_filter: Callable, + wrap_hoo: HigherOrderOperator, +): + graph: torch.fx.Graph = node.graph + gm: torch.fx.GraphModule = graph.owning_module + assert isinstance(node.target, str) + sub_gm = getattr(gm, node.target) + + def set_hoo_node_meta(call_func_node): + call_func_node.meta["nn_module_stack"] = copy.copy( + enter_block_node.meta.get("nn_module_stack", {}) + ) + call_func_node.meta["torch_fn"] = ( + f"{wrap_hoo.__name__}", + f"{wrap_hoo.__class__.__name__}.{wrap_hoo.__name__}", + ) + if isinstance(output_args, (tuple, list)): + call_func_node.meta["val"] = tuple(arg.meta["val"] for arg in output_args) + elif isinstance(output_args, torch.fx.Node): + call_func_node.meta["val"] = (output_args.meta["val"],) + + with graph.inserting_before(node): + get_attr_node = graph.get_attr(node.target) + get_attr_node.meta["nn_module_stack"] = copy.copy( + enter_block_node.meta.get("nn_module_stack", {}) + ) + output_node = next(iter(reversed(sub_gm.graph.nodes)), None) + # Split_module pass intentially doesn't add output node + # if the graph doesn't return anything. 
+ # TODO (tmanlaibaatar) Figure out if this is right behaviour + # for split_module + if isinstance(output_node, torch.fx.Node) and output_node.op != "output": + output_node = None + if output_node is not None: + assert len(output_node.args) == 1 + output_args = output_node.args[0] + enter_block_node_args = enter_block_node.args + if isinstance(output_args, (tuple, list)): + call_func_node = graph.call_function( + wrap_hoo, + (*enter_block_node_args, get_attr_node, *node.args), + {}, + ) + # Create the metadata + set_hoo_node_meta(call_func_node) + node_replace_(node, call_func_node) + + # Rename the name of getitem nodes to the actual name of its contents + # for passing verifier and better readability, also propagate metadata + for get_item_node in call_func_node.users.keys(): + idx: int = get_item_node.args[1] # type: ignore[assignment] + output_node = output_args[idx] + get_item_node._rename(output_node.name) + get_item_node.meta = output_node.meta + + elif isinstance(output_args, torch.fx.Node): + call_func_node = graph.create_node( + "call_function", + wrap_hoo, + (*enter_block_node_args, get_attr_node, *node.args), + {}, + output_args.name, + ) + # Modify the subgraph to output a singleton list. + output_node.args = ((output_args,),) + # Add in an extra `getitem(wrap_hoo, 0)` node to the toplevel graph. 
+ get_item_node = graph.create_node( + "call_function", + operator.getitem, + (call_func_node, 0), + {}, + ) + # Create the metadata + get_item_node.meta = output_args.meta + set_hoo_node_meta(call_func_node) + node_replace_(node, get_item_node) + else: + raise NotImplementedError( + f"repalce_with_hop_pass doesnt' support output type {type(output_args)}" + ) + else: + # TODO (shangdiy): remove this line, since the export graph can be non-functional + node.graph.erase_node(node) + + +def _sequential_split_and_maybe_inline_subgraphs_helper( + new_gm: torch.fx.GraphModule, + graph_signature, + maybe_inline_or_replace_with_hop: Callable[[torch.fx.Node], None], +): + """ + Helper function for replacing graph nodse with higher order nodes. + For each subgraph in `new_gm`, decides whether to construct a HOO subgraph, or inline the calls + back into the parent graph module, depending on `maybe_inline_or_replace_with_hop`. + """ + # new_gm is a new graph module that could have different output args names. + # We need to fix the graph signature. + replace_ctx = contextlib.nullcontext() + new_signature = None + if graph_signature is not None: + # Cannot deep copy a real ScriptObject, which is referenced + # in the FakeScriptObject. Copy should be good enough to guard + # against accidental mutation to original graph_signature. 
+ new_signature = copy.copy(graph_signature) + new_gm_out_node = next(reversed(new_gm.graph.find_nodes(op="output"))) + assert new_gm_out_node.op == "output" and len(new_gm_out_node.args[0]) == len( + new_signature.output_specs + ) + for arg_node, out_spec in zip( + new_gm_out_node.args[0], new_signature.output_specs + ): + if arg_node is None: + assert out_spec.arg.value is None + elif ( + isinstance(arg_node, torch.fx.Node) + and out_spec.arg.name != arg_node.name + ): + out_spec.arg.name = arg_node.name + + replace_ctx = new_gm._set_replace_hook(new_signature.get_replace_hook()) # type: ignore[assignment] + + with replace_ctx: + nodes_map( + list(new_gm.graph.nodes), + lambda node: ( + maybe_inline_or_replace_with_hop(node) + if node.op == "call_module" + else node + ), + ) + new_gm.recompile() + return new_gm, new_signature + + +def _replace_with_hop_pass_helper( + gm: torch.fx.GraphModule, + graph_signature, + sequential_split_and_maybe_inline_subgraphs: Callable, +): + """ + Split gm into sub-graph-modules using `sequential_split_and_maybe_inline_subgraphs`, and + then recursively call itself on each of the submodules. 
+ """ + new_gm, new_signature = sequential_split_and_maybe_inline_subgraphs( + gm, graph_signature + ) + # recursively call + for node in new_gm.graph.nodes: + if node.op == "get_attr": + subgm = getattr(new_gm, node.target) + if not isinstance(subgm, torch.fx.GraphModule): + continue + new_subgm, _ = _replace_with_hop_pass_helper( + subgm, + None, + sequential_split_and_maybe_inline_subgraphs, + ) + setattr(new_gm, node.target, new_subgm) + + new_gm.recompile() + new_gm.graph.lint() + return new_gm, new_signature diff --git a/janus/lib/python3.10/site-packages/torch/_export/tools.py b/janus/lib/python3.10/site-packages/torch/_export/tools.py new file mode 100644 index 0000000000000000000000000000000000000000..a4b96f909d1642f888546d1068d31d1a5f4ee9f1 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/tools.py @@ -0,0 +1,146 @@ +# mypy: allow-untyped-defs +import logging +import warnings +from typing import Any, Dict, Iterable, Optional, Tuple + +import torch +import torch.export +import torch.export._trace +from torch._utils_internal import log_export_usage + + +log = logging.getLogger(__name__) + +__all__ = ["report_exportability"] + + +def _generate_inputs_for_submodules( + model: torch.nn.Module, + target_submodules: Iterable[str], + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, +) -> Dict[str, Tuple[Any, Any]]: + """ + Generate inputs for targeting submdoules in the given model. Note that if two submodules refer to the same obj, this + function doesn't work. + + Args: + model: root model. + inputs: inputs to the root model. + target_submodules: submodules that we want to generate inputs for. + + Returns: + A dict that maps from submodule name to its inputs. 
+ """ + kwargs = kwargs or {} + + handles = [] + results = {} + submodule_to_names = {mod: name for name, mod in model.named_modules()} + + def pre_forward(module, module_args, module_kwargs): + results[submodule_to_names[module]] = (module_args, module_kwargs) + + try: + for name, mod in model.named_modules(): + if name in target_submodules: + handles.append( + mod.register_forward_pre_hook(pre_forward, with_kwargs=True) + ) + model(*args, **kwargs) + except Exception as e: + warnings.warn( + f"Failed to generate submodule inputs because of the following error:\n{e}" + ) + finally: + for h in handles: + h.remove() + return results + + +def report_exportability( + mod: torch.nn.Module, + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + *, + strict: bool = True, + pre_dispatch: bool = False, +) -> Dict[str, Optional[Exception]]: + """ + Report exportability issues for a module in one-shot. + + Args: + mod: root module. + args: args to the root module. + kwargs: kwargs to the root module. + Returns: + A dict that maps from submodule name to the exception that was raised when trying to export it. + `None` means the module is exportable without issue. 
+ Sample output: + { + '': UnsupportedOperatorException(func=), + 'submod_1': UnsupportedOperatorException(func=), + 'submod_2': None + } + """ + + log_export_usage(event="export.report_exportability") + + kwargs = kwargs or {} + + all_submod_names = [name for name, _ in mod.named_modules() if name != ""] + submod_inputs = _generate_inputs_for_submodules(mod, all_submod_names, args, kwargs) + + tried_module_types = set() + report: Dict[str, Optional[Exception]] = {} + + def try_export(module, module_name, args, kwargs): + nonlocal submod_inputs, report, strict, pre_dispatch, tried_module_types + + if type(module) in tried_module_types: + return + tried_module_types.add(type(module)) + + if args is not None or kwargs is not None: + try: + torch.export._trace._export( + module, + args, + kwargs, + strict=strict, + pre_dispatch=pre_dispatch, + ) + report[module_name] = None + log.info("Successfully exported `%s`", module_name) + return + except Exception as e: + short_msg = repr(e).split("\n")[0] + log.warning( + "Failed exporting `%s` with exception: %s", module_name, short_msg + ) + report[module_name] = e + + for name, submod in module.named_children(): + sub_module_name = name if module_name == "" else f"{module_name}.{name}" + + submod_args, submod_kwargs = submod_inputs.get( + sub_module_name, (None, None) + ) + + try_export(submod, sub_module_name, submod_args, submod_kwargs) + + return + + try_export(mod, "", args, kwargs) + + unique_issues = set() + for exception in report.values(): + if exception is not None: + key = repr(exception).split("\\n")[0] + unique_issues.add(key) + + log.warning("Found %d export issues:", len(unique_issues)) + for issue in unique_issues: + log.warning(issue) + + return report diff --git a/janus/lib/python3.10/site-packages/torch/_export/utils.py b/janus/lib/python3.10/site-packages/torch/_export/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e085e18b68a20753efa38d6f85e6eadf78f8895a --- /dev/null +++ 
b/janus/lib/python3.10/site-packages/torch/_export/utils.py @@ -0,0 +1,893 @@ +# mypy: allow-untyped-defs +import ast +import dataclasses +import inspect +import math +import operator +import re +from inspect import Parameter +from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, TYPE_CHECKING + +import torch +from torch._guards import detect_fake_mode +from torch._subclasses.fake_tensor import FakeTensor + + +if TYPE_CHECKING: + from torch._export.passes.lift_constants_pass import ConstantAttrMap + from torch.export import ExportedProgram + from torch.export.graph_signature import ExportGraphSignature + +from torch.export.graph_signature import InputKind, OutputKind +from torch.utils._pytree import ( + _register_pytree_node, + Context, + FlattenFunc, + FromDumpableContextFn, + GetAttrKey, + KeyPath, + keystr, + MappingKey, + SequenceKey, + ToDumpableContextFn, + tree_flatten_with_path, + UnflattenFunc, +) + + +placeholder_prefixes = { + InputKind.USER_INPUT: "", + InputKind.PARAMETER: "p_", + InputKind.BUFFER: "b_", + InputKind.CONSTANT_TENSOR: "c_", + InputKind.CUSTOM_OBJ: "obj_", + InputKind.TOKEN: "token", +} + + +def _collect_and_set_constant_attrs( + graph_signature, constants, mod +) -> "ConstantAttrMap": + # the exported module will store constants & non-persistent buffers such that + # retracing treats them as persistent buffers, so we inform the constants lifting pass + # and overwrite the new graph signature using the previous program. This is intended to only be used + # in run_decompositions where we still have access to original EP. 
+ from torch._export.passes.lift_constants_pass import ConstantAttrMap + + constant_attrs = ConstantAttrMap() + non_persistent_buffers = { + spec.target + for spec in graph_signature.input_specs + if spec.kind == InputKind.BUFFER and not spec.persistent + } + for name, value in constants.items(): + if name in non_persistent_buffers: + continue + # recursive getattr + _mod = mod + *atoms, attr = name.split(".") + for atom in atoms: + _mod = getattr(_mod, atom) + # remove as buffer, reassign as constant/non-persistent buffer + _mod._buffers.pop(attr, None) + setattr(_mod, attr, value) + constant_attrs.add(value, name) + return constant_attrs + + +def _overwrite_signature_for_non_persistent_buffers( + old_sig: "ExportGraphSignature", new_sig: "ExportGraphSignature" +): + # overwrite signature for non-persistent buffers + non_persistent_buffers = { + spec.target + for spec in old_sig.input_specs + if spec.kind == InputKind.BUFFER and not spec.persistent + } + + for spec in new_sig.input_specs: + if spec.kind == InputKind.BUFFER and spec.target in non_persistent_buffers: + spec.persistent = False + return new_sig + + +def _collect_param_buffer_metadata(mod: torch.fx.GraphModule) -> Dict[str, Any]: + """ + Param/buffer metadata needs to be saved before lowering to aten IR + because aten IR lifts them, as a result, automatic preservation doesn't work. + This is intended to be called on the strict mode tracing right before lowering to + aten IR OR run_decomposition pass. 
+ """ + params_buffers_to_node_meta = {} + + def _getattr(model: torch.fx.GraphModule, attr_name: str): + *prefix, field = attr_name.split(".") + t = model + for item in prefix: + t = getattr(t, item, None) # type: ignore[assignment] + assert t is not None + + return getattr(t, field) + + for node in mod.graph.nodes: + target = node.target + meta = node.meta + if node.op == "call_module": + submodule = _getattr(mod, target) + if isinstance(submodule, torch.nn.Module): + for name, _ in submodule.named_parameters( + recurse=True, remove_duplicate=False + ): + params_buffers_to_node_meta[target + "." + name] = meta + + for name, _ in submodule.named_buffers( + recurse=True, remove_duplicate=False + ): + params_buffers_to_node_meta[target + "." + name] = meta + + if node.op == "get_attr": + submodule = _getattr(mod, target) + if not isinstance(submodule, torch.fx.GraphModule): + params_buffers_to_node_meta[target] = meta + + # If the call_function uses param as input, we also need to update params' meta + # with this call_function node's meta. 
+ # This is basically the same flow as torch.fx.traceback.preserve_meta() + if node.op == "call_function" and not isinstance( + node.target, torch._ops.HigherOrderOperator + ): + for arg in node._input_nodes: + if arg.op == "get_attr": + for entry in torch.fx.proxy._COPY_META_FIELDS: + if entry in meta: + params_buffers_to_node_meta[arg.target][entry] = meta[entry] + + return params_buffers_to_node_meta + + +def _populate_param_buffer_metadata_to_new_gm( + params_buffers_to_node_meta: Dict[str, Any], + gm: torch.fx.GraphModule, + new_sig: "ExportGraphSignature", +) -> None: + """ + Given that we collected param'buffer metadata before, we put them back in + newly traced graph module + """ + # Don't copy over nn_module_stack, stack_trace metadata for params/buffers nodes + for metadata in params_buffers_to_node_meta.values(): + metadata.pop("nn_module_stack", None) + metadata.pop("stack_trace", None) + + for node in gm.graph.nodes: + if node.op == "placeholder": + if node.target in new_sig.inputs_to_parameters: + param_name = new_sig.inputs_to_parameters[node.target] + if param_name in params_buffers_to_node_meta: + for k, v in params_buffers_to_node_meta[param_name].items(): + node.meta[k] = v + if node.target in new_sig.inputs_to_buffers: + buffer_name = new_sig.inputs_to_buffers[node.target] + if buffer_name in params_buffers_to_node_meta: + for k, v in params_buffers_to_node_meta[buffer_name].items(): + node.meta[k] = v + + +def _get_shape_env_from_gm(gm: torch.fx.GraphModule): + vals = [ + node.meta["val"] + for node in gm.graph.nodes + if node.meta.get("val", None) is not None + ] + + fake_mode = _detect_fake_mode_from_gm(gm) + if fake_mode is not None: + return fake_mode.shape_env + for v in vals: + if isinstance(v, torch.SymInt): + return v.node.shape_env + + +def _rename_without_collisions( + name_map: Dict[str, str], + orig_name: str, + name: str, + is_placeholder: bool = False, +): + """ + Renames nodes to avoid name collisions, with suffixing. 
+ name_map: map from original name to new name + orig_name: mapping key + name: candidate name (potentially suffixed, e.g. mul_2) + is_placeholder: if the node is a placeholder, avoid detecting suffix + """ + if name in name_map.values(): + # non-placeholder nodes may be suffixed with the count + # instead of adding another suffix, we will try to increment it + match = re.match(r"(.*)_(\d+)", name) + if match and not is_placeholder: + name, n = match.group(1), int(match.group(2)) + else: + n = 0 + while (dup_name := f"{name}_{n + 1}") in name_map.values(): + n += 1 + name_map[orig_name] = dup_name + else: + name_map[orig_name] = name + return name_map[orig_name] + + +def _check_input_constraints_for_graph( + input_placeholders: List[torch.fx.Node], flat_args_with_path, range_constraints +): + def get_keystr(key_path: KeyPath) -> str: + """For a given index into the flat_args, return a human readable string + describing how to access it, e.g. "*args["foo"][0].bar" + """ + # Prefix the keypath with "*args" or "**kwargs" to make it clearer where + # the arguments come from. Ultimately we ought to serialize the + # original arg names for the best error message here. 
+ args_kwargs_key_path = key_path[0] + assert isinstance(args_kwargs_key_path, SequenceKey) + if args_kwargs_key_path.idx == 0: + return f"*args{keystr(key_path[1:])}" + else: + kwarg_key = key_path[1] + assert isinstance(kwarg_key, MappingKey) + name = str(kwarg_key)[1:-1] # get rid of the enclosed [] + return f"{name}{keystr(key_path[2:])}" + + import sympy + + from torch._export.passes.add_runtime_assertions_for_constraints_pass import ( + _convert_range_to_int, + ) + from torch.utils._sympy.solve import try_solve + + if len(flat_args_with_path) != len(input_placeholders): + raise RuntimeError( + "Unexpected number of inputs " + f"(expected {len(input_placeholders)}, got {len(flat_args_with_path)})" + ) + # NOTE: export already guarantees that the same symbol is used in metadata + # for all InputDims related by equality constraints, so we can just unify + # symbols with given input dimension values to check equality constraints. + unification_map: Dict[sympy.Symbol, Any] = {} + for (key_path, arg), node in zip(flat_args_with_path, input_placeholders): + node_val = node.meta.get("val") + if isinstance(node_val, FakeTensor): + if not isinstance(arg, torch.Tensor): + raise RuntimeError( + f"Expected input at {get_keystr(key_path)} to be a tensor, but got {type(arg)}", + ) + + if len(node_val.shape) != len(arg.shape): + raise RuntimeError( + f"Unexpected number of dimensions in input at {get_keystr(key_path)}.shape " + f"(expected {node_val.shape}, got {arg.shape})" + ) + + for j, (arg_dim, node_dim) in enumerate(zip(arg.shape, node_val.shape)): + # TODO(avik): Assert the following property in the IR verifier: + # node_dim is either an int or a SymInt containing an int or a unary sympy.Expr + if ( + isinstance(node_dim, torch.SymInt) + and len(node_dim.node.expr.free_symbols) == 1 + ): + symbol = next(iter(node_dim.node.expr.free_symbols)) + if symbol in unification_map: + existing_dim = node_dim.node.expr.subs(unification_map) + if arg_dim != existing_dim: + raise 
RuntimeError( + f"Expected input at {get_keystr(key_path)}.shape[{j}] to be equal to " + f"{existing_dim}, but got {arg_dim}", + ) + else: + if ( + isinstance(arg_dim, torch.SymInt) + and not arg_dim.node.expr.is_number + ): + # This can happen when, say, arg is a fake tensor. + # We do not run checks on symbolic shapes of fake inputs as + # such checks can affect the shape env. + pass + else: + if isinstance(node_dim.node.expr, sympy.Symbol): + # Short cut for try_solve below. Also useful in cases where + # sympy.Eq(node_dim.node.expr, arg_dim) would evaluate to False + # purely because symbol is constrained to be size-like, + # e.g., when node_dim.node.expr = symbol and arg_dim = 0. + unification_map[symbol] = int(arg_dim) + else: + solution = try_solve( + sympy.Eq(node_dim.node.expr, arg_dim), symbol + ) + if solution is None: + raise RuntimeError( # noqa: B904 + f"Expected input {node.name}.shape[{j}] = {arg_dim} to be " + f"of the form {node_dim.node.expr}, where {symbol} is an integer" + ) + else: + unification_map[symbol] = int(solution[1]) + + if node_dim.node.expr in range_constraints: + min_val, max_val = _convert_range_to_int( + range_constraints[node_dim.node.expr] + ) + # NOTE: we allow dimensions to be 0/1 at runtime + if min_val > 2: + if arg_dim < min_val: + raise RuntimeError( + f"Expected input at {get_keystr(key_path)}.shape[{j}] to be >= " + f"{min_val}, but got {arg_dim}", + ) + if max_val < math.inf: + if arg_dim > max_val: + raise RuntimeError( + f"Expected input at {get_keystr(key_path)}.shape[{j}] to be <= " + f"{max_val}, but got {arg_dim}", + ) + else: + if arg_dim != node_dim: + if ( + isinstance(node_dim, torch.SymInt) + and not node_dim.node.expr.is_number + ): + # this means we deferred a guard from export analysis to runtime, let this pass + # we'll add a runtime assert checking equality to this replacement expression + continue + raise RuntimeError( + f"Expected input at {get_keystr(key_path)}.shape[{j}] to be equal to " + 
f"{node_dim}, but got {arg_dim}", + ) + elif isinstance(node_val, (int, float, str)): + if type(arg) != type(node_val) or arg != node_val: + raise RuntimeError( + f"Expected input at {get_keystr(key_path)} to be equal to {node_val}, but got {arg}", + ) + + +def register_dataclass_as_pytree_node( + cls: Type[Any], + flatten_fn: Optional[FlattenFunc] = None, + unflatten_fn: Optional[UnflattenFunc] = None, + *, + serialized_type_name: Optional[str] = None, + to_dumpable_context: Optional[ToDumpableContextFn] = None, + from_dumpable_context: Optional[FromDumpableContextFn] = None, + return_none_fields: bool = False, +) -> None: + assert dataclasses.is_dataclass( + cls + ), f"Only dataclasses can be registered with this function: {cls}" + + def default_flatten_fn(obj: Any) -> Tuple[List[Any], Context]: + flattened = [] + flat_names = [] + none_names = [] + for f in dataclasses.fields(obj): + name, val = f.name, getattr(obj, f.name) + if val is not None or return_none_fields: + flattened.append(val) + flat_names.append(name) + else: + none_names.append(name) + return flattened, [flat_names, none_names] + + def default_unflatten_fn(values: Iterable[Any], context: Context) -> Any: + flat_names, none_names = context + return cls(**dict(zip(flat_names, values)), **dict.fromkeys(none_names)) + + def default_flatten_fn_with_keys(obj: Any) -> Tuple[List[Any], Context]: + flattened, (flat_names, none_names) = flatten_fn(obj) # type: ignore[misc] + return [(MappingKey(k), v) for k, v in zip(flat_names, flattened)], flat_names + + flatten_fn = flatten_fn if flatten_fn is not None else default_flatten_fn + unflatten_fn = unflatten_fn if unflatten_fn is not None else default_unflatten_fn + + if (to_dumpable_context is None) ^ (from_dumpable_context is None): + raise ValueError( + f"Both to_dumpable_context and from_dumpable_context for {cls} must " + "be None or registered." 
+ ) + + _register_pytree_node( + cls, + flatten_fn, + unflatten_fn, + serialized_type_name=serialized_type_name, + flatten_with_keys_fn=default_flatten_fn_with_keys, + to_dumpable_context=to_dumpable_context, + from_dumpable_context=from_dumpable_context, + ) + + +def is_param(program: "ExportedProgram", node: torch.fx.Node) -> bool: + """ + Checks if the given node is a parameter within the exported program + """ + + return node.name in program.graph_signature.inputs_to_parameters + + +def get_param( + program: "ExportedProgram", + node: torch.fx.Node, +) -> Optional[torch.nn.Parameter]: + """ + Returns the parameter associated with the given node in the exported program. + Returns None if the node is not a parameter within the exported program + """ + + if is_param(program, node): + parameter_name = program.graph_signature.inputs_to_parameters[node.name] + return program.state_dict[parameter_name] + + return None + + +def is_buffer(program: "ExportedProgram", node: torch.fx.Node) -> bool: + """ + Checks if the given node is a buffer within the exported program + """ + + return node.name in program.graph_signature.inputs_to_buffers + + +def get_buffer( + program: "ExportedProgram", + node: torch.fx.Node, +) -> Optional[torch.Tensor]: + """ + Returns the buffer associated with the given node in the exported program. 
+ Returns None if the node is not a buffer within the exported program + """ + + if is_buffer(program, node): + buffer_name = program.graph_signature.inputs_to_buffers[node.name] + if buffer_name in program.graph_signature.non_persistent_buffers: + return program.constants[buffer_name] + else: + return program.state_dict[buffer_name] + + return None + + +def is_lifted_tensor_constant( + program: "ExportedProgram", + node: torch.fx.Node, +) -> bool: + """ + Checks if the given node is a lifted tensor constant within the exported program + """ + + return node.name in program.graph_signature.inputs_to_lifted_tensor_constants + + +def get_lifted_tensor_constant( + program: "ExportedProgram", + node: torch.fx.Node, +) -> Optional[torch.Tensor]: + """ + Returns the lifted tensor constant associated with the given node in the exported program. + Returns None if the node is not a lifted tensor constant within the exported program + """ + + if is_lifted_tensor_constant(program, node): + lifted_tensor_name = program.graph_signature.inputs_to_lifted_tensor_constants[ + node.name + ] + return program.constants[lifted_tensor_name] + + return None + + +def sequential_split(gm: torch.fx.GraphModule, node_call_back) -> torch.fx.GraphModule: + """ + sequential_split creates a new graph module that splits the input graph module into multiple submodules + based on the node_call_back. It doesn't mutate the input graph module. The node_call_back should return + True if the node is a delimiter. Delimiter will be the first node in the next submodule. + """ + from torch.fx.passes.split_module import split_module + + split_map = {} + split_id = 0 + for node in gm.graph.nodes: + if node_call_back(node): + split_id += 1 + split_map[node] = split_id + + new_gm = split_module( + gm, + gm, + lambda node: split_map[node], + keep_original_order=True, + keep_original_node_name=True, + ) + # Keep the codegen from original graph module to preserve e.g. pytree info. 
+ new_gm.graph._codegen = gm.graph._codegen + new_gm.recompile() + return new_gm + + +def nodes_filter(nodes: List[torch.fx.Node], node_call_back) -> List[torch.fx.Node]: + """Returns the nodes that match the node_call_back as a list.""" + return [node for node in nodes if node_call_back(node)] + + +def nodes_first( + nodes: List[torch.fx.Node], node_call_back=None +) -> Optional[torch.fx.Node]: + """ + Returns the first node that matches the node_call_back. If no node matches, returns None. + When node_call_back is None, returns the first node in the node list. + """ + ret = nodes_filter(nodes, node_call_back if node_call_back else lambda node: True) + if len(ret) > 0: + return ret[0] + return None + + +def nodes_count(nodes: List[torch.fx.Node], node_call_back) -> int: + """Returns the number of nodes that match the node_call_back.""" + return len(nodes_filter(nodes, node_call_back)) + + +def nodes_map(nodes: List[torch.fx.Node], node_call_back) -> List[torch.fx.Node]: + """ + Sequentially visit the nodes list and invoke node_call_back on each element. + Returns the nodes list after the node_call_back is invoked on each element. + """ + for node in nodes: + node_call_back(node) + return nodes + + +def node_replace_(old_node: torch.fx.Node, new_node: torch.fx.Node) -> None: + """ + Replace all uses of old_node with new_node. + """ + old_node.replace_all_uses_with(new_node) + old_node.users.clear() + old_node.graph.erase_node(old_node) + + +def node_inline_(call_mod_node: torch.fx.Node) -> None: + """ + Inline the submodule of the given node into the parent module. + Note: we only support the case where submodule takes tensors inputs. 
+ """ + assert call_mod_node.op == "call_module" + gm = call_mod_node.graph.owning_module + + assert isinstance(call_mod_node.target, str) + sub_gm = getattr(gm, call_mod_node.target) + + phs = (node for node in sub_gm.graph.nodes if node.op == "placeholder") + body = ( + node for node in sub_gm.graph.nodes if node.op not in ("placeholder", "output") + ) + output = [node for node in sub_gm.graph.nodes if node.op == "output"] + + for ph, arg in zip(phs, call_mod_node.args): + assert isinstance(arg, torch.fx.Node) + node_replace_(ph, arg) + + with gm.graph.inserting_before(call_mod_node): + for node in body: + new_node = gm.graph.node_copy(node) + node_replace_(node, new_node) + + if len(output) > 0: + assert len(output) == 1 and len(output[0].args) == 1 + new_output = output[0].args[0] + + if isinstance(new_output, torch.fx.Node): + # Clear the users of the output node and set + # the users to be the users of original call_module node. + new_output.users.clear() + node_replace_(call_mod_node, new_output) + elif isinstance(new_output, (list, tuple)): + # Pop subgraph output node from users. + for node in new_output: + node.users.pop(output[0]) + + # Inline the get_item calls for the output node. + get_item_users = nodes_filter( + list(call_mod_node.users.keys()), + lambda node: node.op == "call_function" + and node.target == operator.getitem, + ) + # get_item_node.args[1] is the idx referring to new_output[idx] + nodes_map( + get_item_users, + lambda get_item_node: node_replace_( + get_item_node, + new_output[get_item_node.args[1]], + ), + ) + call_mod_node.graph.erase_node(call_mod_node) + else: + raise NotImplementedError( + f"Unsupported output type {type(new_output)}. Expect it to be a Node or a list/tuple of Nodes." 
+ ) + else: + call_mod_node.graph.erase_node(call_mod_node) + + gm.delete_all_unused_submodules() + gm.recompile() + return gm + + +def _get_torch_jit_trace_forward_signature(mod: torch.nn.Module): + """ + Get source code and parse argument names using AST. The function returns + a signature of the forward() function. + + # TODO: Directly provide inspect.signature compatible TS-d module. + """ + ast_mod = ast.parse(mod.code) + ast_func_def: ast.FunctionDef = ast_mod.body[0] # type: ignore[assignment] + + # FIXME(jiashenc): TorchScript should only allow positional or keywords arguments. + arg_type_map = {"args": Parameter.POSITIONAL_OR_KEYWORD} + + # Traverse all argument types in AST tree and create associated parameters. + param_list = [] + for arg_type, param_type in arg_type_map.items(): + arg_name_list = [a.arg for a in getattr(ast_func_def.args, arg_type)] + for arg_name in arg_name_list: + if arg_name == "self": + continue # Skip self argument. + param_list.append(inspect.Parameter(arg_name, param_type)) + + return inspect.Signature(parameters=param_list) + + +def _bind_signature_to_inputs(mod, fake_args, fake_kwargs): + if isinstance(mod, (torch.jit.ScriptModule, torch.jit.TracedModule)): + sig = _get_torch_jit_trace_forward_signature(mod) + + # Sanity check for placeholder names coming from TorchScript. + assert len(sig.parameters) == len(fake_args) + len(fake_kwargs), ( + "Arguments other than POSITIONAL_OR_KEYWORD kinds in forward() " + "are not supported in _get_torch_jit_trace_forward_signature" + ) + else: + sig = inspect.signature(mod.forward) + + return sig.bind(*fake_args, **fake_kwargs).arguments + + +def _name_hoo_subgraph_placeholders(gm: torch.fx.GraphModule) -> None: + """ + Propagate placeholder names from the top-level graph into HigherOrderOp subgraphs, + and handle collisions with non-placeholders by count suffixing. 
+ Different HOO subgraph types have different input schemas, so we first enumerate them + and gather the top-level named placeholder nodes. + """ + # gather all HOO subgraphs and their top-level named placeholder nodes + subgraph_ph_tuples: List[Tuple[torch.fx.GraphModule, List[torch.fx.Node]]] = [] + for node in gm.graph.nodes: + if node.op == "call_function" and isinstance( + node.target, torch._ops.HigherOrderOperator + ): + # HOO subgraphs have varying input schemas, so we enumerate them there + if node.target._name == "cond": + _, true_graph, false_graph, cond_args = node._args + subgraph_ph_tuples.append((getattr(gm, true_graph.target), cond_args)) + subgraph_ph_tuples.append((getattr(gm, false_graph.target), cond_args)) + elif node.target._name == "wrap_with_set_grad_enabled": + subgraph, phs = node._args[1], node._args[2:] + subgraph_ph_tuples.append((getattr(gm, subgraph.target), phs)) + elif node.target._name == "map_impl": + body_graph, array, args = node._args + subgraph_ph_tuples.append( + (getattr(gm, body_graph.target), array + args) + ) + + # propagate names + for subgraph, hoo_phs in subgraph_ph_tuples: + name_map: Dict[str, str] = {} + for i, node in enumerate(subgraph.graph.nodes): + if i < len(hoo_phs): # placeholder, retain name + name_map[node.name] = hoo_phs[i].name + node.name = node.target = hoo_phs[i].name + else: # non-placeholder, check for collisions + node.name = _rename_without_collisions(name_map, node.name, node.name) + + # recurse and recompile + _name_hoo_subgraph_placeholders(subgraph) + subgraph.recompile() + + +def placeholder_naming_pass( + gm: torch.fx.GraphModule, + export_graph_signature: "ExportGraphSignature", + mod: torch.nn.Module, + fake_args, + fake_kwargs, + fake_params_buffers, + constants: Dict[str, Any], +) -> None: + """ + This pass is run at the end of _export_non_strict() to assign better placeholder node names: + - User inputs: + These follow the signature of mod.forward(), e.g. 
forward(x, y) produces nodes x, y. + For nested inputs from dictionaries, lists, tuples, or dataclasses, + the names are a concatenation of the path to the tensor. + e.g. x = { + 'a': torch.randn(), + 'b': [torch.randn(), torch.randn()] + } + produces nodes x_a, x_b_0, x_b_1. + - Parameters/buffers/constants/custom objects: + These follow the FQN of the object, prefixed by "p", "b", "c", "obj" respectively. + e.g. self.bar.l0.weight produces "p_bar_l0_weight". + - Effect tokens: + These are named token, token_1, ... + """ + + def _strip_name(x): + if x.startswith("L__self___"): + x = x[len("L__self___") :] + elif x.startswith("self_"): + x = x[len("self_") :] + x = re.sub(r"[^a-zA-Z0-9]", "_", x) + return x + + def _extract_pytree_key(x): + if isinstance(x, MappingKey): + x = re.sub(r"[^a-zA-Z0-9]", "_", str(x.key)) + return x + elif isinstance(x, SequenceKey): + return str(x.idx) + elif isinstance(x, GetAttrKey): + return x.name + else: + raise RuntimeError(f"Pytree key of type {type(x)} not handled for {x}") + + name_map: Dict[str, str] = {} + + # map user input names with mod.forward() signature + combined_args = _bind_signature_to_inputs(mod, fake_args, fake_kwargs) + + flat_args_with_path, _ = tree_flatten_with_path(combined_args) + user_input_names = [ + spec.arg.name + for spec in export_graph_signature.input_specs + if spec.kind == InputKind.USER_INPUT + ] + + # use pytree path to name nested user inputs + for (arg_path, arg), user_input_name in zip(flat_args_with_path, user_input_names): + if user_input_name: + _rename_without_collisions( + name_map, + user_input_name, + placeholder_prefixes[InputKind.USER_INPUT] + + "_".join(_extract_pytree_key(x).lower() for x in arg_path), + is_placeholder=True, + ) + + # use graph signature input specs to map param/buffer/constant names + # name effect tokens as token, token_1, ... 
(these aren't visible to user) + for spec in export_graph_signature.input_specs: + if spec.kind == InputKind.USER_INPUT: + continue + if spec.kind == InputKind.TOKEN: + base_name = "" + else: + base_name = _strip_name(spec.target).lower() + base_name = re.sub(r"[^a-zA-Z0-9]", "_", base_name) + + _rename_without_collisions( + name_map, + spec.arg.name, + placeholder_prefixes[spec.kind] + base_name, + is_placeholder=True, + ) + + # handle naming collisions with call_function/get_attr inputs. + # here, we want to prioritize user input names over call_function names + # e.g. not have forward(self, mul): lead to a placeholder node called mul_13, + # so we increment the suffix of call_function nodes as needed + for node in gm.graph.nodes: + if node.op == "placeholder": + continue + _rename_without_collisions(name_map, node.name, node.name) + + # assign new node names + for node in gm.graph.nodes: + if node.op == "placeholder": + assert node.name in name_map + node.name = node.target = name_map[node.name] + elif node.name in name_map: + node.name = name_map[node.name] + + # propagate names to higher order op subgraphs + _name_hoo_subgraph_placeholders(gm) + + # re-generate graph module code + gm.recompile() + + # modify graph signature (input specs, output specs, user input mutations) + for spec in export_graph_signature.input_specs: + assert spec.arg.name in name_map + spec.arg.name = name_map[spec.arg.name] + if ( # handle targets for custom objects + spec.kind == InputKind.CUSTOM_OBJ and spec.target in name_map + ): + spec.target = name_map[spec.target][4:] # strip obj_ prefix + + for spec in export_graph_signature.output_specs: + if spec.arg.name in name_map: + spec.arg.name = name_map[spec.arg.name] + if spec.kind == OutputKind.USER_INPUT_MUTATION and spec.target in name_map: + spec.target = name_map[spec.target] + + # rename keys in constants dict for custom objects + for name in list(constants.keys()): + constant = constants[name] + if name in name_map and not 
isinstance( + constant, torch.Tensor + ): # rename custom objects with generic names + new_name = name_map[name] + if ( + new_name != name + and re.match(r"arg(\d+)_1", name) + and new_name != placeholder_prefixes[InputKind.CUSTOM_OBJ] + name + ): + constants[new_name] = constant + del constants[name] + + +def remove_proxy_from_state_dict(state_dict: Dict, in_place: bool) -> Dict: + """ + If `in_place` is false, return a new copy of `state_dict` with "proxy" removed from `v.__dict__`. + `v` is the values in the dictionary. + If `in_place` is true, modify `state_dict` in place. + """ + if in_place: + for k, v in state_dict.items(): + if hasattr(v, "proxy"): + delattr(state_dict[k], "proxy") + return state_dict + else: + new_state_dict = {} + for k, v in state_dict.items(): + if hasattr(v, "proxy"): + new_state_dict[k] = v.clone().detach() + else: + new_state_dict[k] = v + return new_state_dict + + +def _detect_fake_mode_from_gm( + gm: torch.fx.GraphModule, +) -> torch._subclasses.fake_tensor.FakeTensorMode: + """ + For a given graph module, we look at the "val" of placeholder nodes to find the fake inputs. + Additionally, if gm doesn't have placeholders, we further look at the "example_value" or "val" of other nodes. + If no fake mode is found, we return None for fake_mode. 
+ """ + + fake_inps: List[torch.Tensor] = [] + fake_vals: List[torch.Tensor] = [] + for node in gm.graph.nodes: + if node.op == "placeholder" and "val" in node.meta: + fake_val = node.meta["val"] + if fake_val is not None and isinstance(fake_val, torch.Tensor): + fake_inps.append(fake_val) + elif len(fake_inps) == 0 and ( + "example_value" in node.meta or "val" in node.meta + ): + fake_val = None + if "example_value" in node.meta: + fake_val = node.meta["example_value"] + elif "val" in node.meta: + fake_val = node.meta["val"] + if fake_val is not None and isinstance(fake_val, torch.Tensor): + fake_vals.append(fake_val) + + return detect_fake_mode(fake_inps + fake_vals) diff --git a/janus/lib/python3.10/site-packages/torch/_export/verifier.py b/janus/lib/python3.10/site-packages/torch/_export/verifier.py new file mode 100644 index 0000000000000000000000000000000000000000..68c5bcaae39af69f5527e8dcf0e08ed49bad4563 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/verifier.py @@ -0,0 +1,456 @@ +# mypy: allow-untyped-defs +import inspect +import math +import operator +from collections.abc import Iterable +from typing import Any, Dict, final, List, Tuple, Type, TYPE_CHECKING + +import torch +from torch._ops import HigherOrderOperator, OpOverload +from torch._subclasses.fake_tensor import FakeTensor +from torch.export.graph_signature import ( + CustomObjArgument, + InputKind, + SymIntArgument, + TensorArgument, + TokenArgument, +) +from torch.fx import GraphModule + +if TYPE_CHECKING: + from torch.export.exported_program import ExportedProgram + +class SpecViolationError(Exception): + pass + + +def is_functional(op: OpOverload) -> bool: + return not op._schema.is_mutable + + +def _check_has_fake_tensor(node: torch.fx.Node) -> None: + # TODO(angelayi): remove this in favor of _check_val + return _check_val(node) + + +def _check_val(node: torch.fx.Node) -> None: + from torch.fx.experimental.symbolic_shapes import SymBool, SymFloat, SymInt + + def 
_check_correct_val(val): + if val is None: + return True + elif isinstance(val, (int, bool, str, float)): + return True + elif isinstance(val, (torch.memory_format, torch.dtype, torch.device, torch.layout)): + return True + elif isinstance(val, (FakeTensor, torch.Tensor)): # TODO(zhxchen17) Remove Tensor. + return True + elif isinstance(val, (SymInt, SymFloat, SymBool)): + return True + elif isinstance(val, CustomObjArgument): + return True + elif isinstance(val, Iterable): + return all(_check_correct_val(x) for x in val) + return False + + def _no_returns(op): + if not isinstance(op, OpOverload): + return False + return len(op._schema.returns) == 0 + + if "val" not in node.meta: + if node.op == "call_function" and _no_returns(node.target): + return + raise SpecViolationError(f"Node.meta {node.name} is missing val field.") + + val = node.meta["val"] + if not _check_correct_val(val): + raise SpecViolationError(f"Node.meta {node.name} has invalid val field {val}") + + +def _check_torch_fn(node: torch.fx.Node) -> None: + torch_fn = node.meta.get("torch_fn") + if torch_fn is None: + raise SpecViolationError(f"Unable to find torch_fn metadata for node {node.name}") + if ( + not isinstance(torch_fn, tuple) and + isinstance(torch_fn[0], str) and + isinstance(torch_fn[1], str) + ): + raise SpecViolationError(f"Node.meta {node.name} has invalid torch_fn field {torch_fn}") + +class _VerifierMeta(type): + _registry: Dict[str, Type['Verifier']] = {} + + def __new__(metacls, name, bases, attrs): + if bases: + if "check" in attrs or "_check_graph_module" in attrs: + raise SyntaxError("Overriding method check is not allowed.") + assert "dialect" in attrs and attrs["dialect"] != "ATEN" + else: + assert "check" in attrs + assert "_check_graph_module" in attrs + assert attrs["dialect"] == "ATEN" + + assert isinstance(attrs["dialect"], str) + ret = type.__new__(metacls, name, bases, attrs) + metacls._registry[attrs["dialect"]] = ret # type: ignore[assignment] + return ret + +def 
getattr_recursive(obj: Any, target: str) -> Any: + target_atoms = target.split('.') + attr_itr = obj + for i, atom in enumerate(target_atoms): + if not hasattr(attr_itr, atom): + raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}") + attr_itr = getattr(attr_itr, atom) + return attr_itr + + +class Verifier(metaclass=_VerifierMeta): + dialect = "ATEN" + + def allowed_builtin_ops(self) -> List: + return [ + operator.getitem, + operator.add, + operator.mul, + operator.sub, + operator.truediv, + operator.ge, + operator.le, + operator.gt, + operator.lt, + operator.eq, + operator.ne, + operator.floordiv, + operator.mod, + operator.and_, + operator.or_, + operator.not_, + operator.pow, + operator.neg, + operator.abs, + math.ceil, + math.floor, + math.trunc, + ] + + def allowed_op_types(self) -> Tuple[Type[Any], ...]: + return (OpOverload, HigherOrderOperator) + + def allowed_getattr_types(self) -> Tuple[Type[Any], ...]: + return (torch.fx.GraphModule,) + + def check_valid_op(self, op): + pass + + def check_additional(self, gm: GraphModule) -> None: + """ + Additional checks that are specific to some dialects. + """ + + @final + def check(self, ep: "ExportedProgram") -> None: + self._check_graph_module(ep.graph_module) + _verify_exported_program_module_call_graph(ep) + _verify_exported_program_signature(ep) + + @final + def _check_graph_module(self, gm: torch.fx.GraphModule) -> None: + def _allowed_getattr_types() -> Tuple[Type[Any], ...]: + ret = self.allowed_getattr_types() + assert not any(t is object for t in ret) + return ret + + def _check_valid_op(op) -> None: + def _allowed_builtin_ops() -> List: + ret = self.allowed_builtin_ops() + assert all(inspect.isbuiltin(op) for op in ret) + return ret + + def _allowed_op_types() -> Tuple[Type[Any], ...]: + ret = self.allowed_op_types() + assert not any(t is object for t in ret) + return ret + + # TODO Remove this allowlist. 
+ _allowed_torch_functions = ( + torch.autograd.grad_mode.set_grad_enabled, + torch.sym_int, + torch.sym_float, + torch.sym_ite, + torch.sym_max, + torch.sym_min, + torch.sym_not, + torch.sym_sqrt, + # TODO (tmanlaibaatar) + # Predispatch export is able to contain autograd ops. + # These will be modeled as HOO later + torch._C._set_grad_enabled, + ) + + if not isinstance(op, _allowed_op_types()): + if op not in _allowed_builtin_ops() and op not in _allowed_torch_functions: + raise SpecViolationError( + f"Operator '{op}' is not an allowed operator type: {_allowed_op_types()}\n" + f"Valid builtin ops: {_allowed_builtin_ops()}" + f"Valid torch functions: {_allowed_torch_functions}" + ) + + if isinstance(op, OpOverload): + # All ops functional + # TODO (tmanlaibaatar) more proper way is needed here + if self.dialect != "TRAINING" and not is_functional(op): + raise SpecViolationError( + f"operator '{op}' is not functional" + ) + self.check_valid_op(op) + + for mod in gm.modules(): + if not isinstance(mod, torch.fx.GraphModule): + continue + + mod.graph.lint() + for node in mod.graph.nodes: + # TODO(T140410192): should have fake tensor for all dialects + if node.op in {"call_module", "call_method"}: + raise SpecViolationError( + f"call_module is not valid: got a class '{node.target}' ", + ) + + elif node.op == "call_function": + _check_val(node) + + _check_valid_op(node.target) + + elif node.op == "get_attr": + if not isinstance(node.target, str): + raise SpecViolationError( + f"Expected get_attr target to be string, but got {type(node.target)}" + ) + + attr = getattr_recursive(mod, node.target) + if isinstance(attr, torch.nn.Module): + def _is_type(name, ty): + return isinstance(getattr(attr, name, None), ty) + if type(attr).__name__ == "LoweredBackendModule": + if _is_type("backend_id", str) \ + and _is_type("processed_bytes", bytes) \ + and _is_type("compile_specs", list) \ + and hasattr(attr, "original_module"): + continue + else: + backend_id = getattr(attr, 
"backend_id", None) + processed_bytes = getattr(attr, "processed_bytes", None) + compile_specs = getattr(attr, "compile_specs", None) + raise SpecViolationError( + f"Invalid get_attr type {type(attr)}. \n" + f"LoweredBackendModule fields: " + f"backend_id(str) : {type(backend_id)}, " + f"processed_bytes(bytes) : {type(processed_bytes)}, " + f"compile_specs(list) : {type(compile_specs)}" + ) + + if not isinstance(attr, _allowed_getattr_types()): + raise SpecViolationError( + f"Invalid get_attr type {type(attr)}. \n" + f"Valid get_attr types: {_allowed_getattr_types()}" + ) + + + elif node.op == "placeholder": + _check_val(node) + # TODO(zhxchen17) + # elif node.op == "output": + # _check_flattened_outputs() + + self.check_additional(gm) + + +class TrainingIRVerifier(Verifier): + dialect = "TRAINING" + + +def _verify_exported_program_module_call_graph(exported_program) -> None: + module_call_graph = exported_program.module_call_graph + nodes = { + node.name for node in exported_program.graph.nodes + } + for entry in module_call_graph: + if entry.signature is not None: + for arg in entry.signature.inputs: + if arg.name and arg.name not in nodes: + raise SpecViolationError( + f"Input {arg.name} does not exist in the graph." + ) + for arg in entry.signature.outputs: + if arg.name and arg.name not in nodes: + raise SpecViolationError( + f"Output {arg.name} does not exist in the graph." 
+ ) + + +def _verify_exported_program_signature(exported_program) -> None: + # Check ExportedProgram signature matches + gs = exported_program.graph_signature + + # Check every node in the signature exists in the graph + input_node_names = [node.name for node in exported_program.graph.nodes if node.op == "placeholder"] + + if len(input_node_names) != len(gs.input_specs): + raise SpecViolationError( + f"Number of graph inputs ({len(input_node_names)}) " + f"does not match number of inputs in the graph signature ({len(gs.input_specs)})" + ) + + for input_spec, node in zip(gs.input_specs, input_node_names): + if isinstance(input_spec.arg, (TensorArgument, SymIntArgument)): + if input_spec.arg.name != node: + raise SpecViolationError( + f"Input spec name {input_spec.arg.name} does not match node name {node}" + ) + + if input_spec.kind == InputKind.USER_INPUT: + continue + + elif input_spec.kind == InputKind.PARAMETER: + if not isinstance(input_spec.arg, TensorArgument): + raise SpecViolationError( + f"Parameter {input_spec.name} is not a tensor argument. Found {input_spec.arg} instead." + ) + if input_spec.target is None: + raise SpecViolationError( + f"InputSpec for {input_spec.name} has no target." + ) + + param = input_spec.target + if param not in exported_program.state_dict: + raise SpecViolationError( + f"Parameter {param} is not in the state dict." + ) + + if not isinstance(exported_program.state_dict[param], torch.nn.Parameter): + raise SpecViolationError( + f"State dict entry for parameter {param} is not an instance of torch.nn.Parameter." + ) + + elif input_spec.kind == InputKind.BUFFER: + if not isinstance(input_spec.arg, TensorArgument): + raise SpecViolationError( + f"Buffer {input_spec.name} is not a tensor argument. Found {input_spec.arg} instead." + ) + if input_spec.target is None: + raise SpecViolationError( + f"InputSpec for {input_spec.name} has no target." 
+ ) + + buffer = input_spec.target + if input_spec.persistent is None: + raise SpecViolationError( + f"Buffer {buffer} is missing a persistence flag" + ) + + if input_spec.persistent is True and buffer not in exported_program.state_dict: + raise SpecViolationError( + f"Buffer {buffer} is not in the state dict." + ) + + if input_spec.persistent is False and buffer in exported_program.state_dict: + raise SpecViolationError( + f"Non-persistent buffer {buffer} is in the state dict, it should not be." + ) + elif input_spec.kind == InputKind.CONSTANT_TENSOR: + if not isinstance(input_spec.arg, TensorArgument): + raise SpecViolationError( + f"Constant tensor {input_spec.name} is not a tensor argument. Found {input_spec.arg} instead." + ) + if input_spec.target is None: + raise SpecViolationError( + f"InputSpec for {input_spec.name} has no target." + ) + + tensor_const = input_spec.target + if tensor_const not in exported_program.constants: + raise SpecViolationError( + f"Constant tensor {tensor_const} is not in the constants dictionary." + ) + elif input_spec.kind == InputKind.CUSTOM_OBJ: + if not isinstance(input_spec.arg, CustomObjArgument): + raise SpecViolationError( + f"Custom object {input_spec.name} is not a custom object argument. Found {input_spec.arg} instead." + ) + if input_spec.target is None: + raise SpecViolationError( + f"InputSpec for {input_spec.name} has no target." + ) + + custom_obj = input_spec.target + if custom_obj not in exported_program.constants: + raise SpecViolationError( + f"Custom object {custom_obj} is not in the constants dictionary." + ) + elif input_spec.kind == InputKind.TOKEN: + if not isinstance(input_spec.arg, TokenArgument): + raise SpecViolationError( + f"Constant tensor {input_spec.name} is not a tensor argument. Found {input_spec.arg} instead." + ) + else: + raise SpecViolationError( + f"Unknown InputKind {input_spec.kind}." 
+ ) + + # Check outputs + output_node = list(exported_program.graph.nodes)[-1] + assert output_node.op == "output" + output_nodes = [ + arg.name if isinstance(arg, torch.fx.Node) else arg + for arg in output_node.args[0] + ] + + if len(output_nodes) != len(gs.output_specs): + raise SpecViolationError( + f"Number of output nodes {len(output_nodes)} is different " + "Than the number of outputs specified by the graph signature: \n" + f"Number of mutated buffers: {len(gs.buffers_to_mutate)}. \n" + f"Number of user outputs: {len(gs.user_outputs)}. \n" + ) + + num_tokens = len(gs.output_tokens) + end = len(gs.buffers_to_mutate) + len(gs.user_inputs_to_mutate) + num_tokens + mutate_nodes: List[str] = output_nodes[num_tokens:end] + user_output_nodes = output_nodes[end:end + len(gs.user_outputs)] + + for mutation_node in mutate_nodes: + if mutation_node in gs.buffers_to_mutate: + if gs.buffers_to_mutate[mutation_node] not in gs.buffers: + raise SpecViolationError( + f"Buffer output {mutation_node} does not point to a buffer that exists. \n" + f"Dict of buffers that are mutated, in order: {gs.buffers_to_mutate} \n" + f"Buffer nodes available: {gs.buffers} \n" + ) + elif mutation_node in gs.user_inputs_to_mutate: + if gs.user_inputs_to_mutate[mutation_node] not in gs.user_inputs: + raise SpecViolationError( + f"User input output {mutation_node} does not point to a user input that exists. \n" + f"Dict of user inputs that are mutated, in order: {gs.user_inputs_to_mutate} \n" + f"User input nodes available: {gs.user_inputs} \n") + else: + raise SpecViolationError( + f"Mutation node {mutation_node} is neither a buffer nor a user input. 
" + f"Buffers to mutate: {gs.buffers_to_mutate}, User inputs to mutate: {gs.user_inputs_to_mutate}" + ) + + for user_output_node, user_output_name in zip(user_output_nodes, gs.user_outputs): + if user_output_node != user_output_name: + raise SpecViolationError( + f"User output {user_output_node} is not in the correct " + "order or is not found in the " + f"exported program's user_output list: {gs.user_outputs}. " + ) + + +def load_verifier(dialect: str) -> Type[Verifier]: + if dialect == "ATEN" or dialect == "": + return _VerifierMeta._registry.get(dialect, Verifier) + return _VerifierMeta._registry[dialect] diff --git a/janus/lib/python3.10/site-packages/torch/_export/wrappers.py b/janus/lib/python3.10/site-packages/torch/_export/wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..d57ff46de41c8f5961a859d3d1e2871984929b8d --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/_export/wrappers.py @@ -0,0 +1,121 @@ +# mypy: allow-untyped-defs +from contextlib import contextmanager + +import torch +import torch._custom_ops +from torch._C import DispatchKey +from torch._higher_order_ops.strict_mode import strict_mode +from torch._higher_order_ops.utils import autograd_not_implemented +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree +from torch.utils import _pytree as pytree + + +class ExportTracepoint(HigherOrderOperator): + def __init__(self): + super().__init__("_export_tracepoint") + + def __call__(self, *args, **kwargs): + return super().__call__(*args, **kwargs) + + +_export_tracepoint = ExportTracepoint() + + +@_export_tracepoint.py_impl(ProxyTorchDispatchMode) +def export_tracepoint_dispatch_mode(mode, *args, **kwargs): + p_args, p_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, (args, kwargs)) + proxy = mode.tracer.create_proxy( + "call_function", _export_tracepoint, p_args, p_kwargs 
+ ) + return track_tensor_tree(args, proxy, constant=None, tracer=mode.tracer) + + +@_export_tracepoint.py_impl(FakeTensorMode) +def export_tracepoint_fake_tensor_mode(mode, *args, **kwargs): + with mode: + return args + + +@_export_tracepoint.py_functionalize_impl +def export_tracepoint_functional(ctx, *args, **kwargs): + unwrapped_args = ctx.unwrap_tensors(args) + unwrapped_kwargs = ctx.unwrap_tensors(kwargs) + + with ctx.redispatch_to_next(): + out = _export_tracepoint(*unwrapped_args, **unwrapped_kwargs) + return ctx.wrap_tensors(out) + + +_export_tracepoint.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(_export_tracepoint, deferred_error=True) +) + + +@_export_tracepoint.py_impl(DispatchKey.CPU) +def export_tracepoint_cpu(*args, **kwargs): + return args + + +def _wrap_submodule(mod, path, module_call_specs): + assert isinstance(mod, torch.nn.Module) + assert path != "" + submodule = mod + for name in path.split("."): + if not hasattr(submodule, name): + raise RuntimeError(f"Couldn't find submodule at path {path}") + submodule = getattr(submodule, name) + + def update_module_call_signatures(path, in_spec, out_spec): + if path in module_call_specs: + assert module_call_specs[path]["in_spec"] == in_spec + assert module_call_specs[path]["out_spec"] == out_spec + module_call_specs[path] = {"in_spec": in_spec, "out_spec": out_spec} + + def check_flattened(flat_args): + for a in flat_args: + if not (isinstance(a, (torch.Tensor, str, int, float, bool)) or a is None): + raise AssertionError( + f"Only Tensors or scalars are supported as pytree flattened inputs, got: {a}" + ) + + def pre_hook(module, args, kwargs): + flat_args, in_spec = pytree.tree_flatten((args, kwargs)) + check_flattened(flat_args) + flat_args = _export_tracepoint(*flat_args, kind="module_call_inputs", path=path) + args, kwargs = pytree.tree_unflatten(flat_args, in_spec) + return args, kwargs + + def post_hook(module, args, kwargs, res): + _, in_spec = pytree.tree_flatten((args, kwargs)) + 
flat_res, out_spec = pytree.tree_flatten(res) + check_flattened(flat_res) + flat_res = _export_tracepoint(*flat_res, kind="module_call_outputs", path=path) + update_module_call_signatures(path, in_spec, out_spec) + return pytree.tree_unflatten(flat_res, out_spec) + + pre_handle = submodule.register_forward_pre_hook(pre_hook, with_kwargs=True) + post_handle = submodule.register_forward_hook(post_hook, with_kwargs=True) + return pre_handle, post_handle + + +@contextmanager +def _wrap_submodules(f, preserve_signature, module_call_signatures): + handles = [] + + try: + for path in preserve_signature: + handles.extend(_wrap_submodule(f, path, module_call_signatures)) + yield + finally: + for handle in handles: + handle.remove() + + +def _mark_strict_experimental(cls): + def call(self, *args): + return strict_mode(self, args) + + cls.__call__ = call + return cls diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebc49a9a77c744e8ac6478ead38d8cada1633212 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_gpu_trace.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_gpu_trace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12f7b5042d6fa1320edfdbda183329c82428ad74 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_gpu_trace.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d983319493f5294208673aff011787efbeb5857 Binary 
files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dcb5650b63109d3aecf0e3ab3e4add434b81967 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4c4474506d5100bdca3ff62c1643435dd59f92a Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/comm.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/comm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2482d75350034944fe3d2efcd4e7a658540c0d5a Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/comm.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f221147a50cf74950d5932d5ab8d470009e0cf3 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/gds.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/gds.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..31fc700608e0a99bedd76eeb9956adc6b4caeae2 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/gds.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00a15f3ffc34cd97e757c2a6d156b64c107ae1d6 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bbc1eb0bd75f328a577361f3821997da08844a4 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2586add840201ee18f9a8c62e6a6421a105bab5 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb7e668bd847865f0fdf37b8695bfa91d27c283f Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..ee934b76fdde709e1e6efeb1967c1af6a78b8899 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54460441475333a6bc55e9da6703c327e0b0cce1 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c39b1602392cc51eb99cbf3b0d7abad3bbd60cc0 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a2a0bcfda817c9be018eeae131b2a32dac21e7c Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39fd976acd39ae2e0f6b475bc888add3cd4a7ebe Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/tunable.cpython-310.pyc 
b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/tunable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3da6bef07ce0944d61815af35056d60665c354a0 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/__pycache__/tunable.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/amp/__init__.py b/janus/lib/python3.10/site-packages/torch/cuda/amp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..74520496372f549356a26c2c70b637b9e3ea4d4d --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/cuda/amp/__init__.py @@ -0,0 +1,12 @@ +from .autocast_mode import autocast, custom_bwd, custom_fwd +from .common import amp_definitely_not_available +from .grad_scaler import GradScaler + + +__all__ = [ + "amp_definitely_not_available", + "autocast", + "custom_bwd", + "custom_fwd", + "GradScaler", +] diff --git a/janus/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1297d46ac8fbd2d767f1f61d0ac224fd0530c20f Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f3a663676f54dde16894101385f469c2f62593b Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..666ef4b1ff5d1f46b08974d4471e2438f7f93fcc Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1750601dbbe7896931a62c320044a2ce0514b833 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py b/janus/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..d52ff7cf672bbdaaf4ac31a06a21cac8aabcfe0f --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py @@ -0,0 +1,90 @@ +# mypy: allow-untyped-defs +import functools +from typing import Any +from typing_extensions import deprecated + +import torch + + +__all__ = ["autocast", "custom_fwd", "custom_bwd"] + + +class autocast(torch.amp.autocast_mode.autocast): + r"""See :class:`torch.autocast`. + + ``torch.cuda.amp.autocast(args...)`` is deprecated. Please use ``torch.amp.autocast("cuda", args...)`` instead. + """ + + @deprecated( + "`torch.cuda.amp.autocast(args...)` is deprecated. 
" + "Please use `torch.amp.autocast('cuda', args...)` instead.", + category=FutureWarning, + ) + def __init__( + self, + enabled: bool = True, + dtype: torch.dtype = torch.float16, + cache_enabled: bool = True, + ): + if torch._jit_internal.is_scripting(): + self._enabled = enabled + self.device = "cuda" + self.fast_dtype = dtype + return + super().__init__( + "cuda", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled + ) + + def __enter__(self): + if torch._jit_internal.is_scripting(): + return self + return super().__enter__() + + # TODO: discuss a unified TorchScript-friendly API for autocast + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override] + if torch._jit_internal.is_scripting(): + return + return super().__exit__(exc_type, exc_val, exc_tb) + + def __call__(self, func): + if torch._jit_internal.is_scripting(): + return func + return super().__call__(func) + + +# Preserved only for BC reasons +@deprecated( + "`torch.cuda.amp.autocast_mode._cast(value, dtype)` is deprecated. " + "Please use `torch.amp.autocast_mode._cast(value, 'cuda', dtype)` instead.", + category=FutureWarning, +) +def _cast(value, dtype): + return torch.amp.autocast_mode._cast(value, "cuda", dtype) + + +@deprecated( + "`torch.cuda.amp.custom_fwd(args...)` is deprecated. " + "Please use `torch.amp.custom_fwd(args..., device_type='cuda')` instead.", + category=FutureWarning, +) +def custom_fwd(fwd=None, *, cast_inputs=None): + """ + ``torch.cuda.amp.custom_fwd(args...)`` is deprecated. Please use + ``torch.amp.custom_fwd(args..., device_type='cuda')`` instead. + """ + return functools.partial(torch.amp.custom_fwd, device_type="cuda")( + fwd=fwd, cast_inputs=cast_inputs + ) + + +@deprecated( + "`torch.cuda.amp.custom_bwd(args...)` is deprecated. " + "Please use `torch.amp.custom_bwd(args..., device_type='cuda')` instead.", + category=FutureWarning, +) +def custom_bwd(bwd): + """ + ``torch.cuda.amp.custom_bwd(args...)`` is deprecated. 
Please use + ``torch.amp.custom_bwd(args..., device_type='cuda')`` instead. + """ + return functools.partial(torch.amp.custom_bwd, device_type="cuda")(bwd) diff --git a/janus/lib/python3.10/site-packages/torch/cuda/amp/common.py b/janus/lib/python3.10/site-packages/torch/cuda/amp/common.py new file mode 100644 index 0000000000000000000000000000000000000000..915a9b4f4a9ca6c147abefd7c8ab1891ee5a8179 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/cuda/amp/common.py @@ -0,0 +1,11 @@ +# mypy: allow-untyped-defs +from importlib.util import find_spec + +import torch + + +__all__ = ["amp_definitely_not_available"] + + +def amp_definitely_not_available(): + return not (torch.cuda.is_available() or find_spec("torch_xla")) diff --git a/janus/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py b/janus/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py new file mode 100644 index 0000000000000000000000000000000000000000..62e2020073c8ed99f7295edd1aaea4c54d815f63 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py @@ -0,0 +1,38 @@ +from typing_extensions import deprecated + +import torch + +# We need to keep this unused import for BC reasons +from torch.amp.grad_scaler import OptState # noqa: F401 + + +__all__ = ["GradScaler"] + + +class GradScaler(torch.amp.GradScaler): + r""" + See :class:`torch.amp.GradScaler`. + ``torch.cuda.amp.GradScaler(args...)`` is deprecated. Please use ``torch.amp.GradScaler("cuda", args...)`` instead. + """ + + @deprecated( + "`torch.cuda.amp.GradScaler(args...)` is deprecated. 
" + "Please use `torch.amp.GradScaler('cuda', args...)` instead.", + category=FutureWarning, + ) + def __init__( + self, + init_scale: float = 2.0**16, + growth_factor: float = 2.0, + backoff_factor: float = 0.5, + growth_interval: int = 2000, + enabled: bool = True, + ) -> None: + super().__init__( + "cuda", + init_scale=init_scale, + growth_factor=growth_factor, + backoff_factor=backoff_factor, + growth_interval=growth_interval, + enabled=enabled, + ) diff --git a/janus/lib/python3.10/site-packages/torch/cuda/graphs.py b/janus/lib/python3.10/site-packages/torch/cuda/graphs.py new file mode 100644 index 0000000000000000000000000000000000000000..b5de9f73df726cc2d6e0fd5bee2bd178dcf6aa89 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/cuda/graphs.py @@ -0,0 +1,491 @@ +# mypy: allow-untyped-defs +import gc +import typing + +import torch + +from .._utils import _dummy_type + + +if not hasattr(torch._C, "_CudaStreamBase"): + # Define dummy base classes + torch._C.__dict__["_CUDAGraph"] = _dummy_type("_CUDAGraph") + torch._C.__dict__["_graph_pool_handle"] = _dummy_type("_graph_pool_handle") + torch._C.__dict__["_cuda_isCurrentStreamCapturing"] = _dummy_type( + "_cuda_isCurrentStreamCapturing" + ) + +from torch._C import ( # noqa: F401 + _cuda_isCurrentStreamCapturing, + _CUDAGraph, + _graph_pool_handle, +) + + +def is_current_stream_capturing(): + r"""Return True if CUDA graph capture is underway on the current CUDA stream, False otherwise. + + If a CUDA context does not exist on the current device, returns False without initializing the context. + """ + return _cuda_isCurrentStreamCapturing() + + +# Python shim helps Sphinx process docstrings more reliably. +def graph_pool_handle(): + r"""Return an opaque token representing the id of a graph memory pool. + + See :ref:`Graph memory management`. + + .. warning:: + This API is in beta and may change in future releases. 
+ """ + return _graph_pool_handle() + + +# Python shim helps Sphinx process docstrings more reliably. +class CUDAGraph(torch._C._CUDAGraph): + r"""Wrapper around a CUDA graph. + + .. warning:: + This API is in beta and may change in future releases. + """ + + def __new__(cls): + return super().__new__(cls) + + def capture_begin(self, pool=None, capture_error_mode="global"): + r"""Begin capturing CUDA work on the current stream. + + Typically, you shouldn't call ``capture_begin`` yourself. + Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`, + which call ``capture_begin`` internally. + + Arguments: + pool (optional): Token (returned by :func:`~torch.cuda.graph_pool_handle` or + :meth:`other_Graph_instance.pool()`) that hints this graph may share memory + with the indicated pool. See :ref:`Graph memory management`. + capture_error_mode (str, optional): specifies the cudaStreamCaptureMode for the graph capture stream. + Can be "global", "thread_local" or "relaxed". During cuda graph capture, some actions, such as cudaMalloc, + may be unsafe. "global" will error on actions in other threads, "thread_local" will only error for + actions in the current thread, and "relaxed" will not error on these actions. Do NOT change this setting + unless you're familiar with `cudaStreamCaptureMode `_ + """ # noqa: B950 + super().capture_begin(pool=pool, capture_error_mode=capture_error_mode) + + def capture_end(self): + r"""End CUDA graph capture on the current stream. + + After ``capture_end``, ``replay`` may be called on this instance. + + Typically, you shouldn't call ``capture_end`` yourself. + Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`, + which call ``capture_end`` internally. 
+ """ + super().capture_end() + + def replay(self): + r"""Replay the CUDA work captured by this graph.""" + super().replay() + + def reset(self): + r"""Delete the graph currently held by this instance.""" + super().reset() + + def pool(self): + r"""Return an opaque token representing the id of this graph's memory pool. + + This id can optionally be passed to another graph's ``capture_begin``, + which hints the other graph may share the same memory pool. + """ + return super().pool() + + def enable_debug_mode(self): + r"""Enable debugging mode for CUDAGraph.debug_dump.""" + return super().enable_debug_mode() + + def debug_dump(self, debug_path): + r""" + Arguments: + debug_path (required): Path to dump the graph to. + + Calls a debugging function to dump the graph if the debugging is + enabled via CUDAGraph.enable_debug_mode() + """ + return super().debug_dump(debug_path) + + +class graph: + r"""Context-manager that captures CUDA work into a :class:`torch.cuda.CUDAGraph` object for later replay. + + See :ref:`CUDA Graphs ` for a general introduction, + detailed use, and constraints. + + Arguments: + cuda_graph (torch.cuda.CUDAGraph): Graph object used for capture. + pool (optional): Opaque token (returned by a call to :func:`~torch.cuda.graph_pool_handle()` or + :meth:`other_Graph_instance.pool()`) hinting this graph's capture + may share memory from the specified pool. See :ref:`Graph memory management`. + stream (torch.cuda.Stream, optional): If supplied, will be set as the current stream in the context. + If not supplied, ``graph`` sets its own internal side stream as the current stream in the context. + capture_error_mode (str, optional): specifies the cudaStreamCaptureMode for the graph capture stream. + Can be "global", "thread_local" or "relaxed". During cuda graph capture, some actions, such as cudaMalloc, + may be unsafe. 
"global" will error on actions in other threads, "thread_local" will only error for + actions in the current thread, and "relaxed" will not error on actions. Do NOT change this setting + unless you're familiar with `cudaStreamCaptureMode `_ + + .. note:: + For effective memory sharing, if you pass a ``pool`` used by a previous capture and the previous capture + used an explicit ``stream`` argument, you should pass the same ``stream`` argument to this capture. + + .. warning:: + This API is in beta and may change in future releases. + + .. _cudaStreamCaptureMode: + https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html#group__CUDART__STREAM_1g9d0535d93a214cbf126835257b16ba85 + """ # noqa: B950 + + default_capture_stream: typing.Optional["torch.cuda.Stream"] = None + + def __init__( + self, + cuda_graph, + pool=None, + stream=None, + capture_error_mode: str = "global", + ): + # Lazy-init of default_capture_stream helps avoid circular-import errors. + # Not thread safe, but graphs already have the general (explicitly documented) + # restriction that only one capture may be underway at a time in the process. 
+ if self.__class__.default_capture_stream is None: + self.__class__.default_capture_stream = torch.cuda.Stream() + + self.pool = () if pool is None else (pool,) + self.capture_stream = ( + stream if stream is not None else self.__class__.default_capture_stream + ) + assert self.capture_stream is not None + self.stream_ctx = torch.cuda.stream(self.capture_stream) + self.cuda_graph = cuda_graph + self.capture_error_mode = capture_error_mode + + def __enter__(self): + # Free as much memory as we can for the graph + torch.cuda.synchronize() + gc.collect() + torch.cuda.empty_cache() + + # Stackoverflow seems comfortable with this pattern + # https://stackoverflow.com/questions/26635684/calling-enter-and-exit-manually#39172487 + self.stream_ctx.__enter__() + + self.cuda_graph.capture_begin( + *self.pool, capture_error_mode=self.capture_error_mode + ) + + def __exit__(self, exc_type, exc_value, traceback): + self.cuda_graph.capture_end() + self.stream_ctx.__exit__(exc_type, exc_value, traceback) + # returning None should propagate exceptions from either capture_end or stream_ctx.__exit__() + + +def make_graphed_callables( + callables, sample_args, num_warmup_iters=3, allow_unused_input=False, pool=None +): + r"""Accept callables (functions or :class:`nn.Module`\ s) and returns graphed versions. + + Each graphed callable's forward pass runs its source callable's + forward CUDA work as a CUDA graph inside a single autograd node. + + The graphed callable's forward pass also appends + a backward node to the autograd graph. During backward, this node runs the + callable's backward work as a CUDA graph. + + Therefore, each graphed callable should be a drop-in replacement for its source callable + in an autograd-enabled training loop. + + See :ref:`Partial-network capture` for detailed use and constraints. + + If you pass a tuple of several callables, their captures will use the same memory pool. + See :ref:`Graph memory management` for when this is appropriate. 
+ + Arguments: + callables (torch.nn.Module or Python function, or tuple of these): Callable or callables to graph. + See :ref:`Graph memory management` for when passing a tuple of callables + is appropriate. If you pass a tuple of callables, their order in the tuple must be the same order + they'll run in the live workload. + sample_args (tuple of Tensors, or tuple of tuples of Tensors): Samples args for each callable. + If a single callable was passed, ``sample_args`` must be a single tuple of argument Tensors. + If a tuple of callables was passed, ``sample_args`` must be tuple of tuples of argument Tensors. + num_warmup_iters (int): The number of warmup iterations. Currently, ``DataDistributedParallel`` needs + 11 iterations for warm up. Default: ``3``. + allow_unused_input (bool): If False, specifying inputs that were not used when computing outputs + (and therefore their grad is always zero) is an error. Defaults to False. + pool (optional): Token (returned by :func:`~torch.cuda.graph_pool_handle` or + :meth:`other_Graph_instance.pool()`) that hints this graph may share memory + with the indicated pool. See :ref:`Graph memory management`. + .. note:: + The ``requires_grad`` state of each Tensor in ``sample_args`` must match the state + that's expected for the corresponding real input in the training loop. + + .. warning:: + This API is in beta and may change in future releases. + + .. warning:: + ``sample_args`` for each callable must contain only Tensors. Other types are not allowed. + + .. warning:: + Returned callables do not support higher order differentiation (e.g., double backward). + + .. warning:: + In any :class:`~torch.nn.Module` passed to :func:`~make_graphed_callables`, only parameters + may be trainable. Buffers must have ``requires_grad=False``. + + .. warning:: + After you pass a :class:`torch.nn.Module` through :func:`~make_graphed_callables`, + you may not add or remove any of that Module's parameters or buffers. + + .. 
warning:: + :class:`torch.nn.Module`\s passed to :func:`~torch.cuda.make_graphed_callables` must not have module hooks + registered on them at the time they are passed. However, registering hooks on modules *after* passing them + through :func:`~torch.cuda.make_graphed_callables` is allowed. + + .. warning:: + When running a graphed callable, you must pass its arguments in the same order and format + they appeared in that callable's ``sample_args``. + + .. warning:: + The automatic mixed precision is supported in :func:`~torch.cuda.make_graphed_callables` only with disabled + caching. The context manager `torch.cuda.amp.autocast()` must have `cache_enabled=False`. + """ + if torch.is_autocast_enabled() and torch.is_autocast_cache_enabled(): + raise RuntimeError( + "make_graphed_callables does not support the autocast caching. Please set `cache_enabled=False`." + ) + + just_one_callable = False + + if not isinstance(callables, tuple): + just_one_callable = True + callables = (callables,) + sample_args = (sample_args,) + + flatten_sample_args = [] + + for c, args in zip(callables, sample_args): + if isinstance(c, torch.nn.Module): + assert ( + len(c._backward_hooks) == 0 + and len(c._forward_hooks) == 0 + and len(c._forward_pre_hooks) == 0 + ), ( + "Modules must not have hooks registered at the time they are passed. However, registering hooks " + + "on modules after passing them through make_graphed_callables is allowed." + ) + assert all(b.requires_grad is False for b in c.buffers()), ( + "In any :class:`~torch.nn.Module` passed to " + + ":func:`~make_graphed_callables`, only parameters may be trainable. All buffers must have " + + "``requires_grad=False``." + ) + flatten_arg = torch.utils._pytree.arg_tree_leaves(*args) + flatten_sample_args.append(tuple(flatten_arg)) + assert all(isinstance(arg, torch.Tensor) for arg in flatten_arg), ( + "In the beta API, sample_args " + + "for each callable must contain only Tensors. Other types are not allowed." 
+ ) + + # If a callable is an nn.Module, its graph's full input surface is the args the user explicitly + # passes to forward (ie, its sample_args) AND the module's parameter attributes. + per_callable_len_user_args = [len(args) for args in flatten_sample_args] + per_callable_module_params = [ + tuple(c.parameters()) if isinstance(c, torch.nn.Module) else () + for c in callables + ] + per_callable_static_input_surfaces = [ + flatten_sample_args[i] + per_callable_module_params[i] + for i in range(len(callables)) + ] + + fwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))] + bwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))] + + mempool = graph_pool_handle() if pool is None else pool + + # Warmup + # Hopefully prevents cudnn benchmarking and other lazy-initialization cuda work + # from ending up in any captures. + torch.cuda.synchronize() + with torch.cuda.stream(torch.cuda.Stream()): + for func, args, static_input_surface in zip( + callables, sample_args, per_callable_static_input_surfaces + ): + grad_inputs, outputs, outputs_grad = None, None, None + for _ in range(num_warmup_iters): + outputs = torch.utils._pytree.tree_leaves(func(*args)) + outputs_grad = tuple(o for o in outputs if o.requires_grad) + if len(outputs_grad) > 0: + grad_inputs = torch.autograd.grad( + outputs=outputs_grad, + inputs=tuple( + i for i in static_input_surface if i.requires_grad + ), + grad_outputs=tuple( + torch.empty_like(o) for o in outputs if o.requires_grad + ), + only_inputs=True, + allow_unused=allow_unused_input, + ) + for v in [outputs, outputs_grad, grad_inputs]: + del v + + torch.cuda.synchronize() + + # All captures here share a mempool. To avoid replays corrupting each other's memory, + # the safest approach is to capture all passes in the same order they'll run: + # fwd 1, fwd 2, ... fwd N, then bwd N, bwd N-1, ... bwd 1. 
+ + # Capture forward graphs + per_callable_static_outputs = [] + per_callable_output_unflatten_spec = [] + for func, args, fwd_graph in zip(callables, sample_args, fwd_graphs): + with torch.cuda.graph(fwd_graph, pool=mempool): + outputs = func(*args) + + flatten_outputs, spec = torch.utils._pytree.tree_flatten(outputs) + per_callable_static_outputs.append(tuple(flatten_outputs)) + per_callable_output_unflatten_spec.append(spec) + + # Capture backward graphs in reverse order + per_callable_static_grad_outputs = [] + per_callable_static_grad_inputs = [] + for static_input_surface, static_outputs, bwd_graph, module_params in zip( + reversed(per_callable_static_input_surfaces), + reversed(per_callable_static_outputs), + reversed(bwd_graphs), + reversed(per_callable_module_params), + ): + # For now, assumes all static_outputs require grad + # assert all(o.requires_grad for o in static_outputs), "Outputs of graphed callables must require grad." + static_grad_outputs = tuple( + torch.empty_like(o) if o.requires_grad else None for o in static_outputs + ) + + outputs_grad = tuple(o for o in static_outputs if o.requires_grad) + grad_inputs = None + if len(outputs_grad) > 0: + with torch.cuda.graph(bwd_graph, pool=mempool): + grad_inputs = torch.autograd.grad( + outputs=outputs_grad, + inputs=tuple(i for i in static_input_surface if i.requires_grad), + grad_outputs=tuple(o for o in static_grad_outputs if o is not None), + only_inputs=True, + allow_unused=allow_unused_input, + ) + + # Constructs a tuple suitable for returning from Graphed.backward: + # Pads out the actually-needed grads with Nones in gradient slots for inputs that don't require grad. + # I couldn't think of a slick one-liner for this pattern. 
+ static_grad_inputs = [] + grad_idx = 0 + for arg in static_input_surface: + if arg.requires_grad and grad_inputs is not None: + static_grad_inputs.append(grad_inputs[grad_idx]) + grad_idx += 1 + else: + static_grad_inputs.append(None) # type: ignore[arg-type] + static_grad_inputs = tuple(static_grad_inputs) # type: ignore[assignment] + + per_callable_static_grad_outputs.append(static_grad_outputs) + per_callable_static_grad_inputs.append(static_grad_inputs) + + # Reverses the most recent two lists + per_callable_static_grad_outputs.reverse() + per_callable_static_grad_inputs.reverse() + # Now for every per_callable list, per_callable_*[i] holds the stuff for the ith callable. + + def make_graphed_autograd_function( + fwd_graph, + bwd_graph, + module_params, + len_user_args, + output_unflatten_spec, + static_input_surface, + static_outputs, + static_grad_outputs, + static_grad_inputs, + ): + class Graphed(torch.autograd.Function): + @staticmethod + def forward(ctx, *inputs): + # At this stage, only the user args may (potentially) be new tensors. + for i in range(len_user_args): + if static_input_surface[i].data_ptr() != inputs[i].data_ptr(): + static_input_surface[i].copy_(inputs[i]) + fwd_graph.replay() + assert isinstance(static_outputs, tuple) + return tuple(o.detach() for o in static_outputs) + + @staticmethod + @torch.autograd.function.once_differentiable + def backward(ctx, *grads): + assert len(grads) == len(static_grad_outputs) + for g, grad in zip(static_grad_outputs, grads): + if g is not None: + # don't copy if autograd gods have been kind and the + # incoming grad is already in the right place + if g.data_ptr() != grad.data_ptr(): + g.copy_(grad) + bwd_graph.replay() + + # Input args that didn't require grad expect a None gradient. 
+ assert isinstance(static_grad_inputs, tuple) + return tuple( + b.detach() if b is not None else b for b in static_grad_inputs + ) + + def functionalized(*user_args): + # Runs the autograd function with inputs == all inputs to the graph that might require grad + # (explicit user args + module parameters) + # Assumes module params didn't change since capture. + flatten_user_args = torch.utils._pytree.arg_tree_leaves(*user_args) + out = Graphed.apply(*(tuple(flatten_user_args) + module_params)) + return torch.utils._pytree.tree_unflatten(out, output_unflatten_spec) + + return functionalized + + # Put together the final graphed callables + ret = [] + for i, func in enumerate(callables): + graphed = make_graphed_autograd_function( + fwd_graphs[i], + bwd_graphs[i], + per_callable_module_params[i], + per_callable_len_user_args[i], + per_callable_output_unflatten_spec[i], + per_callable_static_input_surfaces[i], + per_callable_static_outputs[i], + per_callable_static_grad_outputs[i], + per_callable_static_grad_inputs[i], + ) + + if isinstance(func, torch.nn.Module): + + def make_graphed_forward(func, graph_training_state, graphed, orig_fwd): + def new_fwd(*user_args): + # If the module's training-or-eval state matches what we graphed, + # run the graph, otherwise run the original forward method + if func.training == graph_training_state: + return graphed(*user_args) + else: + return orig_fwd(*user_args) + + return new_fwd + + func.forward = make_graphed_forward(func, func.training, graphed, func.forward) # type: ignore[assignment] + ret.append(func) + else: + ret.append(graphed) + + if just_one_callable: + return ret[0] + + return tuple(ret) diff --git a/janus/lib/python3.10/site-packages/torch/special/__init__.py b/janus/lib/python3.10/site-packages/torch/special/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..07e104c4090ef8456f8acf5f2548bbbc3532a2d3 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/special/__init__.py @@ -0,0 
+1,1283 @@ +import torch +from torch._C import _add_docstr, _special # type: ignore[attr-defined] +from torch._torch_docs import common_args, multi_dim_common + +__all__ = [ + 'airy_ai', + 'bessel_j0', + 'bessel_j1', + 'bessel_y0', + 'bessel_y1', + 'chebyshev_polynomial_t', + 'chebyshev_polynomial_u', + 'chebyshev_polynomial_v', + 'chebyshev_polynomial_w', + 'digamma', + 'entr', + 'erf', + 'erfc', + 'erfcx', + 'erfinv', + 'exp2', + 'expit', + 'expm1', + 'gammainc', + 'gammaincc', + 'gammaln', + 'hermite_polynomial_h', + 'hermite_polynomial_he', + 'i0', + 'i0e', + 'i1', + 'i1e', + 'laguerre_polynomial_l', + 'legendre_polynomial_p', + 'log1p', + 'log_ndtr', + 'log_softmax', + 'logit', + 'logsumexp', + 'modified_bessel_i0', + 'modified_bessel_i1', + 'modified_bessel_k0', + 'modified_bessel_k1', + 'multigammaln', + 'ndtr', + 'ndtri', + 'polygamma', + 'psi', + 'round', + 'shifted_chebyshev_polynomial_t', + 'shifted_chebyshev_polynomial_u', + 'shifted_chebyshev_polynomial_v', + 'shifted_chebyshev_polynomial_w', + 'scaled_modified_bessel_k0', + 'scaled_modified_bessel_k1', + 'sinc', + 'softmax', + 'spherical_bessel_j0', + 'xlog1py', + 'xlogy', + 'zeta', +] + +Tensor = torch.Tensor + +entr = _add_docstr(_special.special_entr, + r""" +entr(input, *, out=None) -> Tensor +Computes the entropy on :attr:`input` (as defined below), elementwise. + +.. math:: + \begin{align} + \text{entr(x)} = \begin{cases} + -x * \ln(x) & x > 0 \\ + 0 & x = 0.0 \\ + -\infty & x < 0 + \end{cases} + \end{align} +""" + """ + +Args: + input (Tensor): the input tensor. + +Keyword args: + out (Tensor, optional): the output tensor. + +Example:: + >>> a = torch.arange(-0.5, 1, 0.5) + >>> a + tensor([-0.5000, 0.0000, 0.5000]) + >>> torch.special.entr(a) + tensor([ -inf, 0.0000, 0.3466]) +""") + +psi = _add_docstr(_special.special_psi, + r""" +psi(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.digamma`. 
+""") + +digamma = _add_docstr(_special.special_digamma, + r""" +digamma(input, *, out=None) -> Tensor + +Computes the logarithmic derivative of the gamma function on `input`. + +.. math:: + \digamma(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)} +""" + r""" +Args: + input (Tensor): the tensor to compute the digamma function on + +Keyword args: + {out} + +.. note:: This function is similar to SciPy's `scipy.special.digamma`. + +.. note:: From PyTorch 1.8 onwards, the digamma function returns `-Inf` for `0`. + Previously it returned `NaN` for `0`. + +Example:: + + >>> a = torch.tensor([1, 0.5]) + >>> torch.special.digamma(a) + tensor([-0.5772, -1.9635]) + +""".format(**common_args)) + +gammaln = _add_docstr(_special.special_gammaln, + r""" +gammaln(input, *, out=None) -> Tensor + +Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`. + +.. math:: + \text{out}_{i} = \ln \Gamma(|\text{input}_{i}|) +""" + """ +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.arange(0.5, 2, 0.5) + >>> torch.special.gammaln(a) + tensor([ 0.5724, 0.0000, -0.1208]) + +""".format(**common_args)) + +polygamma = _add_docstr(_special.special_polygamma, + r""" +polygamma(n, input, *, out=None) -> Tensor + +Computes the :math:`n^{th}` derivative of the digamma function on :attr:`input`. +:math:`n \geq 0` is called the order of the polygamma function. + +.. math:: + \psi^{(n)}(x) = \frac{d^{(n)}}{dx^{(n)}} \psi(x) + +.. note:: + This function is implemented only for nonnegative integers :math:`n \geq 0`. 
+""" + """ +Args: + n (int): the order of the polygamma function + {input} + +Keyword args: + {out} + +Example:: + >>> a = torch.tensor([1, 0.5]) + >>> torch.special.polygamma(1, a) + tensor([1.64493, 4.9348]) + >>> torch.special.polygamma(2, a) + tensor([ -2.4041, -16.8288]) + >>> torch.special.polygamma(3, a) + tensor([ 6.4939, 97.4091]) + >>> torch.special.polygamma(4, a) + tensor([ -24.8863, -771.4742]) +""".format(**common_args)) + +erf = _add_docstr(_special.special_erf, + r""" +erf(input, *, out=None) -> Tensor + +Computes the error function of :attr:`input`. The error function is defined as follows: + +.. math:: + \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.erf(torch.tensor([0, -1., 10.])) + tensor([ 0.0000, -0.8427, 1.0000]) +""".format(**common_args)) + +erfc = _add_docstr(_special.special_erfc, + r""" +erfc(input, *, out=None) -> Tensor + +Computes the complementary error function of :attr:`input`. +The complementary error function is defined as follows: + +.. math:: + \mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.erfc(torch.tensor([0, -1., 10.])) + tensor([ 1.0000, 1.8427, 0.0000]) +""".format(**common_args)) + +erfcx = _add_docstr(_special.special_erfcx, + r""" +erfcx(input, *, out=None) -> Tensor + +Computes the scaled complementary error function for each element of :attr:`input`. +The scaled complementary error function is defined as follows: + +.. 
math:: + \mathrm{erfcx}(x) = e^{x^2} \mathrm{erfc}(x) +""" + r""" + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.erfcx(torch.tensor([0, -1., 10.])) + tensor([ 1.0000, 5.0090, 0.0561]) +""".format(**common_args)) + +erfinv = _add_docstr(_special.special_erfinv, + r""" +erfinv(input, *, out=None) -> Tensor + +Computes the inverse error function of :attr:`input`. +The inverse error function is defined in the range :math:`(-1, 1)` as: + +.. math:: + \mathrm{erfinv}(\mathrm{erf}(x)) = x +""" + r""" + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.erfinv(torch.tensor([0, 0.5, -1.])) + tensor([ 0.0000, 0.4769, -inf]) +""".format(**common_args)) + +logit = _add_docstr(_special.special_logit, + r""" +logit(input, eps=None, *, out=None) -> Tensor + +Returns a new tensor with the logit of the elements of :attr:`input`. +:attr:`input` is clamped to [eps, 1 - eps] when eps is not None. +When eps is None and :attr:`input` < 0 or :attr:`input` > 1, the function will yields NaN. + +.. math:: + \begin{align} + y_{i} &= \ln(\frac{z_{i}}{1 - z_{i}}) \\ + z_{i} &= \begin{cases} + x_{i} & \text{if eps is None} \\ + \text{eps} & \text{if } x_{i} < \text{eps} \\ + x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\ + 1 - \text{eps} & \text{if } x_{i} > 1 - \text{eps} + \end{cases} + \end{align} +""" + r""" +Args: + {input} + eps (float, optional): the epsilon for input clamp bound. Default: ``None`` + +Keyword args: + {out} + +Example:: + + >>> a = torch.rand(5) + >>> a + tensor([0.2796, 0.9331, 0.6486, 0.1523, 0.6516]) + >>> torch.special.logit(a, eps=1e-6) + tensor([-0.9466, 2.6352, 0.6131, -1.7169, 0.6261]) +""".format(**common_args)) + +logsumexp = _add_docstr(_special.special_logsumexp, + r""" +logsumexp(input, dim, keepdim=False, *, out=None) + +Alias for :func:`torch.logsumexp`. 
+""".format(**multi_dim_common)) + +expit = _add_docstr(_special.special_expit, + r""" +expit(input, *, out=None) -> Tensor + +Computes the expit (also known as the logistic sigmoid function) of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}} +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> t = torch.randn(4) + >>> t + tensor([ 0.9213, 1.0887, -0.8858, -1.7683]) + >>> torch.special.expit(t) + tensor([ 0.7153, 0.7481, 0.2920, 0.1458]) +""".format(**common_args)) + +exp2 = _add_docstr(_special.special_exp2, + r""" +exp2(input, *, out=None) -> Tensor + +Computes the base two exponential function of :attr:`input`. + +.. math:: + y_{i} = 2^{x_{i}} + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.exp2(torch.tensor([0, math.log2(2.), 3, 4])) + tensor([ 1., 2., 8., 16.]) +""".format(**common_args)) + +expm1 = _add_docstr(_special.special_expm1, + r""" +expm1(input, *, out=None) -> Tensor + +Computes the exponential of the elements minus 1 +of :attr:`input`. + +.. math:: + y_{i} = e^{x_{i}} - 1 + +.. note:: This function provides greater precision than exp(x) - 1 for small values of x. + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.expm1(torch.tensor([0, math.log(2.)])) + tensor([ 0., 1.]) +""".format(**common_args)) + +xlog1py = _add_docstr(_special.special_xlog1py, + r""" +xlog1py(input, other, *, out=None) -> Tensor + +Computes ``input * log1p(other)`` with the following cases. + +.. math:: + \text{out}_{i} = \begin{cases} + \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\ + 0 & \text{if } \text{input}_{i} = 0.0 \text{ and } \text{other}_{i} != \text{NaN} \\ + \text{input}_{i} * \text{log1p}(\text{other}_{i})& \text{otherwise} + \end{cases} + +Similar to SciPy's `scipy.special.xlog1py`. + +""" + r""" + +Args: + input (Number or Tensor) : Multiplier + other (Number or Tensor) : Argument + +.. 
note:: At least one of :attr:`input` or :attr:`other` must be a tensor. + +Keyword args: + {out} + +Example:: + + >>> x = torch.zeros(5,) + >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')]) + >>> torch.special.xlog1py(x, y) + tensor([0., 0., 0., 0., nan]) + >>> x = torch.tensor([1, 2, 3]) + >>> y = torch.tensor([3, 2, 1]) + >>> torch.special.xlog1py(x, y) + tensor([1.3863, 2.1972, 2.0794]) + >>> torch.special.xlog1py(x, 4) + tensor([1.6094, 3.2189, 4.8283]) + >>> torch.special.xlog1py(2, y) + tensor([2.7726, 2.1972, 1.3863]) +""".format(**common_args)) + +xlogy = _add_docstr(_special.special_xlogy, + r""" +xlogy(input, other, *, out=None) -> Tensor + +Computes ``input * log(other)`` with the following cases. + +.. math:: + \text{out}_{i} = \begin{cases} + \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\ + 0 & \text{if } \text{input}_{i} = 0.0 \\ + \text{input}_{i} * \log{(\text{other}_{i})} & \text{otherwise} + \end{cases} + +Similar to SciPy's `scipy.special.xlogy`. + +""" + r""" + +Args: + input (Number or Tensor) : Multiplier + other (Number or Tensor) : Argument + +.. note:: At least one of :attr:`input` or :attr:`other` must be a tensor. + +Keyword args: + {out} + +Example:: + + >>> x = torch.zeros(5,) + >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')]) + >>> torch.special.xlogy(x, y) + tensor([0., 0., 0., 0., nan]) + >>> x = torch.tensor([1, 2, 3]) + >>> y = torch.tensor([3, 2, 1]) + >>> torch.special.xlogy(x, y) + tensor([1.0986, 1.3863, 0.0000]) + >>> torch.special.xlogy(x, 4) + tensor([1.3863, 2.7726, 4.1589]) + >>> torch.special.xlogy(2, y) + tensor([2.1972, 1.3863, 0.0000]) +""".format(**common_args)) + +i0 = _add_docstr(_special.special_i0, + r""" +i0(input, *, out=None) -> Tensor + +Computes the zeroth order modified Bessel function of the first kind for each element of :attr:`input`. + +.. 
math:: + \text{out}_{i} = I_0(\text{input}_{i}) = \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2} + +""" + r""" +Args: + input (Tensor): the input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.i0(torch.arange(5, dtype=torch.float32)) + tensor([ 1.0000, 1.2661, 2.2796, 4.8808, 11.3019]) + +""".format(**common_args)) + +i0e = _add_docstr(_special.special_i0e, + r""" +i0e(input, *, out=None) -> Tensor +Computes the exponentially scaled zeroth order modified Bessel function of the first kind (as defined below) +for each element of :attr:`input`. + +.. math:: + \text{out}_{i} = \exp(-|x|) * i0(x) = \exp(-|x|) * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2} + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.i0e(torch.arange(5, dtype=torch.float32)) + tensor([1.0000, 0.4658, 0.3085, 0.2430, 0.2070]) +""".format(**common_args)) + +i1 = _add_docstr(_special.special_i1, + r""" +i1(input, *, out=None) -> Tensor +Computes the first order modified Bessel function of the first kind (as defined below) +for each element of :attr:`input`. + +.. math:: + \text{out}_{i} = \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!} + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.i1(torch.arange(5, dtype=torch.float32)) + tensor([0.0000, 0.5652, 1.5906, 3.9534, 9.7595]) +""".format(**common_args)) + +i1e = _add_docstr(_special.special_i1e, + r""" +i1e(input, *, out=None) -> Tensor +Computes the exponentially scaled first order modified Bessel function of the first kind (as defined below) +for each element of :attr:`input`. + +.. math:: + \text{out}_{i} = \exp(-|x|) * i1(x) = + \exp(-|x|) * \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) 
* (k+1)!} + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.i1e(torch.arange(5, dtype=torch.float32)) + tensor([0.0000, 0.2079, 0.2153, 0.1968, 0.1788]) +""".format(**common_args)) + +ndtr = _add_docstr(_special.special_ndtr, + r""" +ndtr(input, *, out=None) -> Tensor +Computes the area under the standard Gaussian probability density function, +integrated from minus infinity to :attr:`input`, elementwise. + +.. math:: + \text{ndtr}(x) = \frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3])) + tensor([0.0013, 0.0228, 0.1587, 0.5000, 0.8413, 0.9772, 0.9987]) +""".format(**common_args)) + +ndtri = _add_docstr(_special.special_ndtri, + r""" +ndtri(input, *, out=None) -> Tensor +Computes the argument, x, for which the area under the Gaussian probability density function +(integrated from minus infinity to x) is equal to :attr:`input`, elementwise. + +.. math:: + \text{ndtri}(p) = \sqrt{2}\text{erf}^{-1}(2p - 1) + +.. note:: + Also known as quantile function for Normal Distribution. + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.ndtri(torch.tensor([0, 0.25, 0.5, 0.75, 1])) + tensor([ -inf, -0.6745, 0.0000, 0.6745, inf]) +""".format(**common_args)) + +log_ndtr = _add_docstr(_special.special_log_ndtr, + r""" +log_ndtr(input, *, out=None) -> Tensor +Computes the log of the area under the standard Gaussian probability density function, +integrated from minus infinity to :attr:`input`, elementwise. + +.. 
math:: + \text{log\_ndtr}(x) = \log\left(\frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt \right) + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.log_ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3])) + tensor([-6.6077 -3.7832 -1.841 -0.6931 -0.1728 -0.023 -0.0014]) +""".format(**common_args)) + +log1p = _add_docstr(_special.special_log1p, + r""" +log1p(input, *, out=None) -> Tensor + +Alias for :func:`torch.log1p`. +""") + +sinc = _add_docstr(_special.special_sinc, + r""" +sinc(input, *, out=None) -> Tensor + +Computes the normalized sinc of :attr:`input.` + +.. math:: + \text{out}_{i} = + \begin{cases} + 1, & \text{if}\ \text{input}_{i}=0 \\ + \sin(\pi \text{input}_{i}) / (\pi \text{input}_{i}), & \text{otherwise} + \end{cases} +""" + r""" + +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> t = torch.randn(4) + >>> t + tensor([ 0.2252, -0.2948, 1.0267, -1.1566]) + >>> torch.special.sinc(t) + tensor([ 0.9186, 0.8631, -0.0259, -0.1300]) +""".format(**common_args)) + +round = _add_docstr(_special.special_round, + r""" +round(input, *, out=None) -> Tensor + +Alias for :func:`torch.round`. +""") + +softmax = _add_docstr(_special.special_softmax, + r""" +softmax(input, dim, *, dtype=None) -> Tensor + +Computes the softmax function. + +Softmax is defined as: + +:math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}` + +It is applied to all slices along dim, and will re-scale them so that the elements +lie in the range `[0, 1]` and sum to 1. + +Args: + input (Tensor): input + dim (int): A dimension along which softmax will be computed. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is cast to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. 
+ +Examples:: + >>> t = torch.ones(2, 2) + >>> torch.special.softmax(t, 0) + tensor([[0.5000, 0.5000], + [0.5000, 0.5000]]) + +""") + +log_softmax = _add_docstr(_special.special_log_softmax, + r""" +log_softmax(input, dim, *, dtype=None) -> Tensor + +Computes softmax followed by a logarithm. + +While mathematically equivalent to log(softmax(x)), doing these two +operations separately is slower and numerically unstable. This function +is computed as: + +.. math:: + \text{log\_softmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right) +""" + r""" + +Args: + input (Tensor): input + dim (int): A dimension along which log_softmax will be computed. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is cast to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + +Example:: + >>> t = torch.ones(2, 2) + >>> torch.special.log_softmax(t, 0) + tensor([[-0.6931, -0.6931], + [-0.6931, -0.6931]]) +""") + +zeta = _add_docstr(_special.special_zeta, + r""" +zeta(input, other, *, out=None) -> Tensor + +Computes the Hurwitz zeta function, elementwise. + +.. math:: + \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x} + +""" + r""" +Args: + input (Tensor): the input tensor corresponding to `x`. + other (Tensor): the input tensor corresponding to `q`. + +.. 
+ note::
+ The Riemann zeta function corresponds to the case when `q = 1`
+
+Keyword args:
+ {out}
+
+Example::
+ >>> x = torch.tensor([2., 4.])
+ >>> torch.special.zeta(x, 1)
+ tensor([1.6449, 1.0823])
+ >>> torch.special.zeta(x, torch.tensor([1., 2.]))
+ tensor([1.6449, 0.0823])
+ >>> torch.special.zeta(2, torch.tensor([1., 2.]))
+ tensor([1.6449, 0.6449])
+""".format(**common_args))
+
+multigammaln = _add_docstr(_special.special_multigammaln,
+ r"""
+multigammaln(input, p, *, out=None) -> Tensor
+
+Computes the `multivariate log-gamma function
+<https://en.wikipedia.org/wiki/Multivariate_gamma_function>`_ with dimension
+:math:`p` element-wise, given by
+
+.. math::
+ \log(\Gamma_{p}(a)) = C + \displaystyle \sum_{i=1}^{p} \log\left(\Gamma\left(a - \frac{i - 1}{2}\right)\right)
+
+where :math:`C = \log(\pi) \cdot \frac{p (p - 1)}{4}` and :math:`\Gamma(-)` is the Gamma function.
+
+All elements must be greater than :math:`\frac{p - 1}{2}`, otherwise the behavior is undefined.
+""" + """
+
+Args:
+ input (Tensor): the tensor to compute the multivariate log-gamma function
+ p (int): the number of dimensions
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.empty(2, 3).uniform_(1, 2)
+ >>> a
+ tensor([[1.6835, 1.8474, 1.1929],
+ [1.0475, 1.7162, 1.4180]])
+ >>> torch.special.multigammaln(a, 2)
+ tensor([[0.3928, 0.4007, 0.7586],
+ [1.0311, 0.3901, 0.5049]])
+""".format(**common_args))
+
+gammainc = _add_docstr(_special.special_gammainc,
+ r"""
+gammainc(input, other, *, out=None) -> Tensor
+
+Computes the regularized lower incomplete gamma function:
+
+.. math::
+ \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_0^{\text{other}_i} t^{\text{input}_i-1} e^{-t} dt
+
+where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive
+and at least one is strictly positive.
+If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`.
+:math:`\Gamma(\cdot)` in the equation above is the gamma function,
+
+.. math::
+ \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt. 
+
+See :func:`torch.special.gammaincc` and :func:`torch.special.gammaln` for related functions.
+
+Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`
+and float inputs.
+
+.. note::
+ The backward pass with respect to :attr:`input` is not yet supported.
+ Please open an issue on PyTorch's Github to request it.
+
+""" + r"""
+Args:
+ input (Tensor): the first non-negative input tensor
+ other (Tensor): the second non-negative input tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a1 = torch.tensor([4.0])
+ >>> a2 = torch.tensor([3.0, 4.0, 5.0])
+ >>> a = torch.special.gammainc(a1, a2)
+ tensor([0.3528, 0.5665, 0.7350])
+ >>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2)
+ tensor([1., 1., 1.])
+
+""".format(**common_args))
+
+gammaincc = _add_docstr(_special.special_gammaincc,
+ r"""
+gammaincc(input, other, *, out=None) -> Tensor
+
+Computes the regularized upper incomplete gamma function:
+
+.. math::
+ \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_{\text{other}_i}^{\infty} t^{\text{input}_i-1} e^{-t} dt
+
+where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive
+and at least one is strictly positive.
+If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`.
+:math:`\Gamma(\cdot)` in the equation above is the gamma function,
+
+.. math::
+ \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt.
+
+See :func:`torch.special.gammainc` and :func:`torch.special.gammaln` for related functions.
+
+Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`
+and float inputs.
+
+.. note::
+ The backward pass with respect to :attr:`input` is not yet supported.
+ Please open an issue on PyTorch's Github to request it. 
+ +""" + r""" +Args: + input (Tensor): the first non-negative input tensor + other (Tensor): the second non-negative input tensor + +Keyword args: + {out} + +Example:: + + >>> a1 = torch.tensor([4.0]) + >>> a2 = torch.tensor([3.0, 4.0, 5.0]) + >>> a = torch.special.gammaincc(a1, a2) + tensor([0.6472, 0.4335, 0.2650]) + >>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2) + tensor([1., 1., 1.]) + +""".format(**common_args)) + +airy_ai = _add_docstr(_special.special_airy_ai, + r""" +airy_ai(input, *, out=None) -> Tensor + +Airy function :math:`\text{Ai}\left(\text{input}\right)`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +bessel_j0 = _add_docstr(_special.special_bessel_j0, + r""" +bessel_j0(input, *, out=None) -> Tensor + +Bessel function of the first kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +bessel_j1 = _add_docstr(_special.special_bessel_j1, + r""" +bessel_j1(input, *, out=None) -> Tensor + +Bessel function of the first kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +bessel_y0 = _add_docstr(_special.special_bessel_y0, + r""" +bessel_y0(input, *, out=None) -> Tensor + +Bessel function of the second kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +bessel_y1 = _add_docstr(_special.special_bessel_y1, + r""" +bessel_y1(input, *, out=None) -> Tensor + +Bessel function of the second kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +chebyshev_polynomial_t = _add_docstr(_special.special_chebyshev_polynomial_t, + r""" +chebyshev_polynomial_t(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the first kind :math:`T_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. 
If :math:`n < 6` or :math:`|\text{input}| > 1` the recursion:
+
+.. math::
+ T_{n + 1}(\text{input}) = 2 \times \text{input} \times T_{n}(\text{input}) - T_{n - 1}(\text{input})
+
+is evaluated. Otherwise, the explicit trigonometric formula:
+
+.. math::
+ T_{n}(\text{input}) = \text{cos}(n \times \text{arccos}(x))
+
+is evaluated.
+
+""" + r"""
+Args:
+ {input}
+ n (Tensor): Degree of the polynomial.
+
+Keyword args:
+ {out}
+""".format(**common_args))
+
+chebyshev_polynomial_u = _add_docstr(_special.special_chebyshev_polynomial_u,
+ r"""
+chebyshev_polynomial_u(input, n, *, out=None) -> Tensor
+
+Chebyshev polynomial of the second kind :math:`U_{n}(\text{input})`.
+
+If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`,
+:math:`2 \times \text{input}` is returned. If :math:`n < 6` or
+:math:`|\text{input}| > 1`, the recursion:
+
+.. math::
+ U_{n + 1}(\text{input}) = 2 \times \text{input} \times U_{n}(\text{input}) - U_{n - 1}(\text{input})
+
+is evaluated. Otherwise, the explicit trigonometric formula:
+
+.. math::
+ \frac{\text{sin}((n + 1) \times \text{arccos}(\text{input}))}{\text{sin}(\text{arccos}(\text{input}))}
+
+is evaluated.
+
+""" + r"""
+Args:
+ {input}
+ n (Tensor): Degree of the polynomial.
+
+Keyword args:
+ {out}
+""".format(**common_args))
+
+chebyshev_polynomial_v = _add_docstr(_special.special_chebyshev_polynomial_v,
+ r"""
+chebyshev_polynomial_v(input, n, *, out=None) -> Tensor
+
+Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`.
+
+""" + r"""
+Args:
+ {input}
+ n (Tensor): Degree of the polynomial.
+
+Keyword args:
+ {out}
+""".format(**common_args))
+
+chebyshev_polynomial_w = _add_docstr(_special.special_chebyshev_polynomial_w,
+ r"""
+chebyshev_polynomial_w(input, n, *, out=None) -> Tensor
+
+Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`.
+
+""" + r"""
+Args:
+ {input}
+ n (Tensor): Degree of the polynomial. 
+ +Keyword args: + {out} +""".format(**common_args)) + +hermite_polynomial_h = _add_docstr(_special.special_hermite_polynomial_h, + r""" +hermite_polynomial_h(input, n, *, out=None) -> Tensor + +Physicist's Hermite polynomial :math:`H_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. Otherwise, the recursion: + +.. math:: + H_{n + 1}(\text{input}) = 2 \times \text{input} \times H_{n}(\text{input}) - H_{n - 1}(\text{input}) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +hermite_polynomial_he = _add_docstr(_special.special_hermite_polynomial_he, + r""" +hermite_polynomial_he(input, n, *, out=None) -> Tensor + +Probabilist's Hermite polynomial :math:`He_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. Otherwise, the recursion: + +.. math:: + He_{n + 1}(\text{input}) = 2 \times \text{input} \times He_{n}(\text{input}) - He_{n - 1}(\text{input}) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +laguerre_polynomial_l = _add_docstr(_special.special_laguerre_polynomial_l, + r""" +laguerre_polynomial_l(input, n, *, out=None) -> Tensor + +Laguerre polynomial :math:`L_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. Otherwise, the recursion: + +.. math:: + L_{n + 1}(\text{input}) = 2 \times \text{input} \times L_{n}(\text{input}) - L_{n - 1}(\text{input}) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +legendre_polynomial_p = _add_docstr(_special.special_legendre_polynomial_p, + r""" +legendre_polynomial_p(input, n, *, out=None) -> Tensor + +Legendre polynomial :math:`P_{n}(\text{input})`. 
+ +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. Otherwise, the recursion: + +.. math:: + P_{n + 1}(\text{input}) = 2 \times \text{input} \times P_{n}(\text{input}) - P_{n - 1}(\text{input}) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +modified_bessel_i0 = _add_docstr(_special.special_modified_bessel_i0, + r""" +modified_bessel_i0(input, *, out=None) -> Tensor + +Modified Bessel function of the first kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +modified_bessel_i1 = _add_docstr(_special.special_modified_bessel_i1, + r""" +modified_bessel_i1(input, *, out=None) -> Tensor + +Modified Bessel function of the first kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +modified_bessel_k0 = _add_docstr(_special.special_modified_bessel_k0, + r""" +modified_bessel_k0(input, *, out=None) -> Tensor + +Modified Bessel function of the second kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +modified_bessel_k1 = _add_docstr(_special.special_modified_bessel_k1, + r""" +modified_bessel_k1(input, *, out=None) -> Tensor + +Modified Bessel function of the second kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +scaled_modified_bessel_k0 = _add_docstr(_special.special_scaled_modified_bessel_k0, + r""" +scaled_modified_bessel_k0(input, *, out=None) -> Tensor + +Scaled modified Bessel function of the second kind of order :math:`0`. 
+ +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +scaled_modified_bessel_k1 = _add_docstr(_special.special_scaled_modified_bessel_k1, + r""" +scaled_modified_bessel_k1(input, *, out=None) -> Tensor + +Scaled modified Bessel function of the second kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +shifted_chebyshev_polynomial_t = _add_docstr(_special.special_shifted_chebyshev_polynomial_t, + r""" +shifted_chebyshev_polynomial_t(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the first kind :math:`T_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +shifted_chebyshev_polynomial_u = _add_docstr(_special.special_shifted_chebyshev_polynomial_u, + r""" +shifted_chebyshev_polynomial_u(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the second kind :math:`U_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +shifted_chebyshev_polynomial_v = _add_docstr(_special.special_shifted_chebyshev_polynomial_v, + r""" +shifted_chebyshev_polynomial_v(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +shifted_chebyshev_polynomial_w = _add_docstr(_special.special_shifted_chebyshev_polynomial_w, + r""" +shifted_chebyshev_polynomial_w(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. 
+ +Keyword args: + {out} +""".format(**common_args)) + +spherical_bessel_j0 = _add_docstr(_special.special_spherical_bessel_j0, + r""" +spherical_bessel_j0(input, *, out=None) -> Tensor + +Spherical Bessel function of the first kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) diff --git a/janus/lib/python3.10/site-packages/torch/special/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/special/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b923406b6823165813267f687536a4776323715 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/special/__pycache__/__init__.cpython-310.pyc differ