diff --git a/.gitattributes b/.gitattributes index e595082cfca583530533f23dadc3cf21b9d83b49..3806a85b4615904c24e75f9ec84cf00af2ccfba4 100644 --- a/.gitattributes +++ b/.gitattributes @@ -343,3 +343,4 @@ parrot/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers parrot/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/libcrypto.so.3 filter=lfs diff=lfs merge=lfs -text llava_next/lib/python3.10/site-packages/torch/bin/nvfuser_tests filter=lfs diff=lfs merge=lfs -text +llava_next/lib/libcrypto.a filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/lib/libcrypto.a b/llava_next/lib/libcrypto.a new file mode 100644 index 0000000000000000000000000000000000000000..e19426e6fd29410c95fbaad399a304751916c459 --- /dev/null +++ b/llava_next/lib/libcrypto.a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59bebb9b9a2e4af6837913a0a014f6bdc44b3c02079496f0ffc857e1dd0f87e3 +size 11086938 diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np16.gz b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np16.gz new file mode 100644 index 0000000000000000000000000000000000000000..fedefdd304054a85fa995801885f997ca8e1a44f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np16.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:418447e90d83486568ae3092a960b18d358230e24ac9ec38365daa99f415bd0f +size 769 diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl new file mode 100644 index 0000000000000000000000000000000000000000..38202b72d880ff53255dbb86eccd73bdb6224c74 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89c4508e3dfbe01f801e4e739f1aded13f685941e89281c8050f0ca8aa3c97e5 +size 986 diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz new file mode 100644 index 0000000000000000000000000000000000000000..7812497bc95e5894c8e880736bfb06aa22bb2fae --- /dev/null +++ b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efb146d450c6d061d06affb56f17384e7f64cbab9b516fcc6c4d3f8869b3e707 +size 712 diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl new file mode 100644 index 0000000000000000000000000000000000000000..f22c25bdb59d15a3771104dff6dfebe564e98add --- /dev/null +++ b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cbe456f5b91f5a3cb8e386838f276c30335432a351426686187761d5c34168b +size 1068 diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.bz2 b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.bz2 new file mode 100644 index 
0000000000000000000000000000000000000000..06e8395437874c25cfdf6a6783eab12a6c178f90 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.bz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6a1a9b884be654e2e3fc9a48251ecf0c6920e255c3f2ee5dd71d8252a694606 +size 1005 diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz new file mode 100644 index 0000000000000000000000000000000000000000..cec2871b09ae347e07c81eb55e7979300748ccd1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02cf30d8b196c303662b2dd035d2a58caeb762ae3a82345ffd1274961e7f5aa0 +size 752 diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py35_np19.gz b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py35_np19.gz new file mode 100644 index 0000000000000000000000000000000000000000..0720a70aee276c37f9457817922ae60b67600d47 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py35_np19.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f33bd8a21a41b729b05dac5deeb0e868f218a092b0e3fe5988094cf167217f6 +size 673 diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_04.npy b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_04.npy new file mode 100644 index 0000000000000000000000000000000000000000..e9b5e77c73268dfff541b576126f06fc6fed3d59 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_04.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ecbe244294ba93e08479b16c1b9a9411e3569ff660ed0459dca1d241381df05 +size 104 diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e739b6d035cdf110063dbb8b2cdceb116e187019 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3d4cbc690d3ce9e5323a714ea546f32c01ab1710285c420184f6cdf4b26fc25 +size 691 diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_03.npy b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_03.npy new file mode 100644 index 0000000000000000000000000000000000000000..73976395be90d4b2b2d955c79a90721e16cebc82 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_03.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ede9a64a52b25d7db30950956c978ec0b3932b7d14acd5abc63216e64babde7 +size 307 diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_01.npy b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_01.npy new file mode 100644 index 0000000000000000000000000000000000000000..15574a4193ad4ad724b2b8053c701a82efa78fd5 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_01.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0efbd7d9ce7eec3a6e0a0db41e795e0396cca3d6b037dad6c61b464843d28809 +size 120 diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_02.npy b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_02.npy new file mode 100644 index 0000000000000000000000000000000000000000..f00f08fbeeda280fa3ce00069c313c5412a33eca --- /dev/null +++ b/parrot/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_02.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c1cf36cb781fbcc21b953bb0a0b45df092da0eae0e765882e5963ccd70105b1 +size 120 diff --git a/parrot/lib/python3.10/site-packages/torch/_custom_ops.py b/parrot/lib/python3.10/site-packages/torch/_custom_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..b8231a186c0a43119075c36b111e71f03c49a9a3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_custom_ops.py @@ -0,0 +1,323 @@ +# mypy: allow-untyped-defs +import inspect + +from torch._custom_op.impl import ( + _custom_op_with_schema, + _find_custom_op, + infer_schema, + parse_qualname, + validate_namespace, +) +from torch.library import get_ctx + +__all__ = [ + "custom_op", + "impl", + "impl_abstract", + "get_ctx", + "impl_save_for_backward", + "impl_backward", +] + + +def custom_op(qualname, func_or_schema=None): + r"""Register a new custom operator + + In PyTorch, defining an op (short for "operator") is a two-step process: + - we need to define the op (by providing an operator name and schema) + - we need to implement behavior for how the operator interacts with + various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc. + + This entrypoint defines the custom operator (the first step); + you must then perform the second step by calling various + ``impl_*`` APIs. + + This API may be used as a decorator (see examples). + + For a detailed guide on custom ops, please see + https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + + Arguments: + qualname (str): Should be a string that looks like + "namespace::operator_name". Operators in PyTorch need a namespace to + avoid name collisions; a given operator may only be created once. + If you are writing a Python library, we recommend that the namespace + be the name of your top-level module. + func_or_schema (Union[Callable, str]): Each PyTorch operator needs a + schema that tells PyTorch the types of the inputs/outputs. + If this is a Callable, we will automatically infer the schema from + the type annotations on the function (see examples). Otherwise, + if you don't want to use type annotations, you may provide us the + schema string. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> import torch + >>> import numpy as np + >>> from torch import Tensor + >>> + >>> # Step 1: define the custom op. + >>> # We need to provide the API a "prototype function" + >>> # (a function that raises NotImplementedError), from which + >>> # we will infer the types of the inputs and outputs.
+ >>> @torch._custom_ops.custom_op("mylibrary::numpy_sin") + >>> def numpy_sin(x: Tensor) -> Tensor: + >>> raise NotImplementedError + >>> + >>> # The custom op is now accessible via the torch.ops module: + >>> torch.ops.mylibrary.numpy_sin + >>> + >>> # Step 2: Register an implementation for various PyTorch subsystems + >>> + >>> # Register an implementation for CPU tensors + >>> @torch._custom_ops.impl("mylibrary::numpy_sin", device_types="cpu") + >>> def numpy_sin_impl_cpu(x): + >>> return torch.from_numpy(np.sin(x.numpy())) + >>> + >>> # Register an implementation for CUDA tensors + >>> @torch._custom_ops.impl("mylibrary::numpy_sin", device_types="cuda") + >>> def numpy_sin_impl_cuda(x): + >>> return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device) + >>> + >>> x = torch.randn(3) + >>> torch.ops.mylibrary.numpy_sin(x) # calls numpy_sin_impl_cpu + >>> + >>> x_cuda = x.cuda() + >>> torch.ops.mylibrary.numpy_sin(x_cuda) # calls numpy_sin_impl_cuda + + """ + ns, name = parse_qualname(qualname) + validate_namespace(ns) + + def inner(func): + if not inspect.isfunction(func): + raise ValueError( + f"custom_op(...)(func): Expected `func` to be a Python " + f"function, got: {type(func)}" + ) + + if func.__name__ != name: + raise ValueError( + f"custom_op(qualname='{qualname}', ...)(func): expected `func` " + f"to have name '{name}' but got '{func.__name__}'. " + f"Please either change the name of `func` or the qualname that " + f"is passed to `custom_op`" + ) + + schema = infer_schema(func) + _custom_op_with_schema(qualname, schema) + return func + + if func_or_schema is None: + return inner + if isinstance(func_or_schema, str): + _custom_op_with_schema(qualname, func_or_schema) + else: + return inner(func_or_schema) + + +def impl(qualname, *, device_types=("cpu", "cuda"), func=None): + r"""Register an implementation for a device type for this custom op. + + If the op is passed multiple Tensor inputs with different device + types, it will dispatch to the registered implementation for the highest + priority device type among those present. + The supported device types, in order of priority, are {'cuda', 'cpu'}. + + This API may be used as a decorator (see examples). + + For a detailed guide on custom ops, please see + https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + + Arguments: + device_types (str or Iterable[str]): the device type(s) to register the function for. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> import torch + >>> import numpy as np + >>> from torch import Tensor + >>> + >>> # Step 1: define the custom op. + >>> # We need to provide the API a "prototype function" + >>> # (a function that raises NotImplementedError), from which + >>> # we will infer the types of the inputs and outputs.
+ >>> @torch._custom_ops.custom_op("mylibrary::numpy_cos") + >>> def numpy_cos(x: Tensor) -> Tensor: + >>> raise NotImplementedError + >>> + >>> # The custom op is now accessible via the torch.ops module: + >>> torch.ops.mylibrary.numpy_cos + >>> + >>> # Step 2: Register an implementation for various PyTorch subsystems + >>> + >>> # Register an implementation for CPU tensors + >>> @torch._custom_ops.impl("mylibrary::numpy_cos", device_types="cpu") + >>> def numpy_cos_impl_cpu(x): + >>> return torch.from_numpy(np.cos(x.numpy())) + >>> + >>> # Register an implementation for CUDA tensors + >>> @torch._custom_ops.impl("mylibrary::numpy_cos", device_types="cuda") + >>> def numpy_cos_impl_cuda(x): + >>> return torch.from_numpy(np.cos(x.cpu().numpy())).to(x.device) + >>> + >>> x = torch.randn(3) + >>> torch.ops.mylibrary.numpy_cos(x) # calls numpy_cos_impl_cpu + >>> + >>> x_cuda = x.cuda() + >>> torch.ops.mylibrary.numpy_cos(x_cuda) # calls numpy_cos_impl_cuda + + """ + + def inner(func): + custom_op = _find_custom_op(qualname, also_check_torch_library=True) + custom_op.impl(device_types, _stacklevel=3)(func) + return func + + if func is None: + return inner + return inner(func) + + +def impl_abstract(qualname, *, func=None): + r"""Register an abstract implementation for this operator. + + An "abstract implementation" specifies the behavior of this operator on + Tensors that carry no data. Given some input Tensors with certain properties + (sizes/strides/storage_offset/device), it specifies what the properties of + the output Tensors are. + + The abstract implementation has the same signature as the operator. + It is run for both FakeTensors and meta tensors. To write an abstract + implementation, assume that all Tensor inputs to the operator are + regular CPU/CUDA/Meta tensors, but they do not have storage, and + you are trying to return regular CPU/CUDA/Meta tensor(s) as output. + The abstract implementation must consist of only PyTorch operations + (and may not directly access the storage or data of any input or + intermediate Tensors). + + This API may be used as a decorator (see examples). + + For a detailed guide on custom ops, please see + https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + + Examples:: + >>> import numpy as np + >>> from torch import Tensor + >>> + >>> # Example 1: an operator without data-dependent output shape + >>> @torch._custom_ops.custom_op("mylibrary::custom_linear") + >>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor: + >>> raise NotImplementedError + >>> + >>> @torch._custom_ops.impl_abstract("mylibrary::custom_linear") + >>> def custom_linear_abstract(x, weight, bias): + >>> assert x.dim() == 2 + >>> assert weight.dim() == 2 + >>> assert bias.dim() == 1 + >>> assert x.shape[1] == weight.shape[1] + >>> assert weight.shape[0] == bias.shape[0] + >>> assert x.device == weight.device + >>> + >>> return (x @ weight.t()) + bias + >>> + >>> # Example 2: an operator with data-dependent output shape + >>> @torch._custom_ops.custom_op('mylibrary::custom_nonzero') + >>> def custom_nonzero(x: Tensor) -> Tensor: + >>> ... + >>> + >>> @torch._custom_ops.impl_abstract("mylibrary::custom_nonzero") + >>> def custom_nonzero_abstract(x): + >>> # Number of nonzero elements is data-dependent. + >>> # Since we cannot peek at the data in an abstract impl, + >>> # we use the ctx object to construct a new symint that + >>> # represents the data-dependent size.
+ >>> ctx = torch._custom_ops.get_ctx() + >>> nnz = ctx.create_unbacked_symint() + >>> shape = [x.dim(), nnz] + >>> result = x.new_empty(shape, dtype=torch.long) + >>> return result + >>> + >>> @torch._custom_ops.impl("mylibrary::custom_nonzero") + >>> def custom_nonzero_impl(x): + >>> x_np = to_numpy(x) + >>> res = np.stack(np.nonzero(x_np), axis=1) + >>> # unbacked symbolic ints in PyTorch must be >= 2, so we + >>> # constrain the range to at least 2 + >>> if res.shape[0] <= 1: + >>> raise RuntimeError("not supported") + >>> return torch.tensor(res, device=x.device) + + """ + import torch.library + + return torch.library.register_fake(qualname, func, _stacklevel=2) + + +def impl_save_for_backward(qualname, *, func=None): + r"""Register a function that tells us what to save for backward. + + Please see :func:`impl_backward` for more details. + """ + + def inner(func): + custom_op = _find_custom_op(qualname, also_check_torch_library=True) + custom_op.impl_save_for_backward(_stacklevel=3)(func) + return func + + if func is None: + return inner + return inner(func) + + +def impl_backward(qualname, output_differentiability=None, *, func=None): + r"""Registers a backward formula for an operator. + + In order for an operator to work with autograd, you need to register + a backward formula. There are two pieces to this: + 1. You must give us a function to specify what to save for backward. + Call this the "save for backward" function. + 2. You must give us a function that computes gradients. Call this the + "backward" function. + + Use `impl_save_for_backward` to define a "save for backward" function + that specifies what gets saved for backward. The function should accept + two arguments ``(inputs, output)`` and return the quantities to be saved + for backward. + + During runtime, when you call the operator in a forwards pass, PyTorch + will invoke the "save for backward" function with the inputs and output + of the operator. + + Use `impl_backward` to define the "backward" function. The backward + function must accept ``(ctx, saved, *grads)``: + - ``ctx`` is a context object where we may provide information + - ``saved`` is exactly what gets returned from the "save for backward" + function + - ``grads`` is one or more gradients. The number of gradients matches + the number of outputs of the operator. + + The backward function must return a dict that maps the name of + an input to the operator to its corresponding gradient. All inputs that + were declared to be Tensors in the operator definition must be accounted + for in the dict. The gradient may be a Tensor or None. + + For a detailed guide on custom ops, please see + https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk + + """ + + def inner(func): + custom_op = _find_custom_op(qualname, also_check_torch_library=True) + custom_op.impl_backward(output_differentiability, _stacklevel=3)(func) + return func + + if func is None: + return inner + return inner(func) + + +def _destroy(qualname): + """De-registers a custom op. 
For testing purposes only""" + custom_op = _find_custom_op(qualname) + custom_op._destroy() diff --git a/parrot/lib/python3.10/site-packages/torch/_refs/__init__.py b/parrot/lib/python3.10/site-packages/torch/_refs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..db1f2a99d3d45d7a4b0a58733342bcdd0d33c5c7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_refs/__init__.py @@ -0,0 +1,6492 @@ +# mypy: allow-untyped-defs +import builtins +import collections +import inspect +import itertools +import math +import operator +import warnings + +from collections.abc import Iterable +from enum import Enum +from functools import partial, reduce, singledispatch, wraps +from typing import Any, Callable, Dict, List, Optional, overload, Sequence, Tuple, Union + +import torch + +import torch._prims as prims +import torch._prims_common as utils +from torch import sym_float, sym_int +from torch._prims_common import ( + BoolLike, + DeviceLikeType, + Dim, + DimsSequenceType, + DimsType, + dtype_to_type, + ELEMENTWISE_TYPE_PROMOTION_KIND, + FloatLike, + FloatWithoutSymFloat, + IntLike, + is_weakly_lesser_type, + Number, + NumberType, + RealNumberType, + REDUCTION_OUTPUT_TYPE_KIND, + ShapeType, + StrideType, + TensorLike, + TensorLikeType, + TensorOrNumberLikeType, + TensorSequenceType, +) +from torch._prims_common.wrappers import ( + _maybe_convert_to_dtype, + _maybe_resize_out, + _safe_copy_out, + elementwise_type_promotion_wrapper, + elementwise_unary_scalar_wrapper, + out_wrapper, +) + +# Experimental module containing prototype Python references for existing +# PyTorch operations. + +__all__ = [ + # + # Elementwise Unary References + # + "abs", + "acos", + "acosh", + "asinh", + "asin", + "atan", + "atanh", + "bitwise_not", + # "cbrt", # No corresponding torch operation + "ceil", + "conj_physical", + "cos", + "cosh", + "count_nonzero", + "deg2rad", + "digamma", + "erf", + "erfinv", + "erfc", + "exp", + "expm1", + "exponential", + "exp2", + "fill", + "fill_", + "floor", + "frac", + "geometric", + "index_add", + "index_copy", + "index_copy_", + "index_select", + "index_fill", + "index_fill_", + "isfinite", + "isinf", + "isposinf", + "isneginf", + "isnan", + "isreal", + "i0", + "lerp", + "lgamma", + "log", + "log1p", + "log2", + "log10", + "log_normal", + "log_softmax", + "mvlgamma", + "norm", + "normal", + "nan_to_num", + "neg", + "positive", + "rad2deg", + "reciprocal", + "round", # TODO: model kwargs + "sigmoid", + "sgn", + "sign", + "signbit", + "sin", + "sinc", + "sinh", + "softmax", + "sqrt", + "square", + "tan", + "tanh", + "trace", + "trunc", + # + # Elementwise Binary References + # + "add", + "atan2", + "bitwise_and", + "bitwise_left_shift", + "bitwise_or", + "bitwise_right_shift", + "bitwise_xor", + "clamp_min", + "clamp_max", + "copysign", + "div", + "eq", + "float_power", + "floor_divide", + "fmax", + "fmin", + "fmod", + "gcd", + "ge", + "gt", + "heaviside", + "hypot", + "igamma", + "igammac", + "imag", + "isclose", + "lcm", + # 'ldexp', + "le", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logsumexp", + "lt", + # 'max', # implement with reductions + "maximum", + # 'min', # implement with reductions + "minimum", + "mul", + "ne", + "nextafter", + # 'polar', # abs, cos, sin + "pow", + "real", + "rpow", + "remainder", + "rsub", + "rtruediv", + "rfloordiv", + "sub", + "true_divide", + "trunc_divide", + "xlogy", + # + # Elementwise Ternary References + # + "addcdiv", + "addcmul", + "clamp", + # + # 
Conditional references + # + "masked_fill", + "masked_fill_", + "where", + # + # Data conversion and movement references + # + "clone", + "copy_to", # TODO: add OpInfo (or implement .to) + "item", + "to", + # + # Reduction ops + # + "all", + "amax", + "amin", + "any", + "cumsum", + "cumprod", + "mean", + "dot", + "vdot", + "std", + "std_mean", + "sum", + "sum_to_size", + "prod", + "var", + "var_mean", + # + # Linear algebra ops + # + "addr", + # + # View & Shape Ops + # + "alias", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "as_strided", + "as_strided_scatter", + "block_diag", + "broadcast_shapes", + "broadcast_tensors", + "broadcast_to", + "cat", + "chunk", + "column_stack", + "conj", + "constant_pad_nd", + "contiguous", + "diag_embed", + "diag", + "diagonal", + "diagonal_copy", + "diagonal_scatter", + "dsplit", + "dstack", + "expand", + "expand_as", + "flatten", + "flip", + "fliplr", + "flipud", + "hsplit", + "hstack", + "meshgrid", + "movedim", + "narrow", + "narrow_copy", + "native_group_norm", + "native_layer_norm", + "permute", + "ravel", + "repeat", + "reshape", + "reshape_as", + "roll", + "rot90", + "rsqrt", + "stack", + "swap_axes", # alias for transpose + "squeeze", + "t", + "T", + "take_along_dim", + "tensor_split", + "transpose", + "unfold", + "unfold_copy", + "unsqueeze", + "view", + "view_as", + "vsplit", + "vstack", + "view_as_complex", + "unflatten", + "unbind", + "triu", + "tril", + "triu_indices", + "tril_indices", + # + # Tensor Creation + # + "arange", + "cauchy", + "empty", + "empty_like", + "empty_permuted", + "empty_strided", + "eye", + "full", + "full_like", + "linspace", + "logspace", + "new_empty", + "new_empty_strided", + "new_full", + "new_ones", + "new_zeros", + "ones", + "ones_like", + "randn", + "scalar_tensor", + "zero", + "zeros", + "zeros_like", + # + # Test-related functions + # + "allclose", + "equal", + # + # Statistical operations + # + "bucketize", + # + # Misc + # + "is_complex", + "renorm", + "stft", + "istft", +] + +Tensor = torch.Tensor +DispatchKey = torch._C.DispatchKey # type: ignore[attr-defined] +aten = torch._ops.ops.aten + +# Note that the docstrings for the public methods from this file are in +# torch/_torch_docs.py + + +def is_noncontiguous_supported(device): + if device is not None and device.type == "hpu": + return False + return True + + +def handle_noncontiguous_outputs(input_tlist, output): + device = None + from torch._subclasses.fake_tensor import FakeTensor + + for t in input_tlist: + if isinstance(t, FakeTensor): + device = t.fake_device + break + + if not is_noncontiguous_supported(device): + output = output.contiguous() + + return output + + +def _broadcast_shapes(*_shapes): + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + shapes = tuple( + (x,) if isinstance(x, IntLike) else x + for x in filter(lambda x: x is not None, _shapes) + ) + + # Short-circuits on no input + if len(shapes) == 0: + return None + + # Type checking + # TODO: make common validations available as utils + for shape in shapes: + assert isinstance(shape, Sequence) + + # Computes common shape + common_shape = [ + 1, + ] * reduce(max, (len(shape) for shape in shapes)) + for arg_idx, shape in enumerate(shapes): + for idx in range(-1, -1 - len(shape), -1): + if guard_size_oblivious(common_shape[idx] == 1): + if shape[idx] < 0: + raise ValueError( + "Attempting to broadcast a dimension with negative length!" 
+ ) + common_shape[idx] = shape[idx] + elif guard_size_oblivious(shape[idx] != 1): + if common_shape[idx] != shape[idx]: + raise RuntimeError( + f"Attempting to broadcast a dimension of length {shape[idx]} at {idx}! " + f"Mismatching argument at index {arg_idx} had {shape}; but expected shape " + f"should be broadcastable to {common_shape}" + ) + + return common_shape + + +def _maybe_broadcast(*args, preserve_cpu_scalar_tensors=True): + # Computes common shape + common_shape = _broadcast_shapes( + *(t.shape if isinstance(t, TensorLike) else None for t in args) + ) + + def __maybe_broadcast(x, shape): + if x is None: + return None + elif isinstance(x, Number): + return x + elif isinstance(x, TensorLike): + if preserve_cpu_scalar_tensors and utils.is_cpu_scalar_tensor(x): + return x + + if not utils.same_shape(x.shape, common_shape): + return x.expand(common_shape) + + return x + else: + raise RuntimeError( + "Unexpected type when broadcasting: " + str(type(x)) + "!" + ) + + return tuple(__maybe_broadcast(x, common_shape) for x in args) + + +# Utilities should come BEFORE this import +from torch._decomp import register_decomposition + +# +# Elementwise unary references +# + +infer_aten_op = object() + + +# TODO: add type promotion support +def _make_elementwise_unary_reference( + type_promotion_kind, + *, + aten_op=infer_aten_op, + extra_meta=None, +) -> Callable: + def inner(prim: Callable): + nonlocal aten_op + + @wraps(prim) + @out_wrapper() + @elementwise_unary_scalar_wrapper + @elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=type_promotion_kind, + ) + def _ref(a: TensorLikeType) -> TensorLikeType: + if extra_meta is not None: + extra_meta(a) + + output = prim(a) + return handle_noncontiguous_outputs([a], output) + + if aten_op is infer_aten_op: + aten_op = utils.get_aten_op(prim, prim.__name__) + if aten_op is not None: + register_decomposition(aten_op)(_ref) + + return _ref + + return inner + + +def _make_alias(fn, name): + """ + This function defines an alias of another function and sets its __name__ argument. + It also sets its __module__ argument to the module of the caller. + Note that when naively doing `alias = fn`, we have that `alias.__name__ == "fn"`, and + `alias.__module__ == fn.__module__`. + """ + + def _fn(*args, **kwargs): + return fn(*args, **kwargs) + + _fn.__name__ = name + _fn.__module__ = inspect.currentframe().f_back.f_globals["__name__"] # type: ignore[union-attr] + return _fn + + +def _make_inplace(fn): + """ + Given a function with out variant (i.e. using `out_wrapper()), it returns its in-place variant + See https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-do-in-place-operations-work-in-pytorch + """ + + # nb. We use the name of the first argument used in the unary references + @wraps(fn) + def _fn(a, *args, **kwargs): + return fn(a, *args, out=a, **kwargs) + + inplace_name = f"{fn.__name__}_" + _fn.__name__ = inplace_name + _fn = register_decomposition(getattr(aten, inplace_name))(_fn) + + # We access the __all__ attribute of the module where fn is defined + # There may be a cleaner way of doing this... 
+ from inspect import getmodule + + _all = getmodule(fn).__all__ # type: ignore[union-attr] + if inplace_name not in _all: + _all.append(inplace_name) + return _fn + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT) +def abs(a): + return prims.abs(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def acos(a): + return prims.acos(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def acosh(a): + return prims.acosh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def asin(a): + return prims.asin(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def asinh(a): + return prims.asinh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def atan(a): + return prims.atan(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def atanh(a): + return prims.atanh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def bitwise_not(a): + return prims.bitwise_not(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def ceil(a): + return prims.ceil(a) + + +@register_decomposition(aten.is_complex) +def is_complex(input: TensorLikeType): + return utils.is_complex_dtype(input.dtype) + + +@register_decomposition(aten.conj_physical) +@out_wrapper() +def conj_physical(input: TensorLikeType): + if not utils.is_complex_dtype(input.dtype): + return input + return prims.conj_physical(input) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def cos(a): + return prims.cos(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def cosh(a): + return prims.cosh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def digamma(a): + return prims.digamma(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def erf(a): + return prims.erf(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def erfinv(a): + return prims.erf_inv(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def erfc(a): + return prims.erfc(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def exp(a): + return prims.exp(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def expm1(a): + return prims.expm1(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def exp2(a): + return prims.exp2(a) + + +# Fill has its own implementation because it has a value parameter +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, +) +def fill(a: TensorLikeType, value: NumberType) -> TensorLikeType: + assert isinstance(a, TensorLike) + assert isinstance(value, Number) + + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(value), python_type): + msg = f"value argument of type {type(value)} cannot be safely cast to type {python_type}!"
+ raise ValueError(msg) + + return prims.fill(a, value) + + +def fill_(a: TensorLikeType, value: NumberType) -> TensorLikeType: + r = prims.fill(a, value) + prims.copy_to(a, r) + return a + + +@register_decomposition(aten.zero) +@out_wrapper() +def zero(input: TensorLikeType) -> TensorLikeType: + return torch.zeros_like(input) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def floor(a): + return prims.floor(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def frac(x: TensorLikeType) -> TensorLikeType: + trunc_x = torch.mul(torch.floor(torch.abs(x)), torch.sign(x)) + return torch.sub(x, trunc_x) + + +# imag does not use _make_elementwise_unary_reference because it does not support out +def imag(a: TensorLikeType) -> TensorLikeType: + assert isinstance(a, TensorLike) + torch._check( + utils.is_complex_dtype(a.dtype), lambda: "imag only supports complex tensors." + ) + return prims.imag(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + aten_op=None, # CompositeImplicitAutograd +) +def isfinite(a: TensorLikeType) -> TensorLikeType: + if utils.is_float_dtype(a.dtype) or utils.is_complex_dtype(a.dtype): + return prims.isfinite(a) + + return ones_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isinf(a: TensorLikeType) -> TensorLikeType: + if utils.is_complex_dtype(a.dtype): + return torch.logical_or(isinf(torch.real(a)), isinf(torch.imag(a))) + if utils.is_float_dtype(a.dtype): + return torch.abs(a) == float("inf") + return torch.zeros_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isposinf(a: TensorLikeType) -> TensorLikeType: + torch._check( + not utils.is_complex_dtype(a.dtype), + lambda: f"Complex dtype is not supported for isposinf, got dtype {a.dtype}", + ) + if utils.is_float_dtype(a.dtype): + return a == float("inf") + return torch.zeros_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isneginf(a: TensorLikeType) -> TensorLikeType: + torch._check( + not utils.is_complex_dtype(a.dtype), + lambda: f"Complex dtype is not supported for isneginf, got dtype {a.dtype}", + ) + if utils.is_float_dtype(a.dtype): + return a == float("-inf") + return torch.zeros_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isnan(a: TensorLikeType) -> TensorLikeType: + return prims.ne(a, a) + + +# alias +mvlgamma = _make_alias(torch.special.multigammaln, "mvlgamma") # type: ignore[has-type] + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + aten_op=None, # CompositeImplicitAutograd +) +def isreal(a: TensorLikeType) -> TensorLikeType: + if utils.is_complex_dtype(a.dtype): + return torch.imag(a) == 0 + return torch.ones_like(a, dtype=torch.bool) + + +# TODO: if this is special maybe it should be defined there and imported here? 
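+# A minimal usage sketch of the unary-reference machinery above (illustrative only; `erfcx` is a hypothetical prim named for exposition, not part of this file): +# +# @_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +# def erfcx(a): +# return prims.erfcx(a) +# +# The decorator layers out= support, scalar handling, and type promotion onto the prim and, unless aten_op=None, registers the result as a decomposition for the aten op inferred from the function name.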
+@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, aten_op=aten.i0 +) +def i0(a): + return prims.bessel_i0(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def lgamma(a): + return prims.lgamma(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log(a): + return prims.log(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log1p(a): + return prims.log1p(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log2(a): + return prims.log2(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log10(a): + return prims.log10(a) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def log_softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + result_dtype = dtype or a.dtype + computation_dtype = utils.get_computation_dtype(result_dtype) + a_ = _maybe_convert_to_dtype(a, computation_dtype) + return _maybe_convert_to_dtype(a_ - logsumexp(a_, dim, keepdim=True), result_dtype) # type: ignore[return-value] + + +@register_decomposition(aten.logsumexp) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def logsumexp( + self: TensorLikeType, dim: DimsType, keepdim: bool = False +) -> TensorLikeType: + if not isinstance(dim, Iterable): + dim = (dim,) + if self.numel() == 0: + return torch.sum(torch.exp(self), dim, keepdim).log() + maxes = torch.amax(self, dim, keepdim=True) + maxes = torch.masked_fill(maxes, maxes.abs() == float("inf"), 0) + maxes_squeezed = maxes if keepdim else torch.squeeze(maxes, dim) + result = torch.sum(torch.exp(self - maxes), dim, keepdim) + return result.log().add(maxes_squeezed) + + +@register_decomposition(aten.nan_to_num) +@out_wrapper() +def nan_to_num( + a: TensorLikeType, + nan: Optional[NumberType] = 0.0, + posinf: Optional[NumberType] = None, + neginf: Optional[NumberType] = None, +) -> TensorLikeType: + assert isinstance(a, TensorLike) + + if utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype): + return a.clone() + + if nan is None: + nan = 0.0 + + if posinf is None: + posinf = torch.finfo(a.dtype).max + + if neginf is None: + neginf = torch.finfo(a.dtype).min + + result = torch.where(torch.isnan(a), nan, a) # type: ignore[call-overload] + result = torch.where(torch.isneginf(a), neginf, result) # type: ignore[call-overload] + result = torch.where(torch.isposinf(a), posinf, result) # type: ignore[call-overload] + return result + + +def _neg_meta(a: TensorLikeType): + torch._check( + a.dtype is not torch.bool, + lambda: ( + "Negation, the `-` operator, on a bool tensor is not supported. " + "If you are trying to invert a mask, use the `~` or `logical_not()` " + "operator instead." + ), + ) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, extra_meta=_neg_meta +) +def neg(a): + return prims.neg(a) + + +# positive does not use _make_elementwise_unary_reference because it does not support out +# CompositeImplicitAutograd - don't register decomp +def positive(a: TensorLikeType) -> TensorLikeType: + assert isinstance(a, TensorLike) + if a.dtype is torch.bool: + msg = "positive does not support bool tensors." 
+ raise RuntimeError(msg) + return a + + +# real does not use _make_elementwise_unary_reference because it does not support out +def real(a: TensorLikeType) -> TensorLikeType: + assert isinstance(a, TensorLike) + if utils.is_complex_dtype(a.dtype): + return prims.real(a) + return a + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def reciprocal(a): + return prims.reciprocal(a) + + +@register_decomposition(aten.round) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def round(a: TensorLikeType, *, decimals: int = 0) -> TensorLikeType: + if decimals == 0: + return prims.round(a) + else: + ten_pow = 10**decimals + ten_neg_pow = 10 ** (-decimals) + return prims.mul(prims.round(prims.mul(a, ten_pow)), ten_neg_pow) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def rsqrt(a): + return prims.rsqrt(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sigmoid(a: TensorLikeType) -> TensorLikeType: + return true_divide(1, add(1, exp(neg(a)))) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def sgn(a): + if utils.is_complex_dtype(a.dtype): + a_abs = a.abs() + return torch.where(a_abs == 0, 0, a / a_abs) + else: + return a.sign() + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def sign(a): + return prims.sign(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def signbit(a): + return prims.signbit(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sin(a): + return prims.sin(a) + + +# Autograd note: This will give the right first derivative at zero (by chance), +# but not the right second derivative +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sinc(a): + a = math.pi * a + return torch.where(a == 0, 1, torch.sin(a) / a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sinh(a): + return prims.sinh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sqrt(a): + return prims.sqrt(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG, + aten_op=None, # CompositeImplicitAutograd, +) +def square(a: TensorLikeType) -> TensorLikeType: + return mul(a, a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def tan(a): + return prims.tan(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def tanh(a): + return prims.tanh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def trunc(a): + return prims.trunc(a) + + +# TODO: register this as a real ref/decomposition once TorchInductor supports complex! 
+def view_as_complex(self: TensorLikeType) -> TensorLikeType: + input_dtype = self.dtype + torch._check( + utils.is_float_dtype(input_dtype), + lambda: f"view_as_complex is only supported for floating point " + f"tensors, but got a tensor of scalar type: {input_dtype}", + ) + sizes = self.size() + torch._check( + len(sizes) != 0, + lambda: "Input tensor must have one or more dimensions", + ) + torch._check( + sizes[-1] == 2, + lambda: "Tensor must have a last dimension of size 2", + ) + + old_strides = self.stride() + torch._check( + old_strides[-1] == 1, + lambda: "Tensor must have a last dimension with stride 1", + ) + dims = old_strides[:-1] + torch._check( + py_all(stride % 2 == 0 for stride in dims), + lambda: "Tensor must have a stride divisible by 2 for all but last dimension", + ) + torch._check( + self.storage_offset() % 2 == 0, + lambda: "Tensor must have a storage_offset divisible by 2", + ) + return prims.view_element_type( + self, utils.corresponding_complex_dtype(input_dtype) + ).squeeze(-1) + + +def _make_elementwise_binary_reference( + type_promotion_kind, + aten_op=infer_aten_op, + name=None, + has_out=True, + supports_lhs_python_scalar=True, + supports_rhs_python_scalar=True, + supports_two_python_scalars=False, + should_register_decomposition=True, +) -> Callable: + def inner(prim: Callable): + nonlocal aten_op, name + if name is None: + name = prim.__name__ + + @wraps(prim) + @elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=type_promotion_kind, + ) + def _ref( + a: Union[Tensor, NumberType], + b: Union[Tensor, NumberType], + ) -> Tensor: + torch._check_value( + supports_lhs_python_scalar or not isinstance(a, Number), + lambda: f"{name}: Received a lhs Python scalar to an elementwise binary " + "operation that does not accept lhs scalars!", + ) + torch._check_value( + supports_rhs_python_scalar or not isinstance(b, Number), + lambda: f"{name}: Received a rhs Python scalar to an elementwise binary " + "operation that does not accept rhs scalars!", + ) + torch._check_value( + supports_two_python_scalars + or not (isinstance(a, Number) and isinstance(b, Number)), + lambda: f"{name}: Received two Number inputs to an elementwise binary operation!", + ) + a, b = _maybe_broadcast(a, b) + output = prim(a, b) + return handle_noncontiguous_outputs([a, b], output) + + if has_out: + _ref = out_wrapper()(_ref) + + _ref.__name__ = name + if aten_op is infer_aten_op: + aten_op = utils.get_aten_op(prim, name) + if aten_op is not None and should_register_decomposition: + register_decomposition(aten_op)(_ref) + + return _ref + + return inner + + +# Add has its own implementation because it has an alpha argument +@register_decomposition(aten.add) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def add( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + *, + alpha: Optional[NumberType] = None, +): + """ + Reference implementation of torch.add + """ + + a, b = _maybe_broadcast(a, b) + + if alpha is not None: + dtype = a.dtype if isinstance(a, TensorLike) else b.dtype # type: ignore[union-attr] + python_type = utils.dtype_to_type(dtype) + if python_type != bool and not utils.is_weakly_lesser_type( + type(alpha), python_type + ): + msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!"
+ raise ValueError(msg) + if isinstance(b, TensorLike): + b = prims.mul(b, alpha) + else: + b = b * alpha + + output = prims.add(a, b) + return handle_noncontiguous_outputs([a, b], output) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def atan2(a, b): + return prims.atan2(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_and(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.bitwise_and(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_left_shift(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.shift_left(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_or(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.bitwise_or(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_right_shift(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.shift_right_arithmetic(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_xor(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.bitwise_xor(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, +) +def copysign( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +): + if isinstance(b, Number) and isinstance(a, Tensor): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(a, Tensor) and isinstance(b, Tensor) and a.device != b.device: + msg = f"Expected divisor (b) to be on the same device ({a.device}) as dividend (a), but it is found on {b.device}!" + raise RuntimeError(msg) + return where(signbit(b), neg(abs(a)), abs(a)) + + +# complex = _make_elementwise_binary_reference(prims.complex, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) + + +@register_decomposition(aten.div) +@out_wrapper() +def div( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + *, + rounding_mode: Optional[str] = None, +): + """ + Reference implementation of torch.div + """ + if rounding_mode is None: + return true_divide(a, b) + elif rounding_mode == "trunc": + return trunc_divide(a, b) + elif rounding_mode == "floor": + return floor_divide(a, b) + else: + msg = f"div expected rounding_mode to be one of None, 'trunc', or 'floor' but found {rounding_mode}." 
+ raise ValueError(msg) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def eq(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.eq(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG, +) +def pow( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], +) -> TensorLikeType: + assert isinstance(a, TensorLikeType) or isinstance(b, TensorLikeType) + + if isinstance(b, Number): + if b == 1.0: + return a.clone() # type: ignore[return-value,union-attr] + elif b == 2.0: + return a * a # type: ignore[return-value] + elif b == 0.5: + return torch.sqrt(a) # type: ignore[arg-type] + elif isinstance(a, Number): + if a == 1.0: + return torch.fill(b, True) + if a == 2.0 and ( + utils.is_float_dtype(b.dtype) or utils.is_complex_dtype(b.dtype) + ): + return torch.exp2(b) + + return prims.pow(a, b) + + +# Float power has its own implementation because it has unique type promotion. +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def float_power( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], +) -> Tensor: + if isinstance(a, Number) and isinstance(b, Number): + raise ValueError( + "Received two Number inputs to an elementwise binary operation!" + ) + + # Handles type promotion + dtype = utils.get_higher_dtype(a, b) + assert dtype is not None + if utils.is_complex_dtype(dtype): + dtype = torch.complex128 + else: + dtype = torch.float64 + + # Float power has the following contiguous cast behavior to be + # consistent with its C++ impl + a = _maybe_convert_to_dtype(a, dtype) + b = _maybe_convert_to_dtype(b, dtype) + + a, b = _maybe_broadcast(a, b) + return pow(a, b) + + +# >>> a = torch.tensor(-0.2500, dtype=torch.float64) +# tensor(-0.250000000000000, dtype=torch.float64) +# +# >>> b = torch.tensor(-0.0010, dtype=torch.float64) +# tensor(-0.001000000000000, dtype=torch.float64) +# +# Note: In this case, casting float to double will expand the float mantissa with zeros, +# while creating a double generates a distinct mantissa. +# >>> torch.tensor(-0.001).to(dtype=torch.float64) +# tensor(-0.001000000047497, dtype=torch.float64) +# +# Floor Division +# The difference is caused because torch.remainder(a, b) = -0.001. +# +# >>> torch.floor(torch.true_divide(a, b)) +# tensor(250., dtype=torch.float64) +# +# >>> torch.div(a, b, rounding_mode='floor') +# tensor(249., dtype=torch.float64) +# +# Definition: a // b = (a - remainder(a, b)) / b +# >>> torch.true_divide(torch.sub(a, torch.remainder(a, b)), b) +# tensor(249., dtype=torch.float64) +# +# For reference, see CPython's implementation: +# https://github.com/python/cpython/blob/ace008c531dd685a30c1dd68f9b5ba35f20171cf/Objects/floatobject.c#L636 + + +@_make_elementwise_binary_reference( + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_two_python_scalars=True, + should_register_decomposition=False, +) +def floor_divide( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +): + # Wrap scalars because some references only accept tensor arguments.
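+ # (Descriptive summary of the branches below: two Python scalars are both + # wrapped as scalar tensors; a single scalar inherits the other operand's + # dtype and device; and when the two tensors disagree on device, a CPU + # dividend raises, otherwise the divisor is copied onto the dividend's device.)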
+ if isinstance(a, Number) and isinstance(b, Number): + a = scalar_tensor(a) + b = scalar_tensor(b) + elif isinstance(b, Number) and isinstance(a, Tensor): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(a, Number) and isinstance(b, Tensor): + a = scalar_tensor(a, dtype=b.dtype, device=b.device) + elif isinstance(a, Tensor) and isinstance(b, Tensor) and a.device != b.device: + if a.device == torch.device("cpu"): + msg = f"Expected divisor (b) to be on the same device ({a.device}) as dividend (a), but it is found on {b.device}!" + raise RuntimeError(msg) + else: + b = prims.device_put(b, device=a.device) + + assert isinstance(a, Tensor) and isinstance(b, Tensor) + dtype = a.dtype + if utils.is_float_dtype(dtype): + return _floor_divide_float(a, b) + elif utils.is_integer_dtype(dtype): + return _floor_divide_integer(a, b) + else: + torch._check(False, lambda: f"{dtype} not supported for floor_divide") + + +def _floor_divide_integer(a: Tensor, b: Tensor) -> Tensor: + a, b = _maybe_broadcast(a, b) + + if not a.dtype.is_signed: + return prims.div(a, b) + + # Convert truncation to flooring: + offset = (torch.signbit(a) != torch.signbit(b)).logical_and(torch.fmod(a, b) != 0) + return prims.div(a, b) - _maybe_convert_to_dtype(offset, a.dtype) + + +def _floor_divide_float(a: Tensor, b: Tensor) -> Tensor: + mod = fmod(a, b) + div = true_divide(sub(a, mod), b) + + # Ensure that the remainder has the same sign as denominator + different_signed_inputs = bitwise_xor(lt(a, 0), lt(b, 0)) + non_zero_remainder = ne(mod, 0) + mask = bitwise_and(non_zero_remainder, different_signed_inputs) + div = where(mask, sub(div, 1), div) + + # Map quotient to nearest integer value + floor_div = floor(div) + mask = gt(sub(div, floor_div), 0.5) + floor_div = where(mask, add(floor_div, 1), floor_div) + + basic_div = true_divide(a, b) + zero_tensor = scalar_tensor(0, dtype=basic_div.dtype, device=basic_div.device) + + # If quotient is zero, copy signbit from true_divide quotient + floor_div = where(ne(div, 0), floor_div, copysign(zero_tensor, basic_div)) + + # If denominator is zero, then follow true_divide behavior + return where(ne(b, 0), floor_div, basic_div) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def fmax(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.fmax(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def fmin(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.fmin(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=True, +) +def fmod(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.fmod(a, b) + + +@register_decomposition(aten.frexp) +@out_wrapper("mantissa", "exponent") +def frexp(self: TensorLikeType) -> Tuple[TensorLikeType, TensorLikeType]: + return torch.return_types.frexp(prims.frexp(self)) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def gcd(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.gcd(a, b) + + +@_make_elementwise_binary_reference( + 
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def ge(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.ge(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def gt(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.gt(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def heaviside(input: TensorLikeType, values: TensorLikeType) -> TensorLikeType: + input_eq_zero = torch.eq(input, 0) + input_lt_zero = torch.logical_or(torch.lt(input, 0), torch.isnan(input)) + zeros_and_ones = torch.where(input_lt_zero, 0, 1) + output = torch.where(input_eq_zero, values, zeros_and_ones) + return output + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def hypot(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.hypot(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def igamma(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.igamma(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def igammac(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.igammac(a, b) + + +def _check_close_args( + name: str, + a: TensorLikeType, + b: TensorLikeType, + rtol: float, + atol: float, +) -> None: + torch._check_value( + a.dtype == b.dtype, + lambda: f"{name}: Attempting to compare tensors of different dtypes {a.dtype} and {b.dtype}!", + ) + torch._check( + rtol >= 0, + lambda: f"{name}: rtol must be greater than or equal to zero, but got {rtol}!", + ) + torch._check( + atol >= 0, + lambda: f"{name}: atol must be greater than or equal to zero, but got {atol}!", + ) + + +# CompositeImplicitAutograd - don't register decomp +def isclose( + a: TensorLikeType, + b: TensorLikeType, + rtol: float = 1e-05, + atol: float = 1e-08, + equal_nan: bool = False, +) -> TensorLikeType: + _check_close_args(name="torch.isclose", a=a, b=b, rtol=rtol, atol=atol) + + close = eq(a, b) + if equal_nan and (utils.is_float_dtype(a.dtype) or utils.is_complex_dtype(a.dtype)): + close = logical_or(close, logical_and(isnan(a), isnan(b))) + + # Note: In case of zero tolerances the closeness inequality degenerates to an equality check. + # In this case, the short-circuit prevents false positives as detailed in the paragraph below. + if atol == 0 and rtol == 0: + return close + + # Note [closeness error computation] + # atol and rtol are provided as doubles, so the computation + # rtol * other will produce a float or complex tensor. + # When the difference (self - other) is compared to it then the + # tensor representing the difference will also be cast to float or complex. + # However, since (self - other) in uint8 is very likely to produce a + # negative value, this moves the cast forward so the difference is + # always computed in a float or complex type. 
+    # If the values of the integer tensors cannot be exactly represented
+    # by the default scalar type then this may cause an incorrect result.
+    if not utils.is_float_dtype(a.dtype) and not utils.is_complex_dtype(a.dtype):
+        a = prims.convert_element_type(a, torch.get_default_dtype())
+        b = prims.convert_element_type(b, torch.get_default_dtype())
+
+    allowed_error = add(atol, abs(mul(b, rtol)))
+    actual_error = abs(sub(a, b))
+
+    # Computes finite closeness
+    result = logical_or(
+        close, logical_and(isfinite(actual_error), le(actual_error, allowed_error))
+    )
+
+    return result
+
+
+@_make_elementwise_binary_reference(
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+    supports_lhs_python_scalar=False,
+    supports_rhs_python_scalar=False,
+)
+def lcm(a: TensorLikeType, b: TensorLikeType):
+    dtype = a.dtype
+    # promoting to int32 to maintain 100% consistency with C++ and to
+    # prevent overflow in case of int8 and int16
+    promote_to_int = dtype in (torch.int8, torch.int16)
+    if promote_to_int:
+        a = prims.convert_element_type(a, torch.int32)
+        b = prims.convert_element_type(b, torch.int32)
+
+    g = torch.gcd(a, b)
+    # Avoid division by zero in case gcd(0, 0) == 0
+    g = torch.where(g == 0, 1, g)
+    res = torch.abs(prims.div(a, g) * b)
+    return res if not promote_to_int else prims.convert_element_type(res, dtype)
+
+
+@_make_elementwise_binary_reference(
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
+    supports_lhs_python_scalar=False,
+)
+def le(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType:
+    return prims.le(a, b)
+
+
+@_make_elementwise_binary_reference(
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+    supports_lhs_python_scalar=False,
+    supports_rhs_python_scalar=False,
+)
+def logaddexp(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType:
+    # Nb. this implementation does not distribute the gradients evenly when a == b
+    mask = torch.real(a) >= torch.real(b)
+    max_ = torch.where(mask, a, b)
+    min_ = torch.where(mask, b, a)
+    inf_mask = torch.logical_and(
+        torch.logical_not(torch.isfinite(torch.real(a))), torch.real(a) == torch.real(b)
+    )
+    if utils.is_complex_dtype(a.dtype) or utils.is_complex_dtype(b.dtype):
+        # The branches below handle edge cases for complex infinities and NaNs.
+        neg_min_mask = torch.real(min_) < 0
+        inf_vals = torch.where(
+            neg_min_mask, min_, torch.log(torch.exp(min_) + torch.exp(max_))
+        )
+        non_nan_vals = torch.where(
+            inf_mask, inf_vals, max_ + torch.log1p(torch.exp(min_ - max_))
+        )
+        # the type for full_like does not include tensor yet
+        nan_mask = torch.isnan(min_)
+        return torch.where(nan_mask, complex(float("nan"), float("nan")), non_nan_vals)  # type: ignore[call-overload]
+    else:
+        return torch.where(inf_mask, a, max_ + torch.log1p(torch.exp(min_ - max_)))
+
+
+@_make_elementwise_binary_reference(
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+    supports_lhs_python_scalar=False,
+    supports_rhs_python_scalar=False,
+)
+def logaddexp2(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType:
+    torch._check(
+        not (utils.is_complex_dtype(a.dtype) or utils.is_complex_dtype(b.dtype)),
+        lambda: "logaddexp2 doesn't support complex dtypes",
+    )
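+    # The identity used below is the standard log-sum-exp rewrite in base 2:
+    #   log2(2**a + 2**b) == max(a, b) + log2(1 + 2**(min(a, b) - max(a, b))),
+    # where log2(1 + x) is evaluated stably as log1p(x) / ln(2).
+    # Nb. 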
this implementation does not distribute the gradients evenly when a == b + mask = a >= b + max_ = torch.where(mask, a, b) + min_ = torch.where(mask, b, a) + inf_mask = torch.logical_and(torch.isinf(a), a == b) + inv_log_2 = 1.0 / math.log(2) + result = max_ + torch.log1p(torch.exp2(min_ - max_)) * inv_log_2 + return torch.where(inf_mask, a, result) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) +def logical_and(a: TensorLikeType, b: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + a = a != 0 + if not utils.is_boolean_dtype(b.dtype): + b = b != 0 + return a & b + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def logical_not(a: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + return a == 0 + return ~a + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) +def logical_or(a: TensorLikeType, b: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + a = a != 0 + if not utils.is_boolean_dtype(b.dtype): + b = b != 0 + return bitwise_or(a, b) + + +# TODO: skip unnecessary conversion of long to float +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) +def logical_xor(a: TensorLikeType, b: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + a = a != 0 + if not utils.is_boolean_dtype(b.dtype): + b = b != 0 + return a ^ b + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def lt(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.lt(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def maximum(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.maximum(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def minimum(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.minimum(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_two_python_scalars=True, +) +def mul(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.mul(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def ne(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.ne(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def nextafter(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.nextafter(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def remainder(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.remainder(a, b) + + +# reverse sub +@register_decomposition(aten.rsub) +@out_wrapper() +def rsub( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + alpha: NumberType = 1, +): + if isinstance(a, Number): + msg = "Received a Number for the first argument, but expected a Tensor" + raise ValueError(msg) + + return torch.sub(b, a, alpha=alpha) + + +# TODO: consider refactoring this with add impl +# sub has its own implementation 
because it has an alpha argument
+@register_decomposition(aten.sub)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("a", "b"),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def sub(
+    a: Union[TensorLikeType, NumberType],
+    b: Union[TensorLikeType, NumberType],
+    *,
+    alpha: NumberType = 1,
+):
+    """
+    Reference implementation of torch.sub
+    """
+
+    a, b = _maybe_broadcast(a, b)
+
+    if alpha != 1:
+        dtype = a.dtype if isinstance(a, TensorLike) else b.dtype  # type: ignore[union-attr]
+        python_type = utils.dtype_to_type(dtype)
+        if not utils.is_weakly_lesser_type(type(alpha), python_type):
+            msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!"
+            raise ValueError(msg)
+        if isinstance(b, torch.Tensor):
+            b = prims.mul(b, alpha)
+        else:
+            # Be careful not to use prims.mul if b is a scalar / symint:
+            # prims.mul always returns a tensor,
+            # which will mess with type promotion.
+            b = b * alpha
+
+    output = prims.sub(a, b)
+    return handle_noncontiguous_outputs([a, b], output)
+
+
+@_make_elementwise_binary_reference(
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+    name="true_divide",
+    aten_op=None,  # CompositeImplicitAutograd
+    supports_two_python_scalars=True,
+)
+def true_divide(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType:
+    return prims.div(a, b)
+
+
+@register_decomposition(aten.xlogy)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("a", "b"),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def xlogy(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]):
+    torch._check(
+        isinstance(a, TensorLike) or isinstance(b, TensorLike),
+        lambda: "Expected either argument a or b to be a Tensor",
+    )
+
+    # Operations like eq and log do not handle scalar values, so we convert them to scalar_tensors.
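+    # For example, the eq/log/where calls below operate on tensors, so a plain
+    # Python number operand is first wrapped as a 0-dim tensor matching the
+    # tensor operand's dtype and device.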
+    if isinstance(b, TensorLike) and isinstance(a, Number):
+        a = scalar_tensor(a, dtype=b.dtype, device=b.device)
+    elif isinstance(a, TensorLike) and isinstance(b, Number):
+        b = scalar_tensor(b, dtype=a.dtype, device=a.device)
+
+    # mypy: expected "Tensor"
+    assert isinstance(a, TensorLike)
+    assert isinstance(b, TensorLike)
+    rhs = torch.where(torch.eq(a, 0), 0, torch.mul(a, torch.log(b)))
+    return torch.where(torch.isnan(b), float("nan"), rhs)
+
+
+@_make_elementwise_binary_reference(
+    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+    aten_op=None,  # CompositeImplicitAutograd
+    supports_two_python_scalars=True,
+)
+def trunc_divide(
+    a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]
+):
+    dtype = utils.get_dtype(a)
+    if utils.is_integer_dtype(dtype):
+        return prims.div(a, b)
+
+    return trunc(prims.div(a, b))
+
+
+#
+# Elementwise Ternary References
+#
+
+
+@register_decomposition(aten.addcdiv)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("self", "tensor1", "tensor2"),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def addcdiv(
+    self: TensorLikeType,
+    tensor1: TensorLikeType,
+    tensor2: TensorLikeType,
+    *,
+    value: NumberType = 1,
+) -> TensorLikeType:
+    """
+    Reference implementation of torch.addcdiv
+    """
+    if value is not None:
+        dtype = self.dtype  # no scalars allowed, see add
+        python_type = utils.dtype_to_type(dtype)
+        torch._check_value(
+            utils.is_weakly_lesser_type(type(value), python_type),
+            lambda: f"value argument of type {type(value)} cannot be safely cast to type {python_type}!",
+        )
+
+    return self + value * tensor1 / tensor2
+
+
+@register_decomposition(aten.addcmul)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("self", "tensor1", "tensor2"),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def addcmul(
+    self: TensorLikeType,
+    tensor1: TensorLikeType,
+    tensor2: TensorLikeType,
+    *,
+    value: NumberType = 1,
+) -> TensorLikeType:
+    """
+    Reference implementation of torch.addcmul
+    """
+    if value is not None:
+        dtype = self.dtype  # no scalars allowed, see add
+        python_type = utils.dtype_to_type(dtype)
+        torch._check_value(
+            utils.is_weakly_lesser_type(type(value), python_type),
+            lambda: f"value argument of type {type(value)} cannot be safely cast to type {python_type}!",
+        )
+
+    return self + value * tensor1 * tensor2
+
+
+@register_decomposition(aten.clamp)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("a", "min", "max"),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def clamp(
+    a: TensorLikeType,
+    min: Optional[TensorOrNumberLikeType] = None,
+    max: Optional[TensorOrNumberLikeType] = None,
+) -> TensorLikeType:
+    # NOTE: the gradient behavior of this `where`-based implementation is not consistent for `nan` inputs
+    if min is None and max is None:
+        msg = "clamp called but both min and max are None!"
+        raise ValueError(msg)
+    if min is not None:
+        a_isnan = torch.isnan(a)
+        condition = torch.bitwise_or(torch.ge(a, min), a_isnan)  # type: ignore[arg-type]
+        # we should also propagate `nan` coming from the boundaries. However, that's
+        # not necessary since `ge` already evaluates to `False` when either operand is a `nan`. 
So this line below is redundant + # `condition = bitwise_and(condition, bitwise_not(isnan(min)))` + a = torch.where(condition, a, min) # type: ignore[arg-type] + if max is not None: + a_isnan = torch.isnan(a) + # same as above, no need to adjust `nan` from `max` + condition = torch.bitwise_or(torch.le(a, max), a_isnan) # type: ignore[arg-type] + a = torch.where(condition, a, max) # type: ignore[arg-type] + + return a + + +@register_decomposition(aten.clamp_min) +@out_wrapper() +def clamp_min( + self: TensorLikeType, + min: Optional[TensorOrNumberLikeType] = None, +) -> TensorLikeType: + return torch.clamp(self, min=min) # type: ignore[arg-type] + + +@register_decomposition(aten.clamp_max) +@out_wrapper() +def clamp_max( + self: TensorLikeType, + max: Optional[TensorOrNumberLikeType] = None, +) -> TensorLikeType: + return torch.clamp(self, max=max) # type: ignore[arg-type] + + +# +# Conditional references +# + + +# https://pytorch.org/docs/stable/generated/torch.where.html +# TODO: implement alternate where +@register_decomposition(aten.where) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, +) +def where( + pred: Tensor, + a: Optional[TensorOrNumberLikeType] = None, + b: Optional[TensorOrNumberLikeType] = None, +): + """ """ + + if a is None or b is None: + raise NotImplementedError + + utils.check_same_device(pred, a, b, allow_cpu_scalar_tensors=True) + torch._check( + pred.dtype is torch.bool, + lambda: f"expected predicate to be bool, got {pred.dtype}", + ) + + pred, a, b = _maybe_broadcast(pred, a, b) + return prims.where(pred, a, b) + + +# +# Data Movement References +# +@register_decomposition(aten.clone) +@out_wrapper() +def clone( + a: TensorLikeType, *, memory_format: torch.memory_format = torch.preserve_format +) -> TensorLikeType: + result = prims.clone(a, memory_format=memory_format) + return result + + +def copy_to(a: Tensor, b: Tensor, *, allow_cross_device=True): + if not allow_cross_device and a.device != b.device: + msg = f"Attempting to copy from device {b.device} to device {a.device}, but cross-device copies are not allowed!" + raise RuntimeError(msg) + + return prims.copy_to(a, b) + + +@register_decomposition(aten.item) +def item(a: TensorLikeType) -> NumberType: + if a.numel() != 1: + msg = f"Can't convert a tensor with {a.numel()} elements to a number!" + raise ValueError(msg) + + # NOTE: explicit conversion is necessary for bool! + # See https://github.com/pytorch/pytorch/issues/78071 + number_type = utils.dtype_to_type(a.dtype) + return number_type(prims.item(a)) + + +# fast path when `to` returns an alias to input. 
This mimics the same function in aten +def _to_will_alias( + a: TensorLikeType, + device: Optional[DeviceLikeType] = None, + dtype: Optional[torch.dtype] = None, + copy: Optional[bool] = None, + layout: Optional[torch.layout] = None, + memory_format: Optional[torch.memory_format] = None, + pin_memory: Optional[bool] = False, + non_blocking: bool = False, # not using non_blocking +) -> bool: + return ( + not copy + and (device is None or a.device == device) + and (dtype is None or a.dtype == dtype) + and (layout is None or a.layout == layout) + # is_pinned issue #84925 + # and (pin_memory is None or pin_memory == a.is_pinned()) + and ( + memory_format is None + or memory_format == torch.preserve_format + or utils.is_contiguous_for_memory_format(a, memory_format=memory_format) + ) + ) + + +@singledispatch +def _to_dispatch(*args, **kwargs): + raise NotImplementedError + + +@_to_dispatch.register +def _to_device( + device: torch.device, + dtype: torch.dtype, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + kwargs = { + "device": device, + "dtype": dtype, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +@_to_dispatch.register +def _to_device_str( + device: str, + dtype: torch.dtype, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + kwargs = { + "device": torch.device(device), + "dtype": dtype, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +@_to_dispatch.register +def _to_dtype( + dtype: torch.dtype, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + kwargs = { + "dtype": dtype, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +@_to_dispatch.register +def _to_other( + other: Tensor, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + device = other.device + dtype = other.dtype + layout = other.layout + # is_pinned issue #84925 + # pin_memory = other.is_pinned() + kwargs = { + "device": device, + "dtype": dtype, + "layout": layout, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +# remove to_kwargs that is already present in `a` +def _canonicalize_to_arguments(a: Tensor, to_kwargs: dict): + options_to_check = ["dtype", "device", "layout", "memory_format"] + # "device" option could be passed a str instead torch.device + if "device" in to_kwargs and isinstance(to_kwargs["device"], str): + to_kwargs["device"] = torch.device(to_kwargs["device"]) + + for kw in options_to_check: + if kw in to_kwargs: + if ( + (kw == "memory_format" and to_kwargs[kw] is torch.preserve_format) + or ( + kw == "device" + and to_kwargs[kw].type == a.device.type + and ( + not to_kwargs[kw].index or to_kwargs[kw].index == a.device.index + ) + ) + or ( + getattr(a, kw, None) == to_kwargs[kw] + ) # this also handles {"memory_format": None} + ): + to_kwargs.pop(kw) + + +def to(a: TensorLikeType, *args, **kwargs) -> TensorLikeType: + # handled dispatch via positional arguments + if len(args) != 0: + kwargs = _to_dispatch(*args, **kwargs) + + # TODO: is_pinned is not currently supported in refs or fake_tensor + # https://github.com/pytorch/pytorch/issues/84925 + assert "pin_memory" not 
in kwargs + _canonicalize_to_arguments(a, kwargs) + + if _to_will_alias(a, **kwargs): + return a + + copy = kwargs.pop("copy") if "copy" in kwargs else False + non_blocking = kwargs.pop("non_blocking") if "non_blocking" in kwargs else False + + # short-circuit to `prims.convert_element_type` when `to` is just a dtype change + if ( + (copy or (kwargs.get("dtype", a.dtype) != a.dtype)) + and (not non_blocking) + and ("memory_format" not in kwargs) + and ("device" not in kwargs) + and ("layout" not in kwargs) + # is_pinned issue #84925 + # and ("pin_memory" not in kwargs) + ): + return prims.convert_element_type(a, kwargs.get("dtype", a.dtype)) + + result = torch.empty_like(a, **kwargs) + # TODO: non_blocking should be handled by `copy_to` + copy_to(result, a) + return result + + +# +# Reduction references +# + + +def _reduction( + a: TensorLikeType, + prim: Callable, + *, + has_identity: bool = True, + accepts_dim_tuple: bool = True, # to handle min/argmin that accept single dim only + dims: Optional[DimsType] = None, + keepdims: bool = False, + dtype: Optional[torch.dtype] = None, # should be specified for ops that support it + out: Optional[Tensor] = None, + output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND, +) -> TensorLikeType: # it is usually SAME, but I want + # ref writers to actually think about what to put here + assert isinstance(a, TensorLike) + if a.ndim > 64: + raise RuntimeError( + f"Received a tensor with {a.ndim} dimensions, but only tensors with up to 64 dims are supported!" + ) + + if out is not None: + assert isinstance(out, TensorLike) + if dtype is not None: + # TODO - this is true for eager mode currently, but it's wrong behavior for complex norms + if dtype != out.dtype: + raise RuntimeError( + "dtype argument and out dtype must match in reduction" + ) + if not accepts_dim_tuple: + assert dims is None or isinstance(dims, Dim) + if isinstance(dims, Dim): + dims = (dims,) # type: ignore[assignment] + dims = utils.reduction_dims(a.shape, dims) + if not has_identity: + valid_shape = a.ndim == 0 or py_all(a.shape[i] for i in dims) + if not valid_shape: + raise RuntimeError( + "reducing over zero-size dimension for reduction operation without identity" + ) + computation_dtype, result_dtype = utils.reduction_dtypes( + a, output_dtype_kind, dtype + ) + a = _maybe_convert_to_dtype(a, computation_dtype) # type: ignore[method-assign] + result = prim(a, dims) + if keepdims: + output_shape = [a.shape[i] if i not in dims else 1 for i in range(a.ndim)] + broadcast_dims = [i for i in range(a.ndim) if i not in dims] + result = prims.broadcast_in_dim(result, output_shape, broadcast_dims) + + if out is not None: + assert result_dtype is not None + if dtype is not None and result_dtype != out.dtype: + raise RuntimeError( + "Expected the dtype of reduction result and out to match" + ) + out = _maybe_resize_out(out, result.shape) + return _safe_copy_out(copy_from=result, copy_to=out) # type: ignore[arg-type] + + if result.dtype != result_dtype and result_dtype is not None: + result = prims.convert_element_type(result, result_dtype) + + return result + + +def _make_copy_from_view(fn): + """ + Given a view function (e.g. torch.diagonal) generates its copy variant (e.g. 
torch.diagonal_copy) + """ + name = fn.__name__ + fn = out_wrapper()(fn) + + def _fn(*args, out=None, **kwargs): + result = fn(*args, out=out, **kwargs) + if out is None: + return result.clone(memory_format=torch.contiguous_format) + return result + + copy_name = f"{name}_copy" + _fn.__name__ = copy_name + _fn = register_decomposition(getattr(aten, copy_name))(_fn) + return _fn + + +# Saves Python all +py_all = all + + +@register_decomposition(aten.all) +@out_wrapper() +def all( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, +) -> TensorLikeType: + result = torch.logical_not(torch.any(torch.logical_not(a), dim, keepdim=keepdim)) + + if a.dtype == torch.uint8: + result = result.to(dtype=torch.uint8) + + return result + + +# Saves Python any +py_any = any + + +@register_decomposition(aten.any) +@out_wrapper() +def any( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, +) -> TensorLikeType: + a_ = _maybe_convert_to_dtype(a, torch.bool) + if isinstance(dim, (list, tuple)) and len(dim) == 0: + result = a_.clone() + else: + result = a_.sum(dim=dim, keepdim=keepdim).ne(False) + + # Preserves uint8 -- probably a legacy mask thing + if a.dtype is torch.uint8: + return prims.convert_element_type(result, torch.uint8) + + return result + + +@register_decomposition([aten.sum.dim_IntList, aten.sum.IntList_out]) +def sum( + a: TensorLikeType, + dim: Union[Optional[int], Optional[List[int]]] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + if dtype is None: + if out is not None: + dtype = out.dtype + elif utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype): + dtype = torch.int64 + else: + dtype = a.dtype + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + return _reduction( + a, + prims.sum, + dims=dim, + keepdims=keepdim, + dtype=dtype, + out=out, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +def sum_to_size( + a: Tensor, + *shape, +) -> Tensor: + shape = utils.extract_shape_from_varargs(shape, validate=False) + torch._check( + utils.is_expandable_to(shape, a.shape), + lambda: f'sum_to_size: size "{shape}" is not expandable to size "{a.shape}"', + ) + # In ATen scalar tensors are sent through sum and the result is returned as + # type promoted + if utils.is_same_shape(shape, a.shape) and len(shape) > 0: + return prims.view_of(a) + leading_dims = a.ndim - len(shape) + reduce_dims = tuple(range(leading_dims)) + tuple( + i + for i in range(leading_dims, len(shape)) + if shape[i - leading_dims] == 1 and a.shape[i] != 1 + ) + return torch.sum(a, dim=reduce_dims, keepdim=True, dtype=None) + + +@register_decomposition(aten.prod) +def prod( + a: TensorLikeType, + dim: Union[Optional[int], Optional[List[int]]] = None, + keepdim: bool = False, + *, + dtype=None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + if dtype is None: + if out is not None: + dtype = out.dtype + elif utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype): + dtype = torch.int64 + else: + dtype = a.dtype + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + return _reduction( + a, + prims.prod, + dims=dim, + keepdims=keepdim, + dtype=dtype, + out=out, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +@register_decomposition(aten.amin) +def amin( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + out: 
Optional[Tensor] = None, +) -> TensorLikeType: + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + + return _reduction( + a, + prims.amin, + dims=dim, + keepdims=keepdim, + dtype=None, + out=out, + has_identity=False, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +@register_decomposition(aten.amax) +def amax( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + out: Optional[Tensor] = None, +) -> TensorLikeType: + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + + return _reduction( + a, + prims.amax, + dims=dim, + keepdims=keepdim, + dtype=None, + out=out, + has_identity=False, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +def _dim_var_dispatch(dim=None, unbiased=None): + # There's the following overload of torch.var: + # var(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + # We need to explicitly convert bool dims to unbiased arg + if unbiased is None and isinstance(dim, bool): + unbiased = dim + dim = None + return dim, unbiased + + +@register_decomposition(aten.var) +@out_wrapper() +def var( + a: TensorLikeType, + dim: Optional[DimsType] = None, + unbiased: Optional[bool] = None, + keepdim: bool = False, + *, + correction: Optional[NumberType] = None, +) -> TensorLikeType: + dim, unbiased = _dim_var_dispatch(dim, unbiased) + correction = utils.set_correction(unbiased, correction) + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + + result = _reduction( + a, + partial(prims.var, correction=correction), + dims=dim, + keepdims=keepdim, + dtype=None, + out=None, + has_identity=True, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, + ) + return result + + +@register_decomposition(aten.std) +@out_wrapper() +def std( + a: TensorLikeType, + dim: Union[Optional[int], Optional[List[int]]] = None, + unbiased: Optional[bool] = None, + keepdim: bool = False, + *, + correction: Optional[NumberType] = None, +) -> TensorLikeType: + dim, unbiased = _dim_var_dispatch(dim, unbiased) + correction = utils.set_correction(unbiased, correction) + + opmath_dtype, dtype = utils.reduction_dtypes( + a, REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT + ) + a = _maybe_convert_to_dtype(a, opmath_dtype) + a_var = torch.var(a, dim, correction=correction, keepdim=keepdim) + a_std = torch.sqrt(a_var) + assert dtype is not None + return _maybe_convert_to_dtype(a_std, dtype) + + +@register_decomposition(aten.mean) +def mean( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype=None, + out=None, +) -> TensorLikeType: + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + orig_dtype = dtype + if dtype is None: + dtype = a.dtype + # can't use out wrapper because of this argument + torch._check( + out is None or out.dtype == dtype, + lambda: f"Expected out tensor to have dtype {dtype}, but got {out.dtype} instead", + ) + result = _reduction( + a, + prims.sum, + dims=dim, + keepdims=keepdim, + dtype=dtype, + out=None, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE, + ) + torch._check( + utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype), + lambda: ( + f"mean(): could not infer output dtype. " + f"{'Input' if orig_dtype is None else 'Optional'} dtype must be either " + f"a floating point or complex dtype. 
Got: {dtype}"
+        ),
+    )
+    if isinstance(dim, Dim):
+        dim = (dim,)  # type: ignore[assignment]
+    dims = utils.reduction_dims(a.shape, dim)  # type: ignore[arg-type]
+    nelem = 1 if a.ndim == 0 else reduce(operator.mul, (a.shape[i] for i in dims), 1)
+    result = true_divide(result, nelem)
+    result_dtype = a.dtype if dtype is None else dtype
+    result = _maybe_convert_to_dtype(result, result_dtype)  # type: ignore[method-assign]
+    if out is not None:
+        assert isinstance(out, TensorLike)
+        out = _maybe_resize_out(out, result.shape)
+        return _safe_copy_out(copy_from=result, copy_to=out)  # type: ignore[arg-type]
+    return result
+
+
+@register_decomposition(aten.std_mean)
+@out_wrapper("out0", "out1")
+def std_mean(
+    a: TensorLikeType,
+    dim: Optional[DimsType] = None,
+    *,
+    unbiased: Optional[bool] = None,
+    keepdim: bool = False,
+    correction: Optional[NumberType] = None,
+):
+    dim, unbiased = _dim_var_dispatch(dim, unbiased)
+    correction = utils.set_correction(unbiased, correction)
+    opmath_dtype, dtype = utils.reduction_dtypes(
+        a, REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
+    )
+    original_dtype = a.dtype
+    a = _maybe_convert_to_dtype(a, opmath_dtype)
+    a_var, a_mean = torch.var_mean(a, dim, correction=correction, keepdim=keepdim)
+    a_std = torch.sqrt(a_var)
+    assert dtype is not None
+    return (
+        _maybe_convert_to_dtype(a_std, dtype),
+        _maybe_convert_to_dtype(a_mean, original_dtype),
+    )
+
+
+@register_decomposition(aten.var_mean)
+@out_wrapper("out0", "out1")
+def var_mean(
+    a: TensorLikeType,
+    dim: Optional[DimsType] = None,
+    unbiased: Optional[bool] = None,
+    keepdim: bool = False,
+    *,
+    correction: Optional[NumberType] = None,
+):
+    dim, unbiased = _dim_var_dispatch(dim, unbiased)
+    v = var(a, dim, unbiased, keepdim, correction=correction)
+    m = mean(a, dim, keepdim)
+    return v, m
+
+
+@register_decomposition(aten.addr)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("self", "vec1", "vec2"),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def addr(
+    self: TensorLikeType,
+    vec1: TensorLikeType,
+    vec2: TensorLikeType,
+    *,
+    beta: NumberType = 1,
+    alpha: NumberType = 1,
+) -> TensorLikeType:
+    torch._check(
+        vec1.ndim == 1,
+        lambda: f"addr: Expected 1-D argument vec1, but got {vec1.ndim}-D",
+    )
+    torch._check(
+        vec2.ndim == 1,
+        lambda: f"addr: Expected 1-D argument vec2, but got {vec2.ndim}-D",
+    )
+    self = self.expand(vec1.shape[0], vec2.shape[0])
+    if utils.is_boolean_dtype(self.dtype):
+        # Integers are accepted for booleans
+        torch._check(
+            is_weakly_lesser_type(type(beta), int),
+            lambda: f"expected bool/int beta but got {type(beta)}",
+        )
+        torch._check(
+            is_weakly_lesser_type(type(alpha), int),
+            lambda: f"expected bool/int alpha but got {type(alpha)}",
+        )
+        if not beta:
+            return torch.outer(vec1, vec2) if alpha else torch.full_like(self, False)
+        else:
+            return torch.logical_or(
+                self,
+                torch.outer(vec1, vec2) if alpha else torch.full_like(self, False),
+            )
+    else:
+        torch._check(
+            is_weakly_lesser_type(type(beta), dtype_to_type(self.dtype)),
+            lambda: f"cannot safely convert {type(beta)} to {self.dtype}",
+        )
+        torch._check(
+            is_weakly_lesser_type(type(alpha), dtype_to_type(self.dtype)),
+            lambda: f"cannot safely convert {type(alpha)} to {self.dtype}",
+        )
+        if beta == 0:
+            # This means NaNs from self are dropped if beta is zero
+            return alpha * torch.outer(vec1, vec2)
+        else:
+            return beta * self + alpha * torch.outer(vec1, vec2)
+
+
+# CompositeImplicitAutograd - don't register decomp
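+# A couple of illustrative cases (assuming standard torch shape semantics):
+#   atleast_1d(torch.tensor(1.0)) returns a view of shape (1,), while
+#   atleast_1d(torch.ones(2), torch.ones(3, 3)) returns a tuple whose elements
+#   keep their shapes (2,) and (3, 3), since both already have ndim >= 1.
+def atleast_1d(
+    arg: 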
Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType +) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]: + """Reference implementation of :func:`torch.atleast_1d`.""" + if not args and isinstance(arg, collections.abc.Sequence): + args_ = arg + else: + assert not isinstance(arg, collections.abc.Sequence) + args_ = (arg,) + args + res = tuple(a if a.ndim >= 1 else unsqueeze(a, 0) for a in args_) + return res if len(res) > 1 else res[0] + + +# Helper function with assert to avoid MyPy error +# of incompatible type passed to unsqueeze +def _unsqueeze_atleast( + at_least_fn: Callable, dim: int, arg: TensorLikeType +) -> TensorLikeType: + arg_ = at_least_fn(arg) + assert isinstance(arg_, TensorLike) + return unsqueeze(arg_, dim) + + +# CompositeImplicitAutograd - don't register decomp +def atleast_2d( + arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType +) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]: + """Reference implementation of :func:`torch.atleast_2d`.""" + if not args and isinstance(arg, collections.abc.Sequence): + args_ = arg + else: + assert not isinstance(arg, collections.abc.Sequence) + args_ = (arg,) + args + unsqueeze_atleast_1d = partial(_unsqueeze_atleast, atleast_1d, 0) + res = tuple(a if a.ndim >= 2 else unsqueeze_atleast_1d(a) for a in args_) + return res if len(res) > 1 else res[0] + + +# CompositeImplicitAutograd - don't register decomp +def atleast_3d( + arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType +) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]: + """Reference implementation of :func:`torch.atleast_3d`.""" + if not args and isinstance(arg, collections.abc.Sequence): + args_ = arg + else: + assert not isinstance(arg, collections.abc.Sequence) + args_ = (arg,) + args + unsqueeze_atleast_2d = partial(_unsqueeze_atleast, atleast_2d, -1) + res = tuple(a if a.ndim >= 3 else unsqueeze_atleast_2d(a) for a in args_) + return res if len(res) > 1 else res[0] + + +def as_strided( + a: TensorLikeType, + size: ShapeType, + stride: StrideType, + storage_offset: Optional[int] = None, +) -> TensorLikeType: + storage_offset_int = ( + storage_offset if storage_offset is not None else a.storage_offset() + ) + return prims.as_strided(a, size, stride, storage_offset_int) + + +@register_decomposition(aten.as_strided_scatter) +@out_wrapper() +def as_strided_scatter( + input: TensorLikeType, + src: TensorLikeType, + size: ShapeType, + stride: StrideType, + storage_offset: Optional[int] = None, +) -> TensorLikeType: + storage_offset_int = 0 if storage_offset is None else storage_offset + return prims.as_strided_scatter(input, src, size, stride, storage_offset_int) + + +def broadcast_shapes(*shapes) -> ShapeType: + return torch.Size(_broadcast_shapes(*shapes)) + + +@aten.broadcast_tensors.default.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.broadcast_tensors.default.py_impl(DispatchKey.Meta) +def broadcast_tensors(*tensors) -> List[TensorLikeType]: + if len(tensors) == 1 and not isinstance(tensors[0], Tensor): + tensors = tensors[0] + return list(_maybe_broadcast(*tensors, preserve_cpu_scalar_tensors=False)) + + +# CompositeImplicitAutograd - don't register decomp +def broadcast_to(a: TensorLikeType, size: ShapeType) -> TensorLikeType: + start = len(size) - len(a.shape) + dims = tuple(range(start, len(a.shape) + start)) + return prims.broadcast_in_dim(a, size, dims) + + +@register_decomposition(aten.cat) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("tensors",), + 
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, +) +def cat(tensors: TensorSequenceType, dim: int = 0) -> TensorLikeType: + def cat_compute_output_memory_format(inputs): + format = None + for t in inputs: + f = utils.suggest_memory_format(t) + if f == torch.contiguous_format: + return f + if format is not None and format != f: + return torch.contiguous_format + format = f + assert format is not None + return format + + if len(tensors) == 0: + msg = "cat expects at least one tensor, but received zero!" + raise ValueError(msg) + + for tensor in tensors: + assert isinstance(tensor, TensorLike) + + utils.check_same_device(*tensors, allow_cpu_scalar_tensors=False) + + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + # This is a bit tricky. Naively, you would expect to just pick one + # arbitrary tensor and check that all tensors match this tensor. However, + # there is legacy behavior which says that if you have a 1-D empty tensor + # (0,), this is permissible. So you can't assume that all the tensors + # have same dimensionality, and you can't assume that the first tensor is + # the correct stencil. + # + # We'll implement this in a few passes. First, we will try to infer the + # ndim of the cat output. If this ndim != 1, then we know that all ndim = + # 1 inputs must be empty, or are errors. If this ndim == 1, then life + # is easy (the legacy special case coincides with regular handling). + # + # NB: The regular implementation of cat just filters out empty inputs, + # but we do it slightly different here for better handling for unbacked + # SymInts + + example = None + for i, t in enumerate(tensors): + if example is None: + if t.ndim != 1: + example = t + else: + if t.ndim != 1: + torch._check( + t.ndim == example.ndim, + lambda: "Number of dimensions of tensors must match. " + f"Expected {example.ndim}-D tensors, but got {t.ndim}-D for " + f"tensor number {i} in the list", + ) + + if example is None: + # example is None if everything is 1-D. If so, just arbitrarily pick + # the first one + example = tensors[0] + + shape = example.shape + filtered = [] + for tensor_idx, tensor in enumerate(tensors): + if len(shape) != len(tensor.shape): + assert tensor.ndim == 1 # we've already checked this above + # Don't suggest the legacy behavior in the error message + torch._check( + tensor.shape[0] == 0, + lambda: f"Number of dimensions of tensors must match. 
" + f"Expected {example.ndim}-D tensors, but got 1-D for " + f"tensor number {tensor_idx} in the list", + ) + else: + # Remove inputs that are 1-D, zero size + if tensor.ndim == 1 and guard_size_oblivious(tensor.shape[0] == 0): + continue + # Don't bother checking size match, prims.cat will handle it + filtered.append(tensor) + + memory_format = cat_compute_output_memory_format(tensors) + + if len(filtered) == 0: + t = tensors[0] + + # TODO: fix this to work with meta tensors + try: + requires_grad = any(x.requires_grad for x in tensors) + except Exception: + requires_grad = False + + return empty( + (0,), + dtype=t.dtype, + device=t.device, + requires_grad=requires_grad, + memory_format=memory_format, + ) + + dim = utils.canonicalize_dim(filtered[0].ndim, dim) + utils.validate_idx(filtered[0].ndim, dim) + + return prims.cat(filtered, dim).clone(memory_format=memory_format) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def column_stack(tensors: TensorSequenceType) -> TensorLikeType: + aligned_tensors = tuple( + x if x.ndim > 1 else x.reshape((x.numel(), 1)) for x in tensors + ) + return cat(aligned_tensors, 1) + + +def conj(input: TensorLikeType) -> TensorLikeType: + if not utils.is_complex_dtype(input.dtype): + return input + if input.is_sparse: + return torch.conj_physical(input) + return prims.conj(input) + + +# This replicates at::constant_pad_nd, defined in ATen/native/PadNd.cpp +@register_decomposition(aten.constant_pad_nd) +@out_wrapper() +def constant_pad_nd( + input: TensorLikeType, pad: List[int], value: NumberType = 0 +) -> TensorLikeType: + torch._check( + len(pad) % 2 == 0, + lambda: f"Length of pad must be even but instead it equals {len(pad)}", + ) + + input_sizes = input.shape + l_inp = len(input_sizes) + + l_pad = len(pad) // 2 + l_diff = l_inp - l_pad + + torch._check( + l_inp >= l_pad, + lambda: "Length of pad should be no more than twice the number of " + f"dimensions of the input. Pad length is {len(pad)} while the input has " + f"{l_inp} dimensions.", + ) + + c_input = input + for i in range(l_diff, l_inp): + pad_idx = 2 * (l_inp - i - 1) + if pad[pad_idx] < 0: + c_input = c_input.narrow(i, -pad[pad_idx], c_input.shape[i] + pad[pad_idx]) + + if pad[pad_idx + 1] < 0: + c_input = c_input.narrow(i, 0, c_input.shape[i] + pad[pad_idx + 1]) + + # if none of the pads are positive we can just return the result + if builtins.all(p <= 0 for p in pad): + return c_input.clone() + + new_shape = list(input_sizes[:l_diff]) + + for i in range(l_pad): + pad_idx = len(pad) - ((i + 1) * 2) + new_dim = input_sizes[l_diff + i] + pad[pad_idx] + pad[pad_idx + 1] + torch._check( + new_dim > 0, + lambda: f"The input size {input_sizes[l_diff + i]}, plus negative padding " + f"{pad[pad_idx]} and {pad[pad_idx + 1]} resulted in a negative output size, " + f"which is invalid. 
Check dimension {l_diff + i} of your input.", + ) + new_shape.append(new_dim) + + memory_format = utils.suggest_memory_format(input) + output = torch.empty( + new_shape, + dtype=input.dtype, + device=input.device, + requires_grad=input.requires_grad, + memory_format=memory_format, + ) + + if value == 0 and input.dtype == torch.bool: + value = False + # torch.fill isn't typed to allow complex values + output = torch.fill(output, value) # type: ignore[arg-type] + + c_output = output + for i in range(l_diff, l_inp): + pad_idx = 2 * (l_inp - i - 1) + if pad[pad_idx] > 0: + c_output = c_output.narrow( + i, pad[pad_idx], c_output.shape[i] - pad[pad_idx] + ) + if pad[pad_idx + 1] > 0: + c_output = c_output.narrow(i, 0, c_output.shape[i] - pad[pad_idx + 1]) + + prims.copy_to(c_output, c_input) + return output + + +def contiguous( + a: Tensor, *, memory_format: torch.memory_format = torch.contiguous_format +) -> Tensor: + torch._check( + memory_format != torch.preserve_format, + lambda: "preserve memory format is unsupported by the contiguous operator", + ) + + if utils.is_contiguous_for_memory_format(a, memory_format=memory_format): + return a + + return torch.clone(a, memory_format=memory_format) + + +@out_wrapper() +def dstack(tensors: TensorSequenceType) -> TensorLikeType: + torch._check(len(tensors) > 0, lambda: "dstack expects a non-empty TensorList") + aligned_tensors = atleast_3d(*tensors) + return cat(aligned_tensors, 2) + + +@register_decomposition(aten.expand) +def expand(a: Tensor, *shape) -> Tensor: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + # NOTE: cannot use utils.extract_shape_from_varargs here + # because that also validates the shape, but the shape + # given to expand may be "invalid" + if len(shape) == 1 and isinstance(shape[0], Sequence): + shape = tuple(shape[0]) + + torch._check( + len(shape) >= len(a.shape), + lambda: "expand: the requested shape has too few dimensions!", + ) + + offset = len(shape) - len(a.shape) + shape_ = list(shape) + for idx, x in enumerate(a.shape): + offset_idx = idx + offset + requested_length = shape[offset_idx] + torch._check( + guard_size_oblivious(requested_length == x) + or guard_size_oblivious(x == 1) + or requested_length == -1, + lambda: f"expand: attempting to expand a dimension of length {x}!", + ) + + shape_[offset_idx] = requested_length if requested_length != -1 else x + + # At this point shape must be valid + utils.validate_shape(shape_) + + return prims.broadcast_in_dim( + a, shape_, tuple(range(offset, len(a.shape) + offset)) + ) + + +# CompositeImplicitAutograd - don't register decomp +def expand_as(a: Tensor, b: Tensor) -> Tensor: + return a.expand(b.shape) + + +def chunk(a: TensorLikeType, chunks: int, dim: int = 0) -> Tuple[TensorLikeType, ...]: + if chunks <= 0: + msg = f"Expected at least one chunk, but got {chunks}!" 
+ raise ValueError(msg) + + dim = utils.canonicalize_dim(a.ndim, dim) + length = a.shape[dim] + chunk_size = math.ceil(length / chunks) + full_chunks = math.floor(length / chunk_size) + tail_chunk_size = length % chunk_size + + result = [] + for i in range(full_chunks): + result.append(narrow(a, dim, i * chunk_size, chunk_size)) + + if tail_chunk_size != 0: + result.append(narrow(a, dim, full_chunks * chunk_size, tail_chunk_size)) + + return tuple(result) + + +# Note: flatten, unlike other shape operators, returns the input tensor on a no-op (unless +# a 0D tensor is flattened, in which case it's returned in 1D) +# CompositeImplicitAutograd - don't register decomp +def flatten(a: TensorLikeType, start_dim: int = 0, end_dim: int = -1) -> TensorLikeType: + start_dim = utils.canonicalize_dim(a.ndim, start_dim) + end_dim = utils.canonicalize_dim(a.ndim, end_dim) + + # Short-circuits on no-op + if start_dim == end_dim and a.ndim != 0: + return a + + # Tries to take a view + # TODO: we could look at directing collapse_view to skip its meta function here (unsafe_collapse_view) + new_shape, new_strides = prims._collapse_view_helper(a, start_dim, end_dim) + if new_shape is not None: + return prims.collapse_view(a, start_dim, end_dim) + + # Makes a copy if it can't make a view + return prims.collapse(a, start_dim, end_dim) + + +@register_decomposition(aten.flip) +@out_wrapper() +def flip(a: TensorLikeType, dims: DimsSequenceType) -> TensorLikeType: + if not isinstance(dims, tuple) and not isinstance(dims, list): + raise ValueError("dims has to be a sequence of ints") + dims = utils.canonicalize_dims(a.ndim, dims) # type: ignore[assignment] + utils.validate_no_repeating_dims(dims) + return prims.rev(a, dims) + + +# CompositeImplicitAutograd - don't register decomp +def fliplr(a: TensorLikeType) -> TensorLikeType: + if a.ndim < 2: + raise RuntimeError("Input must be >= 2-d.") + + return flip(a, (1,)) + + +# CompositeImplicitAutograd - don't register decomp +def flipud(a: TensorLikeType) -> TensorLikeType: + if a.ndim < 1: + raise RuntimeError("Input must be >= 1-d.") + + return flip(a, (0,)) + + +# CompositeImplicitAutograd - don't register decomp +def narrow( + a: TensorLikeType, dim: int, start: Union[int, TensorLikeType], length: int +) -> TensorLikeType: + # Supports Tensor overload that was added for XLA: + # https://github.com/pytorch/pytorch/issues/31558 + if isinstance(start, TensorLike): + torch._check( + start.dim() == 0 and utils.is_integer_dtype(start.dtype), + lambda: "start must be an 0-dim integral Tensor.", + ) + start = start.item() # type: ignore[assignment] + torch._check(a.dim() > 0, lambda: "narrow() cannot be applied to a 0-dim tensor.") + torch._check(length >= 0, lambda: "narrow(): length must be non-negative.") + dim = utils.canonicalize_dim(a.ndim, dim) + dim_length = a.size(dim) + torch._check_with( + IndexError, + -dim_length <= start and start <= dim_length, # type: ignore[arg-type] + lambda: f"start out of range (expected to be in range of [{-dim_length}, {dim_length}], but got {start})", + ) + if start < 0: + start = start + dim_length + torch._check( + start <= dim_length - length, # type: ignore[arg-type] + lambda: f"start ({start}) + length ({length}) exceeds dimension size ({dim_length}).", + ) + return prims.slice_in_dim(a, start, start + length, axis=dim) + + +# TODO: This must return a sparse tensor if the input is sparse, but refs have +# no sparse support. See narrow_copy_sparse in core. 
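+# As a quick illustration (assuming eager torch semantics): narrow_copy(a, 0, 0, 2)
+# returns the same values as a.narrow(0, 0, 2), but materialized in a fresh
+# contiguous tensor rather than as a view into `a`.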
+narrow_copy = _make_copy_from_view(narrow) + + +def _normalize( + a: Tensor, norm_dims: DimsType, eps: float +) -> Tuple[Tensor, Tensor, Tensor]: + """Computes mean and 1/std of a tensor along norm_dims. + + Used as a helper function for normalization layers. + + Args: + a (Tensor): input tensor + norm_dims (DimsType): dimensions to normalize over + eps (float): epsilon for numerical stability + + Returns: + out (Tensor): normalized tensor. + mean (Tensor): mean of the tensor along norm_dims. + rstd (Tensor): 1/std of the tensor along norm_dims. + """ + norm_dims = utils.canonicalize_dims(a.ndim, norm_dims) + computation_dtype = utils.get_computation_dtype(a.dtype) + a_acc = _maybe_convert_to_dtype(a, computation_dtype) + assert isinstance(a_acc, TensorLike) # to avoid mypy error for var_mean + biased_var, mean = torch.var_mean( + a_acc, dim=norm_dims, unbiased=False, keepdim=True + ) + rstd = torch.rsqrt(biased_var + eps) + out = (a - mean) * rstd + return out, mean, rstd + + +# add all specified dimensions +def _unsqueeze_multiple(x: TensorLikeType, dimensions: List[int]) -> TensorLikeType: + for dim in sorted(dimensions): + x = torch.unsqueeze(x, dim) + return x + + +@register_decomposition(aten.native_group_norm.default) +def native_group_norm( + input: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + batch_size: int, + num_channels: int, + flattened_inner_size: int, + num_groups: int, + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + torch._check( + input.ndim >= 2, + lambda: f"Expected at least 2 dimensions for input tensor but received {input.ndim}", + ) + torch._check( + num_channels % num_groups == 0, + lambda: "Expected number of channels in input to be divisible by num_groups, " + + f"but got input of shape {input.shape} and num_groups = {num_groups}", + ) + + # num_channels / num_groups and flattened inner dimension are the reduction axes + reduction_dims = [2, 3] + input_reshaped = torch.reshape( + input, + [batch_size, num_groups, num_channels // num_groups, flattened_inner_size], + ) + out, mean, rstd = _normalize(input_reshaped, reduction_dims, eps) + out = out.view(input.shape) + + broadcast_dims = [0] + list(range(2, input.ndim)) + unsqueeze_bias = None + if bias is not None: + unsqueeze_bias = _unsqueeze_multiple(bias, broadcast_dims) + unsqueeze_weight = None + if weight is not None: + unsqueeze_weight = _unsqueeze_multiple(weight, broadcast_dims) + + if unsqueeze_weight is not None: + out = out * unsqueeze_weight + if unsqueeze_bias is not None: + out = out + unsqueeze_bias + + out = _maybe_convert_to_dtype(out, input.dtype) # type: ignore[assignment] + mean = _maybe_convert_to_dtype(mean, input.dtype) # type: ignore[assignment] + rstd = _maybe_convert_to_dtype(rstd, input.dtype) # type: ignore[assignment] + + # remove broadcast dimensions from mean and rstd + mean = torch.squeeze(mean, reduction_dims) + rstd = torch.squeeze(rstd, reduction_dims) + return (out, mean, rstd) + + +@register_decomposition(aten.native_layer_norm) +@out_wrapper("out0", "out1", "out2") +def native_layer_norm( + input: Tensor, + normalized_shape: ShapeType, + weight: Optional[Tensor], + bias: Optional[Tensor], + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + normalized_ndim = len(normalized_shape) + torch._check( + normalized_ndim >= 1, + lambda: "Expected normalized_shape to be at least 1-dimensional, i.e., " + + "containing at least one element, but got normalized_shape = " + + str(normalized_shape), + ) + # torch.Size([1, 2, 3]) == [1, 2, 3] evaluates to False + # 
while torch.Size([1, 2, 3]) == (1, 2, 3) is True + # therefore we use tuple(normalized_shape) + torch._check( + weight is None or weight.shape == tuple(normalized_shape), + lambda: "Expected weight to be of same shape as normalized_shape, but got " + + "weight of shape " + + str(weight.shape) # type: ignore[union-attr] + + " and normalized_shape = " + + str(normalized_shape), + ) + torch._check( + bias is None or bias.shape == tuple(normalized_shape), + lambda: "Expected bias to be of same shape as normalized_shape, but got " + + "bias of shape " + + str(bias.shape) # type: ignore[union-attr] + + " and normalized_shape = " + + str(normalized_shape), + ) + torch._check( + input.ndim >= normalized_ndim + and input.shape[(input.ndim - normalized_ndim) :] == tuple(normalized_shape), + lambda: "Given normalized_shape=" + + str(normalized_shape) + + ", expected input with shape " + + str(normalized_shape) + + ", but got input of size " + + str(input.shape), + ) + + input = input.contiguous() + if weight is not None: + weight = weight.contiguous() + if bias is not None: + bias = bias.contiguous() + + axis = input.ndim - normalized_ndim + reduction_dims = list(range(axis, input.ndim)) + out, mean, rstd = _normalize(input, reduction_dims, eps) + + if weight is None and bias is not None: + out = out + bias + elif weight is not None and bias is None: + out = out * weight + elif weight is not None and bias is not None: + out = out * weight + bias + + out = _maybe_convert_to_dtype(out, input.dtype) # type: ignore[assignment] + if input.device.type == "cpu": + mean = _maybe_convert_to_dtype(mean, input.dtype) # type: ignore[assignment] + rstd = _maybe_convert_to_dtype(rstd, input.dtype) # type: ignore[assignment] + return (out, mean, rstd) + + +# TODO: Adding this as a meta function causes functorch tests to fail when compiled with debug mode. 
+# test/test_eager_transforms.py::TestFunctionalizeCPU::test_functionalize_fx_transpose_simple_cpu +@register_decomposition(aten.permute) +def permute(a: TensorLikeType, *dims) -> TensorLikeType: + _permutation = utils.canonicalize_dims( + a.ndim, utils.extract_dims_from_varargs(dims) + ) + return prims.transpose(a, _permutation) + + +@register_decomposition(aten.renorm) +@out_wrapper() +def renorm( + input: TensorLikeType, p: RealNumberType, dim: int, maxnorm: RealNumberType +) -> TensorLikeType: + torch._check(not isinstance(p, complex), lambda: "renorm: p must be real-valued") + torch._check(p > 0, lambda: "renorm: non-positive norm not supported") + torch._check( + not isinstance(maxnorm, complex), lambda: "renorm: maxnorm must be real-valued" + ) + torch._check( + maxnorm >= 0, lambda: f"renorm: expected maxnorm to be >= 0 but got {maxnorm}" + ) + ndim = input.ndim + torch._check( + ndim > 1, + lambda: f"renorm: input needs at least 2 dimensions, got {ndim} dimensions", + ) + + dim = utils.canonicalize_dim(ndim, dim) + reduce_dims = list(range(ndim)) + del reduce_dims[dim] + + # For half and bfloat16, calculate norm in float precision then cast + # normalization factor to half + acc_type = utils.get_computation_dtype(input.dtype) + if acc_type != input.dtype: + norm = torch.linalg.vector_norm( + input, p, reduce_dims, keepdim=True, dtype=acc_type + ) + else: + norm = torch.linalg.vector_norm(input, p, reduce_dims, keepdim=True) + + eps = 1e-7 + norm_factor = torch.where(norm > maxnorm, maxnorm / (norm + eps), 1.0) + if acc_type != input.dtype: + norm_factor = prims.convert_element_type(norm_factor, input.dtype) + return (input * norm_factor).contiguous() + + +# CompositeImplicitAutograd - don't register decomp +@aten.stft.center.py_impl(DispatchKey.CompositeImplicitAutograd) +def stft( + input: Tensor, + n_fft: int, + hop_length: Optional[int] = None, + win_length: Optional[int] = None, + window: Optional[Tensor] = None, + center: bool = True, + pad_mode: str = "reflect", + normalized: bool = False, + onesided: Optional[bool] = None, + return_complex: Optional[bool] = None, +) -> Tensor: + torch._check( + window is None or window.device == input.device, + lambda: ( + f"stft input and window must be on the same device but got self on {input.device}" + + f" and window on {window.device}" # type: ignore[union-attr] + ), + ) + + hop_length_ = hop_length if hop_length is not None else n_fft // 4 + win_length_ = win_length if win_length is not None else n_fft + + if return_complex is None: + return_complex_ = input.is_complex() or ( + window is not None and utils.is_complex_dtype(window.dtype) + ) + torch._check( + return_complex_, + ( + "stft requires the return_complex parameter be given for real inputs, " + + "and will further require that return_complex=True in a future PyTorch release." 
+        ),
+    )
+    else:
+        return_complex_ = return_complex
+
+    torch._check(
+        utils.is_float_dtype(input.dtype) or utils.is_complex_dtype(input.dtype),
+        lambda: "stft expected a tensor of floating point or complex values",
+    )
+    torch._check(1 <= input.ndim <= 2, lambda: "stft expected a 1D or 2D tensor")
+
+    original_ndim = input.ndim
+    if original_ndim == 1:
+        input = input.unsqueeze(0)
+
+    if center:
+        extra_dims = 3 - input.ndim
+        pad_amount = n_fft // 2
+        extended_shape = [*itertools.repeat(1, extra_dims), *input.shape]
+        input = aten.pad(input.view(extended_shape), [pad_amount, pad_amount], pad_mode)
+        input = input.view(input.size()[extra_dims:])
+
+    batch = input.size(0)
+    length = input.size(1)
+    torch._check(
+        0 < n_fft <= length,
+        lambda: f"stft expected 0 < n_fft <= {length}, but got n_fft={n_fft}",
+    )
+    torch._check(
+        hop_length_ > 0,
+        lambda: f"stft expected hop_length > 0 but got hop_length={hop_length_}",
+    )
+    torch._check(
+        0 < win_length_ <= n_fft,
+        lambda: f"stft expected 0 < win_length <= n_fft but got win_length={win_length_}",
+    )
+    torch._check(
+        window is None or window.shape == (win_length_,),
+        lambda: (
+            f"expected a 1D window tensor of size equal to win_length={win_length_}, "
+            + f"but got window with size {window.shape}"  # type: ignore[union-attr]
+        ),
+    )
+
+    if win_length_ < n_fft:
+        if window is None:
+            window = torch.ones(win_length_, dtype=input.dtype, device=input.device)
+        left = (n_fft - win_length_) // 2
+        window = aten.constant_pad_nd(window, [left, n_fft - win_length_ - left])
+
+    input = input.unfold(dimension=-1, size=n_fft, step=hop_length_)
+    if window is not None:
+        input = input * window
+
+    complex_fft = utils.is_complex_dtype(input.dtype)
+    onesided = onesided if onesided is not None else not complex_fft
+    norm = "ortho" if normalized else None
+    if onesided:
+        torch._check(
+            not complex_fft,
+            lambda: "Cannot have onesided output if window or input is complex",
+        )
+        out = torch.fft.rfft(input, dim=-1, norm=norm)
+    else:
+        out = torch.fft.fft(input, dim=-1, norm=norm)
+
+    out.transpose_(1, 2)
+
+    if original_ndim == 1:
+        out = out.squeeze_(0)
+
+    return out if return_complex_ else torch.view_as_real(out)
+
+
+# CompositeImplicitAutograd - don't register decomp
+@aten.istft.default.py_impl(DispatchKey.CompositeImplicitAutograd)
+def istft(
+    input: Tensor,
+    n_fft: int,
+    hop_length: Optional[int] = None,
+    win_length: Optional[int] = None,
+    window: Optional[Tensor] = None,
+    center: bool = True,
+    normalized: bool = False,
+    onesided: Optional[bool] = None,
+    length: Optional[int] = None,
+    return_complex=False,
+) -> Tensor:
+    torch._check(
+        window is None or window.device == input.device,
+        lambda: (
+            f"istft input and window must be on the same device but got self on {input.device}"
+            + f" and window on {window.device}"  # type: ignore[union-attr]
+        ),
+    )
+
+    hop_length_ = hop_length if hop_length is not None else n_fft // 4
+    win_length_ = win_length if win_length is not None else n_fft
+
+    torch._check(
+        utils.is_complex_dtype(input.dtype),
+        lambda: (
+            "istft requires a complex-valued input tensor matching the output "
+            + "from stft with return_complex=True."
+        ),
+    )
+    n_frames = input.size(-1)
+    fft_size = input.size(-2)
+
+    expected_output_signal_len = n_fft + hop_length_ * (n_frames - 1)
+    torch._check(input.numel() > 0, lambda: "istft input tensor cannot be empty")
+    torch._check(
+        2 <= input.ndim <= 3,
+        lambda: f"istft expected a tensor with 2 or 3 dimensions, but got {input.ndim}",
+    )
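+    # A one-sided spectrogram of a real signal stores only n_fft // 2 + 1
+    # frequency bins, so when `onesided` is not given explicitly it is inferred
+    # below from whether the frequency dimension is smaller than n_fft.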
{input.ndim}", + ) + onesided_ = onesided if onesided is not None else fft_size != n_fft + + if onesided_: + torch._check( + n_fft // 2 + 1 == fft_size, + lambda: ( + "istft expected the frequency dimension (3rd to the last) of the input tensor " + + "to match n_fft / 2 + 1 when onesided=True, but got {fft_size}" + ), + ) + else: + torch._check( + n_fft == fft_size, + lambda: ( + "istft expected the frequency dimension (3rd to the last) of the input tensor " + + "to match n_fft when onesided=False, but got {fft_size}", + ), + ) + + torch._check( + 0 < hop_length_ <= win_length_, + lambda: "istft expected 0 < hop_length <= win_length", + ) + torch._check( + 0 < win_length_ <= n_fft, lambda: "istft expected 0 < win_length <= n_fft" + ) + torch._check( + window is None or window.shape == (win_length_,), + lambda: "Invalid window shape. window has to be 1D and length of `win_length`", + ) + + if window is None: + real_dtype = utils.corresponding_real_dtype(input.dtype) + window_ = torch.ones(win_length_, dtype=real_dtype, device=input.device) + else: + window_ = window + + if win_length_ != n_fft: + left = (n_fft - win_length_) // 2 + window_ = aten.constant_pad_nd(window_, (left, n_fft - win_length_ - left), 0) + + original_ndim = input.ndim + if input.ndim == 2: + input = input.unsqueeze(0) + + input = input.transpose(1, 2) + norm = "ortho" if normalized else None + if return_complex: + torch._check( + not onesided_, + lambda: "cannot have onesided output if window or input is complex", + ) + input = torch.fft.ifft(input, dim=-1, norm=norm) + else: + torch._check( + window is None or not utils.is_complex_dtype(window.dtype), + lambda: "Complex windows are incompatible with return_complex=False", + ) + if not onesided_: + input = input.narrow(dim=-1, start=0, length=n_fft // 2 + 1) + input = torch.fft.irfft(input, dim=-1, norm=norm) + + assert input.size(2) == n_fft + + y_tmp = input * window_.view([1, 1, n_fft]) + y = aten.unfold_backward( + y_tmp, + input_sizes=(y_tmp.size(0), expected_output_signal_len), + dim=1, + size=n_fft, + step=hop_length_, + ) + window_envelop = aten.unfold_backward( + window_.pow(2).expand((1, n_frames, n_fft)), + input_sizes=(y_tmp.size(0), expected_output_signal_len), + dim=1, + size=n_fft, + step=hop_length_, + ) + + assert expected_output_signal_len == y.size(1) + assert expected_output_signal_len == window_envelop.size(1) + + start = n_fft // 2 if center else 0 + if length is not None: + end = start + length + elif center: + end = expected_output_signal_len - n_fft // 2 + else: + end = expected_output_signal_len + + length = max(0, end - start) + y = y.narrow(dim=1, start=start, length=length) + window_envelop = window_envelop.narrow(dim=1, start=start, length=length) + + window_envelop_lowest = window_envelop.abs().min().lt(1e-11) + torch._check( + not window_envelop_lowest.item(), + lambda: "window overlap add min less than 1e-11", + ) + + y = y / window_envelop + if original_ndim == 2: + y = y.squeeze(0) + + if end > expected_output_signal_len: + warnings.warn( + "The length of signal is shorter than the length parameter. Result is being " + + "padded with zeros in the tail. 
Please check your center and hop_length settings" + ) + y = aten.constant_pad_nd(y, (0, end - expected_output_signal_len), 0) + return y + + +# Get the new shape and stride after applying unfold to an input tensor +def _get_unfold_shape_stride( + a_shape: ShapeType, a_stride: StrideType, dimension: int, size: int, step: int +): + a_ndim = len(a_shape) + dim = utils.canonicalize_dim(a_ndim, dimension, wrap_scalar=True) + max_size = 1 if a_ndim == 0 else a_shape[dim] + last_stride = 1 if a_ndim == 0 else a_stride[dim] + + torch._check( + size <= max_size, + lambda: f"Maximum size for tensor at dimension {dim} is {max_size} but size is {size}", + ) + + torch._check( + step > 0, + lambda: f"Step is {step} but must be > 0", + ) + + shape = list(a_shape) + strides = list(a_stride) + shape.append(size) + strides.append(last_stride) + if dim < a_ndim: + shape[dim] = (shape[dim] - size) // step + 1 + strides[dim] *= step + return shape, strides + + +@register_decomposition(aten.repeat) +@out_wrapper() +def repeat(a: Tensor, *repeat_shape) -> Tensor: + repeat_shape = utils.extract_shape_from_varargs(repeat_shape, validate=False) + torch._check( + len(repeat_shape) >= len(a.shape), + lambda: "repeat: Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor", + ) + + if len(repeat_shape) == 0: + return torch.clone(a) + + num_new_dimensions = len(repeat_shape) - a.ndim + padded_shape = [1] * num_new_dimensions + for dim_size in a.shape: + padded_shape.append(dim_size) + + target_shape = tuple( + padded_size * repeat_size + for padded_size, repeat_size in zip(padded_shape, repeat_shape) + ) + + # return an empty tensor if one of the repeat_shape dimensions is zero + if 0 in repeat_shape: + return torch.empty( + target_shape, + dtype=a.dtype, + device=a.device, + requires_grad=a.requires_grad, + memory_format=utils.suggest_memory_format(a), + ) + + urtensor_shape = target_shape + urtensor_stride = utils.make_contiguous_strides_for(target_shape) + for dim, dim_size in enumerate(padded_shape): + # repeat each dimension by using unfold_copy operation + urtensor_shape, urtensor_stride = _get_unfold_shape_stride( + urtensor_shape, urtensor_stride, dim, dim_size, max(dim_size, 1) + ) + + # derive permute order by sorting urtensor strides + enumerated_stride = list(enumerate(urtensor_stride)) + enumerated_stride.sort(key=operator.itemgetter(1), reverse=True) + permute_order, sorted_stride = zip(*enumerated_stride) + + # add new and expand dimensions according to urtensor + repeat_xtensor = a.expand(urtensor_shape) + + # clone tensor to concretize expanded dimensions + cloned_result = torch.clone(repeat_xtensor) + + # transpose axis so strides are in sorted order + permuted_result = cloned_result.permute(permute_order) + + # reshape to get contiguous tensor with correct target shape + return permuted_result.reshape(target_shape) + + +def _reshape_view_helper(a: TensorLikeType, *shape, allow_copy: bool) -> TensorLikeType: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious, sym_eq + + # Creates a valid shape + shape = utils.extract_shape_from_varargs(shape, validate=False) + # Reshape may be given a shape with a -1 length + # This indicates that the dimension's length should be inferred + shape = utils.infer_size(shape, a.numel()) + + # Special-cases tensors with no elements + if guard_size_oblivious(a.numel() == 0): + return as_strided(a, shape, utils.make_contiguous_strides_for(shape)) + + # Special-cases reshaping zero dim tensors + if a.ndim == 0: + _a = 
a
+        for length in shape:
+            assert length == 1
+            _a = unsqueeze(_a, -1)
+        if _a is a:
+            return prims.view_of(a)
+        else:
+            return _a
+
+    # Special-cases reshaping to zero dim tensors
+    if len(shape) == 0:
+        _a = a
+        for length in a.shape:
+            assert length == 1
+            _a = squeeze(_a, -1)
+        if _a is a:
+            return prims.view_of(a)
+        else:
+            return _a
+
+    if a.is_contiguous():
+        # Special-cases for nd_to_1d
+        if len(shape) == 1 and a.ndim > 1:
+            return torch.as_strided(a, [a.numel()], [1])
+        # Special-cases for 1d_to_2d
+        if len(shape) == 2 and a.ndim == 1:
+            dim0 = shape[0]
+            dim1 = shape[1]
+            return torch.as_strided(a, [dim0, dim1], [dim1, 1])
+
+    # Handles general case: a 1+D tensor reshaped into a distinct 1+D shape
+
+    # NOTE [Reshape Algorithm]
+    # This algorithm works by attempting to greedily construct the desired dimensions in
+    # the output shape, left to right. It does this by, conceptually, accumulating
+    # dimensions of the original tensor, also left to right, until the dimension
+    # can be constructed using prims.split_dim.
+    # The algorithm also has special handling for tail squeezes/unsqueezes, like
+    # a reshape from (5, 5) to (5, 5, 1) or vice versa.
+    #
+    # This algorithm does not flatten the original tensor and then split dims as appropriate
+    # because that would create copies more often than this algorithm. flatten is the only
+    # operation below which can create a view or a copy, and while it prefers creating
+    # views it may sometimes create a copy if the tensor's strides do not permit a view.
+    # As a result, this algorithm tries to minimize flattening.
+    #
+    # Note that a better version of this algorithm may exist. Regions which could be
+    # flattened without creating a copy can be identified in advance, and that might
+    # allow fewer flatten calls or faster short-circuiting to make a copy.
+    idx = 0
+    a_ = a
+    for length in shape:
+        # Handles tail unsqueezes
+        if idx >= a_.ndim:
+            assert length == 1
+            last_dim = a_.ndim - 1
+            # NOTE: using split_dim instead of unsqueeze may seem silly here,
+            # but it's necessary to get the strides correct
+            a_ = prims.split_dim(a_, last_dim, a_.shape[last_dim])
+            idx = idx + 1
+            continue
+
+        # Skips dimensions that are already the correct length
+        if guard_size_oblivious(length == a_.shape[idx]):
+            idx = idx + 1
+            continue
+
+        # Gathers enough original dimensions such that this new dimension can be created
+        # Note that this accumulation will terminate because we've verified a and the shape
+        # specify the same number of elements above
+        accum = a_.shape[idx]
+        end = idx
+        while guard_size_oblivious(accum % length != 0):
+            end = end + 1
+            accum = accum * a_.shape[end]
+        if end != idx:
+            # NOTE: in this case multiple dimensions must be flattened to create the desired dimension
+            # This flattening is why reshape sometimes creates a copy -- because flattening
+            # may return a view of a copy
+
+            # Checks if collapse can be a view and short-circuits to copying reshape if it can't
+            new_shape, new_strides = prims._collapse_view_helper(a_, idx, end)
+            if new_shape is None:
+                if allow_copy:
+                    return prims.reshape(a, shape)
+
+                msg = f"Cannot view a tensor with shape {a.shape} and strides {a.stride()} as a tensor with shape {shape}!"
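+                # (editor's note: illustrative doctest, not in the upstream file)
+                # This is the boundary between view and reshape -- e.g.
+                # flattening a transposed, non-contiguous tensor:
+                #
+                #   >>> t = torch.arange(6).reshape(2, 3).t()  # strides (1, 3)
+                #   >>> t.reshape(6)                           # must copy
+                #   tensor([0, 3, 1, 4, 2, 5])
+                #   >>> t.view(6)  # raises RuntimeError: no view is possible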
+ raise ValueError(msg) + + a_ = flatten(a_, idx, end) + + # Splits the (possibly flattened) dimension to create the desired dim length + if guard_size_oblivious(accum != length): + a_ = prims.split_dim(a_, idx, length) + + idx = idx + 1 + + # Squeezes tail + while idx < a_.ndim: + torch._check( + a_.shape[idx] == 1, + lambda: f"a.size({idx}) expected to be 1 but got {a_.shape[idx]}", + ) + a_ = squeeze(a_, idx) + + if a_ is a: + return prims.view_of(a) + else: + return a_ + + +# CompositeImplicitAutograd - don't register decomp +# NOTE: shape is a vararg because Tensor.reshape can be called with as +# Tensor.reshape(a, b, c) or Tensor.reshape((a, b, c)) Function call +# torch.reshape doesn't support unpacked shapes +def reshape(a: TensorLikeType, *shape: ShapeType) -> TensorLikeType: + return _reshape_view_helper(a, *shape, allow_copy=True) + + +# CompositeImplicitAutograd - don't register decomp +def reshape_as(self: TensorLikeType, other: TensorLikeType) -> TensorLikeType: + return self.reshape(other.size()) + + +@register_decomposition(aten.roll) +@out_wrapper() +def roll( + a: TensorLikeType, shifts: DimsType, dims: DimsType = tuple() +) -> TensorLikeType: + """Reference implementation of :func:`torch.roll`.""" + dims = utils.canonicalize_dims(a.ndim, dims) + # ATen specifies int[1] type for shifts and dims which expands integers to tuples of length 1 + if not isinstance(shifts, Iterable): + shifts = (shifts,) + if not isinstance(dims, Iterable): + dims = (dims,) + + # Avoid modulo by zero + if a.numel() == 0: + # Keeping this as ref for now as FakeTensor runs into some issues with complex tensors + return a.clone() + + if a.dim() == 0 and len(dims) > 0: + raise IndexError( + f"Dimension specified as {dims[0]} but tensor has no dimensions" + ) + + len_shifts = len(shifts) + len_dims = len(dims) + if len_shifts != 1 or len_dims != 1: + if len_shifts == 0: + raise RuntimeError("`shifts` required") + # Takes care of the case when dims is not specified (default) + # By default, the tensor is flattened before shifting, after which the original shape is restored + if len_dims == 0 and len_shifts == 1: + return torch.roll(torch.flatten(a), shifts, 0).view(a.shape) + if len_shifts != len_dims: + raise RuntimeError( + f"shifts and dimensions must align. shifts: {len_shifts}, dims: {len_dims}" + ) + assert len_dims > 1 + tail_shifts = shifts[1:] + tail_dims = dims[1:] + first_dim_rolled = torch.roll(a, (shifts[0],), dims[0]) + return torch.roll(first_dim_rolled, tail_shifts, tail_dims) + + # This path is taken when only one dimension is rolled + # For example to get `first_dim_rolled` above + dim = dims[0] + size = a.shape[dim] + start = (size - shifts[0]) % size + idx = torch.arange(size, device=a.device) + return a.index_select(dim, torch.fmod(start + idx, size)) + + +@register_decomposition(aten.rot90) +@out_wrapper() +def rot90( + a: TensorLikeType, k: int = 1, dims: DimsSequenceType = (0, 1) +) -> TensorLikeType: + """Reference implementation of :func:`torch.rot90`.""" + if len(dims) != 2: + raise RuntimeError( + f"expected total rotation dims == 2, but got dims = {len(dims)}" + ) + if a.ndim < 2: + raise RuntimeError(f"expected total dims >= 2, but got total dims = {a.ndim}") + + # Do this after the initial checks to be compatible with the behavior in + # core. 
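+    # (editor's note: illustrative doctest, not in the upstream file)
+    # k=1 flips along dims[1] and then swaps the two dims, i.e. a
+    # counter-clockwise rotation in the (dims[0], dims[1]) plane:
+    #
+    #   >>> a = torch.tensor([[1, 2], [3, 4]])
+    #   >>> torch.rot90(a, 1, (0, 1))
+    #   tensor([[2, 4],
+    #           [1, 3]])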
+ dims = utils.canonicalize_dims(a.ndim, dims) + + if dims[0] == dims[1]: + raise RuntimeError( + f"expected rotation dims to be different, but got dim0 = {dims[0]} and dim1 = {dims[1]}" + ) + k = k % 4 # Rotation direction is from the second towards the first axis for k < 0 + if k == 1: + return torch.transpose(torch.flip(a, (dims[1],)), dims[0], dims[1]) + elif k == 2: + return torch.flip(a, dims) + elif k == 3: + return torch.transpose(torch.flip(a, (dims[0],)), dims[0], dims[1]) + else: + return clone(a, memory_format=torch.contiguous_format) + + +def _check_stack_inputs(tensors: TensorSequenceType) -> None: + entry_shape = tensors[0].shape + for i in range(1, len(tensors)): + assert tensors[i].shape == entry_shape, ( + f"stack expects each tensor to be equal size, but got {entry_shape} at entry 0 " + f"and {tensors[i].shape} at entry {i}" + ) + + +@register_decomposition(aten.stack) +@out_wrapper() +def stack(tensors: TensorSequenceType, dim: int = 0) -> TensorLikeType: + assert len(tensors) > 0, "stack expects a non-empty TensorList" + wrapped_dim = utils.canonicalize_dim(tensors[0].ndim + 1, dim) + # Refs need sparse support to check other condition + if wrapped_dim < tensors[0].ndim: # and not tensors[0].is_sparse: + _check_stack_inputs(tensors) + result_sizes = list(tensors[0].shape) + result_sizes.insert(wrapped_dim, len(tensors)) + out = torch.cat(tensors, wrapped_dim) + return out.view(result_sizes) + + # If dim == tensors[0].ndim, view cannot efficiently handle it + return torch.cat([t.unsqueeze(wrapped_dim) for t in tensors], dim) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + result_dtype = dtype or a.dtype + computation_dtype = utils.get_computation_dtype(result_dtype) + a_ = _maybe_convert_to_dtype(a, computation_dtype) + if a.numel() == 0: + a_exp = exp(a_) + else: + a_max = amax(a_, dim, keepdim=True) + a_exp = exp(a_ - a_max) + return _maybe_convert_to_dtype( + true_divide(a_exp, sum(a_exp, dim, keepdim=True)), result_dtype + ) # type: ignore[return-value] + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def hstack(tensors: TensorSequenceType) -> TensorLikeType: + torch._check(len(tensors) > 0, lambda: "hstack expects a non-empty TensorList") + aligned_tensors = atleast_1d(*tensors) + if aligned_tensors[0].ndim == 1: + return cat(aligned_tensors, 0) + return cat(aligned_tensors, 1) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def vstack(tensors: TensorSequenceType) -> TensorLikeType: + torch._check(len(tensors) > 0, lambda: "vstack expects a non-empty TensorList") + aligned_tensors = atleast_2d(*tensors) + return cat(aligned_tensors, 0) + + +# CompositeImplicitAutograd - don't register decomp +def unflatten(a: TensorLikeType, dim: int, sizes: ShapeType) -> TensorLikeType: + dim = utils.canonicalize_dim(a.ndim, dim) + torch._check(len(sizes) != 0, lambda: "unflatten: sizes must be non-empty") + return a.view(tuple(a.shape[:dim]) + tuple(sizes) + tuple(a.shape[dim + 1 :])) + + +@register_decomposition(aten.unbind) +def unbind(t: TensorLikeType, dim: int = 0) -> TensorSequenceType: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + dim = utils.canonicalize_dim(t.ndim, dim) + torch._check_index( + len(t.shape) > 0, + lambda: "Dimension specified as 0 but tensor has no dimensions", + ) + if guard_size_oblivious(t.shape[dim] == 0): + return tuple() + else: + 
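+        # (editor's note: illustrative doctest, not in the upstream file)
+        # unbind returns one slice per index along `dim`, with `dim` removed:
+        #
+        #   >>> torch.unbind(torch.arange(6).reshape(2, 3), dim=0)
+        #   (tensor([0, 1, 2]), tensor([3, 4, 5]))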
return tuple( + torch.squeeze(s, dim) for s in torch.tensor_split(t, t.shape[dim], dim) + ) + + +@out_wrapper() +def index_copy(x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike): + return x.clone(memory_format=torch.contiguous_format).index_copy_( + dim, index, tensor + ) + + +def index_copy_(x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike): + dim = utils.canonicalize_dims(x.ndim, dim) + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + # Treat scalars as elements of \R^1 + y = x.unsqueeze(0) if x.ndim == 0 else x + idx = (slice(None),) * dim + (index,) + y[idx] = tensor + return x + + +@register_decomposition(aten.index_fill) +@out_wrapper() +def index_fill( + x: TensorLike, dim: int, index: TensorLike, value: Union[NumberType, TensorLike] +): + return _index_fill(x, dim, index, value, inplace=False) + + +@register_decomposition(aten.index_fill_) +def index_fill_( + x: TensorLike, dim: int, index: TensorLike, value: Union[NumberType, TensorLike] +): + return _index_fill(x, dim, index, value, inplace=True) + + +def _index_fill( + x: TensorLike, + dim: int, + index: TensorLike, + value: Union[NumberType, TensorLike], + *, + inplace: bool, +): + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + if isinstance(value, TensorLike): + torch._check( + value.ndim == 0, + lambda: "Only supports 0-dimensional value tensor. " # type: ignore[union-attr] + f"Got a tensor with {value.ndim} dimensions.", + ) # type: ignore[arg-type] + else: + value = torch.scalar_tensor( + value, dtype=x.dtype, layout=x.layout, device=x.device # type: ignore[arg-type] + ) + + # index_copy has some unnecessary preconditions when x is a scalar. We do this to work through them + zero_dim = x.ndim == 0 + y = x.unsqueeze(0) if zero_dim else x + # index_copy does not broadcast on value so we have to do it manually + shape = list(y.shape) + shape[dim] = index.numel() + value = value.expand(shape) + index_copy = Tensor.index_copy_ if inplace else torch.index_copy + out = index_copy(y, dim, index, value) # type: ignore[operator] + if inplace: + return x + else: + if zero_dim: + # The clone is necessary so that it returns a fresh tensor rather than a view + out = out.squeeze(0).clone() + # index_fill preserves the strides. 
index_copy always returns contiguous tensors + if out.stride() != x.stride(): + new_out = torch.empty_like(x) + new_out.copy_(out) + out = new_out + return out + + +@out_wrapper() +def index_add( + x: TensorLike, + dim: int, + index: TensorLike, + tensor: TensorLike, + *, + alpha: NumberType = 1, +): + # index_add always returns a new contiguous tensor + return x.clone(memory_format=torch.contiguous_format).index_add_( + dim, index, tensor, alpha=alpha # type: ignore[arg-type] + ) + + +@register_decomposition(aten.index_select) +@out_wrapper() +def index_select(x: TensorLike, dim: int, index: TensorLike): + dim = utils.canonicalize_dims(x.ndim, dim) + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + if index.ndim == 0: + index = index.unsqueeze(0) + if x.ndim == 0: + # Treat scalars as elements of \R^1 + # We cannot use x[idx] here as it accesses item() (??), hence this awkward construction + return torch.empty_like(x).index_copy(0, index, x.expand_as(index)) + + idx = (slice(None),) * dim + (index,) + return x[idx] + + +@register_decomposition(aten.squeeze.dims) +def squeeze(a: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + if dim is None: + dims = tuple(idx for idx, size in enumerate(a.shape) if size == 1) + return prims.squeeze(a, dims) if dims else prims.view_of(a) + + ndim = a.ndim + dim = utils.canonicalize_dims(ndim, dim) + dims = (dim,) if isinstance(dim, Dim) else dim + # Short-circuits if the tensor has no dimensions + if ndim == 0: + assert len(dims) == 0 or dims == (0,) + return prims.view_of(a) + + # Note: squeeze does not modify tensors when the given dim is not a dimension of length 1 + dims = tuple(d for d in dims if guard_size_oblivious(a.shape[d] == 1)) + if len(dims) == 0: + return prims.view_of(a) + if len(dims) == 1: + return prims.squeeze(a, dims) + dims_list = list(dims) + dims_list = sorted(dims_list, reverse=True) + for i in dims_list: + a = squeeze(a, i) + return a + + +# Note: does not work with TensorMetas because of data-dependent control-flow +# CompositeImplicitAutograd - don't register decomp +def tensor_split( + a: TensorLikeType, + indices_or_sections: Union[Tensor, DimsType], + dim: int = 0, +) -> Tuple[TensorLikeType, ...]: + _dim = utils.canonicalize_dim(a.ndim, dim) + if a.ndim == 0: + msg = "tensor_split: received a rank zero tensor, but expected a tensor of rank one or greater!" 
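+        # (editor's note: illustrative doctest, not in the upstream file)
+        # For valid ranks, Case 0 below hands the remainder out one element
+        # at a time to the leading chunks:
+        #
+        #   >>> torch.tensor_split(torch.arange(7), 3)
+        #   (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))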
+        raise ValueError(msg)
+
+    # If indices_or_sections is a tensor, it must be a CPU Long tensor
+    if isinstance(indices_or_sections, TensorLike):
+        if not indices_or_sections.device.type == "cpu":
+            msg = (
+                f"tensor_split: if indices_or_sections is a tensor it must be on the CPU, "
+                f"but received one on {indices_or_sections.device}"
+            )
+            raise ValueError(msg)
+        if indices_or_sections.dtype != torch.long:
+            msg = (
+                "tensor_split: if indices_or_sections is a tensor it must have long dtype, "
+                f"but received one with dtype {indices_or_sections.dtype}"
+            )
+            raise ValueError(msg)
+
+    # Case 0 -- indices_or_sections is an integer or a scalar tensor n and a is split along dim into n parts of equal-ish length
+    if isinstance(indices_or_sections, IntLike) or (
+        isinstance(indices_or_sections, TensorLike) and indices_or_sections.ndim == 0
+    ):
+        sections: int = (
+            indices_or_sections  # type: ignore[assignment]
+            if isinstance(indices_or_sections, Number)
+            else indices_or_sections.item()
+        )
+
+        if sections <= 0:
+            msg = f"tensor_split: number of sections must be greater than 0, but was {sections}"
+            raise ValueError(msg)
+
+        splits = []
+        dim_size = a.shape[_dim]
+        min_split_size = math.floor(dim_size / sections)
+        num_splits_one_extra = dim_size % sections
+        start_idx = 0
+        for split_idx in range(sections):
+            split_size = (
+                min_split_size + 1
+                if (split_idx < num_splits_one_extra)
+                else min_split_size
+            )
+            s = prims.slice_in_dim(a, start_idx, start_idx + split_size, axis=_dim)
+            splits.append(s)
+            start_idx = start_idx + split_size
+
+        return tuple(splits)
+    # Case 1 -- indices_or_sections is a sequence of integers or a 1D tensor describing the splits
+    else:
+        indices = indices_or_sections
+        if isinstance(indices_or_sections, TensorLike):
+            if indices_or_sections.ndim != 1:
+                msg = (
+                    "tensor_split: non-scalar indices_or_sections tensors must have only one dimension, "
+                    f"but received a tensor with {indices_or_sections.ndim} dimensions"
+                )
+                raise ValueError(msg)
+
+            indices = indices_or_sections.tolist()
+
+        splits = []
+        start_idx = 0
+        for x in indices:
+            splits.append(prims.slice_in_dim(a, start_idx, x, axis=_dim))
+            start_idx = x
+        splits.append(prims.slice_in_dim(a, start_idx, a.shape[_dim], axis=_dim))
+        return tuple(splits)
+
+
+# CompositeImplicitAutograd - don't register decomp
+def hsplit(
+    a: TensorLikeType, indices_or_sections: DimsType
+) -> Tuple[TensorLikeType, ...]:
+    torch._check(
+        a.ndim >= 1,
+        lambda: (
+            "torch.hsplit requires a tensor with at least 1 dimension, but got a tensor with "
+            + str(a.ndim)
+            + " dimensions!"
+        ),
+    )
+    dim = 0 if a.ndim == 1 else 1
+    if isinstance(indices_or_sections, IntLike):
+        split_size = indices_or_sections
+        torch._check(
+            (split_size != 0 and a.shape[dim] % split_size == 0),
+            lambda: (
+                "torch.hsplit attempted to split along dimension "
+                + str(dim)
+                + ", but the size of the dimension "
+                + str(a.shape[dim])
+                + " is not divisible by the split_size "
+                + str(split_size)
+                + "!"
+            ),
+        )
+        return tensor_split(a, split_size, dim)
+
+    torch._check_type(
+        isinstance(indices_or_sections, (list, tuple)),
+        lambda: (
+            "hsplit(): received an invalid combination of arguments. 
" + "Expected indices_or_sections to be of type int, list of ints or tuple of ints " + f"but got type {type(indices_or_sections)}" + ), + ) + + split_sizes = indices_or_sections + return tensor_split(a, split_sizes, dim) + + +# CompositeImplicitAutograd - don't register decomp +def vsplit( + a: TensorLikeType, indices_or_sections: DimsType +) -> Tuple[TensorLikeType, ...]: + torch._check( + a.ndim >= 2, + lambda: ( + "torch.vsplit requires a tensor with at least 2 dimension, but got a tensor with " + + str(a.ndim) + + " dimensions!" + ), + ) + if isinstance(indices_or_sections, IntLike): + split_size = indices_or_sections + torch._check( + (split_size != 0 and a.shape[0] % split_size == 0), + lambda: ( + f"torch.vsplit attempted to split along dimension 0" + f", but the size of the dimension " + f"{a.shape[0]}" + f" is not divisible by the split_size " + f"{split_size}" + f"!" + ), + ) + return tensor_split(a, split_size, 0) + + torch._check_type( + isinstance(indices_or_sections, (list, tuple)), + lambda: ( + "vsplit(): received an invalid combination of arguments. " + "Expected indices_or_sections to be of type int, list of ints or tuple of ints " + f"but got type {type(indices_or_sections)}" + ), + ) + + split_sizes = indices_or_sections + return tensor_split(a, split_sizes, 0) + + +@register_decomposition(aten.diag.out) +@out_wrapper() +def diag( + self: TensorLikeType, + offset: int = 0, +) -> TensorLikeType: + ndim = self.dim() + torch._check( + ndim in (1, 2), lambda: f"diag(): Supports 1D or 2D tensors. Got {ndim}D" + ) + if ndim == 1: + return torch.diag_embed(self, offset) + else: + return torch.diagonal_copy(self, offset) + + +@register_decomposition(aten.diagonal_scatter) +@out_wrapper() +def diagonal_scatter( + input: TensorLikeType, + src: TensorLikeType, + offset: int = 0, + dim1: int = 0, + dim2: int = 1, +) -> TensorLikeType: + out = utils.clone_preserve_strides(input) + diag = out.diagonal(offset, dim1, dim2) + torch._check( + diag.shape == src.shape, + lambda: "expected src to have a size equal to the diagonal of the input." 
+ f"Got {src.shape} for a diagonal of shape {diag.shape}", + ) + copy_to(diag, src) + return out + + +@register_decomposition(aten.diagonal) +def diagonal( + self: TensorLikeType, + offset: int = 0, + dim1: int = 0, + dim2: int = 1, +) -> TensorLikeType: + """ + Reference implementation of torch.diagonal + """ + num_dims = self.dim() + dim1 = utils.canonicalize_dim(idx=dim1, rank=num_dims) + dim2 = utils.canonicalize_dim(idx=dim2, rank=num_dims) + + torch._check( + dim1 != dim2, lambda: f"diagonal dimensions cannot be identical {dim1}, {dim2}" + ) + + storage_offset = self.storage_offset() + + if offset >= 0: + diag_size = max(min(self.size()[dim1], self.size()[dim2] - offset), 0) + else: + diag_size = max(min(self.size()[dim1] + offset, self.size()[dim2]), 0) + + if diag_size > 0: + if offset >= 0: + storage_offset += offset * self.stride()[dim2] + else: + storage_offset -= offset * self.stride()[dim1] + + sizes = [s for i, s in enumerate(self.size()) if i not in (dim1, dim2)] + sizes.append(diag_size) + + strides = [s for i, s in enumerate(self.stride()) if i not in (dim1, dim2)] + strides.append(self.stride()[dim1] + self.stride()[dim2]) + + result = self.as_strided(size=sizes, stride=strides, storage_offset=storage_offset) + + return result + + +diagonal_copy = _make_copy_from_view(diagonal) + + +@register_decomposition(aten.diag_embed) +@out_wrapper() +def diag_embed( + t: TensorLikeType, + offset: int = 0, + dim1: int = -2, + dim2: int = -1, +) -> TensorLikeType: + """ + Reference implementation of torch.diag_embed + """ + # convert from negative dims + rank = t.ndim + 1 + dim1 = utils.canonicalize_dim(rank=rank, idx=dim1) + dim2 = utils.canonicalize_dim(rank=rank, idx=dim2) + + # as per the docs, exchanging dims is equivalent to changing the sign of + # offset + if dim1 > dim2: + dim1, dim2 = dim2, dim1 + offset = -offset + + torch._check( + dim1 != dim2, lambda: f"diagonal dimensions cannot be identical {dim1}, {dim2}" + ) + + # as per the docs, the size of last dim is placed at dim1 and dim2 + last_dim = t.size(-1) + + if offset != 0: + # add padding to match the new size + t_shape = list(t.shape) + t_shape[-1] = builtins.abs(offset) + z = torch.zeros(t_shape, dtype=t.dtype, device=t.device, requires_grad=False) + pair = (z, t) if offset > 0 else (t, z) + t = torch.cat(pair, dim=-1) + # make sure the diagonal always has the same size + last_dim += builtins.abs(offset) + + # preserve original data, but place 1 at dim1 and move last dim to dim2 + t = t.unsqueeze(dim1).movedim(-1, dim2) + + # generate ranges shifting indices based on offset + a_range = torch.arange(last_dim, device=t.device, dtype=torch.int64) + b_range = torch.arange( + offset, last_dim + offset, device=t.device, dtype=torch.int64 + ) + + # broadcast + cond = a_range == b_range.unsqueeze(-1) + cond_shape = [last_dim if i in (dim1, dim2) else 1 for i in range(len(t.shape))] + cond = cond.reshape(cond_shape) + + # aten.diag_embed always returns a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return utils.mask_tensor(cond, t).contiguous() + + +@register_decomposition(aten.block_diag) +@out_wrapper() +def _block_diag_iterable(tensors: List[TensorLikeType]) -> TensorLikeType: + """ + Reference implementation of torch.block_diag + """ + tensors_2d = [ + tensor.view(1, -1) if tensor.dim() <= 1 else tensor for tensor in tensors + ] + + ncols = builtins.sum(tensor.shape[1] for tensor in tensors_2d) + device = tensors_2d[0].device + + result = [] + + col_start = 0 + for i, tensor in 
enumerate(tensors_2d): + torch._check( + tensor.dim() == 2, + lambda: "Input tensors must have 2 or fewer dimensions. " + f"Input {i} has {tensor.dim()} dimensions", + ) + torch._check( + tensor.device == device, + lambda: "Input tensors must all be on the same device. " + f"Input 0 is on device {device} and input {i} is on device {tensor.device}.", + ) + row, col = tensor.shape + left = torch.zeros((row, col_start), device=device, dtype=tensor.dtype) + right = torch.zeros( + (row, ncols - col_start - col), device=device, dtype=tensor.dtype + ) + result += [torch.cat((left, tensor, right), dim=1)] + col_start += col + + return torch.cat(result, dim=0) + + +def block_diag(*tensors: List[TensorLikeType]) -> TensorLikeType: + """ + This is used as an input to PythonRefInfo. `torch.block_diag` + expects arguments splatted, but `aten.block_diag` expects only + one argument that is a list of Tensors. + """ + return _block_diag_iterable(tensors) + + +# CompositeImplicitAutograd - don't register decomp +def dsplit(a: TensorLikeType, sections: DimsType) -> TensorSequenceType: + if a.ndim < 3: + raise RuntimeError( + f"torch.dsplit requires a tensor with at least 3 dimension, but got a tensor with {a.ndim} dimensions!" + ) + if isinstance(sections, IntLike) and (sections == 0 or a.shape[2] % sections != 0): + raise RuntimeError( + "torch.dsplit attempted to split along dimension 2, " + + f"but the size of the dimension {a.shape[2]} is not divisible by the split_size {sections}!" + ) + return tensor_split(a, sections, 2) + + +@register_decomposition(aten.t.default) +def t(a: TensorLikeType): + # TODO: Add sparse support + # if a.is_sparse: + # sparse_dim = a.sparse_dim() + # dense_dim = a.dense_dim() + # if not (sparse_dim <= 2 and dense_dim == 0): + # raise RuntimeError( + # f"t() expects a tensor with <= 2 sparse and 0 dense dimensions, but got {sparse_dim} sparse and" + # f"{dense_dim} dense dimensions" + # ) + if a.ndim > 2: + raise RuntimeError( + f"t() expects a tensor with <= 2 dimensions, but self is {a.ndim}D" + ) + return torch.transpose(a, 0, 0 if a.ndim < 2 else 1) + + +# CompositeImplicitAutograd - don't register decomp +def T(a: TensorLikeType) -> TensorLikeType: + # n != 2 && n != 0 is deprecated in regular PyTorch. + torch._check( + a.ndim in (0, 2), + lambda: ( + "The use of `x.T` on tensors of dimension other than 0 or 2 " + "to reverse their shape is not supported." 
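+            # (editor's note: illustrative doctest, not in the upstream file)
+            # For the supported 2-D case, .T swaps the two dimensions:
+            #
+            #   >>> torch.arange(6).reshape(2, 3).T.shape
+            #   torch.Size([3, 2])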
+ ), + ) + return a.t() + + +@register_decomposition(aten.alias) +def alias(a: TensorLikeType) -> TensorLikeType: + return prims.view_of(a) + + +@register_decomposition(aten.transpose) +def transpose(a: TensorLikeType, dim0: int, dim1: int) -> TensorLikeType: + _dim0, _dim1 = utils.canonicalize_dims(a.ndim, (dim0, dim1)) # type: ignore[misc] + + if a.ndim <= 1 or dim0 == dim1: + return aten.alias.default(a) + + _permutation = list(range(0, a.ndim)) + _permutation[_dim0] = _dim1 + _permutation[_dim1] = _dim0 + return torch.permute(a, _permutation) + + +# Aliases for transpose +swap_axes = transpose + + +@register_decomposition(aten.unfold) +def unfold( + self: TensorLikeType, dimension: int, size: int, step: int +) -> TensorLikeType: + shape, strides = _get_unfold_shape_stride( + self.shape, self.stride(), dimension, size, step + ) + return self.as_strided(shape, strides) + + +@register_decomposition(aten.unfold_copy) +@out_wrapper() +def unfold_copy(self: TensorLikeType, dimension: int, size: int, step: int): + return self.unfold(dimension, size, step).clone( + memory_format=torch.contiguous_format + ) + + +def _cumsumprod_common( + func, + init, + a: TensorLikeType, + dim: int, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + # We implement all the kwargs of a reduction. ATen just handles dtype + # nb. This decomposition may not be as efficient as a backend-specific implementation + ndim = a.ndim + dim = utils.canonicalize_dim(ndim, dim) + if ndim == 0: + return func(a.unsqueeze(0), dim=0, dtype=dtype, out=out) + a = a.unsqueeze(dim + 1) + rg = torch.arange(a.shape[dim], device=a.device) + mask = rg.unsqueeze(1) <= rg + for _ in range(ndim - dim - 1): + mask = mask.unsqueeze(-1) + masked_a = torch.where(mask, a, init) + return func(masked_a, dim=dim, dtype=dtype, out=out) + + +@register_decomposition(aten.cumsum) +def cumsum( + a: TensorLikeType, + dim: int, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + return _cumsumprod_common(func=sum, init=0, a=a, dim=dim, dtype=dtype, out=out) + + +@register_decomposition(aten.cumprod) +def cumprod( + a: TensorLikeType, + dim: int, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + return _cumsumprod_common(func=prod, init=1, a=a, dim=dim, dtype=dtype, out=out) + + +# Note: although squeeze is documented as having the out= kwarg it doesn't +@register_decomposition(aten.unsqueeze) +def unsqueeze(a: TensorLikeType, dim: int) -> TensorLikeType: + # Note that unsqueeze canonicalizes with rank + 1 because it allows + # a new innermost dimension to be specified + ndim = a.ndim + 1 + dim = utils.canonicalize_dim(ndim, dim) + return prims.expand_dims(a, (dim,), ndim=ndim) + + +# NOTE: shape is a vararg because Tensor.reshape can be called with as +# Tensor.view(a, b, c) or Tensor.view((a, b, c)) Function call torch.view +# doesn't support unpacked shapes +# TODO: Turn this into a decomposition (currently fails on reshape meta tests) +@register_decomposition(aten.view.default) +def view(a: TensorLikeType, *shape: ShapeType) -> TensorLikeType: + return _reshape_view_helper(a, *shape, allow_copy=False) + + +# CompositeImplicitAutograd - don't register decomp +def view_as(self: TensorLikeType, other: TensorLikeType) -> TensorLikeType: + return self.view(other.size()) + + +# CompositeImplicitAutograd - don't register decomp +def ravel(a: TensorLikeType) -> TensorLikeType: + return reshape(a, (-1,)) + + +# 
CompositeImplicitAutograd - don't register decomp +# missing ref impl. for aten.gather +@out_wrapper() +def take_along_dim( + a: torch.Tensor, indices: torch.Tensor, dim: Optional[int] = None +) -> torch.Tensor: + torch._check( + a.ndim == indices.ndim, + lambda: ( + "torch.take_along_dim(): input and indices should have the same " + f"number of dimensions, but got {a.ndim} dimensions for input, and " + f"{indices.ndim} dimensions for indices" + ), + ) + + torch._check( + utils.is_integer_dtype(indices.dtype), + lambda: ( + "torch.take_along_dim(): dtype of indices should be int but got " + f"{indices.dtype} instead" + ), + ) + + if dim is None: + return torch.gather(a.view(-1), 0, indices.view(-1)) + else: + self_sizes = list(a.shape) + self_sizes[dim] = indices.size(dim) + broadcast_shape = utils.infer_size_shapes(self_sizes, indices.size()) + indices_broadcast = broadcast_to(indices, broadcast_shape) + + indices_sizes = list(indices.shape) + indices_sizes[dim] = a.size(dim) + broadcast_shape = utils.infer_size_shapes(indices_sizes, a.size()) + self_broadcast = broadcast_to(a, broadcast_shape) + + return torch.gather(self_broadcast, dim, indices_broadcast) + + +@out_wrapper() +def empty( + *shape, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + requires_grad: bool = False, + pin_memory: bool = False, + memory_format: torch.memory_format = torch.contiguous_format, +) -> TensorLikeType: + torch._check( + memory_format != torch.preserve_format, + lambda: "torch.empty: the Preserve memory format is not supported", + ) + + shape = utils.extract_shape_from_varargs(shape) + + if memory_format == torch.contiguous_format: + strides = utils.make_contiguous_strides_for(shape) + elif memory_format == torch.channels_last_3d: + strides = utils.make_channels_last_3d_strides_for(shape) + else: # memory_format == torch.channels_last + torch._check( + memory_format == torch.channels_last, + lambda: f"torch.empty: received an unknown memory format {memory_format}!", + ) + strides = utils.make_channels_last_2d_strides_for(shape) + + return torch.empty_strided( + shape, + strides, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@out_wrapper() +def empty_permuted( + shape, + physical_layout, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + requires_grad: bool = False, + pin_memory: bool = False, +) -> TensorLikeType: + return prims.empty_permuted( + shape, + physical_layout, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_empty) +@out_wrapper() +def new_empty( + a: TensorLikeType, + size: ShapeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.empty( + size, + dtype=dtype, + device=device, + pin_memory=pin_memory, + layout=layout, + ) + + +@register_decomposition(aten.new_empty_strided) +@out_wrapper() +def new_empty_strided( + a: TensorLikeType, + size: ShapeType, + stride: StrideType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> 
TensorLikeType: + """ + Reference implementation of torch.Tensor.new_empty_strided + """ + + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.empty_strided( + size, + stride, + dtype=dtype, + device=device, + pin_memory=pin_memory, + layout=layout, + ) + + +@register_decomposition(aten.zeros.default) +@out_wrapper() +def zeros( + *size, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + size = utils.extract_shape_from_varargs(size) + + if dtype is None: + dtype = torch.get_default_dtype() + + return torch.full( + size, + False if dtype == torch.bool else 0, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_zeros) +@out_wrapper() +def new_zeros( + a: TensorLikeType, + size: ShapeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.full( + size, + False if (dtype or a.dtype) == torch.bool else 0, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.ones.default) +@out_wrapper() +def ones( + *size, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + size = utils.extract_shape_from_varargs(size) + + if dtype is None: + dtype = torch.get_default_dtype() + + return torch.full( + size, + True if dtype == torch.bool else 1, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_ones) +@out_wrapper() +def new_ones( + a: TensorLikeType, + size: ShapeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.full( + size, + True if (dtype or a.dtype) == torch.bool else 1, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_full) +@out_wrapper() +def new_full( + a: TensorLikeType, + size: ShapeType, + fill_value: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.full( + size, + fill_value, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + ) + + +@register_decomposition(aten.empty_like) +@out_wrapper() +def empty_like( + a: TensorLikeType, + *, + dtype: 
Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: Optional[torch.layout] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + if memory_format != torch.preserve_format: + return torch.empty( + a.shape, + dtype=dtype, + layout=layout, + device=device, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + ) + + # memory_format == torch.preserve_format + logical_to_physical_perm = ( + utils.compute_elementwise_output_logical_to_physical_perm(a) + ) + # identity perm is [2, 1, 0] + return torch.empty_permuted( + a.shape, + logical_to_physical_perm, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition([aten.arange.start_step, aten.arange.start_out]) +@out_wrapper() +def arange( + start: NumberType = 0, + end: Optional[NumberType] = None, + step: NumberType = 1, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + device = torch.device(utils.device_or_default(device)) + + assert not isinstance(start, complex) + assert not isinstance(end, complex) + assert not isinstance(step, complex) + + # Case: torch.arange(5) + if end is None: + end = start + start = 0 + torch._check(step != 0, lambda: "step must be nonzero") + if step > 0: + torch._check( + end >= start, + lambda: "upper bound and lower bound inconsistent with step sign", + ) + elif step < 0: + torch._check( + end <= start, + lambda: "upper bound and lower bound inconsistent with step sign", + ) + + def is_finite(x): + return not isinstance(x, FloatWithoutSymFloat) or math.isfinite(x) + + torch._check( + is_finite(start) and is_finite(end), + lambda: f"unsupported range: {start} -> {end}", + ) + torch._check( + is_finite(step), + lambda: f"step must be finite but got {step}", + ) + + args = (start, end, step) + integer_args = builtins.all(isinstance(arg, IntLike) for arg in args) + + if dtype is None: + dtype = torch.int64 if integer_args else torch.get_default_dtype() + + is_integer = utils.is_integer_dtype(dtype) + if is_integer: + xstart = sym_int(start) + xend = sym_int(end) + xstep = sym_int(step) + + # For int64 we truncate arguments to int before calculating length, but + # other integral dtypes we don't. Weird... but needed to match ATen shapes. + if dtype == torch.int64: + # Uses floordiv to avoid ceil in inductor. 
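+        # (editor's note: worked example, not in the upstream file)
+        # start=0, end=10, step=3 gives sgn = 1 and
+        # length = (10 - 0 + 3 - 1) // 3 = 4, which matches
+        # math.ceil((10 - 0) / 3) without going through floating point.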
+ sgn = bool(xstep > 0) - bool(xstep < 0) # type: ignore[possibly-undefined] + length = (xend - xstart + xstep - sgn) // xstep # type: ignore[possibly-undefined] + else: + length = math.ceil((end - start) / step) + + if is_integer: + return prims.iota( + length, + start=xstart, # type: ignore[possibly-undefined] + step=xstep, # type: ignore[possibly-undefined] + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + index = prims.iota( + length, + start=0, + step=1, + dtype=torch.int64, + device=device, + requires_grad=False, + ) + + computation_dtype = ( + torch.long if integer_args else utils.get_acc_type(dtype, device) + ) + index = _maybe_convert_to_dtype(index, computation_dtype) + result = start + step * index + result = _maybe_convert_to_dtype(result, dtype) + + if requires_grad: + result.requires_grad_(True) + return result + + +@register_decomposition(aten.lerp) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("start", "end", "weight"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def lerp(start: Tensor, end: Tensor, weight: Union[Tensor, NumberType]): + inputs = [start, end] + if isinstance(weight, Number): + weight = start.new_full((), weight) # type: ignore[arg-type] + else: + inputs.append(weight) + assert isinstance(weight, Tensor) # mypy + # We implement it this way for numerical stability. We assume (in the stability optimisation) + # that 0 <= weight <= 1. We take the abs to deal with complex numbers + # We want to perform operations near zero, which is where floating points are most precise + # thus, we perform the following optimisation: + # If weight.abs() >= 0.5: + # return (1 - weight) * (start - end) + end + mask = weight.abs() >= 0.5 + coeff = torch.where(mask, weight - 1, weight) + base = torch.where(mask, end, start) + output = coeff * (end - start) + base + # make sure the decomposition output's stride is same as non-decomposition path. 
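+    # (editor's note: worked example, not in the upstream file)
+    # For start=1.0, end=2.0, weight=0.75: mask is True, so coeff = -0.25 and
+    # base = end, giving -0.25 * (2.0 - 1.0) + 2.0 = 1.75, which equals the
+    # textbook start + weight * (end - start) = 1.0 + 0.75 * 1.0 = 1.75.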
+ stride = utils.compute_elementwise_output_strides(*_maybe_broadcast(*inputs)) + if output.stride() != stride: + output = prims.copy_strided(output, stride) + + return handle_noncontiguous_outputs(inputs, output) + + +@register_decomposition(aten.linspace) +@out_wrapper() +def linspace( + start: Union[NumberType, TensorLikeType], + end: Union[NumberType, TensorLikeType], + steps: NumberType, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: torch.layout = torch.strided, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + if isinstance(start, TensorLikeType): + torch._check( + start.dim() == 0, + lambda: "linspace only supports 0-dimensional start and end tensors", + ) + start = _maybe_convert_to_dtype(start, torch.float64) + if isinstance(end, TensorLikeType): + torch._check( + end.dim() == 0, + lambda: "linspace only supports 0-dimensional start and end tensors", + ) + end = _maybe_convert_to_dtype(end, torch.float64) + + if py_any(isinstance(arg, complex) for arg in (start, end, steps)): + default_complex_dtype = utils.corresponding_complex_dtype( + torch.get_default_dtype() + ) + if dtype is None: + dtype = default_complex_dtype + else: + torch._check( + utils.is_complex_dtype(dtype), + lambda: f"linspace(): inferred dtype {default_complex_dtype} can't be safely cast to passed dtype {dtype}", + ) + else: + dtype = dtype or torch.get_default_dtype() + assert isinstance(dtype, torch.dtype) + + # steps does not participate in the computation of the dtype + torch._check_type( + isinstance(steps, IntLike), + lambda: f"received an invalid combination of arguments - got \ +({type(start).__name__}, {type(end).__name__}, {type(steps).__name__})", + ) + assert isinstance(steps, IntLike) # for mypy + torch._check(steps >= 0, lambda: "number of steps must be non-negative") + + factory_kwargs = { + "layout": layout, + "device": device, + "pin_memory": pin_memory, + "requires_grad": requires_grad, + } + if steps == 0: + return torch.full((0,), 0, dtype=dtype, **factory_kwargs) # type: ignore[arg-type] + if steps == 1: + if isinstance(start, TensorLikeType): + return torch.empty((steps,), dtype=dtype, **factory_kwargs).copy_(start) # type: ignore[arg-type] + else: + return torch.full((steps,), start, dtype=dtype, **factory_kwargs) # type: ignore[arg-type] + + # Perform in arange in int because some backends like ATen or Triton do not support all the dtypes + rg = torch.arange(0, steps, **factory_kwargs) # type: ignore[arg-type] + + # Small types need to be computed in higher precision as this is, at heart, an associative scan + dtype_red = ( + torch.int64 + if (utils.is_boolean_dtype(dtype) or utils.is_integer_dtype(dtype)) + else dtype + ) + computation_dtype, _ = utils.reduction_dtypes( + rg, REDUCTION_OUTPUT_TYPE_KIND.SAME, dtype_red + ) + cast_rg = partial(_maybe_convert_to_dtype, dtype=computation_dtype) + + # We implement torch.lerp without performing rg / (steps - 1) explicitly + # With this we get out[0] == start, out[-1] == end + step = (end - start) / (steps - 1) + out = torch.where( + rg < steps / 2, + start + step * cast_rg(rg), # type: ignore[arg-type,operator] + end - step * cast_rg((steps - 1) - rg), # type: ignore[arg-type,operator] + ) + return _maybe_convert_to_dtype(out, dtype) # type: ignore[return-value] + + +@register_decomposition(aten.logspace) +@out_wrapper() +def logspace( + start: Union[NumberType, TensorLikeType], + end: Union[NumberType, TensorLikeType], + steps: NumberType, + base: NumberType = 
10, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: torch.layout = torch.strided, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + if dtype is None: + dtype = torch.get_default_dtype() + + # NB: NumPy doesn't have this cast + if prims.utils.is_integer_dtype(dtype): + if isinstance(start, FloatLike): + start = sym_int(start) + elif isinstance(start, TensorLikeType): + torch._check( + start.dim() == 0, + lambda: "logspace only supports 0-dimensional start and end tensors", + ) + start = _maybe_convert_to_dtype(start, dtype) + if isinstance(end, FloatLike): + end = sym_int(end) + elif isinstance(end, TensorLikeType): + torch._check( + end.dim() == 0, + lambda: "logspace only supports 0-dimensional start and end tensors", + ) + end = _maybe_convert_to_dtype(end, dtype) + + if py_any(isinstance(arg, complex) for arg in (start, end, steps)): + default_complex_dtype = utils.corresponding_complex_dtype( + torch.get_default_dtype() + ) + dtype = default_complex_dtype + _dtype = None # torch.linspace will update the correct dtype + else: + _dtype = torch.float64 + + assert not isinstance(base, complex) # for mypy + if base < 0: + raise NotImplementedError + ret = torch.linspace( # type: ignore[misc] + start, # type: ignore[arg-type] + end, # type: ignore[arg-type] + steps, # type: ignore[arg-type] + dtype=_dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + return _maybe_convert_to_dtype(torch.pow(base, ret), dtype) # type: ignore[arg-type,return-value] + + +@overload +def meshgrid(tensors: Sequence[TensorLikeType], indexing: str): + pass + + +@overload +def meshgrid(*tensors: TensorLikeType, indexing: str): + pass + + +@register_decomposition(aten.meshgrid) +def meshgrid( + *tensors: Union[TensorLikeType, List[TensorLikeType], Tuple[TensorLikeType]], + indexing: str, +) -> List[TensorLikeType]: + # This ref simultaneously handles two overloads (see stubs above) + # The `indexing` argument is currently optional for torch.meshgrid, but we + # plan to make the argument required: https://github.com/pytorch/pytorch/issues/50276 + if isinstance(tensors[0], (list, tuple)): + assert len(tensors) == 1 + tensors = tuple(tensors[0]) + + torch._check( + py_all(isinstance(a, TensorLike) for a in tensors), + lambda: "meshgrid expects its inputs to be tensors", + ) + + torch._check(len(tensors) > 0, lambda: "meshgrid expects a non-empty TensorList") + + for i in range(len(tensors) - 1): + torch._check( + tensors[i].dtype == tensors[i + 1].dtype, # type: ignore[union-attr] + lambda: "meshgrid expects all tensors to have the same dtype", + ) + torch._check( + tensors[i].device == tensors[i + 1].device, # type: ignore[union-attr] + lambda: "meshgrid expects all tensors to have the same device", + ) + + swap_first_and_second_tensors = False + if indexing == "xy": + swap_first_and_second_tensors = len(tensors) >= 2 + if swap_first_and_second_tensors: + tensors = (tensors[1], tensors[0], *tensors[2:]) + else: + torch._check( + indexing == "ij", + lambda: ( + 'torch.meshgrid: indexing must be one of "xy" or "ij", ' + f"but received: {indexing}" + ), + ) + + result_shape: List[int] = [] + for t in tensors: + assert isinstance(t, TensorLike) # mypy + torch._check( + t.ndim == 0 or t.ndim == 1, + lambda: f"torch.meshgrid: Expected 0D or 1D tensor in the tensor list but got: {t}", + ) + result_shape.append(t.numel()) + + grids: List[TensorLikeType] = [] + for i, t in enumerate(tensors): + 
assert isinstance(t, TensorLike) # mypy + if t.ndim == 0: + t = t.view((1,)) + grids.append(prims.broadcast_in_dim(t, result_shape, (i,))) + + if swap_first_and_second_tensors: + # Swap outputs if we originally swapped at the beginning + grids[0], grids[1] = grids[1], grids[0] + + return grids + + +# CompositeImplicitAutograd - don't register decomp +def movedim( + input: TensorLikeType, + source: Union[int, DimsSequenceType], + destination: Union[int, DimsSequenceType], +) -> TensorLikeType: + """ + Reference implementation of torch.movedim + """ + if type(source) is int: + source = (source,) + if type(destination) is int: + destination = (destination,) + + # Converts to list to produce a compatible error message with core PyTorch, + # which prints sequences in square brackets. + torch._check( + len(source) == len(destination), # type: ignore[arg-type] + lambda: ( + "movedim: Invalid source or destination dims: source " # type: ignore[arg-type] + f"({list(source)} dims) should contain the same number " # type: ignore[arg-type] + f"of dims as destination ({list(destination)} dims)" # type: ignore[arg-type] + ), + ) + + rank = input.ndim + ss = tuple(utils.canonicalize_dims(rank=rank, indices=source)) # type: ignore[arg-type] + ds = tuple(utils.canonicalize_dims(rank=rank, indices=destination)) # type: ignore[arg-type] + + sss = set(ss) + dss = set(ds) + + # See above on why this converts to list in error messages. + torch._check( + len(ss) == len(sss), + lambda: f"movedim: repeated dim in `source` ({list(source)})", # type: ignore[arg-type] + ) + torch._check( + len(ds) == len(dss), + lambda: f"movedim: repeated dim in `destination` ({list(destination)})", # type: ignore[arg-type] + ) + + m = dict(zip(ds, ss)) + dims = [] + si = 0 # source index + for di in range(rank): + # check if the destination index is in the mapping + s = m.get(di) + if s is not None: + # insert source index if found + dims.append(s) + else: + # insert source index sequentially, skipping indices from the mapping + while si in sss: + si += 1 + dims.append(si) + si += 1 + + result = torch.permute(input, tuple(dims)) + + return result + + +# NOTE: for convenience, shape can be a tuple of ints or a tuple containing a tuple of ints +@register_decomposition(aten.empty_strided) +@out_wrapper() +def empty_strided( + shape: Union[ShapeType, Tuple[ShapeType]], + strides: StrideType, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: torch.layout = torch.strided, + requires_grad: bool = False, + pin_memory: bool = False, +) -> TensorLikeType: + # Layout == strided, pin_memory is False + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + + shape = utils.extract_shape_from_varargs(shape) + dtype = torch.get_default_dtype() if dtype is None else dtype + device = torch.device("cpu") if device is None else device + + return prims.empty_strided( + shape, + strides, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.eye) +@out_wrapper() +def eye( + n: int, + m: Optional[int] = None, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, # TODO: unused +) -> TensorLikeType: + """ + Reference implementation of torch.eye + """ + if m is None: + m = n + + torch._check(n >= 0, lambda: f"n must be greater or equal to 0, got {n}") + torch._check(m >= 0, lambda: f"m must be greater or equal to 0, got 
{m}") + + range_n = torch.arange(n, dtype=torch.int64, device=device, requires_grad=False) + range_m = torch.arange(m, dtype=torch.int64, device=device, requires_grad=False) + + cond = range_n.unsqueeze(-1) == range_m + if dtype is torch.bool: + return cond + else: + one = torch.ones( + (1,), + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=False, + ) + return torch.where(cond, one, 0) + # TODO: Use requires_grad. All refs taking the requires_grad kwarg must + # return a leaf tensor. + # result.requires_grad_(requires_grad) + + +@register_decomposition([aten.full.default, aten.full.out]) +@out_wrapper() +def full( + shape: ShapeType, + fill_value: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + + dtype = dtype if dtype is not None else utils.type_to_dtype(type(fill_value)) + device = device if device is not None else torch.device("cpu") + + e = empty( + shape, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + return torch.fill(e, fill_value) # type: ignore[arg-type] + + +def full_like( + a: TensorLikeType, + fill_value: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + e = torch.empty_like( + a, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + memory_format=memory_format, + ) + return fill(e, fill_value) + + +@register_decomposition(aten.zeros_like) +@out_wrapper() +def zeros_like( + a: TensorLikeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + return torch.full_like( + a, + False if (dtype or a.dtype) == torch.bool else 0, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + memory_format=memory_format, + ) + + +@register_decomposition(aten.ones_like) +@out_wrapper() +def ones_like( + a: TensorLikeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + return torch.full_like( + a, + True if (dtype or a.dtype) == torch.bool else 1, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + memory_format=memory_format, + ) + + +@register_decomposition(aten.randn.default) +@out_wrapper() +def randn( + *shape, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: Optional[torch.layout] = None, + requires_grad: bool = False, + pin_memory: bool = False, +) -> TensorLikeType: + utils.check_pin_memory(pin_memory) + + shape_ = utils.extract_shape_from_varargs(shape) + + dtype = utils.dtype_or_default(dtype) + device = utils.device_or_default(device) + + return prims.normal( + shape_, + 
mean=0.0, + std=1.0, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +def scalar_tensor( + a: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + dtype = dtype if dtype is not None else utils.type_to_dtype(type(a)) + device = device if device is not None else torch.device("cpu") + return prims.scalar_tensor(a, dtype=dtype, device=device) + + +# +# Randomness References +# + + +def _uniform_helper( + shape: ShapeType, + low: Union[bool, int, float] = 0.0, + high: Union[bool, int, float] = 1.0, + *, + dtype: torch.dtype, + device: DeviceLikeType, +) -> TensorLikeType: + utils.validate_shape(shape) + + assert isinstance(low, Number) + assert isinstance(high, Number) + low = sym_float(low) + high = sym_float(high) + + assert isinstance(dtype, torch.dtype) + device = utils.canonicalize_device(device) + + return prims._uniform_helper(shape, low=low, high=high, dtype=dtype, device=device) + + +@register_decomposition(aten.masked_fill) +@out_wrapper() +def masked_fill(a: TensorLikeType, mask: TensorLikeType, value: TensorOrNumberLikeType): + python_type = utils.dtype_to_type(a.dtype) + if isinstance(value, Number): + value_type = type(value) + else: + # NOTE: Could not use value = item(value) as it resulted in + # RuntimeError: Cannot cast FakeTensor(cpu) to number + value_ndim = value.ndim + torch._check( + value_ndim == 0, + lambda: f"only supports a 0-dimensional value tensor, but got tensor with {value_ndim} dimension", + ) + # `masked_fill` allows cpu scalar to be moved to cuda and xpu but not otherwise. + is_cpu_scalar = ( + a.device.type in ["cuda", "xpu", torch._C._get_privateuse1_backend_name()] + and value.device.type == "cpu" + ) + torch._check( + is_cpu_scalar or value.device == a.device, + lambda: "Expected `value` to be on same device as `a`", + ) + value_type = utils.dtype_to_type(value.dtype) + + if value_type is complex: + # only downcasting from complex to lower type is not allowed. + # We allow casting `value` to lower type for other case + # Eg. float -> int. 
+        # Ref: https://github.com/pytorch/pytorch/issues/79195
+        torch._check(
+            utils.is_weakly_lesser_type(value_type, python_type),
+            lambda: f"could not convert to type {python_type} without overflow",
+        )
+
+    # Since `where` allows type-promotion,
+    # cast value to correct type before passing to `where`
+    value = _maybe_convert_to_dtype(value, a.dtype)
+    r = torch.where(mask, value, a)  # type: ignore[arg-type]
+
+    # aten.masked_fill always returns a new contiguous tensor
+    # contiguous() is needed to correctly model the output stride
+    return r.contiguous()
+
+
+@register_decomposition(aten.masked_fill_)
+def masked_fill_(
+    a: TensorLikeType, mask: TensorLikeType, value: TensorOrNumberLikeType
+) -> TensorLikeType:
+    b = torch.masked_fill(a, mask, value)  # type: ignore[arg-type]
+    a.copy_(b)
+    return a
+
+
+# CompositeImplicitAutograd - don't register decomp
+def allclose(
+    a: TensorLikeType,
+    b: TensorLikeType,
+    rtol: float = 1e-05,
+    atol: float = 1e-08,
+    equal_nan: bool = False,
+) -> bool:
+    """
+    Reference implementation of torch.allclose
+    """
+    _check_close_args(name="torch.allclose", a=a, b=b, rtol=rtol, atol=atol)
+
+    return bool(
+        torch.all(torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)).item()
+    )
+
+
+def equal(a: TensorLikeType, b: TensorLikeType) -> bool:
+    utils.check_same_device(a, b, allow_cpu_scalar_tensors=False)
+    utils.check_same_dtype(a, b)
+
+    # Shape check
+    if a.ndim != b.ndim:
+        return False
+
+    for x, y in zip(a.shape, b.shape):
+        if x != y:
+            return False
+
+    # Short-circuits if there are no elements to validate
+    if a.numel() == 0:
+        return True
+
+    return item(all(eq(a, b)))  # type: ignore[return-value]
+
+
+@register_decomposition(aten.norm)
+@out_wrapper(exact_dtype=True)
+def norm(
+    input: TensorLikeType,
+    p: Optional[Union[float, str]] = "fro",
+    dim: Optional[DimsType] = None,
+    keepdim: bool = False,
+    *,
+    dtype: Optional[torch.dtype] = None,
+) -> TensorLikeType:
+    # In these cases we compute the "Frobenius norm"
+    if (
+        p == "fro" and (dim is None or isinstance(dim, Dim) or len(dim) <= 2)
+    ) or p is None:
+        p = 2
+    if isinstance(dim, Dim):
+        dim = [dim]
+    if isinstance(p, str):
+        # Here we either call the nuclear norm, or we call matrix_norm with some arguments
+        # that will throw an error
+        if dim is None:
+            dim = tuple(range(input.ndim))
+        return torch.linalg.matrix_norm(input, p, dim, keepdim, dtype=dtype)
+    else:
+        return torch.linalg.vector_norm(input, p, dim, keepdim, dtype=dtype)
+
+
+@register_decomposition(aten.trace)
+@out_wrapper()
+def trace(self: TensorLikeType) -> TensorLikeType:
+    torch._check(
+        self.ndim == 2, lambda: f"expected a matrix, but got tensor with dim {self.ndim}"
+    )
+    return torch.sum(torch.diag(self, 0))
+
+
+def _make_r_binary_op(base_op):
+    def rop(
+        a: Union[TensorLikeType, NumberType],
+        b: Union[TensorLikeType, NumberType],
+    ) -> TensorLikeType:
+        return base_op(b, a)
+
+    return rop
+
+
+rtruediv = _make_r_binary_op(true_divide)
+rfloordiv = _make_r_binary_op(floor_divide)
+rpow = _make_r_binary_op(pow)
+
+
+@register_decomposition(aten.triu)
+@out_wrapper()
+def triu(a: TensorLikeType, diagonal: int = 0) -> TensorLikeType:
+    torch._check(
+        a.ndim >= 2, lambda: "triu: input tensor must have at least 2 dimensions"
+    )
+    h, w = a.shape[-2:]
+    mask = (
+        torch.arange(w, device=a.device).unsqueeze(-2)
+        - torch.arange(h, device=a.device).unsqueeze(-1)
+    ) >= diagonal
+
+    # aten.triu always returns a new contiguous tensor
+    # contiguous() is needed to correctly model the output stride
+    return
utils.mask_tensor(mask, a).contiguous() + + +@register_decomposition(aten.tril) +@out_wrapper() +def tril(a: TensorLikeType, diagonal: int = 0) -> TensorLikeType: + torch._check( + a.ndim >= 2, lambda: "tril: input tensor must have at least 2 dimensions" + ) + h, w = a.shape[-2:] + mask = ( + torch.arange(w, device=a.device).unsqueeze(-2) + - torch.arange(h, device=a.device).unsqueeze(-1) + ) <= diagonal + + # aten.tril always returns a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return utils.mask_tensor(mask, a).contiguous() + + +# This is based on get_tril_size in aten/src/ATen/native/TensorFactories.h +# The components of the matrix that belong to the lower triangle with offset +# form a pentagon that can be broken down into a top trapezoid and a bottom +# rectangle. For the implementation of tril_indices, we need the sizes of +# both of these, as well as the length of the top side of the trapezoid. +def _get_tril_sizes(row: int, col: int, offset: int) -> Tuple[int, int, int]: + if row == 0 or col == 0: + return 0, 0, 0 + + m_first_row = min(col, 1 + offset) if offset > 0 else int(row + offset > 0) + m_last_row = max(0, min(col, row + offset)) + n_row_all = max(0, min(row, row + offset)) + n_row_trapezoid = m_last_row - m_first_row + 1 + + # Number of elements in top trapezoid + trapezoid_size = (m_first_row + m_last_row) * n_row_trapezoid // 2 + # Number of elements in bottom rectangle + diff_row = n_row_all - n_row_trapezoid + rectangle_size = max(0, diff_row * col) + + return trapezoid_size, rectangle_size, m_first_row + + +def _trilu_checks( + name: str, + row: int, + col: int, + dtype: torch.dtype, + layout: torch.layout, + pin_memory: bool, +): + torch._check(row >= 0, lambda: f"row must be non-negative, got {row}") + torch._check(col >= 0, lambda: f"col must be non-negative, got {col}") + torch._check( + dtype in (torch.int32, torch.int64), + lambda: f"\"{name}\" not implemented for '{dtype}'", + ) + + +# This is based on tril_indices_cuda in aten/src/ATen/native/cuda/TensorFactories.cu +@register_decomposition(aten.tril_indices) +@out_wrapper() +def tril_indices( + row: int, + col: int, + offset: int = 0, + *, + dtype: torch.dtype = torch.long, + layout: torch.layout = torch.strided, + device: DeviceLikeType = "cpu", + pin_memory: bool = False, +) -> TensorLikeType: + _trilu_checks("tril_indices", row, col, dtype, layout, pin_memory) + + trapezoid_size, rectangle_size, m_first_row = _get_tril_sizes(row, col, offset) + row_offset = max(0, -offset) + + arange_kw = partial( + torch.arange, layout=layout, device=device, pin_memory=pin_memory + ) + + # first we do the indices for top trapezoid + xs1 = arange_kw(0, trapezoid_size, dtype=torch.float64) + b = m_first_row - 0.5 + row_inds1 = torch.floor(-b + torch.sqrt(b * b + 2 * xs1)) + col_inds1 = torch.floor(xs1 - (2 * m_first_row - 1 + row_inds1) * row_inds1 * 0.5) + row_inds1 = _maybe_convert_to_dtype(row_inds1 + row_offset, dtype) + col_inds1 = _maybe_convert_to_dtype(col_inds1, dtype) + + # then bottom rectangle + xs2 = arange_kw(0, rectangle_size, dtype=dtype) + row_inds2 = xs2 // col + (col - m_first_row + 1 + row_offset) + col_inds2 = xs2 % col + + return torch.stack( + (torch.cat((row_inds1, row_inds2)), torch.cat((col_inds1, col_inds2))) + ) + + +# Similar to _get_tril_sizes above, but here there is a top trapezoid and +# a bottom rectangle instead. 
Note that you can't reduce this to +# _get_tril_sizes(col, row, -offset) because that would correspond to +# decomposing into a left trapezoid and right rectangle. +def _get_triu_sizes(row: int, col: int, offset: int) -> Tuple[int, int, int]: + if row == 0 or col == 0: + return 0, 0, 0 + + m_first_row = max(0, col - offset) if offset > 0 else col + + # Number of elements in top rectangle + rectangle_size = max(0, min(row, -offset) * col) + + # Number of elements in bottom trapezoid + trapezoid_size_tril, rectangle_size_tril, _ = _get_tril_sizes(row, col, offset - 1) + triu_size = row * col - (trapezoid_size_tril + rectangle_size_tril) + trapezoid_size = triu_size - rectangle_size + + return trapezoid_size, rectangle_size, m_first_row + + +@register_decomposition(aten.triu_indices) +@out_wrapper() +def triu_indices( + row: int, + col: int, + offset: int = 0, + *, + dtype: torch.dtype = torch.long, + layout: torch.layout = torch.strided, + device: DeviceLikeType = "cpu", + pin_memory: bool = False, +) -> TensorLikeType: + _trilu_checks("triu_indices", row, col, dtype, layout, pin_memory) + + trapezoid_size, rectangle_size, m_first_row = _get_triu_sizes(row, col, offset) + col_offset = max(0, offset) + + arange_kw = partial( + torch.arange, layout=layout, device=device, pin_memory=pin_memory + ) + + # indices for top rectangle + xs2 = arange_kw(0, rectangle_size, dtype=dtype) + row_inds2 = xs2 // col + col_inds2 = xs2 % col + + # bottom trapezoid + xs1 = arange_kw(0, trapezoid_size, dtype=torch.float64) + b = -0.5 - m_first_row + row_inds1 = torch.floor(-b - torch.sqrt(b * b - 2 * xs1)) + col_inds1 = torch.floor(xs1 - ((2 * m_first_row - 1 - row_inds1) * row_inds1) * 0.5) + row_inds1 = _maybe_convert_to_dtype(row_inds1, dtype) + col_inds1 = _maybe_convert_to_dtype(col_inds1, dtype) + + if col: + row_inds1 = row_inds1 + (rectangle_size // col) + col_inds1 = col_inds1 + col_offset + + return torch.stack( + (torch.cat((row_inds2, row_inds1)), torch.cat((col_inds2, col_inds1))) + ) + + +@register_decomposition(aten.bucketize) +@out_wrapper(exact_dtype=True) +def bucketize( + a: TensorLikeType, + boundaries: TensorLikeType, + *, + out_int32: bool = False, + right: bool = False, +): + torch._check( + boundaries.dim() == 1, + lambda: f"boundaries tensor must be 1 dimension but got dim({boundaries.dim()})", + ) + + out_dtype = torch.int32 if out_int32 else torch.int64 + n_boundaries = boundaries.shape[-1] + if n_boundaries == 0: + return torch.zeros_like(a) + # We are trying to find the bucket (defined by pairs of consecutive elements of `boundaries`) + # each element of `a` belongs to. 
We use binary search to achieve logarithmic complexity,
+    # but each step of the search is done "in parallel" over all elements of `a`
+    # can't use int32 as indexes, so we have to do all computations with int64 and convert at the end
+    start = torch.zeros(a.shape, device=a.device, dtype=torch.int64)
+    end = start + n_boundaries
+    # Max depth of the binary search
+    # Since we can't break out of the loop at different points for different elements of a,
+    # we just do the max amount of iterations that binary search requires and add condition
+    # tensor (cond_update below) to stop updating once the search terminates
+
+    # For the first iteration through the loop we can skip some checks, so we have a separate implementation
+    mid = start + (end - start) // 2
+    mid_val = boundaries[mid]
+    if right:
+        cond_mid = mid_val > a
+    else:
+        cond_mid = mid_val >= a
+    start = torch.where(cond_mid, start, mid + 1)
+
+    if n_boundaries > 1:
+        cond_update = torch.ones_like(a, dtype=torch.bool)
+        niters = int(math.log2(n_boundaries))
+        for _ in range(niters):
+            end = torch.where(cond_mid & cond_update, mid, end)
+            cond_update = start < end
+            # start might end up pointing to 1 past the end, we guard against that
+            mid = torch.where(cond_update, start + (end - start) // 2, 0)
+            mid_val = boundaries[mid]
+            # If right is true, the buckets are closed on the *left*
+            # (i.e., we are doing the equivalent of std::upper_bound in C++)
+            # Otherwise they are closed on the right (std::lower_bound)
+            if right:
+                cond_mid = mid_val > a
+            else:
+                cond_mid = mid_val >= a
+            start = torch.where((~cond_mid) & cond_update, mid + 1, start)
+
+    return start.to(dtype=out_dtype)
+
+
+@register_decomposition(aten.cauchy)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("self",),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def cauchy(self, median=0, sigma=1, generator=None):
+    assert generator is None
+    torch._check(
+        not utils.is_complex_dtype(self.dtype)
+        and not utils.is_integer_dtype(self.dtype)
+        and not utils.is_boolean_dtype(self.dtype),
+        lambda: f"Cauchy distribution is a continuous probability distribution. \
+        dtype must be a floating point but you specified {self.dtype}",
+    )
+    torch._check(
+        sigma > 0.0,
+        lambda: f"cauchy_ expects sigma > 0.0, but found sigma={sigma}",
+    )
+    return median + sigma * torch.tan(math.pi * (torch.rand_like(self) - 0.5))
+
+
+@register_decomposition(aten.exponential)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("self",),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def exponential(self, rate=1, generator=None):
+    assert generator is None
+    torch._check(
+        not utils.is_complex_dtype(self.dtype)
+        and not utils.is_integer_dtype(self.dtype)
+        and not utils.is_boolean_dtype(self.dtype),
+        lambda: f"Exponential distribution is a continuous probability distribution. 
\ + dtype must be a floating point but you specified {self.dtype}", + ) + torch._check( + rate > 0.0, + lambda: f"exponential_ expects lambda > 0.0, but found lambda={rate}", + ) + return -1 / rate * torch.log1p(-torch.rand_like(self)) + + +@register_decomposition(aten.geometric) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def geometric(self, p, generator=None): + assert generator is None + # TODO: fix inductor rand_like for integer, bool dtypes + torch._check( + not utils.is_complex_dtype(self.dtype) + and not utils.is_boolean_dtype(self.dtype), + lambda: f"geometric not implemented for {self.dtype}", + ) + torch._check( + 0 < p and p < 1, + lambda: f"geometric_ expects p to be in (0, 1), but got p={p}", + ) + return torch.floor(torch.log1p(-torch.rand_like(self)) / math.log1p(-p)) + 1 + + +@register_decomposition(aten.log_normal) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def log_normal(self, mean=1, std=2, generator=None): + assert generator is None + torch._check( + not utils.is_complex_dtype(self.dtype) + and not utils.is_integer_dtype(self.dtype) + and not utils.is_boolean_dtype(self.dtype), + lambda: f"log_normal not implemented for {self.dtype}", + ) + torch._check( + 0 < std, + lambda: f"log_normal_ expects std > 0.0, but found std={std}", + ) + return torch.exp(std * torch.randn_like(self) + mean) + + +# TODO: add support for functionalization aten.normal_functional +# NOTE: the device and dtype will be ignored when shape is None +@register_decomposition(aten.normal) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=( + "mean", + "std", + ), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def normal( + mean=0, + std=1, + size=None, + *, + generator=None, + dtype=None, + layout=None, + device=None, + pin_memory=None, +): + assert layout is None or layout == torch.strided + + if not isinstance(std, TensorLike): + torch._check( + std >= 0, lambda: f"normal expects std >= 0.0, but found std {std}" + ) + + if size is None: + tensors = tuple(t for t in (mean, std) if isinstance(t, TensorLike)) + torch._check( + len(tensors) > 0, + lambda: "normal expects that either mean or std is a tensor, or size is defined", + ) + torch._check( + layout is None and pin_memory is None, + lambda: "Cannot pass layout, or pin_memory without size", + ) + + size = _broadcast_shapes(*(t.shape for t in tensors)) + dtype = tensors[0].dtype + device = tensors[0].device + else: + torch._check( + not isinstance(mean, TensorLike) and not isinstance(std, TensorLike), + lambda: "normal expects mean and std to be scalars when size is defined", + ) + dtype = torch.get_default_dtype() if dtype is None else dtype + device = torch.device("cpu") if device is None else device + + normal_samples = prims.normal( + size, + mean=0.0, + std=1.0, + dtype=dtype, + device=device, + requires_grad=False, + generator=generator, + ) + return std * normal_samples + mean + + +@register_decomposition(aten.normal_) +def normal_(self, mean=0, std=1, *, generator=None): + return normal(mean, std, self.shape, out=self, generator=generator) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def rad2deg(self: TensorLikeType): + torch._check( + not utils.is_complex_dtype(self.dtype), + lambda: "rad2deg is not supported for complex tensors.", + ) 
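+    # The constant below is 180/pi, so this is a plain scalar multiply.
+    # Illustrative doctest (added for exposition, not part of the original source):
+    # >>> torch.rad2deg(torch.tensor([math.pi, math.pi / 2]))
+    # tensor([180.,  90.])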
+    M_180_PI = 57.295779513082320876798154814105170332405472466564
+    return self * M_180_PI
+
+
+@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
+def deg2rad(self: TensorLikeType):
+    torch._check(
+        not utils.is_complex_dtype(self.dtype),
+        lambda: "deg2rad is not supported for complex tensors.",
+    )
+    M_PI_180 = 0.017453292519943295769236907684886127134428718885417
+    return self * M_PI_180
+
+
+@register_decomposition(aten.count_nonzero)
+@out_wrapper()
+def count_nonzero(self, dim: Optional[DimsType] = None):
+    return (self != 0).sum(dim)
+
+
+def _dot_check(self, other):
+    torch._check(
+        self.dim() == 1 and other.dim() == 1,
+        lambda: f"1D tensors expected, but got {self.dim()}D and {other.dim()}D tensors",
+    )
+
+    def numel_error():
+        return (
+            f"inconsistent tensor size, expected tensor [{self.numel()}] and src [{other.numel()}] to have the "
+            f"same number of elements, but got {self.numel()} and {other.numel()} elements respectively"
+        )
+
+    torch._check(self.numel() == other.numel(), numel_error)
+
+
+@register_decomposition(aten.dot)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("self", "other"),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def dot(self, other):
+    if self.is_complex():
+        if self.is_conj():
+            if other.is_conj():
+                return torch.dot(self.conj(), other.conj()).conj()
+            else:
+                return torch.vdot(self.conj(), other)
+        elif other.is_conj():
+            return torch.vdot(other.conj(), self)
+
+    _dot_check(self, other)
+    return (self * other).sum()
+
+
+@register_decomposition(aten.vdot)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("self", "other"),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def vdot(self, other):
+    if not self.is_complex():
+        return torch.dot(self, other)
+
+    if self.is_conj():
+        if other.is_conj():
+            return torch.vdot(other.conj(), self.conj())
+        else:
+            return torch.dot(self.conj(), other)
+    elif other.is_conj():
+        return torch.dot(self, other.conj()).conj()
+
+    _dot_check(self, other)
+    # The decomposition fails if you do self.conj()...
not sure why + return (self.conj_physical() * other).sum() + + +@register_decomposition(aten.select_scatter) +@out_wrapper() +def select_scatter(x: TensorLikeType, src: TensorLikeType, dim: int, index: int): + dim = utils.canonicalize_dim(x.ndim, dim) + mask_shape = [1] * x.ndim + mask_shape[dim] = -1 + if index < 0: + index = index + x.shape[dim] + mask = torch.arange(x.shape[dim], device=x.device).view(mask_shape) == index + src = torch.unsqueeze(src, dim).expand(x.shape) + return torch.where(mask, src, x) + + +# inplace +abs_ = _make_inplace(abs) +acos_ = _make_inplace(acos) +acosh_ = _make_inplace(acosh) +add_ = _make_inplace(add) +addcmul_ = _make_inplace(addcmul) +addcdiv_ = _make_inplace(addcdiv) +asin_ = _make_inplace(asin) +asinh_ = _make_inplace(asinh) +atan_ = _make_inplace(atan) +atanh_ = _make_inplace(atanh) +atan2_ = _make_inplace(atan2) +bitwise_and_ = _make_inplace(bitwise_and) +bitwise_left_shift_ = _make_inplace(bitwise_left_shift) +bitwise_not_ = _make_inplace(bitwise_not) +bitwise_or_ = _make_inplace(bitwise_or) +bitwise_right_shift_ = _make_inplace(bitwise_right_shift) +bitwise_xor_ = _make_inplace(bitwise_xor) +ceil_ = _make_inplace(ceil) +clamp_ = _make_inplace(clamp) +clamp_min_ = _make_inplace(clamp_min) +clamp_max_ = _make_inplace(clamp_max) +conj_physical_ = _make_inplace(conj_physical) +copysign_ = _make_inplace(copysign) +cos_ = _make_inplace(cos) +cosh_ = _make_inplace(cosh) +cumsum_ = _make_inplace(cumsum) +cumprod_ = _make_inplace(cumprod) +deg2rad_ = _make_inplace(deg2rad) +digamma_ = _make_inplace(digamma) +div_ = _make_inplace(div) +eq_ = _make_inplace(eq) +erf_ = _make_inplace(erf) +erfc_ = _make_inplace(erfc) +erfinv_ = _make_inplace(erfinv) +exp_ = _make_inplace(exp) +exp2_ = _make_inplace(exp2) +expm1_ = _make_inplace(expm1) +float_power_ = _make_inplace(float_power) +floor_ = _make_inplace(floor) +floor_divide_ = _make_inplace(floor_divide) +fmod_ = _make_inplace(fmod) +frac_ = _make_inplace(frac) +gcd_ = _make_inplace(gcd) +ge_ = _make_inplace(ge) +gt_ = _make_inplace(gt) +heaviside_ = _make_inplace(heaviside) +hypot_ = _make_inplace(hypot) +igamma_ = _make_inplace(igamma) +igammac_ = _make_inplace(igammac) +i0_ = _make_inplace(i0) +lcm_ = _make_inplace(lcm) +le_ = _make_inplace(le) +lerp_ = _make_inplace(lerp) +lgamma_ = _make_inplace(lgamma) +log10_ = _make_inplace(log10) +log1p_ = _make_inplace(log1p) +log2_ = _make_inplace(log2) +log_ = _make_inplace(log) +logical_and_ = _make_inplace(logical_and) +logical_not_ = _make_inplace(logical_not) +logical_or_ = _make_inplace(logical_or) +logical_xor_ = _make_inplace(logical_xor) +lt_ = _make_inplace(lt) +mul_ = _make_inplace(mul) +mvlgamma_ = _make_inplace(mvlgamma) +nan_to_num_ = _make_inplace(nan_to_num) +ne_ = _make_inplace(ne) +neg_ = _make_inplace(neg) +nextafter_ = _make_inplace(nextafter) +pow_ = _make_inplace(pow) +rad2deg_ = _make_inplace(rad2deg) +reciprocal_ = _make_inplace(reciprocal) +remainder_ = _make_inplace(remainder) +rsqrt_ = _make_inplace(rsqrt) +sgn_ = _make_inplace(sgn) +sigmoid_ = _make_inplace(sigmoid) +sign_ = _make_inplace(sign) +sin_ = _make_inplace(sin) +sinc_ = _make_inplace(sinc) +sinh_ = _make_inplace(sinh) +sqrt_ = _make_inplace(sqrt) +square_ = _make_inplace(square) +sub_ = _make_inplace(sub) +tan_ = _make_inplace(tan) +tanh_ = _make_inplace(tanh) +tril_ = _make_inplace(tril) +triu_ = _make_inplace(triu) +true_divide_ = _make_inplace(true_divide) +trunc_ = _make_inplace(trunc) +xlogy_ = _make_inplace(xlogy) +cauchy_ = _make_inplace(cauchy) +exponential_ = 
_make_inplace(exponential) +geometric_ = _make_inplace(geometric) +log_normal_ = _make_inplace(log_normal) +zero_ = _make_inplace(zero) + + +# xref: isStorage in torch/csrc/DynamicTypes.cpp +def _isStorage(obj): + return isinstance(obj, (torch.TypedStorage, torch.UntypedStorage)) + + +# xref: compute_sizes in torch/csrc/utils/tensor_new.cpp +def _compute_sizes(seq, scalar_type): + MAX_DIMS = 128 + is_storage = _isStorage(seq) + sizes = [] + # TODO: this is inaccurate, we actually test PySequence_Check + while isinstance(seq, (list, tuple)): + length = len(seq) + if is_storage: + length //= scalar_type.itemsize + sizes.append(length) + if len(sizes) > MAX_DIMS: + raise ValueError(f"too many dimensions '{type(seq).__name__}'") + if length == 0: + break + try: + handle = seq[0] + except Exception: + raise ValueError( # noqa: B904 + f"could not determine the shape of object type '{type(seq).__name__}'" + ) + seq = handle + + return sizes + + +# xref: infer_scalar_type in torch/csrc/utils/tensor_new.cpp +def _infer_scalar_type(obj): + if isinstance(obj, FloatLike): + return torch.get_default_dtype() + if isinstance(obj, IntLike) and not isinstance(obj, bool): # careful! + return torch.int64 + if isinstance(obj, BoolLike): + return torch.bool + if isinstance(obj, complex): + default_dtype = torch.get_default_dtype() + if default_dtype is torch.float: + return torch.cfloat + elif default_dtype is torch.double: + return torch.cdouble + elif default_dtype is torch.half: + return torch.chalf + else: + raise RuntimeError("invalid default scalar type for complex") + if isinstance(obj, torch.Tensor): + return obj.dtype + if isinstance(obj, str): + raise TypeError(f"new(): invalid data type '{type(obj).__name__}'") + # TODO: this is inaccurate, we actually test PySequence_Check + if isinstance(obj, (list, tuple)): + scalarType = None + length = len(obj) + # match NumPy semantics, except use default tensor type instead of + # double. + if length == 0: + return torch.get_default_dtype() + for i in range(length): + cur_item = obj[i] + # TODO: test this + """ + if cur_item is obj: + raise TypeError("new(): self-referential lists are incompatible") + """ + item_scalarType = _infer_scalar_type(cur_item) # recurse! 
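+            # Illustrative note (added for exposition, not part of the original
+            # source): pairwise promotion means a mixed list like [1, 2.0]
+            # infers the default float dtype, assuming the default is unchanged:
+            # >>> torch.tensor([1, 2.0]).dtype
+            # torch.float32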
+ if scalarType is not None: + scalarType = torch.promote_types(scalarType, item_scalarType) + else: + scalarType = item_scalarType + if scalarType is torch.cdouble: + # this won't change (unless we hit undefined, but that will + # fail later) + return scalarType + return scalarType + raise RuntimeError(f"Could not infer dtype of {type(obj).__name__}") + + +# Analogous to recursive_store +# xref: recursive_store in torch/csrc/utils/tensor_new.cpp +def _recursive_build( + scalarType: torch.dtype, obj: Union[TensorOrNumberLikeType, TensorSequenceType] +): + if isinstance(obj, Tensor) and obj.numel() == 1: + return obj.detach().to(dtype=scalarType, device="cpu", copy=True).view(()) + elif isinstance(obj, Tensor): + # It is invalid to call ".tensor([...])" with a non-scalar tensor in eager mode + # >>> torch.tensor([torch.randn(2)]) + # ValueError: only one element tensors can be converted to Python scalars + # + # But it is possible with a NumPy array + # >>> torch.tensor([np.random.uniform(size=(2,))]).shape + # torch.Size([1, 2]) + return obj.detach().to(dtype=scalarType, device="cpu", copy=True) + elif isinstance(obj, Number): + return torch.scalar_tensor(obj, dtype=scalarType) + + # seq can be a list of tensors + seq = obj + return torch.stack([_recursive_build(scalarType, item) for item in seq]) + + +# xref: internal_new_from_data in torch/csrc/utils/tensor_new.cpp +def _internal_new_from_data( + options, + scalar_type, + device_opt, + data, + copy_variables, + copy_numpy, + type_inference, + pin_memory=False, +): + if isinstance(data, torch.Tensor): + torch._check( + not pin_memory, lambda: "Can't pin tensor constructed from a variable" + ) + var = data + if copy_variables: + var = var.detach() + inferred_scalar_type = var.dtype if type_inference else scalar_type + device = device_opt if device_opt is not None else var.device + return var.to( + device=device, + dtype=inferred_scalar_type, + non_blocking=False, + copy=copy_variables, + ) + + # TODO + if hasattr(data, "__cuda_array_interface__"): + return NotImplemented + + # TODO: test for numpy input with PyArray_Check + + device = device_opt if device_opt is not None else options["device"] + inferred_scalar_type = _infer_scalar_type(data) if type_inference else scalar_type + + # NB: Don't need to avoid tracing, as we aren't going to do any manual + # pointer filling tricks + if _isStorage(data): + return NotImplemented + else: + if torch.device(device).type == "meta": + return NotImplemented + + # In the C implementation, we would directly start poking the memory + # of a freshly allocated CPU tensor. 
Here, we're going to do an + # alternate, heinously slow implementation: turn each individual + # scalar into a tensor, and then repeatedly cat them together + tensor = _recursive_build(inferred_scalar_type, data) + + tensor = tensor.to(device, inferred_scalar_type, non_blocking=False, copy=False) + + # NB: lift_fresh is not needed, because we built the tensor from scalars + # guaranteeing a fresh tensor in this case + return tensor + + +# xref: tensor_ctor in torch/csrc/utils/tensor_new.cpp +def tensor(data, *, dtype=None, device=None, pin_memory=False, requires_grad=False): + # TODO (or not): support names kwarg + if isinstance(data, torch.Tensor): + warnings.warn( + "To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() " + "or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor)" + ) + type_inference = dtype is None + new_tensor = _internal_new_from_data( + # device="cpu" because that's what you get with torch.tensor(2) no + # device by default + {"device": "cpu"}, # TODO: use torch.get_default_tensor_type + dtype if dtype is not None else torch.get_default_dtype(), + device, + data, + copy_variables=True, + copy_numpy=True, + type_inference=type_inference, + pin_memory=pin_memory, + ) + new_tensor.detach_() + if requires_grad: + new_tensor.requires_grad_(requires_grad) + return new_tensor + + +# Views +# We can't model these as above, as the pattern of doing `op(a, out=a)` does not work for a view function +# given that it does not reshape the input (it just copies the result into it) + +# squeeze_ = _make_inplace(squeeze) +# t_ = _make_inplace(t) +# transpose_ = _make_inplace(transpose) +# unsqueeze_ = _make_inplace(unsqueeze) + + +import torch._refs._conversions +import torch._refs.fft +import torch._refs.linalg +import torch._refs.nn.functional +import torch._refs.special diff --git a/parrot/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12ed0aa2988871e8cd9a3d46f3a65b876d624009 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cf41c5246f1d4df954651c5223e1120c3f513af Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_refs/_conversions.py b/parrot/lib/python3.10/site-packages/torch/_refs/_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..b312f8f6eada5046778f40158f1e81953ad22985 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_refs/_conversions.py @@ -0,0 +1,119 @@ +# mypy: allow-untyped-defs +import torch +import torch._prims_common as utils + +# Utilities should come BEFORE this import +from torch._decomp import register_decomposition + +from torch._prims_common import TensorLikeType +from torch._prims_common.wrappers import out_wrapper +from torch._refs import _broadcast_shapes + +# Data conversion references. +# +# Note: this module breaks the usual _refs to torch naming scheme where +# _refs.foo.bar is a ref for torch.foo.bar. 
The following definitions are not +# part of _refs/__init__.py to avoid name clashes with Python builtin types +# (like int). + +__all__ = [ + # dtypes + "bfloat16", + "bool", + "byte", + "cdouble", + "cfloat", + "chalf", + "char", + "double", + "float", + "half", + "int", + "long", + "short", + # misc + "complex", + "polar", +] + + +def _make_conversion_method(name: str, dtype: torch.dtype): + def fn( + self: TensorLikeType, memory_format: torch.memory_format = torch.preserve_format + ) -> TensorLikeType: + return self.to(dtype, memory_format=memory_format) # type: ignore[call-overload] + + fn.__name__ = name + return fn + + +bfloat16 = _make_conversion_method("bfloat16", torch.bfloat16) + +bool = _make_conversion_method("bool", torch.bool) + +byte = _make_conversion_method("byte", torch.uint8) + +cdouble = _make_conversion_method("cdouble", torch.cdouble) + +cfloat = _make_conversion_method("cfloat", torch.cfloat) + +chalf = _make_conversion_method("chalf", torch.complex32) + +char = _make_conversion_method("char", torch.int8) + +double = _make_conversion_method("double", torch.double) + +float = _make_conversion_method("float", torch.float) + +half = _make_conversion_method("half", torch.half) + +int = _make_conversion_method("int", torch.int) + +long = _make_conversion_method("long", torch.long) + +short = _make_conversion_method("short", torch.short) + + +@register_decomposition(torch._ops.ops.aten.complex) +# Note: complex has type promotion tests disabled due to different semantics. +# exact_dtype is for compat with complex_check_dtype from core. +@out_wrapper(exact_dtype=True) +def complex(real: TensorLikeType, imag: TensorLikeType) -> TensorLikeType: + allowed_dtypes = (torch.float32, torch.float64, torch.float16) + torch._check( + real.dtype in allowed_dtypes and imag.dtype in allowed_dtypes, + lambda: ( + f"Expected both inputs to be Half, Float or Double tensors but got " + f"{real.dtype} and {imag.dtype}" + ), + ) + torch._check( + real.dtype == imag.dtype, + lambda: ( + f"Expected object of scalar type {real.dtype} but got " + f"scalar type {imag.dtype} for second argument" + ), + ) + result_dtype = utils.corresponding_complex_dtype(real.dtype) # type: ignore[arg-type] + common_shape = _broadcast_shapes(real.shape, imag.shape) + result = real.new_empty( + common_shape, + dtype=result_dtype, + layout=real.layout, + device=real.device, + # pin_memory=real.is_pinned(), # NYI + ) + result.real = real + result.imag = imag + return result + + +@register_decomposition(torch._ops.ops.aten.polar) +# Note: polar has type promotion tests disabled due to different semantics. +# exact_dtype is for compat with complex_check_dtype from core. 
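+# Illustrative doctest-style sketch of the intended semantics (added for
+# exposition, not part of the original source):
+# >>> torch.polar(torch.tensor([1.0]), torch.tensor([0.0]))
+# tensor([1.+0.j])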
+@out_wrapper(exact_dtype=True) +def polar(abs: TensorLikeType, angle: TensorLikeType) -> TensorLikeType: + result = torch.complex(abs, angle) + result.real = abs * torch.cos(angle) + result.imag = abs * torch.sin(angle) + return result diff --git a/parrot/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py b/parrot/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..411087b773eaf6307c93e14fc0596cd65e1e0b16 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py @@ -0,0 +1,313 @@ +# mypy: allow-untyped-defs +from functools import partial + +from typing import List, Optional, Tuple, Union + +import torch + +import torch._prims as prims + +import torch._prims_common as utils +import torch._refs as refs +import torch._refs.linalg as linalg +from torch import Tensor +from torch._prims_common import ( + check_fp_or_complex, + check_is_matrix, + Dim, + DimsType, + ELEMENTWISE_TYPE_PROMOTION_KIND, + IntLike, + NumberType, + TensorLikeType, +) +from torch._prims_common.wrappers import ( + _maybe_convert_to_dtype, + elementwise_type_promotion_wrapper, + out_wrapper, +) + + +__all__ = [ + "diagonal", + "matrix_norm", + "norm", + "svd", + "svdvals", + "vector_norm", + "vecdot", + "cross", +] + + +def _check_norm_dtype(dtype: Optional[torch.dtype], x_dtype: torch.dtype, fn_name: str): + """ + Checks related to the dtype kwarg in `linalg.*norm` functions + """ + if dtype is not None: + torch._check( + utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype), + lambda: f"{fn_name}: dtype should be floating point or complex. Got {dtype}", + ) + torch._check( + utils.is_complex_dtype(dtype) == utils.is_complex_dtype(x_dtype), + lambda: "{fn_name}: dtype should be {d} for {d} inputs. 
Got {dtype}".format( + fn_name=fn_name, + d="complex" if utils.is_complex_dtype(x_dtype) else "real", + dtype=dtype, + ), + ) + torch._check( + utils.get_higher_dtype(dtype, x_dtype) == dtype, + lambda: f"{fn_name}: the dtype of the input ({x_dtype}) should be convertible " + "without narrowing to the specified dtype ({dtype})", + ) + + +import operator + +# Utilities should come BEFORE this import +from torch._decomp import register_decomposition +from torch._decomp.decompositions import pw_cast_for_opmath + + +@register_decomposition(torch._ops.ops.aten.linalg_cross) +@out_wrapper() +@pw_cast_for_opmath +def cross(a: Tensor, b: Tensor, dim: int = -1): + torch._check( + a.ndim == b.ndim, + lambda: "linalg.cross: inputs must have the same number of dimensions.", + ) + torch._check( + a.size(dim) == 3 and b.size(dim) == 3, + lambda: f"linalg.cross: inputs dim {dim} must have length 3, got {a.size(dim)} and {b.size(dim)}", + ) + a, b = torch.broadcast_tensors(a, b) + dim = utils.canonicalize_dim(a.ndim, dim) + idx = torch.arange(3, device=a.device) + return a.index_select(dim, (idx + 1) % 3) * b.index_select( + dim, (idx + 2) % 3 + ) - a.index_select(dim, (idx + 2) % 3) * b.index_select(dim, (idx + 1) % 3) + + +def diagonal( + input: TensorLikeType, + *, + offset: int = 0, + dim1: int = -2, + dim2: int = -1, +) -> TensorLikeType: + return torch.diagonal(input, offset=offset, dim1=dim1, dim2=dim2) + + +@register_decomposition(torch._ops.ops.aten.linalg_vector_norm) +@out_wrapper(exact_dtype=True) +def vector_norm( + x: TensorLikeType, + ord: Union[float, int] = 2, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, +) -> Tensor: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + # Checks + check_fp_or_complex(x.dtype, "linalg.vector_norm") + + if isinstance(dim, Dim): + dim = [dim] # type: ignore[assignment] + + if guard_size_oblivious(x.numel() == 0) and (ord < 0.0 or ord == float("inf")): + torch._check( + dim is not None and len(dim) != 0, + lambda: f"linalg.vector_norm cannot compute the {ord} norm on an empty tensor " + "because the operation does not have an identity", + ) + shape = x.shape + assert dim is not None # mypy does not seem to be able to see through check? 
+        for d in dim:
+            torch._check(
+                shape[d] != 0,
+                lambda: f"linalg.vector_norm cannot compute the {ord} norm on the "
+                f"dimension {d} because this dimension is empty and the "
+                "operation does not have an identity",
+            )
+    _check_norm_dtype(dtype, x.dtype, "linalg.vector_norm")
+
+    computation_dtype, result_dtype = utils.reduction_dtypes(
+        x, utils.REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, dtype
+    )
+
+    to_result_dtype = partial(_maybe_convert_to_dtype, dtype=result_dtype)
+
+    # Implementation
+    if ord == 0.0:
+        return torch.sum(torch.ne(x, 0.0), dim=dim, keepdim=keepdim, dtype=result_dtype)
+    elif ord == float("inf"):
+        return to_result_dtype(torch.amax(torch.abs(x), dim=dim, keepdim=keepdim))  # type: ignore[return-value,arg-type]
+    elif ord == float("-inf"):
+        return to_result_dtype(torch.amin(torch.abs(x), dim=dim, keepdim=keepdim))  # type: ignore[return-value,arg-type]
+    else:
+        # From here on the computation dtype is important as the reduction is non-trivial
+        x = _maybe_convert_to_dtype(x, computation_dtype)  # type: ignore[assignment]
+        reduce_sum = partial(torch.sum, dim=dim, keepdim=keepdim)
+
+        is_ord_even = ord % 2 == 0 if isinstance(ord, IntLike) else ord % 2.0 == 0.0
+        if not (is_ord_even and utils.is_float_dtype(x.dtype)):
+            x = torch.abs(x)
+        return to_result_dtype(torch.pow(reduce_sum(torch.pow(x, ord)), 1.0 / ord))  # type: ignore[return-value]
+
+
+def _backshift_permutation(dim0, dim1, ndim):
+    # Auxiliary function for matrix_norm
+    # Computes the permutation that moves the two given dimensions to the back
+    ret = [i for i in range(ndim) if i != dim0 and i != dim1]
+    ret.extend((dim0, dim1))
+    return ret
+
+
+def _inverse_permutation(perm):
+    # Given a permutation, returns its inverse. It's equivalent to argsort on an array
+    return [i for i, j in sorted(enumerate(perm), key=operator.itemgetter(1))]
+
+
+# CompositeImplicitAutograd
+@out_wrapper(exact_dtype=True)
+def matrix_norm(
+    A: TensorLikeType,
+    ord: Union[float, str] = "fro",
+    dim: DimsType = (-2, -1),
+    keepdim: bool = False,
+    *,
+    dtype: Optional[torch.dtype] = None,
+) -> TensorLikeType:
+    # shape
+    check_is_matrix(A, "linalg.matrix_norm")
+    # dim
+    dim = utils.canonicalize_dims(A.ndim, dim)
+    if isinstance(dim, Dim):
+        dim = (dim,)  # type: ignore[assignment]
+    torch._check(
+        len(dim) == 2, lambda: f"linalg.matrix_norm: dim must be a 2-tuple. Got {dim}"
+    )
+    torch._check(
+        dim[0] != dim[1],
+        lambda: f"linalg.matrix_norm: dims must be different. Got ({dim[0]}, {dim[1]})",
+    )
+    # dtype arg
+    _check_norm_dtype(dtype, A.dtype, "linalg.matrix_norm")
+
+    if isinstance(ord, str):
+        # ord
+        torch._check(
+            ord in ("fro", "nuc"),
+            lambda: f"linalg.matrix_norm: Order {ord} not supported.",
+        )
+        # dtype
+        check_fp_or_complex(
+            A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != "nuc"
+        )
+
+        if ord == "fro":
+            return vector_norm(A, 2, dim, keepdim, dtype=dtype)
+        else:  # ord == "nuc"
+            if dtype is not None:
+                A = _maybe_convert_to_dtype(A, dtype)  # type: ignore[assignment]
+            perm = _backshift_permutation(dim[0], dim[1], A.ndim)
+            result = torch.sum(svdvals(prims.transpose(A, perm)), -1, keepdim)
+            if keepdim:
+                inv_perm = _inverse_permutation(perm)
+                result = prims.transpose(torch.unsqueeze(result, -1), inv_perm)
+            return result
+    else:
+        # ord
+        abs_ord = abs(ord)
+        torch._check(
+            abs_ord in (2, 1, float("inf")),
+            lambda: f"linalg.matrix_norm: Order {ord} not supported.",
+        )
+        # dtype
+        check_fp_or_complex(
+            A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != 2
+        )
+
+        max_min = partial(torch.amax if ord > 0.0 else torch.amin, keepdim=keepdim)
+
+        if abs_ord == 2.0:
+            if dtype is not None:
+                A = _maybe_convert_to_dtype(A, dtype)  # type: ignore[assignment]
+            perm = _backshift_permutation(dim[0], dim[1], A.ndim)
+            result = max_min(svdvals(prims.transpose(A, perm)), dim=-1)
+            if keepdim:
+                inv_perm = _inverse_permutation(perm)
+                result = prims.transpose(torch.unsqueeze(result, -1), inv_perm)
+            return result
+        else:  # 1, -1, inf, -inf
+            dim0, dim1 = dim
+            if abs_ord == float("inf"):
+                dim0, dim1 = dim1, dim0
+            if not keepdim and (dim0 < dim1):
+                dim1 -= 1
+            return max_min(
+                vector_norm(A, 1.0, dim=dim0, keepdim=keepdim, dtype=dtype), dim1
+            )
+
+
+# CompositeImplicitAutograd
+@out_wrapper(exact_dtype=True)
+def norm(
+    A: TensorLikeType,
+    ord: Optional[Union[float, str]] = None,
+    dim: Optional[DimsType] = None,
+    keepdim: bool = False,
+    *,
+    dtype: Optional[torch.dtype] = None,
+) -> TensorLikeType:
+    if dim is not None:
+        if isinstance(dim, Dim):
+            dim = (dim,)  # type: ignore[assignment]
+        torch._check(
+            len(dim) in (1, 2),
+            lambda: f"linalg.norm: If dim is specified, it must be of length 1 or 2. Got {dim}",
+        )
+    elif ord is not None:
+        torch._check(
+            A.ndim in (1, 2),
+            lambda: f"linalg.norm: If dim is not specified but ord is, the input must be 1D or 2D. 
Got {A.ndim}D", + ) + + if ord is not None and ( + (dim is not None and len(dim) == 2) or (dim is None and A.ndim == 2) + ): + if dim is None: + dim = (0, 1) + return matrix_norm(A, ord, dim, keepdim, dtype=dtype) + else: + if ord is None: + ord = 2.0 + return vector_norm(A, ord, dim, keepdim, dtype=dtype) + + +# CompositeImplicitAutograd +@out_wrapper("U", "S", "Vh", exact_dtype=True) +def svd(A: TensorLikeType, full_matrices: bool = True) -> Tuple[Tensor, Tensor, Tensor]: + return prims.svd(A, full_matrices=full_matrices) + + +# CompositeImplicitAutograd +@out_wrapper(exact_dtype=True) +def svdvals(A: TensorLikeType) -> Tensor: + return svd(A, full_matrices=False)[1] + + +# CompositeImplicitAutograd +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("x", "y"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def vecdot(x: Tensor, y: Tensor, dim: int = -1) -> Tensor: + check_fp_or_complex(x.dtype, "linalg.vecdot") + return (x.conj() * y).sum(dim=dim) diff --git a/parrot/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3922beef7bd087c6c4fad5614ba057332800b684 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_refs/nn/__init__.py b/parrot/lib/python3.10/site-packages/torch/_refs/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3646b96be90be51e86070360b23212ed450186a6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_refs/nn/__init__.py @@ -0,0 +1,3 @@ +from typing import List + +__all__: List[str] = [] diff --git a/parrot/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22321eebb9c69b0f2a54ed55fa5bea22924028b1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py b/parrot/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8383d888bbe87d79a8a523d5c91befa4d084ad32 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py @@ -0,0 +1,1238 @@ +# mypy: allow-untyped-defs +import math +from functools import wraps +from typing import Callable, Optional, Union + +import torch +import torch._prims as prims +import torch._prims_common as utils +import torch._refs as refs +from torch._decomp import register_decomposition +from torch._prims_common import ( + ELEMENTWISE_TYPE_PROMOTION_KIND, + NumberType, + ShapeType, + TensorLike, + TensorLikeType, +) +from torch._prims_common.wrappers import ( + elementwise_type_promotion_wrapper, + elementwise_unary_scalar_wrapper, + out_wrapper, +) +from torch._refs import _make_inplace + +__all__ = [ + "alpha_dropout", + "celu", + "celu_", + "dropout", + "elu", + "elu_", + "gelu", + "glu", + "group_norm", + "hardshrink", + "hardtanh", + "hinge_embedding_loss", + "huber_loss", + "l1_loss", + "layer_norm", + "leaky_relu", + "log_softmax", + "margin_ranking_loss", + "mish", + "mish_", + 
"mse_loss", + "nll_loss", + "pairwise_distance", + "pdist", + "poisson_nll_loss", + "prelu", + "relu", + "relu6", + "selu", + "selu_", + "smooth_l1_loss", + "softmax", + "softmin", + "softplus", + "softshrink", + "tanhshrink", + "threshold", + "threshold_", + "triplet_margin_loss", +] + +Tensor = torch.Tensor +aten = torch._ops.ops.aten +DispatchKey = torch._C.DispatchKey # type: ignore[attr-defined] + + +def _dropout_helper( + self: TensorLikeType, + val: float, +) -> TensorLikeType: + """ + Helper function for all dropout-type operators. During training, + some of the elements of the input tensor are randomly masked. + + Returns the masked tensor of the boolean values. + + """ + + return ( + refs._uniform_helper( + self.shape, low=0.0, high=1.0, dtype=torch.float32, device=self.device + ) + < val + ) + + +@register_decomposition(aten.alpha_dropout) +def alpha_dropout( + self: TensorLikeType, p: float = 0.5, training: bool = False, inplace: bool = False +) -> TensorLikeType: + if inplace: + raise NotImplementedError + + if not training: + return self + + torch._check( + p <= 1 and p >= 0, + lambda: f"dropout probability has to be between 0 and 1, but got, {p}", + ) + + if p == 1: + return torch.zeros_like(self) + + if p == 0: + return self + + dropout_mask = _dropout_helper(self, 1 - p) + + # From paper: Self-Normalizing Neural Networks (https://arxiv.org/pdf/1706.02515.pdf) + # alpha = - SELU.alpha * SELU.scale, here + # SELU.alpha = 1.6732632423543772848170429916717 and + # SELU.scale = 1.0507009873554804934193349852946 + alpha = -1.7580993408473766 + + a = 1.0 / math.sqrt((alpha * alpha * p + 1) * (1 - p)) + b = torch.logical_not(dropout_mask) + b = b * (alpha * a) + alpha * a * p + dropout_mask = a * dropout_mask + + return self * dropout_mask + b + + +def _inplace_wrapper(fn): + """ + Given a nn.functional non-linearity, implements its `inplace: bool` argument + """ + + # nb. We use the name of the first argument used in the unary references + @wraps(fn) + def _fn(a, *args, inplace=False, **kwargs): + if inplace: + torch._check( + "out" not in kwargs, + lambda: "Cannot set inplace=True and pass out= at the same time", + ) + return fn(a, *args, inplace=False, out=a, **kwargs) + else: + return fn(a, *args, inplace=False, **kwargs) + + return _fn + + +# celu is implemented specially because it has an alpha argument +# celu is very similar to elu +@register_decomposition(aten.celu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def celu( + a: TensorLikeType, alpha: Optional[NumberType] = None, inplace: bool = False +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.celu + """ + + if inplace: + raise NotImplementedError + + rhs: TensorLikeType + if alpha is not None: + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(alpha), python_type): + msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!" 
+ raise ValueError(msg) + rhs = alpha * torch.expm1(torch.true_divide(a, alpha)) # type: ignore[arg-type] + else: + rhs = torch.expm1(a) + + return torch.where(a > 0, a, rhs) + + +@_inplace_wrapper +@out_wrapper() +def dropout( + a: TensorLikeType, p: float = 0.5, training: bool = True, inplace: bool = False +) -> TensorLikeType: + if inplace: + raise NotImplementedError + + if not training: + return a + + torch._check( + p <= 1 and p >= 0, + lambda: f"dropout probability has to be between 0 and 1, but got, {p}", + ) + + if p == 1: + return torch.zeros_like(a) + + if p == 0: + return a + + scale = 1 / (1 - p) + dropout_mask = _dropout_helper(a, 1 - p) + + return a * dropout_mask * scale + + +@register_decomposition(aten.elu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def elu( + a: TensorLikeType, + alpha: NumberType = 1.0, + scale: NumberType = 1.0, + input_scale: NumberType = 1.0, + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.elu + """ + if inplace: + raise NotImplementedError + + # nb. This should be factored out into a can_cast aux function + python_type = utils.dtype_to_type(a.dtype) + torch._check( + utils.is_weakly_lesser_type(type(input_scale), python_type), + lambda: f"input_scale argument of type {type(input_scale)} cannot be safely cast to type {python_type}!", + ) + torch._check( + utils.is_weakly_lesser_type(type(scale), python_type), + lambda: f"scale argument of type {type(scale)} cannot be safely cast to type {python_type}!", + ) + torch._check( + utils.is_weakly_lesser_type(type(alpha), python_type), + lambda: f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!", + ) + + return torch.where(a > 0, scale * a, (alpha * scale) * torch.expm1(a * input_scale)) + + +@register_decomposition(aten.relu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def relu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.relu + """ + + if inplace: + raise NotImplementedError + + return torch.where(torch.le(a, 0), 0, a) + + +def group_norm( + input: Tensor, + num_groups: int, + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + eps: float = 1e-5, +) -> Tensor: + """ + Reference implementation of :func:`torch.nn.functional.group_norm`. 
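+
+    Illustrative example (added for exposition, not part of the original
+    source): the number of channels must be divisible by ``num_groups`` and
+    the output shape matches the input::
+
+        >>> x = torch.randn(2, 6, 4)
+        >>> torch.nn.functional.group_norm(x, 3).shape
+        torch.Size([2, 6, 4])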
+ """ + torch._check( + input.ndim >= 2, + lambda: f"Expected at least 2 dimensions for input tensor but received {input.ndim}", + ) + + batch_size = input.shape[0] + num_channels = input.shape[1] + torch._check( + num_channels % num_groups == 0, + lambda: "Expected number of channels in input to be divisible by num_groups, " + + f"but got input of shape {input.shape} and num_groups = {num_groups}", + ) + + # input shape is (N, C, *), so we flatten all inner dimensions except (N, C) + flattened_inner_size = 1 + for dim_length in input.shape[2:]: + flattened_inner_size *= dim_length + + return torch.native_group_norm( + input, + weight, + bias, + batch_size, + num_channels, + flattened_inner_size, + num_groups, + eps, + )[0] + + +def layer_norm( + input: Tensor, + normalized_shape: ShapeType, + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + eps: float = 1e-5, +) -> Tensor: + """ + Reference implementation of :func:`torch.nn.functional.layer_norm`. + """ + return torch.native_layer_norm(input, normalized_shape, weight, bias, eps)[0] + + +@register_decomposition(aten.leaky_relu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def leaky_relu( + a: TensorLikeType, negative_slope: float = 0.01, inplace: bool = False +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.leaky_relu + """ + + if inplace: + raise NotImplementedError + + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(negative_slope), python_type): + msg = f"negative_slope argument of type {type(negative_slope)} cannot be safely cast to type {python_type}!" + raise ValueError(msg) + return torch.where(torch.gt(a, 0), a, torch.mul(a, negative_slope)) + + +@register_decomposition(aten.mish) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def mish(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.mish + """ + + if inplace: + raise NotImplementedError + return a * torch.tanh(torch.nn.functional.softplus(a)) + + +@register_decomposition(aten.selu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def selu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.selu + """ + if inplace: + raise NotImplementedError + + alpha = 1.6732632423543772848170429916717 + scale = 1.0507009873554804934193349852946 + + rhs = alpha * torch.expm1(a) + + return scale * torch.where(a > 0, a, rhs) + + +# Forwarding alias: the functional variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def softmax( + a: TensorLikeType, + dim: Optional[int] = None, + _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True) + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # The error is for compat with regular PyTorch, which has this behavior + # deprecated. For PrimTorch, it's fine to drop support for deprecated + # behavior because it requires explicit opt in. This error is to inform + # users how to update their calls. 
+ torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X") + return torch.softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +# CompositeImplicitAutograd - don't register decomp +def softmin( + a: TensorLikeType, + dim: Optional[int] = None, + _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True) + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # The error is for compat with regular PyTorch, which has this behavior + # deprecated. For PrimTorch, it's fine to drop support for deprecated + # behavior because it requires explicit opt in. This error is to inform + # users how to update their calls. + torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X") + return torch.softmax(a=-a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +# softplus is implemented specially because it has beta and threshold arguments +@register_decomposition(aten.softplus) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def softplus( + a: TensorLikeType, + beta: Optional[NumberType] = None, + threshold: NumberType = 20, + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.softplus + """ + + if inplace: + raise NotImplementedError + + rhs: TensorLikeType + if beta is not None: + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(beta), python_type): + msg = f"beta argument of type {type(beta)} cannot be safely cast to type {python_type}!" + raise ValueError(msg) + scaled_input = a * beta + rhs = torch.true_divide(torch.log1p(torch.exp(scaled_input)), beta) # type: ignore[arg-type] + + else: + scaled_input = a + rhs = torch.log1p(torch.exp(scaled_input)) + + return torch.where(scaled_input > threshold, a, rhs) + + +@aten.hardshrink.default.py_impl(DispatchKey.Autograd) +@register_decomposition(aten.hardshrink) +@out_wrapper() +def hardshrink(a: TensorLikeType, lambd: float = 0.5): + # Formula for reference, + # hardshrink(x) = x if x > lambd + # = x if x < -lambd + # = 0 otherwise + return torch.where(torch.abs(a) <= lambd, 0, a) + + +@aten.softshrink.default.py_impl(DispatchKey.Autograd) +@register_decomposition(aten.softshrink) +@out_wrapper() +def softshrink(a: TensorLikeType, lambd: float = 0.5): + # Formula for reference, + # softshrink(x) = x - lambd if x > lambd + # = x + lambd if x < -lambd + # = 0 otherwise + torch._check( + lambd >= 0, + lambda: f"lambda must be greater or equal to 0, but found to be {lambd}", + ) + # We implement this in one torch.where to generate better code in the backward + # see https://github.com/pytorch/pytorch/pull/107052#discussion_r1293748211 + return torch.where(torch.abs(a) > lambd, a - torch.sign(a) * lambd, 0) + + +# Losses +def _reduction_int_to_str(reduction: int) -> str: + from torch._decomp.decompositions import Reduction + + if reduction == Reduction.NONE.value: + return "none" + elif reduction == Reduction.MEAN.value: + return "mean" + elif reduction == Reduction.SUM.value: + return "sum" + else: + raise ValueError(f"{reduction} is not a valid value for reduction") + + +def _apply_loss_reduction(loss: TensorLikeType, reduction: str) -> TensorLikeType: + if reduction == "sum": + return torch.sum(loss) + elif reduction == "mean": + return torch.mean(loss) + else: # reduction == "none" + return loss + + +def _check_reduction_value(reduction: str): + if reduction not in 
("mean", "sum", "none"): + raise ValueError(f"{reduction} is not a valid value for reduction") + + +# This helper function maps depreciated arguments, "size_average" and "reduce" +# to their corresponding "reduction" string argument +def _get_string_reduction_arg( + *, size_average: Optional[bool], reduce: Optional[bool] +) -> str: + if size_average is None: + size_average = True + if reduce is None: + reduce = True + if size_average and reduce: + ret = "mean" + elif reduce: + ret = "sum" + else: + ret = "none" + return ret + + +# CompositeImplicitAutograd - don't register decomp +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) +def l1_loss( + input: TensorLikeType, + target: TensorLikeType, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.l1_loss + """ + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + loss = torch.abs(input - target) + return _apply_loss_reduction(loss, reduction) + + +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) +def smooth_l1_loss( + input: TensorLikeType, + target: TensorLikeType, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", + beta: float = 1.0, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.smooth_l1_loss + """ + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + + if beta == 0.0: + return torch.nn.functional.l1_loss( + input, target, size_average=size_average, reduce=reduce, reduction=reduction + ) + else: + loss = torch.abs(input - target) + loss = torch.where(loss < beta, 0.5 * loss**2 / beta, loss - 0.5 * beta) + return _apply_loss_reduction(loss, reduction) + + +# Forwarding alias: the functional variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def log_softmax( + a: TensorLikeType, + dim: Optional[int] = None, + _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True) + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # The error is for compat with regular PyTorch, which has this behavior + # deprecated. For PrimTorch, it's fine to drop support for deprecated + # behavior because it requires explicit opt in. This error is to inform + # users how to update their calls. 
+ torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X") + return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +@register_decomposition(aten.margin_ranking_loss) +def margin_ranking_loss( + input1: TensorLikeType, + input2: TensorLikeType, + target: TensorLikeType, + margin: float = 0.0, + reduction: str = "mean", +) -> TensorLikeType: + # loss_without_reduction = max(0, -target * (input1 - input2) + margin) + if input1.ndim != input2.ndim or input1.ndim != target.ndim: + raise RuntimeError( + "margin_ranking_loss : All input tensors should have same dimension but got sizes: " + f"input1: {input1.shape}, input2: {input2.shape}, target: {target.shape} " + ) + _check_reduction_value(reduction) + loss = torch.clamp_min(-target * (input1 - input2) + margin, 0) + return _apply_loss_reduction(loss, reduction) + + +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) +def mse_loss( + input: TensorLikeType, + target: TensorLikeType, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + loss = torch.pow(input - target, 2) + return _apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.hinge_embedding_loss) +def hinge_embedding_loss( + input: TensorLikeType, + target: TensorLikeType, + margin: float = 1.0, + reduction: str = "mean", +) -> TensorLikeType: + # loss_without_reduction = input if y == 1 + # = max(0, margin - input) if y == -1 + _check_reduction_value(reduction) + margin_clamp = torch.clamp_min(margin - input, 0) + output_margin = torch.where(target != 1, margin_clamp, 0) + output_self = torch.where(target != -1, input, 0) + loss = output_margin + output_self + return _apply_loss_reduction(loss, reduction) + + +def _nll_loss_nd( + input: TensorLikeType, + target: TensorLikeType, + weight: Optional[TensorLikeType], + reduction: str, + ignore_index: int, +) -> TensorLikeType: + torch._check( + input.ndim > 0 and input.ndim <= 3, + lambda: f"Expected input dimension to be either [1, 2, 3] but received {input.ndim}.", + ) + + torch._check( + (input.ndim == 1) or (input.shape[0] == target.shape[0]), + lambda: f"Expected input batch size {input.shape[0]} to match target batch size {target.shape[0]}.", + ) + + _check_reduction_value(reduction) + + flat_target = torch.flatten(target) + ignore_classes_mask = torch.eq(flat_target, ignore_index) + + # TODO: Enable data-dependent checks with debug mode + # TODO: This check does not work with FakeTensor inputs; See Issue #85834 + # Explicit cast for class_check to bool; See Issue #78071 + """ + from torch._subclasses.fake_tensor import FakeTensor + num_classes = input.shape[1] if input.ndim > 1 else input.shape[0] + valid_classes_mask = torch.logical_and( + (flat_target >= 0), (flat_target < num_classes) + ) + class_check = torch.all(torch.logical_or(ignore_classes_mask, valid_classes_mask)) + torch._check( + isinstance(target, FakeTensor) or bool(class_check.item()), + lambda: "A target class is out-of-bounds and not 
the ignore index.", + ) + """ + + ignore_class_weight = torch.scalar_tensor(0, dtype=input.dtype, device=input.device) + class_weight = ( + torch.scalar_tensor(1, dtype=input.dtype, device=input.device) + if weight is None + else weight[flat_target] + ) + current_weight = torch.where( + ignore_classes_mask, + ignore_class_weight, + class_weight, + ) + + if input.ndim == 1: + # implicit batch size = 1 + # input (1 batch size, C classes) + loss = -input[target] * current_weight + elif input.ndim == 2: + # input (N batch size, C classes) + batch_size = input.shape[0] + loss = -input[torch.arange(batch_size), target] * current_weight + else: + # 3D case (N batch size, C classe, K dimensions) + # input (N batch size, C classes, K) + batch_size = input.shape[0] + extent = input.shape[2] + numel = batch_size * extent + indices = torch.arange(numel) + bdx = indices // extent + kdx = indices % extent + loss = -input[bdx, flat_target, kdx] * current_weight + loss = torch.reshape(loss, target.shape) + + if reduction == "none": + return loss + elif reduction == "sum": + return torch.sum(loss) + else: + # calculate weighted mean of the loss function + return torch.sum(loss) / torch.sum(current_weight) + + +@register_decomposition(aten.nll_loss) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("input",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def nll_loss( + input: TensorLikeType, + target: TensorLikeType, + weight: Optional[TensorLikeType] = None, + size_average: Optional[bool] = None, + ignore_index: int = -100, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.nll_loss + """ + torch._check( + input.ndim > 0, + lambda: f"Expected input tensor to have 1 or more dimensions (got {input.ndim})", + ) + + # TODO: raise exception instead of converting value + # msg = "size_average and reduce args are deprecated, please use reduction argument." + # Convert these options for consistency with the eager mode + if size_average is not None or reduce is not None: + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + + # The expected behavior when the target and input have zero elements: + # reduction = 'none' --- tensor([]) + # reduction = 'sum' --- tensor(0.) + # reduction = 'mean' --- tensor(nan) + # Mean reduction on empty tensors produces NaN. See the discussion in + # https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162 + if input.numel() == 0 and target.numel() == 0: + if reduction == "none": + return torch.zeros_like(target) + elif reduction == "sum": + return torch.empty_like(target) + else: + return torch.full_like(target, float("nan")) + + # The _nll_loss_nd helper function handles the most common cases. + # ndim == 1 (Single Example) + # => Batch Size: 1, Input: (C), Target: () + # ndim == 2 (k = 1) + # => Batch Size: N, Input: (N, C), Target: (N) + # ndim == 3 (k > 1) + # => Batch Size: N, Input: (N, C, K), Target: (N, K) + if input.ndim <= 3: + return _nll_loss_nd(input, target, weight, reduction, ignore_index) + + # For ndim > 3, we reshape the input and target to 3-D case. 
+ # Input (N batch-size, C classes, k-dimensions) + # Target (N batch-size, k-dimensions) + torch._check( + input.ndim > 0 and target.ndim > 0 and target.shape[1:] == input.shape[2:], + lambda: ( + "Expected input and target to both have ndim > 0 and " + "target.shape[1:] == input.shape[2:], but got " + f"target.shape {target.shape} and input.shape {input.shape}" + ), + ) + + batch_size = input.shape[0] + num_classes = input.shape[1] + out_size = [batch_size] + list(target.shape[1:]) + + input = torch.reshape(input, [batch_size, num_classes, -1]) + target = torch.reshape(target, [batch_size, -1]) + if reduction != "none": + return _nll_loss_nd(input, target, weight, reduction, ignore_index) + else: + result = _nll_loss_nd(input, target, weight, reduction, ignore_index) + # reshape flattened inner-dim to original k-dimensions + return torch.reshape(result, out_size) + + +# TODO: This ref supports int reduction and out kwarg to be compatible with ATen: +# https://github.com/pytorch/pytorch/issues/83931 +# TODO: Could be rewritten to support complex: +# https://github.com/pytorch/pytorch/pull/85041 +@register_decomposition(aten.huber_loss) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def huber_loss( + input: TensorLikeType, + target: TensorLikeType, + reduction: Union[str, int] = "mean", + delta: float = 1.0, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.huber_loss + """ + if type(reduction) is int: + reduction = _reduction_int_to_str(reduction) + _check_reduction_value(reduction) # type: ignore[arg-type] + torch._check( + delta > 0, + lambda: "huber_loss does not support non-positive values for delta.", + ) + z = (input - target).abs() + loss = torch.where(z < delta, 0.5 * z * z, delta * (z - 0.5 * delta)) + return _apply_loss_reduction(loss, reduction) # type: ignore[arg-type] + + +# tanhshrink does not use _make_elementwise_unary_reference because it does not support out +@elementwise_unary_scalar_wrapper +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def tanhshrink(a: TensorLikeType) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.tanhshrink + """ + if not isinstance(a, TensorLike): + raise RuntimeError( + "Expected a tensor input for an elementwise unary operation!" 
+ ) + return a - torch.tanh(a) + + +@register_decomposition(aten.threshold) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def threshold( + a: TensorLikeType, + threshold: NumberType, + value: Union[bool, int, float], + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.threshold + """ + + if inplace: + raise NotImplementedError + + return torch.where(a <= threshold, value, a) + + +# CompositeImplicitAutograd - don't register decomp +# No elementwise type promotion - core op doesn't explicitly type promote +def triplet_margin_loss( + anchor: TensorLikeType, + positive: TensorLikeType, + negative: TensorLikeType, + margin: float = 1.0, + p: float = 2, + eps: float = 1e-6, + swap: bool = False, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + + if margin <= 0: + raise ValueError(f"margin must be greater than 0, got {margin}") + + # torch.nn.functional.triplet_margin_with_distance_loss has no ref defined + # since it's a pure Python implementation. Use this helper instead. + return _triplet_margin_with_distance_loss( + anchor=anchor, + positive=positive, + negative=negative, + distance_function=lambda x, y: torch.pairwise_distance(x, y, p, eps), + margin=margin, + swap=swap, + reduction=reduction, + ) + + +# Pure Python impl - don't register decomp and don't add a ref. Defined as a +# helper here since triplet_margin_loss can be nicely implemented with it. +def _triplet_margin_with_distance_loss( + anchor: TensorLikeType, + positive: TensorLikeType, + negative: TensorLikeType, + *, + distance_function: Optional[ + Callable[[TensorLikeType, TensorLikeType], TensorLikeType] + ] = None, + margin: float = 1.0, + swap: bool = False, + reduction: str = "mean", +) -> TensorLikeType: + _check_reduction_value(reduction) + + a_dim = anchor.ndim + p_dim = positive.ndim + n_dim = negative.ndim + torch._check( + a_dim == p_dim and p_dim == n_dim, + lambda: ( + f"The anchor, positive, and negative tensors are expected to have " + f"the same number of dimensions, but got: anchor {a_dim}D, " + f"positive {p_dim}D, and negative {n_dim}D inputs" + ), + ) + + if distance_function is None: + distance_function = torch.pairwise_distance + + dist_pos = distance_function(anchor, positive) + dist_neg = distance_function(anchor, negative) + # The distance swap is described in the paper "Learning shallow + # convolutional feature descriptors with triplet losses" by V. Balntas, E. + # Riba et al. If True, and if the positive example is closer to the + # negative example than the anchor is, swaps the positive example and the + # anchor in the loss computation. 
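+    # In symbols: with d_p = d(anchor, positive) and d_n = d(anchor, negative),
+    # swap=True replaces d_n with min(d(anchor, negative), d(positive, negative))
+    # before the loss max(margin + d_p - d_n, 0) is evaluated below.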
+    if swap:
+        dist_swap = distance_function(positive, negative)
+        dist_neg = torch.minimum(dist_neg, dist_swap)
+    loss = torch.clamp_min(margin + dist_pos - dist_neg, 0)
+    return _apply_loss_reduction(loss, reduction)
+
+
+@register_decomposition(aten.hardtanh)
+@_inplace_wrapper
+@out_wrapper()
+@elementwise_unary_scalar_wrapper
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("a",),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def hardtanh(
+    a: TensorLikeType,
+    min_val: NumberType = -1,
+    max_val: NumberType = 1,
+    inplace: bool = False,
+) -> TensorLikeType:
+    """
+    Reference implementation of torch.nn.functional.hardtanh
+    """
+    if inplace:
+        raise NotImplementedError
+    if utils.is_boolean_dtype(a.dtype):
+        raise RuntimeError("Bool inputs not supported for hardtanh")
+
+    # preserve legacy behavior of boundaries not causing type promotion
+    if utils.is_integer_dtype(a.dtype):
+        min_val = int(min_val)  # type: ignore[arg-type]
+        max_val = int(max_val)  # type: ignore[arg-type]
+        if not (a.dtype != torch.uint8 or (min_val >= 0 and max_val >= 0)):
+            raise RuntimeError(
+                "Cannot do hardtanh on an unsigned type with negative limits"
+            )
+
+    if min_val > max_val:  # type: ignore[operator]
+        raise ValueError("min_val cannot be greater than max_val")
+
+    return torch.clamp(a, min_val, max_val)  # type: ignore[arg-type]
+
+
+@register_decomposition(aten.gelu)
+@out_wrapper()
+@elementwise_unary_scalar_wrapper
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("a",),
+    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def gelu(a: TensorLikeType, approximate: str = "none") -> TensorLikeType:
+    """
+    Reference implementation of torch.nn.functional.gelu
+    """
+    if not isinstance(a, TensorLike):
+        raise RuntimeError(
+            "Expected a tensor input for an elementwise unary operation!"
+        )
+    M_SQRT2 = 1.41421356237309504880
+    M_SQRT1_2 = 0.70710678118654752440
+    M_2_SQRTPI = 1.12837916709551257390
+    if approximate == "tanh":
+        kBeta = M_SQRT2 * M_2_SQRTPI * 0.5
+        kKappa = 0.044715
+        a_cube = a * a * a
+        inner = kBeta * (a + kKappa * a_cube)
+        return 0.5 * a * (1 + torch.tanh(inner))
+    elif approximate == "none":
+        kAlpha = M_SQRT1_2
+        return a * 0.5 * (1 + torch.erf(a * kAlpha))
+    else:
+        raise RuntimeError("approximate argument must be either none or tanh.")
+
+
+# CompositeImplicitAutograd - don't register decomp
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("input", "target"),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def poisson_nll_loss(
+    input: TensorLikeType,
+    target: TensorLikeType,
+    log_input: bool = True,
+    full: bool = False,
+    size_average: Optional[bool] = None,
+    eps: float = 1e-8,
+    reduce: Optional[bool] = None,
+    reduction: str = "mean",
+) -> TensorLikeType:
+    """
+    Reference implementation of torch.nn.functional.poisson_nll_loss
+    """
+    if size_average is not None or reduce is not None:
+        # TODO: Raise exception instead of converting value. This is only for
+        # primTorch since it can drop support for deprecated arguments.
+        # msg = "size_average and reduce args are deprecated, please use reduction argument."
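+        # For example: (size_average=False, reduce=True) maps to "sum" and
+        # (size_average=None, reduce=False) maps to "none"; see
+        # _get_string_reduction_arg above.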
+        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
+    _check_reduction_value(reduction)
+    if log_input:
+        loss = torch.exp(input) - target * input
+    else:
+        loss = input - target * torch.log(input + eps)
+
+    if full:
+        stirling_term = (
+            target * torch.log(target) - target + 0.5 * torch.log(2 * torch.pi * target)
+        )
+        # avoid inplace add
+        loss = loss + stirling_term.masked_fill(target <= 1, 0)
+    return _apply_loss_reduction(loss, reduction)
+
+
+@register_decomposition(aten.prelu)
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("a", "weight"),
+    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def prelu(a: TensorLikeType, weight: TensorLikeType) -> TensorLikeType:
+    """
+    Reference implementation of torch.nn.functional.prelu
+    """
+    torch._check(
+        isinstance(a, TensorLike),
+        lambda: f"prelu: Expected `a` to be a tensor, but got: {type(a)}",
+    )
+    torch._check(
+        isinstance(weight, TensorLike),
+        lambda: f"prelu: Expected `weight` to be a tensor, but got: {type(weight)}",
+    )
+
+    if weight.numel() != 1:
+        torch._check(a.ndim > 0, lambda: "Zero-dim input tensor is not allowed.")
+        channel_size = a.shape[1] if a.ndim >= 2 else 1
+        torch._check(
+            weight.numel() == channel_size,
+            lambda: f"Mismatch of parameter numbers and input channel size. Found parameter numbers ="
+            f" {weight.numel()} and channel size = {channel_size}.",
+        )
+
+    torch._check(
+        weight.ndim == 0 or weight.ndim == 1,
+        lambda: f"prelu: Expected `weight` to be a scalar or 1D tensor, but got: "
+        f"ndim = {weight.ndim}",
+    )
+    if a.ndim == 0:
+        weight = weight[0] if weight.ndim == 1 else weight
+    else:
+        weight = prims.broadcast_in_dim(
+            weight, a.shape, tuple() if weight.ndim == 0 else (0 if a.ndim == 1 else 1,)
+        )
+
+    return torch.where(a > 0, a, a * weight)
+
+
+@register_decomposition(aten.relu6)
+@_inplace_wrapper
+@out_wrapper()
+def relu6(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
+    """
+    Reference implementation of torch.nn.functional.relu6
+    """
+    if inplace:
+        raise NotImplementedError
+
+    # See https://github.com/pytorch/pytorch/pull/81142#discussion_r918220126
+    # It may be better to use clamp here, but we use hardtanh to replicate
+    # the behavior of the existing implementation
+    return torch.nn.functional.hardtanh(a, 0, 6)
+
+
+@register_decomposition(aten.glu)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("a",),
+    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def glu(a: TensorLikeType, dim: int = -1) -> TensorLikeType:
+    dim = utils.canonicalize_dims(a.ndim, dim)
+    torch._check(
+        a.shape[dim] % 2 == 0,
+        lambda: f"Halving dimension must be even, but dimension {dim} is size {a.shape[dim]}",
+    )
+    b, c = torch.tensor_split(a, 2, dim)
+
+    return b * torch.sigmoid(c)
+
+
+@register_decomposition(aten.pairwise_distance)
+@out_wrapper()
+def pairwise_distance(
+    x1: TensorLikeType,
+    x2: TensorLikeType,
+    p: NumberType = 2.0,
+    eps: NumberType = 1e-6,
+    keepdim=False,
+) -> TensorLikeType:
+    return torch.linalg.vector_norm(x1 - x2 + eps, ord=p, dim=-1, keepdim=keepdim)
+
+
+@register_decomposition(aten.pdist)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("a",),
+    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def pdist(a: TensorLikeType, p: float = 2) -> TensorLikeType:
+    torch._check(a.ndim == 2, lambda: f"pdist only supports 2D tensors, got: {a.ndim}D")
+    torch._check(p >= 0, lambda: "pdist only supports non-negative p values")
+    # For p == 2 we can use an efficient implementation, but other values of p
+    # require creating a much bigger tensor for an intermediate step
+    if p == 2:
+        aTa = torch.mm(a, a.T)
+        aTa_diag = torch.diag(aTa)
+        t = torch.sqrt(torch.clamp(aTa_diag + aTa_diag.unsqueeze(-1) - 2 * aTa, min=0))
+    else:
+        t = torch.linalg.vector_norm(a.unsqueeze(1) - a, ord=p, dim=2)
+    i = torch.triu_indices(t.shape[0], t.shape[1], offset=1, device=a.device)
+    return t.flatten().index_select(0, i[0] * t.shape[0] + i[1])
+
+
+@register_decomposition(aten.pixel_shuffle)
+@out_wrapper()
+def pixel_shuffle(self: Tensor, upscale_factor: int):
+    torch._check(
+        self.dim() >= 3,
+        lambda: f"pixel_shuffle expects input to have at least 3 dimensions, but got input with {self.dim()} dimension(s)",
+    )
+    batch = self.shape[:-3]
+    C_out = self.shape[-3] // upscale_factor**2
+    HW_out = (self.shape[-2] * upscale_factor, self.shape[-1] * upscale_factor)
+    n = len(batch)
+    B_dims = range(n)
+    C_dim, r1_dim, r2_dim, H_dim, W_dim = range(n, n + 5)
+    return (
+        self.view(
+            *batch,
+            C_out,
+            upscale_factor,
+            upscale_factor,
+            self.shape[-2],
+            self.shape[-1],
+        )
+        .permute(*B_dims, C_dim, H_dim, r1_dim, W_dim, r2_dim)
+        .reshape(*batch, C_out, *HW_out)
+        .clone(memory_format=utils.suggest_memory_format(self))
+    )
+
+
+@register_decomposition(aten.pixel_unshuffle)
+@out_wrapper()
+def pixel_unshuffle(self: Tensor, downscale_factor: int):
+    torch._check(
+        self.dim() >= 3,
+        lambda: f"pixel_unshuffle expects input to have at least 3 dimensions, but got input with {self.dim()} dimension(s)",
+    )
+    batch = self.shape[:-3]
+    C_out = self.shape[-3] * downscale_factor**2
+    HW_out = (self.shape[-2] // downscale_factor, self.shape[-1] // downscale_factor)
+    n = len(batch)
+    B_dims = range(n)
+    C_dim, H_dim, r1_dim, W_dim, r2_dim = range(n, n + 5)
+    return (
+        self.view(
+            *batch,
+            self.shape[-3],
+            HW_out[0],
+            downscale_factor,
+            HW_out[1],
+            downscale_factor,
+        )
+        .permute(*B_dims, C_dim, r1_dim, r2_dim, H_dim, W_dim)
+        .reshape(*batch, C_out, *HW_out)
+        .clone(memory_format=utils.suggest_memory_format(self))
+    )
+
+
+# Needed as aten.{celu_,elu_...} exist (even if they don't have the in-place kwarg)
+celu_ = _make_inplace(celu)
+elu_ = _make_inplace(elu)
+mish_ = _make_inplace(mish)
+selu_ = _make_inplace(selu)
+threshold_ = _make_inplace(threshold)
diff --git a/parrot/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84e29e6953b5264d6288cf05482ea7c0cc0c32d7
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/torch/_refs/special/__init__.py b/parrot/lib/python3.10/site-packages/torch/_refs/special/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e98deaeb16d9e923beacb12cdc9c5f4f6bdbde6
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/_refs/special/__init__.py
@@ -0,0 +1,237 @@
+# mypy: allow-untyped-defs
+import math
+from typing import Optional, Union
+
+import torch
+import torch._prims as prims
+import torch._prims_common as utils
+import torch._refs as refs
+
+from torch import Tensor
+from torch._decomp import register_decomposition
+from torch._prims_common import (
+    ELEMENTWISE_TYPE_PROMOTION_KIND,
+    Number,
+    NumberType,
+    TensorLike,
+    TensorLikeType,
+)
+from torch._prims_common.wrappers import elementwise_type_promotion_wrapper, out_wrapper
+from torch._refs import (
+    _make_alias,
+    _make_elementwise_binary_reference,
+    _make_elementwise_unary_reference,
+)
+
+
+__all__ = [
+    "bessel_j0",
+    "bessel_j1",
+    "entr",
+    "erfcx",
+    "expit",
+    "i0e",
+    "i1",
+    "i1e",
+    "log_ndtr",
+    "logit",
+    "log_softmax",
+    "multigammaln",
+    "ndtr",
+    "ndtri",
+    "softmax",
+    "spherical_bessel_j0",
+    "xlog1py",
+    "zeta",
+]
+aten = torch._ops.ops.aten
+
+
+@_make_elementwise_unary_reference(
+    ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def bessel_j0(a: TensorLikeType) -> TensorLikeType:
+    return prims.bessel_j0(a)
+
+
+@_make_elementwise_unary_reference(
+    ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def bessel_j1(a: TensorLikeType) -> TensorLikeType:
+    return prims.bessel_j1(a)
+
+
+@register_decomposition(aten.special_entr)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("a",),
+    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def entr(a: TensorLikeType) -> TensorLikeType:
+    return torch.where(
+        torch.isnan(a),
+        a,
+        torch.where(a > 0, -a * torch.log(a), torch.where(a == 0, 0, -torch.inf)),
+    )
+
+
+@register_decomposition(aten.special_erfcx)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("a",),
+    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def erfcx(a: TensorLikeType) -> TensorLikeType:
+    return prims.erfcx(a)
+
+
+# alias for sigmoid
+expit = _make_alias(torch.sigmoid, "expit")
+
+
+@_make_elementwise_unary_reference(
+    ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def i0e(a: TensorLikeType) -> TensorLikeType:
+    return prims.bessel_i0e(a)
+
+
+@_make_elementwise_unary_reference(
+    ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def i1(a: TensorLikeType) -> TensorLikeType:
+    return prims.bessel_i1(a)
+
+
+@_make_elementwise_unary_reference(
+    ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def i1e(a: TensorLikeType) -> TensorLikeType:
+    return prims.bessel_i1e(a)
+
+
+@register_decomposition(aten.special_log_ndtr)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("a",),
+    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def log_ndtr(a: TensorLikeType) -> TensorLikeType:
+    # Note: M_SQRT1_2 is the value of 1 / sqrt(2)
+    M_SQRT1_2 = 0.707106781186547524400844362104849039
+    t = a * M_SQRT1_2
+    return torch.where(
+        a < 1.0,
+        torch.log(torch.special.erfcx(-t) / 2) - t * t,
+        torch.log1p(-torch.erfc(t) / 2),
+    )
+
+
+@register_decomposition(aten.logit)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("self",),
+    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def logit(self: TensorLikeType, eps: Optional[float] = None) -> TensorLikeType:
+    if eps is None:
+        eps = -1.0
+    lo = eps
+    hi = 1 - eps
+    self = torch.clamp(self, lo, hi)
+    return torch.log(torch.true_divide(self, torch.sub(1, self)))
+
+
+@register_decomposition(aten.special_xlog1py)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("a", "b"),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
+)
+def xlog1py(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]):
+    torch._check(
+        isinstance(a, TensorLike) or isinstance(b, TensorLike),
+        lambda: "Expected either argument a or b to be a Tensor",
+    )
+
+    # Operations like eq and log do not handle scalar values, so we
convert them to scalar_tensors. + if isinstance(a, TensorLike) and isinstance(b, Number): + b = refs.scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(b, TensorLike) and isinstance(a, Number): + a = refs.scalar_tensor(a, dtype=b.dtype, device=b.device) + + # mypy: expected "Tensor" + assert isinstance(a, TensorLike) + assert isinstance(b, TensorLike) + rhs = torch.where(torch.eq(a, 0), 0, torch.mul(a, torch.log1p(b))) + return torch.where(torch.isnan(b), float("nan"), rhs) + + +@register_decomposition(aten.mvlgamma) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def multigammaln(a: TensorLikeType, p: int) -> TensorLikeType: + c = 0.25 * p * (p - 1) * math.log(math.pi) + b = 0.5 * torch.arange(start=(1 - p), end=1, step=1, dtype=a.dtype, device=a.device) + return torch.sum(torch.lgamma(a.unsqueeze(-1) + b), dim=-1) + c + + +@register_decomposition(aten.special_ndtr) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def ndtr(a: TensorLikeType) -> TensorLikeType: + # Note: M_SQRT1_2 is the value of 1 / sqrt(2) + M_SQRT1_2 = 0.707106781186547524400844362104849039 + a_sqrt_2 = a * M_SQRT1_2 + return (1 + torch.erf(a_sqrt_2)) * 0.5 + + +@register_decomposition(aten.special_ndtri) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def ndtri(a: TensorLikeType) -> TensorLikeType: + return prims.ndtri(a) + + +# Forwarding alias: the special variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def log_softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +# Forwarding alias: the special variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + return torch.softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def spherical_bessel_j0(a: TensorLikeType) -> TensorLikeType: + return prims.spherical_bessel_j0(a) + + +# TODO: add docstring +@_make_elementwise_binary_reference( + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def zeta(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.zeta(a, b) diff --git a/parrot/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e674afbc61482cb853ed876c990d00bcf80bc61 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_vmap_internals.py b/parrot/lib/python3.10/site-packages/torch/_vmap_internals.py new file mode 100644 index 0000000000000000000000000000000000000000..cc23d7851eb553a659351b673de8cb9bbfb6a473 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_vmap_internals.py @@ -0,0 +1,238 @@ +# mypy: allow-untyped-defs 
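+# Illustrative usage of the private `_vmap` helper defined below (a sketch;
+# the shapes are assumptions chosen for demonstration, not upstream text):
+#
+#     xs, ys = torch.randn(3, 5), torch.randn(3, 5)
+#     torch._vmap_internals._vmap(torch.dot)(xs, ys)  # -> tensor of shape (3,)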
+import functools +from typing import Any, Callable, List, Optional, Tuple, Union +from typing_extensions import deprecated + +import torch +from torch import Tensor +from torch.utils._pytree import _broadcast_to_and_flatten, tree_flatten, tree_unflatten + +in_dims_t = Union[int, Tuple] +out_dims_t = Union[int, Tuple[int, ...]] + + +# Checks that all args-to-be-batched have the same batch dim size +def _validate_and_get_batch_size( + flat_in_dims: List[Optional[int]], flat_args: List +) -> int: + batch_sizes = [ + arg.size(in_dim) + for in_dim, arg in zip(flat_in_dims, flat_args) + if in_dim is not None + ] + if batch_sizes and any(size != batch_sizes[0] for size in batch_sizes): + raise ValueError( + f"vmap: Expected all tensors to have the same size in the mapped " + f"dimension, got sizes {batch_sizes} for the mapped dimension" + ) + return batch_sizes[0] + + +def _num_outputs(batched_outputs: Union[Tensor, Tuple[Tensor, ...]]) -> int: + if isinstance(batched_outputs, tuple): + return len(batched_outputs) + return 1 + + +# If value is a tuple, check it has length `num_elements`. +# If value is not a tuple, make a tuple with `value` repeated `num_elements` times +def _as_tuple( + value: Any, num_elements: int, error_message_lambda: Callable[[], str] +) -> Tuple: + if not isinstance(value, tuple): + return (value,) * num_elements + if len(value) != num_elements: + raise ValueError(error_message_lambda()) + return value + + +# Creates BatchedTensors for every Tensor in arg that should be batched. +# Returns the (potentially) batched arguments and the batch_size. +def _create_batched_inputs( + in_dims: in_dims_t, args: Tuple, vmap_level: int, func: Callable +) -> Tuple[Tuple, int]: + if not isinstance(in_dims, int) and not isinstance(in_dims, tuple): + raise ValueError( + f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(): " + f"expected `in_dims` to be int or a (potentially nested) tuple " + f"matching the structure of inputs, got: {type(in_dims)}." + ) + if len(args) == 0: + raise ValueError( + f"vmap({_get_name(func)})(): got no inputs. Maybe you forgot to add " + f"inputs, or you are trying to vmap over a function with no inputs. " + f"The latter is unsupported." + ) + + flat_args, args_spec = tree_flatten(args) + flat_in_dims = _broadcast_to_and_flatten(in_dims, args_spec) + if flat_in_dims is None: + raise ValueError( + f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(): " + f"in_dims is not compatible with the structure of `inputs`. " + f"in_dims has structure {tree_flatten(in_dims)[1]} but inputs " + f"has structure {args_spec}." + ) + + for arg, in_dim in zip(flat_args, flat_in_dims): + if not isinstance(in_dim, int) and in_dim is not None: + raise ValueError( + f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(): " + f"Got in_dim={in_dim} for an input but in_dim must be either " + f"an integer dimension or None." + ) + if isinstance(in_dim, int) and not isinstance(arg, Tensor): + raise ValueError( + f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(): " + f"Got in_dim={in_dim} for an input but the input is of type " + f"{type(arg)}. We cannot vmap over non-Tensor arguments, " + f"please use None as the respective in_dim" + ) + if in_dim is not None and (in_dim < 0 or in_dim >= arg.dim()): + raise ValueError( + f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(): " + f"Got in_dim={in_dim} for some input, but that input is a Tensor " + f"of dimensionality {arg.dim()} so expected in_dim to satisfy " + f"0 <= in_dim < {arg.dim()}." 
+            )
+
+    batch_size = _validate_and_get_batch_size(flat_in_dims, flat_args)
+    # See NOTE [Ignored _remove_batch_dim, _add_batch_dim]
+    batched_inputs = [
+        arg if in_dim is None else torch._add_batch_dim(arg, in_dim, vmap_level)
+        for in_dim, arg in zip(flat_in_dims, flat_args)
+    ]
+    return tree_unflatten(batched_inputs, args_spec), batch_size
+
+
+# Undoes the batching (and any batch dimensions) associated with the `vmap_level`.
+def _unwrap_batched(
+    batched_outputs: Union[Tensor, Tuple[Tensor, ...]],
+    out_dims: out_dims_t,
+    vmap_level: int,
+    batch_size: int,
+    func: Callable,
+    allow_none_pass_through: bool = False,
+) -> Tuple:
+    num_outputs = _num_outputs(batched_outputs)
+    out_dims_as_tuple = _as_tuple(
+        out_dims,
+        num_outputs,
+        lambda: f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must "
+        f"have one dim per output (got {num_outputs} outputs) of {_get_name(func)}.",
+    )
+
+    # NOTE [Ignored _remove_batch_dim, _add_batch_dim]
+    # There is something wrong with our type bindings for functions that begin
+    # with '_', see #40397.
+    if isinstance(batched_outputs, Tensor):
+        out_dim = out_dims_as_tuple[0]
+        return torch._remove_batch_dim(batched_outputs, vmap_level, batch_size, out_dim)  # type: ignore[return-value]
+    if allow_none_pass_through:
+        return tuple(
+            (
+                torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)
+                if out is not None
+                else None
+            )
+            for out, out_dim in zip(batched_outputs, out_dims_as_tuple)
+        )
+    else:
+        return tuple(
+            torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)
+            for out, out_dim in zip(batched_outputs, out_dims_as_tuple)
+        )
+
+
+# Checks that `fn` returned one or more Tensors and nothing else.
+# NB: A Python function that returns multiple values returns a single tuple,
+# so we are effectively checking that `outputs` is a single Tensor or a tuple of
+# Tensors.
+def _validate_outputs(outputs: Any, func: Callable) -> None:
+    if isinstance(outputs, Tensor):
+        return
+    if not isinstance(outputs, tuple):
+        raise ValueError(
+            f"vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return "
+            f"Tensors, got type {type(outputs)} as the return."
+        )
+    for idx, output in enumerate(outputs):
+        if isinstance(output, Tensor):
+            continue
+        raise ValueError(
+            f"vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return "
+            f"Tensors, got type {type(output)} for return {idx}."
+        )
+
+
+def _check_out_dims_is_int_or_int_tuple(out_dims: out_dims_t, func: Callable) -> None:
+    if isinstance(out_dims, int):
+        return
+    if not isinstance(out_dims, tuple) or not all(
+        isinstance(out_dim, int) for out_dim in out_dims
+    ):
+        raise ValueError(
+            f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must be "
+            f"an int or a tuple of int representing where in the outputs the "
+            f"vmapped dimension should appear."
+        )
+
+
+def _get_name(func: Callable):
+    if hasattr(func, "__name__"):
+        return func.__name__
+
+    # Not all callables have __name__, in fact, only static functions/methods do.
+    # A callable created via functools.partial or an nn.Module, to name some
+    # examples, don't have a __name__.
+    return repr(func)
+
+
+# vmap(func)(inputs) wraps all Tensor inputs to be batched in BatchedTensors,
+# sends those into func, and then unwraps the output BatchedTensors. Operations
+# on BatchedTensors perform the batched operations that the user is asking for.
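+# For example (a sketch, assuming a weight matrix shared across the batch):
+# with f = lambda x, w: x @ w, _vmap(f, in_dims=(0, None))(xs, w) maps f over
+# dim 0 of `xs` while reusing the same `w` for every slice, equivalent to
+# torch.stack([f(x, w) for x in xs]).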
+@deprecated( + "Please use `torch.vmap` instead of `torch._vmap_internals.vmap`.", + category=FutureWarning, +) +def vmap(func: Callable, in_dims: in_dims_t = 0, out_dims: out_dims_t = 0) -> Callable: + """ + Please use torch.vmap instead of this API. + """ + return _vmap(func, in_dims, out_dims) + + +# A version of vmap but without the initial "experimental prototype" warning +def _vmap( + func: Callable, + in_dims: in_dims_t = 0, + out_dims: out_dims_t = 0, + allow_none_pass_through: bool = False, +) -> Callable: + # The `allow_none_pass_through` argument is a temporary workaround may be removed. + # Currently it enables us to wrap the call in `autograd.grad` to the autograd engine, + # which may return None if any of the inputs are unused. See the issue discussing this: + # https://github.com/facebookresearch/functorch/issues/159. + @functools.wraps(func) + def wrapped(*args): + _check_out_dims_is_int_or_int_tuple(out_dims, func) + vmap_level = torch._C._vmapmode_increment_nesting() + try: + batched_inputs, batch_size = _create_batched_inputs( + in_dims, args, vmap_level, func + ) + batched_outputs = func(*batched_inputs) + if not allow_none_pass_through: + _validate_outputs(batched_outputs, func) + return _unwrap_batched( + batched_outputs, + out_dims, + vmap_level, + batch_size, + func, + allow_none_pass_through=allow_none_pass_through, + ) + finally: + torch._C._vmapmode_decrement_nesting() + + return wrapped diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__init__.py b/parrot/lib/python3.10/site-packages/torch/autograd/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aca9abb24070888620756454858cb52a33b06793 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/__init__.py @@ -0,0 +1,539 @@ +# mypy: allow-untyped-defs +""" +``torch.autograd`` provides classes and functions implementing automatic +differentiation of arbitrary scalar valued functions. It requires minimal +changes to the existing code - you only need to declare :class:`Tensor` s +for which gradients should be computed with the ``requires_grad=True`` keyword. +As of now, we only support autograd for floating point :class:`Tensor` types ( +half, float, double and bfloat16) and complex :class:`Tensor` types (cfloat, cdouble). +""" +import warnings +from typing import Any, Callable, cast, List, Optional, Sequence, Tuple, Union + +import torch + +from torch.types import _size, _TensorOrTensors, _TensorOrTensorsOrGradEdge +from .. import _vmap_internals +from ..overrides import handle_torch_function, has_torch_function, is_tensor_like +from . 
import forward_ad, functional, graph +from .anomaly_mode import detect_anomaly, set_detect_anomaly +from .function import Function, NestedIOFunction +from .grad_mode import ( + _force_original_view_tracking, + _unsafe_preserve_version_counter, + enable_grad, + inference_mode, + no_grad, + set_grad_enabled, + set_multithreading_enabled, +) +from .gradcheck import gradcheck, gradgradcheck +from .graph import _engine_run_backward + +from .variable import Variable + +__all__ = [ + "Variable", + "Function", + "backward", + "grad_mode", + "NestedIOFunction", + "detect_anomaly", + "enable_grad", + "grad", + "gradcheck", + "gradgradcheck", + "inference_mode", + "no_grad", + "set_detect_anomaly", + "set_grad_enabled", + "set_multithreading_enabled", + "variable", +] + +_OptionalTensor = Optional[torch.Tensor] +_ShapeorNestedShape = Union[_size, Sequence[_size], torch.Tensor] + + +def _calculate_shape( + output: torch.Tensor, grad: torch.Tensor, is_grads_batched: bool +) -> Tuple[_ShapeorNestedShape, _ShapeorNestedShape]: + # is_same_size ensures that both tensors are either nested or non nested + # circular import + from torch.nested._internal.nested_tensor import NestedTensor + + if output.is_nested and not isinstance(output, NestedTensor): + if is_grads_batched: + raise RuntimeError("Batched grads are not supported with Nested Tensor.") + out_shape = output._nested_tensor_size() + grad_shape = grad._nested_tensor_size() + + return out_shape, grad_shape + + reg_out_shape = output.shape + reg_grad_shape = grad.shape if not is_grads_batched else grad.shape[1:] + return reg_out_shape, reg_grad_shape + + +def _make_grads( + outputs: Sequence[torch.Tensor], + grads: Sequence[_OptionalTensor], + is_grads_batched: bool, +) -> Tuple[_OptionalTensor, ...]: + new_grads: List[_OptionalTensor] = [] + for out, grad in zip(outputs, grads): + if isinstance(grad, torch.Tensor): + from torch.fx.experimental.symbolic_shapes import expect_true, sym_eq + + first_grad = grad if not is_grads_batched else grad[0] + # TODO: We can remove this conditional once we uniformly use + # singleton int to represent jagged dimension, so that size() call + # on nested tensor works + if out.is_nested or first_grad.is_nested: + shape_matches = torch.is_same_size(out, first_grad) + else: + # We need to do a regular size check, without going through + # the operator, to be able to handle unbacked symints + # (expect_true ensures we can deal with unbacked) + shape_matches = expect_true(sym_eq(out.size(), first_grad.size())) + if not shape_matches: + out_shape, grad_shape = _calculate_shape( + out, first_grad, is_grads_batched + ) + if is_grads_batched: + raise RuntimeError( + "If `is_grads_batched=True`, we interpret the first " + "dimension of each grad_output as the batch dimension. " + "The sizes of the remaining dimensions are expected to match " + "the shape of corresponding output, but a mismatch " + "was detected: grad_output[" + + str(grads.index(grad)) + + "] has a shape of " + + str(grad_shape) + + " and output[" + + str(outputs.index(out)) + + "] has a shape of " + + str(out_shape) + + ". " + "If you only want some tensors in `grad_output` to be considered " + "batched, consider using vmap." + ) + else: + raise RuntimeError( + "Mismatch in shape: grad_output[" + + str(grads.index(grad)) + + "] has a shape of " + + str(grad_shape) + + " and output[" + + str(outputs.index(out)) + + "] has a shape of " + + str(out_shape) + + "." 
+ ) + if out.dtype.is_complex != grad.dtype.is_complex: + raise RuntimeError( + "For complex Tensors, both grad_output and output" + " are required to have the same dtype." + " Mismatch in dtype: grad_output[" + + str(grads.index(grad)) + + "] has a dtype of " + + str(grad.dtype) + + " and output[" + + str(outputs.index(out)) + + "] has a dtype of " + + str(out.dtype) + + "." + ) + new_grads.append(grad) + elif grad is None: + if out.requires_grad: + if out.numel() != 1: + raise RuntimeError( + "grad can be implicitly created only for scalar outputs" + ) + if not out.dtype.is_floating_point: + msg = ( + "grad can be implicitly created only for real scalar outputs" + f" but got {out.dtype}" + ) + raise RuntimeError(msg) + new_grads.append( + torch.ones_like(out, memory_format=torch.preserve_format) + ) + else: + new_grads.append(None) + else: + raise TypeError( + "gradients can be either Tensors or None, but got " + + type(grad).__name__ + ) + return tuple(new_grads) + + +def _tensor_or_tensors_to_tuple( + tensors: Optional[_TensorOrTensors], length: int +) -> Tuple[_OptionalTensor, ...]: + if tensors is None: + return (None,) * length + if isinstance(tensors, torch.Tensor): + return (tensors,) + return tuple(tensors) + + +def backward( + tensors: _TensorOrTensors, + grad_tensors: Optional[_TensorOrTensors] = None, + retain_graph: Optional[bool] = None, + create_graph: bool = False, + grad_variables: Optional[_TensorOrTensors] = None, + inputs: Optional[_TensorOrTensorsOrGradEdge] = None, +) -> None: + r"""Computes the sum of gradients of given tensors with respect to graph + leaves. + + The graph is differentiated using the chain rule. If any of ``tensors`` + are non-scalar (i.e. their data has more than one element) and require + gradient, then the Jacobian-vector product would be computed, in this + case the function additionally requires specifying ``grad_tensors``. + It should be a sequence of matching length, that contains the "vector" + in the Jacobian-vector product, usually the gradient of the differentiated + function w.r.t. corresponding tensors (``None`` is an acceptable value for + all tensors that don't need gradient tensors). + + This function accumulates gradients in the leaves - you might need to zero + ``.grad`` attributes or set them to ``None`` before calling it. + See :ref:`Default gradient layouts` + for details on the memory layout of accumulated gradients. + + .. note:: + Using this method with ``create_graph=True`` will create a reference cycle + between the parameter and its gradient which can cause a memory leak. + We recommend using ``autograd.grad`` when creating the graph to avoid this. + If you have to use this function, make sure to reset the ``.grad`` fields of your + parameters to ``None`` after use to break the cycle and avoid the leak. + + .. note:: + + If you run any forward ops, create ``grad_tensors``, and/or call ``backward`` + in a user-specified CUDA stream context, see + :ref:`Stream semantics of backward passes`. + + .. note:: + + When ``inputs`` are provided and a given input is not a leaf, + the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients). + It is an implementation detail on which the user should not rely. + See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details. + + Args: + tensors (Sequence[Tensor] or Tensor): Tensors of which the derivative will be + computed. 
+        grad_tensors (Sequence[Tensor or None] or Tensor, optional): The "vector" in
+            the Jacobian-vector product, usually gradients w.r.t. each element of
+            corresponding tensors. None values can be specified for scalar Tensors or
+            ones that don't require grad. If a None value would be acceptable for all
+            grad_tensors, then this argument is optional.
+        retain_graph (bool, optional): If ``False``, the graph used to compute the grad
+            will be freed. Note that in nearly all cases setting this option to ``True``
+            is not needed and often can be worked around in a much more efficient
+            way. Defaults to the value of ``create_graph``.
+        create_graph (bool, optional): If ``True``, graph of the derivative will
+            be constructed, allowing to compute higher order derivative products.
+            Defaults to ``False``.
+        inputs (Sequence[Tensor] or Tensor or Sequence[GradientEdge], optional): Inputs w.r.t. which the gradient
+            will be accumulated into ``.grad``. All other Tensors will be ignored. If
+            not provided, the gradient is accumulated into all the leaf Tensors that
+            were used to compute the :attr:`tensors`.
+    """
+    if torch._C._are_functorch_transforms_active():
+        raise RuntimeError(
+            "backward() called inside a functorch transform. This is not "
+            "supported, please use functorch.grad or functorch.vjp instead "
+            "or call backward() outside of functorch transforms."
+        )
+
+    if grad_variables is not None:
+        warnings.warn(
+            "`grad_variables` is deprecated. Use `grad_tensors` instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
+        if grad_tensors is None:
+            grad_tensors = grad_variables
+        else:
+            raise RuntimeError(
+                "`grad_tensors` and `grad_variables` (deprecated) "
+                "arguments both passed to `backward()`. Please only "
+                "use `grad_tensors`."
+            )
+    if inputs is not None and len(inputs) == 0:
+        raise RuntimeError("`inputs` argument to `backward()` cannot be empty.")
+
+    tensors = (tensors,) if isinstance(tensors, torch.Tensor) else tuple(tensors)
+    inputs = (
+        (inputs,)
+        if isinstance(inputs, (torch.Tensor, graph.GradientEdge))
+        else tuple(inputs)
+        if inputs is not None
+        else tuple()
+    )
+
+    grad_tensors_ = _tensor_or_tensors_to_tuple(grad_tensors, len(tensors))
+    grad_tensors_ = _make_grads(tensors, grad_tensors_, is_grads_batched=False)
+    if retain_graph is None:
+        retain_graph = create_graph
+
+    # The reason we repeat the same comment below is that
+    # some Python versions print out the first line of multi-line function
+    # calls in the traceback and some print out the last line
+    _engine_run_backward(
+        tensors,
+        grad_tensors_,
+        retain_graph,
+        create_graph,
+        inputs,
+        allow_unreachable=True,
+        accumulate_grad=True,
+    )
+
+
+def grad(
+    outputs: _TensorOrTensors,
+    inputs: _TensorOrTensorsOrGradEdge,
+    grad_outputs: Optional[_TensorOrTensors] = None,
+    retain_graph: Optional[bool] = None,
+    create_graph: bool = False,
+    only_inputs: bool = True,
+    allow_unused: Optional[bool] = None,
+    is_grads_batched: bool = False,
+    materialize_grads: bool = False,
+) -> Tuple[torch.Tensor, ...]:
+    r"""Computes and returns the sum of gradients of outputs with respect to
+    the inputs.
+
+    ``grad_outputs`` should be a sequence of length matching ``output``
+    containing the "vector" in the vector-Jacobian product, usually the
+    pre-computed gradients w.r.t. each of the outputs. If an output doesn't
+    require_grad, then the gradient can be ``None``.
+
+    ..
note:: + + If you run any forward ops, create ``grad_outputs``, and/or call ``grad`` + in a user-specified CUDA stream context, see + :ref:`Stream semantics of backward passes`. + + .. note:: + + ``only_inputs`` argument is deprecated and is ignored now (defaults to ``True``). + To accumulate gradient for other parts of the graph, please use + ``torch.autograd.backward``. + + Args: + outputs (sequence of Tensor): outputs of the differentiated function. + inputs (sequence of Tensor or GradientEdge): Inputs w.r.t. which the gradient will be + returned (and not accumulated into ``.grad``). + grad_outputs (sequence of Tensor): The "vector" in the vector-Jacobian product. + Usually gradients w.r.t. each output. None values can be specified for scalar + Tensors or ones that don't require grad. If a None value would be acceptable + for all grad_tensors, then this argument is optional. Default: None. + retain_graph (bool, optional): If ``False``, the graph used to compute the grad + will be freed. Note that in nearly all cases setting this option to ``True`` + is not needed and often can be worked around in a much more efficient + way. Defaults to the value of ``create_graph``. + create_graph (bool, optional): If ``True``, graph of the derivative will + be constructed, allowing to compute higher order derivative products. + Default: ``False``. + allow_unused (Optional[bool], optional): If ``False``, specifying inputs + that were not used when computing outputs (and therefore their grad is + always zero) is an error. Defaults to the value of ``materialize_grads``. + is_grads_batched (bool, optional): If ``True``, the first dimension of each + tensor in ``grad_outputs`` will be interpreted as the batch dimension. + Instead of computing a single vector-Jacobian product, we compute a + batch of vector-Jacobian products for each "vector" in the batch. + We use the vmap prototype feature as the backend to vectorize calls + to the autograd engine so that this computation can be performed in a + single call. This should lead to performance improvements when compared + to manually looping and performing backward multiple times. Note that + due to this feature being experimental, there may be performance + cliffs. Please use ``torch._C._debug_only_display_vmap_fallback_warnings(True)`` + to show any performance warnings and file an issue on github if warnings exist + for your use case. Defaults to ``False``. + materialize_grads (bool, optional): If ``True``, set the gradient for unused inputs + to zero instead of None. This is useful when computing higher-order derivatives. + If ``materialize_grads`` is ``True`` and ``allow_unused`` is ``False``, an error + will be raised. Defaults to ``False``. + + """ + if materialize_grads and allow_unused is False: + raise ValueError( + "Expected allow_unused to be True or not passed when materialize_grads=True, " + "but got: allow_unused=False." 
+ ) + if allow_unused is None: + allow_unused = materialize_grads + t_outputs = cast( + Tuple[torch.Tensor, ...], + (outputs,) if is_tensor_like(outputs) else tuple(outputs), + ) + if is_tensor_like(inputs) or isinstance(inputs, graph.GradientEdge): + inputs = cast(_TensorOrTensorsOrGradEdge, (inputs,)) + else: + inputs = tuple(inputs) + t_inputs = tuple(i for i in inputs if is_tensor_like(i)) + overridable_args = t_outputs + t_inputs + if has_torch_function(overridable_args): + return handle_torch_function( + grad, + overridable_args, + t_outputs, + inputs, + grad_outputs=grad_outputs, + retain_graph=retain_graph, + create_graph=create_graph, + only_inputs=only_inputs, + allow_unused=allow_unused, + is_grads_batched=is_grads_batched, + materialize_grads=materialize_grads, + ) + + if not only_inputs: + warnings.warn( + "only_inputs argument is deprecated and is ignored now " + "(defaults to True). To accumulate gradient for other " + "parts of the graph, please use torch.autograd.backward.", + FutureWarning, + stacklevel=2, + ) + + grad_outputs_ = _tensor_or_tensors_to_tuple(grad_outputs, len(t_outputs)) + grad_outputs_ = _make_grads( + t_outputs, grad_outputs_, is_grads_batched=is_grads_batched + ) + + if retain_graph is None: + retain_graph = create_graph + + # The reason we repeat the same comment several times below is that + # some Python versions print out the first line of multi-line function + # calls in the traceback and some print out the last line + if is_grads_batched: + + def vjp(gO): + return _engine_run_backward( + t_outputs, + gO, + retain_graph, + create_graph, + inputs, + allow_unused, + accumulate_grad=False, + ) + + result = _vmap_internals._vmap(vjp, 0, 0, allow_none_pass_through=True)( + grad_outputs_ + ) + else: + result = _engine_run_backward( + t_outputs, + grad_outputs_, + retain_graph, + create_graph, + inputs, + allow_unused, + accumulate_grad=False, + ) + if materialize_grads: + if any( + result[i] is None and not is_tensor_like(inputs[i]) + for i in range(len(inputs)) + ): + raise RuntimeError( + "materialize_grads cannot be used when the given input is a GradientEdge" + ) + result = tuple( + output + if output is not None + else torch.zeros_like(input, requires_grad=True) + for (output, input) in zip(result, inputs) + ) + return result + + +# This function applies in case of gradient checkpointing for memory +# optimization. Currently, gradient checkpointing is supported only if the +# execution engine is invoked through torch.autograd.backward() and its +# inputs argument is not passed. It is not supported for torch.autograd.grad(). +# This is because if inputs are specified, the gradient won't be calculated for +# anything else, e.g. model parameters such as weights and biases. +# +# This function returns whether checkpointing is valid, i.e. whether the engine +# was invoked through torch.autograd.backward (valid) or torch.autograd.grad +# (not valid). The implementation works by maintaining a thread local variable in +# torch/csrc/autograd/engine.cpp which looks at the NodeTask in the stack and, +# before a NodeTask is executed in evaluate_function, checks whether reentrant +# backwards is imperative or not. +# See https://github.com/pytorch/pytorch/pull/4594 for more discussion/context +def _is_checkpoint_valid(): + return Variable._execution_engine.is_checkpoint_valid() + + +def variable(*args, **kwargs): + raise RuntimeError( + "torch.autograd.variable(...) is deprecated, use torch.tensor(...) instead" + ) + + +# Monkey patching variable.Variable to fix FX codegen.
FX generates a call by roughly doing +# f"{fn.__module__}.{fn.__name__}(...). This yields torch.autograd.variable.Variable(...) in the +# output of an FX graph. Unfortunately the module name torch.autograd.variable is shadowed by the +# deprecated function - variable(...). +variable.Variable = Variable # type: ignore[attr-defined] + +if not torch._C._autograd_init(): + raise RuntimeError("autograd initialization failed") + +# Import all native method/classes +from torch._C._autograd import ( + _add_metadata_json, + _disable_profiler, + _disable_profiler_legacy, + _enable_profiler, + _enable_profiler_legacy, + _enable_record_function, + _get_sequence_nr, + _kineto_step, + _KinetoEvent, + _pop_saved_tensors_default_hooks, + _prepare_profiler, + _profiler_enabled, + _ProfilerResult, + _push_saved_tensors_default_hooks, + _record_function_with_args_enter, + _record_function_with_args_exit, + _set_empty_test_observer, + _supported_activities, + DeviceType, + kineto_available, + ProfilerEvent, + SavedTensor, +) + +from torch._C._profiler import ProfilerActivity, ProfilerConfig, ProfilerState + +from . import profiler + + +def _register_py_tensor_class_for_device(device, cls): + if not isinstance(cls, type): + raise RuntimeError("cls isn't a typeinfo object") + torch._C._register_py_class_for_device(device, cls) + + +is_multithreading_enabled = torch._C._is_multithreading_enabled +torch._C._add_docstr( + is_multithreading_enabled, "Returns True if multithreading is currently enabled." +) + +is_view_replay_enabled = torch._C._is_view_replay_enabled +torch._C._add_docstr( + is_view_replay_enabled, "Returns True if view-replay is currently enabled." +) diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3aa0696d44fb27ec1df0ef5e94021b14bf2f954e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/anomaly_mode.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/anomaly_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1de1523e0ecaee7317b656b8e85cdafcaf041f1f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/anomaly_mode.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/forward_ad.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/forward_ad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b18f437cc58072128771382190732eda0dfd8501 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/forward_ad.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/function.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02aff6ba3d3bbe800106f903688aed991acfa621 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/function.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/functional.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d7afd5b6e974a712573f9afd8a7c4657ea104ec Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/functional.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/grad_mode.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/grad_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a463f3b54eba81a0490b833ad6cbf789ef7d6677 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/grad_mode.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/gradcheck.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/gradcheck.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e56f0b84f3119593e295ad344264b0fbb39d65ab Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/gradcheck.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/graph.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b5e84838049269762f6253f68a9efb376e42c98 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/graph.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ec2f6c9dbccba21a9be03fd1d8d10c2896727e5 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_legacy.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_legacy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6ea7402ac7bb68b4067f17fe8c6cc77944f0a27 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_legacy.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_util.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3f25367af063b0125f7587f3d70396d55f62d7c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_util.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/variable.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/variable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac05d6e4844f7792ff6302a187f2d46f0f5de8de Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/__pycache__/variable.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/_functions/__init__.py b/parrot/lib/python3.10/site-packages/torch/autograd/_functions/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..4170fad3eeac788dcb36b6ae1ddbee1b44dc25a1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/_functions/__init__.py @@ -0,0 +1 @@ +from .tensor import * # noqa: F403 diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1417248d78cea86e01ff3d85a801319bc6d4b61 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/tensor.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6971b6868b44798f8ee6d6d7a31e5cee167bf2e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/tensor.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c526ce2b68547de6a6ee184214f8dbedec1af126 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/_functions/tensor.py b/parrot/lib/python3.10/site-packages/torch/autograd/_functions/tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..9c982b074b65c88d909719a37eba911d745d247d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/_functions/tensor.py @@ -0,0 +1,65 @@ +# mypy: allow-untyped-defs +import operator +from functools import reduce +from typing_extensions import deprecated + +import torch +import torch._utils +from ..function import Function + + +class Type(Function): + @staticmethod + @deprecated( + "`torch.autograd._functions.Type` is deprecated as of PyTorch 2.1, " + "please use `torch.tensor.to(dtype=dtype)` instead.", + category=FutureWarning, + ) + def forward(ctx, i, dest_type): + ctx.input_type = type(i) + ctx.input_device = -1 if not i.is_cuda else i.get_device() + return i.type(dest_type) + + @staticmethod + def backward(ctx, grad_output): + if ctx.input_device == -1: + return grad_output.type(ctx.input_type), None + else: + with torch.cuda.device(ctx.input_device): + return grad_output.type(ctx.input_type), None + + +# TODO: deprecate this +class Resize(Function): + @staticmethod + def forward(ctx, tensor, sizes): + ctx.sizes = sizes + ctx.numel = reduce(operator.mul, sizes, 1) + if tensor.numel() != ctx.numel: + raise RuntimeError( + ( + "requested resize to {} ({} elements in total), " + "but the given tensor has a size of {} ({} elements). " + "autograd's resize can only change the shape of a given " + "tensor, while preserving the number of elements. 
" + ).format( + "x".join(map(str, sizes)), + ctx.numel, + "x".join(map(str, tensor.size())), + tensor.numel(), + ) + ) + ctx.input_sizes = tensor.size() + if tensor.is_quantized: + tensor.copy_(tensor) + return tensor.contiguous().view(*sizes) + if tensor.is_contiguous(): + result = tensor.new(tensor).contiguous().view(*sizes) + return result + else: + return tensor.contiguous().view(*sizes) + + @staticmethod + def backward(ctx, grad_output): + assert grad_output.numel() == ctx.numel + return grad_output.contiguous().view(ctx.input_sizes), None diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/_functions/utils.py b/parrot/lib/python3.10/site-packages/torch/autograd/_functions/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..56baae4aae3b05926a57a32853eb240bc30aa423 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/_functions/utils.py @@ -0,0 +1,63 @@ +# mypy: allow-untyped-defs +import operator +from functools import reduce + + +def maybe_view(tensor, size, check_same_size=True): + if check_same_size and tensor.size() == size: + return tensor + return tensor.contiguous().view(size) + + +def maybe_unexpand(tensor, old_size, check_same_size=True): + if check_same_size and tensor.size() == old_size: + return tensor + num_unsqueezed = tensor.dim() - len(old_size) + expanded_dims = [ + dim + for dim, (expanded, original) in enumerate( + zip(tensor.size()[num_unsqueezed:], old_size) + ) + if expanded != original + ] + + for _ in range(num_unsqueezed): + tensor = tensor.sum(0, keepdim=False) + for dim in expanded_dims: + tensor = tensor.sum(dim, keepdim=True) + return tensor + + +# Check whether the op enable broadcasting, and whether it is supported by ONNX. +# If dims1 and dims2 are different, then broadcast is True. +# We always assume the combination of dims1 and dims2 is broadcastable. +# The following types of broadcasting are supported in ONNX: +# 1) Only one element in dims2, such as dims2 = [1, 1] +# 2) dims2 is suffix of dims1, such as dims1 = [2, 3, 4], and dims2 = [3, 4] +# Details can be found here: https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm +def check_onnx_broadcast(dims1, dims2): + broadcast = False + supported = True + len1 = len(dims1) + len2 = len(dims2) + numel1 = reduce(operator.mul, dims1) + numel2 = reduce(operator.mul, dims2) + if len1 < len2: + broadcast = True + if numel2 != 1: + supported = False + elif len1 > len2: + broadcast = True + if numel2 != 1 and dims1[len1 - len2 :] != dims2: + supported = False + else: + if dims1 != dims2: + broadcast = True + if numel2 != 1: + supported = False + + if not supported: + raise ValueError( + f"Numpy style broadcasting is not supported in ONNX. Input dims are: {dims1}, {dims2}" + ) + return broadcast diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/anomaly_mode.py b/parrot/lib/python3.10/site-packages/torch/autograd/anomaly_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..7e73ad4ef2c348f88f12aab35f99d81bcdb985ec --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/anomaly_mode.py @@ -0,0 +1,120 @@ +# mypy: allow-untyped-defs +import warnings + +import torch + +__all__ = ["detect_anomaly", "set_detect_anomaly"] + + +class detect_anomaly: + r"""Context-manager that enable anomaly detection for the autograd engine. 
+ + This does two things: + + - Running the forward pass with detection enabled will allow the backward + pass to print the traceback of the forward operation that created the failing + backward function. + - If ``check_nan`` is ``True``, any backward computation that generates a "nan" + value will raise an error. Defaults to ``True``. + + .. warning:: + This mode should be enabled only for debugging as the extra checks + will slow down your program execution. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ANOMALY) + >>> import torch + >>> from torch import autograd + >>> class MyFunc(autograd.Function): + ... @staticmethod + ... def forward(ctx, inp): + ... return inp.clone() + ... @staticmethod + ... def backward(ctx, gO): + ... # Error during the backward pass + ... raise RuntimeError("Some error in backward") + ... return gO.clone() + >>> def run_fn(a): + ... out = MyFunc.apply(a) + ... return out.sum() + >>> inp = torch.rand(10, 10, requires_grad=True) + >>> out = run_fn(inp) + >>> out.backward() + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + File "/your/pytorch/install/torch/_tensor.py", line 93, in backward + torch.autograd.backward(self, gradient, retain_graph, create_graph) + File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward + allow_unreachable=True) # allow_unreachable flag + File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply + return self._forward_cls.backward(self, *args) + File "<stdin>", line 8, in backward + RuntimeError: Some error in backward + >>> with autograd.detect_anomaly(): + ... inp = torch.rand(10, 10, requires_grad=True) + ... out = run_fn(inp) + ... out.backward() + Traceback of forward call that caused the error: + File "tmp.py", line 53, in <module> + out = run_fn(inp) + File "tmp.py", line 44, in run_fn + out = MyFunc.apply(a) + Traceback (most recent call last): + File "<stdin>", line 4, in <module> + File "/your/pytorch/install/torch/_tensor.py", line 93, in backward + torch.autograd.backward(self, gradient, retain_graph, create_graph) + File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward + allow_unreachable=True) # allow_unreachable flag + File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply + return self._forward_cls.backward(self, *args) + File "<stdin>", line 8, in backward + RuntimeError: Some error in backward + + """ + + def __init__(self, check_nan=True) -> None: + self.prev = torch.is_anomaly_enabled() + self.check_nan = check_nan + self.prev_check_nan = torch.is_anomaly_check_nan_enabled() + warnings.warn( + "Anomaly Detection has been enabled. " + "This mode will increase the runtime " + "and should only be enabled for debugging.", + stacklevel=2, + ) + + def __enter__(self) -> None: + torch.set_anomaly_enabled(True, self.check_nan) + + def __exit__(self, *args: object) -> None: + torch.set_anomaly_enabled(self.prev, self.prev_check_nan) + + +class set_detect_anomaly: + r"""Context-manager that sets the anomaly detection for the autograd engine on or off. + + ``set_detect_anomaly`` will enable or disable the autograd anomaly detection + based on its argument :attr:`mode`. + It can be used as a context-manager or as a function. + + See ``detect_anomaly`` above for details of the anomaly detection behaviour. + + Args: + mode (bool): Flag whether to enable anomaly detection (``True``), + or disable (``False``).
+ check_nan (bool): Flag whether to raise an error when the backward pass + generates "nan" values + + """ + + def __init__(self, mode: bool, check_nan: bool = True) -> None: + self.prev = torch.is_anomaly_enabled() + self.prev_check_nan = torch.is_anomaly_check_nan_enabled() + torch.set_anomaly_enabled(mode, check_nan) + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: object) -> None: + torch.set_anomaly_enabled(self.prev, self.prev_check_nan) diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/forward_ad.py b/parrot/lib/python3.10/site-packages/torch/autograd/forward_ad.py new file mode 100644 index 0000000000000000000000000000000000000000..4187e220ceabf4c9e8547a6c97fbc83f572e4a68 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/forward_ad.py @@ -0,0 +1,232 @@ +# mypy: allow-untyped-defs +import os +from collections import namedtuple + +from typing import Any + +import torch +from .grad_mode import _DecoratorContextManager + +__all__ = [ + "UnpackedDualTensor", + "enter_dual_level", + "exit_dual_level", + "make_dual", + "unpack_dual", + "dual_level", +] + +# Global variable used to make the python API simpler to use +_current_level = -1 + + +def enter_dual_level(): + r"""Enter a new forward grad level. + + This level can be used to make and unpack dual Tensors to compute + forward gradients. + + This function also updates the current level that is used by default + by the other functions in this API. + """ + global _current_level + new_level = torch._C._enter_dual_level() + if new_level != _current_level + 1: + raise RuntimeError( + "Entering a new forward AD level but the current level " + "is not valid. Make sure you did not modify it directly." + ) + _current_level = new_level + return new_level + + +def exit_dual_level(*, level=None): + r"""Exit a forward grad level. + + This function deletes all the gradients associated with this + level. Only deleting the latest entered level is allowed. + + This function also updates the current level that is used by default + by the other functions in this API. + """ + global _current_level + if level is None: + level = _current_level + if level != _current_level: + raise RuntimeError( + "Trying to exit a forward AD level that was not the last one " + "that was created. This is not supported." + ) + torch._C._exit_dual_level(level=level) + _current_level = level - 1 + + +def _maybe_load_decompositions(): + if os.environ.get("PYTORCH_JIT", "1") == "1" and __debug__: + from torch._decomp import decompositions_for_jvp # noqa: F401 + + +def make_dual(tensor, tangent, *, level=None): + r"""Associate a tensor value with its tangent to create a "dual tensor" for forward AD gradient computation. + + The result is a new tensor aliased to :attr:`tensor` with :attr:`tangent` embedded + as an attribute as-is if it has the same storage layout or copied otherwise. + The tangent attribute can be recovered with :func:`unpack_dual`. + + This function is backward differentiable. + + Given a function `f` whose jacobian is `J`, it allows one to compute the Jacobian-vector product (`jvp`) + between `J` and a given vector `v` as follows. + + Example:: + + >>> # xdoctest: +SKIP("Undefined variables") + >>> with dual_level(): + ... inp = make_dual(x, v) + ... out = f(inp) + ... y, jvp = unpack_dual(out) + + Please see the `forward-mode AD tutorial <https://pytorch.org/tutorials/intermediate/forward_ad_usage.html>`__ + for detailed steps on how to use this API.
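+ + A concrete sketch (illustrative only; ``torch.sin`` stands in for an + arbitrary differentiable function):: + + >>> # xdoctest: +SKIP + >>> x = torch.tensor([0.0, 1.0]) + >>> v = torch.ones_like(x) # the tangent "vector" + >>> with dual_level(): + ... out = torch.sin(make_dual(x, v)) + ... primal, jvp = unpack_dual(out) # jvp == torch.cos(x) * v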
+ + """ + # See NOTE: [forward-mode AD decompositions mechanism] + # + # Import from torch._decomp import decompositions_for_jvp to register + # decompositions for jvp to the jit registry + # + # FIXME: We specify that __debug__ must be True because + # if python is run with -OO or -O flags (i.e., __debug__ is False), we encounter the + # following error: + # + # Return value was annotated as having type Tuple[NoneType, NoneType] but is actually of + # type Tuple[Tensor, Tensor]: + # File ".../torch/_decomp/__init__.py", line 1585 + # else: + # buffer = z + # return min - torch.log1p(z), buffer + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + _maybe_load_decompositions() + + if level is None: + level = _current_level + + if level < 0: + raise RuntimeError( + "Trying to create a dual Tensor for forward AD but no level " + "exists, make sure to enter_dual_level() first." + ) + if not (tensor.is_floating_point() or tensor.is_complex()): + raise ValueError( + f"Expected primal to be floating point or complex, but got: {tensor.dtype}" + ) + if not (tangent.is_floating_point() or tangent.is_complex()): + raise ValueError( + f"Expected tangent to be floating point or complex, but got: {tangent.dtype}" + ) + + return torch._VF._make_dual(tensor, tangent, level=level) + + +_UnpackedDualTensor = namedtuple("_UnpackedDualTensor", ["primal", "tangent"]) + + +class UnpackedDualTensor(_UnpackedDualTensor): + r"""Namedtuple returned by :func:`unpack_dual` containing the primal and tangent components of the dual tensor. + + See :func:`unpack_dual` for more details. + + """ + + pass + + +def unpack_dual(tensor, *, level=None): + r"""Unpack a "dual tensor" to get both its Tensor value and its forward AD gradient. + + The result is a namedtuple ``(primal, tangent)`` where ``primal`` is a view of + :attr:`tensor`'s primal and ``tangent`` is :attr:`tensor`'s tangent as-is. + Neither of these tensors can be dual tensor of level :attr:`level`. + + This function is backward differentiable. + + Example:: + + >>> # xdoctest: +SKIP("Undefined variables") + >>> with dual_level(): + ... inp = make_dual(x, x_t) + ... out = f(inp) + ... y, jvp = unpack_dual(out) + ... jvp = unpack_dual(out).tangent + + Please see the `forward-mode AD tutorial `__ + for detailed steps on how to use this API. + """ + if level is None: + level = _current_level + + if level < 0: + return UnpackedDualTensor(tensor, None) + + primal, dual = torch._VF._unpack_dual(tensor, level=level) + + return UnpackedDualTensor(primal, dual) + + +class dual_level(_DecoratorContextManager): + r"""Context-manager for forward AD, where all forward AD computation must occur within the ``dual_level`` context. + + .. Note:: + + The ``dual_level`` context appropriately enters and exit the dual level to + controls the current forward AD level, which is used by default by the other + functions in this API. + + We currently don't plan to support nested ``dual_level`` contexts, however, so + only a single forward AD level is supported. To compute higher-order + forward grads, one can use :func:`torch.func.jvp`. + + Example:: + + >>> # xdoctest: +SKIP("Undefined variables") + >>> x = torch.tensor([1]) + >>> x_t = torch.tensor([1]) + >>> with dual_level(): + ... inp = make_dual(x, x_t) + ... # Do computations with inp + ... out = your_fn(inp) + ... 
_, grad = unpack_dual(out) + >>> grad is None + False + >>> # After exiting the level, the grad is deleted + >>> _, grad_after = unpack_dual(out) + >>> grad_after is None + True + + Please see the `forward-mode AD tutorial <https://pytorch.org/tutorials/intermediate/forward_ad_usage.html>`__ + for detailed steps on how to use this API. + """ + + def __enter__(self): + return enter_dual_level() + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + exit_dual_level() + + +# Private helper functions +_is_fwd_grad_enabled = torch._C._is_fwd_grad_enabled + + +# Private helper function to enable or disable fwd grad. +# If you're a user and want to use this, please file an issue to discuss the use case. +class _set_fwd_grad_enabled(_DecoratorContextManager): + def __init__(self, mode: bool) -> None: + self.prev = _is_fwd_grad_enabled() + torch._C._set_fwd_grad_enabled(mode) + + def __enter__(self) -> None: + pass + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + torch._C._set_fwd_grad_enabled(self.prev) diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/function.py b/parrot/lib/python3.10/site-packages/torch/autograd/function.py new file mode 100644 index 0000000000000000000000000000000000000000..62ec1183a365d9328ac2ba24b179e72cd2b4466f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/function.py @@ -0,0 +1,843 @@ +# mypy: allow-untyped-defs +import functools +import inspect +import itertools +import warnings +from collections import OrderedDict +from typing import Any, List, Optional, Tuple +from typing_extensions import deprecated + +import torch +import torch._C as _C +import torch._functorch as _functorch +import torch.utils.hooks as hooks +from torch._C import _functions +from torch._functorch.autograd_function import custom_function_call + +__all__ = [ + "FunctionCtx", + "BackwardCFunction", + "FunctionMeta", + "Function", + "once_differentiable", + "InplaceFunction", + "NestedIOFunction", +] + +# Unique id provider for each class inheriting from Function +# This is incremented in FunctionMeta during class definition +AUTOGRAD_FUNCTION_COUNTER = itertools.count() + + +# Formerly known as: _ContextMethodMixin +class FunctionCtx: + def save_for_backward(self, *tensors: torch.Tensor): + r"""Save given tensors for a future call to :func:`~Function.backward`. + + ``save_for_backward`` should be called at most once, in either the + :func:`setup_context` or :func:`forward` methods, and only with tensors. + + All tensors intended to be used in the backward pass should be saved + with ``save_for_backward`` (as opposed to directly on ``ctx``) to prevent + incorrect gradients and memory leaks, and enable the application of saved + tensor hooks. See :class:`torch.autograd.graph.saved_tensors_hooks`. + + Note that if intermediary tensors, tensors that are neither inputs + nor outputs of :func:`forward`, are saved for backward, your custom Function + may not support double backward. + Custom Functions that do not support double backward should decorate their + :func:`backward` method with ``@once_differentiable`` so that performing + double backward raises an error. If you'd like to support double backward, + you can either recompute intermediaries based on the inputs during backward + or return the intermediaries as the outputs of the custom Function. See the + `double backward tutorial `_ + for more details. + + In :func:`backward`, saved tensors can be accessed through the :attr:`saved_tensors` + attribute.
Before returning them to the user, a check is made to ensure + they weren't used in any in-place operation that modified their content. + + Arguments can also be ``None``. This is a no-op. + + See :ref:`extending-autograd` for more details on how to use this method. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> class Func(Function): + >>> @staticmethod + >>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int): + >>> w = x * z + >>> out = x * y + y * z + w * y + >>> ctx.save_for_backward(x, y, w, out) + >>> ctx.z = z # z is not a tensor + >>> return out + >>> + >>> @staticmethod + >>> @once_differentiable + >>> def backward(ctx, grad_out): + >>> x, y, w, out = ctx.saved_tensors + >>> z = ctx.z + >>> gx = grad_out * (y + y * z) + >>> gy = grad_out * (x + z + w) + >>> gz = None + >>> return gx, gy, gz + >>> + >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double) + >>> b = torch.tensor(2., requires_grad=True, dtype=torch.double) + >>> c = 4 + >>> d = Func.apply(a, b, c) + + """ + self.to_save = tensors + + def save_for_forward(self, *tensors: torch.Tensor): + r"""Save given tensors for a future call to :func:`~Function.jvp`. + + ``save_for_forward`` should be called at most once, in either the + :func:`setup_context` or :func:`forward` methods, and all arguments + should be tensors. + + In :func:`jvp`, saved objects can be accessed through the :attr:`saved_tensors` + attribute. + + Arguments can also be ``None``. This is a no-op. + + See :ref:`extending-autograd` for more details on how to use this method. + + Example:: + >>> # xdoctest: +SKIP + >>> class Func(torch.autograd.Function): + >>> @staticmethod + >>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int): + >>> ctx.save_for_backward(x, y) + >>> ctx.save_for_forward(x, y) + >>> ctx.z = z + >>> return x * y * z + >>> + >>> @staticmethod + >>> def jvp(ctx, x_t, y_t, _): + >>> x, y = ctx.saved_tensors + >>> z = ctx.z + >>> return z * (y * x_t + x * y_t) + >>> + >>> @staticmethod + >>> def vjp(ctx, grad_out): + >>> x, y = ctx.saved_tensors + >>> z = ctx.z + >>> return z * grad_out * y, z * grad_out * x, None + >>> + >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double) + >>> t = torch.tensor(1., dtype=torch.double) + >>> b = torch.tensor(2., requires_grad=True, dtype=torch.double) + >>> c = 4 + >>> + >>> with fwAD.dual_level(): + >>> a_dual = fwAD.make_dual(a, t) + >>> d = Func.apply(a_dual, b, c) + + """ + for tensor in tensors: + assert isinstance(tensor, torch.Tensor) or tensor is None, ( + "save_for_forward expects all arguments to be tensors; you should " + "save non-tensors as attributes on ctx." + ) + + self.saved_for_forward = tensors + + def mark_dirty(self, *args: torch.Tensor): + r"""Mark given tensors as modified in an in-place operation. + + This should be called at most once, in either the :func:`setup_context` + or :func:`forward` methods, and all arguments should be inputs. + + Every tensor that's been modified in-place in a call to :func:`forward` + should be given to this function, to ensure correctness of our checks. + It doesn't matter whether the function is called before or after + modification. 
+ + Examples:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> class Inplace(Function): + >>> @staticmethod + >>> def forward(ctx, x): + >>> x_npy = x.numpy() # x_npy shares storage with x + >>> x_npy += 1 + >>> ctx.mark_dirty(x) + >>> return x + >>> + >>> @staticmethod + >>> @once_differentiable + >>> def backward(ctx, grad_output): + >>> return grad_output + >>> + >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double).clone() + >>> b = a * a + >>> Inplace.apply(a) # This would lead to wrong gradients! + >>> # but the engine would not know unless we mark_dirty + >>> # xdoctest: +SKIP + >>> b.backward() # RuntimeError: one of the variables needed for gradient + >>> # computation has been modified by an inplace operation + + """ + self.dirty_tensors = args + + @deprecated( + "`mark_shared_storage` is deprecated. " + "Tensors with shared storages are automatically tracked. " + "Note that calls to `set_()` are not tracked", + category=FutureWarning, + ) + def mark_shared_storage(self, *pairs): + pass + + def mark_non_differentiable(self, *args: torch.Tensor): + r"""Mark outputs as non-differentiable. + + This should be called at most once, in either the :func:`setup_context` + or :func:`forward` methods, and all arguments should be tensor outputs. + + This will mark outputs as not requiring gradients, increasing the + efficiency of backward computation. You still need to accept a gradient + for each output in :meth:`~Function.backward`, but it's always going to + be a zero tensor with the same shape as the shape of a corresponding + output. + + This is used e.g. for indices returned from a sort. See example:: + >>> class Func(Function): + >>> @staticmethod + >>> def forward(ctx, x): + >>> sorted, idx = x.sort() + >>> ctx.mark_non_differentiable(idx) + >>> ctx.save_for_backward(x, idx) + >>> return sorted, idx + >>> + >>> @staticmethod + >>> @once_differentiable + >>> def backward(ctx, g1, g2): # still need to accept g2 + >>> x, idx = ctx.saved_tensors + >>> grad_input = torch.zeros_like(x) + >>> grad_input.index_add_(0, idx, g1) + >>> return grad_input + + """ + self.non_differentiable = args + + def set_materialize_grads(self, value: bool): + r"""Set whether to materialize grad tensors. Default is ``True``. + + This should be called only from either the :func:`setup_context` or + :func:`forward` methods. + + If ``True``, undefined grad tensors will be expanded to tensors full of zeros + prior to calling the :func:`backward` and :func:`jvp` methods. 
+ + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> class SimpleFunc(Function): + >>> @staticmethod + >>> def forward(ctx, x): + >>> return x.clone(), x.clone() + >>> + >>> @staticmethod + >>> @once_differentiable + >>> def backward(ctx, g1, g2): + >>> return g1 + g2 # No check for None necessary + >>> + >>> # We modify SimpleFunc to handle non-materialized grad outputs + >>> class Func(Function): + >>> @staticmethod + >>> def forward(ctx, x): + >>> ctx.set_materialize_grads(False) + >>> ctx.save_for_backward(x) + >>> return x.clone(), x.clone() + >>> + >>> @staticmethod + >>> @once_differentiable + >>> def backward(ctx, g1, g2): + >>> x, = ctx.saved_tensors + >>> grad_input = torch.zeros_like(x) + >>> if g1 is not None: # We must check for None now + >>> grad_input += g1 + >>> if g2 is not None: + >>> grad_input += g2 + >>> return grad_input + >>> + >>> a = torch.tensor(1., requires_grad=True) + >>> b, _ = Func.apply(a) # induces g2 to be undefined + + """ + self.materialize_grads = value + + +# DO NOT USE: This is only defined to be able to load old serialized models +_ContextMethodMixin = FunctionCtx + + +class _HookMixin: + @staticmethod + def _register_hook(backward_hooks, hook): + if backward_hooks is None: + backward_hooks = OrderedDict() + handle = hooks.RemovableHandle(backward_hooks) + backward_hooks[handle.id] = hook + return backward_hooks, handle + + +class BackwardCFunction(_C._FunctionBase, FunctionCtx, _HookMixin): + r""" + This class is used for internal autograd work. Do not use. + """ + + def apply(self, *args): + r""" + Apply method used when executing this Node during the backward + """ + # _forward_cls is defined by derived class + # The user should define either backward or vjp but never both. + backward_fn = self._forward_cls.backward # type: ignore[attr-defined] + vjp_fn = self._forward_cls.vjp # type: ignore[attr-defined] + if backward_fn is not Function.backward and vjp_fn is not Function.vjp: + raise RuntimeError( + "Implementing both 'backward' and 'vjp' for a custom " + "Function is not allowed. You should only implement one " + "of them." + ) + user_fn = vjp_fn if vjp_fn is not Function.vjp else backward_fn + return user_fn(self, *args) + + def apply_jvp(self, *args): + r""" + Apply method used when executing forward mode AD during the forward + """ + # _forward_cls is defined by derived class + return self._forward_cls.jvp(self, *args) # type: ignore[attr-defined] + + def _compiled_autograd_key(self): + return self._forward_cls._compiled_autograd_key(self) # type: ignore[attr-defined] + + +class FunctionMeta(type): + """Function metaclass. + + This metaclass sets up the following properties: + _backward_cls: The Function class corresponding to the differentiated + version of this function (which is generated on the fly by this + metaclass). + """ + + def __init__(cls, name, bases, attrs): + backward_fn = type( + name + "Backward", (BackwardCFunction,), {"_forward_cls": cls} + ) + backward_fn._autograd_function_id = next(AUTOGRAD_FUNCTION_COUNTER) # type: ignore[attr-defined] + backward_fn._compiled_autograd_should_lift = attrs.get( # type: ignore[attr-defined] + "_compiled_autograd_should_lift", True + ) + cls._backward_cls = backward_fn + + super().__init__(name, bases, attrs) + + +class _SingleLevelFunction( + _C._FunctionBase, FunctionCtx, _HookMixin, metaclass=FunctionMeta +): + @staticmethod + def forward(*args: Any, **kwargs: Any) -> Any: + r"""Define the forward of the custom autograd Function. 
+ + This function is to be overridden by all subclasses. + There are two ways to define forward: + + Usage 1 (Combined forward and ctx):: + + @staticmethod + def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any: + pass + + - It must accept a context ctx as the first argument, followed by any + number of arguments (tensors or other types). + - See :ref:`combining-forward-context` for more details + + Usage 2 (Separate forward and ctx):: + + @staticmethod + def forward(*args: Any, **kwargs: Any) -> Any: + pass + + @staticmethod + def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: + pass + + - The forward no longer accepts a ctx argument. + - Instead, you must also override the :meth:`torch.autograd.Function.setup_context` + staticmethod to handle setting up the ``ctx`` object. + ``output`` is the output of the forward, ``inputs`` are a Tuple of inputs + to the forward. + - See :ref:`extending-autograd` for more details + + The context can be used to store arbitrary data that can then be + retrieved during the backward pass. Tensors should not be stored + directly on `ctx` (though this is not currently enforced for + backward compatibility). Instead, tensors should be saved either with + :func:`ctx.save_for_backward` if they are intended to be used in + ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward` + if they are intended to be used in ``jvp``. + """ + raise NotImplementedError( + "You must implement the forward function for custom autograd.Function." + ) + + @staticmethod + def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> Any: + r"""There are two ways to define the forward pass of an autograd.Function. + + Either: + + 1. Override forward with the signature ``forward(ctx, *args, **kwargs)``. + ``setup_context`` is not overridden. Setting up the ctx for backward + happens inside the ``forward``. + 2. Override forward with the signature ``forward(*args, **kwargs)`` and + override ``setup_context``. Setting up the ctx for backward happens + inside ``setup_context`` (as opposed to inside the ``forward``) + + See :meth:`torch.autograd.Function.forward` and :ref:`extending-autograd` for more details. + """ + raise NotImplementedError("setup_context is not implemented.") + + @staticmethod + def backward(ctx: Any, *grad_outputs: Any) -> Any: + r"""Define a formula for differentiating the operation with backward mode automatic differentiation. + + This function is to be overridden by all subclasses. + (Defining this function is equivalent to defining the ``vjp`` function.) + + It must accept a context :attr:`ctx` as the first argument, followed by + as many outputs as the :func:`forward` returned (None will be passed in + for non tensor outputs of the forward function), + and it should return as many tensors as there were inputs to + :func:`forward`. Each argument is the gradient w.r.t the given output, + and each returned value should be the gradient w.r.t. the + corresponding input. If an input is not a Tensor or is a Tensor not + requiring grads, you can just pass None as a gradient for that input. + + The context can be used to retrieve tensors saved during the forward + pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple + of booleans representing whether each input needs gradient. E.g., + :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the + first input to :func:`forward` needs gradient computed w.r.t. the + output.
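+ + A minimal sketch (illustrative only) for ``out = x ** 2``:: + + >>> # xdoctest: +SKIP + >>> class Square(Function): + ... @staticmethod + ... def forward(ctx, x): + ... ctx.save_for_backward(x) + ... return x ** 2 + ... @staticmethod + ... def backward(ctx, grad_output): + ... (x,) = ctx.saved_tensors + ... return 2 * x * grad_output # chain rule: d(x**2)/dx times grad_output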
+ """ + raise NotImplementedError( + "You must implement either the backward or vjp method for " + "your custom autograd.Function to use it with backward " + "mode AD." + ) + + # vjp and backward are alias of each other + vjp = backward + + @staticmethod + def jvp(ctx: Any, *grad_inputs: Any) -> Any: + r"""Define a formula for differentiating the operation with forward mode automatic differentiation. + + This function is to be overridden by all subclasses. + It must accept a context :attr:`ctx` as the first argument, followed by + as many inputs as the :func:`forward` got (None will be passed in + for non tensor inputs of the forward function), + and it should return as many tensors as there were outputs to + :func:`forward`. Each argument is the gradient w.r.t the given input, + and each returned value should be the gradient w.r.t. the + corresponding output. If an output is not a Tensor or the function is not + differentiable with respect to that output, you can just pass None as a + gradient for that input. + + You can use the :attr:`ctx` object to pass any value from the forward to this + functions. + """ + raise NotImplementedError( + "You must implement the jvp function for custom " + "autograd.Function to use it with forward mode AD." + ) + + +class Function(_SingleLevelFunction): + r"""Base class to create custom `autograd.Function`. + + To create a custom `autograd.Function`, subclass this class and implement + the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom + op in the forward pass, call the class method ``apply``. Do not call + :meth:`forward` directly. + + To ensure correctness and best performance, make sure you are calling the + correct methods on ``ctx`` and validating your backward function using + :func:`torch.autograd.gradcheck`. + + See :ref:`extending-autograd` for more details on how to use this class. + + Examples:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> class Exp(Function): + >>> @staticmethod + >>> def forward(ctx, i): + >>> result = i.exp() + >>> ctx.save_for_backward(result) + >>> return result + >>> + >>> @staticmethod + >>> def backward(ctx, grad_output): + >>> result, = ctx.saved_tensors + >>> return grad_output * result + >>> + >>> # Use it by calling the apply method: + >>> # xdoctest: +SKIP + >>> output = Exp.apply(input) + """ + + def __init__(self, *args, **kwargs): + warnings.warn( + f"{self.__class__} should not be instantiated. Methods on autograd functions" + "are all static, so you should invoke them on the class itself. " + "Instantiating an autograd function will raise an " + "error in a future version of PyTorch.", + DeprecationWarning, + stacklevel=2, + ) + + def __call__(self, *args, **kwargs): + raise RuntimeError( + "Legacy autograd function with non-static forward method is deprecated. " + "Please use new-style autograd function with static forward method. " + "(Example: https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function)" + ) + + """ + Bool that specifies if PyTorch should attempt to autogenerate + :func:`torch.vmap` support for this autograd.Function. You may set this to + True only if this autograd.Function's forward, backward, and jvp (if they + exist) are written using PyTorch operations; otherwise, please override + :meth:`torch.autograd.Function.vmap` to add support for :func:`torch.vmap`. + + Please see :ref:`func-autograd-function` for more details. 
+ """ + generate_vmap_rule = False + + @staticmethod + def vmap(info, in_dims, *args): + r"""Define the behavior for this autograd.Function underneath :func:`torch.vmap`. + + For a :func:`torch.autograd.Function` to support + :func:`torch.vmap`, you must either override this static method, or set + ``generate_vmap_rule`` to ``True`` (you may not do both). + + If you choose to override this staticmethod: it must accept + + - an ``info`` object as the first argument. ``info.batch_size`` + specifies the size of the dimension being vmapped over, + while ``info.randomness`` is the randomness option passed to + :func:`torch.vmap`. + - an ``in_dims`` tuple as the second argument. + For each arg in ``args``, ``in_dims`` has a corresponding + ``Optional[int]``. It is ``None`` if the arg is not a Tensor or if + the arg is not being vmapped over, otherwise, it is an integer + specifying what dimension of the Tensor is being vmapped over. + - ``*args``, which is the same as the args to :meth:`~Function.forward`. + + The return of the vmap staticmethod is a tuple of ``(output, out_dims)``. + Similar to ``in_dims``, ``out_dims`` should be of the same structure as + ``output`` and contain one ``out_dim`` per output that specifies if the + output has the vmapped dimension and what index it is in. + + Please see :ref:`func-autograd-function` for more details. + """ + raise NotImplementedError( + "To use autograd.Function with vmap, you must either override the " + "vmap staticmethod or set generate_vmap_rule=True." + ) + + @classmethod + def apply(cls, *args, **kwargs): + def bind_default_args(func, *args, **kwargs): + signature = inspect.signature(func) + bound_args = signature.bind(*args, **kwargs) + bound_args.apply_defaults() + + return bound_args.args + + is_setup_ctx_defined = _is_setup_context_defined(cls.setup_context) + if is_setup_ctx_defined: + args = bind_default_args(cls.forward, *args, **kwargs) + + if not torch._C._are_functorch_transforms_active(): + # See NOTE: [functorch vjp and autograd interaction] + args = _functorch.utils.unwrap_dead_wrappers(args) + return super().apply(*args, **kwargs) # type: ignore[misc] + + if not is_setup_ctx_defined: + raise RuntimeError( + "In order to use an autograd.Function with functorch transforms " + "(vmap, grad, jvp, jacrev, ...), it must override the setup_context " + "staticmethod. For more details, please see " + "https://pytorch.org/docs/main/notes/extending.func.html" + ) + + return custom_function_call(cls, *args, **kwargs) + + @staticmethod + def _compiled_autograd_key(ctx): + return (ctx._autograd_function_id,) + + +def _is_setup_context_defined(fn): + return fn != _SingleLevelFunction.setup_context + + +def once_differentiable(fn): + @functools.wraps(fn) + def wrapper(ctx, *args): + with torch.no_grad(): + outputs = fn(ctx, *args) + + if not torch.is_grad_enabled(): + return outputs + + # If any of the inputs have requires_grad=True, we force the outputs + # to have requires_grad=True but point to a grad_fn which throws an + # error message during (double) back-propagation. + # XXX: this is only an approximation of requires_grad - there's no way + # to figure out if fn didn't use ctx.saved_tensors and as a result + # some Tensors might require grad, even if no args do. + # Unfortunately, this leads to unexpected error messages ("no nodes + # require computing gradients"), but I don't have a better idea. + # These functions would raise an error in backward anyway. 
+ requires_grad = any( + isinstance(arg, torch.Tensor) and arg.requires_grad for arg in args + ) + if not requires_grad: + return outputs + + if not isinstance(outputs, tuple): + outputs = (outputs,) + + err_fn = _functions.DelayedError( + b"trying to differentiate twice a function that was marked " + b"with @once_differentiable", + len(outputs), + ) + + # Create aliases of each output that has requires_grad=True. We need + # at least one of the inputs to err_fn to require grad so that the + # output will have a grad_fn. + def fake_requires_grad(var): + if var is not None: + var = var.detach() + var.requires_grad = True + return var + + return err_fn(*[fake_requires_grad(v) for v in outputs]) + + return wrapper + + +class InplaceFunction(Function): + r""" + This class is here only for backward compatibility reasons. + Use :class:`Function` instead of this for any new use case. + """ + + def __init__(self, inplace=False): + super().__init__() + self.inplace = inplace + + +def _nested_map(condition, fn, condition_msg=None): + def _map(obj): + if condition(obj): + return fn(obj) + elif obj is None: + return None + elif isinstance(obj, (list, tuple)): + mapped = (_map(x) for x in obj) + if hasattr(obj, "_fields"): + # obj is namedtuple + return type(obj)(*mapped) + return type(obj)(mapped) + elif isinstance(obj, dict): + return {x: _map(obj[x]) for x in obj} + else: + raise ValueError( + "Auto nesting doesn't know how to process " + "an input object of type " + + torch.typename(obj) + + ( + ". Accepted types: " + condition_msg + ", or lists/tuples of them" + if condition_msg + else "" + ) + ) + + return _map + + +def _jit_unwrap_structured(obj): + if hasattr(obj, "_jit_unwrap"): + return obj._jit_unwrap() + return obj + + +def _iter_filter(condition, allow_unknown=False, condition_msg=None, conversion=None): + def _iter(obj): + if conversion is not None: + obj = conversion(obj) + if condition(obj): + yield obj + elif obj is None: + return + elif isinstance(obj, (list, tuple)): + for o in obj: + yield from _iter(o) + elif isinstance(obj, dict): + # We only accept primitive key types, so we needn't inspect them + for o in obj.values(): + yield from _iter(o) + elif allow_unknown: + yield obj + else: + raise ValueError( + "Auto nesting doesn't know how to process " + "an input object of type " + + torch.typename(obj) + + ( + ". 
Accepted types: " + condition_msg + ", or lists/tuples of them" + if condition_msg + else "" + ) + ) + + return _iter + + +def _unflatten(input, proto): + # unflatten a list or tuple input into a nested list/tuple structure + # specified by proto + def unflatten_helper(input, proto): + res: List[Optional[torch.Tensor]] = [] + if hasattr(proto, "_jit_wrap"): + return proto._jit_wrap(input) + if not isinstance(proto, (list, tuple)): + return input[0], input[1:] + for e in proto: + if e is None: + res.append(e) + else: + res_e, input = unflatten_helper(input, e) + res.append(res_e) + return type(proto)(res), input + + return unflatten_helper(input, proto)[0] + + +_iter_jit_values = _iter_filter( + lambda o: o is None or isinstance(o, torch._C.Value), + condition_msg="jit's Values or None", +) +_iter_tensors = _iter_filter( + lambda x: isinstance(x, torch.Tensor), + condition_msg="Tensors", + conversion=_jit_unwrap_structured, +) +_iter_tensors_permissive = _iter_filter( + lambda x: isinstance(x, torch.Tensor), + allow_unknown=True, + condition_msg="Tensors (permissive)", +) +_iter_None_tensors = _iter_filter( + lambda o: o is None or isinstance(o, torch.Tensor), condition_msg="Tensors or None" +) +_map_tensor_data = _nested_map( + lambda x: isinstance(x, torch.Tensor), lambda o: o.data, condition_msg="Tensors" +) + + +class NestedIOFunction(Function): + r""" + This class is here only for backward compatibility reasons. + Use :class:`Function` instead of this for any new use case. + """ + # The 'type: ignore' statements are needed here because these functions are declared as '@staticmethod' in the + # superclass (Function) but are instance methods here, which mypy reports as incompatible. + + def _do_forward(self, *input): + self._nested_input = input + flat_input = tuple(_iter_tensors(input)) + flat_output = super()._do_forward(*flat_input) # type: ignore[misc] + nested_output = self._nested_output + nested_tensors = _unflatten(flat_output, self._nested_output) + return nested_tensors + + def _do_backward(self, gradients, retain_variables): + self.retain_variables = retain_variables + result = super()._do_backward(gradients, retain_variables) # type: ignore[misc] + if not retain_variables: + del self._nested_output + del self._to_save_nested + return result + + def backward(self, *gradients: Any) -> Any: # type: ignore[override] + r""" + Shared backward utility. + """ + nested_gradients = _unflatten(gradients, self._nested_output) + result = self.backward_extended(*nested_gradients) # type: ignore[func-returns-value] + return tuple(_iter_None_tensors(result)) + + __call__ = _do_forward + + def forward(self, *args: Any) -> Any: # type: ignore[override] + r""" + Shared forward utility. + """ + nested_tensors = _map_tensor_data(self._nested_input) + result = self.forward_extended(*nested_tensors) # type: ignore[func-returns-value] + del self._nested_input + self._nested_output = result + return tuple(_iter_tensors(result)) + + def save_for_backward(self, *args: Any) -> None: + r""" + See :meth:`Function.save_for_backward`. + """ + self.to_save = tuple(_iter_tensors(args)) + self._to_save_nested = args + + @property + def saved_tensors(self): + r""" + See :meth:`Function.saved_tensors`. + """ + flat_tensors = super().saved_tensors # type: ignore[misc] + return _unflatten(flat_tensors, self._to_save_nested) + + def mark_dirty(self, *args: Any, **kwargs: Any) -> None: + r""" + See :meth:`Function.mark_dirty`. 
+ """ + self.dirty_tensors = tuple(_iter_tensors((args, kwargs))) + + def mark_non_differentiable(self, *args: Any, **kwargs: Any) -> None: + r""" + See :meth:`Function.mark_non_differentiable`. + """ + self.non_differentiable = tuple(_iter_tensors((args, kwargs))) + + def forward_extended(self, *input: Any) -> None: + r""" + User defined forward. + """ + raise NotImplementedError + + def backward_extended(self, *grad_output: Any) -> None: + r""" + User defined backward. + """ + raise NotImplementedError diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/functional.py b/parrot/lib/python3.10/site-packages/torch/autograd/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..8cf3955a69278f02e4baf73de1bebf406081161a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/functional.py @@ -0,0 +1,1183 @@ +# mypy: allow-untyped-defs +from typing import List, Tuple + +import torch +from torch._vmap_internals import _vmap +from . import forward_ad as fwAD + +__all__ = ["vjp", "jvp", "jacobian", "hessian", "hvp", "vhp"] + +# Utility functions + + +def _as_tuple_nocheck(x): + if isinstance(x, tuple): + return x + elif isinstance(x, list): + return tuple(x) + else: + return (x,) + + +def _as_tuple(inp, arg_name=None, fn_name=None): + # Ensures that inp is a tuple of Tensors + # Returns whether or not the original inp was a tuple and the tupled version of the input + if arg_name is None and fn_name is None: + return _as_tuple_nocheck(inp) + + is_inp_tuple = True + if not isinstance(inp, tuple): + inp = (inp,) + is_inp_tuple = False + + for i, el in enumerate(inp): + if not isinstance(el, torch.Tensor): + if is_inp_tuple: + raise TypeError( + f"The {arg_name} given to {fn_name} must be either a Tensor or a tuple of Tensors but the" + f" value at index {i} has type {type(el)}." + ) + else: + raise TypeError( + f"The {arg_name} given to {fn_name} must be either a Tensor or a tuple of Tensors but the" + f" given {arg_name} has type {type(el)}." + ) + + return is_inp_tuple, inp + + +def _tuple_postprocess(res, to_unpack): + # Unpacks a potentially nested tuple of Tensors + # to_unpack should be a single boolean or a tuple of two booleans. + # It is used to: + # - invert _as_tuple when res should match the inp given to _as_tuple + # - optionally remove nesting of two tuples created by multiple calls to _as_tuple + if isinstance(to_unpack, tuple): + assert len(to_unpack) == 2 + if not to_unpack[1]: + res = tuple(el[0] for el in res) + if not to_unpack[0]: + res = res[0] + else: + if not to_unpack: + res = res[0] + return res + + +def _grad_preprocess(inputs, create_graph, need_graph): + # Preprocess the inputs to make sure they require gradient + # inputs is a tuple of Tensors to preprocess + # create_graph specifies if the user wants gradients to flow back to the Tensors in inputs + # need_graph specifies if we internally want gradients to flow back to the Tensors in res + # Note that we *always* create a new Tensor object to be able to see the difference between + # inputs given as arguments and the same Tensors automatically captured by the user function. 
+ # Check this issue for more details on how that can happen: https://github.com/pytorch/pytorch/issues/32576 + res = [] + for inp in inputs: + if create_graph and inp.requires_grad: + # Create at least a new Tensor object in a differentiable way + if not inp.is_sparse: + # Use .view_as() to get a shallow copy + res.append(inp.view_as(inp)) + else: + # We cannot use view for sparse Tensors so we clone + res.append(inp.clone()) + else: + res.append(inp.detach().requires_grad_(need_graph)) + return tuple(res) + + +def _grad_postprocess(inputs, create_graph): + # Postprocess the generated Tensors to avoid returning Tensors with history when the user did not + # request it. + if isinstance(inputs[0], torch.Tensor): + if not create_graph: + return tuple(inp.detach() for inp in inputs) + else: + return inputs + else: + return tuple(_grad_postprocess(inp, create_graph) for inp in inputs) + + +def _validate_v(v, other, is_other_tuple): + # This assumes that other is the correct shape, and v should match + # Both are assumed to be tuples of Tensors + if len(other) != len(v): + if is_other_tuple: + raise RuntimeError( + f"v is a tuple of invalid length: should be {len(other)} but got {len(v)}." + ) + else: + raise RuntimeError("The given v should contain a single Tensor.") + + for idx, (el_v, el_other) in enumerate(zip(v, other)): + if el_v.size() != el_other.size(): + prepend = "" + if is_other_tuple: + prepend = f"Entry {idx} in " + raise RuntimeError( + f"{prepend}v has invalid size: should be {el_other.size()} but got {el_v.size()}." + ) + + +def _check_requires_grad(inputs, input_type, strict): + # Used to make all the necessary checks to raise nice errors in strict mode. + if not strict: + return + + if input_type not in ["outputs", "grad_inputs", "jacobian", "hessian"]: + raise RuntimeError("Invalid input_type to _check_requires_grad") + for i, inp in enumerate(inputs): + if inp is None: + # This can only be reached for grad_inputs. + raise RuntimeError( + f"The output of the user-provided function is independent of input {i}." + " This is not allowed in strict mode." + ) + if not inp.requires_grad: + if input_type == "hessian": + raise RuntimeError( + f"The hessian of the user-provided function with respect to input {i}" + " is independent of the input. This is not allowed in strict mode." + " You should ensure that your function is thrice differentiable and that" + " the hessian depends on the inputs." + ) + elif input_type == "jacobian": + raise RuntimeError( + "While computing the hessian, found that the jacobian of the user-provided" + f" function with respect to input {i} is independent of the input. This is not" + " allowed in strict mode. You should ensure that your function is twice" + " differentiable and that the jacobian depends on the inputs (this would be" + " violated by a linear function for example)." + ) + elif input_type == "grad_inputs": + raise RuntimeError( + f"The gradient with respect to input {i} is independent of the inputs of the" + " user-provided function. This is not allowed in strict mode." + ) + else: + raise RuntimeError( + f"Output {i} of the user-provided function does not require gradients." + " The outputs must be computed in a differentiable manner from the input" + " when running in strict mode." + ) + + +def _autograd_grad( + outputs, + inputs, + grad_outputs=None, + create_graph=False, + retain_graph=None, + is_grads_batched=False, +): + # Version of autograd.grad that accepts `None` in outputs and do not compute gradients for them. 
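[Editor's note: a short sketch of the `view_as` trick used by `_grad_preprocess` above. It shows why `inp.view_as(inp)` is a differentiable way to obtain a new Tensor object, so inputs passed explicitly can be told apart from the same tensors captured by closure inside the user function.]

import torch

x = torch.randn(3, requires_grad=True)
y = x.view_as(x)
assert y is not x                        # a distinct Tensor object...
assert y.data_ptr() == x.data_ptr()      # ...sharing the same storage
assert y.requires_grad and y.grad_fn is not None  # still on the autograd graph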
+ # This has the extra constraint that inputs has to be a tuple + assert isinstance(outputs, tuple) + if grad_outputs is None: + grad_outputs = (None,) * len(outputs) + assert isinstance(grad_outputs, tuple) + assert len(outputs) == len(grad_outputs) + + new_outputs: Tuple[torch.Tensor, ...] = tuple() + new_grad_outputs: Tuple[torch.Tensor, ...] = tuple() + for out, grad_out in zip(outputs, grad_outputs): + if out is not None and out.requires_grad: + new_outputs += (out,) + new_grad_outputs += (grad_out,) + + if len(new_outputs) == 0: + # No differentiable output, we don't need to call the autograd engine + return (None,) * len(inputs) + else: + return torch.autograd.grad( + new_outputs, + inputs, + new_grad_outputs, + allow_unused=True, + create_graph=create_graph, + retain_graph=retain_graph, + is_grads_batched=is_grads_batched, + ) + + +def _fill_in_zeros(grads, refs, strict, create_graph, stage): + # Used to detect None in the grads and depending on the flags, either replace them + # with Tensors full of 0s of the appropriate size based on the refs or raise an error. + # strict and create graph allow us to detect when it is appropriate to raise an error + # stage gives us information of which backward call we consider to give good error message + if stage not in ["back", "back_trick", "double_back", "double_back_trick"]: + raise RuntimeError(f"Invalid stage argument '{stage}' to _fill_in_zeros") + + res: Tuple[torch.Tensor, ...] = tuple() + for i, grads_i in enumerate(grads): + if grads_i is None: + if strict: + if stage == "back": + raise RuntimeError( + "The output of the user-provided function is independent of " + f"input {i}. This is not allowed in strict mode." + ) + elif stage == "back_trick": + raise RuntimeError( + f"The gradient with respect to the input is independent of entry {i}" + " in the grad_outputs when using the double backward trick to compute" + " forward mode gradients. This is not allowed in strict mode." + ) + elif stage == "double_back": + raise RuntimeError( + "The jacobian of the user-provided function is independent of " + f"input {i}. This is not allowed in strict mode." + ) + else: + raise RuntimeError( + "The hessian of the user-provided function is independent of " + f"entry {i} in the grad_jacobian. This is not allowed in strict " + "mode as it prevents from using the double backward trick to " + "replace forward mode AD." + ) + + grads_i = torch.zeros_like(refs[i]) + else: + if strict and create_graph and not grads_i.requires_grad: + if "double" not in stage: + raise RuntimeError( + "The jacobian of the user-provided function is independent of " + f"input {i}. This is not allowed in strict mode when create_graph=True." + ) + else: + raise RuntimeError( + "The hessian of the user-provided function is independent of " + f"input {i}. This is not allowed in strict mode when create_graph=True." + ) + + res += (grads_i,) + + return res + + +# Public API + + +def vjp(func, inputs, v=None, create_graph=False, strict=False): + r"""Compute the dot product between a vector ``v`` and the Jacobian of the given function at the point given by the inputs. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a tuple of Tensors or a Tensor. + inputs (tuple of Tensors or Tensor): inputs to the function ``func``. + v (tuple of Tensors or Tensor): The vector for which the vector + Jacobian product is computed. Must be the same size as the output + of ``func``. 
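[Editor's note: a sketch of the strict-mode behaviour implemented by `_check_requires_grad` and `_fill_in_zeros` above, demonstrated through the public `vjp` documented here. When an output ignores an input, the non-strict path returns zeros (the correct mathematical value) while `strict=True` raises.]

import torch
from torch.autograd.functional import vjp

def f(x, y):           # y is never used
    return x.sum()

inputs = (torch.rand(2), torch.rand(2))
_, grads = vjp(f, inputs)                        # v may be None: scalar output
assert torch.equal(grads[1], torch.zeros(2))     # zero-filled by _fill_in_zeros
try:
    vjp(f, inputs, strict=True)
except RuntimeError as e:
    print("strict mode:", e)                     # independent-input error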
This argument is optional when the output of ``func`` + contains a single element and (if it is not provided) will be set + as a Tensor containing a single ``1``. + create_graph (bool, optional): If ``True``, both the output and result + will be computed in a differentiable way. Note that when ``strict`` + is ``False``, the result can not require gradients or be + disconnected from the inputs. Defaults to ``False``. + strict (bool, optional): If ``True``, an error will be raised when we + detect that there exists an input such that all the outputs are + independent of it. If ``False``, we return a Tensor of zeros as the + vjp for said inputs, which is the expected mathematical value. + Defaults to ``False``. + + Returns: + output (tuple): tuple with: + func_output (tuple of Tensors or Tensor): output of ``func(inputs)`` + + vjp (tuple of Tensors or Tensor): result of the dot product with + the same shape as the inputs. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def exp_reducer(x): + ... return x.exp().sum(dim=1) + >>> inputs = torch.rand(4, 4) + >>> v = torch.ones(4) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> vjp(exp_reducer, inputs, v) + (tensor([5.7817, 7.2458, 5.7830, 6.7782]), + tensor([[1.4458, 1.3962, 1.3042, 1.6354], + [2.1288, 1.0652, 1.5483, 2.5035], + [2.2046, 1.1292, 1.1432, 1.3059], + [1.3225, 1.6652, 1.7753, 2.0152]])) + + >>> vjp(exp_reducer, inputs, v, create_graph=True) + (tensor([5.7817, 7.2458, 5.7830, 6.7782], grad_fn=), + tensor([[1.4458, 1.3962, 1.3042, 1.6354], + [2.1288, 1.0652, 1.5483, 2.5035], + [2.2046, 1.1292, 1.1432, 1.3059], + [1.3225, 1.6652, 1.7753, 2.0152]], grad_fn=)) + + >>> def adder(x, y): + ... return 2 * x + 3 * y + >>> inputs = (torch.rand(2), torch.rand(2)) + >>> v = torch.ones(2) + >>> vjp(adder, inputs, v) + (tensor([2.4225, 2.3340]), + (tensor([2., 2.]), tensor([3., 3.]))) + """ + with torch.enable_grad(): + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "vjp") + inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True) + + outputs = func(*inputs) + is_outputs_tuple, outputs = _as_tuple( + outputs, "outputs of the user-provided function", "vjp" + ) + _check_requires_grad(outputs, "outputs", strict=strict) + + if v is not None: + _, v = _as_tuple(v, "v", "vjp") + v = _grad_preprocess(v, create_graph=create_graph, need_graph=False) + _validate_v(v, outputs, is_outputs_tuple) + else: + if len(outputs) != 1 or outputs[0].nelement() != 1: + raise RuntimeError( + "The vector v can only be None if the " + "user-provided function returns " + "a single Tensor with a single element." + ) + + enable_grad = True if create_graph else torch.is_grad_enabled() + with torch.set_grad_enabled(enable_grad): + grad_res = _autograd_grad(outputs, inputs, v, create_graph=create_graph) + vjp = _fill_in_zeros(grad_res, inputs, strict, create_graph, "back") + + # Cleanup objects and return them to the user + outputs = _grad_postprocess(outputs, create_graph) + vjp = _grad_postprocess(vjp, create_graph) + + return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess( + vjp, is_inputs_tuple + ) + + +def jvp(func, inputs, v=None, create_graph=False, strict=False): + r"""Compute the dot product between the Jacobian of the given function at the point given by the inputs and a vector ``v``. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a tuple of Tensors or a Tensor. + inputs (tuple of Tensors or Tensor): inputs to the function ``func``. 
+ v (tuple of Tensors or Tensor): The vector for which the Jacobian + vector product is computed. Must be the same size as the input of + ``func``. This argument is optional when the input to ``func`` + contains a single element and (if it is not provided) will be set + as a Tensor containing a single ``1``. + create_graph (bool, optional): If ``True``, both the output and result + will be computed in a differentiable way. Note that when ``strict`` + is ``False``, the result can not require gradients or be + disconnected from the inputs. Defaults to ``False``. + strict (bool, optional): If ``True``, an error will be raised when we + detect that there exists an input such that all the outputs are + independent of it. If ``False``, we return a Tensor of zeros as the + jvp for said inputs, which is the expected mathematical value. + Defaults to ``False``. + + Returns: + output (tuple): tuple with: + func_output (tuple of Tensors or Tensor): output of ``func(inputs)`` + + jvp (tuple of Tensors or Tensor): result of the dot product with + the same shape as the output. + + Note: + ``autograd.functional.jvp`` computes the jvp by using the backward of + the backward (sometimes called the double backwards trick). This is not + the most performant way of computing the jvp. Please consider using + :func:`torch.func.jvp` or the + :ref:`low-level forward-mode AD API ` instead. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def exp_reducer(x): + ... return x.exp().sum(dim=1) + >>> inputs = torch.rand(4, 4) + >>> v = torch.ones(4, 4) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> jvp(exp_reducer, inputs, v) + (tensor([6.3090, 4.6742, 7.9114, 8.2106]), + tensor([6.3090, 4.6742, 7.9114, 8.2106])) + + >>> jvp(exp_reducer, inputs, v, create_graph=True) + (tensor([6.3090, 4.6742, 7.9114, 8.2106], grad_fn=), + tensor([6.3090, 4.6742, 7.9114, 8.2106], grad_fn=)) + + >>> def adder(x, y): + ... return 2 * x + 3 * y + >>> inputs = (torch.rand(2), torch.rand(2)) + >>> v = (torch.ones(2), torch.ones(2)) + >>> jvp(adder, inputs, v) + (tensor([2.2399, 2.5005]), + tensor([5., 5.])) + + """ + with torch.enable_grad(): + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "jvp") + inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True) + + if v is not None: + _, v = _as_tuple(v, "v", "jvp") + v = _grad_preprocess(v, create_graph=create_graph, need_graph=False) + _validate_v(v, inputs, is_inputs_tuple) + else: + if len(inputs) != 1 or inputs[0].nelement() != 1: + raise RuntimeError( + "The vector v can only be None if the input to " + "the user-provided function is a single Tensor " + "with a single element." + ) + + outputs = func(*inputs) + is_outputs_tuple, outputs = _as_tuple( + outputs, "outputs of the user-provided function", "jvp" + ) + _check_requires_grad(outputs, "outputs", strict=strict) + # The backward is linear so the value of grad_outputs is not important as + # it won't appear in the double backward graph. We only need to ensure that + # it does not contain inf or nan. 
+ grad_outputs = tuple( + torch.zeros_like(out, requires_grad=True) for out in outputs + ) + + grad_inputs = _autograd_grad(outputs, inputs, grad_outputs, create_graph=True) + _check_requires_grad(grad_inputs, "grad_inputs", strict=strict) + + if create_graph: + with torch.enable_grad(): + grad_res = _autograd_grad( + grad_inputs, grad_outputs, v, create_graph=create_graph + ) + jvp = _fill_in_zeros(grad_res, outputs, strict, create_graph, "back_trick") + else: + grad_res = _autograd_grad( + grad_inputs, grad_outputs, v, create_graph=create_graph + ) + jvp = _fill_in_zeros(grad_res, outputs, strict, create_graph, "back_trick") + + # Cleanup objects and return them to the user + outputs = _grad_postprocess(outputs, create_graph) + jvp = _grad_postprocess(jvp, create_graph) + + return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess( + jvp, is_outputs_tuple + ) + + +def _construct_standard_basis_for( + tensors: Tuple[torch.Tensor, ...], tensor_numels: Tuple[int, ...] +) -> Tuple[torch.Tensor, ...]: + # This function: + # - constructs a N=sum(tensor_numels) standard basis. i.e. an NxN identity matrix. + # - Splits the identity matrix into chunks with each chunk size determined by `tensor_numels`. + # - Each chunk corresponds to one tensor. The chunk has the same dtype and + # device as the tensor + # + # For example, with tensor_numels = [1, 2, 1], this function returns: + # ( tensor([[1], tensor([[0, 0], tensor([[0], + # [0], [1, 0], [0], + # [0], [0, 1], [0], + # [0]]) , [0, 0]]) , [1]]) ) + # + # Precondition: tensor_numels == tuple(tensor.numel() for tensor in tensors) + # Precondition: tensors always has at least one element. + # + # See NOTE: [Computing jacobian with vmap and grad for multiple tensors] + # for context behind this function. All the pre-conditions are guarded for + # in torch.autograd.functional.jacobian. + assert len(tensors) == len(tensor_numels) + assert len(tensors) > 0 + total_numel = sum(tensor_numels) + chunks = tuple( + tensor.new_zeros(total_numel, tensor_numel) + for tensor, tensor_numel in zip(tensors, tensor_numels) + ) + diag_start_idx = 0 + for chunk, numel in zip(chunks, tensor_numels): + chunk.diagonal(diag_start_idx).fill_(1) + diag_start_idx -= numel + return chunks + + +def _jacfwd(func, inputs, strict=False, vectorize=False): + if strict: + raise RuntimeError( + "torch.autograd.functional.jacobian: `strict=True` " + 'and `strategy="forward-mode"` are not supported together (yet). ' + "Please either set `strict=False` or " + '`strategy="reverse-mode"`.' 
+ ) + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "jacobian") + output_info = [] + + if vectorize: + # See NOTE: [Computing jacobian with vmap and grad for multiple outputs] + input_numels = tuple(input.numel() for input in inputs) + + # Step 1: Prepare tangents + tangents = _construct_standard_basis_for(inputs, input_numels) + + # Step 2: Compute vmap over computation with dual tensors + def jvp(tangents): + with fwAD.dual_level(): + dual_inputs = tuple( + fwAD.make_dual(input, tangent.view_as(input)) + for input, tangent in zip(inputs, tangents) + ) + _is_outputs_tuple, dual_outputs = _as_tuple( + func(*dual_inputs), "outputs" + ) + output_info.append(_is_outputs_tuple) + jv = [] + primal_outs = [] + for dual_out in dual_outputs: + primal, tangent = fwAD.unpack_dual(dual_out) + primal_outs.append(primal) + if tangent is not None: + jv.append(tangent) + else: + jv.append(torch.zeros_like(primal)) + output_info.append(primal_outs) + return tuple(jv) + + outputs_before_split = _vmap(jvp)(tangents) + is_outputs_tuple, outputs = output_info + # Step 3: for each of the output tangents, split along dim 0 + jacobian_input_output = [] + for jac_output_i, output_i in zip(outputs_before_split, outputs): + jacobian_output_i_output = [] + for jac, input_j in zip(jac_output_i.split(input_numels, dim=0), inputs): + # We need to transpose the Jacobian because in forward AD, the + # batch dimension represents that of the inputs + jacobian_input_i_output_j = jac.permute(*range(1, jac.ndim), 0).reshape( + (*output_i.shape, *input_j.shape) + ) # noqa: C409 + + jacobian_output_i_output.append(jacobian_input_i_output_j) + jacobian_input_output.append(jacobian_output_i_output) + + # Omit [Step 4] because everything is already transposed w/ forward AD + return _tuple_postprocess( + jacobian_input_output, (is_outputs_tuple, is_inputs_tuple) + ) + else: + raise NotImplementedError( + "Computing Jacobian using forward-AD or forward-over-reverse Hessian is" + "only implemented for `vectorize=True`." + ) + + +def jacobian( + func, + inputs, + create_graph=False, + strict=False, + vectorize=False, + strategy="reverse-mode", +): + r"""Compute the Jacobian of a given function. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a tuple of Tensors or a Tensor. + inputs (tuple of Tensors or Tensor): inputs to the function ``func``. + create_graph (bool, optional): If ``True``, the Jacobian will be + computed in a differentiable manner. Note that when ``strict`` is + ``False``, the result can not require gradients or be disconnected + from the inputs. Defaults to ``False``. + strict (bool, optional): If ``True``, an error will be raised when we + detect that there exists an input such that all the outputs are + independent of it. If ``False``, we return a Tensor of zeros as the + jacobian for said inputs, which is the expected mathematical value. + Defaults to ``False``. + vectorize (bool, optional): This feature is experimental. + Please consider using :func:`torch.func.jacrev` or + :func:`torch.func.jacfwd` instead if you are looking for something + less experimental and more performant. + When computing the jacobian, usually we invoke + ``autograd.grad`` once per row of the jacobian. If this flag is + ``True``, we perform only a single ``autograd.grad`` call with + ``batched_grad=True`` which uses the vmap prototype feature. + Though this should lead to performance improvements in many cases, + because this feature is still experimental, there may be performance + cliffs. 
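[Editor's note: a small sketch of what `_construct_standard_basis_for` (defined above) produces for its own documented example, `tensor_numels = (1, 2, 1)`: an N x N identity matrix split column-block-wise, one chunk per tensor.]

import torch

tensors = (torch.empty(1), torch.empty(2), torch.empty(1))
numels = tuple(t.numel() for t in tensors)        # (1, 2, 1), N = 4
chunks = tuple(t.new_zeros(sum(numels), n) for t, n in zip(tensors, numels))
start = 0
for chunk, n in zip(chunks, numels):
    chunk.diagonal(start).fill_(1)                # walk the diagonal downward
    start -= n
# Concatenating the chunks along dim 1 recovers the identity matrix.
assert torch.equal(torch.cat(chunks, dim=1), torch.eye(4))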
See :func:`torch.autograd.grad`'s ``batched_grad`` parameter for + more information. + strategy (str, optional): Set to ``"forward-mode"`` or ``"reverse-mode"`` to + determine whether the Jacobian will be computed with forward or reverse + mode AD. Currently, ``"forward-mode"`` requires ``vectorized=True``. + Defaults to ``"reverse-mode"``. If ``func`` has more outputs than + inputs, ``"forward-mode"`` tends to be more performant. Otherwise, + prefer to use ``"reverse-mode"``. + + Returns: + Jacobian (Tensor or nested tuple of Tensors): if there is a single + input and output, this will be a single Tensor containing the + Jacobian for the linearized inputs and output. If one of the two is + a tuple, then the Jacobian will be a tuple of Tensors. If both of + them are tuples, then the Jacobian will be a tuple of tuple of + Tensors where ``Jacobian[i][j]`` will contain the Jacobian of the + ``i``\th output and ``j``\th input and will have as size the + concatenation of the sizes of the corresponding output and the + corresponding input and will have same dtype and device as the + corresponding input. If strategy is ``forward-mode``, the dtype will be + that of the output; otherwise, the input. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def exp_reducer(x): + ... return x.exp().sum(dim=1) + >>> inputs = torch.rand(2, 2) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> jacobian(exp_reducer, inputs) + tensor([[[1.4917, 2.4352], + [0.0000, 0.0000]], + [[0.0000, 0.0000], + [2.4369, 2.3799]]]) + + >>> jacobian(exp_reducer, inputs, create_graph=True) + tensor([[[1.4917, 2.4352], + [0.0000, 0.0000]], + [[0.0000, 0.0000], + [2.4369, 2.3799]]], grad_fn=) + + >>> def exp_adder(x, y): + ... return 2 * x.exp() + 3 * y + >>> inputs = (torch.rand(2), torch.rand(2)) + >>> jacobian(exp_adder, inputs) + (tensor([[2.8052, 0.0000], + [0.0000, 3.3963]]), + tensor([[3., 0.], + [0., 3.]])) + """ + assert strategy in ("forward-mode", "reverse-mode"), ( + 'Expected strategy to be either "forward-mode" or "reverse-mode". Hint: If your ' + 'function has more outputs than inputs, "forward-mode" tends to be more performant. ' + 'Otherwise, prefer to use "reverse-mode".' + ) + if strategy == "forward-mode": + if create_graph: + raise NotImplementedError( + "torch.autograd.functional.jacobian: `create_graph=True` " + 'and `strategy="forward-mode"` are not supported together (yet). ' + "Please either set `create_graph=False` or " + '`strategy="reverse-mode"`.' + ) + return _jacfwd(func, inputs, strict, vectorize) + + with torch.enable_grad(): + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "jacobian") + inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True) + + outputs = func(*inputs) + is_outputs_tuple, outputs = _as_tuple( + outputs, "outputs of the user-provided function", "jacobian" + ) + _check_requires_grad(outputs, "outputs", strict=strict) + + if vectorize: + if strict: + raise RuntimeError( + "torch.autograd.functional.jacobian: `strict=True` " + "and `vectorized=True` are not supported together. " + "Please either set `strict=False` or " + "`vectorize=False`." + ) + # NOTE: [Computing jacobian with vmap and grad for multiple outputs] + # + # Let's consider f(x) = (x**2, x.sum()) and let x = torch.randn(3). + # It turns out we can compute the jacobian of this function with a single + # call to autograd.grad by using vmap over the correct grad_outputs. 
+ # + # Firstly, one way to compute the jacobian is to stack x**2 and x.sum() + # into a 4D vector. E.g., use g(x) = torch.stack([x**2, x.sum()]) + # + # To get the first row of the jacobian, we call + # >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([1, 0, 0, 0])) + # To get the 2nd row of the jacobian, we call + # >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([0, 1, 0, 0])) + # and so on. + # + # Using vmap, we can vectorize all 4 of these computations into one by + # passing the standard basis for R^4 as the grad_output. + # vmap(partial(autograd.grad, g(x), x))(torch.eye(4)). + # + # Now, how do we compute the jacobian *without stacking the output*? + # We can just split the standard basis across the outputs. So to + # compute the jacobian of f(x), we'd use + # >>> autograd.grad(f(x), x, grad_outputs=_construct_standard_basis_for(...)) + # The grad_outputs looks like the following: + # ( torch.tensor([[1, 0, 0], + # [0, 1, 0], + # [0, 0, 1], + # [0, 0, 0]]), + # torch.tensor([[0], + # [0], + # [0], + # [1]]) ) + # + # But we're not done yet! + # >>> vmap(partial(autograd.grad(f(x), x, grad_outputs=...))) + # returns a Tensor of shape [4, 3]. We have to remember to split the + # jacobian of shape [4, 3] into two: + # - one of shape [3, 3] for the first output + # - one of shape [ 3] for the second output + + # Step 1: Construct grad_outputs by splitting the standard basis + output_numels = tuple(output.numel() for output in outputs) + grad_outputs = _construct_standard_basis_for(outputs, output_numels) + flat_outputs = tuple(output.reshape(-1) for output in outputs) + + # Step 2: Call vmap + autograd.grad + def vjp(grad_output): + vj = list( + _autograd_grad( + flat_outputs, + inputs, + grad_output, + create_graph=create_graph, + is_grads_batched=True, + ) + ) + for el_idx, vj_el in enumerate(vj): + if vj_el is not None: + continue + vj[el_idx] = torch.zeros_like(inputs[el_idx]).expand( + (sum(output_numels),) + inputs[el_idx].shape + ) + return tuple(vj) + + jacobians_of_flat_output = vjp(grad_outputs) + + # Step 3: The returned jacobian is one big tensor per input. In this step, + # we split each Tensor by output. + jacobian_input_output = [] + for jac_input_i, input_i in zip(jacobians_of_flat_output, inputs): + jacobian_input_i_output = [] + for jac, output_j in zip( + jac_input_i.split(output_numels, dim=0), outputs + ): + jacobian_input_i_output_j = jac.view(output_j.shape + input_i.shape) + jacobian_input_i_output.append(jacobian_input_i_output_j) + jacobian_input_output.append(jacobian_input_i_output) + + # Step 4: Right now, `jacobian` is a List[List[Tensor]]. + # The outer List corresponds to the number of inputs, + # the inner List corresponds to the number of outputs. + # We need to exchange the order of these and convert to tuples + # before returning. + jacobian_output_input = tuple(zip(*jacobian_input_output)) + + jacobian_output_input = _grad_postprocess( + jacobian_output_input, create_graph + ) + return _tuple_postprocess( + jacobian_output_input, (is_outputs_tuple, is_inputs_tuple) + ) + + jacobian: Tuple[torch.Tensor, ...] 
= tuple() + + for i, out in enumerate(outputs): + # mypy complains that expression and variable have different types due to the empty list + jac_i: Tuple[List[torch.Tensor]] = tuple([] for _ in range(len(inputs))) # type: ignore[assignment] + for j in range(out.nelement()): + vj = _autograd_grad( + (out.reshape(-1)[j],), + inputs, + retain_graph=True, + create_graph=create_graph, + ) + + for el_idx, (jac_i_el, vj_el, inp_el) in enumerate( + zip(jac_i, vj, inputs) + ): + if vj_el is not None: + if strict and create_graph and not vj_el.requires_grad: + msg = ( + "The jacobian of the user-provided function is " + f"independent of input {i}. This is not allowed in " + "strict mode when create_graph=True." + ) + raise RuntimeError(msg) + jac_i_el.append(vj_el) + else: + if strict: + msg = ( + f"Output {i} of the user-provided function is " + f"independent of input {el_idx}. This is not allowed in " + "strict mode." + ) + raise RuntimeError(msg) + jac_i_el.append(torch.zeros_like(inp_el)) + + jacobian += ( + tuple( + torch.stack(jac_i_el, dim=0).view( + out.size() + inputs[el_idx].size() # type: ignore[operator] + ) + for (el_idx, jac_i_el) in enumerate(jac_i) + ), + ) + + jacobian = _grad_postprocess(jacobian, create_graph) + + return _tuple_postprocess(jacobian, (is_outputs_tuple, is_inputs_tuple)) + + +def hessian( + func, + inputs, + create_graph=False, + strict=False, + vectorize=False, + outer_jacobian_strategy="reverse-mode", +): + r"""Compute the Hessian of a given scalar function. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a Tensor with a single element. + inputs (tuple of Tensors or Tensor): inputs to the function ``func``. + create_graph (bool, optional): If ``True``, the Hessian will be computed in + a differentiable manner. Note that when ``strict`` is ``False``, the result can not + require gradients or be disconnected from the inputs. + Defaults to ``False``. + strict (bool, optional): If ``True``, an error will be raised when we detect that there exists an input + such that all the outputs are independent of it. If ``False``, we return a Tensor of zeros as the + hessian for said inputs, which is the expected mathematical value. + Defaults to ``False``. + vectorize (bool, optional): This feature is experimental. + Please consider using :func:`torch.func.hessian` + instead if you are looking for something less experimental and more performant. + When computing the hessian, usually we invoke + ``autograd.grad`` once per row of the hessian. If this flag is + ``True``, we use the vmap prototype feature as the backend to + vectorize calls to ``autograd.grad`` so we only invoke it once + instead of once per row. This should lead to performance + improvements in many use cases, however, due to this feature + being incomplete, there may be performance cliffs. Please + use `torch._C._debug_only_display_vmap_fallback_warnings(True)` + to show any performance warnings and file us issues if + warnings exist for your use case. Defaults to ``False``. + outer_jacobian_strategy (str, optional): The Hessian is computed by + computing the Jacobian of a Jacobian. The inner Jacobian is always + computed in reverse-mode AD. Setting strategy to ``"forward-mode"`` + or ``"reverse-mode"`` determines whether the outer Jacobian will be + computed with forward or reverse mode AD. Currently, computing the outer + Jacobian in ``"forward-mode"`` requires ``vectorized=True``. Defaults + to ``"reverse-mode"``. 
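[Editor's note: a sketch of the jacobian-of-a-jacobian composition that `hessian` performs, written against the public `jacobian` above. The inner call uses `create_graph=True` so the outer call can differentiate through it; the result matches `hessian` itself.]

import torch
from torch.autograd.functional import jacobian, hessian

def f(x):
    return (x ** 3).sum()

x = torch.rand(2)
inner = lambda inp: jacobian(f, inp, create_graph=True)  # differentiable jacobian
hess = jacobian(inner, x)
assert torch.allclose(hess, hessian(f, x))               # both are diag(6 * x)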
+ + Returns: + Hessian (Tensor or a tuple of tuple of Tensors): if there is a single input, + this will be a single Tensor containing the Hessian for the input. + If it is a tuple, then the Hessian will be a tuple of tuples where + ``Hessian[i][j]`` will contain the Hessian of the ``i``\th input + and ``j``\th input with size the sum of the size of the ``i``\th input plus + the size of the ``j``\th input. ``Hessian[i][j]`` will have the same + dtype and device as the corresponding ``i``\th input. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def pow_reducer(x): + ... return x.pow(3).sum() + >>> inputs = torch.rand(2, 2) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> hessian(pow_reducer, inputs) + tensor([[[[5.2265, 0.0000], + [0.0000, 0.0000]], + [[0.0000, 4.8221], + [0.0000, 0.0000]]], + [[[0.0000, 0.0000], + [1.9456, 0.0000]], + [[0.0000, 0.0000], + [0.0000, 3.2550]]]]) + + >>> hessian(pow_reducer, inputs, create_graph=True) + tensor([[[[5.2265, 0.0000], + [0.0000, 0.0000]], + [[0.0000, 4.8221], + [0.0000, 0.0000]]], + [[[0.0000, 0.0000], + [1.9456, 0.0000]], + [[0.0000, 0.0000], + [0.0000, 3.2550]]]], grad_fn=) + + + >>> def pow_adder_reducer(x, y): + ... return (2 * x.pow(2) + 3 * y.pow(2)).sum() + >>> inputs = (torch.rand(2), torch.rand(2)) + >>> hessian(pow_adder_reducer, inputs) + ((tensor([[4., 0.], + [0., 4.]]), + tensor([[0., 0.], + [0., 0.]])), + (tensor([[0., 0.], + [0., 0.]]), + tensor([[6., 0.], + [0., 6.]]))) + """ + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "hessian") + assert outer_jacobian_strategy in ( + "forward-mode", + "reverse-mode", + ), 'Expected strategy to be either "forward-mode" or "reverse-mode".' + + def ensure_single_output_function(*inp): + out = func(*inp) + is_out_tuple, t_out = _as_tuple( + out, "outputs of the user-provided function", "hessian" + ) + _check_requires_grad(t_out, "outputs", strict=strict) + + if is_out_tuple or not isinstance(out, torch.Tensor): + raise RuntimeError( + "The function given to hessian should return a single Tensor" + ) + + if out.nelement() != 1: + raise RuntimeError( + "The Tensor returned by the function given to hessian should contain a single element" + ) + + return out.squeeze() + + def jac_func(*inp): + if outer_jacobian_strategy == "forward-mode": + # _grad_preprocess requires create_graph=True and input to require_grad + # or else the input will be detached + inp = tuple(t.requires_grad_(True) for t in inp) + jac = jacobian(ensure_single_output_function, inp, create_graph=True) + _check_requires_grad(jac, "jacobian", strict=strict) + return jac + + res = jacobian( + jac_func, + inputs, + create_graph=create_graph, + strict=strict, + vectorize=vectorize, + strategy=outer_jacobian_strategy, + ) + return _tuple_postprocess(res, (is_inputs_tuple, is_inputs_tuple)) + + +def vhp(func, inputs, v=None, create_graph=False, strict=False): + r"""Compute the dot product between vector ``v`` and Hessian of a given scalar function at a specified point. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a Tensor with a single element. + inputs (tuple of Tensors or Tensor): inputs to the function ``func``. + v (tuple of Tensors or Tensor): The vector for which the vector Hessian + product is computed. Must be the same size as the input of + ``func``. This argument is optional when ``func``'s input contains + a single element and (if it is not provided) will be set as a + Tensor containing a single ``1``. 
+ create_graph (bool, optional): If ``True``, both the output and result + will be computed in a differentiable way. Note that when ``strict`` + is ``False``, the result can not require gradients or be + disconnected from the inputs. + Defaults to ``False``. + strict (bool, optional): If ``True``, an error will be raised when we + detect that there exists an input such that all the outputs are + independent of it. If ``False``, we return a Tensor of zeros as the + vhp for said inputs, which is the expected mathematical value. + Defaults to ``False``. + + Returns: + output (tuple): tuple with: + func_output (tuple of Tensors or Tensor): output of ``func(inputs)`` + + vhp (tuple of Tensors or Tensor): result of the dot product with the + same shape as the inputs. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def pow_reducer(x): + ... return x.pow(3).sum() + >>> inputs = torch.rand(2, 2) + >>> v = torch.ones(2, 2) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> vhp(pow_reducer, inputs, v) + (tensor(0.5591), + tensor([[1.0689, 1.2431], + [3.0989, 4.4456]])) + >>> vhp(pow_reducer, inputs, v, create_graph=True) + (tensor(0.5591, grad_fn=), + tensor([[1.0689, 1.2431], + [3.0989, 4.4456]], grad_fn=)) + >>> def pow_adder_reducer(x, y): + ... return (2 * x.pow(2) + 3 * y.pow(2)).sum() + >>> inputs = (torch.rand(2), torch.rand(2)) + >>> v = (torch.zeros(2), torch.ones(2)) + >>> vhp(pow_adder_reducer, inputs, v) + (tensor(4.8053), + (tensor([0., 0.]), + tensor([6., 6.]))) + """ + with torch.enable_grad(): + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "vhp") + inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True) + + if v is not None: + _, v = _as_tuple(v, "v", "vhp") + v = _grad_preprocess(v, create_graph=create_graph, need_graph=False) + _validate_v(v, inputs, is_inputs_tuple) + else: + if len(inputs) != 1 or inputs[0].nelement() != 1: + raise RuntimeError( + "The vector v can only be None if the input to the user-provided function " + "is a single Tensor with a single element." + ) + outputs = func(*inputs) + is_outputs_tuple, outputs = _as_tuple( + outputs, "outputs of the user-provided function", "vhp" + ) + _check_requires_grad(outputs, "outputs", strict=strict) + + if is_outputs_tuple or not isinstance(outputs[0], torch.Tensor): + raise RuntimeError( + "The function given to vhp should return a single Tensor" + ) + + if outputs[0].nelement() != 1: + raise RuntimeError( + "The Tensor returned by the function given to vhp should contain a single element" + ) + + jac = _autograd_grad(outputs, inputs, create_graph=True) + _check_requires_grad(jac, "jacobian", strict=strict) + + enable_grad = True if create_graph else torch.is_grad_enabled() + with torch.set_grad_enabled(enable_grad): + grad_res = _autograd_grad(jac, inputs, v, create_graph=create_graph) + vhp = _fill_in_zeros(grad_res, inputs, strict, create_graph, "double_back") + + outputs = _grad_postprocess(outputs, create_graph) + vhp = _grad_postprocess(vhp, create_graph) + + return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess( + vhp, is_inputs_tuple + ) + + +def hvp(func, inputs, v=None, create_graph=False, strict=False): + r"""Compute the dot product between the scalar function's Hessian and a vector ``v`` at a specified point. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a Tensor with a single element. + inputs (tuple of Tensors or Tensor): inputs to the function ``func``. 
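[Editor's note: a sketch of the two-step reverse-mode computation `vhp` performs above: first a differentiable gradient, then a second backward against `v`. `hvp` below runs one extra double-backward pass on top of this, which is why the docstring recommends `vhp` when the function is twice continuously differentiable (the two results then agree up to transposition).]

import torch

def f(x):
    return (x ** 3).sum()

x = torch.rand(2, requires_grad=True)
v = torch.ones(2)
(jac,) = torch.autograd.grad(f(x), x, create_graph=True)  # 3 * x**2
(vhp_val,) = torch.autograd.grad(jac, x, grad_outputs=v)  # v @ H = 6 * x * v
assert torch.allclose(vhp_val, 6 * x.detach() * v)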
+ v (tuple of Tensors or Tensor): The vector for which the Hessian vector + product is computed. Must be the same size as the input of + ``func``. This argument is optional when ``func``'s input contains + a single element and (if it is not provided) will be set as a + Tensor containing a single ``1``. + create_graph (bool, optional): If ``True``, both the output and result will be + computed in a differentiable way. Note that when ``strict`` is + ``False``, the result can not require gradients or be disconnected + from the inputs. Defaults to ``False``. + strict (bool, optional): If ``True``, an error will be raised when we + detect that there exists an input such that all the outputs are + independent of it. If ``False``, we return a Tensor of zeros as the + hvp for said inputs, which is the expected mathematical value. + Defaults to ``False``. + Returns: + output (tuple): tuple with: + func_output (tuple of Tensors or Tensor): output of ``func(inputs)`` + + hvp (tuple of Tensors or Tensor): result of the dot product with + the same shape as the inputs. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def pow_reducer(x): + ... return x.pow(3).sum() + >>> inputs = torch.rand(2, 2) + >>> v = torch.ones(2, 2) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> hvp(pow_reducer, inputs, v) + (tensor(0.1448), + tensor([[2.0239, 1.6456], + [2.4988, 1.4310]])) + + >>> hvp(pow_reducer, inputs, v, create_graph=True) + (tensor(0.1448, grad_fn=), + tensor([[2.0239, 1.6456], + [2.4988, 1.4310]], grad_fn=)) + + + >>> def pow_adder_reducer(x, y): + ... return (2 * x.pow(2) + 3 * y.pow(2)).sum() + >>> inputs = (torch.rand(2), torch.rand(2)) + >>> v = (torch.zeros(2), torch.ones(2)) + >>> hvp(pow_adder_reducer, inputs, v) + (tensor(2.3030), + (tensor([0., 0.]), + tensor([6., 6.]))) + + Note: + + This function is significantly slower than `vhp` due to backward mode AD constraints. + If your functions is twice continuously differentiable, then hvp = vhp.t(). So if you + know that your function satisfies this condition, you should use vhp instead that is + much faster with the current implementation. + + """ + with torch.enable_grad(): + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "hvp") + inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True) + + if v is not None: + _, v = _as_tuple(v, "v", "hvp") + v = _grad_preprocess(v, create_graph=create_graph, need_graph=False) + _validate_v(v, inputs, is_inputs_tuple) + else: + if len(inputs) != 1 or inputs[0].nelement() != 1: + raise RuntimeError( + "The vector v can only be None if the input to the user-provided function " + "is a single Tensor with a single element." 
+ ) + outputs = func(*inputs) + is_outputs_tuple, outputs = _as_tuple( + outputs, "outputs of the user-provided function", "hvp" + ) + _check_requires_grad(outputs, "outputs", strict=strict) + + if is_outputs_tuple or not isinstance(outputs[0], torch.Tensor): + raise RuntimeError( + "The function given to hvp should return a single Tensor" + ) + + if outputs[0].nelement() != 1: + raise RuntimeError( + "The Tensor returned by the function given to hvp should contain a single element" + ) + + jac = _autograd_grad(outputs, inputs, create_graph=True) + _check_requires_grad(jac, "jacobian", strict=strict) + + grad_jac = tuple(torch.zeros_like(inp, requires_grad=True) for inp in inputs) + + double_back = _autograd_grad(jac, inputs, grad_jac, create_graph=True) + _check_requires_grad(jac, "hessian", strict=strict) + + enable_grad = True if create_graph else torch.is_grad_enabled() + with torch.set_grad_enabled(enable_grad): + grad_res = _autograd_grad(double_back, grad_jac, v, create_graph=create_graph) + hvp = _fill_in_zeros( + grad_res, inputs, strict, create_graph, "double_back_trick" + ) + + outputs = _grad_postprocess(outputs, create_graph) + hvp = _grad_postprocess(hvp, create_graph) + + return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess( + hvp, is_inputs_tuple + ) diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/grad_mode.py b/parrot/lib/python3.10/site-packages/torch/autograd/grad_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..1c97ab58298bf86d891a453fc201400548912b75 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/grad_mode.py @@ -0,0 +1,397 @@ +# mypy: allow-untyped-defs +from typing import Any + +import torch + +from torch.utils._contextlib import ( + _DecoratorContextManager, + _NoParamDecoratorContextManager, + F, +) + +__all__ = [ + "no_grad", + "enable_grad", + "set_grad_enabled", + "inference_mode", + "set_multithreading_enabled", +] + + +class no_grad(_NoParamDecoratorContextManager): + r"""Context-manager that disables gradient calculation. + + Disabling gradient calculation is useful for inference, when you are sure + that you will not call :meth:`Tensor.backward()`. It will reduce memory + consumption for computations that would otherwise have `requires_grad=True`. + + In this mode, the result of every computation will have + `requires_grad=False`, even when the inputs have `requires_grad=True`. + There is an exception! All factory functions, or functions that create + a new Tensor and take a requires_grad kwarg, will NOT be affected by + this mode. + + This context manager is thread local; it will not affect computation + in other threads. + + Also functions as a decorator. + + .. note:: + No-grad is one of several mechanisms that can enable or + disable gradients locally see :ref:`locally-disable-grad-doc` for + more information on how they compare. + + .. note:: + This API does not apply to :ref:`forward-mode AD `. + If you want to disable forward AD for a computation, you can unpack + your dual tensors. + + Example:: + >>> # xdoctest: +SKIP + >>> x = torch.tensor([1.], requires_grad=True) + >>> with torch.no_grad(): + ... y = x * 2 + >>> y.requires_grad + False + >>> @torch.no_grad() + ... def doubler(x): + ... return x * 2 + >>> z = doubler(x) + >>> z.requires_grad + False + >>> @torch.no_grad + ... def tripler(x): + ... return x * 3 + >>> z = tripler(x) + >>> z.requires_grad + False + >>> # factory function exception + >>> with torch.no_grad(): + ... 
a = torch.nn.Parameter(torch.rand(10)) + >>> a.requires_grad + True + """ + + def __init__(self) -> None: + if not torch._jit_internal.is_scripting(): + super().__init__() + self.prev = False + + def __enter__(self) -> None: + self.prev = torch.is_grad_enabled() + torch.set_grad_enabled(False) + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + torch.set_grad_enabled(self.prev) + + +class enable_grad(_NoParamDecoratorContextManager): + r"""Context-manager that enables gradient calculation. + + Enables gradient calculation, if it has been disabled via :class:`~no_grad` + or :class:`~set_grad_enabled`. + + This context manager is thread local; it will not affect computation + in other threads. + + Also functions as a decorator. + + .. note:: + enable_grad is one of several mechanisms that can enable or + disable gradients locally see :ref:`locally-disable-grad-doc` for + more information on how they compare. + + .. note:: + This API does not apply to :ref:`forward-mode AD `. + + Example:: + >>> # xdoctest: +SKIP + >>> x = torch.tensor([1.], requires_grad=True) + >>> with torch.no_grad(): + ... with torch.enable_grad(): + ... y = x * 2 + >>> y.requires_grad + True + >>> y.backward() + >>> x.grad + tensor([2.]) + >>> @torch.enable_grad() + ... def doubler(x): + ... return x * 2 + >>> with torch.no_grad(): + ... z = doubler(x) + >>> z.requires_grad + True + >>> @torch.enable_grad + ... def tripler(x): + ... return x * 3 + >>> with torch.no_grad(): + ... z = tripler(x) + >>> z.requires_grad + True + + """ + + def __enter__(self) -> None: + self.prev = torch.is_grad_enabled() + torch._C._set_grad_enabled(True) + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + torch._C._set_grad_enabled(self.prev) + + +class set_grad_enabled(_DecoratorContextManager): + r"""Context-manager that sets gradient calculation on or off. + + ``set_grad_enabled`` will enable or disable grads based on its argument :attr:`mode`. + It can be used as a context-manager or as a function. + + This context manager is thread local; it will not affect computation + in other threads. + + Args: + mode (bool): Flag whether to enable grad (``True``), or disable + (``False``). This can be used to conditionally enable + gradients. + + .. note:: + set_grad_enabled is one of several mechanisms that can enable or + disable gradients locally see :ref:`locally-disable-grad-doc` for + more information on how they compare. + + .. note:: + This API does not apply to :ref:`forward-mode AD `. + + Example:: + >>> # xdoctest: +SKIP + >>> x = torch.tensor([1.], requires_grad=True) + >>> is_train = False + >>> with torch.set_grad_enabled(is_train): + ... 
y = x * 2 + >>> y.requires_grad + False + >>> _ = torch.set_grad_enabled(True) + >>> y = x * 2 + >>> y.requires_grad + True + >>> _ = torch.set_grad_enabled(False) + >>> y = x * 2 + >>> y.requires_grad + False + + """ + + def __init__(self, mode: bool) -> None: + self.prev = torch.is_grad_enabled() + self.mode = mode + torch._C._set_grad_enabled(mode) + + def __call__(self, orig_func: F) -> F: + torch._C._set_grad_enabled(self.prev) + return super().__call__(orig_func) + + def __enter__(self) -> None: + torch._C._set_grad_enabled(self.mode) + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + torch._C._set_grad_enabled(self.prev) + + def clone(self) -> "set_grad_enabled": + r""" + Create a copy of this class + """ + return self.__class__(self.mode) + + +class inference_mode(_DecoratorContextManager): + r"""Context-manager that enables or disables inference mode. + + InferenceMode is a new context manager analogous to :class:`~no_grad` + to be used when you are certain your operations will have no interactions + with autograd (e.g., model training). Code run under this mode gets better + performance by disabling view tracking and version counter bumps. Note that + unlike some other mechanisms that locally enable or disable grad, + entering inference_mode also disables to :ref:`forward-mode AD `. + + This context manager is thread local; it will not affect computation + in other threads. + + Also functions as a decorator. + + .. note:: + Inference mode is one of several mechanisms that can enable or + disable gradients locally see :ref:`locally-disable-grad-doc` for + more information on how they compare. + + Args: + mode (bool or function): Either a boolean flag whether to enable or + disable inference mode or a Python function to decorate with + inference mode enabled + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> import torch + >>> x = torch.ones(1, 2, 3, requires_grad=True) + >>> with torch.inference_mode(): + ... y = x * x + >>> y.requires_grad + False + >>> # xdoctest: +SKIP("want string isnt quite right") + >>> y._version + Traceback (most recent call last): + File "", line 1, in + RuntimeError: Inference tensors do not track version counter. + >>> @torch.inference_mode() + ... def func(x): + ... return x * x + >>> out = func(x) + >>> out.requires_grad + False + >>> @torch.inference_mode + ... def doubler(x): + ... return x * 2 + >>> out = doubler(x) + >>> out.requires_grad + False + + """ + + def __init__(self, mode: bool = True) -> None: + if not torch._jit_internal.is_scripting(): + super().__init__() + self.mode = mode + + def __new__(cls, mode=True): + if isinstance(mode, bool): + return super().__new__(cls) + return cls()(mode) + + def __enter__(self) -> None: + self._inference_mode_context = torch._C._InferenceMode(self.mode) + self._inference_mode_context.__enter__() + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + self._inference_mode_context.__exit__(exc_type, exc_value, traceback) + + def clone(self) -> "inference_mode": + r""" + Create a copy of this class + """ + return self.__class__(self.mode) + + +def _enter_inference_mode(mode): + mode_context = torch._C._InferenceMode(mode) + mode_context.__enter__() + return mode_context + + +def _exit_inference_mode(mode): + mode.__exit__(None, None, None) + + +class set_multithreading_enabled(_DecoratorContextManager): + r"""Context-manager that sets multithreaded backwards on or off. 
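[Editor's note: `set_multithreading_enabled`, documented below, ships without a usage example; this is a hedged sketch of using it as a context manager to force a single-threaded backward pass, e.g. while debugging.]

import torch

x = torch.randn(4, requires_grad=True)
loss = (x * x).sum()
with torch.autograd.set_multithreading_enabled(False):
    loss.backward()          # backward runs without the multithreaded engine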
+ + ``set_multithreading_enabled`` will enable or disable multithreaded backwards based on its argument :attr:`mode`. + It can be used as a context-manager or as a function. + + This context manager is thread local; it will not affect computation + in other threads. + + Args: + mode (bool): Flag whether to enable multithreaded backwards (``True``), or disable + (``False``). + + .. note:: + This API does not apply to :ref:`forward-mode AD `. + + """ + + def __init__(self, mode: bool) -> None: + self.prev = torch._C._is_multithreading_enabled() + torch._C._set_multithreading_enabled(mode) + self.mode = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + torch._C._set_multithreading_enabled(self.prev) + + def clone(self) -> "set_multithreading_enabled": + r""" + Create a copy of this class + """ + return self.__class__(self.mode) + + +class _force_original_view_tracking(_DecoratorContextManager): + r"""Context-manager that sets whether or not to always enable view-replay in autograd. + + ``set_view_replay_enabled`` will enable or disable view-replay based on its argument :attr:`mode`. + It can be used as a context-manager or as a function. + + This context manager is thread local; it will not affect computation + in other threads. + + When a tensor view is mutated, the autograd engine needs to decide whether or not + to regenerate the "updated view" by either replaying the chain of views from the updated base, + or with a single call to as_strided. + + If set_view_replay_enabled is set to True, then autograd will always use view replay. + Otherwise, it will fall back to its existing logic. + + Args: + mode (bool): Flag whether to enable view-replay (``True``), or disable + (``False``). + + """ + + def __init__(self, mode: bool) -> None: + self.prev = torch._C._is_view_replay_enabled() + torch._C._set_view_replay_enabled(mode) + self.mode = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + torch._C._set_view_replay_enabled(self.prev) + + def clone(self): + return self.__class__(self.mode) + + +class _unsafe_preserve_version_counter(_DecoratorContextManager): + r"""DO NOT USE THIS UNLESS YOU KNOW EXACTLY WHAT YOU'RE DOING. + + This context manager can lead to arbitrary silent-correctness issues in any other part of your code + (even the ones not touched directly by the context manager)! + + Ordinarily, autograd will track mutations to tensors by incrementing it's `._version` attribute. + This is generally important for correctness, as for example, mutating a tensor that autograd has saved + for the backwards pass can result in incorrect gradients, and autograd uses the version counter to detect + and error out in this situation. + + However, there are rare instances where it might be useful to hide mutations from autograd. For example: + if a tensor is very large, and you'd like to free its memory by storing it elsewhere, and re-populate + the tensor right before it is needed by autograd. + + Args: + tensor (torch.Tensor): the tensor in question, that you would like to preserve the version counter of. + + .. note:: + This API does not apply to :ref:`forward-mode AD `. 
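[Editor's note: a hedged sketch of the use case `_unsafe_preserve_version_counter`'s docstring describes: performing an in-place update without bumping `._version`, so autograd's saved-tensor check does not fire. Dangerous by construction; for illustration only, against this vendored tree's private API.]

import torch
from torch.autograd.grad_mode import _unsafe_preserve_version_counter

t = torch.randn(3)
before = t._version
with _unsafe_preserve_version_counter(t):
    t.add_(1.0)              # would normally increment t._version
assert t._version == before  # the mutation is hidden from autograd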
+ + """ + + def __init__(self, tensor: torch.Tensor) -> None: + self.tensor = tensor + self.prev_version = tensor._version + + def __enter__(self) -> None: + pass + + def __exit__(self, *args) -> None: + torch._C._autograd._unsafe_set_version_counter(self.tensor, self.prev_version) diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/gradcheck.py b/parrot/lib/python3.10/site-packages/torch/autograd/gradcheck.py new file mode 100644 index 0000000000000000000000000000000000000000..5bf74afacb66fb6a55caf277714b8a99224c5f14 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/gradcheck.py @@ -0,0 +1,2270 @@ +# mypy: allow-untyped-defs +import collections +import functools +import warnings +from itertools import product +from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union +from typing_extensions import deprecated + +import torch +import torch.testing +from torch._vmap_internals import _vmap, vmap +from torch.overrides import is_tensor_like +from torch.types import _TensorOrTensors + +# Note: `get_*_jacobian` functions are added here even though we didn't intend to make them public +# since they have been exposed from before we added `__all__` and we already maintain BC for them +# We should eventually deprecate them and remove them from `__all__` +__all__ = [ + "gradcheck", + "gradgradcheck", + "GradcheckError", + "get_numerical_jacobian", + "get_analytical_jacobian", + "get_numerical_jacobian_wrt_specific_input", +] + + +class GradcheckError(RuntimeError): + r"""Error raised by :func:`gradcheck` and :func:`gradgradcheck`.""" + + pass + + +def _is_sparse_compressed_tensor(obj: torch.Tensor): + return obj.layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + } + + +def _is_sparse_any_tensor(obj: torch.Tensor): + return _is_sparse_compressed_tensor(obj) or obj.layout is torch.sparse_coo + + +def _is_float_or_complex_tensor(obj): + return is_tensor_like(obj) and (obj.is_floating_point() or obj.is_complex()) + + +def _allocate_jacobians_with_inputs( + input_tensors: Tuple, numel_output +) -> Tuple[torch.Tensor, ...]: + # Makes zero-filled tensors from inputs. If `numel_output` is not None, for + # each tensor in `input_tensors`, returns a new zero-filled tensor with height + # of `t.numel` and width of `numel_output`. Otherwise, for each tensor, returns + # a 1-d tensor with size `(t.numel,)`. Each new tensor will be strided and have + # the same dtype and device as those of the corresponding input. + out: List[torch.Tensor] = [] + for t in input_tensors: + if _is_float_or_complex_tensor(t) and t.requires_grad: + out.append(t.new_zeros((t.numel(), numel_output), layout=torch.strided)) + return tuple(out) + + +def _allocate_jacobians_with_outputs( + output_tensors: Tuple, numel_input, dtype=None, device=None +) -> Tuple[torch.Tensor, ...]: + # Makes zero-filled tensors from outputs. If `dim` is not None, for each tensor + # in `output_tensors`, returns a new zero-filled tensor with height of `dim` and + # width of `t.numel`. Otherwise, for each tensor, returns a 1-d tensor with size + # (t.numel,). 
+ out: List[torch.Tensor] = [] + options = {"dtype": dtype, "device": device, "layout": torch.strided} + for t in output_tensors: + if _is_float_or_complex_tensor(t): + out.append(t.new_zeros((numel_input, t.numel()), **options)) + return tuple(out) + + +def _iter_tensors( + x: Union[torch.Tensor, Iterable[torch.Tensor]], only_requiring_grad: bool = False +) -> Iterable[torch.Tensor]: + if is_tensor_like(x): + # mypy doesn't narrow type of `x` to torch.Tensor + if x.requires_grad or not only_requiring_grad: # type: ignore[union-attr] + yield x # type: ignore[misc] + elif isinstance(x, collections.abc.Iterable) and not isinstance(x, str): + for elem in x: + yield from _iter_tensors(elem, only_requiring_grad) + + +def _densify(x): + # return a copy of sparse x with all unspecified elements + # "replaced" with zero-valued elements + if isinstance(x, (list, tuple)): + return type(x)(map(_densify, x)) + elif not is_tensor_like(x) or x.layout in {torch.strided, torch._mkldnn}: # type: ignore[attr-defined] # no attr _mkldnn + return x + elif x.layout is torch.sparse_coo: + device = x.device + indices_dtype = x._indices().dtype + tmp = torch.ones(x.shape[: x.sparse_dim()], dtype=torch.int8, device=device) + indices = tmp.nonzero().t().to(dtype=indices_dtype) + values = torch.zeros( + (tmp.numel(), *x.shape[x.sparse_dim() :]), dtype=x.dtype, device=device + ) + x_coalesced = x.detach().coalesce() + if x_coalesced.numel() > 0: + stride = tmp.stride() + flat_indices = ( + x_coalesced.indices() + .mul( + torch.tensor(stride, dtype=indices_dtype, device=device).unsqueeze( + 1 + ) + ) + .sum(0) + ) + values[flat_indices] = x_coalesced.values() + return ( + torch.sparse_coo_tensor(indices, values, x.shape) + ._coalesced_(True) + .requires_grad_(x.requires_grad) + ) + elif _is_sparse_compressed_tensor(x): + blocksize = ( + x.values().shape[1:3] + if x.layout in {torch.sparse_bsr, torch.sparse_bsc} + else None + ) + compressed_indices = ( + x.crow_indices() + if x.layout in {torch.sparse_csr, torch.sparse_bsr} + else x.ccol_indices() + ) + # We'll use intermediate sparse COO for simplicity + r = _densify(x.detach().to_sparse(layout=torch.sparse_coo)).to_sparse( + layout=x.layout, blocksize=blocksize + ) + # Check that all elements are specified also after `to_sparse` op: + dense_numel = r.values().numel() // max(1, r.values().shape[0]) + batch_numel = compressed_indices.numel() // compressed_indices.shape[-1] + sparse_numel = r.numel() // max(1, dense_numel * batch_numel) + if sparse_numel != r._nnz(): + raise AssertionError( + f"{x.layout} densify failed: expected nnz={sparse_numel} but got {r._nnz()}" + ) + return r.requires_grad_(x.requires_grad) + elif _is_sparse_any_tensor(x): + raise NotImplementedError(x.layout) + return x + + +def _iter_tensor(x_tensor): + # (Only used for slow gradcheck) Returns a generator that yields the following + # elements at each iteration: + # 1) a tensor: the same tensor is returned across all iterations. The tensor + # is not the same as the original x_tensor as given as input - it is + # prepared so that it can be modified in-place. Depending on whether the + # input tensor is strided, sparse, or dense, the returned tensor may or may + # not share storage with x_tensor. 
+ # 2) a tuple of indices that can be used with advanced indexing (yielded in + # dictionary order) + # 3) flattened index that will be used to index into the Jacobian tensor + # + # For a tensor t with size (2, 2), _iter_tensor yields: + # `x, (0, 0), 0`, `x, (0, 1), 1`, `x, (1, 0), 2`, `x, (1, 1), 3` + # + # where x is the t.data of the original tensor. Perturbing the entry of x + # at index (1, 1) yields the 3rd column of the overall Jacobian matrix. + if _is_sparse_any_tensor(x_tensor): + + def get_stride(size): + dim = len(size) + tmp = 1 + stride = [0] * dim + for i in reversed(range(dim)): + stride[i] = tmp + tmp *= size[i] + return stride + + x_nnz = x_tensor._nnz() + x_size = list(x_tensor.size()) + if x_tensor.layout is torch.sparse_coo: + x_indices = x_tensor._indices().t() + x_values = x_tensor._values() + elif x_tensor.layout is torch.sparse_csr: + x_indices = torch._convert_indices_from_csr_to_coo( + x_tensor.crow_indices(), x_tensor.col_indices() + ).t() + x_values = x_tensor.values() + elif x_tensor.layout is torch.sparse_csc: + x_indices = torch._convert_indices_from_csr_to_coo( + x_tensor.ccol_indices(), x_tensor.row_indices(), transpose=True + ).t() + x_values = x_tensor.values() + elif x_tensor.layout is torch.sparse_bsr: + x_block_values = x_tensor.values() + x_blocksize = x_block_values.size()[1:3] + x_indices = ( + torch._convert_indices_from_csr_to_coo( + x_tensor.crow_indices(), x_tensor.col_indices() + ) + .repeat_interleave(x_blocksize[0] * x_blocksize[1], 1) + .mul_(torch.tensor(x_blocksize, device=x_tensor.device).reshape(2, 1)) + .add_( + torch.stack( + torch.where(torch.ones(x_blocksize, device=x_tensor.device)) + ).repeat(1, x_nnz) + ) + .t() + ) + x_values = x_block_values.flatten(0, 2) + x_nnz = x_values.size(0) + elif x_tensor.layout is torch.sparse_bsc: + x_block_values = x_tensor.values() + x_blocksize = x_block_values.size()[1:3] + x_indices = ( + torch._convert_indices_from_csr_to_coo( + x_tensor.ccol_indices(), x_tensor.row_indices(), transpose=True + ) + .repeat_interleave(x_blocksize[0] * x_blocksize[1], 1) + .mul_(torch.tensor(x_blocksize, device=x_tensor.device).reshape(2, 1)) + .add_( + torch.stack( + torch.where(torch.ones(x_blocksize, device=x_tensor.device)) + ).repeat(1, x_nnz) + ) + .t() + ) + x_values = x_block_values.flatten(0, 2) + x_nnz = x_values.size(0) + else: + raise NotImplementedError(f"_iter_tensor for {x_tensor.layout} input") + x_stride = get_stride(x_size) + # Use .data here to get around the version check + x_values = x_values.data + for i in range(x_nnz): + x_value = x_values[i] + for x_idx in product(*[range(m) for m in x_values.size()[1:]]): + indices = x_indices[i].tolist() + list(x_idx) + d_idx = sum(indices[k] * x_stride[k] for k in range(len(x_size))) + yield x_value, x_idx, d_idx + elif x_tensor.layout == torch._mkldnn: # type: ignore[attr-defined] + for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])): + # this is really inefficient, but without indexing implemented, there's + # not really a better way than converting back and forth + x_tensor_dense = x_tensor.to_dense() + yield x_tensor_dense, x_idx, d_idx + else: + # Use .data here to get around the version check + x_tensor = x_tensor.data + for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])): + yield x_tensor, x_idx, d_idx + + +def _get_numerical_jacobian( + fn, inputs, outputs=None, target=None, eps=1e-3, is_forward_ad=False +) -> List[Tuple[torch.Tensor, ...]]: + """Compute the numerical Jacobian of `fn(inputs)` 
with respect to `target`.
+
+    If not specified, the targets are the inputs. Returns M * N Jacobians where N is the
+    number of tensors in target that require grad and M is the number of non-integral
+    outputs.
+
+    Args:
+        fn: the function to compute the jacobian for
+        inputs: inputs to `fn`
+        outputs: provide precomputed outputs to avoid one extra invocation of fn
+        target: the Tensors wrt which Jacobians are calculated (default=`inputs`)
+        eps: the magnitude of the perturbation during finite differencing
+             (default=`1e-3`)
+        is_forward_ad: if this numerical jacobian is computed to be checked wrt
+                       forward AD gradients (this is used for error checking only)
+
+    Returns:
+        A list of M N-tuples of tensors
+
+    Note that `target` may not even be part of `input` to `fn`, so please be
+    **very careful** in this function not to clone `target`.
+    """
+    jacobians: List[Tuple[torch.Tensor, ...]] = []
+    if outputs is None:
+        outputs = _as_tuple(fn(*_as_tuple(inputs)))
+    if not is_forward_ad and any(o.is_complex() for o in outputs):
+        raise ValueError(
+            "Expected output to be non-complex. get_numerical_jacobian no "
+            "longer supports functions that return complex outputs."
+        )
+    if target is None:
+        target = inputs
+    inp_indices = [
+        i for i, a in enumerate(target) if is_tensor_like(a) and a.requires_grad
+    ]
+    for i, (inp, inp_idx) in enumerate(zip(_iter_tensors(target, True), inp_indices)):
+        jacobians += [
+            get_numerical_jacobian_wrt_specific_input(
+                fn,
+                inp_idx,
+                inputs,
+                outputs,
+                eps,
+                input=inp,
+                is_forward_ad=is_forward_ad,
+            )
+        ]
+    return jacobians
+
+
+@deprecated(
+    "`get_numerical_jacobian` was part of PyTorch's private API and not "
+    "meant to be exposed. We are deprecating it and it will be removed "
+    "in a future version of PyTorch. If you have a specific use for "
+    "this or feature request for this to be a stable API, please file "
+    "us an issue at https://github.com/pytorch/pytorch/issues/new",
+    category=FutureWarning,
+)
+def get_numerical_jacobian(fn, inputs, target=None, eps=1e-3, grad_out=1.0):
+    """Compute the numerical Jacobian for a given fn and its inputs.
+
+    This is a deprecated API.
+
+    Args:
+        fn: the function to compute the Jacobian for (must take inputs as a tuple)
+        inputs: input to `fn`
+        target: the Tensors wrt which Jacobians are calculated (default=`inputs`)
+        eps: the magnitude of the perturbation during finite differencing
+             (default=`1e-3`)
+
+    Returns:
+        A list of Jacobians of `fn` (restricted to its first output) with respect to
+        each input or target, if provided.
+
+    Note that `target` may not even be part of `input` to `fn`, so please be
+    **very careful** in this function not to clone `target`.
+    """
+    if (
+        grad_out != 1.0
+    ):  # grad_out param is only kept for backward compatibility reasons
+        raise ValueError(
+            "Expected grad_out to be 1.0. get_numerical_jacobian no longer "
+            "supports values of grad_out != 1.0."
+        )
+
+    def fn_pack_inps(*inps):
+        return fn(inps)
+
+    jacobians = _get_numerical_jacobian(fn_pack_inps, inputs, None, target, eps)
+
+    return tuple(jacobian_for_each_output[0] for jacobian_for_each_output in jacobians)
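+
+# Editor's aside: a minimal, self-contained sketch of the central-difference
+# scheme the numerical helpers here implement, for a dense R^n -> R^m function.
+# All names below are illustrative and not part of the upstream API; `torch` is
+# already imported at the top of this module.
+def _example_numerical_jacobian(fn, x, eps=1e-3):
+    # J[i, j] approximates d out_j / d in_i, matching this file's storage
+    # convention (rows = input elements, columns = output elements).
+    x = x.detach().clone().double()
+    out = fn(x).reshape(-1)
+    jac = torch.zeros(x.numel(), out.numel(), dtype=torch.double)
+    flat = x.reshape(-1)  # a view: writes to `flat` perturb `x` in place
+    for i in range(flat.numel()):
+        orig = flat[i].item()
+        flat[i] = orig + eps
+        f_plus = fn(x).reshape(-1)
+        flat[i] = orig - eps
+        f_minus = fn(x).reshape(-1)
+        flat[i] = orig
+        jac[i] = (f_plus - f_minus) / (2 * eps)  # central difference column
+    return jac
+
+# e.g. _example_numerical_jacobian(torch.sin, torch.randn(4)) ~ diag(cos(x)).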
+def _compute_numerical_gradient(fn, entry, v, norm_v, nbhd_checks_fn):
+    # Computes the numerical directional derivative as a finite difference
+    # of function `fn` at input `entry`, perturbed by vector `v`.
+    if _is_sparse_compressed_tensor(entry):
+        # sparse compressed tensors don't implement sub/add/copy_
+        # yet. However, in non-masked semantics context entry and v
+        # have the same sparse indices ...
+        assert entry.layout == v.layout, (entry.layout, v.layout)
+        assert entry._nnz() == v._nnz(), (entry._nnz(), v._nnz(), entry.shape)
+        # ... the finite differencing can be performed on values only:
+        entry = entry.values()
+        v = v.values()
+        # we'll detach to avoid backward computations that sparse
+        # tensors have limited support for.
+        entry = entry.detach()
+
+    orig = entry.clone()
+    entry.copy_(orig - v)
+    outa = fn()
+    entry.copy_(orig + v)
+    outb = fn()
+    entry.copy_(orig)
+
+    def compute(a, b):
+        nbhd_checks_fn(a, b)
+        ret = (b - a) / (2 * norm_v)  # use central difference approx
+        return ret.detach().reshape(-1)
+
+    return tuple(compute(a, b) for (a, b) in zip(outa, outb))
+
+
+def _compute_numerical_jvps_wrt_specific_input(
+    jvp_fn, delta, input_is_complex, is_forward_ad=False
+) -> List[torch.Tensor]:
+    # Computing the jacobian only works for real delta.
+    # For details on the algorithm used here, refer to:
+    # Section 3.5.3 https://arxiv.org/pdf/1701.00392.pdf
+    # s = fn(z) where z = x for real valued input
+    # and z = x + yj for complex valued input
+    jvps: List[torch.Tensor] = []
+    ds_dx_tup = jvp_fn(delta[0] if isinstance(delta, tuple) else delta)
+
+    if input_is_complex:  # C -> R
+        ds_dy_tup = (
+            jvp_fn(delta[1] * 1j) if isinstance(delta, tuple) else jvp_fn(delta * 1j)
+        )
+        for ds_dx, ds_dy in zip(ds_dx_tup, ds_dy_tup):
+            assert not ds_dx.is_complex()
+            # conjugate wirtinger derivative
+            conj_w_d = ds_dx + ds_dy * 1j
+            jvps.append(conj_w_d)
+    else:
+        for ds_dx in ds_dx_tup:  # R -> R or (R -> C for the forward AD case)
+            assert is_forward_ad or not ds_dx.is_complex()
+            jvps.append(ds_dx)
+    return jvps
+
+
+def _combine_jacobian_cols(
+    jacobians_cols: Dict[int, List[torch.Tensor]], outputs, input, numel
+) -> Tuple[torch.Tensor, ...]:
+    # jacobians_cols maps column_idx -> output_idx -> single column of jacobian Tensor
+    # we return a tuple that maps output_idx -> full jacobian Tensor
+    jacobians = _allocate_jacobians_with_outputs(
+        outputs, numel, dtype=input.dtype if input.dtype.is_complex else None
+    )
+    for i, jacobian in enumerate(jacobians):
+        for k, v in jacobians_cols.items():
+            jacobian[k] = v[i]
+    return jacobians
+
+
+def _prepare_input(
+    input: torch.Tensor, maybe_perturbed_input: Optional[torch.Tensor], fast_mode=False
+) -> torch.Tensor:
+    # Prepares the inputs to be passed into the function while including the new
+    # modified input.
+    if input.layout == torch._mkldnn:  # type: ignore[attr-defined] # no attr _mkldnn
+        # Convert back to mkldnn
+        if maybe_perturbed_input is not None:
+            return maybe_perturbed_input.to_mkldnn()
+        else:
+            return input
+    elif _is_sparse_any_tensor(input):
+        if fast_mode and maybe_perturbed_input is not None:
+            # entry is already a "cloned" version of the original tensor
+            # thus changes to entry are not reflected in the input
+            return maybe_perturbed_input
+        else:
+            return input
+    else:
+        # We cannot use entry (input.data) if we want gradgrad to work because
+        # fn (in the gradgrad case) needs to compute grad wrt input
+        return input
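+
+# Editor's aside: a standalone illustration of the two-perturbation scheme used
+# by _compute_numerical_jvps_wrt_specific_input for complex inputs. Names are
+# illustrative, not upstream API. For a real-valued f(z) with z = x + iy, the
+# helper assembles df/dx + 1j * df/dy, which is PyTorch's gradient convention
+# for complex inputs (twice the conjugate Wirtinger derivative).
+def _example_numeric_complex_grad(f, z, eps=1e-3):
+    # f maps complex tensors to real tensors; z is a complex tensor.
+    def central(delta):
+        return (f(z + delta) - f(z - delta)) / (2 * eps)
+
+    ds_dx = central(torch.full_like(z, eps))       # perturb along the real axis
+    ds_dy = central(torch.full_like(z, eps) * 1j)  # perturb along the imaginary axis
+    return ds_dx + ds_dy * 1j
+
+# e.g. for f(z) = |z|^2 = (z * z.conj()).real, the result is ~ 2 * z.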
+def _check_outputs_same_dtype_and_shape(output1, output2, eps, idx=None) -> None:
+    # Check that the returned outputs don't have different dtype or shape when you
+    # perturb the input
+    on_index = f"on index {idx} " if idx is not None else ""
+    assert output1.shape == output2.shape, (
+        f"Expected `func` to return outputs with the same shape"
+        f" when inputs are perturbed {on_index}by {eps}, but got:"
+        f" shapes {output1.shape} and {output2.shape}."
+    )
+    assert output1.dtype == output2.dtype, (
+        f"Expected `func` to return outputs with the same dtype"
+        f" when inputs are perturbed {on_index}by {eps}, but got:"
+        f" dtypes {output1.dtype} and {output2.dtype}."
+    )
+
+
+def get_numerical_jacobian_wrt_specific_input(
+    fn, input_idx, inputs, outputs, eps, input=None, is_forward_ad=False
+) -> Tuple[torch.Tensor, ...]:
+    # Computes the numerical jacobians wrt a single input. Returns N jacobian
+    # tensors, where N is the number of outputs. We use a dictionary for
+    # jacobian_cols because indices aren't necessarily consecutive for sparse inputs.
+    # When we perturb only a single element of the input tensor at a time, the jvp
+    # is equivalent to a single col of the Jacobian matrix of fn.
+    jacobian_cols: Dict[int, List[torch.Tensor]] = {}
+    input = inputs[input_idx] if input is None else input
+    assert input.requires_grad
+    for x, idx, d_idx in _iter_tensor(input):
+        wrapped_fn = _with_prepare_inputs(fn, inputs, input_idx, x)
+        input_to_perturb = x[idx]
+        nbhd_checks_fn = functools.partial(
+            _check_outputs_same_dtype_and_shape, idx=idx, eps=eps
+        )
+        jvp_fn = _get_numerical_jvp_fn(
+            wrapped_fn, input_to_perturb, eps, nbhd_checks_fn
+        )
+        jacobian_cols[d_idx] = _compute_numerical_jvps_wrt_specific_input(
+            jvp_fn, eps, x.is_complex(), is_forward_ad
+        )
+    return _combine_jacobian_cols(jacobian_cols, outputs, input, input.numel())
+
+
+def _get_analytical_jacobian_forward_ad(
+    fn, inputs, outputs, *, check_grad_dtypes=False, all_u=None
+) -> Tuple[Tuple[torch.Tensor, ...], ...]:
+    """Compute the analytical Jacobian of `fn(inputs)` using forward mode AD with respect to `target`.
+
+    Return N * M Jacobians where N is the number of tensors in target that require grad and
+    M is the number of non-integral outputs.
+    Contrary to other functions here, this function requires "inputs" to actually be used by the function.
+    The computed value is expected to be wrong if the function captures the inputs by side effect instead of
+    using the passed ones (many torch.nn tests do this).
+
+    Args:
+        fn: the function to compute the jacobian for
+        inputs: inputs to `fn`
+        outputs: provide precomputed outputs to avoid one extra invocation of fn
+        check_grad_dtypes: if True, will check that the gradient dtypes are valid
+        all_u (optional): if provided, the Jacobian will be right multiplied with this vector
+
+    Returns:
+        A tuple of M N-tuples of tensors
+    """
+    # To avoid early import issues
+    fwAD = torch.autograd.forward_ad
+
+    tensor_inputs = tuple(i for i in inputs if is_tensor_like(i) and i.requires_grad)
+
+    if any(i.is_complex() for i in tensor_inputs):
+        raise ValueError(
+            "Expected inputs to be non-complex for _get_analytical_jacobian_forward_ad."
+        )
+
+    if all_u:
+        jacobians = tuple(
+            _allocate_jacobians_with_outputs(outputs, 1) for i in tensor_inputs
+        )
+    else:
+        jacobians = tuple(
+            _allocate_jacobians_with_outputs(outputs, i.numel()) for i in tensor_inputs
+        )
+
+    with fwAD.dual_level():
+        fw_grads = []
+        dual_inputs = []
+        for i, inp in enumerate(inputs):
+            if is_tensor_like(inp) and inp.requires_grad:
+                if inp.layout == torch._mkldnn:  # type: ignore[attr-defined]
+                    raise ValueError(
+                        "MKLDNN inputs are not supported for forward AD gradcheck."
+ ) + + inp = fwAD.make_dual(inp.detach(), torch.zeros_like(inp)) + # If inp is a differentiable view, the dual might not be the tangent given to + # make_dual, so read it explicitly from the dual tensor + fw_grads.append(fwAD.unpack_dual(inp)[1]) + dual_inputs.append(inp) + + if all_u: + # Do the full reduction in one pass + # To be consistent with numerical evaluation, we actually compute one reduction per input + for i, (fw_grad, u) in enumerate(zip(fw_grads, all_u)): + fw_grad.copy_(u.view_as(fw_grad)) + raw_outputs = _as_tuple(fn(*dual_inputs)) + dual_outputs = filter(_is_float_or_complex_tensor, raw_outputs) + for index_o, d_o in enumerate(dual_outputs): + val, res = fwAD.unpack_dual(d_o) + if ( + check_grad_dtypes + and res is not None + and val.is_complex() != res.is_complex() + ): + raise GradcheckError("Forward AD gradient has dtype mismatch.") + + # Remove extra dimension of size 1 corresponding to the reduced input + jacobians[i][index_o].squeeze_(0) + if res is None: + jacobians[i][index_o].zero_() + else: + jacobians[i][index_o].copy_(res.reshape(-1)) + fw_grad.zero_() + else: + # Reconstruct the full Jacobian column by column + for i, fw_grad in enumerate(fw_grads): + for lin_idx, grad_idx in enumerate( + product(*[range(m) for m in fw_grad.size()]) + ): + fw_grad[grad_idx] = 1.0 + raw_outputs = _as_tuple(fn(*dual_inputs)) + dual_outputs = filter(_is_float_or_complex_tensor, raw_outputs) + for index_o, d_o in enumerate(dual_outputs): + val, res = fwAD.unpack_dual(d_o) + if ( + check_grad_dtypes + and res is not None + and val.is_complex() != res.is_complex() + ): + raise GradcheckError( + "Forward AD gradient has dtype mismatch." + ) + + if res is None: + jacobians[i][index_o][lin_idx].zero_() + else: + jacobians[i][index_o][lin_idx].copy_(res.reshape(-1)) + fw_grad[grad_idx] = 0.0 + + return jacobians + + +def _get_input_to_perturb(input): + # Prepare the input so that it can be modified in-place and do certain + # operations that require the tensor to have strides. 
If fast_mode=False, + # _iter_tensor would handle the below cases: + if input.layout == torch._mkldnn: # type: ignore[attr-defined] # no attr _mkldnn + # Convert to dense so we can perform operations that require strided tensors + input_to_perturb = input.to_dense() + elif _is_sparse_any_tensor(input): + # Clone because input may require grad, and copy_ calls resize_, + # which is not allowed for .data + input_to_perturb = input.clone() + else: + input_to_perturb = input.data + return input_to_perturb + + +def _with_prepare_inputs(fn, inputs, input_idx, input_to_perturb, fast_mode=False): + # Wraps `fn` so that its inputs are already supplied + def wrapped_fn(): + inp = tuple( + _prepare_input(a, input_to_perturb if i == input_idx else None, fast_mode) + if is_tensor_like(a) + else a + for i, a in enumerate(_as_tuple(inputs)) + ) + return tuple(a.clone() for a in _as_tuple(fn(*inp))) + + return wrapped_fn + + +def _get_numerical_jvp_fn(wrapped_fn, input_to_perturb, eps, nbhd_checks_fn): + # Wraps jvp_fn so that certain arguments are already supplied + def jvp_fn(delta): + return _compute_numerical_gradient( + wrapped_fn, input_to_perturb, delta, eps, nbhd_checks_fn + ) + + return jvp_fn + + +def _reshape_tensor_or_tuple(u, shape): + # We don't need to reshape when input corresponding to u is sparse + if isinstance(u, tuple): + if not _is_sparse_any_tensor(u[0]): + return (u[0].reshape(shape), u[1].reshape(shape)) + else: + if not _is_sparse_any_tensor(u): + return u.reshape(shape) + return u + + +def _mul_tensor_or_tuple(u, k): + if isinstance(u, tuple): + return (k * u[0], k * u[1]) + else: + return k * u + + +def _get_numerical_jvp_wrt_specific_input( + fn, input_idx, inputs, u, eps, is_forward_ad=False +) -> List[torch.Tensor]: + input = inputs[input_idx] + input_to_perturb = _get_input_to_perturb(input) + wrapped_fn = _with_prepare_inputs(fn, inputs, input_idx, input_to_perturb, True) + nbhd_checks_fn = functools.partial(_check_outputs_same_dtype_and_shape, eps=eps) + jvp_fn = _get_numerical_jvp_fn(wrapped_fn, input_to_perturb, eps, nbhd_checks_fn) + u = _reshape_tensor_or_tuple(u, input_to_perturb.shape) + u = _mul_tensor_or_tuple(u, eps) + return _compute_numerical_jvps_wrt_specific_input( + jvp_fn, u, input.is_complex(), is_forward_ad + ) + + +def _get_numerical_vJu( + fn, inputs, inp_indices, func_out, all_u, all_v, eps, is_forward_ad +): + # Note that all_v can also be None, in that case, this function only computes Ju. + reduced_jacobians: List[List[torch.Tensor]] = [] + for i, (inp_idx, u) in enumerate(zip(inp_indices, all_u)): + all_Ju = _get_numerical_jvp_wrt_specific_input( + fn, inp_idx, inputs, u, eps, is_forward_ad + ) + # Filter out the Ju for non floating point outputs + filtered_Ju = [] + func_out = _as_tuple(func_out) + assert len(all_Ju) == len(func_out) + for Ju, output in zip(all_Ju, func_out): + if _is_float_or_complex_tensor(output): + filtered_Ju.append(Ju) + else: + # TODO: handle the other Ju + pass + if all_v is not None: + jacobian_scalars: List[torch.Tensor] = [] + for v, Ju in zip(all_v, filtered_Ju): + jacobian_scalars.append(_dot_with_type_promotion(v, Ju)) + reduced_jacobians.append(jacobian_scalars) + else: + reduced_jacobians.append(filtered_Ju) + return reduced_jacobians + + +def _check_jacobians_equal(j1, j2, atol): + # Check whether the max difference between two Jacobian tensors are within some + # tolerance `atol`. 
+ for j1_x, j2_x in zip(j1, j2): + if j1_x.numel() != 0 and (j1_x - j2_x).abs().max() > atol: + return False + return True + + +def _stack_and_check_tensors( + list_of_list_of_tensors, inputs, numel_outputs +) -> Tuple[Tuple[torch.Tensor, ...], bool, bool]: + # For the ith tensor in the inner list checks whether it has the same size and + # dtype as the ith differentiable input. + out_jacobians = _allocate_jacobians_with_inputs(inputs, numel_outputs) + diff_input_list = list(_iter_tensors(inputs, True)) + correct_grad_sizes = True + correct_grad_types = True + for i, tensor_list in enumerate(list_of_list_of_tensors): + inp = diff_input_list[i] + out_jacobian = out_jacobians[i] + for j, tensor in enumerate(tensor_list): + if tensor is not None and tensor.size() != inp.size(): + correct_grad_sizes = False + elif tensor is not None and tensor.dtype != inp.dtype: + correct_grad_types = False + if tensor is None: + out_jacobian[:, j].zero_() + else: + dense = ( + tensor.to_dense() if not tensor.layout == torch.strided else tensor + ) + assert out_jacobian[:, j].numel() == dense.numel() + out_jacobian[:, j] = dense.reshape(-1) + return out_jacobians, correct_grad_sizes, correct_grad_types + + +FAILED_NONDET_MSG = """\n +NOTE: If your op relies on non-deterministic operations i.e., it is listed here: +https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html +this failure might be expected. + +If you are adding a new operator, please file an issue and then use one of the +workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck. +If the test +- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck + with `nondet_tol=` as a keyword argument. +- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test + to have `gradcheck_nondet_tol=`. +- is a Module test (e.g., in common_nn.py), then modify the corresponding + module_test entry to have `gradcheck_nondet_tol=` +""" + + +def _check_analytical_jacobian_attributes( + inputs, output, nondet_tol, check_grad_dtypes, fast_mode=False, v=None +) -> Tuple[torch.Tensor, ...]: + # This is used by both fast and slow mode: + # - For slow mode, vjps[i][j] is the jth row of the Jacobian wrt the ith + # input. 
+ # - For fast mode, vjps[i][0] is a linear combination of the rows + # of the Jacobian wrt the ith input + diff_input_list = list(_iter_tensors(inputs, True)) + + def vjp_fn(grad_output): + return torch.autograd.grad( + output, diff_input_list, grad_output, retain_graph=True, allow_unused=True + ) + + # Compute everything twice to check for nondeterminism (which we call reentrancy) + if fast_mode: + vjps1 = _get_analytical_vjps_wrt_specific_output(vjp_fn, output.clone(), v) + vjps2 = _get_analytical_vjps_wrt_specific_output(vjp_fn, output.clone(), v) + else: + vjps1 = _compute_analytical_jacobian_rows(vjp_fn, output.clone()) + vjps2 = _compute_analytical_jacobian_rows(vjp_fn, output.clone()) + + output_numel = output.numel() if not fast_mode else 1 + jacobians1, types_ok, sizes_ok = _stack_and_check_tensors( + vjps1, inputs, output_numel + ) + jacobians2, _, _ = _stack_and_check_tensors(vjps2, inputs, output_numel) + reentrant = _check_jacobians_equal(jacobians1, jacobians2, nondet_tol) + + if not types_ok and check_grad_dtypes: + raise GradcheckError("Gradient has dtype mismatch") + if not sizes_ok: + raise GradcheckError("Analytical gradient has incorrect size") + if not reentrant: + raise GradcheckError( + "Backward is not reentrant, i.e., running backward with " + "same input and grad_output multiple times gives different values, " + "although analytical gradient matches numerical gradient." + f"The tolerance for nondeterminism was {nondet_tol}." + FAILED_NONDET_MSG + ) + return jacobians1 + + +def _get_analytical_vJu_backward_mode( + inputs, outputs, nondet_tol, check_grad_dtypes, all_v, all_u +): + reduced_jacobians: List[List[torch.Tensor]] = [] + for output, v in zip(outputs, all_v): + all_vJ = _check_analytical_jacobian_attributes( + inputs, output, nondet_tol, check_grad_dtypes, fast_mode=True, v=v + ) + jacobian_scalars: List[torch.Tensor] = [] + for vJ, u in zip(all_vJ, all_u): + # Why do we need squeeze here? vJ is a 2-d tensor so that we can reuse + # the error checking logic from slow mode + vJ = vJ.T.squeeze(0) + if vJ.is_complex(): # C -> R + tv = torch.view_as_real(vJ.resolve_conj()) + tr = tv.select(-1, 0) + ti = tv.select(-1, 1) + jacobian_scalars.append(tr.dot(u[0]) + 1j * ti.dot(u[1])) + else: # R -> R + jacobian_scalars.append(vJ.dot(u)) + reduced_jacobians.append(jacobian_scalars) + return reduced_jacobians + + +@deprecated( + "`get_analytical_jacobian` was part of PyTorch's private API and not " + "meant to be exposed. We are deprecating it and it will be removed " + "in a future version of PyTorch. If you have a specific use for " + "this or feature request for this to be a stable API, please file " + "us an issue at https://github.com/pytorch/pytorch/issues/new", + category=FutureWarning, +) +def get_analytical_jacobian(inputs, output, nondet_tol=0.0, grad_out=1.0): + # Replicates the behavior of the old get_analytical_jacobian before the refactor + # This shares much of its code with _check_analytical_jacobian_attributes + if ( + grad_out != 1.0 + ): # grad_out param is only kept for backward compatibility reasons + raise ValueError( + "Expected grad_out to be 1.0. get_analytical_jacobian no longer " + "supports values of grad_out != 1.0." + ) + if output.is_complex(): + raise ValueError( + "Expected output to be non-complex. get_analytical_jacobian no " + "longer supports functions that return complex outputs." 
+ ) + diff_input_list = list(_iter_tensors(inputs, True)) + + def vjp_fn(grad_output): + return torch.autograd.grad( + output, diff_input_list, grad_output, retain_graph=True, allow_unused=True + ) + + # Compute everything twice to check for nondeterminism (which we call reentrancy) + vjps1 = _compute_analytical_jacobian_rows(vjp_fn, output.clone()) + vjps2 = _compute_analytical_jacobian_rows(vjp_fn, output.clone()) + + output_numel = output.numel() + jacobians1, types_ok, sizes_ok = _stack_and_check_tensors( + vjps1, inputs, output_numel + ) + jacobians2, _, _ = _stack_and_check_tensors(vjps2, inputs, output_numel) + reentrant = _check_jacobians_equal(jacobians1, jacobians2, nondet_tol) + + return jacobians1, reentrant, sizes_ok, types_ok + + +def _get_analytical_jacobian(inputs, outputs, input_idx, output_idx): + # Computes the analytical Jacobian in slow mode for a single input-output pair. + # Forgoes performing checks on dtype, shape, and reentrancy. + jacobians = _check_analytical_jacobian_attributes( + inputs, outputs[output_idx], nondet_tol=float("inf"), check_grad_dtypes=False + ) + return jacobians[input_idx] + + +def _compute_analytical_jacobian_rows( + vjp_fn, sample_output +) -> List[List[Optional[torch.Tensor]]]: + # Computes Jacobian row-by-row by projecting `vjp_fn` = v^T J on standard basis + # vectors: vjp_fn(e) = e^T J is a corresponding row of the Jacobian. + # NB: this function does not assume vjp_fn(v) to return tensors with the same + # number of elements for different v. This is checked when we later combine the + # rows into a single tensor. + grad_out_base = torch.zeros_like( + sample_output, memory_format=torch.legacy_contiguous_format + ) + flat_grad_out = grad_out_base.view(-1) + # jacobians_rows[i][j] is the Jacobian jth row for the ith input + jacobians_rows: List[List[Optional[torch.Tensor]]] = [] + for j in range(flat_grad_out.numel()): + flat_grad_out.zero_() + flat_grad_out[j] = 1.0 # projection for jth row of Jacobian + grad_inputs = vjp_fn(grad_out_base) + for i, d_x in enumerate(grad_inputs): + if j == 0: + jacobians_rows.append([]) + jacobians_rows[i] += [ + d_x.clone() if isinstance(d_x, torch.Tensor) else None + ] + return jacobians_rows + + +def _get_analytical_vjps_wrt_specific_output( + vjp_fn, sample_output, v +) -> List[List[Optional[torch.Tensor]]]: + vjps: List[List[Optional[torch.Tensor]]] = [] + grad_inputs = vjp_fn(v.reshape(sample_output.shape)) + for vjp in grad_inputs: + vjps.append([vjp.clone() if isinstance(vjp, torch.Tensor) else None]) + return vjps + + +def _check_inputs(tupled_inputs) -> bool: + # Make sure that gradients are saved for at least one input + any_input_requiring_grad = False + for idx, inp in enumerate(tupled_inputs): + if is_tensor_like(inp) and inp.requires_grad: + if not (inp.dtype == torch.float64 or inp.dtype == torch.complex128): + warnings.warn( + f"Input #{idx} requires gradient and " + "is not a double precision floating point or complex. " + "This check will likely fail if all the inputs are " + "not of double precision floating point or complex. " + ) + if inp.is_sparse: + content = inp._values() + elif _is_sparse_compressed_tensor(inp): + content = inp.values() + else: + content = inp + # TODO: To cover more problematic cases, replace stride = 0 check with + # "any overlap in memory" once we have a proper function to check it. 
+            if content.layout is not torch._mkldnn:  # type: ignore[attr-defined]
+                if not all(
+                    st > 0 or sz <= 1
+                    for st, sz in zip(content.stride(), content.size())
+                ):
+                    raise RuntimeError(
+                        f"The {idx}th input has a dimension with stride 0. gradcheck only "
+                        "supports inputs that are non-overlapping to be able to "
+                        "compute the numerical gradients correctly. You should call "
+                        ".contiguous on the input before passing it to gradcheck."
+                    )
+            any_input_requiring_grad = True
+
+    if not any_input_requiring_grad:
+        raise ValueError(
+            "gradcheck expects at least one input tensor to require gradient, "
+            "but none of them have requires_grad=True."
+        )
+    return True
+
+
+def _check_outputs(outputs) -> None:
+    if any(_is_sparse_any_tensor(t) for t in outputs if isinstance(t, torch.Tensor)):
+        # it is easier to call to_dense() on the sparse output than
+        # to modify the analytical jacobian
+        raise ValueError(
+            "Sparse output is not supported at gradcheck yet. "
+            "Please call to_dense(masked_grad=...) on the output of fn for gradcheck."
+        )
+    if any(t.layout == torch._mkldnn for t in outputs if isinstance(t, torch.Tensor)):  # type: ignore[attr-defined]
+        raise ValueError(
+            "MKLDNN output is not supported at gradcheck yet. "
+            "Please call to_dense(masked_grad=...) on the output of fn for gradcheck."
+        )
+
+
+def _check_no_differentiable_outputs(
+    func, inputs, func_out, eps, *, is_forward_ad
+) -> bool:
+    # When there are no differentiable outputs, the numerical gradient of the
+    # function is expected to be zero.
+    jacobians_all_inputs_outputs = _get_numerical_jacobian(
+        func, inputs, func_out, eps=eps, is_forward_ad=is_forward_ad
+    )
+    for jacobians_all_outputs_and_fixed_input in jacobians_all_inputs_outputs:
+        for jacobian in jacobians_all_outputs_and_fixed_input:
+            if torch.ne(jacobian, 0).sum() > 0:
+                raise GradcheckError(
+                    "Numerical gradient for function expected to be zero"
+                )
+    return True
+
+
+def _check_no_differentiable_outputs_fast(
+    func, func_out, all_inputs, inputs_indices, all_u, eps, nondet_tol
+):
+    for inp_idx, u in zip(inputs_indices, all_u):
+        jvps = _get_numerical_jvp_wrt_specific_input(func, inp_idx, all_inputs, u, eps)
+        for jvp in jvps:
+            if jvp.numel() == 0:
+                continue
+            if (jvp - torch.zeros_like(jvp)).abs().max() > nondet_tol:
+                raise GradcheckError(
+                    "Numerical gradient for function expected to be zero"
+                )
+    return True
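+
+# Editor's aside: what the "expected to be zero" checks above assert, in
+# miniature. If a function has no differentiable outputs, perturbing an input
+# must leave every output unchanged; a nonzero finite difference means the
+# function leaks a dependency through a path autograd cannot see. Illustrative
+# names only, not upstream API.
+def _example_outputs_insensitive(fn, x, eps=1e-3):
+    flat = x.detach().clone().double().reshape(-1)
+    base = fn(flat)
+    for i in range(flat.numel()):
+        orig = flat[i].item()
+        flat[i] = orig + eps
+        if not torch.equal(fn(flat), base):
+            return False
+        flat[i] = orig
+    return True
+
+# e.g. integer-valued outputs such as argmax are locally constant almost
+# everywhere, so _example_outputs_insensitive(torch.argmax, torch.randn(5))
+# is True for generic inputs.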
+FAILED_BATCHED_GRAD_MSG = """
+gradcheck or gradgradcheck failed while testing batched gradient computation.
+This could have been invoked in a number of ways (via a test that calls
+gradcheck/gradgradcheck directly or via an autogenerated test).
+
+If you are adding a new operator, please file an issue and then use one of the
+workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck.
+If the test
+- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
+  with `check_batched_grad=False` as a keyword argument.
+- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
+  to have `check_batched_grad=False` and/or `check_batched_gradgrad=False`.
+
+If you're modifying an existing operator that supports batched grad computation,
+or wish to make a new operator work with batched grad computation, please read
+the following.
+
+To compute batched grads (e.g., jacobians, hessians), we vmap over the backward
+computation. The most common failure case is if there is a 'vmap-incompatible
+operation' in the backward pass. Please see
+NOTE: [How to write vmap-compatible backward formulas]
+in the codebase for an explanation of how to fix this.
+""".strip()
+
+FAILED_BATCHED_GRAD_MSG_FWD_AD = """
+gradcheck failed while testing batched gradient computation with forward-mode AD.
+This test is enabled automatically when both `check_batched_grad=True`
+and `check_forward_ad=True`, but can be disabled in the following ways
+depending on how the test was invoked (via a test that calls gradcheck
+directly or via an autogenerated test).
+
+If you are adding a new operator, please file an issue and then use one of the
+workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck.
+If the test
+- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
+  with `check_batched_forward_grad=False` as a keyword argument.
+- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
+  to have `check_batched_forward_grad=False`
+"""
+
+
+def _get_failed_batched_grad_test_msg(
+    output_idx, input_idx, res, exp, is_forward_ad=False
+):
+    return f"""
+For output {output_idx} and input {input_idx}:
+
+{FAILED_BATCHED_GRAD_MSG_FWD_AD if is_forward_ad else FAILED_BATCHED_GRAD_MSG}
+
+Got:
+{res}
+
+Expected:
+{exp}
+""".strip()
+
+
+def _test_batched_grad_forward_ad(func, inputs) -> bool:
+    fwAD = torch.autograd.forward_ad  # To avoid early import issues (do we need this?)
+    assert isinstance(inputs, tuple)
+
+    for input_idx, current_input in enumerate(inputs):
+        if not (is_tensor_like(current_input) and current_input.requires_grad):
+            continue
+
+        def jvp(tangent: torch.Tensor):
+            with fwAD.dual_level():
+                dual = fwAD.make_dual(current_input.detach(), tangent)
+                inputs_with_dual = tuple(
+                    dual
+                    if idx == input_idx
+                    else (inp.detach() if is_tensor_like(inp) else inp)
+                    for idx, inp in enumerate(inputs)
+                )
+                dual_outputs = _as_tuple(func(*inputs_with_dual))
+                ret = []
+                for dual_output in dual_outputs:
+                    if dual_output is None:
+                        continue
+                    primal_out, tangent_out = fwAD.unpack_dual(dual_output)
+                    if tangent_out is not None:
+                        ret.append(tangent_out)
+                    else:
+                        ret.append(
+                            torch.zeros(
+                                [], dtype=primal_out.dtype, device=primal_out.device
+                            ).expand(primal_out.shape)
+                        )
+                return tuple(ret)
+
+        if not _is_float_or_complex_tensor(current_input):
+            continue
+
+        tangents = [torch.randn_like(current_input) for _ in range(2)]
+        expected = [jvp(t) for t in tangents]
+        expected = [torch.stack(shards) for shards in zip(*expected)]
+
+        try:
+            result = _vmap(jvp)(torch.stack(tangents))
+        except RuntimeError as ex:
+            # Rethrow to provide a better error message
+            raise GradcheckError(
+                f"While computing batched gradients, got: {ex}\n\n{FAILED_BATCHED_GRAD_MSG_FWD_AD}"
+            ) from ex
+
+        for input_idx, (res, exp) in enumerate(zip(result, expected)):
+            if torch.allclose(res, exp):
+                continue
+            raise GradcheckError(
+                _get_failed_batched_grad_test_msg(
+                    input_idx, input_idx, res, exp, is_forward_ad=True
+                )
+            )
+    return True
+
+
+def _test_batched_grad(input, output, output_idx) -> bool:
+    # NB: _test_batched_grad compares two autograd.grad invocations with a single
+    # vmap(autograd.grad) invocation.
It's not exactly a "gradcheck" in the + # sense that we're not comparing an analytical jacobian with a numeric one, + # but it is morally similar (we could have computed a full analytic jac + # via vmap, but that is potentially slow) + diff_input_list = list(_iter_tensors(input, True)) + grad = functools.partial( + torch.autograd.grad, + output, + diff_input_list, + retain_graph=True, + allow_unused=True, + ) + + def vjp(v): + results = grad(v) + results = tuple( + grad + if grad is not None + else torch.zeros([], dtype=inp.dtype, device=inp.device).expand(inp.shape) + for grad, inp in zip(results, diff_input_list) + ) + return results + + grad_outputs = [torch.randn_like(output) for _ in range(2)] + + expected = [vjp(gO) for gO in grad_outputs] + expected = [torch.stack(shards) for shards in zip(*expected)] + + # Squash warnings since these are expected to happen in most cases + # NB: this doesn't work for CUDA tests: https://github.com/pytorch/pytorch/issues/50209 + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", message="There is a performance drop") + warnings.filterwarnings("ignore", message="Please use torch.vmap") + try: + result = vmap(vjp)(torch.stack(grad_outputs)) + except RuntimeError as ex: + # It's OK that we're not raising the error at the correct callsite. + # That's because the callsite is always going to inside the Python + # autograd.grad instead of the C++ traceback of what line in the + # backward formula + raise GradcheckError( + f"While computing batched gradients, got: {ex}\n\n{FAILED_BATCHED_GRAD_MSG}" + ) from ex + + for input_idx, (res, exp) in enumerate(zip(result, expected)): + if torch.allclose(res, exp): + continue + raise GradcheckError( + _get_failed_batched_grad_test_msg(output_idx, input_idx, res, exp) + ) + return True + + +def _test_backward_mul_by_grad_output(outputs, inputs, masked) -> bool: + # Tests that backward is multiplied by grad_output + diff_input_list: List[torch.Tensor] = list(_iter_tensors(inputs, True)) + if not diff_input_list: + raise GradcheckError("no Tensors requiring grad found in input") + grads_input = torch.autograd.grad( + outputs, + diff_input_list, + [ + torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) + for o in outputs + ], + allow_unused=True, + ) + for gi, di in zip(grads_input, diff_input_list): + if gi is None: + continue + if isinstance(gi, torch.Tensor) and gi.layout != torch.strided: + if gi.layout != di.layout: + raise GradcheckError( + "grad is incorrect layout (" + + str(gi.layout) + + " is not " + + str(di.layout) + + ")" + ) + if _is_sparse_any_tensor(gi): + sparse_kind = str(gi.layout).replace("torch.", "").replace("_coo", "") + if gi.sparse_dim() != di.sparse_dim(): + raise GradcheckError( + f"grad is {sparse_kind} tensor, but has incorrect sparse_dim" + f" {gi.sparse_dim()}, expected {di.sparse_dim()}" + ) + if gi.dense_dim() != di.dense_dim(): + raise GradcheckError( + f"grad is {sparse_kind} tensor, but has incorrect dense_dim" + f" {gi.dense_dim()}, expected {di.dense_dim()}" + ) + gi = gi.to_dense() + di = di.to_dense() + if masked: + if not torch.allclose(gi, torch.zeros_like(gi)): + raise GradcheckError("backward not multiplied by grad_output") + elif not gi.eq(0).all(): + raise GradcheckError("backward not multiplied by grad_output") + if gi.dtype != di.dtype: + raise GradcheckError("grad is incorrect type") + if gi.device != di.device: + raise GradcheckError("grad is incorrect device") + if gi.size() != di.size(): + raise GradcheckError("grad is incorrect size") + 
+    return True
+
+
+def _test_undefined_forward_mode(func, outputs, inputs):
+    fwAD = torch.autograd.forward_ad
+
+    inp_tensors_idx, inp_tensors = _get_inp_tensors(inputs)
+    all_v, all_u, all_u_dense = _make_vectors(inp_tensors, outputs, use_forward_ad=True)
+
+    tensor_inputs = tuple(i for i in inputs if is_tensor_like(i) and i.requires_grad)
+
+    with fwAD.dual_level():
+        fw_grads = []
+        dual_inputs = []
+        tensor_indices = set()
+        for i, inp in enumerate(inputs):
+            if is_tensor_like(inp) and inp.requires_grad:
+                if inp.layout == torch._mkldnn:  # type: ignore[attr-defined]
+                    raise ValueError(
+                        "MKLDNN inputs are not supported for forward AD gradcheck."
+                    )
+
+                inp = fwAD.make_dual(inp.detach(), torch.zeros_like(inp))
+                # If inp is a differentiable view, the dual might not be the tangent given to
+                # make_dual, so read it explicitly from the dual tensor
+                fw_grads.append(fwAD.unpack_dual(inp)[1])
+                tensor_indices.add(i)
+            dual_inputs.append(inp)
+
+        for i, (fw_grad, u) in enumerate(zip(fw_grads, all_u)):
+            fw_grad.copy_(u.view_as(fw_grad))
+
+        for idx, inp in enumerate(inputs):
+            if idx not in tensor_indices:
+                continue
+            dual_inp_obj = dual_inputs[idx]
+
+            # case 1 (Materialized Zero Tensor Tangent)
+            dual_inputs[idx] = fwAD.make_dual(inp.detach(), torch.zeros_like(inp))
+            raw_outputs = _as_tuple(func(*dual_inputs))
+            dual_outputs1 = filter(_is_float_or_complex_tensor, raw_outputs)
+
+            # case 2 (Efficient Zero Tensor Tangent since we don't make a dual object and pass a regular tensor)
+            dual_inputs[idx] = inp.detach()
+            raw_outputs = _as_tuple(func(*dual_inputs))
+            dual_outputs2 = filter(_is_float_or_complex_tensor, raw_outputs)
+
+            # reset
+            dual_inputs[idx] = dual_inp_obj
+
+            for index_o, (d_o1, d_o2) in enumerate(zip(dual_outputs1, dual_outputs2)):
+                val1, res1 = fwAD.unpack_dual(d_o1)
+                val2, res2 = fwAD.unpack_dual(d_o2)
+
+                if not (res1 is None or res2 is None):
+                    if not torch.allclose(res1, res2):
+                        raise GradcheckError(
+                            f"Mismatch in tangent values for output with index {index_o} "
+                            f"when input {inp} has an undefined tangent value: "
+                            f"got {res1} but expected {res2}."
+                        )
+    return True
+
+
+def _test_undefined_backward_mode(func, outputs, inputs) -> bool:
+    diff_input_list: List[torch.Tensor] = list(_iter_tensors(inputs, True))
+    if not diff_input_list:
+        raise GradcheckError("no Tensors requiring grad found in input")
+
+    def warn_bc_breaking():
+        warnings.warn(
+            "Backwards compatibility: New undefined gradient support checking "
+            "feature is enabled by default, but it may break existing callers "
+            "of this function. If this is true for you, you can call this "
+            'function with "check_undefined_grad=False" to disable the feature'
+        )
+
+    def check_undefined_grad_support(output_to_check):
+        grads_output = [
+            torch.zeros_like(o, memory_format=torch.legacy_contiguous_format)
+            for o in output_to_check
+        ]
+        try:
+            grads_input = torch.autograd.grad(
+                output_to_check, diff_input_list, grads_output, allow_unused=True
+            )
+        except RuntimeError as e:
+            warn_bc_breaking()
+            raise GradcheckError(
+                "Expected backward function to handle undefined output grads. "
+                'Please look at "Notes about undefined output gradients" in '
+                '"tools/autograd/derivatives.yaml"'
+            ) from e
+
+        for gi, i in zip(grads_input, diff_input_list):
+            if (gi is not None) and (not gi.eq(0).all()):
+                warn_bc_breaking()
+                raise GradcheckError(
+                    "Expected all input grads to be undefined or zero when all output grads are undefined "
+                    'or zero. 
Please look at "Notes about undefined output gradients" in ' + '"tools/autograd/derivatives.yaml"' + ) + return True + + # All backward functions must work properly if all output grads are undefined + outputs_to_check = [ + [ + torch._C._functions.UndefinedGrad()(o) + for o in _differentiable_outputs(func(*inputs)) + # This check filters out Tensor-likes that aren't instances of Tensor. + if isinstance(o, torch.Tensor) + ] + ] + + # If there are multiple output grads, we should be able to undef one at a time without error + if len(outputs_to_check[0]) > 1: + for undef_grad_idx in range(len(outputs)): + output_to_check = _differentiable_outputs(func(*inputs)) + outputs_to_check.append( + [ + torch._C._functions.UndefinedGrad()(o) + if idx == undef_grad_idx + else o + for idx, o in enumerate(output_to_check) + ] + ) + + return all(check_undefined_grad_support(output) for output in outputs_to_check) + + +def _as_tuple(x): + if isinstance(x, tuple): + return x + elif isinstance(x, list): + return tuple(x) + else: + return (x,) + + +def _differentiable_outputs(x): + return tuple(o for o in _as_tuple(x) if o.requires_grad) + + +def _get_notallclose_msg( + analytical, + numerical, + output_idx, + input_idx, + complex_indices, + test_imag=False, + is_forward_ad=False, +) -> str: + out_is_complex = ( + (not is_forward_ad) and complex_indices and output_idx in complex_indices + ) + inp_is_complex = is_forward_ad and complex_indices and input_idx in complex_indices + part = "imaginary" if test_imag else "real" + element = "inputs" if is_forward_ad else "outputs" + prefix = ( + "" + if not (out_is_complex or inp_is_complex) + else f"While considering the {part} part of complex {element} only, " + ) + mode = "computed with forward mode " if is_forward_ad else "" + return ( + prefix + "Jacobian %smismatch for output %d with respect to input %d,\n" + "numerical:%s\nanalytical:%s\n" + % (mode, output_idx, input_idx, numerical, analytical) + ) + + +def _transpose(matrix_of_tensors): + # returns list of tuples + return list(zip(*matrix_of_tensors)) + + +def _real_and_imag_output(fn): + # returns new functions real(fn), and imag(fn) where real(fn) and imag(fn) behave the same as + # the original fn, except torch.real or torch.imag are applied to the complex outputs + def apply_to_c_outs(fn, fn_to_apply): + def wrapped_fn(*inputs): + outs = _as_tuple(fn(*inputs)) + return tuple(fn_to_apply(o) if o.is_complex() else o for o in outs) + + return wrapped_fn + + return apply_to_c_outs(fn, torch.real), apply_to_c_outs(fn, torch.imag) + + +def _real_and_imag_input(fn, complex_inp_indices, tupled_inputs): + # returns new functions that take real inputs instead of complex inputs as + # (x, y) -> fn(x + y * 1j). And it computes: inp -> fn(inp + y * 1j) and inp -> fn(x + inp * 1j). + # In each case, the other part is considered constant. + # We do not use 0 for the constant here to make sure we always call the user function with a valid input. 
+ def apply_to_c_inps(fn, fn_to_apply): + def wrapped_fn(*inputs): + new_inputs = list(inputs) + for should_be_complex in complex_inp_indices: + new_inputs[should_be_complex] = fn_to_apply( + new_inputs[should_be_complex], tupled_inputs[should_be_complex] + ) + return _as_tuple(fn(*new_inputs)) + + return wrapped_fn + + real_fn = apply_to_c_inps(fn, lambda inp, orig: inp + orig.imag * 1j) + imag_fn = apply_to_c_inps(fn, lambda inp, orig: orig.real + inp * 1j) + return real_fn, imag_fn + + +def _gradcheck_real_imag( + gradcheck_fn, + func, + func_out, + tupled_inputs, + outputs, + eps, + rtol, + atol, + check_grad_dtypes, + check_forward_ad, + check_backward_ad, + nondet_tol, + check_undefined_grad, +): + complex_out_indices = [i for i, o in enumerate(outputs) if o.is_complex()] + has_any_complex_output = any(o.is_complex() for o in _as_tuple(func_out)) + if check_backward_ad: + if has_any_complex_output: + real_fn, imag_fn = _real_and_imag_output(func) + + imag_func_out = imag_fn(*tupled_inputs) + imag_outputs = _differentiable_outputs(imag_func_out) + gradcheck_fn( + imag_fn, + imag_func_out, + tupled_inputs, + imag_outputs, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + complex_indices=complex_out_indices, + test_imag=True, + ) + + real_func_out = real_fn(*tupled_inputs) + real_outputs = _differentiable_outputs(real_func_out) + gradcheck_fn( + real_fn, + real_func_out, + tupled_inputs, + real_outputs, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + complex_indices=complex_out_indices, + ) + else: + gradcheck_fn( + func, + func_out, + tupled_inputs, + outputs, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + ) + + if check_forward_ad: + complex_inp_indices = [ + i + for i, inp in enumerate(tupled_inputs) + if is_tensor_like(inp) and inp.is_complex() + ] + if complex_inp_indices: + real_fn, imag_fn = _real_and_imag_input( + func, complex_inp_indices, tupled_inputs + ) + + imag_inputs = [ + inp.imag if is_tensor_like(inp) and inp.is_complex() else inp + for inp in tupled_inputs + ] + imag_func_out = imag_fn(*imag_inputs) + diff_imag_func_out = _differentiable_outputs(imag_func_out) + gradcheck_fn( + imag_fn, + imag_func_out, + imag_inputs, + diff_imag_func_out, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + complex_indices=complex_inp_indices, + test_imag=True, + use_forward_ad=True, + ) + + real_inputs = [ + inp.real if is_tensor_like(inp) and inp.is_complex() else inp + for inp in tupled_inputs + ] + real_func_out = real_fn(*real_inputs) + diff_real_func_out = _differentiable_outputs(real_func_out) + gradcheck_fn( + real_fn, + real_func_out, + real_inputs, + diff_real_func_out, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + complex_indices=complex_inp_indices, + use_forward_ad=True, + ) + if check_undefined_grad: + _test_undefined_forward_mode(imag_fn, imag_func_out, imag_inputs) + _test_undefined_forward_mode(real_fn, real_func_out, real_inputs) + else: + gradcheck_fn( + func, + func_out, + tupled_inputs, + outputs, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + use_forward_ad=True, + ) + if check_undefined_grad: + _test_undefined_forward_mode(func, outputs, tupled_inputs) + + +def _slow_gradcheck( + func, + func_out, + tupled_inputs, + outputs, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + *, + use_forward_ad=False, + complex_indices=None, + test_imag=False, + masked=False, +): + func_out = _as_tuple(func_out) + if not outputs: + return _check_no_differentiable_outputs( + func, tupled_inputs, 
func_out, eps=eps, is_forward_ad=use_forward_ad + ) + tupled_inputs_numerical = tupled_inputs if masked else _densify(tupled_inputs) + + numerical = _transpose( + _get_numerical_jacobian( + func, + tupled_inputs_numerical, + func_out, + eps=eps, + is_forward_ad=use_forward_ad, + ) + ) + # Note: [numerical vs analytical output length] + # The numerical path returns jacobian quantity for all outputs, even if requires_grad of that + # output is False. This behavior is necessary for _check_no_differentiable_outputs to work. + numerical = [nj for o, nj in zip(func_out, numerical) if o.requires_grad] + if use_forward_ad: + analytical_forward = _get_analytical_jacobian_forward_ad( + func, tupled_inputs, func_out, check_grad_dtypes=check_grad_dtypes + ) + + for i, n_per_out in enumerate(numerical): + for j, n in enumerate(n_per_out): + a = analytical_forward[j][i] + if not _allclose_with_type_promotion(a, n.to(a.device), rtol, atol): + raise GradcheckError( + _get_notallclose_msg( + a, n, i, j, complex_indices, test_imag, is_forward_ad=True + ) + ) + else: + for i, o in enumerate(outputs): + analytical = _check_analytical_jacobian_attributes( + tupled_inputs, o, nondet_tol, check_grad_dtypes + ) + + for j, (a, n) in enumerate(zip(analytical, numerical[i])): + if not _allclose_with_type_promotion(a, n.to(a.device), rtol, atol): + raise GradcheckError( + _get_notallclose_msg(a, n, i, j, complex_indices, test_imag) + ) + + return True + + +def _dot_with_type_promotion(u, v): + assert u.dim() == 1 and v.dim() == 1 + return (u * v).sum() + + +def _allclose_with_type_promotion(a, b, rtol, atol): + promoted_type = torch.promote_types(a.dtype, b.dtype) + a = a.to(dtype=promoted_type) + b = b.to(dtype=promoted_type) + return torch.allclose(a, b, rtol, atol) + + +def _to_real_dtype(dtype): + if dtype == torch.complex128: + return torch.float64 + elif dtype == torch.complex64: + return torch.float32 + else: + return dtype + + +def _vec_from_tensor(x, generator, downcast_complex=False): + # Create a random vector with the same number of elements as x and the same + # dtype/device. If x is complex and downcast_complex is False, we create a + # complex tensor with only real component. + if x.layout == torch.sparse_coo: + # For sparse, create a random sparse vec with random values in the same + # indices. Make sure size is set so that it isn't inferred to be smaller. 
+ x_values = x._values() + dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype + values = ( + torch.rand(x_values.numel(), generator=generator) + .to(dtype=dtype, device=x.device) + .view(x_values.shape) + ) + values /= values.norm() + vec = torch.sparse_coo_tensor(x._indices(), values, x.size(), device=x.device) + elif _is_sparse_compressed_tensor(x): + if x.layout in {torch.sparse_csr, torch.sparse_bsr}: + compressed_indices, plain_indices = x.crow_indices(), x.col_indices() + else: + compressed_indices, plain_indices = x.ccol_indices(), x.row_indices() + x_values = x.values() + dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype + values = ( + torch.rand(x_values.numel(), generator=generator) + .to(dtype=dtype, device=x.device) + .view(x_values.shape) + ) + values /= values.norm() + vec = torch.sparse_compressed_tensor( + compressed_indices, + plain_indices, + values, + x.size(), + layout=x.layout, + device=x.device, + ) + else: + dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype + vec = torch.rand(x.numel(), generator=generator).to( + dtype=dtype, device=x.device + ) + vec /= vec.norm() + return vec + + +def _get_inp_tensors(tupled_inputs): + inp_idx_tup = [ + (i, t) + for i, t in enumerate(tupled_inputs) + if is_tensor_like(t) and t.requires_grad + ] + return [tup[0] for tup in inp_idx_tup], [tup[1] for tup in inp_idx_tup] + + +def _adjusted_atol(atol, u, v): + # In slow gradcheck, we compare A and B element-wise, i.e., for some a, b we + # allow: |a - b| < atol + rtol * b. But since we now compare q1 = v^T A u and + # q2 = v^T B u, we must allow |q1 - q2| < v^T E u + rtol * v^T B u, where E is + # the correctly sized matrix in which each entry is atol. + # + # We see that atol needs to be scaled by v^T M u (where M is an all-ones M x N + # matrix): v^T M u = \sum_{i} \sum_{j} u_i * v_j = (\sum_{i} u_i)(\sum_{i} v_i) + # TODO: properly handle case when u is tuple instead of only taking first element + u = u[0] if isinstance(u, tuple) else u + sum_u = u.sum() + sum_v = 1.0 if v is None else v.sum() + return atol * float(sum_u) * float(sum_v) + + +FAST_FAIL_SLOW_OK_MSG = """ +Fast gradcheck failed but element-wise differences are small. This means that the +test might've passed in slow_mode! + +If you are adding a new operator, please file an issue and then use one of the +workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck: + +If the test +- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck + with `fast_mode=False` as a keyword argument. 
+- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test + to have `gradcheck_fast_mode=False` +- is a Module test (e.g., in common_nn.py), then modify the corresponding + module_test entry to have `gradcheck_fast_mode=False` +""".strip() + + +def _run_slow_mode_and_get_error( + func, tupled_inputs, outputs, input_idx, output_idx, rtol, atol, eps, is_forward_ad +): + # Compute jacobians in slow mode for better error message + slow_numerical = _get_numerical_jacobian( + func, tupled_inputs, outputs, eps=eps, is_forward_ad=is_forward_ad + )[input_idx][output_idx] + if is_forward_ad: + + def new_fn(inp): + new_inputs = list(tupled_inputs) + new_inputs[input_idx] = inp + return _as_tuple(func(*new_inputs))[output_idx] + + slow_analytical = _get_analytical_jacobian_forward_ad( + new_fn, (tupled_inputs[input_idx],), (outputs[output_idx],) + )[0][0] + else: + slow_analytical = _get_analytical_jacobian( + tupled_inputs, outputs, input_idx, output_idx + ) + + # Assume jacobians are non-empty and have the same shape + slow_max_diff = (slow_numerical - slow_analytical).abs().max() + + slow_allclose = torch.allclose(slow_analytical, slow_numerical, rtol, atol) + msg = ( + "\nThe above quantities relating the numerical and analytical jacobians are computed \n" + "in fast mode. See: https://github.com/pytorch/pytorch/issues/53876 for more background \n" + "about fast mode. Below, we recompute numerical and analytical jacobians in slow mode:\n\n" + f"Numerical:\n {slow_numerical}\n" + f"Analytical:\n{slow_analytical}\n\n" + f"The max per-element difference (slow mode) is: {slow_max_diff}.\n" + ) + if slow_allclose: + # Slow gradcheck would've passed! + msg += FAST_FAIL_SLOW_OK_MSG + return msg + + +def _to_flat_dense_if_sparse(tensor): + if _is_sparse_any_tensor(tensor): + return tensor.to_dense().reshape(-1) + else: + return tensor + + +def _make_vectors(inp_tensors, outputs, *, use_forward_ad): + # Use our own generator to avoid messing with the user's RNG state + g_cpu = torch.Generator() + + def _vec_from_tensor_cpu(*args): + # Default allocate all tensors on CPU, so they are on the same device as the generator + # even if the user specified a default device + with torch.device("cpu"): + return _vec_from_tensor(*args) + + all_u = [] + all_u_dense = [] + for inp in inp_tensors: + ur = _vec_from_tensor_cpu(inp, g_cpu, True) + ur_dense = _to_flat_dense_if_sparse(ur) + if inp.is_complex(): + ui = _vec_from_tensor_cpu(inp, g_cpu, True) + all_u.append((ur, ui)) + ui_dense = _to_flat_dense_if_sparse(ui) + all_u_dense.append((ur_dense, ui_dense)) + else: + all_u.append(ur) + all_u_dense.append(ur_dense) + all_v = ( + None + if use_forward_ad + else [_vec_from_tensor_cpu(out, g_cpu) for out in outputs] + ) + return all_v, all_u, all_u_dense + + +def _check_analytical_numerical_equal( + all_analytical, + all_numerical, + complex_indices, + tupled_inputs, + outputs, + func, + all_v, + all_u, + rtol, + atol, + eps, + test_imag, + *, + is_forward_ad=False, +): + for i, all_numerical_for_input_i in enumerate(all_numerical): + for j, n in enumerate(all_numerical_for_input_i): + # Forward AD generates the transpose of what this function expects + if is_forward_ad: + a = all_analytical[i][j] + else: + a = all_analytical[j][i] + n = n.to(device=a.device) + updated_atol = _adjusted_atol(atol, all_u[i], all_v[j] if all_v else None) + if not _allclose_with_type_promotion(a, n.to(a.device), rtol, updated_atol): + jacobians_str = _run_slow_mode_and_get_error( + func, tupled_inputs, outputs, 
i, j, rtol, atol, eps, is_forward_ad
+                )
+                raise GradcheckError(
+                    _get_notallclose_msg(
+                        a, n, j, i, complex_indices, test_imag, is_forward_ad
+                    )
+                    + jacobians_str
+                )
+
+
+def _fast_gradcheck(
+    func,
+    func_out,
+    inputs,
+    outputs,
+    eps,
+    rtol,
+    atol,
+    check_grad_dtypes,
+    nondet_tol,
+    *,
+    use_forward_ad=False,
+    complex_indices=None,
+    test_imag=False,
+    masked=False,
+):
+    # See https://github.com/pytorch/pytorch/issues/53876 for details
+    inp_tensors_idx, inp_tensors = _get_inp_tensors(inputs)
+    # Backward mode computes v^T * J (VJP)
+    # Since we computed J * u (JVP) through the finite difference method, we perform an
+    # equality check between (v^T * J) * u and v^T * (J * u)
+    # ----
+    # Forward mode computes J * u (JVP)
+    # Since we already compute the JVP through the finite difference method,
+    # we don't need v for the correctness check here, as asserted below
+    all_v, all_u, all_u_dense = _make_vectors(
+        inp_tensors, outputs, use_forward_ad=use_forward_ad
+    )
+
+    inputs_numerical, all_u_numerical, all_v_numerical = (
+        (inputs, all_u, all_v) if masked else _densify((inputs, all_u, all_v))
+    )
+
+    numerical_vJu = _get_numerical_vJu(
+        func,
+        inputs_numerical,
+        inp_tensors_idx,
+        func_out,
+        all_u_numerical,
+        all_v_numerical,
+        eps,
+        is_forward_ad=use_forward_ad,
+    )
+    # TODO: replicate https://github.com/pytorch/pytorch/pull/77743 for fast gradcheck as well
+    if use_forward_ad:
+        assert all_v is None
+        analytical_vJu = _get_analytical_jacobian_forward_ad(
+            func,
+            inputs,
+            _as_tuple(func_out),
+            all_u=all_u,
+            check_grad_dtypes=check_grad_dtypes,
+        )
+    else:
+        if not outputs:
+            _check_no_differentiable_outputs_fast(
+                func, func_out, inputs, inp_tensors_idx, all_u, eps, nondet_tol
+            )
+
+        analytical_vJu = _get_analytical_vJu_backward_mode(
+            inputs, outputs, nondet_tol, check_grad_dtypes, all_v, all_u_dense
+        )
+
+    _check_analytical_numerical_equal(
+        analytical_vJu,
+        numerical_vJu,
+        complex_indices,
+        inputs,
+        outputs,
+        func,
+        all_v,
+        all_u,
+        rtol,
+        atol,
+        eps,
+        test_imag,
+        is_forward_ad=use_forward_ad,
+    )
+
+    return True
+
+
+# Note [VarArg of Tensors]
+# ~~~~~~~~~~~~~~~~~~~~~~~~
+# 'func' accepts a vararg of tensors, which isn't expressible in the type system at the moment.
+# If https://mypy.readthedocs.io/en/latest/additional_features.html?highlight=callable#extended-callable-types is accepted,
+# the '...' first argument of Callable can be replaced with VarArg(Tensor).
+# For now, we permit any input.
+def gradcheck(
+    func: Callable[..., Union[_TensorOrTensors]],  # See Note [VarArg of Tensors]
+    inputs: _TensorOrTensors,
+    *,
+    eps: float = 1e-6,
+    atol: float = 1e-5,
+    rtol: float = 1e-3,
+    raise_exception: bool = True,
+    nondet_tol: float = 0.0,
+    check_undefined_grad: bool = True,
+    check_grad_dtypes: bool = False,
+    check_batched_grad: bool = False,
+    check_batched_forward_grad: bool = False,
+    check_forward_ad: bool = False,
+    check_backward_ad: bool = True,
+    fast_mode: bool = False,
+    masked: Optional[bool] = None,
+) -> bool:  # noqa: D400,D205
+    r"""Check gradients computed via small finite differences against analytical
+    gradients wrt tensors in :attr:`inputs` that are of floating point or complex type
+    and with ``requires_grad=True``.
+
+    The check between numerical and analytical gradients uses :func:`~torch.allclose`.
+
+    For most of the complex functions we consider for optimization purposes, no notion of
+    Jacobian exists. Instead, gradcheck verifies if the numerical and analytical values of
+    the Wirtinger and Conjugate Wirtinger derivatives are consistent. Because the gradient
+    computation is done under the assumption that the overall function has a real-valued
+    output, we treat functions with complex output in a special way. For these functions,
+    gradcheck is applied to two real-valued functions corresponding to taking the real
+    components of the complex outputs for the first, and taking the imaginary components
+    of the complex outputs for the second. For more details, check out
+    :ref:`complex_autograd-doc`.
+
+    .. note::
+        The default values are designed for :attr:`input` of double precision.
+        This check will likely fail if :attr:`input` is of less precision, e.g.,
+        ``FloatTensor``.
+
+    .. note::
+        Gradcheck may fail when evaluated on non-differentiable points
+        because the numerically computed gradients via finite differencing may differ from
+        those computed analytically (not necessarily because either is incorrect).
+        For more context, see :ref:`non-differentiable-func-grad`.
+
+    .. warning::
+        If any checked tensor in :attr:`input` has overlapping memory, i.e.,
+        different indices pointing to the same memory address (e.g., from
+        :func:`torch.expand`), this check will likely fail because the numerical
+        gradients computed by point perturbation at such indices will change
+        values at all other indices that share the same memory address.
+
+    Args:
+        func (function): a Python function that takes Tensor inputs and returns
+            a Tensor or a tuple of Tensors
+        inputs (tuple of Tensor or Tensor): inputs to the function
+        eps (float, optional): perturbation for finite differences
+        atol (float, optional): absolute tolerance
+        rtol (float, optional): relative tolerance
+        raise_exception (bool, optional): indicating whether to raise an exception if
+            the check fails. The exception gives more information about the
+            exact nature of the failure. This is helpful when debugging gradchecks.
+        nondet_tol (float, optional): tolerance for non-determinism. When running
+            identical inputs through the differentiation, the results must either match
+            exactly (default, 0.0) or be within this tolerance.
+        check_undefined_grad (bool, optional): if ``True``, check if undefined output grads
+            are supported and treated as zeros, for ``Tensor`` outputs.
+        check_batched_grad (bool, optional): if ``True``, check if we can compute
+            batched gradients using prototype vmap support. Defaults to ``False``.
+        check_batched_forward_grad (bool, optional): if ``True``, checks if we can compute
+            batched forward gradients using forward ad and prototype vmap support. Defaults to ``False``.
+        check_forward_ad (bool, optional): if ``True``, check that the gradients computed with forward
+            mode AD match the numerical ones. Defaults to ``False``.
+        check_backward_ad (bool, optional): if ``False``, do not perform any checks that rely on
+            backward mode AD to be implemented. Defaults to ``True``.
+        fast_mode (bool, optional): Fast mode for gradcheck and gradgradcheck is currently only
+            implemented for R to R functions. If none of the inputs and outputs are complex,
+            a faster implementation of gradcheck that no longer computes the entire jacobian
+            is run; otherwise, we fall back to the slow implementation.
+        masked (bool, optional): if ``True``, the gradients of unspecified elements of
+            sparse tensors are ignored. Defaults to ``False``.
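+
+    Example::
+
+        >>> # A minimal usage sketch: double-precision inputs, as the note above
+        >>> # recommends; torch.exp is just an illustrative elementwise function.
+        >>> inp = torch.randn(4, dtype=torch.double, requires_grad=True)
+        >>> torch.autograd.gradcheck(torch.exp, (inp,), eps=1e-6, atol=1e-4)
+        True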
+
+    Returns:
+        ``True`` if all differences satisfy allclose condition
+
+    """
+    assert (
+        check_forward_ad or check_backward_ad
+    ), "Expected at least one of check_forward_ad or check_backward_ad to be True"
+    assert not (
+        check_batched_grad and not check_backward_ad
+    ), "Setting check_batched_grad=True requires check_backward_ad to be True"
+    assert not (
+        check_batched_forward_grad and not check_forward_ad
+    ), "Setting check_batched_forward_grad=True requires check_forward_ad to be True"
+    args = locals().copy()
+    args.pop("raise_exception")
+    if not raise_exception:
+        try:
+            return _gradcheck_helper(**args)
+        except GradcheckError:
+            return False
+    else:
+        return _gradcheck_helper(**args)
+
+
+def _gradcheck_helper(
+    func,
+    inputs,
+    eps,
+    atol,
+    rtol,
+    nondet_tol,
+    check_undefined_grad,
+    check_grad_dtypes,
+    check_batched_grad,
+    check_batched_forward_grad,
+    check_forward_ad,
+    check_backward_ad,
+    fast_mode,
+    masked,
+):
+    tupled_inputs = _as_tuple(inputs)
+    _check_inputs(tupled_inputs)
+
+    func_out = func(*tupled_inputs)
+    outputs = _differentiable_outputs(func_out)
+    _check_outputs(outputs)
+
+    gradcheck_fn = functools.partial(
+        _fast_gradcheck if fast_mode else _slow_gradcheck, masked=masked
+    )
+    _gradcheck_real_imag(
+        gradcheck_fn,
+        func,
+        func_out,
+        tupled_inputs,
+        outputs,
+        eps,
+        rtol,
+        atol,
+        check_grad_dtypes,
+        check_forward_ad=check_forward_ad,
+        check_backward_ad=check_backward_ad,
+        nondet_tol=nondet_tol,
+        check_undefined_grad=check_undefined_grad,
+    )
+
+    if check_batched_forward_grad:
+        _test_batched_grad_forward_ad(func, tupled_inputs)
+
+    # Short circuit because remaining tests rely on backward AD to be implemented
+    if not check_backward_ad:
+        return True
+
+    for i, o in enumerate(outputs):
+        if check_batched_grad:
+            _test_batched_grad(tupled_inputs, o, i)
+
+    _test_backward_mul_by_grad_output(outputs, tupled_inputs, masked)
+
+    if check_undefined_grad and check_backward_ad:
+        _test_undefined_backward_mode(func, outputs, tupled_inputs)
+    return True
+
+
+def gradgradcheck(
+    func: Callable[..., _TensorOrTensors],  # See Note [VarArg of Tensors]
+    inputs: _TensorOrTensors,
+    grad_outputs: Optional[_TensorOrTensors] = None,
+    *,
+    eps: float = 1e-6,
+    atol: float = 1e-5,
+    rtol: float = 1e-3,
+    gen_non_contig_grad_outputs: bool = False,
+    raise_exception: bool = True,
+    nondet_tol: float = 0.0,
+    check_undefined_grad: bool = True,
+    check_grad_dtypes: bool = False,
+    check_batched_grad: bool = False,
+    check_fwd_over_rev: bool = False,
+    check_rev_over_rev: bool = True,
+    fast_mode: bool = False,
+    masked: bool = False,
+) -> bool:  # noqa: D400,D205
+    r"""Check gradients of gradients computed via small finite differences
+    against analytical gradients wrt tensors in :attr:`inputs` and
+    :attr:`grad_outputs` that are of floating point or complex type and with
+    ``requires_grad=True``.
+
+    This function checks that backpropagating through the gradients computed
+    to the given :attr:`grad_outputs` is correct.
+
+    The check between numerical and analytical gradients uses :func:`~torch.allclose`.
+
+    .. note::
+        The default values are designed for :attr:`input` and
+        :attr:`grad_outputs` of double precision. This check will likely fail if
+        they are of less precision, e.g., ``FloatTensor``.
+
+    ..
warning:: + If any checked tensor in :attr:`input` and :attr:`grad_outputs` has + overlapping memory, i.e., different indices pointing to the same memory + address (e.g., from :func:`torch.expand`), this check will likely fail + because the numerical gradients computed by point perturbation at such + indices will change values at all other indices that share the same + memory address. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a Tensor or a tuple of Tensors + inputs (tuple of Tensor or Tensor): inputs to the function + grad_outputs (tuple of Tensor or Tensor, optional): The gradients with + respect to the function's outputs. + eps (float, optional): perturbation for finite differences + atol (float, optional): absolute tolerance + rtol (float, optional): relative tolerance + gen_non_contig_grad_outputs (bool, optional): if :attr:`grad_outputs` is + ``None`` and :attr:`gen_non_contig_grad_outputs` is ``True``, the + randomly generated gradient outputs are made to be noncontiguous + raise_exception (bool, optional): indicating whether to raise an exception if + the check fails. The exception gives more information about the + exact nature of the failure. This is helpful when debugging gradchecks. + nondet_tol (float, optional): tolerance for non-determinism. When running + identical inputs through the differentiation, the results must either match + exactly (default, 0.0) or be within this tolerance. Note that a small amount + of nondeterminism in the gradient will lead to larger inaccuracies in + the second derivative. + check_undefined_grad (bool, optional): if True, check if undefined output grads + are supported and treated as zeros + check_batched_grad (bool, optional): if True, check if we can compute + batched gradients using prototype vmap support. Defaults to False. + fast_mode (bool, optional): if True, run a faster implementation of gradgradcheck that + no longer computes the entire jacobian. + masked (bool, optional): if True, the gradients of unspecified elements of + sparse tensors are ignored (default, False). + Returns: + True if all differences satisfy allclose condition + """ + assert ( + check_fwd_over_rev or check_rev_over_rev + ), "Expected at least one of check_fwd_over_rev or check_rev_over_rev to be True" + assert not ( + check_undefined_grad and not check_rev_over_rev + ), "Setting check_undefined_grad=True requires check_rev_over_rev to be True" + assert not ( + check_batched_grad and not check_rev_over_rev + ), "Setting check_batched_grad=True requires check_rev_over_rev to be True" + # TODO: do we want to test this too? 
+ # assert not (check_batched_forward_grad and not check_fwd_over_rev), ( + # "Setting check_batched_forward_grad=True requires check_fwd_over_rev to be True") + tupled_inputs = _as_tuple(inputs) + + if grad_outputs is None: + # If grad_outputs is not specified, create random Tensors of the same shape, type, and device as the outputs + + outputs = _differentiable_outputs(func(*tupled_inputs)) + tupled_grad_outputs = tuple( + torch.testing.make_tensor( + x.shape, + dtype=x.dtype + if x.is_floating_point() or x.is_complex() + else torch.double, + device=x.device, + low=-1, + high=1, + requires_grad=True, + noncontiguous=gen_non_contig_grad_outputs, + ) + for x in outputs + ) + else: + tupled_grad_outputs = _as_tuple(grad_outputs) + + num_outputs = len(tupled_grad_outputs) + + # NB: We need to save the requires_grad information about the inputs here because gradcheck detaches inputs + # before running forward mode AD + diff_input_args_indices = { + i for i, x in enumerate(tupled_inputs) if is_tensor_like(x) and x.requires_grad + } + diff_grad_output_indices = { + i for i, x in enumerate(tupled_grad_outputs) if x.requires_grad + } + + def new_func(*args): + # Restore the requires_grad information + input_args = tuple( + x.requires_grad_() if i in diff_input_args_indices else x + for i, x in enumerate(args[:-num_outputs]) + ) + outputs = _differentiable_outputs(func(*input_args)) + grad_outputs = tuple( + x.requires_grad_() if i in diff_grad_output_indices else x + for i, x in enumerate(args[-num_outputs:]) + ) + diff_input_args = tuple( + x for i, x in enumerate(input_args) if i in diff_input_args_indices + ) + grad_inputs = torch.autograd.grad( + outputs, diff_input_args, grad_outputs, create_graph=True, allow_unused=True + ) + grad_inputs = tuple(g for g in grad_inputs if g is not None) + return grad_inputs + + return gradcheck( + new_func, + tupled_inputs + tupled_grad_outputs, + eps=eps, + atol=atol, + rtol=rtol, + raise_exception=raise_exception, + nondet_tol=nondet_tol, + check_undefined_grad=check_undefined_grad, + check_grad_dtypes=check_grad_dtypes, + check_batched_grad=check_batched_grad, + fast_mode=fast_mode, + check_forward_ad=check_fwd_over_rev, + check_backward_ad=check_rev_over_rev, + masked=masked, + ) diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/graph.py b/parrot/lib/python3.10/site-packages/torch/autograd/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..c41eee5e7d4ecddf83012a01f5d7f0822efe850c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/graph.py @@ -0,0 +1,774 @@ +# mypy: allow-untyped-defs +import abc +import collections +import contextlib +import functools +import logging +import threading +import weakref +from collections import defaultdict, namedtuple +from typing import ( + Any, + Callable, + cast, + Deque, + Dict, + List, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +import torch +from torch.autograd.variable import Variable +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils.hooks import RemovableHandle + +log = logging.getLogger(__name__) + + +__all__ = [ + "saved_tensors_hooks", + "save_on_cpu", + "disable_saved_tensors_hooks", + "register_multi_grad_hook", + "allow_mutation_on_saved_tensors", + "Node", + "GradientEdge", + "get_gradient_edge", + "increment_version", +] + + +class Node(abc.ABC): + @abc.abstractmethod + def name(self) -> str: + r"""Return the name. 
+
+        Example::
+
+            >>> import torch
+            >>> a = torch.tensor([0., 0., 0.], requires_grad=True)
+            >>> b = a.clone()
+            >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node)
+            >>> print(b.grad_fn.name())
+            CloneBackward0
+        """
+        ...
+
+    @property
+    @abc.abstractmethod
+    def next_functions(self) -> Tuple[Tuple[Optional["Node"], int], ...]:
+        ...
+
+    @abc.abstractmethod
+    def metadata(self) -> dict:
+        r"""Return the metadata."""
+        ...
+
+    @abc.abstractmethod
+    def _register_hook_dict(self, tensor: torch.Tensor) -> None:
+        ...
+
+    @abc.abstractmethod
+    def register_hook(self, fn: Callable[..., Any]) -> RemovableHandle:
+        r"""Register a backward hook.
+
+        The hook will be called every time a gradient with respect to the
+        Node is computed. The hook should have the following signature::
+
+            hook(grad_inputs: Tuple[Tensor], grad_outputs: Tuple[Tensor]) -> Tuple[Tensor] or None
+
+        The hook should not modify its argument, but it can optionally return
+        a new gradient which will be used in place of :attr:`grad_inputs`.
+
+        This function returns a handle with a method ``handle.remove()``
+        that removes the hook from the node.
+
+        .. note::
+            See :ref:`backward-hooks-execution` for more information on when this hook
+            is executed, and how its execution is ordered relative to other hooks.
+
+        Example::
+
+            >>> import torch
+            >>> a = torch.tensor([0., 0., 0.], requires_grad=True)
+            >>> b = a.clone()
+            >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node)
+            >>> handle = b.grad_fn.register_hook(lambda gI, gO: (gO[0] * 2,))
+            >>> b.sum().backward(retain_graph=True)
+            >>> print(a.grad)
+            tensor([2., 2., 2.])
+            >>> handle.remove()  # Removes the hook
+            >>> a.grad = None
+            >>> b.sum().backward(retain_graph=True)
+            >>> print(a.grad)
+            tensor([1., 1., 1.])
+        """
+        ...
+
+    @abc.abstractmethod
+    def register_prehook(self, fn: Callable[..., Any]) -> RemovableHandle:
+        r"""Register a backward pre-hook.
+
+        The hook will be called every time a gradient with respect to the
+        Node is computed. The hook should have the following signature::
+
+            hook(grad_outputs: Tuple[Tensor]) -> Tuple[Tensor] or None
+
+        The hook should not modify its argument, but it can optionally return
+        a new gradient which will be used in place of :attr:`grad_outputs`.
+
+        This function returns a handle with a method ``handle.remove()``
+        that removes the hook from the node.
+
+        .. note::
+            See :ref:`backward-hooks-execution` for more information on when this hook
+            is executed, and how its execution is ordered relative to other hooks.
+
+        Example::
+
+            >>> a = torch.tensor([0., 0., 0.], requires_grad=True)
+            >>> b = a.clone()
+            >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node)
+            >>> handle = b.grad_fn.register_prehook(lambda gI: (gI[0] * 2,))
+            >>> b.sum().backward(retain_graph=True)
+            >>> print(a.grad)
+            tensor([2., 2., 2.])
+            >>> handle.remove()
+            >>> a.grad = None
+            >>> b.sum().backward(retain_graph=True)
+            >>> print(a.grad)
+            tensor([1., 1., 1.])
+        """
+        ...
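+
+    # Note: the __subclasshook__ defined below makes isinstance()/issubclass()
+    # checks against Node succeed for the C++-implemented autograd node types
+    # exposed under torch._C._functions (e.g. CloneBackward0) and for the
+    # backward nodes of custom autograd Functions, even though neither
+    # inherits from this ABC directly.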
+ + @classmethod + def __subclasshook__(cls, C): + if cls is Node: + if ( + C is not None and C is getattr(torch._C._functions, C.__name__, None) + ) or issubclass(C, torch.autograd.function.BackwardCFunction): + return True + return NotImplemented + + +def _get_grad_fn_or_grad_acc(t): + if t.requires_grad and t.grad_fn is None: + with torch.enable_grad(): + return t.view_as(t).grad_fn.next_functions[0][0] + else: + return t.grad_fn + + +GradientEdge = namedtuple("GradientEdge", ("node output_nr")) +GradientEdge.__doc__ = """\ +Object representing a given gradient edge within the autograd graph. +To get the gradient edge where a given Tensor gradient will be computed, +you can do ``edge = autograd.graph.get_gradient_edge(tensor)``. +""" + + +def get_gradient_edge(tensor): + """Get the gradient edge for computing the gradient of the given Tensor. + + In particular, it is equivalent to call + ``g = autograd.grad(loss, input)`` and ``g = autograd.grad(loss, get_gradient_edge(input))``. + """ + if not tensor.requires_grad: + raise RuntimeError( + "It is not possible to get the gradient edge for a Tensor that does not require gradients" + ) + grad_fn = _get_grad_fn_or_grad_acc(tensor) + + # Note that output_nr default to 0 which is the right value + # for the AccumulateGrad node. + return GradientEdge(grad_fn, tensor.output_nr) + + +def increment_version(tensor): + """Update autograd metadata tracking whether the given Tensor was modified in place. + + This is to enable more accurate error checking within the autograd engine. + It is already done automatically by PyTorch functions and within custom Function + when mark_dirty() is called appropriately so you only need to call this explicitly + if you are doing inplace operation on the Tensor data in a way that Pytorch doesn't + know about. For example a custom kernel that reads the Tensor data_ptr and modifies + the memory inplace based on this pointer. + + Note that incrementing the version counter multiple times for a single inplace operation + is not problematic. + """ + torch._C._increment_version(tensor) + + +class saved_tensors_hooks: + """Context-manager that sets a pair of pack / unpack hooks for saved tensors. + + Use this context-manager to define how intermediary results of an operation + should be packed before saving, and unpacked on retrieval. + + In that context, the ``pack_hook`` function will be called everytime an + operation saves a tensor for backward (this includes intermediary results + saved using + :func:`~torch.autograd.function._ContextMethodMixin.save_for_backward` but + also those recorded by a PyTorch-defined operation). The output of + ``pack_hook`` is then stored in the computation graph instead of the + original tensor. + + The ``unpack_hook`` is called when the saved tensor needs to be accessed, + namely when executing :func:`torch.Tensor.backward()` or + :func:`torch.autograd.grad()`. It takes as argument the *packed* object + returned by ``pack_hook`` and should return a tensor which has the same + content as the original tensor (passed as input to the corresponding + ``pack_hook``). + + The hooks should have the following signatures: + + pack_hook(tensor: Tensor) -> Any + + unpack_hook(Any) -> Tensor + + where the return value of ``pack_hook`` is a valid input to ``unpack_hook``. + + In general, you want ``unpack_hook(pack_hook(t))`` to be equal to ``t`` in terms + of value, size, dtype and device. + + Example:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def pack_hook(x): + ... 
print("Packing", x) + ... return x + >>> + >>> def unpack_hook(x): + ... print("Unpacking", x) + ... return x + >>> + >>> a = torch.ones(5, requires_grad=True) + >>> b = torch.ones(5, requires_grad=True) * 2 + >>> with torch.autograd.graph.saved_tensors_hooks(pack_hook, unpack_hook): + ... y = a * b + Packing tensor([1., 1., 1., 1., 1.], requires_grad=True) + Packing tensor([2., 2., 2., 2., 2.], grad_fn=) + >>> y.sum().backward() + Unpacking tensor([1., 1., 1., 1., 1.], requires_grad=True) + Unpacking tensor([2., 2., 2., 2., 2.], grad_fn=) + + .. warning :: + Performing an inplace operation on the input to either hooks may lead + to undefined behavior. + + .. warning :: + Only one pair of hooks is allowed at a time. When recursively nesting this + context-manager, only the inner-most pair of hooks will be applied. + """ + + def __init__( + self, + pack_hook: Callable[[torch.Tensor], Any], + unpack_hook: Callable[[Any], torch.Tensor], + ): + self.pack_hook = pack_hook + self.unpack_hook = unpack_hook + + def __enter__(self): + torch._C._autograd._push_saved_tensors_default_hooks( + self.pack_hook, self.unpack_hook + ) + + def __exit__(self, *args: object): + torch._C._autograd._pop_saved_tensors_default_hooks() + + +class save_on_cpu(saved_tensors_hooks): + """Context manager under which tensors saved by the forward pass will be stored on cpu, then retrieved for backward. + + When performing operations within this context manager, intermediary + results saved in the graph during the forward pass will be moved to CPU, + then copied back to the original device when needed for the backward pass. + If the graph was already on CPU, no tensor copy is performed. + + Use this context-manager to trade compute for GPU memory usage (e.g. + when your model doesn't fit in GPU memory during training). + + Args: + pin_memory (bool): If ``True`` tensors will be saved to CPU pinned memory + during packing and copied to GPU asynchronously during unpacking. + Defaults to ``False``. + Also see :ref:`cuda-memory-pinning`. + + + Example:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> a = torch.randn(5, requires_grad=True, device="cuda") + >>> b = torch.randn(5, requires_grad=True, device="cuda") + >>> c = torch.randn(5, requires_grad=True, device="cuda") + >>> + >>> def f(a, b, c): + ... prod_1 = a * b # a and b are saved on GPU + ... with torch.autograd.graph.save_on_cpu(): + ... prod_2 = prod_1 * c # prod_1 and c are saved on CPU + ... y = prod_2 * a # prod_2 and a are saved on GPU + ... 
+        >>>
+        >>> y = f(a, b, c)
+        >>> del a, b, c  # for illustration only
+        >>> # the content of a, b, and prod_2 are still alive on GPU
+        >>> # the content of prod_1 and c only live on CPU
+        >>> y.sum().backward()  # all CPU tensors are moved back to GPU, for backward
+        >>> # all intermediary tensors are released (deleted) after the call to backward
+
+    """
+
+    def __init__(self, pin_memory=False, device_type="cuda"):
+        device_module = getattr(torch, device_type, torch.cuda)
+
+        def pack_to_cpu(tensor):
+            if not pin_memory:
+                return (tensor.device, tensor.cpu())
+            packed = torch.empty(
+                tensor.size(),
+                dtype=tensor.dtype,
+                layout=tensor.layout,
+                pin_memory=(device_module.is_available() and not tensor.is_sparse),
+            )
+            packed.copy_(tensor)
+            return (tensor.device, packed)
+
+        def unpack_from_cpu(packed):
+            device, tensor = packed
+            return tensor.to(device, non_blocking=pin_memory)
+
+        super().__init__(pack_to_cpu, unpack_from_cpu)
+
+
+@contextlib.contextmanager
+def disable_saved_tensors_hooks(error_message):
+    """Context-manager that disables the saved tensors default hooks feature.
+
+    Useful if you are creating a feature that does not work with saved
+    tensors default hooks.
+
+    Args:
+        error_message (str): When saved tensors default hooks are used while they
+            are disabled, a RuntimeError with this error message gets raised.
+
+    Example::
+
+        >>> # xdoctest: +SKIP(failing)
+        >>> message = "saved tensors default hooks are disabled"
+        >>> with torch.autograd.graph.disable_saved_tensors_hooks(message):
+        ...     # Raises RuntimeError: saved tensors default hooks are disabled
+        ...     with torch.autograd.graph.save_on_cpu():
+        ...         pass
+
+    """
+    try:
+        maybe_prev_message = (
+            torch._C._autograd._saved_tensors_hooks_get_disabled_error_message()
+        )
+        torch._C._autograd._saved_tensors_hooks_disable(error_message)
+        yield
+    finally:
+        # See NOTE: [disabled_error_message invariant]
+        if maybe_prev_message is None:
+            torch._C._autograd._saved_tensors_hooks_enable()
+        else:
+            torch._C._autograd._saved_tensors_hooks_disable(maybe_prev_message)
+
+
+class _MultiHandle(RemovableHandle):
+    handles: Tuple[RemovableHandle, ...]
+
+    def __init__(self, handles: Tuple[RemovableHandle, ...]):
+        self.handles = handles
+
+    def remove(self):
+        for handle in self.handles:
+            handle.remove()
+
+    def __getstate__(self):
+        return self.handles
+
+    def __setstate__(self, state):
+        self.handles = state
+
+
+def register_multi_grad_hook(
+    tensors: Sequence[torch.Tensor],
+    fn: Union[
+        Callable[[Sequence[Optional[torch.Tensor]]], None],
+        Callable[[torch.Tensor], None],
+    ],
+    *,
+    mode: str = "all",
+):
+    r"""Register a multi-grad backward hook.
+
+    There are two supported modes: ``"all"`` and ``"any"``.
+
+    Under the ``"all"`` mode, the hook will be called after gradients with respect to every tensor in
+    :attr:`tensors` have been computed. If a tensor is in :attr:`tensors` but
+    is not part of the graph, or if a tensor is not needed to compute the gradients
+    for any ``inputs`` specified for the current ``.backward()`` or ``.grad()`` call,
+    this tensor will be ignored and the hook will not wait for its gradient to be
+    computed.
+
+    After every non-ignored tensor's gradient has been computed, :attr:`fn` will be
+    called with those gradients. ``None`` will be passed for tensors that did not
+    have their gradients computed.
+
+    Under the ``"any"`` mode, the hook will be called after the first gradient
+    with respect to a tensor in :attr:`tensors` has been computed.
The hook + will be called with that gradient as its argument. + + The hook should not modify its arguments. + + This function returns a handle with a method ``handle.remove()`` that removes the hook. + + .. note:: + See :ref:`backward-hooks-execution` for more information on how when this hook + is executed, and how its execution is ordered relative to other hooks. + + Example:: + + >>> import torch + >>> + >>> a = torch.rand(2, 3, requires_grad=True) + >>> b = torch.rand(2, 3, requires_grad=True) + >>> c = a * b + >>> d = a * b + >>> + >>> def fn(grads): + ... print([g is not None for g in grads]) + ... + >>> torch.autograd.graph.register_multi_grad_hook((a, b, c, d), fn) + >>> + >>> c.sum().backward(retain_graph=True) + [True, True, True, False] + >>> c.sum().backward(inputs=(a,), retain_graph=True) + [True, False, True, False] + >>> + """ + supported_modes = ("all", "any") + if mode not in supported_modes: + raise ValueError(f"Expects mode to be one of {supported_modes} but got {mode}") + + if mode == "all": + count: Dict[int, int] = dict() + nb_calls = None + buffer: Dict[int, List[Optional[torch.Tensor]]] = dict() + + grad_fns = list(map(_get_grad_fn_or_grad_acc, tensors)) + len_tensors = len(tensors) + + def get_inner_hook(idx): + def inner_hook(grad: torch.Tensor): + nonlocal count, nb_calls, buffer, fn + id = torch._C._current_graph_task_id() + assert ( + id != -1 + ), "expected this hook to be called inside a backward call" + count[id] = count.get(id, 0) + buffer[id] = buffer.get(id, [None] * len_tensors) + + if count[id] == 0: + # On the first call, compute the actual nb_calls and buffer + nb_calls = sum(torch._C._will_engine_execute_node(g) for g in grad_fns) # type: ignore[attr-defined] + + buffer[id][idx] = grad + count[id] += 1 + + if count[id] == nb_calls: + fn = cast(Callable[[Sequence[Optional[torch.Tensor]]], None], fn) + fn(buffer[id]) + del count[id] + del buffer[id] + + return inner_hook + + handles: Tuple[RemovableHandle] = tuple( + t.register_hook(get_inner_hook(i)) for i, t in enumerate(tensors) + ) + elif mode == "any": + fn = cast(Callable[[torch.Tensor], None], fn) + lock = threading.Lock() + ran_hook: Dict[int, bool] = defaultdict(bool) + + @functools.wraps(fn) + def wrapped_fn(grad: torch.Tensor): + nonlocal ran_hook + id = torch._C._current_graph_task_id() + assert id != -1, "expected this hook to be called inside a backward call" + with lock: + prev, ran_hook[id] = ran_hook[id], True + if prev: + return + fn(grad) + + handles = tuple( + tensor.register_hook(wrapped_fn) + for tensor in tensors + if tensor.requires_grad + ) + + return _MultiHandle(handles) # type: ignore[possibly-undefined] + + +# NOTE [Allow mutation on tensors saved for backward] +# +# 1. Tensor gets saved for backward +# - remember the python object id and the version of the tensor +# - remember aliasing information (data_ptr of base + version) +# - save the original so we control its lifetime +# 2. Any time a tensor gets in-placed +# - for each tensor aliased to it: +# - check using its object id and version to see if it has been saved +# - if it has been saved, clone it +# - delete the reference to the original +# 3. during backward +# - if the clone exists, the tensor must've been modified in-place +_allow_mutation_on_saved_tensors_enabled = False + + +def _get_tid(t) -> Tuple[int, int, int]: + # FIXME: This is almost definitely a bug. 
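+    # A "tid" identifies a particular tensor object at a particular version:
+    # (python object id, storage data pointer, version counter). Fake and
+    # functional tensors have no real storage, so data_ptr is stubbed to 0 below.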
+    if isinstance(
+        t,
+        (
+            torch._subclasses.fake_tensor.FakeTensor,
+            torch._subclasses.functional_tensor.FunctionalTensor,
+        ),
+    ):
+        data_ptr = 0
+    else:
+        data_ptr = t.data_ptr()
+    return (id(t), data_ptr, t._version)
+
+
+def _get_sid(t) -> Tuple[int, int]:
+    # FIXME: This is almost definitely a bug.
+    if isinstance(
+        t,
+        (
+            torch._subclasses.fake_tensor.FakeTensor,
+            torch._subclasses.functional_tensor.FunctionalTensor,
+        ),
+    ):
+        data_ptr = 0
+    else:
+        data_ptr = t.data_ptr()
+    return (data_ptr, t._version)
+
+
+class _Handle:
+    pass
+
+
+class _swap_with_cloned(saved_tensors_hooks):
+    def __init__(self, ctx):
+        def pack_hook(t):
+            tid = _get_tid(t)
+            sid = _get_sid(t)
+            # Tensors saved for backward have an entry in _tid_to_weakhandle
+            handle: Optional[_Handle] = None
+
+            # Save aliasing information
+            ctx.sid_to_tid[sid].add(tid)
+
+            # NB: The same tensor (of the same version) can be saved multiple times
+            if tid not in ctx.tid_to_weakhandle:
+                handle = _Handle()
+                ctx.tid_to_weakhandle[tid] = handle
+                ctx.original[handle] = t
+            else:
+                # Store an additional strong reference to the handle
+                handle = ctx.tid_to_weakhandle[tid]
+            return handle
+
+        def unpack_hook(tup):
+            handle = tup
+            error_msg = (
+                "Trying to backward outside of the 'allow_mutation_on_saved_tensors' context "
+                "in which the graph was originally recorded."
+            )
+            assert _allow_mutation_on_saved_tensors_enabled, error_msg
+            if handle in ctx.cloned:
+                res = ctx.cloned[handle]
+            else:
+                assert handle in ctx.original, error_msg
+                res = ctx.original[handle]
+            return res
+
+        super().__init__(pack_hook, unpack_hook)
+
+
+class _CloneArgBeforeMutateMode(TorchDispatchMode):
+    def __init__(self, ctx):
+        self.ctx = ctx
+
+    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
+        kwargs = kwargs or {}
+
+        for idx, arg in enumerate(func._schema.arguments):
+            if arg.alias_info is not None and arg.alias_info.is_write:
+                t = kwargs["out"] if arg.is_out else args[idx]
+                tid = _get_tid(t)
+                sid = _get_sid(t)
+                ctx = self.ctx
+                if sid in ctx.sid_to_tid:
+                    for tid in ctx.sid_to_tid[sid]:
+                        if tid not in ctx.tid_to_weakhandle:
+                            # We know that if tid is in sid_to_tid, then it must also be in
+                            # tid_to_weakhandle. However, it is possible for the tensor to be
+                            # saved at one point, but cleared by backward before it is modified
+                            # in-place. Consider the following example:
+                            #
+                            # >>> a = torch.randn(2, 3, requires_grad=True).clone()
+                            # >>> out = (a**2).sum()
+                            # >>> out.backward()
+                            # >>> a.sin_()
+                            continue
+                        handle = ctx.tid_to_weakhandle[tid]
+                        if handle in ctx.cloned:
+                            # The same exact tensor has been cloned already
+                            continue
+                        ctx.cloned[handle] = ctx.original[handle].clone()
+                        del ctx.original[handle]
+
+        rs = func(*args, **kwargs)
+        return rs
+
+
+class _AllowMutationOnSavedContext:
+    def __init__(self):
+        self.cloned: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
+        self.original: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
+        self.tid_to_weakhandle: weakref.WeakValueDictionary = (
+            weakref.WeakValueDictionary()
+        )
+        self.sid_to_tid: Dict[Tuple[int, int], Set[Tuple[int, int, int]]] = defaultdict(
+            set
+        )
+
+    def clear(self):
+        self.cloned.clear()
+        self.original.clear()
+        self.tid_to_weakhandle.clear()
+        self.sid_to_tid.clear()
+
+
+@contextlib.contextmanager
+def allow_mutation_on_saved_tensors():
+    """Context manager under which mutating tensors saved for backward is allowed.
+
+    Under this context manager, tensors saved for backward are cloned on mutation,
+    so the original version can still be used during backward. Normally, mutating a tensor
+    saved for backward will result in an error raised when it's used during backward.
+
+    To ensure the correct behavior, both the forward and backward should be run under
+    the same context manager.
+
+    Returns:
+        An _AllowMutationOnSavedContext object storing the state managed by this
+        context manager. This object can be useful for debugging purposes. The state
+        managed by the context manager is automatically cleared upon exiting.
+
+    Example::
+
+        >>> import torch
+        >>> with torch.autograd.graph.allow_mutation_on_saved_tensors():
+        ...     # forward
+        ...     a = torch.ones(2, 3, requires_grad=True)
+        ...     b = a.clone()
+        ...     out = (b**2).sum()
+        ...     b.sin_()
+        ...     # backward
+        ...     out.sum().backward()
+        ...
+        tensor([[0.8415, 0.8415, 0.8415],
+                [0.8415, 0.8415, 0.8415]], grad_fn=<SinBackward0>)
+    """
+    global _allow_mutation_on_saved_tensors_enabled
+
+    ctx = _AllowMutationOnSavedContext()
+
+    with _swap_with_cloned(ctx), _CloneArgBeforeMutateMode(ctx):
+        try:
+            if _allow_mutation_on_saved_tensors_enabled:
+                raise RuntimeError(
+                    "allow_mutation_on_saved_tensors contexts cannot be nested"
+                )
+            _allow_mutation_on_saved_tensors_enabled = True
+            yield ctx
+        finally:
+            ctx.clear()
+            _allow_mutation_on_saved_tensors_enabled = False
+
+
+def _register_logging_hooks_on_whole_graph(t_outputs: List[torch.Tensor]):
+    grad_fns = list(map(_get_grad_fn_or_grad_acc, t_outputs))
+
+    def iter_graph(roots):
+        if not roots:
+            return
+        seen = set()
+        q: Deque = collections.deque()
+        for node in roots:
+            if node is not None:
+                seen.add(node)
+                q.append(node)
+
+        while q:
+            node = q.popleft()
+            for fn, _idx in node.next_functions:
+                if fn in seen or fn is None:
+                    continue
+                seen.add(fn)
+                q.append(fn)
+
+            yield node
+
+    def fmt(t):
+        # Avoid circular import
+        from torch.testing._internal.common_utils import dtype_abbrs
+
+        if t is None:
+            return "None"
+        return f"{dtype_abbrs[t.dtype]}[{', '.join(map(str, t.shape))}]"
+
+    def prehook(grad_outputs):
+        node = torch._C._current_autograd_node()
+        grad_outputs_str = f"[{','.join(fmt(t) for t in grad_outputs)}]"
+        log_str = f"Executing: {node} with grad_outputs: {grad_outputs_str}"
+        log.debug(log_str)
+
+    handles = []
+    for node in iter_graph(grad_fns):
+        handles.append(node.register_prehook(prehook))
+
+    def unregister_hooks():
+        for handle in handles:
+            handle.remove()
+
+    return unregister_hooks
+
+
+def _engine_run_backward(t_outputs, *args, **kwargs):
+    attach_logging_hooks = log.getEffectiveLevel() <= logging.DEBUG
+    if attach_logging_hooks:
+        unregister_hooks = _register_logging_hooks_on_whole_graph(t_outputs)
+    try:
+        # Calls into the C++ engine to run the backward pass
+        return Variable._execution_engine.run_backward(
+            t_outputs, *args, **kwargs
+        )
+    finally:
+        if attach_logging_hooks:
+            unregister_hooks()  # type: ignore[possibly-undefined]
diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/profiler.py b/parrot/lib/python3.10/site-packages/torch/autograd/profiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..0392a876984632492aaa9e6b1055e2474e4d5065
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/autograd/profiler.py
@@ -0,0 +1,1125 @@
+# mypy: allow-untyped-defs
+from collections import defaultdict
+from dataclasses import dataclass
+from time import perf_counter_ns
+from typing import Any, Dict,
List, Optional +from warnings import warn + +import torch + +import torch.cuda +from torch._C import _get_privateuse1_backend_name +from torch._C._profiler import _ExperimentalConfig + +from torch.autograd import ( + _disable_profiler, + _enable_profiler, + _kineto_step, + _prepare_profiler, + _ProfilerResult, + _supported_activities, + DeviceType, + kineto_available, + ProfilerActivity, + ProfilerConfig, + ProfilerState, +) +from torch.autograd.profiler_util import ( + _filter_name, + _filter_stack_entry, + _rewrite_name, + EventList, + FunctionEvent, + MEMORY_EVENT_NAME, + MemRecordsAcc, + OUT_OF_MEMORY_EVENT_NAME, +) +from torch.futures import Future + +__all__ = [ + "profile", + "record_function", + "emit_itt", + "emit_nvtx", + "load_nvprof", + "EnforceUnique", + "parse_nvprof_trace", + "KinetoStepTracker", + "EventList", + "FunctionEvent", + "MemRecordsAcc", +] + +try: + # Available in Python >= 3.2 + from contextlib import ContextDecorator as _ContextDecorator +except ImportError: + import functools + + class _ContextDecorator: # type: ignore[no-redef] + def __enter__(self): + raise NotImplementedError + + def __exit__(self, exc_type, exc_val, exc_tb): + raise NotImplementedError + + def __call__(self, func): + @functools.wraps(func) + def wrapped(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return wrapped + + +# global python state - whether profiler is currently enabled +# useful for fast python checks to reduce latency +_is_profiler_enabled: bool = False + + +def _set_is_profiler_enabled(enable: bool): + global _is_profiler_enabled + _is_profiler_enabled = enable + + +def _run_on_profiler_start(): + _set_is_profiler_enabled(True) + + +def _run_on_profiler_stop(): + _set_is_profiler_enabled(False) + + +@dataclass +class _ProfilerStats: + "Profiler timing and stats used by developers to catch issues/regressions" + profiling_window_duration_sec: float = 0 + number_of_events: int = 0 + profiler_prepare_call_duration_us: int = 0 + profiler_enable_call_duration_us: int = 0 + profiler_disable_call_duration_us: int = 0 + parse_kineto_call_duration_us: int = 0 + function_events_build_tree_call_duration_us: int = 0 + + +class profile: + """Context manager that manages autograd profiler state and holds a summary of results. + + Under the hood it just records events of functions being executed in C++ and + exposes those events to Python. You can wrap any code into it and it will + only report runtime of PyTorch functions. + Note: profiler is thread local and is automatically propagated into the async tasks + + Args: + enabled (bool, optional): Setting this to False makes this context manager a no-op. + + use_cuda (bool, optional): Enables timing of CUDA events as well + using the cudaEvent API. (will be deprecated) + + use_device (str, optional): Enables timing of device events. + Adds approximately 4us of overhead to each tensor operation when use cuda. + The valid devices options are 'cuda', 'xpu' and 'privateuseone'. + + record_shapes (bool, optional): If shapes recording is set, information + about input dimensions will be collected. This allows one to see which + dimensions have been used under the hood and further group by them + using prof.key_averages(group_by_input_shape=True). Please note that + shape recording might skew your profiling data. It is recommended to + use separate runs with and without shape recording to validate the timing. + Most likely the skew will be negligible for bottom most events (in a case + of nested function calls). 
But for higher level functions the total
+            self cpu time might be artificially increased because of the shape
+            collection.
+
+        with_flops (bool, optional): If with_flops is set, the profiler will estimate
+            the FLOPs (floating point operations) value using the operator's input shape.
+            This allows one to estimate the hardware performance. Currently,
+            this option only works for the matrix multiplication and 2D convolution operators.
+
+        profile_memory (bool, optional): track tensor memory allocation/deallocation.
+
+        with_stack (bool, optional): record source information (file and line number) for the ops.
+
+        with_modules (bool): record module hierarchy (including function names)
+            corresponding to the callstack of the op. e.g. If module A's forward calls
+            module B's forward, which contains an aten::add op,
+            then aten::add's module hierarchy is A.B
+            Note that this support exists, at the moment, only for TorchScript models
+            and not eager mode models.
+
+        use_kineto (bool, optional): experimental, enable profiling with Kineto profiler.
+
+        use_cpu (bool, optional): profile CPU events; setting to ``False`` requires
+            ``use_kineto=True`` and can be used to lower the overhead for GPU-only profiling.
+
+        experimental_config (_ExperimentalConfig): A set of experimental options
+            used by profiler libraries like Kineto. Note, backward compatibility is not guaranteed.
+
+    .. warning::
+        Enabling memory profiling or source attribution incurs additional profiler
+        overhead.
+
+    .. warning::
+        This context manager should not be called recursively, i.e. no nested
+        instances are allowed.
+
+    .. warning::
+        Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_),
+        one cannot use the profiler with ``use_device = 'cuda'`` to benchmark
+        DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading,
+        please use ``use_device = None`` or ``num_workers = 0``.
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER)
+        >>> x = torch.randn((1, 1), requires_grad=True)
+        >>> with torch.autograd.profiler.profile() as prof:
+        >>>     for _ in range(100):  # any normal python code, really!
+ >>> y = x ** 2 + >>> y.backward() + >>> # NOTE: some columns were removed for brevity + >>> print(prof.key_averages().table(sort_by="self_cpu_time_total")) + ----------------------------------- --------------- --------------- --------------- + Name Self CPU total CPU time avg Number of Calls + ----------------------------------- --------------- --------------- --------------- + mul 32.048ms 32.048ms 200 + pow 27.041ms 27.041ms 200 + PowBackward0 9.727ms 55.483ms 100 + torch::autograd::AccumulateGrad 9.148ms 9.148ms 100 + torch::autograd::GraphRoot 691.816us 691.816us 100 + ----------------------------------- --------------- --------------- --------------- + + """ + + def __init__( + self, + enabled=True, + *, + use_cuda=False, # Deprecated + use_device=None, + record_shapes=False, + with_flops=False, + profile_memory=False, + with_stack=False, + with_modules=False, + use_kineto=False, + use_cpu=True, + use_mtia=False, + experimental_config=None, + ): + self.enabled: bool = enabled + if not self.enabled: + return + self.use_cuda = use_cuda + if self.use_cuda: + warn( + "The attribute `use_cuda` will be deprecated soon, " + "please use ``use_device = 'cuda'`` instead.", + FutureWarning, + stacklevel=2, + ) + self.use_device: Optional[str] = "cuda" + else: + self.use_device = use_device + self.function_events: Optional[EventList] = None + self.entered = False + self.record_shapes = record_shapes + self.with_flops = with_flops + self.record_shapes |= self.with_flops + self.profile_memory = profile_memory + self.with_stack = with_stack + self.with_modules = with_modules + self.use_cpu = use_cpu + self.use_mtia = use_mtia + if experimental_config is None: + experimental_config = _ExperimentalConfig() + self.experimental_config = experimental_config + self.kineto_results: Optional[_ProfilerResult] = None + self.profiling_start_time_ns = 0 + self.profiling_end_time_ns = 0 + self._stats = _ProfilerStats() + + if not self.use_cpu: + assert ( + use_kineto + ), "Device-only events supported only with Kineto (use_kineto=True)" + + if self.use_device is not None: + VALID_DEVICE_OPTIONS = ["cuda", "xpu"] + if _get_privateuse1_backend_name() != "privateuseone": + VALID_DEVICE_OPTIONS.append(_get_privateuse1_backend_name()) + if self.use_device not in VALID_DEVICE_OPTIONS: + warn(f"The {self.use_device} is not a valid device option.") + self.use_device = None + + if self.use_device == "cuda" and not torch.cuda.is_available(): + warn("CUDA is not available, disabling CUDA profiling") + self.use_cuda = False + self.use_device = None + + if self.use_device == "xpu" and not torch.xpu.is_available(): + warn("XPU is not available, disabling XPU profiling") + self.use_device = None + + self.kineto_activities = set() + if self.use_cpu: + self.kineto_activities.add(ProfilerActivity.CPU) + if self.use_mtia: + self.kineto_activities.add(ProfilerActivity.MTIA) + + self.profiler_kind = ProfilerState.KINETO + if self.use_device == "cuda": + if not use_kineto or ProfilerActivity.CUDA not in _supported_activities(): + assert self.use_cpu, "Legacy CUDA profiling requires use_cpu=True" + self.profiler_kind = ProfilerState.KINETO_GPU_FALLBACK + else: + self.kineto_activities.add(ProfilerActivity.CUDA) + elif self.use_device == "xpu": + assert ( + use_kineto and ProfilerActivity.XPU in _supported_activities() + ), "Legacy XPU profiling is not supported. Requires use_kineto=True on XPU devices." 
+            self.kineto_activities.add(ProfilerActivity.XPU)
+        elif self.use_device is not None and self.use_device != "privateuseone":
+            if (
+                not use_kineto
+                or ProfilerActivity.PrivateUse1 not in _supported_activities()
+            ):
+                assert (
+                    self.use_cpu
+                ), "Legacy custombackend profiling requires use_cpu=True"
+                self.profiler_kind = ProfilerState.KINETO_PRIVATEUSE1_FALLBACK
+            else:
+                self.kineto_activities.add(ProfilerActivity.PrivateUse1)
+
+        assert (
+            len(self.kineto_activities) > 0
+        ), "No activities specified for the profiler"
+
+    def config(self):
+        return ProfilerConfig(
+            self.profiler_kind,
+            self.record_shapes,
+            self.profile_memory,
+            self.with_stack,
+            self.with_flops,
+            self.with_modules,
+            self.experimental_config,
+        )
+
+    def __enter__(self):
+        if not self.enabled:
+            return
+        if self.entered:
+            raise RuntimeError("Profiler context manager is not reentrant")
+        self._prepare_trace()
+        self._start_trace()
+        return self
+
+    def _prepare_trace(self):
+        self.entered = True
+        t0 = perf_counter_ns()
+        _prepare_profiler(self.config(), self.kineto_activities)
+        t1 = perf_counter_ns()
+        self._stats.profiler_prepare_call_duration_us = int((t1 - t0) / 1000)
+
+    def _start_trace(self):
+        self.entered = True
+        _run_on_profiler_start()
+        t0 = perf_counter_ns()
+        _enable_profiler(self.config(), self.kineto_activities)
+        t1 = perf_counter_ns()
+        self._stats.profiler_enable_call_duration_us = int((t1 - t0) / 1000)
+        self.profiling_start_time_ns = t1
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self.enabled:
+            return
+        if self.use_device and hasattr(torch, self.use_device):
+            device_module = getattr(torch, self.use_device)
+            if hasattr(device_module, "synchronize"):
+                device_module.synchronize()
+
+        old_function_events: Optional[EventList] = None
+        if self.function_events:
+            old_function_events = self.function_events
+
+        t0 = perf_counter_ns()
+
+        # TODO we are overwriting previous kineto results here
+        # Should combine previous results with the new results otherwise only
+        # the last "repeat" will be recorded in the trace
+        self.kineto_results = _disable_profiler()
+        t1 = perf_counter_ns()
+        self._stats.profiler_disable_call_duration_us = int((t1 - t0) / 1000)
+        self.profiling_end_time_ns = t0
+
+        _run_on_profiler_stop()
+        t0 = perf_counter_ns()
+        parsed_results = self._parse_kineto_results(self.kineto_results)
+        t1 = perf_counter_ns()
+        self._stats.parse_kineto_call_duration_us = int((t1 - t0) / 1000)
+
+        self.function_events = EventList(
+            parsed_results,
+            use_device=self.use_device,
+            profile_memory=self.profile_memory,
+            with_flops=self.with_flops,
+        )
+        t0 = perf_counter_ns()
+        self.function_events._build_tree()
+        t1 = perf_counter_ns()
+        self._stats.function_events_build_tree_call_duration_us = int((t1 - t0) / 1000)
+
+        self._stats.number_of_events = len(self.function_events)
+        self._stats.profiling_window_duration_sec = (
+            (self.profiling_end_time_ns - self.profiling_start_time_ns) * 1.0 / 1e9
+        )
+        if old_function_events:
+            for evt in old_function_events:
+                self.function_events.append(evt)
+        return False
+
+    def __repr__(self):
+        if self.function_events is None:
+            return "<unfinished torch.autograd.profile>"
+        return repr(self.function_events)
+
+    def __str__(self):
+        if self.function_events is None:
+            return "<unfinished torch.autograd.profile>"
+        return str(self.function_events)
+
+    def _check_finish(self):
+        if self.function_events is None:
+            raise RuntimeError("Profiler didn't finish running")
+
+    def table(
+        self,
+        sort_by=None,
+        row_limit=100,
+        max_src_column_width=75,
+        max_name_column_width=55,
+        max_shapes_column_width=80,
header=None, + top_level_events_only=False, + ): + self._check_finish() + assert self.function_events is not None + return self.function_events.table( + sort_by=sort_by, + row_limit=row_limit, + max_src_column_width=max_src_column_width, + max_name_column_width=max_name_column_width, + max_shapes_column_width=max_shapes_column_width, + header=header, + top_level_events_only=top_level_events_only, + ) + + table.__doc__ = EventList.table.__doc__ + + def export_chrome_trace(self, path): + """ + Exports the collected trace in Chrome JSON format. If kineto is enabled, only + last cycle in schedule is exported. + """ + self._check_finish() + if kineto_available(): + self.kineto_results.save(path) # type: ignore[union-attr] + else: + return self.function_events.export_chrome_trace(path) # type: ignore[union-attr] + + export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__ + + def export_stacks(self, path: str, metric: str = "self_cpu_time_total"): + self._check_finish() + assert self.function_events is not None, "Expected profiling results" + assert self.with_stack, "export_stacks() requires with_stack=True" + return self.function_events.export_stacks(path, metric) + + def key_averages(self, group_by_input_shape=False, group_by_stack_n=0): + self._check_finish() + assert self.function_events is not None, "Expected profiling results" + return self.function_events.key_averages(group_by_input_shape, group_by_stack_n) + + key_averages.__doc__ = EventList.key_averages.__doc__ + + def total_average(self): + self._check_finish() + assert self.function_events is not None, "Expected profiling results" + return self.function_events.total_average() + + total_average.__doc__ = EventList.total_average.__doc__ + + @property + def self_cpu_time_total(self): + """Returns total time spent on CPU. + + The total time is a sum of all self times across all the events. 
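+        Self time excludes time spent in an operator's children, so summing
+        self times across events avoids double-counting nested calls.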
+ """ + self._check_finish() + assert self.function_events is not None + return self.function_events.self_cpu_time_total + + def _parse_kineto_results(self, result: _ProfilerResult): + # result.events() has most of the events - PyTorch op-level and device-level events + + trace_start_ns = result.trace_start_ns() + mem_records = [ + [evt, False] for evt in result.events() if evt.name() == MEMORY_EVENT_NAME + ] + oom_records = [ + evt for evt in result.events() if evt.name() == OUT_OF_MEMORY_EVENT_NAME + ] + mem_records_acc = MemRecordsAcc(mem_records) + + def _cpu_memory_usage(mem_record): + return ( + mem_record.nbytes() + if mem_record.device_type() + in [DeviceType.CPU, DeviceType.MKLDNN, DeviceType.IDEEP] + else 0 + ) + + def _device_memory_usage(mem_record): + return ( + mem_record.nbytes() + if mem_record.device_type() + in [DeviceType.CUDA, DeviceType.PrivateUse1, DeviceType.HIP] + else 0 + ) + + # Create and return FunctionEvent list, which contains all function events + # Here 2 function events are created: + # all_function_events contains all events associated with each kineto event from result + all_function_events = [] + # frontend_function_events contains the events in aten or torch frontend level, + # whose correlation id is 0 + frontend_function_events = [] + device_corr_map: Dict[int, List[FunctionEvent]] = {} + max_evt_id = 0 + for kineto_event in result.events(): + if _filter_name(kineto_event.name()): + continue + rel_start_ns = kineto_event.start_ns() - trace_start_ns + rel_end_ns = rel_start_ns + kineto_event.duration_ns() + abs_end_ns = kineto_event.start_ns() + kineto_event.duration_ns() + + cpu_memory_usage = 0 + device_memory_usage = 0 + if kineto_event.device_type() == DeviceType.CPU: + # find the corresponding memory allocation events + for mem_record in mem_records_acc.in_interval( + kineto_event.start_ns() / 1000, abs_end_ns / 1000 + ): + cpu_memory_usage += _cpu_memory_usage(mem_record[0]) + device_memory_usage += _device_memory_usage(mem_record[0]) + mem_record[1] = True + + is_async = kineto_event.is_async() or ( + kineto_event.start_thread_id() != kineto_event.end_thread_id() + ) + + fe = FunctionEvent( + id=kineto_event.correlation_id(), + name=_rewrite_name(name=kineto_event.name(), with_wildcard=True), + trace_name=_rewrite_name(name=kineto_event.name(), with_wildcard=False), + thread=kineto_event.start_thread_id(), + start_us=rel_start_ns / 1000, + end_us=rel_end_ns / 1000, + fwd_thread=kineto_event.fwd_thread_id(), + input_shapes=kineto_event.shapes(), + concrete_inputs=kineto_event.concrete_inputs(), + stack=[ + entry + for entry in kineto_event.stack() + if _filter_stack_entry(entry) + ], + scope=kineto_event.scope(), + use_device=self.use_device, + cpu_memory_usage=cpu_memory_usage, + device_memory_usage=device_memory_usage, + is_async=is_async, + sequence_nr=kineto_event.sequence_nr(), + device_type=kineto_event.device_type(), + device_index=kineto_event.device_index(), + device_resource_id=kineto_event.device_resource_id(), + flops=kineto_event.flops(), + ) + max_evt_id = max(max_evt_id, fe.id) + if fe.device_type == DeviceType.CPU and not fe.is_async: + if self.use_device == "privateuseone": + privateuse1_time = kineto_event.privateuse1_elapsed_us() + if privateuse1_time > 0: + fe.append_kernel(fe.name, fe.device_index, privateuse1_time) + fe.is_legacy = True + elif self.use_device == "cuda": + # Check if we have CUDA time as a fallback + cuda_time = kineto_event.cuda_elapsed_us() + if cuda_time > 0: + fe.append_kernel(fe.name, fe.device_index, 
cuda_time) + fe.is_legacy = True + all_function_events.append(fe) + corr_id = kineto_event.linked_correlation_id() + if corr_id > 0: + if corr_id not in device_corr_map: + device_corr_map[corr_id] = [] + device_corr_map[corr_id].append(fe) + elif corr_id == 0: + frontend_function_events.append(fe) + else: + raise RuntimeError( + f"Got negative correlation id {corr_id} in profiler post processing" + ) + + # associate device kernels and device runtime (CPU) with CPU events + for fe in frontend_function_events: + if ( + fe.device_type == DeviceType.CPU + and not fe.is_async + and fe.id in device_corr_map + ): + for f_evt in device_corr_map[fe.id]: + if ( + f_evt.device_type == DeviceType.CUDA + or f_evt.device_type == DeviceType.PrivateUse1 + ): + fe.append_kernel( + f_evt.name, + f_evt.device_index, + f_evt.time_range.end - f_evt.time_range.start, + ) + elif f_evt.device_type == DeviceType.CPU: + # make sure that 'thread' of a CPU Kineto (e.g. Device Runtime) event is associated + # with the 'thread' of the corresponding linked PyTorch event to properly track + # parents and children + f_evt.thread = fe.thread + + def createFunctionEventForMemoryEvents(evt): + rel_start_ns = evt.start_ns() - trace_start_ns + fe = FunctionEvent( + id=max_evt_id, + name=evt.name(), + trace_name=None, # not outputting in the trace + thread=evt.start_thread_id(), + start_us=rel_start_ns / 1000, + end_us=rel_start_ns / 1000, # no duration + fwd_thread=evt.start_thread_id(), + input_shapes=[], + stack=[], + scope=0, # RecordScope::FUNCTION + use_device=self.use_device, + cpu_memory_usage=_cpu_memory_usage(evt), + device_memory_usage=_device_memory_usage(evt), + is_async=False, + sequence_nr=-1, + device_type=DeviceType.CPU, + device_index=0, + ) + return fe + + # output top-level memory events + for mem_record in mem_records: + if not mem_record[1]: + max_evt_id += 1 + fe = createFunctionEventForMemoryEvents(mem_record[0]) + all_function_events.append(fe) + + for oom_record in oom_records: + max_evt_id += 1 + fe = createFunctionEventForMemoryEvents(oom_record) + all_function_events.append(fe) + + all_function_events.sort( + key=lambda evt: [evt.time_range.start, -evt.time_range.end] + ) + return all_function_events + + +class record_function(_ContextDecorator): + """Context manager/function decorator that adds a label to a code block/function when running autograd profiler. + + It is useful when tracing the code profile. + + Args: + name (str): Label assigned to the block of code. + node_id (int): ID of node, for distributed profiling. Unset in + non-distributed cases. + + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER) + >>> x = torch.randn((1, 1), requires_grad=True) + >>> with torch.autograd.profiler.profile() as prof: + ... y = x ** 2 + ... with torch.autograd.profiler.record_function("label-z"): # label the block + ... z = y ** 3 + ... y.backward() + ... 
+ >>> # xdoctest: +IGNORE_WANT + >>> # NOTE: some columns were removed for brevity + >>> print(prof.key_averages().table(sort_by="self_cpu_time_total")) + ----------------------------------- --------------- --------------- --------------- + Name Self CPU total % CPU time avg Number of Calls + ----------------------------------- --------------- --------------- --------------- + pow 60.77% 47.470us 3 + mul 21.73% 25.465us 2 + PowBackward0 12.03% 121.891us 1 + torch::autograd::AccumulateGrad 2.70% 6.324us 1 + label-z 2.13% 12.421us 1 + torch::autograd::GraphRoot 0.64% 1.503us 1 + ----------------------------------- --------------- --------------- --------------- + Self CPU time total: 234.344us + CUDA time total: 0.000us + + """ + + def __init__(self, name: str, args: Optional[str] = None): + self.name: str = name + self.args: Optional[str] = args + # Whether or not we should run record function's end callbacks when exiting. + self.run_callbacks_on_exit: bool = True + # TODO: TorchScript ignores standard type annotation here + # self.record: Optional["torch.classes.profiler._RecordFunction"] = None + self.record = torch.jit.annotate( + Optional["torch.classes.profiler._RecordFunction"], None + ) + + def __enter__(self): + self.record = torch.ops.profiler._record_function_enter_new( + self.name, self.args + ) + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any): + if not self.run_callbacks_on_exit: + return + + # Local variable is needed by TorchScript to refine Optional[T] to T + record = self.record + assert record is not None + + # TODO: Too slow with __torch_function__ handling enabled + # See https://github.com/pytorch/pytorch/issues/76410 + if not torch.jit.is_scripting(): + with torch._C.DisableTorchFunctionSubclass(): + torch.ops.profiler._record_function_exit._RecordFunction(record) + else: + torch.ops.profiler._record_function_exit(record) + + def _call_end_callbacks_on_future(self, fut: Future[Any]) -> Future[Any]: + """Use for profiling async calls that return a future. + + Calling this function will extend recording beyond this scope, until the future is + satisfied. It is useful for profiling the end to end time of asynchronous calls. + This function should only be called once to attach the callback onto the future, and + will throw if called multiple times. + + Args: + fut: (torch._C.Future): future for which to schedule + callback for. + + Returns: + A future that completes with the value of the passed in future when + the profiling callbacks have ran. + + """ + # Throw if we have already attached a callback onto the future. + if not self.run_callbacks_on_exit: + raise RuntimeError("_call_end_callbacks_on_future can only be called once.") + + # We are scheduling to run this RecordFunction's end callbacks when the + # passed in future completes, so don't run end callbacks on exit. 
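+        #
+        # A hypothetical usage sketch (the RPC call is only an example of an
+        # API returning a torch Future; the names below are illustrative):
+        #
+        #     with record_function("rpc-call") as rf:
+        #         fut = torch.distributed.rpc.rpc_async(worker, torch.add, (t1, t2))
+        #         fut = rf._call_end_callbacks_on_future(fut)
+        #     result = fut.wait()  # recording ends when the future completes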
+        self.run_callbacks_on_exit = False
+
+        # Local variable is needed by TorchScript to refine Optional[T] to T
+        record = self.record
+        assert record is not None
+
+        # TODO: Too slow with __torch_function__ handling enabled
+        # See https://github.com/pytorch/pytorch/issues/76410
+        if not torch.jit.is_scripting():
+            with torch._C.DisableTorchFunctionSubclass():
+                profiled_future = (
+                    torch.ops.profiler._call_end_callbacks_on_jit_fut._RecordFunction(
+                        record, fut
+                    )
+                )
+        else:
+            profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut(
+                record, fut
+            )
+        return profiled_future
+
+
+class emit_itt:
+    """Context manager that makes every autograd operation emit an ITT range.
+
+    It is useful when running the program under Intel(R) VTune Profiler::
+
+        vtune <--vtune-flags>
+
+    The Instrumentation and Tracing Technology (ITT) API enables your application to generate and
+    control the collection of trace data during its execution across different Intel tools.
+    This context manager is used to annotate Intel(R) VTune profiling traces. With the help of this
+    context manager, you will be able to see labeled ranges in the Intel(R) VTune Profiler GUI.
+
+    .. warning::
+        This context manager should not be called recursively, i.e. at most one
+        instance should be enabled at any given time.
+
+    Args:
+        enabled (bool, optional): Setting ``enabled=False`` makes this context manager a no-op.
+            Default: ``True``.
+        record_shapes (bool, optional): If ``record_shapes=True``, the itt range wrapping
+            each autograd op will append information about the sizes of Tensor arguments received
+            by that op, in the following format:
+            ``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]``
+            Non-tensor arguments will be represented by ``[]``.
+            Arguments will be listed in the order they are received by the backend op.
+            Please note that this order may not match the order in which those arguments were passed
+            on the Python side. Also note that shape recording may increase the overhead of itt range creation.
+            Default: ``False``
+
+    Example:
+        >>> # xdoctest: +SKIP("Undefined variables")
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER)
+        >>> with torch.autograd.profiler.emit_itt():
+        ...     model(x)
+
+    """
+
+    def __init__(self, enabled=True, record_shapes=False):
+        self.enabled = enabled
+        self.entered = False
+        self.record_shapes = record_shapes
+
+    def __enter__(self):
+        if not self.enabled:
+            return
+        if self.entered:
+            raise RuntimeError("ITT annotation context manager is not reentrant")
+        self.entered = True
+        _run_on_profiler_start()
+        _enable_profiler(
+            ProfilerConfig(
+                ProfilerState.ITT,
+                self.record_shapes,
+                False,
+                False,
+                False,
+                False,
+                _ExperimentalConfig(),
+            ),
+            set(),
+        )
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self.enabled:
+            return
+        _disable_profiler()
+        _run_on_profiler_stop()
+        return False
+
+
+class emit_nvtx:
+    """Context manager that makes every autograd operation emit an NVTX range.
+
+    It is useful when running the program under nvprof::
+
+        nvprof --profile-from-start off -o trace_name.prof -- <regular command here>
+
+    Unfortunately, there's no way to force nvprof to flush the data it collected
+    to disk, so for CUDA profiling one has to use this context manager to annotate
+    nvprof traces and wait for the process to exit before inspecting them.
+    Then, either NVIDIA Visual Profiler (nvvp) can be used to visualize the timeline, or
+    :func:`torch.autograd.profiler.load_nvprof` can load the results for inspection
+    e.g. in Python REPL.
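+
+    For example, one could later load the saved trace back (a sketch; the path
+    matches the ``-o`` flag above)::
+
+        events = torch.autograd.profiler.load_nvprof("trace_name.prof")
+        print(events)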
+
+    .. warning::
+        This context manager should not be called recursively, i.e. at most one
+        instance should be enabled at any given time.
+
+    Args:
+        enabled (bool, optional): Setting ``enabled=False`` makes this context manager a no-op.
+            Default: ``True``.
+        record_shapes (bool, optional): If ``record_shapes=True``, the nvtx range wrapping
+            each autograd op will append information about the sizes of Tensor arguments received
+            by that op, in the following format:
+            ``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]``
+            Non-tensor arguments will be represented by ``[]``.
+            Arguments will be listed in the order they are received by the backend op.
+            Please note that this order may not match the order in which those arguments were passed
+            on the Python side. Also note that shape recording may increase the overhead of nvtx range creation.
+            Default: ``False``
+
+    Example:
+        >>> # xdoctest: +SKIP("undefined variables")
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER)
+        >>> with torch.cuda.profiler.profile():
+        ...     model(x)  # Warmup CUDA memory allocator and profiler
+        ...     with torch.autograd.profiler.emit_nvtx():
+        ...         model(x)
+
+    **Forward-backward correlation**
+
+    When viewing a profile created using :class:`emit_nvtx` in the Nvidia Visual Profiler,
+    correlating each backward-pass op with the corresponding forward-pass op can be difficult.
+    To ease this task, :class:`emit_nvtx` appends sequence number information to the ranges it
+    generates.
+
+    During the forward pass, each function range is decorated with ``seq=<N>``. ``seq`` is a running
+    counter, incremented each time a new backward Function object is created and stashed for backward.
+    Thus, the ``seq=<N>`` annotation associated with each forward function range tells you that
+    if a backward Function object is created by this forward function,
+    the backward object will receive sequence number N.
+    During the backward pass, the top-level range wrapping each C++ backward Function's
+    ``apply()`` call is decorated with ``stashed seq=<M>``. ``M`` is the sequence number that
+    the backward object was created with. By comparing ``stashed seq`` numbers in backward with ``seq``
+    numbers in forward, you can track down which forward op created each backward Function.
+
+    Any functions executed during the backward pass are also decorated with ``seq=<N>``. During
+    default backward (with ``create_graph=False``) this information is irrelevant, and in fact,
+    ``N`` may simply be 0 for all such functions. Only the top-level ranges associated with
+    backward Function objects' ``apply()`` methods are useful, as a way to correlate these Function
+    objects with the earlier forward pass.
+
+    **Double-backward**
+
+    If, on the other hand, a backward pass with ``create_graph=True`` is underway (in other words,
+    if you are setting up for a double-backward), each function's execution during backward
+    is given a nonzero, useful ``seq=<N>``. Those functions may themselves create Function objects
+    to be executed later during double-backward, just as the original functions in the forward pass did.
+    The relationship between backward and double-backward is conceptually the same
+    as the relationship between forward and backward: The functions still emit
+    current-sequence-number-tagged ranges, the Function objects they create still stash
+    those sequence numbers, and during the eventual double-backward, the Function objects'
+    ``apply()`` ranges are still tagged with ``stashed seq=<M>`` numbers, which can be
+    compared to ``seq`` numbers from the backward pass.
+
+    .. warning::
+        The sequence number is thread-local, and some forward functions don't create an associated
+        backward Function object (instead delegating that to sub-functions further down the call chain).
+        For these reasons, the correspondence of stashed sequence numbers in
+        backward Function ``apply()`` ranges with ``seq`` numbers in forward-pass ranges is
+        not guaranteed to be 1 to 1. The sequence numbers alone may not be enough to fully
+        disambiguate which forward function created which
+        backward Function object. You may need to make a judgment based on analytic knowledge of what
+        the expected correspondence should be.
+    """
+
+    def __init__(self, enabled=True, record_shapes=False):
+        self.enabled = enabled
+        self.entered = False
+        self.record_shapes = record_shapes
+
+    def __enter__(self):
+        if not self.enabled:
+            return
+        if self.entered:
+            raise RuntimeError("NVTX annotation context manager is not reentrant")
+        self.entered = True
+        torch.cuda.synchronize()
+        _run_on_profiler_start()
+        _enable_profiler(
+            ProfilerConfig(
+                ProfilerState.NVTX,
+                self.record_shapes,
+                False,
+                False,
+                False,
+                False,
+                _ExperimentalConfig(),
+            ),
+            set(),
+        )
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self.enabled:
+            return
+        torch.cuda.synchronize()
+        _disable_profiler()
+        _run_on_profiler_stop()
+        return False
+
+
+def load_nvprof(path):
+    """Open an nvprof trace file and parse autograd annotations.
+
+    Args:
+        path (str): path to nvprof trace
+    """
+    return EventList(parse_nvprof_trace(path))
+
+
+class EnforceUnique:
+    """Raises an error if a key is seen more than once."""
+
+    def __init__(self):
+        self.seen = set()
+
+    def see(self, *key):
+        r"""
+        Observe a key and raise an error if it is seen multiple times.
+        """
+        if key in self.seen:
+            raise RuntimeError("duplicate key: " + str(key))
+        self.seen.add(key)
+
+
+def parse_nvprof_trace(path):
+    import sqlite3
+
+    conn = sqlite3.connect(path)
+    conn.row_factory = sqlite3.Row
+
+    # Parse strings table
+    strings = {}
+    for r in conn.execute("SELECT _id_ as id, value FROM StringTable"):
+        strings[r["id"]] = torch._C._demangle(r["value"])
+
+    # First, find all functions and create FunctionEvents for them
+    marker_query = """
+    SELECT
+        start.id AS marker_id, start.name, start.timestamp AS start_time, end.timestamp AS end_time
+    FROM
+        CUPTI_ACTIVITY_KIND_MARKER AS start INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
+        ON start.id = end.id
+    WHERE
+        start.name != 0 AND end.name = 0
+    """
+    functions = []
+    functions_map = {}
+    unique = EnforceUnique()
+    for row in conn.execute(marker_query):
+        unique.see(row["marker_id"])
+        evt = FunctionEvent(
+            id=row["marker_id"],
+            node_id=0,  # missing a node_id when calling FunctionEvent. This is just to ensure
+            # that pytorch doesn't crash when creating a FunctionEvent() object
+            name=strings[row["name"]],
+            start_us=row["start_time"],
+            end_us=row["end_time"],
+            thread=0,
+        )  # TODO: find in sqlite database
+        functions.append(evt)
+        functions_map[evt.id] = evt
+
+    # Now, correlate all kernels with FunctionEvents
+    kernel_query = """
+    SELECT
+        start.id AS marker_id, start.name, start.timestamp, end.timestamp,
+        runtime._id_ AS runtime_id, runtime.cbid, runtime.start AS runtime_start, runtime.end AS runtime_end,
+        kernel.start AS kernel_start, kernel.end AS kernel_end, kernel.name AS kernel_name
+    FROM
+        CUPTI_ACTIVITY_KIND_MARKER AS start
+        INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
+            ON start.id = end.id
+        INNER JOIN CUPTI_ACTIVITY_KIND_RUNTIME as runtime
+            ON (start.timestamp < runtime.start AND runtime.end < end.timestamp)
+        INNER JOIN CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL AS kernel
+            ON kernel.correlationId = runtime.correlationId
+    """
+    unique = EnforceUnique()
+    for row in conn.execute(kernel_query):
+        unique.see(row["marker_id"], row["runtime_id"])
+        # 211 is cudaKernelLaunch for cuda >= 9.2
+        assert row["cbid"] == 211
+        evt = functions_map[row["marker_id"]]
+        evt.append_kernel(
+            row["kernel_name"], 0, row["kernel_end"] - row["kernel_start"]
+        )
+
+    functions.sort(key=lambda evt: evt.time_range.start)
+    return functions
+
+
+class KinetoStepTracker:
+    """Provides an abstraction for incrementing the step count globally.
+
+    Previously, we only had one place to mark that a step() has occurred
+    in the program, via the pytorch profiler step(). We will now add step hooks
+    in the Optimizer class (https://github.com/pytorch/pytorch/issues/88446).
+
+    - This could mean programs that already call profiler.step() every
+      iteration can end up double incrementing the step count.
+    - If a model uses multiple optimizers, we can also have double or more
+      counting of the step.
+
+    We fix this by adding a layer of abstraction before calling step()
+    to the kineto library. The idea is to maintain steps per requester in a dict:
+
+    .. code-block::
+
+        {
+            "ProfilerStep": 100,  # triggered by profiler step() call
+            "Optimizer1Step": 100,  # Optimizer 1 or 2 are just examples, could be SGD, Adam etc
+            "Optimizer2Step": 100,
+        }
+
+    To figure out the global step count, just take the max of the dict values (100).
+
+    If one of the counts increments, the max will go up.
+
+    .. code-block::
+
+        {
+            "ProfilerStep": 100,
+            "Optimizer1Step": 101,  # Optimizer1 got incremented first, say
+            "Optimizer2Step": 100,
+        }
+
+    Then the global step count is 101.
+    We only call the kineto step() function when the global count increments.
+
+    NOTE: Please do not use the KinetoStepTracker in modules besides the Optimizer
+    for now. The result could be incorrect increments of the step count.
+    """
+
+    _current_step = 0
+    _step_dict: Dict[str, int] = defaultdict(int)
+
+    @classmethod
+    def init_step_count(cls, requester: str):
+        r"""
+        Initialize for a given requester.
+        """
+        cls._step_dict[requester] = cls._current_step
+
+    @classmethod
+    def erase_step_count(cls, requester: str) -> bool:
+        r"""
+        Remove a given requester.
+        """
+        return cls._step_dict.pop(requester, None) is not None
+
+    @classmethod
+    def increment_step(cls, requester: str) -> int:
+        """Increments the step count for the requester.
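+
+        A minimal sketch (the requester name is an arbitrary, illustrative string)::
+
+            KinetoStepTracker.init_step_count("MyOptimizerStep")
+            global_step = KinetoStepTracker.increment_step("MyOptimizerStep")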
+ + Additionally if the max over all step counts has incremented then + trigger the _kineto_step() returns global step count + """ + if requester not in cls._step_dict: + cls.init_step_count(requester) + cls._step_dict[requester] += 1 + + new_step = max(cls._step_dict.values()) + if new_step > cls._current_step: + delta = new_step - cls._current_step + if delta > 1: + warn( + "Profiler step count has increased more than 1 - " + f"current_step = {cls._current_step} step dict = {cls._step_dict}" + ) + for _ in range(0, delta): + _kineto_step() + cls._current_step = new_step + return cls._current_step + + @classmethod + def current_step(cls) -> int: + r""" + Get the latest step for any requester + """ + return cls._current_step diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/profiler_legacy.py b/parrot/lib/python3.10/site-packages/torch/autograd/profiler_legacy.py new file mode 100644 index 0000000000000000000000000000000000000000..40baafd441aedab71920623870f20cab89180e6c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/profiler_legacy.py @@ -0,0 +1,314 @@ +# mypy: allow-untyped-defs +import itertools +import warnings +from typing_extensions import deprecated + +import torch +import torch.cuda + +from torch.autograd import ( + _disable_profiler_legacy, + _enable_profiler_legacy, + DeviceType, + ProfilerConfig, + ProfilerState, +) +from torch.autograd.profiler_util import ( + _filter_name, + _filter_stack_entry, + _rewrite_name, + EventList, + FunctionEvent, + MEMORY_EVENT_NAME, +) + +__all__ = ["profile"] + + +@deprecated( + "`torch.autograd.profiler_legacy.profile` is deprecated and will be removed in a future release. " + "Please use `torch.profiler` instead.", + category=None, # TODO: change to `FutureWarning` +) +class profile: + """DEPRECATED: use torch.profiler instead.""" + + def __init__( + self, + enabled=True, + *, + use_cuda=False, + record_shapes=False, + with_flops=False, + profile_memory=False, + with_stack=False, + with_modules=False, + ): + self.enabled: bool = enabled + if not self.enabled: + return + self.use_cuda = use_cuda + self.function_events = None + self.entered = False + self.record_shapes = record_shapes + self.with_flops = with_flops + self.record_shapes |= self.with_flops + self.profile_memory = profile_memory + self.with_stack = with_stack + self.with_modules = with_modules + + if self.use_cuda and not torch.cuda.is_available(): + warnings.warn( + "CUDA is not available, disabling CUDA profiling", + stacklevel=2, + ) + self.use_cuda = False + + if self.use_cuda: + self.profiler_kind = ProfilerState.CUDA + else: + self.profiler_kind = ProfilerState.CPU + + def config(self): + return ProfilerConfig( + self.profiler_kind, + self.record_shapes, + self.profile_memory, + self.with_stack, + self.with_flops, + self.with_modules, + # avoid exposing _ExperimentalConfig this in legacy public API + torch._C._profiler._ExperimentalConfig(), + ) + + def __enter__(self): + if not self.enabled: + return + if self.entered: + raise RuntimeError("Profiler context manager is not reentrant") + self.entered = True + self._start_trace() + return self + + def _start_trace(self): + _enable_profiler_legacy(self.config()) + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self.enabled: + return + if self.use_cuda: + torch.cuda.synchronize() + + records = _disable_profiler_legacy() + parsed_results = _parse_legacy_records(records) + self.function_events = EventList( + parsed_results, + use_device="cuda" if self.use_cuda else None, + 
profile_memory=self.profile_memory, + with_flops=self.with_flops, + ) + self.function_events._build_tree() + return False + + def __repr__(self): + if self.function_events is None: + return "" + return repr(self.function_events) + + def __str__(self): + if self.function_events is None: + return "" + return str(self.function_events) + + def _check_finish(self): + if self.function_events is None: + raise RuntimeError("Profiler didn't finish running") + + def table( + self, + sort_by=None, + row_limit=100, + max_src_column_width=75, + max_name_column_width=55, + max_shapes_column_width=80, + header=None, + top_level_events_only=False, + ): + self._check_finish() + assert self.function_events is not None + return self.function_events.table( + sort_by=sort_by, + row_limit=row_limit, + max_src_column_width=max_src_column_width, + max_name_column_width=max_name_column_width, + max_shapes_column_width=max_shapes_column_width, + header=header, + top_level_events_only=top_level_events_only, + ) + + table.__doc__ = EventList.table.__doc__ + + def export_chrome_trace(self, path): + self._check_finish() + assert self.function_events is not None + return self.function_events.export_chrome_trace(path) + + export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__ + + def export_stacks(self, path: str, metric: str = "self_cpu_time_total"): + self._check_finish() + assert self.function_events is not None, "Expected profiling results" + assert self.with_stack, "export_stacks() requires with_stack=True" + return self.function_events.export_stacks(path, metric) + + def key_averages(self, group_by_input_shape=False, group_by_stack_n=0): + self._check_finish() + assert self.function_events is not None, "Expected profiling results" + return self.function_events.key_averages(group_by_input_shape, group_by_stack_n) + + key_averages.__doc__ = EventList.key_averages.__doc__ + + def total_average(self): + self._check_finish() + assert self.function_events is not None, "Expected profiling results" + return self.function_events.total_average() + + total_average.__doc__ = EventList.total_average.__doc__ + + @property + def self_cpu_time_total(self): + """Return CPU time as the sum of self times across all events.""" + self._check_finish() + assert self.function_events is not None + return self.function_events.self_cpu_time_total + + +def _parse_legacy_records(thread_records): + def _get_record_key(record): + """Return a tuple for correlating start and end records in `_parse_legacy_records`.""" + return (record.handle(), record.node_id()) + + next_id = 0 + start_record = None + functions = [] + record_stack = [] + + # '__start_profile' is not guaranteed to be first, so we must find it here + for record in itertools.chain.from_iterable(thread_records): + name = record.name() + if start_record is None and name == "__start_profile": + start_record = record + + assert start_record is not None and not start_record.is_remote() + + for thread_record_list in thread_records: + # accumulated memory allocations per handle + cpu_memory_allocs = {} + cuda_memory_allocs = {} + # ranges per handle + range_starts = {} + + filtered_handles = set() + prev_record = None + for record in thread_record_list: + record_key = _get_record_key(record) + if _filter_name(record.name()) or record_key in filtered_handles: + filtered_handles.add(record_key) + continue + + if record.kind() == "push": + # workaround to reduce double logging from operator + # wrappers and redispatch + if prev_record is not None: + duplicate = ( + 
prev_record.name() == record.name() + and prev_record.kind() == record.kind() + and prev_record.node_id() == record.node_id() + ) + if duplicate: + filtered_handles.add(record_key) + continue + + range_starts[record_key] = record + cpu_memory_allocs[record_key] = 0 + cuda_memory_allocs[record_key] = 0 + elif record.kind() == "pop": + assert ( + record_key in range_starts + ), f"""Expected record with key {record_key} to exist in range_starts. + This means that the pop event did not have a corresponding push.""" + + start = range_starts[record_key] + + cpu_memory_usage = cpu_memory_allocs[record_key] + cuda_memory_usage = cuda_memory_allocs[record_key] + is_async = start.is_async() or (start.thread_id() != record.thread_id()) + is_remote_event = record.is_remote() + start_flops = start.flops() + + fe = FunctionEvent( + id=record.handle(), + node_id=record.node_id(), + name=_rewrite_name(name=start.name(), with_wildcard=True), + trace_name=_rewrite_name(name=start.name(), with_wildcard=False), + thread=start.thread_id(), + start_us=start_record.cpu_elapsed_us(start), + end_us=start_record.cpu_elapsed_us(record), + fwd_thread=start.fwd_thread_id(), + input_shapes=start.shapes(), + stack=[ + entry for entry in start.stack() if _filter_stack_entry(entry) + ], + scope=start.scope(), + use_device="cuda" if start.has_cuda() else None, + cpu_memory_usage=cpu_memory_usage, + device_memory_usage=cuda_memory_usage, + is_async=is_async, + is_remote=is_remote_event, + sequence_nr=start.sequence_nr(), + device_type=DeviceType.CPU, + is_legacy=True, + flops=start_flops, + ) + # note: async events have only cpu total time + if not is_async and start.has_cuda(): + duration = start.cuda_elapsed_us(record) + if duration > 0: + fe.append_kernel(start.name(), start.device(), duration) + functions.append(fe) + del range_starts[record_key] + del cpu_memory_allocs[record_key] + del cuda_memory_allocs[record_key] + elif record.kind() == "memory_alloc": + num_open_handles_cpu = len(cpu_memory_allocs) + num_open_handles_cuda = len(cuda_memory_allocs) + assert num_open_handles_cpu == num_open_handles_cuda + for handle in cpu_memory_allocs.keys(): + cpu_memory_allocs[handle] += record.cpu_memory_usage() + for handle in cuda_memory_allocs.keys(): + cuda_memory_allocs[handle] += record.cuda_memory_usage() + if num_open_handles_cpu == 0: + # output event as a top-level memory event + fe = FunctionEvent( + id=0, + name=MEMORY_EVENT_NAME, + trace_name=None, + thread=0, + start_us=0, + end_us=0, + stack=[], + cpu_memory_usage=record.cpu_memory_usage(), + device_memory_usage=record.cuda_memory_usage(), + is_legacy=True, + ) + functions.append(fe) + prev_record = record + + # Sort functions by start time then by end time ascending. + # This ensures that--in the case of nested events which + # have the same start time (which may happen due to the + # granularity of the given clock tick)--we always show + # the outermost nested call first. 
+    # This adds stability
+    # in how FunctionEvents appear
+    functions.sort(key=lambda evt: [evt.time_range.start, -evt.time_range.end])
+    return functions
diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/profiler_util.py b/parrot/lib/python3.10/site-packages/torch/autograd/profiler_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5cff1ea12a854b754cf60ebc8f9a076a6eb013f
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/autograd/profiler_util.py
@@ -0,0 +1,1079 @@
+# mypy: allow-untyped-defs
+import bisect
+import itertools
+import math
+
+from collections import defaultdict, namedtuple
+from operator import attrgetter
+
+from typing import Any, Dict, List, Optional, Tuple
+from typing_extensions import deprecated
+
+import torch
+from torch.autograd import DeviceType
+
+__all__ = [
+    "EventList",
+    "FormattedTimesMixin",
+    "Interval",
+    "Kernel",
+    "FunctionEvent",
+    "FunctionEventAvg",
+    "StringTable",
+    "MemRecordsAcc",
+]
+
+
+class EventList(list):
+    """A list of Events (for pretty printing)."""
+
+    def __init__(self, *args, **kwargs):
+        use_device = kwargs.pop("use_device", None)
+        profile_memory = kwargs.pop("profile_memory", False)
+        with_flops = kwargs.pop("with_flops", False)
+        super().__init__(*args, **kwargs)
+        self._use_device = use_device
+        self._profile_memory = profile_memory
+        self._tree_built = False
+        self._with_flops = with_flops
+
+    def _build_tree(self):
+        self._populate_cpu_children()
+        self._remove_dup_nodes()
+        self._set_backward_stacktraces()
+        self._tree_built = True
+
+    def __str__(self):
+        return self.table()
+
+    def _remove_dup_nodes(self):
+        while True:
+            to_delete = set()
+            for idx in range(len(self)):
+                if (
+                    self[idx].cpu_parent is not None
+                    and self[idx].cpu_parent.name == self[idx].name
+                    and len(self[idx].cpu_parent.cpu_children) == 1
+                ):
+                    self[idx].cpu_parent.cpu_children = self[idx].cpu_children
+                    self[idx].cpu_parent.kernels = self[idx].kernels  # lift kernels up
+                    for ch in self[idx].cpu_children:
+                        ch.cpu_parent = self[idx].cpu_parent
+                    to_delete.add(idx)
+            if len(to_delete) == 0:
+                break
+            new_evts = [ev for ind, ev in enumerate(self) if ind not in to_delete]
+            self.clear()
+            self.extend(new_evts)
+
+    def _populate_cpu_children(self):
+        """Populate child events into each underlying FunctionEvent object.
+
+        One event is a child of another if [s1, e1) is inside [s2, e2), where
+        s1 and e1 are the start and end of the child event's interval, and
+        s2 and e2 are the start and end of the parent event's interval.
+
+        Example: the event list [[0, 10], [1, 3], [3, 4]] would make [0, 10]
+        a parent of the two other intervals.
+
+        If for any reason two intervals intersect only partially, this function
+        will not record a parent-child relationship between them.
+        """
+        # Some events can be async (i.e. start and end on different threads),
+        # since it's generally undefined how to attribute children ranges to
+        # async ranges, we do not use them when calculating nested ranges and stats
+        sync_events = [
+            evt
+            for evt in self
+            if not evt.is_async and evt.device_type == DeviceType.CPU
+        ]
+        events = sorted(
+            sync_events,
+            key=attrgetter("thread"),
+        )
+        # Group by both thread and node_id, so that events that happen to have
+        # the same thread_id but are from different nodes aren't incorrectly
+        # grouped together.
+        threads = itertools.groupby(
+            events, key=lambda event: (event.thread, event.node_id)
+        )
+
+        # For each thread we keep a stack of current nested parents.
+        # We maintain the invariant that each interval is a subset of all other
+        # intervals lower in the stack.
+        #
+        # First we sort the intervals by their start time. Then we iterate over them.
+        # Every time we see a new interval we remove several parents from
+        # the top until we restore the invariant. Then a parent-child relationship
+        # is recorded if the stack is not empty.
+        # Finally we add the new interval to the list.
+        #
+        # The algorithm has O(N * log(N)) complexity, where N is the number of
+        # intervals.
+        for thread_id, thread_events in threads:
+            thread_events_ = sorted(
+                thread_events,
+                key=lambda event: [event.time_range.start, -event.time_range.end],
+            )
+            current_events: List[FunctionEvent] = []
+            cur_end = 0
+            for event in thread_events_:
+                while len(current_events) > 0:
+                    parent = current_events[-1]
+                    if (
+                        event.time_range.start >= parent.time_range.end
+                        or event.time_range.end > parent.time_range.end
+                    ):
+                        # this can't be a parent
+                        current_events.pop()
+                    else:
+                        parent.append_cpu_child(event)
+                        assert (
+                            event.cpu_parent is None
+                        ), f"There is already a CPU parent event for {event.key}"
+                        event.set_cpu_parent(parent)
+                        break
+
+                current_events.append(event)
+
+    def _set_backward_stacktraces(self):
+        def bw_parent(evt):
+            if evt is None:
+                return None
+            elif evt.scope == 1:  # BACKWARD_FUNCTION
+                return evt
+            else:
+                return bw_parent(evt.cpu_parent)
+
+        fwd_stacks = {}
+        for evt in self:
+            if bw_parent(evt) is None and evt.stack is not None:
+                t = (evt.sequence_nr, evt.thread)
+                if t not in fwd_stacks:
+                    fwd_stacks[t] = evt.stack
+
+        for evt in self:
+            p = bw_parent(evt)
+            if p is not None:
+                assert p.fwd_thread is not None
+                t = (p.sequence_nr, p.fwd_thread)
+                if t in fwd_stacks:
+                    evt.stack = fwd_stacks[t]
+                else:
+                    evt.stack = []
+
+    @property
+    def self_cpu_time_total(self):
+        return sum(event.self_cpu_time_total for event in self)
+
+    def table(
+        self,
+        sort_by=None,
+        row_limit=100,
+        max_src_column_width=75,
+        max_name_column_width=55,
+        max_shapes_column_width=80,
+        header=None,
+        top_level_events_only=False,
+    ):
+        """Print an EventList as a nicely formatted table.
+
+        Args:
+            sort_by (str, optional): Attribute used to sort entries. By default
+                they are printed in the same order as they were registered.
+                Valid keys include: ``cpu_time``, ``cuda_time``, ``xpu_time``,
+                ``cpu_time_total``, ``cuda_time_total``, ``xpu_time_total``,
+                ``cpu_memory_usage``, ``cuda_memory_usage``, ``xpu_memory_usage``,
+                ``self_cpu_memory_usage``, ``self_cuda_memory_usage``,
+                ``self_xpu_memory_usage``, ``count``.
+            top_level_events_only(bool, optional): Boolean flag to determine the
+                selection of events to display. If true, the profiler will only
+                display events at the top level, like the top-level invocation of python
+                ``lstm``, python ``add`` or other functions; nested events, like low-level
+                cpu/cuda/xpu op events, are omitted for profiler result readability.
+
+        Returns:
+            A string containing the table.
+        """
+        return _build_table(
+            self,
+            sort_by=sort_by,
+            row_limit=row_limit,
+            max_src_column_width=max_src_column_width,
+            max_name_column_width=max_name_column_width,
+            max_shapes_column_width=max_shapes_column_width,
+            header=header,
+            profile_memory=self._profile_memory,
+            with_flops=self._with_flops,
+            top_level_events_only=top_level_events_only,
+        )
+
+    def export_chrome_trace(self, path):
+        """Export an EventList as a Chrome tracing tools file.
+
+        The checkpoint can be later loaded and inspected under the ``chrome://tracing`` URL.
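+
+        For example (a sketch; assumes a built EventList ``events``)::
+
+            events.export_chrome_trace("trace.json")
+            # then open chrome://tracing in Chrome and load trace.json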
+ + Args: + path (str): Path where the trace will be written. + """ + import os + + device_name = "cuda" if not self._use_device else self._use_device + with open(path, "w") as f: + chrome_events = [] + next_id = 0 + # Use file IO over using json.dump since JSON dumping is very slow and + # this technique is proven to give a 4x speedup. + f.write("[") + for evt in self: + if evt.trace_name is None: + continue + f.write( + '{{"name": "{}", ' + '"ph": "X", ' + '"ts": {}, ' + '"dur": {}, ' + '"tid": {}, ' + '"pid": "CPU functions", ' + '"args": {{}}}}, '.format( + evt.trace_name, + evt.time_range.start, + evt.time_range.elapsed_us(), + evt.thread + if not evt.is_remote + else f'" node_id:{evt.node_id}, thread_id:{evt.thread} "', + ) + ) + for k in evt.kernels: + # 's' and 'f' draw Flow arrows from + # the CPU launch to the GPU kernel + f.write( + f'{{"name": "{evt.trace_name}", ' + '"ph": "s", ' + f'"ts": {evt.time_range.start}, ' + f'"tid": {evt.thread}, ' + '"pid": "CPU functions", ' + f'"id": {next_id}, ' + f'"cat": "cpu_to_{device_name}", ' + '"args": {}}, ' + ) + # Note: use torch.profiler to get device kernel trace + next_id += 1 + if len(self) > 0: + # remove trailing whitespace and comma + f.seek(f.tell() - 2, os.SEEK_SET) + f.truncate() + f.write("]") + + def supported_export_stacks_metrics(self): + return [ + "self_cpu_time_total", + "self_cuda_time_total", + "self_xpu_time_total", + "self_privateuse1_time_total", + ] + + def export_stacks(self, path: str, metric: str): + if metric not in self.supported_export_stacks_metrics(): + raise ValueError( + "metric should be one of: " + + str(self.supported_export_stacks_metrics()) + ) + translate_table = str.maketrans(" ;\t\n", "____") + with open(path, "w") as f: + for evt in self: + if evt.stack and len(evt.stack) > 0: + metric_value = getattr( + evt, + metric.replace("cuda", "device") + .replace("xpu", "device") + .replace("privateuse1", "device"), + ) + if int(metric_value) > 0: + stack_str = "" + for entry in reversed(evt.stack): + stack_str += entry.translate(translate_table) + stack_str += ";" + stack_str = stack_str[:-1] + " " + str(int(metric_value)) + f.write(stack_str + "\n") + + def key_averages(self, group_by_input_shapes=False, group_by_stack_n=0): + """Averages all function events over their keys. + + Args: + group_by_input_shapes: group entries by + (event name, input shapes) rather than just event name. + This is useful to see which input shapes contribute to the runtime + the most and may help with size-specific optimizations or + choosing the best candidates for quantization (aka fitting a roof line) + + group_by_stack_n: group by top n stack trace entries + + Returns: + An EventList containing FunctionEventAvg objects. 
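+
+        For example (a sketch; assumes a built EventList ``events``)::
+
+            averages = events.key_averages(group_by_input_shapes=True)
+            print(averages.table(sort_by="self_cpu_time_total", row_limit=10))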
+ """ + assert self._tree_built + stats: Dict[Tuple[str, ...], FunctionEventAvg] = defaultdict(FunctionEventAvg) + + def get_key(event, group_by_input_shapes, group_by_stack_n) -> Tuple[str, ...]: + key = [ + str(event.key), + str(event.node_id), + str(event.device_type), + str(event.is_legacy), + ] + if group_by_input_shapes: + key.append(str(event.input_shapes)) + if group_by_stack_n > 0: + key += event.stack[:group_by_stack_n] + return tuple(key) + + for evt in self: + stats[get_key(evt, group_by_input_shapes, group_by_stack_n)].add(evt) + + avg_list = EventList( + stats.values(), + use_device=self._use_device, + profile_memory=self._profile_memory, + with_flops=self._with_flops, + ) + for evt in avg_list: + evt.stack = evt.stack[:group_by_stack_n] + if not group_by_input_shapes: + evt.input_shapes = "" + return avg_list + + def total_average(self): + """Averages all events. + + Returns: + A FunctionEventAvg object. + """ + total_stat = FunctionEventAvg() + for evt in self: + total_stat += evt + total_stat.key = None + total_stat.key = "Total" + return total_stat + + +def _format_time(time_us): + """Define how to format time in FunctionEvent.""" + US_IN_SECOND = 1000.0 * 1000.0 + US_IN_MS = 1000.0 + if time_us >= US_IN_SECOND: + return f"{time_us / US_IN_SECOND:.3f}s" + if time_us >= US_IN_MS: + return f"{time_us / US_IN_MS:.3f}ms" + return f"{time_us:.3f}us" + + +def _format_time_share(time_us, total_time_us): + """Define how to format time in FunctionEvent.""" + if total_time_us == 0: + assert time_us == 0, f"Expected time_us == 0 but got {time_us}" + return "NaN" + return f"{time_us * 100.0 / total_time_us:.2f}%" + + +def _format_memory(nbytes): + """Return a formatted memory size string.""" + KB = 1024 + MB = 1024 * KB + GB = 1024 * MB + if abs(nbytes) >= GB: + return f"{nbytes * 1.0 / GB:.2f} Gb" + elif abs(nbytes) >= MB: + return f"{nbytes * 1.0 / MB:.2f} Mb" + elif abs(nbytes) >= KB: + return f"{nbytes * 1.0 / KB:.2f} Kb" + else: + return str(nbytes) + " b" + + +def _attr_formatter(name): + return property(lambda self: _format_time(getattr(self, name))) + + +class FormattedTimesMixin: + """Helpers for FunctionEvent and FunctionEventAvg. + + The subclass should define `*_time_total` and `count` attributes. 
+ """ + + cpu_time_str = _attr_formatter("cpu_time") + device_time_str = _attr_formatter("device_time") + cpu_time_total_str = _attr_formatter("cpu_time_total") + device_time_total_str = _attr_formatter("device_time_total") + self_cpu_time_total_str = _attr_formatter("self_cpu_time_total") + self_device_time_total_str = _attr_formatter("self_device_time_total") + + @property + def cpu_time(self): + return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count # type: ignore[attr-defined] + + @property + def device_time(self): + return 0.0 if self.count == 0 else 1.0 * self.device_time_total / self.count # type: ignore[attr-defined] + + @property + @deprecated( + "`cuda_time` is deprecated, please use `device_time` instead.", + category=FutureWarning, + ) + def cuda_time(self): # To be deprecated + return self.device_time + + +class Interval: + def __init__(self, start, end): + self.start = start + self.end = end + + def elapsed_us(self): + r""" + Returns the length of the interval + """ + return self.end - self.start + + +Kernel = namedtuple("Kernel", ["name", "device", "duration"]) + + +class FunctionEvent(FormattedTimesMixin): + """Profiling information about a single function.""" + + def __init__( + self, + id, + name, + thread, + start_us, + end_us, + fwd_thread=None, + input_shapes=None, + stack=None, + scope=0, + use_device=None, + cpu_memory_usage=0, + device_memory_usage=0, + is_async=False, + is_remote=False, + sequence_nr=-1, + node_id=-1, + device_type=DeviceType.CPU, + device_index=0, + device_resource_id=None, + is_legacy=False, + flops=None, + trace_name=None, + concrete_inputs=None, + ): + self.id: int = id + self.node_id: int = node_id + self.name: str = name + self.trace_name: str = trace_name + self.time_range: Interval = Interval(start_us, end_us) + self.thread: int = thread + self.fwd_thread: Optional[int] = fwd_thread + self.kernels: List[Kernel] = [] + self.count: int = 1 + self.cpu_children: List[FunctionEvent] = [] + self.cpu_parent: Optional[FunctionEvent] = None + self.input_shapes: Tuple[int, ...] = input_shapes + self.concrete_inputs: List[Any] = concrete_inputs + self.stack: List = stack + self.scope: int = scope + self.use_device: Optional[str] = use_device + self.cpu_memory_usage: int = cpu_memory_usage + self.device_memory_usage: int = device_memory_usage + self.is_async: bool = is_async + self.is_remote: bool = is_remote + self.sequence_nr: int = sequence_nr + self.device_type: DeviceType = device_type + self.device_index: int = device_index + self.device_resource_id: int = ( + thread if device_resource_id is None else device_resource_id + ) + self.is_legacy: bool = is_legacy + self.flops: Optional[int] = flops + + def append_kernel(self, name, device, duration): + assert self.device_type == DeviceType.CPU + self.kernels.append(Kernel(name, device, duration)) + + def append_cpu_child(self, child): + """Append a CPU child of type FunctionEvent. + + One is supposed to append only direct children to the event to have + correct self cpu time being reported. + """ + assert self.device_type == DeviceType.CPU + assert isinstance(child, FunctionEvent) + assert child.device_type == DeviceType.CPU + self.cpu_children.append(child) + + def set_cpu_parent(self, parent): + """Set the immediate CPU parent of type FunctionEvent. + + One profiling FunctionEvent should have only one CPU parent such that + the child's range interval is completely inside the parent's. We use + this connection to determine the event is from top-level op or not. 
+ """ + assert self.device_type == DeviceType.CPU + assert isinstance(parent, FunctionEvent) + assert parent.device_type == DeviceType.CPU + self.cpu_parent = parent + + # Note: async events don't have children, are not used when computing 'self' + # metrics of other events, have only total cpu time + @property + def self_cpu_memory_usage(self): + if self.is_async or self.device_type != DeviceType.CPU: + return 0 + return self.cpu_memory_usage - sum( + child.cpu_memory_usage for child in self.cpu_children + ) + + @property + def self_device_memory_usage(self): + if self.is_async or self.device_type != DeviceType.CPU: + return 0 + return self.device_memory_usage - sum( + child.device_memory_usage for child in self.cpu_children + ) + + @property + @deprecated( + "`self_cuda_memory_usage` is deprecated. Use `self_device_memory_usage` instead.", + category=FutureWarning, + ) + def self_cuda_memory_usage(self): # To be deprecated + return self.self_device_memory_usage + + @property + def cpu_time_total(self): + if self.device_type == DeviceType.CPU: + return self.time_range.elapsed_us() + else: + return 0 + + @property + def self_cpu_time_total(self): + if self.is_async or self.device_type != DeviceType.CPU: + return 0 + return self.cpu_time_total - sum( + child.cpu_time_total for child in self.cpu_children + ) + + @property + def device_time_total(self): + if self.is_async or not self.use_device: + return 0 + if self.device_type == DeviceType.CPU: + if not self.is_legacy: + # account for the kernels in the children ops + return sum(kinfo.duration for kinfo in self.kernels) + sum( + ch.device_time_total for ch in self.cpu_children + ) + else: + # each legacy cpu events has a single (fake) kernel + return sum(kinfo.duration for kinfo in self.kernels) + else: + assert self.device_type in [DeviceType.CUDA, DeviceType.PrivateUse1] + return self.time_range.elapsed_us() + + @property + @deprecated( + "`cuda_time_total` is deprecated. Use `device_time_total` instead.", + category=FutureWarning, + ) + def cuda_time_total(self): # To be deprecated + return self.device_time_total + + @property + def self_device_time_total(self): + if self.is_async or not self.use_device: + return 0 + if self.device_type == DeviceType.CPU: + return self.device_time_total - sum( + [child.device_time_total for child in self.cpu_children] + ) + else: + assert self.device_type in [DeviceType.CUDA, DeviceType.PrivateUse1] + return self.device_time_total + + @property + @deprecated( + "`self_cuda_time_total` is deprecated. 
Use `self_device_time_total` instead.",
+        category=FutureWarning,
+    )
+    def self_cuda_time_total(self):  # To be deprecated
+        return self.self_device_time_total
+
+    @property
+    def key(self):
+        return self.name
+
+    def __repr__(self):
+        device_name = self.use_device
+        device_time = self.device_time_str
+        device_memory_usage = self.device_memory_usage
+        return (
+            f"<FunctionEvent id={self.id} name={self.name} device_type={self.device_type} "
+            f"node_id={self.node_id} cpu_time={self.cpu_time_str} start_us={self.time_range.start} "
+            f"end_us={self.time_range.end} cpu_children={str([child.id for child in self.cpu_children])} "
+            f"{device_name}_time={device_time} thread={self.thread} input_shapes={str(self.input_shapes)} "
+            f"cpu_memory_usage={self.cpu_memory_usage} {device_name}_memory_usage={device_memory_usage} "
+            f"is_async={self.is_async} is_remote={self.is_remote} seq_nr={self.sequence_nr} "
+            f"is_legacy={self.is_legacy}>"
+        )
+
+
+class FunctionEventAvg(FormattedTimesMixin):
+    """Used to average stats over multiple FunctionEvent objects."""
+
+    def __init__(self):
+        self.key: Optional[str] = None
+        self.count: int = 0
+        self.node_id: int = 0
+        self.is_async: bool = False
+        self.is_remote: bool = False
+        self.use_device: Optional[str] = None
+        self.cpu_time_total: int = 0
+        self.device_time_total: int = 0
+        self.self_cpu_time_total: int = 0
+        self.self_device_time_total: int = 0
+        self.input_shapes: Optional[List[List[int]]] = None
+        self.stack: Optional[List] = None
+        self.scope: Optional[int] = None
+        self.cpu_memory_usage: int = 0
+        self.device_memory_usage: int = 0
+        self.self_cpu_memory_usage: int = 0
+        self.self_device_memory_usage: int = 0
+        self.cpu_children: Optional[List[FunctionEvent]] = None
+        self.cpu_parent: Optional[FunctionEvent] = None
+        self.device_type: DeviceType = DeviceType.CPU
+        self.is_legacy: bool = False
+        self.flops: int = 0
+
+    def add(self, other):
+        if self.key is None:
+            # First function being recorded as part of FunctionEventAvg, propagate
+            # fields.
+            self.key = other.key
+            self.node_id = other.node_id
+            self.is_async = other.is_async
+            self.is_remote = other.is_remote
+            self.cpu_parent = other.cpu_parent
+            self.cpu_children = other.cpu_children
+
+            self.input_shapes = other.input_shapes
+            self.stack = other.stack
+            self.scope = other.scope
+            self.device_type = other.device_type
+            self.is_legacy = other.is_legacy
+            self.use_device = other.use_device
+
+        assert isinstance(other, (FunctionEvent, FunctionEventAvg))
+        assert other.key == self.key
+        self.cpu_time_total += other.cpu_time_total
+        self.device_time_total += other.device_time_total
+        self.self_cpu_time_total += other.self_cpu_time_total
+        self.self_device_time_total += other.self_device_time_total
+        self.cpu_memory_usage += other.cpu_memory_usage
+        self.device_memory_usage += other.device_memory_usage
+        self.self_cpu_memory_usage += other.self_cpu_memory_usage
+        self.self_device_memory_usage += other.self_device_memory_usage
+        self.count += other.count
+        if self.flops is None:
+            self.flops = other.flops
+        elif other.flops is not None:
+            self.flops += other.flops
+        return self
+
+    def __iadd__(self, other):
+        return self.add(other)
+
+    def __repr__(self):
+        device_name = "cuda" if not self.use_device else self.use_device
+        self_device_time = self.self_device_time_total_str
+        device_time = self.device_time_str
+        device_memory = self.device_memory_usage
+        return (
+            f"<FunctionEventAvg key={self.key} self_cpu_time={self.self_cpu_time_total_str} "
+            f"cpu_time={self.cpu_time_str} self_{device_name}_time={self_device_time} "
+            f"{device_name}_time={device_time} input_shapes={str(self.input_shapes)} "
+            f"cpu_memory_usage={self.cpu_memory_usage} {device_name}_memory_usage={device_memory}>"
+        )
+
+
+class StringTable(defaultdict):
+    def __missing__(self, key):
+        # manage cases like 't' (demangled to 'unsigned short') separately,
+        # for now simply check the length to avoid unexpected results for
+        # the short sequences
+        self[key] = torch._C._demangle(key) if len(key) > 1 else key
+        return self[key]
+
+
+class MemRecordsAcc:
+    """Acceleration structure for accessing mem_records in interval."""
+
+    def __init__(self, mem_records):
+        self._mem_records = mem_records
+        self._start_nses: List[int] = []
+        self._indices: List[int] = []
+        if len(mem_records) > 0:
+            tmp = sorted([(r[0].start_ns(), i) for i, r in enumerate(mem_records)])
+            self._start_nses, self._indices = zip(*tmp)  # 
type: ignore[assignment] + + def in_interval(self, start_us, end_us): + r""" + Return all records in the given interval + To maintain backward compatibility, convert us to ns in function + """ + start_idx = bisect.bisect_left(self._start_nses, start_us * 1000) + end_idx = bisect.bisect_right(self._start_nses, end_us * 1000) + for i in range(start_idx, end_idx): + yield self._mem_records[self._indices[i]] + + +def _filter_stack_entry(entry): + filtered_entries = [ + ("autograd/__init__", "_make_grads"), + ("autograd/__init__", "backward"), + ("torch/tensor", "backward"), + ("_internal/common_utils", "prof_callable"), + ("_internal/common_utils", "prof_func_call"), + ("_internal/common_utils", "prof_meth_call"), + ] + return all(not (f[0] in entry and f[1] in entry) for f in filtered_entries) + + +MEMORY_EVENT_NAME = "[memory]" +OUT_OF_MEMORY_EVENT_NAME = "[OutOfMemory]" + + +def _filter_name(name): + # ignoring the following utility ops + filtered_out_names = [ + MEMORY_EVENT_NAME, # used only for the top-level memory events + OUT_OF_MEMORY_EVENT_NAME, + "profiler::_record_function_enter", + "profiler::_record_function_enter_new", + "profiler::_record_function_exit", + "aten::is_leaf", + "aten::output_nr", + "aten::_version", + ] + return name in filtered_out_names + + +# Demangles and optionally rewrites the provided event name, +# with_wildcard - whether to replace certain numbered event names +# with a wildcard name to aggregate them together in the profiler table +# output +def _rewrite_name(name, with_wildcard=False): + string_table = StringTable() + name = string_table[name] + if with_wildcard: + if name.startswith("ProfilerStep#"): + name = "ProfilerStep*" + return name + + +def _build_table( + events, + sort_by=None, + header=None, + row_limit=100, + max_src_column_width=75, + max_name_column_width=55, + max_shapes_column_width=80, + with_flops=False, + profile_memory=False, + top_level_events_only=False, +): + """Print a summary of events (which can be a list of FunctionEvent or FunctionEventAvg).""" + if len(events) == 0: + return "" + + has_device_time = any(event.self_device_time_total > 0 for event in events) + has_device_mem = any(event.self_device_memory_usage > 0 for event in events) + use_device = events[0].use_device + # Running on PrivateUse1 device with profiler but not enable + # ProfilerActivity.PrivateUse1 can also catch privateuse1 memory usage. + # Here only need to check has_privateuse1_time if not use_device. 
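+    # In other words: device-side self time without a known device string is an
+    # inconsistent state, so fail loudly rather than print a misleading table.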
+ if not use_device and has_device_time: + raise RuntimeError("use_device is None, but there is device performance data.") + + has_input_shapes = any( + (event.input_shapes is not None and len(event.input_shapes) > 0) + for event in events + ) + + if sort_by is not None: + events = EventList( + sorted( + events, + key=lambda evt: getattr( + evt, + sort_by.replace("cuda", "device") + .replace("xpu", "device") + .replace("privateuse1", "device"), + ), + reverse=True, + ), + use_device=use_device, + profile_memory=profile_memory, + with_flops=with_flops, + ) + + name_column_width = max(len(evt.key) for evt in events) + 4 + if max_name_column_width is not None: + name_column_width = min(name_column_width, max_name_column_width) + + shapes_column_width = max(len(str(evt.input_shapes)) for evt in events) + 4 + if max_shapes_column_width is not None: + shapes_column_width = min(shapes_column_width, max_shapes_column_width) + + DEFAULT_COLUMN_WIDTH = 12 + flops_column_width = DEFAULT_COLUMN_WIDTH + + src_column_width = None + stacks = [] + for evt in events: + if evt.stack is not None and len(evt.stack) > 0: + stacks.append(evt.stack) + has_stack = len(stacks) > 0 + if has_stack: + src_column_width = ( + max(max(len(entry) for entry in stack) for stack in stacks) + 4 + ) + if max_src_column_width is not None: + src_column_width = min(src_column_width, max_src_column_width) + + headers = [ + "Name", + "Self CPU %", + "Self CPU", + "CPU total %", + "CPU total", + "CPU time avg", + ] + device_name = use_device.upper() if use_device is not None else "None" + if has_device_time: + headers.extend( + [ + f"Self {device_name}", + f"Self {device_name} %", + f"{device_name} total", + f"{device_name} time avg", + ] + ) + if profile_memory: + headers.extend( + [ + "CPU Mem", + "Self CPU Mem", + ] + ) + if use_device and has_device_mem: + headers.extend( + [ + f"{device_name} Mem", + f"Self {device_name} Mem", + ] + ) + headers.append("# of Calls") + # Only append Node ID if any event has a valid (>= 0) Node ID + append_node_id = any(evt.node_id != -1 for evt in events) + if append_node_id: + headers.append("Node ID") + + # Have to use a list because nonlocal is Py3 only... 
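+    # (add_column below mutates these one-element lists in place to build up
+    # the row format string one fixed-width column at a time.)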
+ SPACING_SIZE = 2 + row_format_lst = [""] + header_sep_lst = [""] + line_length_lst = [-SPACING_SIZE] + + def add_column(padding, text_dir=">"): + row_format_lst[0] += ( + "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE) + ) + header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE) + line_length_lst[0] += padding + SPACING_SIZE + + def auto_scale_flops(flops): + flop_headers = [ + "FLOPs", + "KFLOPs", + "MFLOPs", + "GFLOPs", + "TFLOPs", + "PFLOPs", + ] + assert flops > 0 + log_flops = max(0, min(math.log10(flops) / 3, float(len(flop_headers) - 1))) + assert log_flops >= 0 and log_flops < len(flop_headers) + return (pow(10, (math.floor(log_flops) * -3.0)), flop_headers[int(log_flops)]) + + add_column(name_column_width) + for _ in headers[1:]: + add_column(DEFAULT_COLUMN_WIDTH) + + if has_input_shapes: + headers.append("Input Shapes") + add_column(shapes_column_width) + + if has_stack: + headers.append("Source Location") + add_column(src_column_width, text_dir="<") + + if with_flops: + # Auto-scaling of flops header + raw_flops = [] + for evt in events: + if evt.flops > 0: + raw_flops.append(evt.flops) + if len(raw_flops) != 0: + (flops_scale, flops_header) = auto_scale_flops(min(raw_flops)) + headers.append(f"Total {flops_header}") + add_column(flops_column_width) + else: + with_flops = False # can't find any valid flops + + row_format = row_format_lst[0] + header_sep = header_sep_lst[0] + line_length = line_length_lst[0] + add_column = None # type: ignore[assignment] + + # Have to use a list because nonlocal is Py3 only... + result = [] + + def append(s): + result.append(s) + result.append("\n") # Yes, newline after the end as well + + sum_self_cpu_time_total = 0 + sum_self_device_time_total = 0 + for evt in events: + sum_self_cpu_time_total += evt.self_cpu_time_total + if evt.device_type == DeviceType.CPU and evt.is_legacy: + # in legacy profiler, kernel info is stored in cpu events + sum_self_device_time_total += evt.self_device_time_total + elif evt.device_type in [DeviceType.CUDA, DeviceType.PrivateUse1]: + # in kineto profiler, there're events with the correct device type (e.g. CUDA) + sum_self_device_time_total += evt.self_device_time_total + + # Actual printing + if header is not None: + append("=" * line_length) + append(header) + if top_level_events_only: + append("=" * line_length) + append("This report only display top-level ops statistics") + append(header_sep) + append(row_format.format(*headers)) + + append(header_sep) + + def trim_path(path, src_column_width): + if len(path) > src_column_width: + offset = len(path) - src_column_width + path = path[offset:] + if len(path) > 3: + path = "..." + path[3:] + return path + + event_limit = 0 + for evt in events: + if event_limit == row_limit: + break + if top_level_events_only and evt.cpu_parent is not None: + continue + else: + event_limit += 1 + name = evt.key + if max_name_column_width is not None and len(name) >= max_name_column_width - 3: + name = name[: (max_name_column_width - 3)] + "..." + row_values = [ + name, + # Self CPU total %, 0 for async events. + _format_time_share(evt.self_cpu_time_total, sum_self_cpu_time_total), + evt.self_cpu_time_total_str, # Self CPU total + # CPU total %, 0 for async events. 
+ _format_time_share(evt.cpu_time_total, sum_self_cpu_time_total) + if not evt.is_async + else 0, + evt.cpu_time_total_str, # CPU total + evt.cpu_time_str, # CPU time avg + ] + if has_device_time: + row_values.extend( + [ + evt.self_device_time_total_str, + # device time total % + _format_time_share( + evt.self_device_time_total, sum_self_device_time_total + ), + evt.device_time_total_str, + evt.device_time_str, # device time avg + ] + ) + if profile_memory: + row_values.extend( + [ + # CPU Mem Total + _format_memory(evt.cpu_memory_usage), + # Self CPU Mem Total + _format_memory(evt.self_cpu_memory_usage), + ] + ) + if use_device and has_device_mem: + row_values.extend( + [ + # Device Mem Total + _format_memory(evt.device_memory_usage), + # Self Device Mem Total + _format_memory(evt.self_device_memory_usage), + ] + ) + row_values.append( + evt.count, # Number of calls + ) + + if append_node_id: + row_values.append(evt.node_id) + if has_input_shapes: + row_values.append(str(evt.input_shapes)[:shapes_column_width]) + if with_flops: + if evt.flops <= 0: + row_values.append("--") + else: + row_values.append(f"{evt.flops * flops_scale:8.3f}") # type: ignore[possibly-undefined] + if has_stack: + src_field = "" + if len(evt.stack) > 0: + src_field = trim_path(evt.stack[0], src_column_width) + row_values.append(src_field) + append(row_format.format(*row_values)) + + if has_stack: + empty_headers = [""] * (len(headers) - 1) + for entry in evt.stack[1:]: + append( + row_format.format( + *(empty_headers + [trim_path(entry, src_column_width)]) + ) + ) + empty_headers.append("") + append(row_format.format(*empty_headers)) + + append(header_sep) + append(f"Self CPU time total: {_format_time(sum_self_cpu_time_total)}") + if has_device_time: + append( + f"Self {use_device.upper() if use_device is not None else 'None'} " + f"time total: {_format_time(sum_self_device_time_total)}" + ) + return "".join(result) diff --git a/parrot/lib/python3.10/site-packages/torch/autograd/variable.py b/parrot/lib/python3.10/site-packages/torch/autograd/variable.py new file mode 100644 index 0000000000000000000000000000000000000000..84b504a9c82c7ab855df9ba58d934fa92d936253 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/autograd/variable.py @@ -0,0 +1,15 @@ +# mypy: allow-untyped-defs +import torch +from torch._C import _ImperativeEngine as ImperativeEngine + + +__all__ = ["VariableMeta", "Variable"] + + +class VariableMeta(type): + def __instancecheck__(cls, other): + return isinstance(other, torch.Tensor) + + +class Variable(torch._C._LegacyVariableBase, metaclass=VariableMeta): # type: ignore[misc] + _execution_engine = ImperativeEngine() diff --git a/parrot/lib/python3.10/site-packages/torch/bin/torch_shm_manager b/parrot/lib/python3.10/site-packages/torch/bin/torch_shm_manager new file mode 100644 index 0000000000000000000000000000000000000000..f2697deaa9770f09c382252399d0640e0784e10d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/bin/torch_shm_manager differ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/__init__.py b/parrot/lib/python3.10/site-packages/torch/optim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58d9c948416b8d0218fd4e4b71b1326d54bdb1a0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/__init__.py @@ -0,0 +1,38 @@ +""" +:mod:`torch.optim` is a package implementing various optimization algorithms. 
+ +Most commonly used methods are already supported, and the interface is general +enough, so that more sophisticated ones can also be easily integrated in the +future. +""" + +from . import lr_scheduler, swa_utils +from .adadelta import Adadelta +from .adagrad import Adagrad +from .adam import Adam +from .adamax import Adamax +from .adamw import AdamW +from .asgd import ASGD +from .lbfgs import LBFGS +from .nadam import NAdam +from .optimizer import Optimizer +from .radam import RAdam +from .rmsprop import RMSprop +from .rprop import Rprop +from .sgd import SGD +from .sparse_adam import SparseAdam + +del adadelta # type: ignore[name-defined] # noqa: F821 +del adagrad # type: ignore[name-defined] # noqa: F821 +del adam # type: ignore[name-defined] # noqa: F821 +del adamw # type: ignore[name-defined] # noqa: F821 +del sparse_adam # type: ignore[name-defined] # noqa: F821 +del adamax # type: ignore[name-defined] # noqa: F821 +del asgd # type: ignore[name-defined] # noqa: F821 +del sgd # type: ignore[name-defined] # noqa: F821 +del radam # type: ignore[name-defined] # noqa: F821 +del rprop # type: ignore[name-defined] # noqa: F821 +del rmsprop # type: ignore[name-defined] # noqa: F821 +del optimizer # type: ignore[name-defined] # noqa: F821 +del nadam # type: ignore[name-defined] # noqa: F821 +del lbfgs # type: ignore[name-defined] # noqa: F821 diff --git a/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..caf5c74aab9d31ecd95465a328fa28b82cdcb0a2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/adamax.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/adamax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ed2e09d6f3fcff4187d06e2d0c5ec45f9d9208d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/adamax.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/adamw.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/adamw.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a79b074faf73b9170b718bbe3f17658f00dd96c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/adamw.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/asgd.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/asgd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fd0595d5b5c063169423eb1d2d9d660afae88c4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/asgd.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/lbfgs.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/lbfgs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..763964393b191aaa3ec62b9a003285c8c25f4be9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/lbfgs.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/nadam.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/nadam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..174c738c8942c0f6701e08ebe3fdbb9884d96cba Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/nadam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a80e4665431ed79500ee5579023b9b134900e2e1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/rmsprop.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/rmsprop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07bf40d0d4fba09ef0105b414fb5d0ebcc9c0096 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/rmsprop.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8730bb2311dc95edf74974c23166931fd8c7c5af Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84a2c03c8f34830a7ab06ea1108e12e93f18b225 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/sparse_adam.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/sparse_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ba858640d2a7239088304b9570695060adde6f5 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/optim/__pycache__/sparse_adam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/_functional.py b/parrot/lib/python3.10/site-packages/torch/optim/_functional.py new file mode 100644 index 0000000000000000000000000000000000000000..a307cc76846dc2be51a47a1b5b4e70c29aafffc4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/_functional.py @@ -0,0 +1,84 @@ +# mypy: allow-untyped-defs +r"""Functional interface.""" +import math +from typing import List + +from torch import Tensor + +from .adadelta import adadelta # type: ignore[attr-defined] # noqa: F401 +from .adagrad import _make_sparse, adagrad # type: ignore[attr-defined] # noqa: F401 +from .adam import adam # type: ignore[attr-defined] # noqa: F401 +from .adamax import adamax # type: ignore[attr-defined] # noqa: F401 +from .adamw import adamw # type: ignore[attr-defined] # noqa: F401 +from .asgd import asgd # type: ignore[attr-defined] # noqa: F401 +from .nadam import nadam # type: ignore[attr-defined] # noqa: F401 +from .radam import radam # type: ignore[attr-defined] # noqa: F401 +from .rmsprop import rmsprop # type: ignore[attr-defined] # noqa: F401 +from .rprop import rprop # type: ignore[attr-defined] # noqa: F401 +from 
.sgd import sgd # type: ignore[attr-defined] # noqa: F401 + + +# TODO: use foreach API in optim._functional to do all the computation + + +def sparse_adam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + state_steps: List[int], + *, + eps: float, + beta1: float, + beta2: float, + lr: float, + maximize: bool, +): + r"""Functional API that performs Sparse Adam algorithm computation. + + See :class:`~torch.optim.SparseAdam` for details. + """ + for i, param in enumerate(params): + grad = grads[i] + grad = grad if not maximize else -grad + grad = grad.coalesce() # the update is non-linear so indices must be unique + grad_indices = grad._indices() + grad_values = grad._values() + if grad_values.numel() == 0: + # Skip update for empty grad + continue + size = grad.size() + + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + step = state_steps[i] + + def make_sparse(values): + constructor = grad.new + if grad_indices.dim() == 0 or values.dim() == 0: + return constructor().resize_as_(grad) + return constructor(grad_indices, values, size) + + # Decay the first and second moment running average coefficient + # old <- b * old + (1 - b) * new + # <==> old += (1 - b) * (new - old) + old_exp_avg_values = exp_avg.sparse_mask(grad)._values() + exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1) + exp_avg.add_(make_sparse(exp_avg_update_values)) + old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values() + exp_avg_sq_update_values = ( + grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2) + ) + exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values)) + + # Dense addition again is intended, avoiding another sparse_mask + numer = exp_avg_update_values.add_(old_exp_avg_values) + exp_avg_sq_update_values.add_(old_exp_avg_sq_values) + denom = exp_avg_sq_update_values.sqrt_().add_(eps) + del exp_avg_update_values, exp_avg_sq_update_values + + bias_correction1 = 1 - beta1**step + bias_correction2 = 1 - beta2**step + step_size = lr * math.sqrt(bias_correction2) / bias_correction1 + + param.add_(make_sparse(-step_size * numer.div_(denom))) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/adadelta.py b/parrot/lib/python3.10/site-packages/torch/optim/adadelta.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f19fb069aeacdd1324a45826a2fcfb076adde5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/adadelta.py @@ -0,0 +1,452 @@ +# mypy: allow-untyped-defs +from typing import Any, Dict, List, Optional + +import torch +from torch import Tensor + +from .optimizer import ( + _capturable_doc, + _default_to_fused_or_foreach, + _differentiable_doc, + _disable_dynamo_if_unsupported, + _foreach_doc, + _get_capturable_supported_devices, + _get_scalar_dtype, + _maximize_doc, + _use_grad_for_differentiable, + _view_as_real, + Optimizer, + ParamsT, +) + +__all__ = ["Adadelta", "adadelta"] + + +class Adadelta(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = 1.0, + rho: float = 0.9, + eps: float = 1e-6, + weight_decay: float = 0, + foreach: Optional[bool] = None, + *, + capturable: bool = False, + maximize: bool = False, + differentiable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= rho <= 1.0: + raise ValueError(f"Invalid rho value: {rho}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") 
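# The range checks above fail fast at construction time; a quick sketch of the
# resulting behaviour (assumes only that torch is importable):
#   >>> import torch
#   >>> torch.optim.Adadelta([torch.zeros(1, requires_grad=True)], rho=1.5)
#   ValueError: Invalid rho value: 1.5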
+ + defaults = dict( + lr=lr, + rho=rho, + eps=eps, + weight_decay=weight_decay, + maximize=maximize, + capturable=capturable, + foreach=foreach, + differentiable=differentiable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + group.setdefault("capturable", False) + for p in group["params"]: + p_state = self.state.get(p, []) + if len(p_state) != 0 and not torch.is_tensor(p_state["step"]): + step_val = float(p_state["step"]) + p_state["step"] = ( + torch.tensor( + step_val, dtype=_get_scalar_dtype(), device=p.device + ) + if group["capturable"] + else torch.tensor(step_val, dtype=_get_scalar_dtype()) + ) + + def _init_group( + self, + group: Dict[str, Any], + params_with_grad: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + acc_deltas: List[Tensor], + state_steps: List[Tensor], + ): + has_complex = False + p: Tensor + for p in group["params"]: + if p.grad is None: + continue + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError("Adadelta does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + + # Lazy state initialization + if len(state) == 0: + state["step"] = ( + torch.zeros((), dtype=_get_scalar_dtype(), device=p.device) + if group["capturable"] + else torch.zeros((), dtype=_get_scalar_dtype()) + ) + + state["square_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + state["acc_delta"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + square_avgs.append(state["square_avg"]) + acc_deltas.append(state["acc_delta"]) + state_steps.append(state["step"]) + + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad: List[Tensor] = [] + grads: List[Tensor] = [] + square_avgs: List[Tensor] = [] + acc_deltas: List[Tensor] = [] + state_steps: List[Tensor] = [] + ( + lr, + rho, + eps, + weight_decay, + foreach, + maximize, + differentiable, + capturable, + ) = ( + group["lr"], + group["rho"], + group["eps"], + group["weight_decay"], + group["foreach"], + group["maximize"], + group["differentiable"], + group["capturable"], + ) + + has_complex = self._init_group( + group, params_with_grad, grads, square_avgs, acc_deltas, state_steps + ) + + adadelta( + params_with_grad, + grads, + square_avgs, + acc_deltas, + state_steps, + lr=lr, + rho=rho, + eps=eps, + weight_decay=weight_decay, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + capturable=capturable, + has_complex=has_complex, + ) + + return loss + + +Adadelta.__doc__ = ( + r"""Implements Adadelta algorithm. + + .. 
math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, + \: f(\theta) \text{ (objective)}, \: \rho \text{ (decay)}, + \: \lambda \text{ (weight decay)} \\ + &\textbf{initialize} : v_0 \leftarrow 0 \: \text{ (square avg)}, + \: u_0 \leftarrow 0 \: \text{ (accumulate variables)} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}if \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm} v_t \leftarrow v_{t-1} \rho + g^2_t (1 - \rho) \\ + &\hspace{5mm}\Delta x_t \leftarrow \frac{\sqrt{u_{t-1} + + \epsilon }}{ \sqrt{v_t + \epsilon} }g_t \hspace{21mm} \\ + &\hspace{5mm} u_t \leftarrow u_{t-1} \rho + + \Delta x^2_t (1 - \rho) \\ + &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma \Delta x_t \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `ADADELTA: An Adaptive Learning Rate Method`_. + """ + + rf""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + rho (float, optional): coefficient used for computing a running average + of squared gradients (default: 0.9). A higher value of `rho` will + result in a slower average, which can be helpful for preventing + oscillations in the learning process. + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-6). + lr (float, optional): coefficient that scales the delta before it is applied + to the parameters (default: 1.0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + {_foreach_doc} + {_capturable_doc} + {_maximize_doc} + {_differentiable_doc} + + .. _ADADELTA\: An Adaptive Learning Rate Method: + https://arxiv.org/abs/1212.5701 + + """ +) + + +def _single_tensor_adadelta( + params: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + acc_deltas: List[Tensor], + state_steps: List[Tensor], + *, + lr: float, + rho: float, + eps: float, + weight_decay: float, + maximize: bool, + differentiable: bool, + capturable: bool, + has_complex: bool, +): + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices( + supports_xla=False + ) + assert all( + p.device.type == step.device.type + and p.device.type in capturable_supported_devices + for p, step in zip(params, state_steps) + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
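# How the loop below maps onto the math block in the class docstring
# (one parameter, per step; a sketch of the correspondence only):
#   v_t  = rho * v_{t-1} + (1 - rho) * g_t**2          -> square_avg
#   dx_t = sqrt(u_{t-1} + eps) / sqrt(v_t + eps) * g_t -> delta
#   u_t  = rho * u_{t-1} + (1 - rho) * dx_t**2         -> acc_delta
#   theta_t = theta_{t-1} - lr * dx_t                  -> param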
+ + for param, grad, square_avg, acc_delta, step in zip( + params, grads, square_avgs, acc_deltas, state_steps + ): + step += 1 + grad = grad if not maximize else -grad + + if weight_decay != 0: + grad = grad.add(param, alpha=weight_decay) + + if torch.is_complex(param): + square_avg = torch.view_as_real(square_avg) + acc_delta = torch.view_as_real(acc_delta) + grad = torch.view_as_real(grad) + + square_avg.mul_(rho).addcmul_(grad, grad, value=1 - rho) + std = square_avg.add(eps).sqrt_() + delta = acc_delta.add(eps).sqrt_() + if differentiable: + delta = delta.clone() + delta.div_(std).mul_(grad) + acc_delta.mul_(rho).addcmul_(delta, delta, value=1 - rho) + + if torch.is_complex(param): + delta = torch.view_as_complex(delta) + param.add_(delta, alpha=-lr) + + +def _multi_tensor_adadelta( + params: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + acc_deltas: List[Tensor], + state_steps: List[Tensor], + *, + lr: float, + rho: float, + eps: float, + weight_decay: float, + maximize: bool, + differentiable: bool, + capturable: bool, + has_complex: bool, +): + assert not differentiable, "_foreach ops don't support autograd" + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices( + supports_xla=False + ) + assert all( + p.device.type == step.device.type + and p.device.type in capturable_supported_devices + for p, step in zip(params, state_steps) + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." + + if len(params) == 0: + return + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, square_avgs, acc_deltas, state_steps] + ) + for ( + device_params, + device_grads, + device_square_avgs, + device_acc_deltas, + device_state_steps, + ), _ in grouped_tensors.values(): + if has_complex: + _view_as_real( + device_params, device_grads, device_square_avgs, device_acc_deltas + ) + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. 
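# Concretely, the branch below wraps the scalar step increment once,
#   torch._foreach_add_(steps, torch.tensor(1.0, device="cpu"), alpha=1.0)
# rather than letting the CPU fallback re-wrap 1 into a fresh Tensor inside
# every t.add(1) call of its internal loop (a sketch of the intent, not new API).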
+ if device_state_steps[0].is_cpu: + torch._foreach_add_( + device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0 + ) + else: + torch._foreach_add_(device_state_steps, 1) + + if maximize: + device_grads = torch._foreach_neg(device_grads) # type: ignore[assignment] + + if weight_decay != 0: + # Re-use the intermediate memory (device_grads) already allocated for maximize + if maximize: + torch._foreach_add_(device_grads, device_params, alpha=weight_decay) + else: + device_grads = torch._foreach_add( # type: ignore[assignment] + device_grads, device_params, alpha=weight_decay + ) + + torch._foreach_mul_(device_square_avgs, rho) + torch._foreach_addcmul_( + device_square_avgs, device_grads, device_grads, value=1 - rho + ) + + std = torch._foreach_add(device_square_avgs, eps) + torch._foreach_sqrt_(std) + + deltas = torch._foreach_add(device_acc_deltas, eps) + torch._foreach_sqrt_(deltas) + torch._foreach_div_(deltas, std) + torch._foreach_mul_(deltas, device_grads) + + torch._foreach_mul_(device_acc_deltas, rho) + torch._foreach_addcmul_(device_acc_deltas, deltas, deltas, value=1 - rho) + + # If LR is a tensor, the else branch will internally call item() + # which will cause silent incorrectness if we are capturing + if capturable and isinstance(lr, torch.Tensor): + torch._foreach_mul_(deltas, -lr) + torch._foreach_add_(device_params, deltas) + else: + torch._foreach_add_(device_params, deltas, alpha=-lr) + + +@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adadelta) +def adadelta( + params: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + acc_deltas: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + capturable: bool = False, + foreach: Optional[bool] = None, + differentiable: bool = False, + has_complex: bool = False, + *, + lr: float, + rho: float, + eps: float, + weight_decay: float, + maximize: bool, +): + r"""Functional API that performs Adadelta algorithm computation. + + See :class:`~torch.optim.Adadelta` for details. + """ + + # this check is slow during compilation, so we skip it + # if it's strictly needed we can add this check back in dynamo + if not torch._utils.is_compiling() and not all( + isinstance(t, torch.Tensor) for t in state_steps + ): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + # We still respect when the user inputs False for foreach. 
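# Dispatch summary for the functional adadelta below (sketch):
#   foreach=None             -> default chosen by _default_to_fused_or_foreach
#   foreach=True, eager mode -> _multi_tensor_adadelta (batched _foreach_* kernels)
#   foreach=True, scripting  -> RuntimeError (foreach unsupported under torch.jit.script)
#   foreach=False            -> _single_tensor_adadelta (plain per-parameter loop)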
+ if foreach is None: + _, foreach = _default_to_fused_or_foreach( + params, differentiable, use_fused=False + ) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adadelta + else: + func = _single_tensor_adadelta + + func( + params, + grads, + square_avgs, + acc_deltas, + state_steps, + lr=lr, + rho=rho, + eps=eps, + weight_decay=weight_decay, + maximize=maximize, + differentiable=differentiable, + capturable=capturable, + has_complex=has_complex, + ) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/adagrad.py b/parrot/lib/python3.10/site-packages/torch/optim/adagrad.py new file mode 100644 index 0000000000000000000000000000000000000000..0b6dfe852d0879333af7a45d3fdf0eaae643b5d1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/adagrad.py @@ -0,0 +1,555 @@ +# mypy: allow-untyped-defs +from typing import List, Optional + +import torch +from torch import Tensor +from torch.utils._foreach_utils import _get_fused_kernels_supported_devices +from .optimizer import ( + _default_to_fused_or_foreach, + _differentiable_doc, + _foreach_doc, + _get_scalar_dtype, + _get_value, + _maximize_doc, + _use_grad_for_differentiable, + _view_as_real, + Optimizer, + ParamsT, +) + +__all__ = ["Adagrad", "adagrad"] + + +class Adagrad(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = 1e-2, + lr_decay: float = 0, + weight_decay: float = 0, + initial_accumulator_value: float = 0, + eps: float = 1e-10, + foreach: Optional[bool] = None, + *, + maximize: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= lr_decay: + raise ValueError(f"Invalid lr_decay value: {lr_decay}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + if not 0.0 <= initial_accumulator_value: + raise ValueError( + f"Invalid initial_accumulator_value value: {initial_accumulator_value}" + ) + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + + defaults = dict( + lr=lr, + lr_decay=lr_decay, + eps=eps, + weight_decay=weight_decay, + initial_accumulator_value=initial_accumulator_value, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + fused=fused, + ) + super().__init__(params, defaults) + + if fused: + if differentiable: + raise RuntimeError("`fused` does not support `differentiable`") + self._step_supports_amp_scaling = True + fused_supported_devices = _get_fused_kernels_supported_devices() + # CUDA is not supported yet + fused_supported_devices.remove("cuda") + if not all( + p.device.type in fused_supported_devices and torch.is_floating_point(p) + for pg in self.param_groups + for p in pg["params"] + ): + raise RuntimeError( + "`fused=True` requires all the params to be floating point Tensors of " + f"supported devices: {fused_supported_devices}."
+ ) + if foreach: + raise RuntimeError("`fused` and `foreach` cannot be `True` together.") + + for group in self.param_groups: + for p in group["params"]: + state = self.state[p] + state["step"] = ( + torch.zeros( + (), + dtype=_get_scalar_dtype(is_fused=group["fused"]), + device=p.device, + ) + if group["fused"] + else torch.tensor(0.0, dtype=_get_scalar_dtype()) + ) + init_value = ( + complex(initial_accumulator_value, initial_accumulator_value) + if torch.is_complex(p) + else initial_accumulator_value + ) + state["sum"] = torch.full_like( + p, init_value, memory_format=torch.preserve_format + ) + + def __setstate__(self, state): + super().__setstate__(state) + # define "fused" for + # MYPY error: Name "fused" may be undefined + fused = None + for group in self.param_groups: + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + fused = group.setdefault("fused", None) + + state_values = list(self.state.values()) + step_is_tensor = (len(state_values) != 0) and torch.is_tensor( + state_values[0]["step"] + ) + if not step_is_tensor: + for s in state_values: + s["step"] = torch.tensor( + float(s["step"]), dtype=_get_scalar_dtype(is_fused=fused) + ) + + def share_memory(self): + for group in self.param_groups: + for p in group["params"]: + state = self.state[p] + state["sum"].share_memory_() + + def _init_group(self, group, params_with_grad, grads, state_sums, state_steps): + has_sparse_grad, has_complex = False, False + for p in group["params"]: + if p.grad is not None: + has_sparse_grad |= p.grad.is_sparse + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + grads.append(p.grad) + state = self.state[p] + state_sums.append(state["sum"]) + state_steps.append(state["step"]) + + return has_sparse_grad, has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad: List[Tensor] = [] + grads: List[Tensor] = [] + state_sums: List[Tensor] = [] + state_steps: List[Tensor] = [] + + has_sparse_grad, has_complex = self._init_group( + group, params_with_grad, grads, state_sums, state_steps + ) + + adagrad( + params_with_grad, + grads, + state_sums, + state_steps, + lr=group["lr"], + weight_decay=group["weight_decay"], + lr_decay=group["lr_decay"], + eps=group["eps"], + has_sparse_grad=has_sparse_grad, + foreach=group["foreach"], + maximize=group["maximize"], + differentiable=group["differentiable"], + has_complex=has_complex, + fused=group["fused"], + grad_scale=getattr(self, "grad_scale", None), + found_inf=getattr(self, "found_inf", None), + ) + + return loss + + +Adagrad.__doc__ = ( + r"""Implements Adagrad algorithm. + + .. 
math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta) + \text{ (objective)}, \: \lambda \text{ (weight decay)}, \\ + &\hspace{12mm} \tau \text{ (initial accumulator value)}, \: \eta\text{ (lr decay)}\\ + &\textbf{initialize} : state\_sum_0 \leftarrow \tau \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \tilde{\gamma} \leftarrow \gamma / (1 +(t-1) \eta) \\ + &\hspace{5mm} \textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}state\_sum_t \leftarrow state\_sum_{t-1} + g^2_t \\ + &\hspace{5mm}\theta_t \leftarrow + \theta_{t-1}- \tilde{\gamma} \frac{g_t}{\sqrt{state\_sum_t}+\epsilon} \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Adaptive Subgradient Methods for Online Learning + and Stochastic Optimization`_. + """ + + rf""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + lr_decay (float, optional): learning rate decay (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + initial_accumulator_value (float, optional): initial value of the + sum of squares of gradients (default: 0) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-10) + {_foreach_doc} + {_maximize_doc} + {_differentiable_doc} + fused (bool, optional): whether the fused implementation (CPU only) is used. + Currently, `torch.float64`, `torch.float32`, `torch.float16`, and `torch.bfloat16` + are supported. (default: None). Please note that the fused implementation does not + support sparse or complex gradients. + .. _Adaptive Subgradient Methods for Online Learning and Stochastic + Optimization: http://jmlr.org/papers/v12/duchi11a.html + + """ +) + + +def adagrad( + params: List[Tensor], + grads: List[Tensor], + state_sums: List[Tensor], + state_steps: List[Tensor], + fused: Optional[bool] = None, + grad_scale: Optional[Tensor] = None, + found_inf: Optional[Tensor] = None, + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting these as kwargs for now as functional API is compiled by torch/distributed/optim + has_sparse_grad: bool = False, + foreach: Optional[bool] = None, + differentiable: bool = False, + has_complex: bool = False, + *, + lr: float, + weight_decay: float, + lr_decay: float, + eps: float, + maximize: bool, +): + r"""Functional API that performs Adagrad algorithm computation. + + See :class:`~torch.optim.Adagrad` for details. + """ + if not all(isinstance(t, torch.Tensor) for t in state_steps): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + # Respect when the user inputs False/True for foreach or fused. We only want to change + # the default when neither has been user-specified. Note that we default to foreach + # and pass False to use_fused. This is not a mistake--we want to give the fused impl + # bake-in time before making it the default, even if it is typically faster.
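# Net precedence of the defaulting logic below (sketch):
#   fused=True   -> _fused_adagrad (and raises under torch.jit.script)
#   foreach=True -> _multi_tensor_adagrad
#   otherwise    -> _single_tensor_adagrad; when both are None,
#                   _default_to_fused_or_foreach may turn foreach on, never fused.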
+ if fused is None and foreach is None: + _, foreach = _default_to_fused_or_foreach( + params, differentiable, use_fused=False + ) + + if fused is None: + fused = False + if foreach is None: + foreach = False + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + if fused and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with fused optimizers") + + if fused and not torch.jit.is_scripting(): + func = _fused_adagrad + elif foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adagrad + else: + func = _single_tensor_adagrad + + func( + params, + grads, + state_sums, + state_steps, + lr=lr, + weight_decay=weight_decay, + lr_decay=lr_decay, + eps=eps, + has_sparse_grad=has_sparse_grad, + maximize=maximize, + differentiable=differentiable, + has_complex=has_complex, + grad_scale=grad_scale, + found_inf=found_inf, + ) + + +def _make_sparse(grad, grad_indices, values): + size = grad.size() + return torch.sparse_coo_tensor(grad_indices, values, size) + + +def _single_tensor_adagrad( + params: List[Tensor], + grads: List[Tensor], + state_sums: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + lr: float, + weight_decay: float, + lr_decay: float, + eps: float, + has_sparse_grad: bool, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + assert grad_scale is None and found_inf is None + for param, grad, state_sum, step_t in zip(params, grads, state_sums, state_steps): + # update step + step_t += 1 + step = _get_value(step_t) + grad = grad if not maximize else -grad + + if weight_decay != 0: + if grad.is_sparse: + raise RuntimeError( + "weight_decay option is not compatible with sparse gradients" + ) + grad = grad.add(param, alpha=weight_decay) + + clr = lr / (1 + (step - 1) * lr_decay) + + if grad.is_sparse: + grad = grad.coalesce() # the update is non-linear so indices must be unique + grad_indices = grad._indices() + grad_values = grad._values() + + state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2))) + std = state_sum.sparse_mask(grad) + std_values = std._values().sqrt_().add_(eps) + param.add_( + _make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr + ) + else: + is_complex = torch.is_complex(param) + if is_complex: + grad = torch.view_as_real(grad) + state_sum = torch.view_as_real(state_sum) + param = torch.view_as_real(param) + state_sum.addcmul_(grad, grad, value=1) + if differentiable: + std = state_sum.sqrt() + eps + else: + std = state_sum.sqrt().add_(eps) + param.addcdiv_(grad, std, value=-clr) + if is_complex: + param = torch.view_as_complex(param) + state_sum = torch.view_as_complex(state_sum) + + +def _multi_tensor_adagrad( + params: List[Tensor], + grads: List[Tensor], + state_sums: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + lr: float, + weight_decay: float, + lr_decay: float, + eps: float, + has_sparse_grad: bool, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + assert not differentiable, "_foreach ops don't support autograd" + assert grad_scale is None and found_inf is None + + # Foreach functions will throw errors if given empty lists + if len(params) == 0: + return + + grouped_tensorlists = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, state_sums, state_steps] + ) + for ( + device_params, + device_grads, + device_state_sums, + device_state_steps, + ), _ 
in grouped_tensorlists.values(): + device_has_sparse_grad = has_sparse_grad and any( + grad.is_sparse for grad in device_grads + ) + + if device_has_sparse_grad: + _single_tensor_adagrad( + device_params, + device_grads, + device_state_sums, + device_state_steps, + lr=lr, + weight_decay=weight_decay, + lr_decay=lr_decay, + eps=eps, + has_sparse_grad=True, + maximize=maximize, + differentiable=differentiable, + has_complex=has_complex, + grad_scale=grad_scale, + found_inf=found_inf, + ) + continue + + # Handle complex parameters + if has_complex: + _view_as_real(device_params, device_grads, device_state_sums) + + if maximize: + device_grads = torch._foreach_neg(device_grads) # type: ignore[assignment] + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. + if device_state_steps[0].is_cpu: + torch._foreach_add_( + device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0 + ) + else: + torch._foreach_add_(device_state_steps, 1) + + if weight_decay != 0: + # Re-use the intermediate memory (device_grads) already allocated for maximize + if maximize: + torch._foreach_add_(device_grads, device_params, alpha=weight_decay) + else: + device_grads = torch._foreach_add( # type: ignore[assignment] + device_grads, device_params, alpha=weight_decay + ) + + minus_clr = [ + -lr / (1 + (_get_value(step) - 1) * lr_decay) for step in device_state_steps + ] + + torch._foreach_addcmul_(device_state_sums, device_grads, device_grads, value=1) + + std = torch._foreach_sqrt(device_state_sums) + torch._foreach_add_(std, eps) + + if weight_decay != 0 or maximize: + # Again, re-use the intermediate memory (device_grads) already allocated + torch._foreach_mul_(device_grads, minus_clr) + numerator = device_grads + else: + numerator = torch._foreach_mul(device_grads, minus_clr) # type: ignore[assignment] + + torch._foreach_addcdiv_(device_params, numerator, std) + + +def _fused_adagrad( + params: List[Tensor], + grads: List[Tensor], + state_sums: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + lr: float, + weight_decay: float, + lr_decay: float, + eps: float, + has_sparse_grad: bool, + maximize: bool, + differentiable: bool, + has_complex: bool, +) -> None: + if not params: + return + if has_sparse_grad or has_complex: + raise RuntimeError("`fused` does not support sparse grad or complex param") + + if differentiable: + raise RuntimeError( + "adagrad with fused=True does not support differentiable=True" + ) + + grad_scale_dict = ( + {grad_scale.device: grad_scale} if grad_scale is not None else None + ) + found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, state_sums, state_steps] + ) + for (device, _), ( + ( + device_params, + device_grads, + device_state_sums, + device_state_steps, + ), + _, + ) in grouped_tensors.items(): + device_grad_scale, device_found_inf = None, None + if grad_scale is not None and grad_scale_dict is not None: + if device not in grad_scale_dict: + grad_scale_dict[device] = grad_scale.to(device, non_blocking=True) # type: ignore[index] + device_grad_scale = grad_scale_dict[device] # type: ignore[index] + if found_inf is not None and found_inf_dict is not 
None: + if device not in found_inf_dict: + found_inf_dict[device] = found_inf.to(device, non_blocking=True) # type: ignore[index] + device_found_inf = found_inf_dict[device] # type: ignore[index] + torch._foreach_add_(device_state_steps, 1) + torch._fused_adagrad_( + device_params, + device_grads, + device_state_sums, + device_state_steps, + lr=lr, + lr_decay=lr_decay, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + grad_scale=device_grad_scale, + found_inf=device_found_inf, + ) + if device_found_inf is not None: + torch._foreach_sub_( + device_state_steps, [device_found_inf] * len(device_state_steps) + ) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/adam.py b/parrot/lib/python3.10/site-packages/torch/optim/adam.py new file mode 100644 index 0000000000000000000000000000000000000000..86785be4ed1795dca231f9ec493cb1b7df824e1a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/adam.py @@ -0,0 +1,785 @@ +# mypy: allow-untyped-defs +from typing import List, Optional, Tuple, Union + +import torch +from torch import Tensor +from torch.utils._foreach_utils import _get_fused_kernels_supported_devices +from .optimizer import ( + _capturable_doc, + _default_to_fused_or_foreach, + _differentiable_doc, + _disable_dynamo_if_unsupported, + _dispatch_sqrt, + _foreach_doc, + _fused_doc, + _get_capturable_supported_devices, + _get_scalar_dtype, + _get_value, + _maximize_doc, + _stack_if_compiling, + _use_grad_for_differentiable, + _view_as_real, + DeviceDict, + Optimizer, + ParamsT, +) + +__all__ = ["Adam", "adam"] + + +class Adam(Optimizer): + def __init__( + self, + params: ParamsT, + lr: Union[float, Tensor] = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0, + amsgrad: bool = False, + *, + foreach: Optional[bool] = None, + maximize: bool = False, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if isinstance(lr, Tensor) and foreach and not capturable: + raise ValueError( + "lr as a Tensor is not supported for capturable=False and foreach=True" + ) + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + amsgrad=amsgrad, + maximize=maximize, + foreach=foreach, + capturable=capturable, + differentiable=differentiable, + fused=fused, + ) + super().__init__(params, defaults) + + if fused: + if differentiable: + raise RuntimeError("`fused` does not support `differentiable`") + self._step_supports_amp_scaling = True + # TODO(crcrpar): [low prec params & their higher prec copy] + # Support AMP with FP16/BF16 model params which would need + # higher prec copy of params to do update math in higher prec to + # alleviate the loss of information. + fused_supported_devices = _get_fused_kernels_supported_devices() + if not all( + p.device.type in fused_supported_devices and torch.is_floating_point(p) + for pg in self.param_groups + for p in pg["params"] + ): + raise RuntimeError( + "`fused=True` requires all the params to be floating point Tensors of " + f"supported devices: {fused_supported_devices}."
+ ) + if foreach: + raise RuntimeError("`fused` and `foreach` cannot be `True` together.") + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("amsgrad", False) + group.setdefault("maximize", False) + group.setdefault("foreach", None) + group.setdefault("capturable", False) + group.setdefault("differentiable", False) + fused = group.setdefault("fused", None) + for p in group["params"]: + p_state = self.state.get(p, []) + if len(p_state) != 0 and not torch.is_tensor(p_state["step"]): + step_val = float(p_state["step"]) + p_state["step"] = ( + torch.tensor( + step_val, + dtype=_get_scalar_dtype(is_fused=fused), + device=p.device, + ) + if group["capturable"] or group["fused"] + else torch.tensor(step_val, dtype=_get_scalar_dtype()) + ) + + def _init_group( + self, + group, + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + ): + has_complex = False + for p in group["params"]: + if p.grad is not None: + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError( + "Adam does not support sparse gradients, please consider SparseAdam instead" + ) + grads.append(p.grad) + + state = self.state[p] + # Lazy state initialization + if len(state) == 0: + # note(crcrpar): [special device hosting for step] + # Deliberately host `step` on CPU if both capturable and fused are off. + # This is because kernel launches are costly on CUDA and XLA. + state["step"] = ( + torch.zeros( + (), + dtype=_get_scalar_dtype(is_fused=group["fused"]), + device=p.device, + ) + if group["capturable"] or group["fused"] + else torch.tensor(0.0, dtype=_get_scalar_dtype()) + ) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + if group["amsgrad"]: + # Maintains max of all exp. moving avg. of sq. grad. values + state["max_exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + + if group["amsgrad"]: + max_exp_avg_sqs.append(state["max_exp_avg_sq"]) + if group["differentiable"] and state["step"].requires_grad: + raise RuntimeError( + "`requires_grad` is not supported for `step` in differentiable mode" + ) + + # Foreach without capturable does not support a tensor lr + if ( + group["foreach"] + and torch.is_tensor(group["lr"]) + and not group["capturable"] + ): + raise RuntimeError( + "lr as a Tensor is not supported for capturable=False and foreach=True" + ) + + state_steps.append(state["step"]) + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad: List[Tensor] = [] + grads: List[Tensor] = [] + exp_avgs: List[Tensor] = [] + exp_avg_sqs: List[Tensor] = [] + max_exp_avg_sqs: List[Tensor] = [] + state_steps: List[Tensor] = [] + beta1, beta2 = group["betas"] + + has_complex = self._init_group( + group, + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + ) + + adam( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=group["amsgrad"], + has_complex=has_complex, + beta1=beta1, + beta2=beta2, + lr=group["lr"], + weight_decay=group["weight_decay"], + eps=group["eps"], + maximize=group["maximize"], + foreach=group["foreach"], + capturable=group["capturable"], + differentiable=group["differentiable"], + fused=group["fused"], + grad_scale=getattr(self, "grad_scale", None), + found_inf=getattr(self, "found_inf", None), + ) + + return loss + + +Adam.__doc__ = ( + r"""Implements Adam algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2 + \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)} \\ + &\hspace{13mm} \lambda \text{ (weight decay)}, \: \textit{amsgrad}, + \:\textit{maximize} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + v_0\leftarrow 0 \text{ (second moment)},\: \widehat{v_0}^{max}\leftarrow 0\\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + + &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ + &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\textbf{if} \: amsgrad \\ + &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max}, + \widehat{v_t}) \\ + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_. + """ + + rf""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR + is not yet supported for all our implementations. Please use a float + LR if you are not also specifying fused=True or capturable=True. 
+ betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (bool, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + {_foreach_doc} + {_maximize_doc} + {_capturable_doc} + {_differentiable_doc} + {_fused_doc} + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + + """ +) + + +def _single_tensor_adam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + has_complex: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, + differentiable: bool, +): + assert grad_scale is None and found_inf is None + + if torch.jit.is_scripting(): + # this assert is due to JIT being dumb and not realizing that the ops below + # have overloads to handle both float and Tensor lrs, so we just assert it's + # a float since most people using JIT are using floats + assert isinstance(lr, float) + + for i, param in enumerate(params): + grad = grads[i] if not maximize else -grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + step_t = state_steps[i] + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices() + assert ( + param.device.type == step_t.device.type + and param.device.type in capturable_supported_devices + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." + + # update step + step_t += 1 + + if weight_decay != 0: + grad = grad.add(param, alpha=weight_decay) + + if torch.is_complex(param): + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + exp_avg_sq = torch.view_as_real(exp_avg_sq) + if amsgrad: + max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i]) + param = torch.view_as_real(param) + + # Decay the first and second moment running average coefficient + exp_avg.lerp_(grad, 1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2) + + if capturable or differentiable: + step = step_t + + bias_correction1 = 1 - beta1**step + bias_correction2 = 1 - beta2**step + + step_size = lr / bias_correction1 + step_size_neg = step_size.neg() + + bias_correction2_sqrt = bias_correction2.sqrt() + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + if differentiable: + max_exp_avg_sq = max_exp_avg_sqs[i].clone() + else: + max_exp_avg_sq = max_exp_avg_sqs[i] + + max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq)) + + # Uses the max. for normalizing running avg. 
of gradient + # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write + # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor) + denom = ( + max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg) + ).add_(eps / step_size_neg) + else: + denom = ( + exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg) + ).add_(eps / step_size_neg) + + param.addcdiv_(exp_avg, denom) + else: + step = _get_value(step_t) + + bias_correction1 = 1 - beta1**step + bias_correction2 = 1 - beta2**step + + step_size = lr / bias_correction1 + + bias_correction2_sqrt = _dispatch_sqrt(bias_correction2) + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i]) + + # Use the max. for normalizing running avg. of gradient + denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps) + else: + denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) + + param.addcdiv_(exp_avg, denom, value=-step_size) + + # Lastly, switch back to complex view + if amsgrad and torch.is_complex(params[i]): + max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i]) + + +def _multi_tensor_adam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + has_complex: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, + differentiable: bool, +): + if len(params) == 0: + return + + if isinstance(lr, Tensor) and not capturable: + raise RuntimeError( + "lr as a Tensor is not supported for capturable=False and foreach=True" + ) + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices( + supports_xla=False + ) + assert all( + p.device.type == step.device.type + and p.device.type in capturable_supported_devices + for p, step in zip(params, state_steps) + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." + + assert grad_scale is None and found_inf is None + + assert not differentiable, "_foreach ops don't support autograd" + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps] + ) + for ( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + ), _ in grouped_tensors.values(): + # Handle complex parameters + if has_complex: + if amsgrad: + _view_as_real( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + ) + else: + _view_as_real( + device_params, device_grads, device_exp_avgs, device_exp_avg_sqs + ) + + if maximize: + device_grads = torch._foreach_neg(device_grads) # type: ignore[assignment] + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. 
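# Algebra behind the folded denominator in the capturable branch above
# (sketch; step_size_neg = -lr / bias_correction1):
#   theta -= (lr / bc1) * m / (sqrt(v / bc2) + eps)
# is rewritten as theta += m / denom with
#   denom = sqrt(v) / (sqrt(bc2) * step_size_neg) + eps / step_size_neg
# so addcdiv_ needs no scalar value= argument, which keeps the update legal
# when lr is a capturable Tensor.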
+ if device_state_steps[0].is_cpu: + torch._foreach_add_( + device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0 + ) + else: + torch._foreach_add_(device_state_steps, 1) + + if weight_decay != 0: + # Re-use the intermediate memory (device_grads) already allocated for maximize + if maximize: + torch._foreach_add_(device_grads, device_params, alpha=weight_decay) + else: + device_grads = torch._foreach_add( # type: ignore[assignment] + device_grads, device_params, alpha=weight_decay + ) + + # Decay the first and second moment running average coefficient + torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1) + + torch._foreach_mul_(device_exp_avg_sqs, beta2) + torch._foreach_addcmul_( + device_exp_avg_sqs, device_grads, device_grads, 1 - beta2 + ) + + # Delete the local intermediate since it won't be used anymore to save on peak memory + del device_grads + + bias_correction1: Union[Tuple[Tensor, ...], List[Tensor]] + bias_correction2: Union[Tuple[Tensor, ...], List[Tensor]] + bias_correction2_sqrt: Union[Tuple[Tensor, ...], List[Tensor]] + if capturable: + bias_correction1 = torch._foreach_pow(beta1, device_state_steps) + bias_correction2 = torch._foreach_pow(beta2, device_state_steps) + # foreach_sub doesn't allow a scalar as the first arg + torch._foreach_sub_(bias_correction1, 1) + torch._foreach_sub_(bias_correction2, 1) + # we do not negate bias_correction1 as it'll need to be negated later anyway + torch._foreach_neg_(bias_correction2) + + # foreach_div doesn't allow a scalar as the first arg + torch._foreach_div_(bias_correction1, lr) + torch._foreach_reciprocal_(bias_correction1) + + torch._foreach_sqrt_(bias_correction2) + + # Re-assign for clarity as we maintain minimal intermediates: we'll have + # step_size = - lr / (1 - beta1 ^ t) where t = num_steps + # bias_correction2_sqrt = sqrt(1 - beta2 ^ t) + step_size = bias_correction1 + bias_correction2_sqrt = bias_correction2 + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs) # type: ignore[assignment] + + # Set intermediate to the max. for normalizing running avg. of gradient when amsgrad + exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs) + else: + exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + torch._foreach_div_(exp_avg_sq_sqrt, step_size) + + # at this point, exp_avg_sq_sqrt = - (1 - beta^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr + torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt) + else: + bias_correction1 = [ + 1 - beta1 ** _get_value(step) for step in device_state_steps + ] + bias_correction2 = [ + 1 - beta2 ** _get_value(step) for step in device_state_steps + ] + + step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1]) + + bias_correction2_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2] # type: ignore[arg-type] + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs) + + # Use the max. for normalizing running avg. 
of gradient + exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs) + else: + exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + torch._foreach_addcdiv_( + device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size # type: ignore[arg-type] + ) + + +def _fused_adam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + has_complex: bool, # Needed for consistency. + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, # Needed for consistency. + differentiable: bool, +) -> None: + if not params: + return + if differentiable: + raise RuntimeError("Adam with fused=True does not support differentiable=True") + + grad_scale_dict: DeviceDict = ( + {grad_scale.device: grad_scale} if grad_scale is not None else {} + ) + found_inf_dict: DeviceDict = ( + {found_inf.device: found_inf} if found_inf is not None else {} + ) + + # We only shuffle around the lr when it is a Tensor and on CUDA, otherwise, we prefer + # treating it as a scalar. + lr_dict: Optional[DeviceDict] = ( + {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None + ) + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps] + ) + for (device, _), ( + ( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + ), + _, + ) in grouped_tensors.items(): + device_grad_scale, device_found_inf = None, None + if grad_scale is not None: + device_grad_scale = grad_scale_dict.setdefault( + device, grad_scale.to(device, non_blocking=True) + ) + if found_inf is not None: + device_found_inf = found_inf_dict.setdefault( + device, found_inf.to(device, non_blocking=True) + ) + if lr_dict is not None and device not in lr_dict: + lr_dict[device] = lr.to(device=device, non_blocking=True) # type: ignore[union-attr] + lr = lr_dict[device] + torch._foreach_add_(device_state_steps, 1) + torch._fused_adam_( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + amsgrad=amsgrad, + lr=lr, + beta1=beta1, + beta2=beta2, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + grad_scale=device_grad_scale, + found_inf=device_found_inf, + ) + if device_found_inf is not None: + torch._foreach_sub_( + device_state_steps, [device_found_inf] * len(device_state_steps) + ) + + +@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adam) +def adam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None, + grad_scale: Optional[Tensor] = None, + found_inf: Optional[Tensor] = None, + has_complex: bool = False, + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + 
eps: float, + maximize: bool, +): + r"""Functional API that performs Adam algorithm computation. + + See :class:`~torch.optim.Adam` for details. + """ + # Respect when the user inputs False/True for foreach or fused. We only want to change + # the default when neither have been user-specified. Note that we default to foreach + # and pass False to use_fused. This is not a mistake--we want to give the fused impl + # bake-in time before making it the default, even if it is typically faster. + if fused is None and foreach is None: + _, foreach = _default_to_fused_or_foreach( + params, differentiable, use_fused=False + ) + # Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False. + if foreach and isinstance(lr, Tensor) and not capturable: + foreach = False + if fused is None: + fused = False + if foreach is None: + foreach = False + + # this check is slow during compilation, so we skip it + # if it's strictly needed we can add this check back in dynamo + if not torch._utils.is_compiling() and not all( + isinstance(t, torch.Tensor) for t in state_steps + ): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + if fused and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with fused optimizers") + + if fused and not torch.jit.is_scripting(): + func = _fused_adam + elif foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adam + else: + func = _single_tensor_adam + + func( + params, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + has_complex=has_complex, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + capturable=capturable, + differentiable=differentiable, + grad_scale=grad_scale, + found_inf=found_inf, + ) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/adamax.py b/parrot/lib/python3.10/site-packages/torch/optim/adamax.py new file mode 100644 index 0000000000000000000000000000000000000000..27caa5f9d81cdf696af7bb19310267f146c55717 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/adamax.py @@ -0,0 +1,463 @@ +# mypy: allow-untyped-defs +from typing import List, Optional, Tuple, Union + +import torch +from torch import Tensor + +from .optimizer import ( + _capturable_doc, + _default_to_fused_or_foreach, + _differentiable_doc, + _disable_dynamo_if_unsupported, + _foreach_doc, + _get_capturable_supported_devices, + _get_scalar_dtype, + _get_value, + _maximize_doc, + _use_grad_for_differentiable, + _view_as_real, + Optimizer, + ParamsT, +) + +__all__ = ["Adamax", "adamax"] + + +class Adamax(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = 2e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0, + foreach: Optional[bool] = None, + *, + maximize: bool = False, + differentiable: bool = False, + capturable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + 
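+ # The validated hyperparameters become per-param-group defaults below. An + # illustrative sketch (the attribute names on model are hypothetical): + # Adamax([{"params": model.base.parameters()}, + # {"params": model.head.parameters(), "lr": 1e-2}], lr=2e-3) + # gives the head group its own lr while the base group falls back to 2e-3.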
defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + capturable=capturable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + group.setdefault("capturable", False) + for p in group["params"]: + p_state = self.state.get(p, []) + if len(p_state) != 0 and not torch.is_tensor(p_state["step"]): + step_val = float(p_state["step"]) + p_state["step"] = ( + torch.tensor( + step_val, dtype=_get_scalar_dtype(), device=p.device + ) + if group["capturable"] + else torch.tensor(step_val, dtype=_get_scalar_dtype()) + ) + + def _init_group( + self, group, params_with_grad, grads, exp_avgs, exp_infs, state_steps + ): + has_complex = False + for p in group["params"]: + if p.grad is None: + continue + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError("Adamax does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = ( + torch.zeros((), dtype=_get_scalar_dtype(), device=p.device) + if group["capturable"] + else torch.tensor(0.0, dtype=_get_scalar_dtype()) + ) + state["exp_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + state["exp_inf"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + exp_avgs.append(state["exp_avg"]) + exp_infs.append(state["exp_inf"]) + state_steps.append(state["step"]) + + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad: List[Tensor] = [] + grads: List[Tensor] = [] + exp_avgs: List[Tensor] = [] + exp_infs: List[Tensor] = [] + state_steps: List[Tensor] = [] + + beta1, beta2 = group["betas"] + eps = group["eps"] + lr = group["lr"] + weight_decay = group["weight_decay"] + foreach = group["foreach"] + maximize = group["maximize"] + differentiable = group["differentiable"] + capturable = group["capturable"] + + has_complex = self._init_group( + group, params_with_grad, grads, exp_avgs, exp_infs, state_steps + ) + + adamax( + params_with_grad, + grads, + exp_avgs, + exp_infs, + state_steps, + eps=eps, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + capturable=capturable, + has_complex=has_complex, + ) + + return loss + + +Adamax.__doc__ = ( + r"""Implements Adamax algorithm (a variant of Adam based on infinity norm). + + .. 
math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2 + \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)}, + \: \lambda \text{ (weight decay)}, \\ + &\hspace{13mm} \epsilon \text{ (epsilon)} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + u_0 \leftarrow 0 \text{ ( infinity norm)} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}if \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}u_t \leftarrow \mathrm{max}(\beta_2 u_{t-1}, |g_{t}|+\epsilon) \\ + &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_. + """ + + rf""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 2e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + {_foreach_doc} + {_maximize_doc} + {_differentiable_doc} + {_capturable_doc} + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + + """ +) + + +def _single_tensor_adamax( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_infs: List[Tensor], + state_steps: List[Tensor], + *, + eps: float, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + maximize: bool, + differentiable: bool, + capturable: bool, + has_complex: bool, +): + for i, param in enumerate(params): + grad = grads[i] + grad = grad if not maximize else -grad + exp_avg = exp_avgs[i] + exp_inf = exp_infs[i] + step_t = state_steps[i] + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices() + assert ( + param.device.type == step_t.device.type + and param.device.type in capturable_supported_devices + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." + + # update step + step_t += 1 + + if weight_decay != 0: + grad = grad.add(param, alpha=weight_decay) + + if torch.is_complex(param): + param = torch.view_as_real(param) + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + exp_inf = torch.view_as_real(exp_inf) + + # Update biased first moment estimate. + exp_avg.lerp_(grad, 1 - beta1) + # Update the exponentially weighted infinity norm. + if not differentiable: + torch.maximum( + exp_inf.mul_(beta2), + grad.abs().add_(eps), + out=exp_inf, + ) + else: + norm_buf = torch.cat( + [exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], + 0, + ) + exp_inf.copy_(torch.amax(norm_buf, 0, keepdim=False)) + + if capturable: + # why jump through extra hoops and negate bias_correction? 
check out #121238 + # once fixed, we should use bias_correction with addcdiv value=-1 for readability + neg_bias_correction = beta1**step_t - 1 + neg_bias_correction.div_(lr) + denom = exp_inf * neg_bias_correction + param.addcdiv_(exp_avg, denom) + else: + bias_correction = 1 - beta1 ** _get_value(step_t) + clr = lr / bias_correction + + param.addcdiv_(exp_avg, exp_inf, value=-clr) + + +def _multi_tensor_adamax( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_infs: List[Tensor], + state_steps: List[Tensor], + *, + eps: float, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + maximize: bool, + differentiable: bool, + capturable: bool, + has_complex: bool, +): + assert not differentiable, "_foreach ops don't support autograd" + + if len(params) == 0: + return + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices( + supports_xla=False + ) + assert all( + p.device.type == step.device.type + and p.device.type in capturable_supported_devices + for p, step in zip(params, state_steps) + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, exp_avgs, exp_infs, state_steps] + ) + for ( + grouped_params, + grouped_grads, + grouped_exp_avgs, + grouped_exp_infs, + grouped_state_steps, + ), _ in grouped_tensors.values(): + if has_complex: + _view_as_real( + grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs + ) + + if maximize: + grouped_grads = torch._foreach_neg(grouped_grads) # type: ignore[assignment] + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. + if grouped_state_steps[0].is_cpu: + torch._foreach_add_( + grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0 + ) + else: + torch._foreach_add_(grouped_state_steps, 1) + + if weight_decay != 0: + if maximize: + # Re-use the intermediate memory (grouped_grads) already allocated for maximize + torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay) + else: + grouped_grads = torch._foreach_add( # type: ignore[assignment] + grouped_grads, grouped_params, alpha=weight_decay + ) + + # Update biased first moment estimate. + torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1) + + # Update the exponentially weighted infinity norm. 
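+ # i.e. u_t = max(beta2 * u_{t-1}, |g_t| + eps), decomposed into foreach ops below. + # A per-tensor sketch of the same update (what _single_tensor_adamax does above): + # exp_inf.mul_(beta2) + # torch.maximum(exp_inf, grad.abs().add_(eps), out=exp_inf)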
+ torch._foreach_mul_(grouped_exp_infs, beta2) + + # in this case, we need to introduce a copy of the grads + # since one has not been introduced previously + if not maximize and weight_decay == 0: + grouped_grads = torch._foreach_abs(grouped_grads) # type: ignore[assignment] + else: + torch._foreach_abs_(grouped_grads) + + torch._foreach_add_(grouped_grads, eps) + torch._foreach_maximum_(grouped_exp_infs, grouped_grads) + + bias_corrections: Union[Tuple[Tensor, ...], List[Tensor]] + if capturable: + bias_corrections = torch._foreach_pow(beta1, grouped_state_steps) + # foreach_sub doesn't allow a scalar as the first arg + torch._foreach_sub_(bias_corrections, 1) + torch._foreach_div_(bias_corrections, lr) + + denom = torch._foreach_mul(grouped_exp_infs, bias_corrections) + torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, denom) + else: + bias_corrections = [ + 1 - beta1 ** _get_value(step) for step in grouped_state_steps + ] + step_size = [(_get_value(lr) / bc) * -1 for bc in bias_corrections] + torch._foreach_addcdiv_( + grouped_params, grouped_exp_avgs, grouped_exp_infs, step_size + ) + + +@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adamax) +def adamax( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_infs: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + capturable: bool = False, + has_complex: bool = False, + *, + eps: float, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, +): + r"""Functional API that performs adamax algorithm computation. + + See :class:`~torch.optim.Adamax` for details. 
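+ + A minimal end-to-end sketch via the class API, the usual entry point + (``model``, ``loss_fn``, ``input`` and ``target`` are assumed to exist):: + + >>> optimizer = torch.optim.Adamax(model.parameters(), lr=2e-3) + >>> optimizer.zero_grad() + >>> loss_fn(model(input), target).backward() + >>> optimizer.step()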
+ """ + + if not torch._utils.is_compiling() and not all( + isinstance(t, torch.Tensor) for t in state_steps + ): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + if foreach is None: + _, foreach = _default_to_fused_or_foreach( + params, differentiable, use_fused=False + ) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adamax + else: + func = _single_tensor_adamax + + func( + params, + grads, + exp_avgs, + exp_infs, + state_steps, + eps=eps, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + maximize=maximize, + differentiable=differentiable, + has_complex=has_complex, + capturable=capturable, + ) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/adamw.py b/parrot/lib/python3.10/site-packages/torch/optim/adamw.py new file mode 100644 index 0000000000000000000000000000000000000000..00931bed022727b54508b239742bdaf6e3a956ff --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/adamw.py @@ -0,0 +1,786 @@ +# mypy: allow-untyped-defs +from typing import cast, List, Optional, Tuple, Union + +import torch +from torch import Tensor +from torch.utils._foreach_utils import _get_fused_kernels_supported_devices +from .optimizer import ( + _capturable_doc, + _default_to_fused_or_foreach, + _differentiable_doc, + _disable_dynamo_if_unsupported, + _dispatch_sqrt, + _foreach_doc, + _fused_doc, + _get_capturable_supported_devices, + _get_scalar_dtype, + _get_value, + _maximize_doc, + _stack_if_compiling, + _use_grad_for_differentiable, + _view_as_real, + DeviceDict, + Optimizer, + ParamsT, +) + +__all__ = ["AdamW", "adamw"] + + +class AdamW(Optimizer): + def __init__( + self, + params: ParamsT, + lr: Union[float, Tensor] = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 1e-2, + amsgrad: bool = False, + *, + maximize: bool = False, + foreach: Optional[bool] = None, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if isinstance(lr, Tensor) and foreach and not capturable: + raise ValueError( + "lr as a Tensor is not supported for capturable=False and foreach=True" + ) + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + amsgrad=amsgrad, + foreach=foreach, + maximize=maximize, + capturable=capturable, + differentiable=differentiable, + fused=fused, + ) + super().__init__(params, defaults) + + if fused: + if differentiable: + raise RuntimeError("`fused` does not support `differentiable`") + self._step_supports_amp_scaling = True + # TODO(crcrpar): [low prec params & their higher prec copy] + # Suppor AMP with FP16/BF16 model params which would need + # higher prec copy of params to do update math in higher prec to + # alleviate the loss of information. 
+ fused_supported_devices = _get_fused_kernels_supported_devices() + if not all( + p.device.type in fused_supported_devices and torch.is_floating_point(p) + for pg in self.param_groups + for p in pg["params"] + ): + raise RuntimeError( + "`fused=True` requires all the params to be floating point Tensors of " + f"supported devices: {fused_supported_devices}." + ) + if foreach: + raise RuntimeError("`fused` and `foreach` cannot be `True` together.") + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("amsgrad", False) + group.setdefault("maximize", False) + group.setdefault("foreach", None) + group.setdefault("capturable", False) + group.setdefault("differentiable", False) + fused = group.setdefault("fused", None) + for p in group["params"]: + p_state = self.state.get(p, []) + if len(p_state) != 0 and not torch.is_tensor(p_state["step"]): + step_val = float(p_state["step"]) + p_state["step"] = ( + torch.tensor( + step_val, + dtype=_get_scalar_dtype(is_fused=fused), + device=p.device, + ) + if group["capturable"] or group["fused"] + else torch.tensor(step_val, dtype=_get_scalar_dtype()) + ) + + def _init_group( + self, + group, + params_with_grad, + grads, + amsgrad, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + ): + has_complex = False + for p in group["params"]: + if p.grad is None: + continue + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError("AdamW does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: + # note(crcrpar): Deliberately host `step` on CPU if both capturable and fused are off. + # This is because kernel launches are costly on CUDA and XLA. + state["step"] = ( + torch.zeros( + (), + dtype=_get_scalar_dtype(is_fused=group["fused"]), + device=p.device, + ) + if group["capturable"] or group["fused"] + else torch.tensor(0.0, dtype=_get_scalar_dtype()) + ) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state["max_exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + + if group["amsgrad"]: + max_exp_avg_sqs.append(state["max_exp_avg_sq"]) + if group["differentiable"] and state["step"].requires_grad: + raise RuntimeError( + "`requires_grad` is not supported for `step` in differentiable mode" + ) + + # Foreach without capturable does not support a tensor lr + if ( + group["foreach"] + and isinstance(group["lr"], Tensor) + and not group["capturable"] + ): + raise RuntimeError( + "lr as a Tensor is not supported for capturable=False and foreach=True" + ) + + state_steps.append(state["step"]) + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. 
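+ + Example (an illustrative sketch; ``model``, ``loss_fn``, ``input`` and ``target`` are assumed to exist):: + + >>> optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2) + >>> optimizer.zero_grad() + >>> loss_fn(model(input), target).backward() + >>> optimizer.step()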
+ """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad: List[Tensor] = [] + grads: List[Tensor] = [] + exp_avgs: List[Tensor] = [] + exp_avg_sqs: List[Tensor] = [] + max_exp_avg_sqs: List[Tensor] = [] + state_steps: List[Tensor] = [] + amsgrad: bool = group["amsgrad"] + beta1, beta2 = cast(Tuple[float, float], group["betas"]) + + has_complex = self._init_group( + group, + params_with_grad, + grads, + amsgrad, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + ) + + adamw( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + beta1=beta1, + beta2=beta2, + lr=group["lr"], + weight_decay=group["weight_decay"], + eps=group["eps"], + maximize=group["maximize"], + foreach=group["foreach"], + capturable=group["capturable"], + differentiable=group["differentiable"], + fused=group["fused"], + grad_scale=getattr(self, "grad_scale", None), + found_inf=getattr(self, "found_inf", None), + has_complex=has_complex, + ) + + return loss + + +AdamW.__doc__ = ( + r"""Implements AdamW algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2 + \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)}, + \: \epsilon \text{ (epsilon)} \\ + &\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad}, + \: \textit{maximize} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0 + \text{ ( second moment)}, \: \widehat{v_0}^{max}\leftarrow 0 \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + + &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ + &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\textbf{if} \: amsgrad \\ + &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max}, + \widehat{v_t}) \\ + &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_. + """ + + rf""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR + is not yet supported for all our implementations. Please use a float + LR if you are not also specifying fused=True or capturable=True. 
+ betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay coefficient (default: 1e-2) + amsgrad (bool, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + {_maximize_doc} + {_foreach_doc} + {_capturable_doc} + {_differentiable_doc} + {_fused_doc} + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + + """ +) + + +def _single_tensor_adamw( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[Tensor, float], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, + differentiable: bool, + has_complex: bool, +): + assert grad_scale is None and found_inf is None + + if torch.jit.is_scripting(): + # this assert is due to JIT being dumb and not realizing that the ops below + # have overloads to handle both float and Tensor lrs, so we just assert it's + # a float since most people using JIT are using floats + assert isinstance(lr, float) + + for i, param in enumerate(params): + grad = grads[i] if not maximize else -grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + step_t = state_steps[i] + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices() + assert ( + param.device.type == step_t.device.type + and param.device.type in capturable_supported_devices + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." + + if torch.is_complex(param): + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + exp_avg_sq = torch.view_as_real(exp_avg_sq) + if amsgrad: + max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i]) + param = torch.view_as_real(param) + + # update step + step_t += 1 + + # Perform stepweight decay + param.mul_(1 - lr * weight_decay) + + # Decay the first and second moment running average coefficient + exp_avg.lerp_(grad, 1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + if capturable or differentiable: + step = step_t + + bias_correction1 = 1 - beta1**step + bias_correction2 = 1 - beta2**step + + step_size = lr / bias_correction1 + step_size_neg = step_size.neg() + + bias_correction2_sqrt = bias_correction2.sqrt() + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + if differentiable: + max_exp_avg_sq = max_exp_avg_sqs[i].clone() + else: + max_exp_avg_sq = max_exp_avg_sqs[i] + + max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq)) + + # Uses the max. for normalizing running avg. 
of gradient + # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write + # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor) + denom = ( + max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg) + ).add_(eps / step_size_neg) + else: + denom = ( + exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg) + ).add_(eps / step_size_neg) + + param.addcdiv_(exp_avg, denom) + else: + step = _get_value(step_t) + + bias_correction1 = 1 - beta1**step + bias_correction2 = 1 - beta2**step + + step_size = lr / bias_correction1 + + bias_correction2_sqrt = _dispatch_sqrt(bias_correction2) + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i]) + + # Use the max. for normalizing running avg. of gradient + denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps) + else: + denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) + + param.addcdiv_(exp_avg, denom, value=-step_size) + + # Lastly, switch back to complex view + if amsgrad and torch.is_complex(params[i]): + max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i]) + + +def _multi_tensor_adamw( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[Tensor, float], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, + differentiable: bool, + has_complex: bool, +): + if len(params) == 0: + return + + if isinstance(lr, Tensor) and not capturable: + raise RuntimeError( + "lr as a Tensor is not supported for capturable=False and foreach=True" + ) + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices( + supports_xla=False + ) + assert all( + p.device.type == step.device.type + and p.device.type in capturable_supported_devices + for p, step in zip(params, state_steps) + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." + + assert not differentiable, "_foreach ops don't support autograd" + + assert grad_scale is None and found_inf is None + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps] + ) + for ( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + ), _ in grouped_tensors.values(): + if has_complex: + if amsgrad: + _view_as_real( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + ) + else: + _view_as_real( + device_params, device_grads, device_exp_avgs, device_exp_avg_sqs + ) + + if maximize: + device_grads = torch._foreach_neg(device_grads) # type: ignore[assignment] + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. 
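+ # Separately, note the decay applied a few lines below: AdamW uses *decoupled* weight decay, + # torch._foreach_mul_(device_params, 1 - lr * weight_decay) # shrink params directly + # whereas _multi_tensor_adam in adam.py folds an L2 term into the gradients instead, + # torch._foreach_add_(device_grads, device_params, alpha=weight_decay) # L2 via grads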
+ if device_state_steps[0].is_cpu: + torch._foreach_add_( + device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0 + ) + else: + torch._foreach_add_(device_state_steps, 1) + + # Perform stepweight decay + if weight_decay != 0: + torch._foreach_mul_(device_params, 1 - lr * weight_decay) + + # Decay the first and second moment running average coefficient + torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1) + + torch._foreach_mul_(device_exp_avg_sqs, beta2) + torch._foreach_addcmul_( + device_exp_avg_sqs, device_grads, device_grads, 1 - beta2 + ) + + # Delete the local intermediate since it won't be used anymore to save on peak memory + del device_grads + + bias_correction1: Union[Tuple[Tensor, ...], List[Tensor]] + bias_correction2: Union[Tuple[Tensor, ...], List[Tensor]] + bias_correction2_sqrt: Union[Tuple[Tensor, ...], List[Tensor]] + + if capturable: + bias_correction1 = torch._foreach_pow(beta1, device_state_steps) + bias_correction2 = torch._foreach_pow(beta2, device_state_steps) + # foreach_sub doesn't allow a scalar as the first arg + torch._foreach_sub_(bias_correction1, 1) + torch._foreach_sub_(bias_correction2, 1) + # we do not negate bias_correction1 as it'll need to be negated later anyway + torch._foreach_neg_(bias_correction2) + + # foreach_div doesn't allow a scalar as the first arg + torch._foreach_div_(bias_correction1, lr) + torch._foreach_reciprocal_(bias_correction1) + + torch._foreach_sqrt_(bias_correction2) + + # Re-assign for clarity as we maintain minimal intermediates: we'll have + # step_size = - lr / (1 - beta1 ^ t) where t = num_steps + # bias_correction2_sqrt = sqrt(1 - beta2 ^ t) + step_size = bias_correction1 + bias_correction2_sqrt = bias_correction2 + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs) + + # Use the max. for normalizing running avg. of gradient + exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs) + else: + exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + torch._foreach_div_(exp_avg_sq_sqrt, step_size) + + # at this point, exp_avg_sq_sqrt = - (1 - beta^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr + torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt) + else: + bias_correction1 = [ + 1 - beta1 ** _get_value(step) for step in device_state_steps + ] + bias_correction2 = [ + 1 - beta2 ** _get_value(step) for step in device_state_steps + ] + + step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1]) + + bias_correction2_sqrt = [ + _dispatch_sqrt(bc) for bc in bias_correction2 # type: ignore[arg-type] + ] + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs) + + # Use the max. for normalizing running avg. 
of gradient + exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs) + else: + exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + torch._foreach_addcdiv_( + device_params, + device_exp_avgs, + exp_avg_sq_sqrt, + step_size, # type: ignore[arg-type] + ) + + +def _fused_adamw( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[Tensor, float], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, # Needed for consistency. + differentiable: bool, + has_complex: bool, # Needed for consistency. +) -> None: + if not params: + return + if differentiable: + raise RuntimeError("Adam with fused=True does not support differentiable=True") + + grad_scale_dict: DeviceDict = ( + {grad_scale.device: grad_scale} if grad_scale is not None else {} + ) + found_inf_dict: DeviceDict = ( + {found_inf.device: found_inf} if found_inf is not None else {} + ) + + # We only shuffle around the lr when it is a Tensor and on CUDA, otherwise, we prefer + # treating it as a scalar. + lr_dict: Optional[DeviceDict] = ( + {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None + ) + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps] + ) + for (device, _), ( + ( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + ), + _, + ) in grouped_tensors.items(): + device_grad_scale, device_found_inf = None, None + if grad_scale is not None: + device_grad_scale = grad_scale_dict.setdefault( + device, grad_scale.to(device, non_blocking=True) + ) + if found_inf is not None: + device_found_inf = found_inf_dict.setdefault( + device, found_inf.to(device, non_blocking=True) + ) + if lr_dict is not None and device not in lr_dict: + lr = lr_dict.setdefault( + device, lr.to(device=device, non_blocking=True) # type: ignore[union-attr] + ) + torch._foreach_add_(device_state_steps, 1) + torch._fused_adamw_( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + amsgrad=amsgrad, + lr=lr, + beta1=beta1, + beta2=beta2, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + grad_scale=device_grad_scale, + found_inf=device_found_inf, + ) + if device_found_inf is not None: + torch._foreach_sub_( + device_state_steps, [device_found_inf] * len(device_state_steps) + ) + + +@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adamw) +def adamw( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None, + grad_scale: Optional[Tensor] = None, + found_inf: Optional[Tensor] = None, + has_complex: bool = False, + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + 
weight_decay: float, + eps: float, + maximize: bool, +): + r"""Functional API that performs AdamW algorithm computation. + + See :class:`~torch.optim.AdamW` for details. + """ + if not torch._utils.is_compiling() and not all( + isinstance(t, torch.Tensor) for t in state_steps + ): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + # Respect when the user inputs False/True for foreach or fused. We only want to change + # the default when neither have been user-specified. Note that we default to foreach + # and pass False to use_fused. This is not a mistake--we want to give the fused impl + # bake-in time before making it the default, even if it is typically faster. + if fused is None and foreach is None: + _, foreach = _default_to_fused_or_foreach( + params, differentiable, use_fused=False + ) + # Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False. + if foreach and isinstance(lr, Tensor) and not capturable: + foreach = False + if fused is None: + fused = False + if foreach is None: + foreach = False + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + if fused and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with fused optimizers") + + if fused and not torch.jit.is_scripting(): + func = _fused_adamw + elif foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adamw + else: + func = _single_tensor_adamw + + func( + params, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + capturable=capturable, + differentiable=differentiable, + grad_scale=grad_scale, + found_inf=found_inf, + has_complex=has_complex, + ) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/asgd.py b/parrot/lib/python3.10/site-packages/torch/optim/asgd.py new file mode 100644 index 0000000000000000000000000000000000000000..84c7602912d025d46ca7929f4a5f1139678060b2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/asgd.py @@ -0,0 +1,454 @@ +# mypy: allow-untyped-defs +from typing import List, Optional, Tuple, Union + +import torch +from torch import Tensor + +from .optimizer import ( + _capturable_doc, + _default_to_fused_or_foreach, + _differentiable_doc, + _disable_dynamo_if_unsupported, + _foreach_doc, + _get_capturable_supported_devices, + _get_scalar_dtype, + _get_value, + _maximize_doc, + _use_grad_for_differentiable, + _view_as_real, + Optimizer, + ParamsT, +) + +__all__ = ["ASGD", "asgd"] + + +class ASGD(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = 1e-2, + lambd: float = 1e-4, + alpha: float = 0.75, + t0: float = 1e6, + weight_decay: float = 0, + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + capturable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + defaults = dict( + lr=lr, + lambd=lambd, + alpha=alpha, + t0=t0, + weight_decay=weight_decay, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + capturable=capturable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("foreach", None) + 
group.setdefault("maximize", False) + group.setdefault("differentiable", False) + group.setdefault("capturable", False) + for p in group["params"]: + p_state = self.state.get(p, []) + if len(p_state) != 0: + if not torch.is_tensor(p_state["step"]): + step_val = float(p_state["step"]) + p_state["step"] = torch.tensor( + step_val, dtype=_get_scalar_dtype(), device=p.device + ) + if not torch.is_tensor(p_state["eta"]): + p_state["eta"] = torch.tensor( + p_state["eta"], dtype=_get_scalar_dtype(), device=p.device + ) + if not torch.is_tensor(p_state["mu"]): + p_state["mu"] = torch.tensor( + p_state["mu"], dtype=_get_scalar_dtype(), device=p.device + ) + + def _init_group(self, group, params_with_grad, grads, mus, axs, etas, state_steps): + has_complex = False + for p in group["params"]: + if p.grad is not None: + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError("ASGD does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + # State initialization + if len(state) == 0: + state["step"] = torch.zeros( + (), device=p.device, dtype=_get_scalar_dtype() + ) + state["eta"] = ( + torch.as_tensor( + group["lr"], device=p.device, dtype=_get_scalar_dtype() + ) + .clone() + .detach() + ) + state["mu"] = torch.ones( + (), device=p.device, dtype=_get_scalar_dtype() + ) + state["ax"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + mus.append(state["mu"]) + axs.append(state["ax"]) + etas.append(state["eta"]) + state_steps.append(state["step"]) + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad: List[Tensor] = [] + grads: List[Tensor] = [] + mus: List[Tensor] = [] + axs: List[Tensor] = [] + etas: List[Tensor] = [] + state_steps: List[Tensor] = [] + + has_complex = self._init_group( + group, params_with_grad, grads, mus, axs, etas, state_steps + ) + + asgd( + params_with_grad, + grads, + axs, + mus, + etas, + state_steps, + lambd=group["lambd"], + lr=group["lr"], + t0=group["t0"], + alpha=group["alpha"], + weight_decay=group["weight_decay"], + foreach=group["foreach"], + maximize=group["maximize"], + differentiable=group["differentiable"], + capturable=group["capturable"], + has_complex=has_complex, + ) + + return loss + + +ASGD.__doc__ = rf"""Implements Averaged Stochastic Gradient Descent. + + It has been proposed in `Acceleration of stochastic approximation by + averaging`_. + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + lambd (float, optional): decay term (default: 1e-4) + alpha (float, optional): power for eta update (default: 0.75) + t0 (float, optional): point at which to start averaging (default: 1e6) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + {_foreach_doc} + {_maximize_doc} + {_differentiable_doc} + {_capturable_doc} + + .. 
_Acceleration of stochastic approximation by averaging: + https://dl.acm.org/citation.cfm?id=131098 + + """ + + +def _single_tensor_asgd( + params: List[Tensor], + grads: List[Tensor], + axs: List[Tensor], + mus: List[Tensor], + etas: List[Tensor], + state_steps: List[Tensor], + *, + lambd: float, + lr: float, + t0: float, + alpha: float, + weight_decay: float, + maximize: bool, + differentiable: bool, + capturable: bool, + has_complex: bool, +): + for i, param in enumerate(params): + grad = grads[i] + grad = grad if not maximize else -grad + mu = mus[i] + ax = axs[i] + eta = etas[i] + step_t = state_steps[i] + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices() + assert ( + param.device.type + == mu.device.type + == eta.device.type + == step_t.device.type + and param.device.type in capturable_supported_devices + ), ( + f"If capturable=True, params, mus, etas, and state_steps must be " + f"on supported devices: {capturable_supported_devices}." + ) + + if torch.is_complex(param): + grad = torch.view_as_real(grad) + param = torch.view_as_real(param) + ax = torch.view_as_real(ax) + + # update step + step_t += 1 + + if weight_decay != 0: + grad = grad.add(param, alpha=weight_decay) + + if capturable: + param.mul_(1 - lambd * eta) + param.addcmul_(grad, eta, value=-1) # update parameter + else: + eta_value = _get_value(eta) + param.mul_(1 - lambd * eta_value) # decay term + param.add_(grad, alpha=-eta_value) # update parameter + + # averaging + if capturable or mu.item() != 1: + ax.add_(param.sub(ax).mul_(mu)) + else: + ax.copy_(param) + + if capturable: + eta.copy_(lr / ((1 + lambd * lr * step_t) ** alpha)) + mu.copy_(1 / torch.maximum(step_t - t0, torch.ones_like(step_t))) + else: + step = _get_value(step_t) + new_eta = torch.as_tensor(lr / ((1 + lambd * lr * step) ** alpha)) + eta.copy_(new_eta) + new_mu = torch.as_tensor(1 / max(1, step - t0)) + mu.copy_(new_mu) + + +def _multi_tensor_asgd( + params: List[Tensor], + grads: List[Tensor], + axs: List[Tensor], + mus: List[Tensor], + etas: List[Tensor], + state_steps: List[Tensor], + *, + lambd: float, + lr: float, + t0: float, + alpha: float, + weight_decay: float, + maximize: bool, + differentiable: bool, + capturable: bool, + has_complex: bool, +): + if len(params) == 0: + return + + assert not differentiable, "_foreach ops don't support autograd" + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices( + supports_xla=False + ) + assert all( + p.device.type == mu.device.type == eta.device.type == step.device.type + and p.device.type in capturable_supported_devices + for p, mu, eta, step in zip(params, mus, etas, state_steps) + ), f"If capturable=True, params, mus, etas, and state_steps must be on supported devices: {capturable_supported_devices}." 
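+ # Scalar recurrences vectorized below (mirroring _single_tensor_asgd above): + # param <- param * (1 - lambd * eta) - eta * grad + # ax <- ax + mu * (param - ax) + # eta <- lr / (1 + lambd * lr * step) ** alpha + # mu <- 1 / max(1, step - t0)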
+ + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, axs, mus, etas, state_steps] + ) + for (device, _), ( + ( + grouped_params, + grouped_grads, + grouped_axs, + grouped_mus, + grouped_etas, + grouped_state_steps, + ), + _, + ) in grouped_tensors.items(): + if has_complex: + _view_as_real(grouped_params, grouped_grads, grouped_axs) + + if maximize: + grouped_grads = torch._foreach_neg(grouped_grads) # type: ignore[assignment] + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. + if grouped_state_steps[0].is_cpu: + torch._foreach_add_( + grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0 + ) + else: + torch._foreach_add_(grouped_state_steps, 1) + + # intermediate = grad + param * lambd + intermediate: Union[Tuple[Tensor, ...], List[Tensor]] + if weight_decay != 0: + if maximize: + torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay) + intermediate = grouped_grads + else: + intermediate = torch._foreach_add( + grouped_grads, grouped_params, alpha=weight_decay + ) + + torch._foreach_add_(intermediate, grouped_params, alpha=lambd) + else: + intermediate = torch._foreach_add( + grouped_grads, grouped_params, alpha=lambd + ) + + # update param + # param * (1 - lambd * eta) - eta * grad + # => param - param * lambd * eta - eta * grad + # => param - eta * intermediate + torch._foreach_addcmul_(grouped_params, intermediate, grouped_etas, value=-1) + del intermediate + + # update grouped_axs + # averaging: ax = ax + mu * (param - ax) + # Note (mlazos): We can't use lerp here since it requires weight to be float64 + # and our grouping code requires dtypes to match for all tensors in a group (and it should, since + # we use the mus in other places) + # all dtypes need to match, so we could introduce a cast in a loop + # but since this only adds one additional kernel launch, this looks like the cleaner + # and faster solution + intermediate = torch._foreach_sub(grouped_params, grouped_axs) + torch._foreach_addcmul_(grouped_axs, intermediate, grouped_mus) + del intermediate + + new_etas: Union[Tuple[Tensor, ...], List[Tensor]] + new_mus: Union[Tuple[Tensor, ...], List[Tensor]] + if capturable: + # update grouped_mus + new_mus = torch._foreach_sub(grouped_state_steps, t0) + torch._foreach_maximum_(new_mus, 1.0) + torch._foreach_reciprocal_(new_mus) + torch._foreach_copy_(grouped_mus, new_mus) + del new_mus + + # update eta = lr / ((1 + lambd * lr * step)^alpha) + new_etas = torch._foreach_mul(grouped_state_steps, lambd) + torch._foreach_mul_(new_etas, lr) + torch._foreach_add_(new_etas, 1) + torch._foreach_pow_(new_etas, alpha) + torch._foreach_reciprocal_(new_etas) + torch._foreach_mul_(new_etas, lr) + torch._foreach_copy_(grouped_etas, new_etas) + else: + new_etas = [ + torch.as_tensor(lr / ((1 + lambd * lr * step) ** alpha), device=device) + for step in grouped_state_steps + ] + new_mus = [ + torch.as_tensor(1 / max(1, _get_value(step) - t0), device=device) + for step in grouped_state_steps + ] + torch._foreach_copy_(grouped_etas, new_etas) + torch._foreach_copy_(grouped_mus, new_mus) + + +@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_asgd) +def asgd( + params: List[Tensor], + grads: List[Tensor], + axs: List[Tensor], + mus: List[Tensor], + etas: List[Tensor], 
+ state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + capturable: bool = False, + has_complex: bool = False, + *, + lambd: float, + lr: float, + t0: float, + alpha: float, + weight_decay: float, +): + r"""Functional API that performs asgd algorithm computation. + + See :class:`~torch.optim.ASGD` for details. + """ + if foreach is None: + _, foreach = _default_to_fused_or_foreach( + params, differentiable, use_fused=False + ) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_asgd + else: + func = _single_tensor_asgd + + func( + params, + grads, + axs, + mus, + etas, + state_steps, + lambd=lambd, + lr=lr, + t0=t0, + alpha=alpha, + weight_decay=weight_decay, + maximize=maximize, + differentiable=differentiable, + capturable=capturable, + has_complex=has_complex, + ) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/lbfgs.py b/parrot/lib/python3.10/site-packages/torch/optim/lbfgs.py new file mode 100644 index 0000000000000000000000000000000000000000..480b45c84d72a7060ccdd7019513e90f11217653 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/lbfgs.py @@ -0,0 +1,488 @@ +# mypy: allow-untyped-defs +from typing import Optional + +import torch +from .optimizer import Optimizer, ParamsT + +__all__ = ["LBFGS"] + + +def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None): + # ported from https://github.com/torch/optim/blob/master/polyinterp.lua + # Compute bounds of interpolation area + if bounds is not None: + xmin_bound, xmax_bound = bounds + else: + xmin_bound, xmax_bound = (x1, x2) if x1 <= x2 else (x2, x1) + + # Code for most common case: cubic interpolation of 2 points + # w/ function and derivative values for both + # Solution in this case (where x2 is the farthest point): + # d1 = g1 + g2 - 3*(f1-f2)/(x1-x2); + # d2 = sqrt(d1^2 - g1*g2); + # min_pos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2)); + # t_new = min(max(min_pos,xmin_bound),xmax_bound); + d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2) + d2_square = d1**2 - g1 * g2 + if d2_square >= 0: + d2 = d2_square.sqrt() + if x1 <= x2: + min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2)) + else: + min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2)) + return min(max(min_pos, xmin_bound), xmax_bound) + else: + return (xmin_bound + xmax_bound) / 2.0 + + +def _strong_wolfe( + obj_func, x, t, d, f, g, gtd, c1=1e-4, c2=0.9, tolerance_change=1e-9, max_ls=25 +): + # ported from https://github.com/torch/optim/blob/master/lswolfe.lua + d_norm = d.abs().max() + g = g.clone(memory_format=torch.contiguous_format) + # evaluate objective and gradient using initial step + f_new, g_new = obj_func(x, t, d) + ls_func_evals = 1 + gtd_new = g_new.dot(d) + + # bracket an interval containing a point satisfying the Wolfe criteria + t_prev, f_prev, g_prev, gtd_prev = 0, f, g, gtd + done = False + ls_iter = 0 + while ls_iter < max_ls: + # check conditions + if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev): + bracket = [t_prev, t] + bracket_f = [f_prev, f_new] + bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)] + bracket_gtd = [gtd_prev, gtd_new] + break + + if abs(gtd_new) <= 
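As an aside on `_cubic_interpolate` above: the closed form it uses is the standard minimizer of the cubic fit through two points with known values and derivatives. A minimal sketch with plain floats (the torch version calls `.sqrt()` on a tensor); the function name and test values here are illustrative, not part of the source:

```python
import math

def cubic_minimizer(x1, f1, g1, x2, f2, g2):
    # Same closed form as _cubic_interpolate, written for plain floats.
    d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2)
    d2_square = d1 ** 2 - g1 * g2
    if d2_square < 0:          # no real root: fall back to the midpoint
        return (min(x1, x2) + max(x1, x2)) / 2.0
    d2 = math.sqrt(d2_square)
    if x1 <= x2:
        min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2))
    else:
        min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2))
    return min(max(min_pos, min(x1, x2)), max(x1, x2))

# f(x) = (x - 2)^2 has its minimum at x = 2; the interpolant is exact for it.
f = lambda x: (x - 2) ** 2
g = lambda x: 2 * (x - 2)
print(cubic_minimizer(0.0, f(0.0), g(0.0), 3.0, f(3.0), g(3.0)))  # -> 2.0
```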
-c2 * gtd: + bracket = [t] + bracket_f = [f_new] + bracket_g = [g_new] + done = True + break + + if gtd_new >= 0: + bracket = [t_prev, t] + bracket_f = [f_prev, f_new] + bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)] + bracket_gtd = [gtd_prev, gtd_new] + break + + # interpolate + min_step = t + 0.01 * (t - t_prev) + max_step = t * 10 + tmp = t + t = _cubic_interpolate( + t_prev, f_prev, gtd_prev, t, f_new, gtd_new, bounds=(min_step, max_step) + ) + + # next step + t_prev = tmp + f_prev = f_new + g_prev = g_new.clone(memory_format=torch.contiguous_format) + gtd_prev = gtd_new + f_new, g_new = obj_func(x, t, d) + ls_func_evals += 1 + gtd_new = g_new.dot(d) + ls_iter += 1 + + # reached max number of iterations? + if ls_iter == max_ls: + bracket = [0, t] + bracket_f = [f, f_new] + bracket_g = [g, g_new] + + # zoom phase: we now have a point satisfying the criteria, or + # a bracket around it. We refine the bracket until we find the + # exact point satisfying the criteria + insuf_progress = False + # find high and low points in bracket + low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[-1] else (1, 0) # type: ignore[possibly-undefined] + while not done and ls_iter < max_ls: + # line-search bracket is so small + if abs(bracket[1] - bracket[0]) * d_norm < tolerance_change: # type: ignore[possibly-undefined] + break + + # compute new trial value + t = _cubic_interpolate( + bracket[0], + bracket_f[0], + bracket_gtd[0], # type: ignore[possibly-undefined] + bracket[1], + bracket_f[1], + bracket_gtd[1], + ) + + # test that we are making sufficient progress: + # in case `t` is so close to boundary, we mark that we are making + # insufficient progress, and if + # + we have made insufficient progress in the last step, or + # + `t` is at one of the boundary, + # we will move `t` to a position which is `0.1 * len(bracket)` + # away from the nearest boundary point. 
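Both the bracketing loop above and the zoom loop below test the same two inequalities. A float-level sketch of that pair of conditions (names are mine, not from the source):

```python
def satisfies_strong_wolfe(f0, gtd0, t, f_t, gtd_t, c1=1e-4, c2=0.9):
    """f0, gtd0: value and directional derivative at the current point;
    f_t, gtd_t: the same quantities at trial step t along direction d."""
    armijo = f_t <= f0 + c1 * t * gtd0      # sufficient decrease
    curvature = abs(gtd_t) <= -c2 * gtd0    # gtd0 < 0 for a descent direction
    return armijo and curvature
```

The bracketing phase keeps stepping out until Armijo fails or the slope turns non-negative, which guarantees the bracket contains a point satisfying both conditions.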
+ eps = 0.1 * (max(bracket) - min(bracket)) + if min(max(bracket) - t, t - min(bracket)) < eps: + # interpolation close to boundary + if insuf_progress or t >= max(bracket) or t <= min(bracket): + # evaluate at 0.1 away from boundary + if abs(t - max(bracket)) < abs(t - min(bracket)): + t = max(bracket) - eps + else: + t = min(bracket) + eps + insuf_progress = False + else: + insuf_progress = True + else: + insuf_progress = False + + # Evaluate new point + f_new, g_new = obj_func(x, t, d) + ls_func_evals += 1 + gtd_new = g_new.dot(d) + ls_iter += 1 + + if f_new > (f + c1 * t * gtd) or f_new >= bracket_f[low_pos]: + # Armijo condition not satisfied or not lower than lowest point + bracket[high_pos] = t + bracket_f[high_pos] = f_new + bracket_g[high_pos] = g_new.clone(memory_format=torch.contiguous_format) # type: ignore[possibly-undefined] + bracket_gtd[high_pos] = gtd_new + low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0) + else: + if abs(gtd_new) <= -c2 * gtd: + # Wolfe conditions satisfied + done = True + elif gtd_new * (bracket[high_pos] - bracket[low_pos]) >= 0: + # old high becomes new low + bracket[high_pos] = bracket[low_pos] + bracket_f[high_pos] = bracket_f[low_pos] + bracket_g[high_pos] = bracket_g[low_pos] # type: ignore[possibly-undefined] + bracket_gtd[high_pos] = bracket_gtd[low_pos] + + # new point becomes new low + bracket[low_pos] = t + bracket_f[low_pos] = f_new + bracket_g[low_pos] = g_new.clone(memory_format=torch.contiguous_format) # type: ignore[possibly-undefined] + bracket_gtd[low_pos] = gtd_new + + # return stuff + t = bracket[low_pos] # type: ignore[possibly-undefined] + f_new = bracket_f[low_pos] + g_new = bracket_g[low_pos] # type: ignore[possibly-undefined] + return f_new, g_new, t, ls_func_evals + + +class LBFGS(Optimizer): + """Implements L-BFGS algorithm. + + Heavily inspired by `minFunc + `_. + + .. warning:: + This optimizer doesn't support per-parameter options and parameter + groups (there can be only one). + + .. warning:: + Right now all parameters have to be on a single device. This will be + improved in the future. + + .. note:: + This is a very memory intensive optimizer (it requires additional + ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory + try reducing the history size, or use a different algorithm. + + Args: + params (iterable): iterable of parameters to optimize. Parameters must be real. + lr (float): learning rate (default: 1) + max_iter (int): maximal number of iterations per optimization step + (default: 20) + max_eval (int): maximal number of function evaluations per optimization + step (default: max_iter * 1.25). + tolerance_grad (float): termination tolerance on first order optimality + (default: 1e-7). + tolerance_change (float): termination tolerance on function + value/parameter changes (default: 1e-9). + history_size (int): update history size (default: 100). + line_search_fn (str): either 'strong_wolfe' or None (default: None). 
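Unlike most optimizers, `LBFGS.step` requires a closure that re-evaluates the loss, because the line search needs to query the objective at multiple trial points. A minimal usage sketch (the model, data, and loop length here are illustrative):

```python
import torch

x = torch.linspace(-1, 1, 64).unsqueeze(1)
y = 3 * x + 0.5
model = torch.nn.Linear(1, 1)
opt = torch.optim.LBFGS(model.parameters(), lr=1.0,
                        history_size=10, line_search_fn="strong_wolfe")

def closure():
    opt.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss

for _ in range(5):
    opt.step(closure)   # the closure may be called many times per step
```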
+ """ + + def __init__( + self, + params: ParamsT, + lr: float = 1, + max_iter: int = 20, + max_eval: Optional[int] = None, + tolerance_grad: float = 1e-7, + tolerance_change: float = 1e-9, + history_size: int = 100, + line_search_fn: Optional[str] = None, + ): + if max_eval is None: + max_eval = max_iter * 5 // 4 + defaults = dict( + lr=lr, + max_iter=max_iter, + max_eval=max_eval, + tolerance_grad=tolerance_grad, + tolerance_change=tolerance_change, + history_size=history_size, + line_search_fn=line_search_fn, + ) + super().__init__(params, defaults) + + if len(self.param_groups) != 1: + raise ValueError( + "LBFGS doesn't support per-parameter options " "(parameter groups)" + ) + + self._params = self.param_groups[0]["params"] + self._numel_cache = None + + def _numel(self): + if self._numel_cache is None: + self._numel_cache = sum( + 2 * p.numel() if torch.is_complex(p) else p.numel() + for p in self._params + ) + + return self._numel_cache + + def _gather_flat_grad(self): + views = [] + for p in self._params: + if p.grad is None: + view = p.new(p.numel()).zero_() + elif p.grad.is_sparse: + view = p.grad.to_dense().view(-1) + else: + view = p.grad.view(-1) + if torch.is_complex(view): + view = torch.view_as_real(view).view(-1) + views.append(view) + return torch.cat(views, 0) + + def _add_grad(self, step_size, update): + offset = 0 + for p in self._params: + if torch.is_complex(p): + p = torch.view_as_real(p) + numel = p.numel() + # view as to avoid deprecated pointwise semantics + p.add_(update[offset : offset + numel].view_as(p), alpha=step_size) + offset += numel + assert offset == self._numel() + + def _clone_param(self): + return [p.clone(memory_format=torch.contiguous_format) for p in self._params] + + def _set_param(self, params_data): + for p, pdata in zip(self._params, params_data): + p.copy_(pdata) + + def _directional_evaluate(self, closure, x, t, d): + self._add_grad(t, d) + loss = float(closure()) + flat_grad = self._gather_flat_grad() + self._set_param(x) + return loss, flat_grad + + @torch.no_grad() + def step(self, closure): + """Perform a single optimization step. + + Args: + closure (Callable): A closure that reevaluates the model + and returns the loss. 
+ """ + assert len(self.param_groups) == 1 + + # Make sure the closure is always called with grad enabled + closure = torch.enable_grad()(closure) + + group = self.param_groups[0] + lr = group["lr"] + max_iter = group["max_iter"] + max_eval = group["max_eval"] + tolerance_grad = group["tolerance_grad"] + tolerance_change = group["tolerance_change"] + line_search_fn = group["line_search_fn"] + history_size = group["history_size"] + + # NOTE: LBFGS has only global state, but we register it as state for + # the first param, because this helps with casting in load_state_dict + state = self.state[self._params[0]] + state.setdefault("func_evals", 0) + state.setdefault("n_iter", 0) + + # evaluate initial f(x) and df/dx + orig_loss = closure() + loss = float(orig_loss) + current_evals = 1 + state["func_evals"] += 1 + + flat_grad = self._gather_flat_grad() + opt_cond = flat_grad.abs().max() <= tolerance_grad + + # optimal condition + if opt_cond: + return orig_loss + + # tensors cached in state (for tracing) + d = state.get("d") + t = state.get("t") + old_dirs = state.get("old_dirs") + old_stps = state.get("old_stps") + ro = state.get("ro") + H_diag = state.get("H_diag") + prev_flat_grad = state.get("prev_flat_grad") + prev_loss = state.get("prev_loss") + + n_iter = 0 + # optimize for a max of max_iter iterations + while n_iter < max_iter: + # keep track of nb of iterations + n_iter += 1 + state["n_iter"] += 1 + + ############################################################ + # compute gradient descent direction + ############################################################ + if state["n_iter"] == 1: + d = flat_grad.neg() + old_dirs = [] + old_stps = [] + ro = [] + H_diag = 1 + else: + # do lbfgs update (update memory) + y = flat_grad.sub(prev_flat_grad) + s = d.mul(t) + ys = y.dot(s) # y*s + if ys > 1e-10: + # updating memory + if len(old_dirs) == history_size: + # shift history by one (limited-memory) + old_dirs.pop(0) + old_stps.pop(0) + ro.pop(0) + + # store new direction/step + old_dirs.append(y) + old_stps.append(s) + ro.append(1.0 / ys) + + # update scale of initial Hessian approximation + H_diag = ys / y.dot(y) # (y*y) + + # compute the approximate (L-BFGS) inverse Hessian + # multiplied by the gradient + num_old = len(old_dirs) + + if "al" not in state: + state["al"] = [None] * history_size + al = state["al"] + + # iteration in L-BFGS loop collapsed to use just one buffer + q = flat_grad.neg() + for i in range(num_old - 1, -1, -1): + al[i] = old_stps[i].dot(q) * ro[i] + q.add_(old_dirs[i], alpha=-al[i]) + + # multiply by initial Hessian + # r/d is the final direction + d = r = torch.mul(q, H_diag) + for i in range(num_old): + be_i = old_dirs[i].dot(r) * ro[i] + r.add_(old_stps[i], alpha=al[i] - be_i) + + if prev_flat_grad is None: + prev_flat_grad = flat_grad.clone(memory_format=torch.contiguous_format) + else: + prev_flat_grad.copy_(flat_grad) + prev_loss = loss + + ############################################################ + # compute step length + ############################################################ + # reset initial guess for step size + if state["n_iter"] == 1: + t = min(1.0, 1.0 / flat_grad.abs().sum()) * lr + else: + t = lr + + # directional derivative + gtd = flat_grad.dot(d) # g * d + + # directional derivative is below tolerance + if gtd > -tolerance_change: + break + + # optional line search: user function + ls_func_evals = 0 + if line_search_fn is not None: + # perform line search, using user function + if line_search_fn != "strong_wolfe": + raise RuntimeError("only 
'strong_wolfe' is supported") + else: + x_init = self._clone_param() + + def obj_func(x, t, d): + return self._directional_evaluate(closure, x, t, d) + + loss, flat_grad, t, ls_func_evals = _strong_wolfe( + obj_func, x_init, t, d, loss, flat_grad, gtd + ) + self._add_grad(t, d) + opt_cond = flat_grad.abs().max() <= tolerance_grad + else: + # no line search, simply move with fixed-step + self._add_grad(t, d) + if n_iter != max_iter: + # re-evaluate function only if not in last iteration + # the reason we do this: in a stochastic setting, + # no use to re-evaluate that function here + with torch.enable_grad(): + loss = float(closure()) + flat_grad = self._gather_flat_grad() + opt_cond = flat_grad.abs().max() <= tolerance_grad + ls_func_evals = 1 + + # update func eval + current_evals += ls_func_evals + state["func_evals"] += ls_func_evals + + ############################################################ + # check conditions + ############################################################ + if n_iter == max_iter: + break + + if current_evals >= max_eval: + break + + # optimal condition + if opt_cond: + break + + # lack of progress + if d.mul(t).abs().max() <= tolerance_change: + break + + if abs(loss - prev_loss) < tolerance_change: + break + + state["d"] = d + state["t"] = t + state["old_dirs"] = old_dirs + state["old_stps"] = old_stps + state["ro"] = ro + state["H_diag"] = H_diag + state["prev_flat_grad"] = prev_flat_grad + state["prev_loss"] = prev_loss + + return orig_loss diff --git a/parrot/lib/python3.10/site-packages/torch/optim/lr_scheduler.py b/parrot/lib/python3.10/site-packages/torch/optim/lr_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..4a5f162a0b2040a1d57e2f456bf2bc4183312c38 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/lr_scheduler.py @@ -0,0 +1,2118 @@ +# mypy: allow-untyped-defs +import math +import types +import warnings +from bisect import bisect_right +from collections import Counter +from functools import partial +from typing import ( + Any, + Callable, + cast, + Dict, + Iterable, + List, + Literal, + Optional, + Sequence, + SupportsFloat, + TypedDict, + Union, +) +from weakref import ref + +from torch import inf, Tensor + +from .optimizer import Optimizer + +__all__ = [ + "LambdaLR", + "MultiplicativeLR", + "StepLR", + "MultiStepLR", + "ConstantLR", + "LinearLR", + "ExponentialLR", + "SequentialLR", + "CosineAnnealingLR", + "ChainedScheduler", + "ReduceLROnPlateau", + "CyclicLR", + "CosineAnnealingWarmRestarts", + "OneCycleLR", + "PolynomialLR", + "LRScheduler", +] + +EPOCH_DEPRECATION_WARNING = ( + "The epoch parameter in `scheduler.step()` was not necessary and is being " + "deprecated where possible. Please use `scheduler.step()` to step the " + "scheduler. During the deprecation, if epoch is different from None, the " + "closed form is used instead of the new chainable form, where available. " + "Please open an issue if you are unable to replicate your use case: " + "https://github.com/pytorch/pytorch/issues/new/choose." +) + + +def _check_verbose_deprecated_warning(verbose): + """Raises a warning when verbose is not the default value.""" + if verbose != "deprecated": + warnings.warn( + "The verbose parameter is deprecated. 
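The `wrap_step` patching in `LRScheduler.__init__` below only marks that `optimizer.step()` ran, so the scheduler can warn when the two are called in the wrong order. The contract since PyTorch 1.1 is optimizer first, scheduler second; a minimal sketch of the intended loop (model and data are illustrative):

```python
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)

for epoch in range(3):
    optimizer.zero_grad()
    model(torch.randn(8, 4)).sum().backward()
    optimizer.step()        # must come first ...
    scheduler.step()        # ... or the first lr value of the schedule is skipped
```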
Please use get_last_lr() " + "to access the learning rate.", + UserWarning, + ) + return verbose + return False + + +def _format_param(name: str, optimizer: Optimizer, param): + """Return correctly formatted lr/momentum for each param group.""" + + def _copy(_param): + return _param.clone() if isinstance(_param, Tensor) else _param + + if isinstance(param, (list, tuple)): + if len(param) != len(optimizer.param_groups): + raise ValueError( + f"{name} must have the same length as optimizer.param_groups. " + f"{name} has {len(param)} values, param_groups has {len(optimizer.param_groups)}." + ) + else: + param = [param] * len(optimizer.param_groups) + + return list(map(_copy, param)) + + +class LRScheduler: + _get_lr_called_within_step: bool = False + + def __init__(self, optimizer: Optimizer, last_epoch=-1, verbose="deprecated"): + # Attach optimizer + if not isinstance(optimizer, Optimizer): + raise TypeError(f"{type(optimizer).__name__} is not an Optimizer") + self.optimizer = optimizer + + # Initialize epoch and base learning rates + if last_epoch == -1: + for group in optimizer.param_groups: + initial_lr = group["lr"] + if isinstance(initial_lr, Tensor): + initial_lr = initial_lr.clone() + group.setdefault("initial_lr", initial_lr) + else: + for i, group in enumerate(optimizer.param_groups): + if "initial_lr" not in group: + raise KeyError( + "param 'initial_lr' is not specified " + f"in param_groups[{i}] when resuming an optimizer" + ) + self.base_lrs: List[float] = [ + group["initial_lr"] for group in optimizer.param_groups + ] + self.last_epoch = last_epoch + + # Following https://github.com/pytorch/pytorch/issues/20124 + # We would like to ensure that `lr_scheduler.step()` is called after + # `optimizer.step()` + def patch_track_step_called(opt: Optimizer): + if hasattr(opt.step, "_wrapped_by_lr_sched"): + # we've already patched + return opt.step + + def wrap_step(step_fn): + opt_ref = ref(self.optimizer) + func = step_fn.__func__ + + def wrapper(*args, **kwargs): + opt = opt_ref() + opt._opt_called = True # type: ignore[union-attr] + return func.__get__(opt, opt.__class__)(*args, **kwargs) + + wrapper._wrapped_by_lr_sched = True # type: ignore[attr-defined] + return wrapper + + opt.step = wrap_step(opt.step) # type: ignore[method-assign] + + patch_track_step_called(self.optimizer) + self.verbose = _check_verbose_deprecated_warning(verbose) + self._initial_step() + + def _initial_step(self): + """Initialize step counts and performs a step""" + self._step_count = 0 + self.step() + + def state_dict(self): + """Returns the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + """ + return { + key: value for key, value in self.__dict__.items() if key != "optimizer" + } + + def load_state_dict(self, state_dict: Dict[str, Any]): + """Loads the schedulers state. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + """ + self.__dict__.update(state_dict) + + def get_last_lr(self) -> List[float]: + """Return last computed learning rate by current scheduler.""" + return self._last_lr + + def get_lr(self) -> List[float]: + # Compute learning rate using chainable form of the scheduler + raise NotImplementedError + + def print_lr( + self, + is_verbose: bool, + group: Dict[str, Any], + lr: float, + epoch: Optional[int] = None, + ): + """Display the current learning rate. + + .. deprecated:: 2.4 + ``print_lr()`` is deprecated. 
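`state_dict`/`load_state_dict` above serialize every attribute except the optimizer itself, so the optimizer's state must be checkpointed separately. A sketch of the round trip, assuming the objects are rebuilt the same way before restoring:

```python
import torch

model = torch.nn.Linear(2, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
sched = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.9)

ckpt = {"optimizer": opt.state_dict(), "scheduler": sched.state_dict()}

# ... later, after reconstructing model/opt/sched identically:
opt.load_state_dict(ckpt["optimizer"])
sched.load_state_dict(ckpt["scheduler"])
print(sched.get_last_lr())
```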
Please use ``get_last_lr()`` to access the + learning rate. + """ + warnings.warn( + "`LRScheduler.print_lr()` is being deprecated. To fetch the learning rate, " + "please use `get_last_lr()` instead. For more details, " + "see https://github.com/pytorch/pytorch/issues/99270.", + UserWarning, + ) + if is_verbose: + if epoch is None: + print(f"Adjusting learning rate of group {group} to {lr:.4e}.") + else: + epoch_str = ("%.2f" if isinstance(epoch, float) else "%.5d") % epoch + print( + f"Epoch {epoch_str}: adjusting learning rate of group {group} to {lr:.4e}." + ) + + def step(self, epoch: Optional[int] = None): + # Raise a warning if old pattern is detected + # https://github.com/pytorch/pytorch/issues/20124 + if self._step_count == 1: + if not hasattr(self.optimizer.step, "_wrapped_by_lr_sched"): + warnings.warn( + "Seems like `optimizer.step()` has been overridden after learning rate scheduler " + "initialization. Please, make sure to call `optimizer.step()` before " + "`lr_scheduler.step()`. See more details at " + "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", + UserWarning, + ) + + # Just check if there were two first lr_scheduler.step() calls before optimizer.step() + elif not getattr(self.optimizer, "_opt_called", False): + warnings.warn( + "Detected call of `lr_scheduler.step()` before `optimizer.step()`. " + "In PyTorch 1.1.0 and later, you should call them in the opposite order: " + "`optimizer.step()` before `lr_scheduler.step()`. Failure to do this " + "will result in PyTorch skipping the first value of the learning rate schedule. " + "See more details at " + "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", + UserWarning, + ) + self._step_count += 1 + + with _enable_get_lr_call(self): + if epoch is None: + self.last_epoch += 1 + values = self.get_lr() + else: + warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning) + self.last_epoch = epoch + if hasattr(self, "_get_closed_form_lr"): + values = cast(List[float], self._get_closed_form_lr()) + else: + values = self.get_lr() + + for i, data in enumerate(zip(self.optimizer.param_groups, values)): + param_group, lr = data + if isinstance(param_group["lr"], Tensor): + lr_val = lr.item() if isinstance(lr, Tensor) else lr # type: ignore[attr-defined] + param_group["lr"].fill_(lr_val) + else: + param_group["lr"] = lr + + self._last_lr: List[float] = [ + group["lr"] for group in self.optimizer.param_groups + ] + + +def _warn_get_lr_called_within_step(lr_scheduler: LRScheduler): + if not lr_scheduler._get_lr_called_within_step: + warnings.warn( + "To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", + UserWarning, + stacklevel=2, + ) + + +# Including _LRScheduler for backwards compatibility +# Subclass instead of assign because we want __name__ of _LRScheduler to be _LRScheduler (assigning would make it LRScheduler). +class _LRScheduler(LRScheduler): + pass + + +class _enable_get_lr_call: + def __init__(self, o: LRScheduler): + self.o = o + + def __enter__(self): + self.o._get_lr_called_within_step = True + return self + + def __exit__(self, type, value, traceback): + self.o._get_lr_called_within_step = False + + +class LambdaLR(LRScheduler): + """Sets the learning rate of each parameter group to the initial lr + times a given function. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. 
+ lr_lambda (function or list): A function which computes a multiplicative + factor given an integer parameter epoch, or a list of such + functions, one for each group in optimizer.param_groups. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool | str): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer has two groups. + >>> lambda1 = lambda epoch: epoch // 30 + >>> lambda2 = lambda epoch: 0.95 ** epoch + >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2]) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__( + self, + optimizer: Optimizer, + lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], + last_epoch=-1, + verbose="deprecated", + ): + self.optimizer = optimizer + + self.lr_lambdas: List[Callable[[int], float]] + if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple): + self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups) + else: + if len(lr_lambda) != len(optimizer.param_groups): + raise ValueError( + f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}" + ) + self.lr_lambdas = list(lr_lambda) + super().__init__(optimizer, last_epoch, verbose) + + def state_dict(self): + """Returns the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + The learning rate lambda functions will only be saved if they are callable objects + and not if they are functions or lambdas. + + When saving or loading the scheduler, please make sure to also save or load the state of the optimizer. + """ + + state_dict = { + key: value + for key, value in self.__dict__.items() + if key not in ("optimizer", "lr_lambdas") + } + state_dict["lr_lambdas"] = [None] * len(self.lr_lambdas) + + for idx, fn in enumerate(self.lr_lambdas): + if not isinstance(fn, types.FunctionType): + state_dict["lr_lambdas"][idx] = fn.__dict__.copy() + + return state_dict + + def load_state_dict(self, state_dict): + """Loads the schedulers state. + + When saving or loading the scheduler, please make sure to also save or load the state of the optimizer. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + """ + + lr_lambdas = state_dict.pop("lr_lambdas") + self.__dict__.update(state_dict) + # Restore state_dict keys in order to prevent side effects + # https://github.com/pytorch/pytorch/issues/32756 + state_dict["lr_lambdas"] = lr_lambdas + + for idx, fn in enumerate(lr_lambdas): + if fn is not None: + self.lr_lambdas[idx].__dict__.update(fn) + + def get_lr(self): + _warn_get_lr_called_within_step(self) + + return [ + base_lr * lmbda(self.last_epoch) + for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs) + ] + + +class MultiplicativeLR(LRScheduler): + """Multiply the learning rate of each parameter group by the factor given + in the specified function. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + lr_lambda (function or list): A function which computes a multiplicative + factor given an integer parameter epoch, or a list of such + functions, one for each group in optimizer.param_groups. + last_epoch (int): The index of last epoch. Default: -1. 
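As `LambdaLR.state_dict` notes, plain functions and lambdas are not serialized (their slot is saved as `None`), but a callable object's `__dict__` is saved and restored. A sketch using a hypothetical `WarmupFactor` callable to get checkpointable lambda state:

```python
import torch

class WarmupFactor:
    """Callable with state: LambdaLR saves/restores its __dict__."""
    def __init__(self, warmup_epochs):
        self.warmup_epochs = warmup_epochs

    def __call__(self, epoch):
        return min(1.0, (epoch + 1) / self.warmup_epochs)

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=WarmupFactor(5))
print(sched.state_dict()["lr_lambdas"])  # [{'warmup_epochs': 5}], not None
```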
+ verbose (bool | str): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> lmbda = lambda epoch: 0.95 + >>> scheduler = MultiplicativeLR(optimizer, lr_lambda=lmbda) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__( + self, + optimizer: Optimizer, + lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], + last_epoch=-1, + verbose="deprecated", + ): + self.optimizer = optimizer + + self.lr_lambdas: List[Callable[[int], float]] + if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple): + self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups) + else: + if len(lr_lambda) != len(optimizer.param_groups): + raise ValueError( + f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}" + ) + self.lr_lambdas = list(lr_lambda) + super().__init__(optimizer, last_epoch, verbose) + + def state_dict(self): + """Returns the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + The learning rate lambda functions will only be saved if they are callable objects + and not if they are functions or lambdas. + """ + state_dict = { + key: value + for key, value in self.__dict__.items() + if key not in ("optimizer", "lr_lambdas") + } + state_dict["lr_lambdas"] = [None] * len(self.lr_lambdas) + + for idx, fn in enumerate(self.lr_lambdas): + if not isinstance(fn, types.FunctionType): + state_dict["lr_lambdas"][idx] = fn.__dict__.copy() + + return state_dict + + def load_state_dict(self, state_dict): + """Loads the schedulers state. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + """ + lr_lambdas = state_dict.pop("lr_lambdas") + self.__dict__.update(state_dict) + # Restore state_dict keys in order to prevent side effects + # https://github.com/pytorch/pytorch/issues/32756 + state_dict["lr_lambdas"] = lr_lambdas + + for idx, fn in enumerate(lr_lambdas): + if fn is not None: + self.lr_lambdas[idx].__dict__.update(fn) + + def get_lr(self): + _warn_get_lr_called_within_step(self) + + if self.last_epoch > 0: + return [ + group["lr"] * lmbda(self.last_epoch) + for lmbda, group in zip(self.lr_lambdas, self.optimizer.param_groups) + ] + else: + return [group["lr"] for group in self.optimizer.param_groups] + + +class StepLR(LRScheduler): + """Decays the learning rate of each parameter group by gamma every + step_size epochs. Notice that such decay can happen simultaneously with + other changes to the learning rate from outside this scheduler. When + last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + step_size (int): Period of learning rate decay. + gamma (float): Multiplicative factor of learning rate decay. + Default: 0.1. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool | str): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.05 if epoch < 30 + >>> # lr = 0.005 if 30 <= epoch < 60 + >>> # lr = 0.0005 if 60 <= epoch < 90 + >>> # ... 
+ >>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__( + self, + optimizer: Optimizer, + step_size: int, + gamma=0.1, + last_epoch=-1, + verbose="deprecated", + ): + self.step_size = step_size + self.gamma = gamma + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + _warn_get_lr_called_within_step(self) + + if (self.last_epoch == 0) or (self.last_epoch % self.step_size != 0): + return [group["lr"] for group in self.optimizer.param_groups] + return [group["lr"] * self.gamma for group in self.optimizer.param_groups] + + def _get_closed_form_lr(self): + return [ + base_lr * self.gamma ** (self.last_epoch // self.step_size) + for base_lr in self.base_lrs + ] + + +class MultiStepLR(LRScheduler): + """Decays the learning rate of each parameter group by gamma once the + number of epoch reaches one of the milestones. Notice that such decay can + happen simultaneously with other changes to the learning rate from outside + this scheduler. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + milestones (list): List of epoch indices. Must be increasing. + gamma (float): Multiplicative factor of learning rate decay. + Default: 0.1. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool | str): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.05 if epoch < 30 + >>> # lr = 0.005 if 30 <= epoch < 80 + >>> # lr = 0.0005 if epoch >= 80 + >>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__( + self, + optimizer: Optimizer, + milestones: Iterable[int], + gamma=0.1, + last_epoch=-1, + verbose="deprecated", + ): + self.milestones = Counter(milestones) + self.gamma = gamma + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + _warn_get_lr_called_within_step(self) + + if self.last_epoch not in self.milestones: + return [group["lr"] for group in self.optimizer.param_groups] + return [ + group["lr"] * self.gamma ** self.milestones[self.last_epoch] + for group in self.optimizer.param_groups + ] + + def _get_closed_form_lr(self): + milestones = sorted(self.milestones.elements()) + return [ + base_lr * self.gamma ** bisect_right(milestones, self.last_epoch) + for base_lr in self.base_lrs + ] + + +class ConstantLR(LRScheduler): + """Multiply the learning rate of each parameter group by a small constant factor until the + number of epoch reaches a pre-defined milestone: total_iters. + Notice that such multiplication of the small constant factor can + happen simultaneously with other changes to the learning rate from outside this scheduler. + When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + factor (float): The number we multiply learning rate until the milestone. Default: 1./3. + total_iters (int): The number of steps that the scheduler multiplies the learning rate by the factor. + Default: 5. + last_epoch (int): The index of the last epoch. Default: -1. + verbose (bool | str): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. 
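When the scheduler is the only thing modifying the learning rate, the chainable `get_lr` and `_get_closed_form_lr` agree. A quick numeric check of the StepLR closed form `base_lr * gamma ** (epoch // step_size)` (the bare-parameter optimizer is just scaffolding):

```python
import torch

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.05)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=30, gamma=0.1)

for epoch in range(100):
    opt.step()
    sched.step()
    closed_form = 0.05 * 0.1 ** ((epoch + 1) // 30)
    assert abs(sched.get_last_lr()[0] - closed_form) < 1e-12
```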
deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.025 if epoch == 0 + >>> # lr = 0.025 if epoch == 1 + >>> # lr = 0.025 if epoch == 2 + >>> # lr = 0.025 if epoch == 3 + >>> # lr = 0.05 if epoch >= 4 + >>> scheduler = ConstantLR(optimizer, factor=0.5, total_iters=4) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__( + self, + optimizer: Optimizer, + factor=1.0 / 3, + total_iters=5, + last_epoch=-1, + verbose="deprecated", + ): + if factor > 1.0 or factor < 0: + raise ValueError( + "Constant multiplicative factor expected to be between 0 and 1." + ) + + self.factor = factor + self.total_iters = total_iters + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + _warn_get_lr_called_within_step(self) + + if self.last_epoch == 0: + return [group["lr"] * self.factor for group in self.optimizer.param_groups] + + if self.last_epoch != self.total_iters: + return [group["lr"] for group in self.optimizer.param_groups] + + return [ + group["lr"] * (1.0 / self.factor) for group in self.optimizer.param_groups + ] + + def _get_closed_form_lr(self): + return [ + base_lr + * (self.factor + (self.last_epoch >= self.total_iters) * (1 - self.factor)) + for base_lr in self.base_lrs + ] + + +class LinearLR(LRScheduler): + """Decays the learning rate of each parameter group by linearly changing small + multiplicative factor until the number of epoch reaches a pre-defined milestone: total_iters. + Notice that such decay can happen simultaneously with other changes to the learning rate + from outside this scheduler. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + start_factor (float): The number we multiply learning rate in the first epoch. + The multiplication factor changes towards end_factor in the following epochs. + Default: 1./3. + end_factor (float): The number we multiply learning rate at the end of linear changing + process. Default: 1.0. + total_iters (int): The number of iterations that multiplicative factor reaches to 1. + Default: 5. + last_epoch (int): The index of the last epoch. Default: -1. + verbose (bool | str): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.025 if epoch == 0 + >>> # lr = 0.03125 if epoch == 1 + >>> # lr = 0.0375 if epoch == 2 + >>> # lr = 0.04375 if epoch == 3 + >>> # lr = 0.05 if epoch >= 4 + >>> scheduler = LinearLR(optimizer, start_factor=0.5, total_iters=4) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__( + self, + optimizer: Optimizer, + start_factor=1.0 / 3, + end_factor=1.0, + total_iters=5, + last_epoch=-1, + verbose="deprecated", + ): + if start_factor > 1.0 or start_factor <= 0: + raise ValueError( + "Starting multiplicative factor expected to be greater than 0 and less or equal to 1." + ) + + if end_factor > 1.0 or end_factor < 0: + raise ValueError( + "Ending multiplicative factor expected to be between 0 and 1." 
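ConstantLR scales the lr once at epoch 0 and multiplies it back out exactly at `total_iters`; the closed form makes that a single expression. A sketch reproducing the docstring's factor-0.5 example:

```python
def constant_lr(base_lr, epoch, factor=0.5, total_iters=4):
    # Closed form used by ConstantLR._get_closed_form_lr
    return base_lr * (factor + (epoch >= total_iters) * (1 - factor))

print([constant_lr(0.05, e) for e in range(6)])
# [0.025, 0.025, 0.025, 0.025, 0.05, 0.05]
```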
+ ) + + self.start_factor = start_factor + self.end_factor = end_factor + self.total_iters = total_iters + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + _warn_get_lr_called_within_step(self) + + if self.last_epoch == 0: + return [ + group["lr"] * self.start_factor for group in self.optimizer.param_groups + ] + + if self.last_epoch > self.total_iters: + return [group["lr"] for group in self.optimizer.param_groups] + + return [ + group["lr"] + * ( + 1.0 + + (self.end_factor - self.start_factor) + / ( + self.total_iters * self.start_factor + + (self.last_epoch - 1) * (self.end_factor - self.start_factor) + ) + ) + for group in self.optimizer.param_groups + ] + + def _get_closed_form_lr(self): + return [ + base_lr + * ( + self.start_factor + + (self.end_factor - self.start_factor) + * min(self.total_iters, self.last_epoch) + / self.total_iters + ) + for base_lr in self.base_lrs + ] + + +class ExponentialLR(LRScheduler): + """Decays the learning rate of each parameter group by gamma every epoch. + When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + gamma (float): Multiplicative factor of learning rate decay. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool | str): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + """ + + def __init__( + self, optimizer: Optimizer, gamma: float, last_epoch=-1, verbose="deprecated" + ): + self.gamma = gamma + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + _warn_get_lr_called_within_step(self) + + if self.last_epoch == 0: + return [group["lr"] for group in self.optimizer.param_groups] + return [group["lr"] * self.gamma for group in self.optimizer.param_groups] + + def _get_closed_form_lr(self): + return [base_lr * self.gamma**self.last_epoch for base_lr in self.base_lrs] + + +class SequentialLR(LRScheduler): + """Receives the list of schedulers that is expected to be called sequentially during + optimization process and milestone points that provides exact intervals to reflect + which scheduler is supposed to be called at a given epoch. + + Args: + optimizer (Optimizer): Wrapped optimizer. + schedulers (list): List of chained schedulers. + milestones (list): List of integers that reflects milestone points. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool | str): Does nothing. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 1. for all groups + >>> # lr = 0.1 if epoch == 0 + >>> # lr = 0.1 if epoch == 1 + >>> # lr = 0.9 if epoch == 2 + >>> # lr = 0.81 if epoch == 3 + >>> # lr = 0.729 if epoch == 4 + >>> scheduler1 = ConstantLR(optimizer, factor=0.1, total_iters=2) + >>> scheduler2 = ExponentialLR(optimizer, gamma=0.9) + >>> scheduler = SequentialLR(optimizer, schedulers=[scheduler1, scheduler2], milestones=[2]) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__( + self, + optimizer: Optimizer, + schedulers: List[LRScheduler], + milestones: List[int], + last_epoch=-1, + verbose="deprecated", + ): + if len(schedulers) < 1: + raise ValueError( + f"{self.__class__.__name__} expects at least one scheduler, but got no scheduler." 
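The recursive per-epoch factor in `LinearLR.get_lr` looks opaque, but the closed form shows it is plain linear interpolation from `start_factor` to `end_factor` over `total_iters`. A sketch reproducing the docstring's numbers:

```python
def linear_lr(base_lr, epoch, start_factor=0.5, end_factor=1.0, total_iters=4):
    # Closed form used by LinearLR._get_closed_form_lr
    progress = min(total_iters, epoch) / total_iters
    return base_lr * (start_factor + (end_factor - start_factor) * progress)

print([round(linear_lr(0.05, e), 5) for e in range(6)])
# [0.025, 0.03125, 0.0375, 0.04375, 0.05, 0.05]
```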
+ ) + + for scheduler_idx, scheduler in enumerate(schedulers): + if not hasattr(scheduler, "optimizer"): + raise TypeError( + f"{self.__class__.__name__} at index {scheduler_idx} should have `optimizer` as its attribute." + ) + if isinstance(scheduler, ReduceLROnPlateau): + raise ValueError( + f"{self.__class__.__name__} does not support `ReduceLROnPlateau` scheduler as it " + "requires additional kwargs to be specified when calling `step`, " + f"but got one at index {scheduler_idx} in the given schedulers sequence." + ) + if optimizer != scheduler.optimizer: + raise ValueError( + f"{self.__class__.__name__} expects all schedulers to belong to the same optimizer, but " + f"got scheduler {scheduler.__class__.__name__} at index {scheduler_idx} has {scheduler.optimizer}, " + f"which is different from {optimizer.__class__.__name__}." + ) + + if len(milestones) != len(schedulers) - 1: + raise ValueError( + "Sequential Schedulers expects number of schedulers provided to be one more " + f"than the number of milestone points, but got number of schedulers {len(schedulers)} and the " + f"number of milestones to be equal to {len(milestones)}" + ) + _check_verbose_deprecated_warning(verbose) + self._schedulers = schedulers + self._milestones = milestones + self.last_epoch = last_epoch + 1 + self.optimizer = optimizer + + # Reset learning rates back to initial values + for group in self.optimizer.param_groups: + group["lr"] = group["initial_lr"] + + # "Undo" the step performed by other schedulers + for scheduler in self._schedulers: + scheduler.last_epoch -= 1 + + # Perform the initial step for only the first scheduler + self._schedulers[0]._initial_step() + + self._last_lr = schedulers[0].get_last_lr() + + def step(self): + self.last_epoch += 1 + idx = bisect_right(self._milestones, self.last_epoch) + scheduler = self._schedulers[idx] + if idx > 0 and self._milestones[idx - 1] == self.last_epoch: + scheduler.step(0) + else: + scheduler.step() + + self._last_lr = scheduler.get_last_lr() + + def state_dict(self): + """Returns the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + The wrapped scheduler states will also be saved. + """ + state_dict = { + key: value + for key, value in self.__dict__.items() + if key not in ("optimizer", "_schedulers") + } + state_dict["_schedulers"] = [None] * len(self._schedulers) + + for idx, s in enumerate(self._schedulers): + state_dict["_schedulers"][idx] = s.state_dict() + + return state_dict + + def load_state_dict(self, state_dict): + """Loads the schedulers state. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + """ + _schedulers = state_dict.pop("_schedulers") + self.__dict__.update(state_dict) + # Restore state_dict keys in order to prevent side effects + # https://github.com/pytorch/pytorch/issues/32756 + state_dict["_schedulers"] = _schedulers + + for idx, s in enumerate(_schedulers): + self._schedulers[idx].load_state_dict(s) + + +class PolynomialLR(LRScheduler): + """Decays the learning rate of each parameter group using a polynomial function + in the given total_iters. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + total_iters (int): The number of steps that the scheduler decays the learning rate. Default: 5. + power (float): The power of the polynomial. Default: 1.0. + verbose (bool | str): If ``True``, prints a message to stdout for + each update. 
Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP("undefined vars") + >>> # Assuming optimizer uses lr = 0.001 for all groups + >>> # lr = 0.001 if epoch == 0 + >>> # lr = 0.00075 if epoch == 1 + >>> # lr = 0.00050 if epoch == 2 + >>> # lr = 0.00025 if epoch == 3 + >>> # lr = 0.0 if epoch >= 4 + >>> scheduler = PolynomialLR(optimizer, total_iters=4, power=1.0) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__( + self, + optimizer: Optimizer, + total_iters=5, + power=1.0, + last_epoch=-1, + verbose="deprecated", + ): + self.total_iters = total_iters + self.power = power + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + _warn_get_lr_called_within_step(self) + + if self.last_epoch == 0 or self.last_epoch > self.total_iters: + return [group["lr"] for group in self.optimizer.param_groups] + + decay_factor = ( + (1.0 - self.last_epoch / self.total_iters) + / (1.0 - (self.last_epoch - 1) / self.total_iters) + ) ** self.power + return [group["lr"] * decay_factor for group in self.optimizer.param_groups] + + def _get_closed_form_lr(self): + return [ + ( + base_lr + * (1.0 - min(self.total_iters, self.last_epoch) / self.total_iters) + ** self.power + ) + for base_lr in self.base_lrs + ] + + +class CosineAnnealingLR(LRScheduler): + r"""Set the learning rate of each parameter group using a cosine annealing + schedule, where :math:`\eta_{max}` is set to the initial lr and + :math:`T_{cur}` is the number of epochs since the last restart in SGDR: + + .. math:: + \begin{aligned} + \eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right), + & T_{cur} \neq (2k+1)T_{max}; \\ + \eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min}) + \left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right), + & T_{cur} = (2k+1)T_{max}. + \end{aligned} + + When last_epoch=-1, sets initial lr as lr. Notice that because the schedule + is defined recursively, the learning rate can be simultaneously modified + outside this scheduler by other operators. If the learning rate is set + solely by this scheduler, the learning rate at each step becomes: + + .. math:: + \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right) + + It has been proposed in + `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only + implements the cosine annealing part of SGDR, and not the restarts. + + Args: + optimizer (Optimizer): Wrapped optimizer. + T_max (int): Maximum number of iterations. + eta_min (float): Minimum learning rate. Default: 0. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool | str): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + .. 
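The branchy chainable form in `CosineAnnealingLR.get_lr` below implements the SGDR cosine (without restarts); the closed form is easier to read as a plain function. A sketch evaluating it at the start, midpoint, and end of one half-period:

```python
import math

def cosine_annealing_lr(step, base_lr, T_max, eta_min=0.0):
    """eta_t = eta_min + (base_lr - eta_min) * (1 + cos(pi * t / T_max)) / 2"""
    return eta_min + (base_lr - eta_min) * (1 + math.cos(math.pi * step / T_max)) / 2

T = 10
print([round(cosine_annealing_lr(t, 0.1, T), 4) for t in (0, T // 2, T)])
# [0.1, 0.05, 0.0]
```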
_SGDR\: Stochastic Gradient Descent with Warm Restarts: + https://arxiv.org/abs/1608.03983 + """ + + def __init__( + self, + optimizer: Optimizer, + T_max: int, + eta_min=0, + last_epoch=-1, + verbose="deprecated", + ): + self.T_max = T_max + self.eta_min = eta_min + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + _warn_get_lr_called_within_step(self) + + if self.last_epoch == 0: + return [group["lr"] for group in self.optimizer.param_groups] + elif self._step_count == 1 and self.last_epoch > 0: + return [ + self.eta_min + + (base_lr - self.eta_min) + * (1 + math.cos((self.last_epoch) * math.pi / self.T_max)) + / 2 + for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups) + ] + elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0: + return [ + group["lr"] + + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2 + for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups) + ] + return [ + (1 + math.cos(math.pi * self.last_epoch / self.T_max)) + / (1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) + * (group["lr"] - self.eta_min) + + self.eta_min + for group in self.optimizer.param_groups + ] + + def _get_closed_form_lr(self): + return [ + self.eta_min + + (base_lr - self.eta_min) + * (1 + math.cos(math.pi * self.last_epoch / self.T_max)) + / 2 + for base_lr in self.base_lrs + ] + + +class ChainedScheduler(LRScheduler): + """Chains list of learning rate schedulers. It takes a sequence of chainable learning + rate schedulers and performs consecutive step() functions belonging to them by just + one call. + + Args: + schedulers (sequence): sequence of chained schedulers. + optimizer (Optimizer, optional): Wrapped optimizer. Default: None. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 1. for all groups + >>> # lr = 0.09 if epoch == 0 + >>> # lr = 0.081 if epoch == 1 + >>> # lr = 0.729 if epoch == 2 + >>> # lr = 0.6561 if epoch == 3 + >>> # lr = 0.59049 if epoch >= 4 + >>> scheduler1 = ConstantLR(optimizer, factor=0.1, total_iters=2) + >>> scheduler2 = ExponentialLR(optimizer, gamma=0.9) + >>> scheduler = ChainedScheduler([scheduler1, scheduler2], optimizer=optimizer) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__( + self, schedulers: Sequence[LRScheduler], optimizer: Optional[Optimizer] = None + ): + if len(schedulers) < 1: + raise ValueError( + f"{self.__class__.__name__} expects at least one scheduler to be chained, but got no scheduler." + ) + + optimizer = optimizer or schedulers[0].optimizer + for scheduler_idx, scheduler in enumerate(schedulers): + if not hasattr(scheduler, "optimizer"): + raise TypeError( + f"{self.__class__.__name__} at index {scheduler_idx} should have `optimizer` as its attribute." + ) + if isinstance(scheduler, ReduceLROnPlateau): + raise ValueError( + f"{self.__class__.__name__} does not support `ReduceLROnPlateau` scheduler as it " + "requires additional kwargs to be specified when calling `step`, " + f"but got one at index {scheduler_idx} in the given schedulers sequence." + ) + if optimizer != scheduler.optimizer: + raise ValueError( + f"{self.__class__.__name__} expects all schedulers to belong to the same optimizer, but " + f"got scheduler {scheduler.__class__.__name__} at index {scheduler_idx} has {scheduler.optimizer}, " + f"which is different from {optimizer.__class__.__name__}." 
+ ) + self._schedulers = schedulers + self.optimizer = optimizer + self._last_lr = [ + group["lr"] for group in self._schedulers[-1].optimizer.param_groups + ] + + def step(self): + for scheduler in self._schedulers: + scheduler.step() + self._last_lr = [ + group["lr"] for group in self._schedulers[-1].optimizer.param_groups + ] + + def state_dict(self): + """Returns the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + The wrapped scheduler states will also be saved. + """ + state_dict = { + key: value + for key, value in self.__dict__.items() + if key not in ("optimizer", "_schedulers") + } + state_dict["_schedulers"] = [None] * len(self._schedulers) + + for idx, s in enumerate(self._schedulers): + state_dict["_schedulers"][idx] = s.state_dict() + + return state_dict + + def load_state_dict(self, state_dict): + """Loads the schedulers state. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + """ + _schedulers = state_dict.pop("_schedulers") + self.__dict__.update(state_dict) + # Restore state_dict keys in order to prevent side effects + # https://github.com/pytorch/pytorch/issues/32756 + state_dict["_schedulers"] = _schedulers + + for idx, s in enumerate(_schedulers): + self._schedulers[idx].load_state_dict(s) + + +class ReduceLROnPlateau(LRScheduler): + """Reduce learning rate when a metric has stopped improving. + Models often benefit from reducing the learning rate by a factor + of 2-10 once learning stagnates. This scheduler reads a metrics + quantity and if no improvement is seen for a 'patience' number + of epochs, the learning rate is reduced. + + Args: + optimizer (Optimizer): Wrapped optimizer. + mode (str): One of `min`, `max`. In `min` mode, lr will + be reduced when the quantity monitored has stopped + decreasing; in `max` mode it will be reduced when the + quantity monitored has stopped increasing. Default: 'min'. + factor (float): Factor by which the learning rate will be + reduced. new_lr = lr * factor. Default: 0.1. + patience (int): The number of allowed epochs with no improvement after + which the learning rate will be reduced. + For example, consider the case of having no patience (`patience = 0`). + In the first epoch, a baseline is established and is always considered good as there's no previous baseline. + In the second epoch, if the performance is worse than the baseline, + we have what is considered an intolerable epoch. + Since the count of intolerable epochs (1) is greater than the patience level (0), + the learning rate is reduced at the end of this epoch. + From the third epoch onwards, the learning rate continues to be reduced at the end of each epoch + if the performance is worse than the baseline. If the performance improves or remains the same, + the learning rate is not adjusted. + Default: 10. + threshold (float): Threshold for measuring the new optimum, + to only focus on significant changes. Default: 1e-4. + threshold_mode (str): One of `rel`, `abs`. In `rel` mode, + dynamic_threshold = best * ( 1 + threshold ) in 'max' + mode or best * ( 1 - threshold ) in `min` mode. + In `abs` mode, dynamic_threshold = best + threshold in + `max` mode or best - threshold in `min` mode. Default: 'rel'. + cooldown (int): Number of epochs to wait before resuming + normal operation after lr has been reduced. Default: 0. + min_lr (float or list): A scalar or a list of scalars. 
A + lower bound on the learning rate of all param groups + or each group respectively. Default: 0. + eps (float): Minimal decay applied to lr. If the difference + between new and old lr is smaller than eps, the update is + ignored. Default: 1e-8. + verbose (bool | str): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = ReduceLROnPlateau(optimizer, 'min') + >>> for epoch in range(10): + >>> train(...) + >>> val_loss = validate(...) + >>> # Note that step should be called after validate() + >>> scheduler.step(val_loss) + """ + + def __init__( + self, + optimizer: Optimizer, + mode: Literal["min", "max"] = "min", + factor=0.1, + patience=10, + threshold=1e-4, + threshold_mode: Literal["rel", "abs"] = "rel", + cooldown=0, + min_lr: Union[List[float], float] = 0, + eps=1e-8, + verbose="deprecated", + ): + if factor >= 1.0: + raise ValueError("Factor should be < 1.0.") + self.factor = factor + + # Attach optimizer + if not isinstance(optimizer, Optimizer): + raise TypeError(f"{type(optimizer).__name__} is not an Optimizer") + self.optimizer = optimizer + + if isinstance(min_lr, (list, tuple)): + if len(min_lr) != len(optimizer.param_groups): + raise ValueError( + f"expected {len(optimizer.param_groups)} min_lrs, got {len(min_lr)}" + ) + self.min_lrs = list(min_lr) + else: + self.min_lrs = [min_lr] * len(optimizer.param_groups) + + self.patience = patience + + self.verbose = _check_verbose_deprecated_warning(verbose) + self.cooldown = cooldown + self.cooldown_counter = 0 + self.mode = mode + self.threshold = threshold + self.threshold_mode = threshold_mode + self.best: float + self.num_bad_epochs: int + self.mode_worse: float # the worse value for the chosen mode + self.eps = eps + self.last_epoch = 0 + self._last_lr = [group["lr"] for group in self.optimizer.param_groups] + self._init_is_better( + mode=mode, threshold=threshold, threshold_mode=threshold_mode + ) + self._reset() + + def _reset(self): + """Resets num_bad_epochs counter and cooldown counter.""" + self.best = self.mode_worse + self.cooldown_counter = 0 + self.num_bad_epochs = 0 + + def step(self, metrics: SupportsFloat, epoch=None): # type: ignore[override] + # convert `metrics` to float, in case it's a zero-dim Tensor + current = float(metrics) + if epoch is None: + epoch = self.last_epoch + 1 + else: + warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning) + self.last_epoch = epoch + + if self.is_better(current, self.best): + self.best = current + self.num_bad_epochs = 0 + else: + self.num_bad_epochs += 1 + + if self.in_cooldown: + self.cooldown_counter -= 1 + self.num_bad_epochs = 0 # ignore any bad epochs in cooldown + + if self.num_bad_epochs > self.patience: + self._reduce_lr(epoch) + self.cooldown_counter = self.cooldown + self.num_bad_epochs = 0 + + self._last_lr = [group["lr"] for group in self.optimizer.param_groups] + + def _reduce_lr(self, epoch): + for i, param_group in enumerate(self.optimizer.param_groups): + old_lr = float(param_group["lr"]) + new_lr = max(old_lr * self.factor, self.min_lrs[i]) + if old_lr - new_lr > self.eps: + param_group["lr"] = new_lr + + @property + def in_cooldown(self): + return self.cooldown_counter > 0 + + def is_better(self, a, best): + if self.mode == "min" and self.threshold_mode == "rel": + rel_epsilon = 1.0 - 
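The four branches of `is_better` reduce to one comparison per `(mode, threshold_mode)` pair; "rel" rescales the best value, "abs" shifts it. A standalone sketch of the same rule:

```python
def is_better(a, best, mode="min", threshold=1e-4, threshold_mode="rel"):
    if mode == "min" and threshold_mode == "rel":
        return a < best * (1.0 - threshold)
    if mode == "min" and threshold_mode == "abs":
        return a < best - threshold
    if mode == "max" and threshold_mode == "rel":
        return a > best * (1.0 + threshold)
    return a > best + threshold        # mode == "max", threshold_mode == "abs"

print(is_better(0.9999, 1.0))  # False: not a *significant* improvement in rel mode
print(is_better(0.99, 1.0))    # True
```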
self.threshold + return a < best * rel_epsilon + + elif self.mode == "min" and self.threshold_mode == "abs": + return a < best - self.threshold + + elif self.mode == "max" and self.threshold_mode == "rel": + rel_epsilon = self.threshold + 1.0 + return a > best * rel_epsilon + + else: # mode == 'max' and epsilon_mode == 'abs': + return a > best + self.threshold + + def _init_is_better(self, mode, threshold, threshold_mode): + if mode not in {"min", "max"}: + raise ValueError("mode " + mode + " is unknown!") + if threshold_mode not in {"rel", "abs"}: + raise ValueError("threshold mode " + threshold_mode + " is unknown!") + + if mode == "min": + self.mode_worse = inf + else: # mode == 'max': + self.mode_worse = -inf + + self.mode = mode + self.threshold = threshold + self.threshold_mode = threshold_mode + + def state_dict(self): + return { + key: value for key, value in self.__dict__.items() if key != "optimizer" + } + + def load_state_dict(self, state_dict): + self.__dict__.update(state_dict) + self._init_is_better( + mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode + ) + + +class CyclicLR(LRScheduler): + r"""Sets the learning rate of each parameter group according to + cyclical learning rate policy (CLR). The policy cycles the learning + rate between two boundaries with a constant frequency, as detailed in + the paper `Cyclical Learning Rates for Training Neural Networks`_. + The distance between the two boundaries can be scaled on a per-iteration + or per-cycle basis. + + Cyclical learning rate policy changes the learning rate after every batch. + `step` should be called after a batch has been used for training. + + This class has three built-in policies, as put forth in the paper: + + * "triangular": A basic triangular cycle without amplitude scaling. + * "triangular2": A basic triangular cycle that scales initial amplitude by half each cycle. + * "exp_range": A cycle that scales initial amplitude by :math:`\text{gamma}^{\text{cycle iterations}}` + at each cycle iteration. + + This implementation was adapted from the github repo: `bckenstler/CLR`_ + + Args: + optimizer (Optimizer): Wrapped optimizer. + base_lr (float or list): Initial learning rate which is the + lower boundary in the cycle for each parameter group. + max_lr (float or list): Upper learning rate boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_lr - base_lr). + The lr at any cycle is the sum of base_lr + and some scaling of the amplitude; therefore + max_lr may not actually be reached depending on + scaling function. + step_size_up (int): Number of training iterations in the + increasing half of a cycle. Default: 2000 + step_size_down (int): Number of training iterations in the + decreasing half of a cycle. If step_size_down is None, + it is set to step_size_up. Default: None + mode (str): One of {triangular, triangular2, exp_range}. + Values correspond to policies detailed above. + If scale_fn is not None, this argument is ignored. + Default: 'triangular' + gamma (float): Constant in 'exp_range' scaling function: + gamma**(cycle iterations) + Default: 1.0 + scale_fn (function): Custom scaling policy defined by a single + argument lambda function, where + 0 <= scale_fn(x) <= 1 for all x >= 0. + If specified, then 'mode' is ignored. + Default: None + scale_mode (str): {'cycle', 'iterations'}. + Defines whether scale_fn is evaluated on + cycle number or cycle iterations (training + iterations since start of cycle). 
+ Default: 'cycle' + cycle_momentum (bool): If ``True``, momentum is cycled inversely + to learning rate between 'base_momentum' and 'max_momentum'. + Default: True + base_momentum (float or list): Lower momentum boundaries in the cycle + for each parameter group. Note that momentum is cycled inversely + to learning rate; at the peak of a cycle, momentum is + 'base_momentum' and learning rate is 'max_lr'. + Default: 0.8 + max_momentum (float or list): Upper momentum boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_momentum - base_momentum). + The momentum at any cycle is the difference of max_momentum + and some scaling of the amplitude; therefore + base_momentum may not actually be reached depending on + scaling function. Note that momentum is cycled inversely + to learning rate; at the start of a cycle, momentum is 'max_momentum' + and learning rate is 'base_lr' + Default: 0.9 + last_epoch (int): The index of the last batch. This parameter is used when + resuming a training job. Since `step()` should be invoked after each + batch instead of after each epoch, this number represents the total + number of *batches* computed, not the total number of epochs computed. + When last_epoch=-1, the schedule is started from the beginning. + Default: -1 + verbose (bool | str): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1) + >>> data_loader = torch.utils.data.DataLoader(...) + >>> for epoch in range(10): + >>> for batch in data_loader: + >>> train_batch(...) + >>> scheduler.step() + + + .. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186 + .. 
_bckenstler/CLR: https://github.com/bckenstler/CLR + """ + + def __init__( + self, + optimizer: Optimizer, + base_lr: Union[float, List[float]], + max_lr: Union[float, List[float]], + step_size_up=2000, + step_size_down: Optional[int] = None, + mode: Literal["triangular", "triangular2", "exp_range"] = "triangular", + gamma=1.0, + scale_fn: Optional[Callable[[float], float]] = None, + scale_mode: Literal["cycle", "iterations"] = "cycle", + cycle_momentum=True, + base_momentum=0.8, + max_momentum=0.9, + last_epoch=-1, + verbose="deprecated", + ): + # Attach optimizer + if not isinstance(optimizer, Optimizer): + raise TypeError(f"{type(optimizer).__name__} is not an Optimizer") + self.optimizer = optimizer + + base_lrs = _format_param("base_lr", optimizer, base_lr) + if last_epoch == -1: + for lr, group in zip(base_lrs, optimizer.param_groups): + if isinstance(group["lr"], Tensor): + lr_val = lr.item() if isinstance(lr, Tensor) else lr + group["lr"].fill_(lr_val) + else: + group["lr"] = lr + + self.max_lrs = _format_param("max_lr", optimizer, max_lr) + + step_size_up = float(step_size_up) + step_size_down = ( + float(step_size_down) if step_size_down is not None else step_size_up + ) + self.total_size = step_size_up + step_size_down + self.step_ratio = step_size_up / self.total_size + + if mode not in ["triangular", "triangular2", "exp_range"] and scale_fn is None: + raise ValueError("mode is invalid and scale_fn is None") + + self.mode = mode + self.gamma = gamma + + self._scale_fn_ref: Callable[[float], float] + self._scale_fn_custom = scale_fn + self.scale_mode = scale_mode + self._init_scale_fn() + + self.cycle_momentum = cycle_momentum + if cycle_momentum: + if ( + "momentum" not in optimizer.defaults + and "betas" not in optimizer.defaults + ): + raise ValueError( + "optimizer must support momentum or beta1 with `cycle_momentum` option enabled" + ) + + self.use_beta1 = "betas" in self.optimizer.defaults + self.base_momentums = _format_param( + "base_momentum", optimizer, base_momentum + ) + self.max_momentums = _format_param("max_momentum", optimizer, max_momentum) + if last_epoch == -1: + for m_momentum, b_momentum, group in zip( + self.max_momentums, self.base_momentums, optimizer.param_groups + ): + if self.use_beta1: + group["betas"] = (m_momentum, *group["betas"][1:]) + else: + group["momentum"] = m_momentum + group["max_momentum"] = m_momentum + group["base_momentum"] = b_momentum + + super().__init__(optimizer, last_epoch, verbose) + self.base_lrs = base_lrs + + def _init_scale_fn(self): + if self._scale_fn_custom is not None: + return + if self.mode == "triangular": + self._scale_fn_ref = self._triangular_scale_fn + self.scale_mode = "cycle" + elif self.mode == "triangular2": + self._scale_fn_ref = self._triangular2_scale_fn + self.scale_mode = "cycle" + elif self.mode == "exp_range": + self._scale_fn_ref = partial(self._exp_range_scale_fn, self.gamma) + self.scale_mode = "iterations" + + def scale_fn(self, x) -> float: + if self._scale_fn_custom is not None: + return self._scale_fn_custom(x) + else: + return self._scale_fn_ref(x) # static method + + @staticmethod + def _triangular_scale_fn(x: float) -> float: + return 1.0 + + @staticmethod + def _triangular2_scale_fn(x: float) -> float: + return 1 / (2.0 ** (x - 1)) + + @staticmethod + def _exp_range_scale_fn(gamma: float, x: float) -> float: + return gamma**x + + def get_lr(self): + """Calculates the learning rate at batch index. This function treats + `self.last_epoch` as the last batch index. 
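+
+        As an editorial illustration (not upstream text): ``get_lr`` composes
+        the triangular waveform with ``scale_fn``, and the built-in policies
+        defined above can be probed directly as static methods:
+
+        >>> # xdoctest: +SKIP
+        >>> CyclicLR._triangular_scale_fn(3.0)   # constant amplitude each cycle
+        1.0
+        >>> CyclicLR._triangular2_scale_fn(3.0)  # halves per cycle: 1 / 2 ** (3 - 1)
+        0.25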
+ + If `self.cycle_momentum` is ``True``, this function has a side effect of + updating the optimizer's momentum. + """ + + _warn_get_lr_called_within_step(self) + + cycle = math.floor(1 + self.last_epoch / self.total_size) + x = 1.0 + self.last_epoch / self.total_size - cycle + if x <= self.step_ratio: + scale_factor = x / self.step_ratio + else: + scale_factor = (x - 1) / (self.step_ratio - 1) + + lrs = [] + for base_lr, max_lr in zip(self.base_lrs, self.max_lrs): + base_height = (max_lr - base_lr) * scale_factor + if self.scale_mode == "cycle": + lr = base_lr + base_height * self.scale_fn(cycle) + else: + lr = base_lr + base_height * self.scale_fn(self.last_epoch) + lrs.append(lr) + + if self.cycle_momentum: + momentums = [] + for base_momentum, max_momentum in zip( + self.base_momentums, self.max_momentums + ): + base_height = (max_momentum - base_momentum) * scale_factor + if self.scale_mode == "cycle": + momentum = max_momentum - base_height * self.scale_fn(cycle) + else: + momentum = max_momentum - base_height * self.scale_fn( + self.last_epoch + ) + momentums.append(momentum) + for param_group, momentum in zip(self.optimizer.param_groups, momentums): + if self.use_beta1: + param_group["betas"] = (momentum, *param_group["betas"][1:]) + else: + param_group["momentum"] = momentum + + return lrs + + def state_dict(self): + state = super().state_dict() + # We are dropping the `_scale_fn_ref` attribute because it is a + # `weakref.WeakMethod` and can't be pickled. + state.pop("_scale_fn_ref", None) + fn = state.pop("_scale_fn_custom") + state["_scale_fn_custom"] = None + if fn is not None and not isinstance(fn, types.FunctionType): + # The _scale_fn_custom will only be saved if it is a callable object + # and not if it is a function or lambda. + state["_scale_fn_custom"] = fn.__dict__.copy() + + return state + + def load_state_dict(self, state_dict): + fn = state_dict.pop("_scale_fn_custom") + super().load_state_dict(state_dict) + if fn is not None: + self._scale_fn_custom.__dict__.update(fn) + self._init_scale_fn() + + +class CosineAnnealingWarmRestarts(LRScheduler): + r"""Set the learning rate of each parameter group using a cosine annealing + schedule, where :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}` + is the number of epochs since the last restart and :math:`T_{i}` is the number + of epochs between two warm restarts in SGDR: + + .. math:: + \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{i}}\pi\right)\right) + + When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`. + When :math:`T_{cur}=0` after restart, set :math:`\eta_t=\eta_{max}`. + + It has been proposed in + `SGDR: Stochastic Gradient Descent with Warm Restarts`_. + + Args: + optimizer (Optimizer): Wrapped optimizer. + T_0 (int): Number of iterations until the first restart. + T_mult (int, optional): A factor by which :math:`T_{i}` increases after a restart. Default: 1. + eta_min (float, optional): Minimum learning rate. Default: 0. + last_epoch (int, optional): The index of the last epoch. Default: -1. + verbose (bool | str): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + .. 
_SGDR\: Stochastic Gradient Descent with Warm Restarts: + https://arxiv.org/abs/1608.03983 + """ + + def __init__( + self, + optimizer: Optimizer, + T_0: int, + T_mult=1, + eta_min=0, + last_epoch=-1, + verbose="deprecated", + ): + if T_0 <= 0 or not isinstance(T_0, int): + raise ValueError(f"Expected positive integer T_0, but got {T_0}") + if T_mult < 1 or not isinstance(T_mult, int): + raise ValueError(f"Expected integer T_mult >= 1, but got {T_mult}") + if not isinstance(eta_min, (float, int)): + raise ValueError( + f"Expected float or int eta_min, but got {eta_min} of type {type(eta_min)}" + ) + self.T_0 = T_0 + self.T_i = T_0 + self.T_mult = T_mult + self.eta_min = eta_min + self.T_cur = last_epoch + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + _warn_get_lr_called_within_step(self) + + return [ + self.eta_min + + (base_lr - self.eta_min) + * (1 + math.cos(math.pi * self.T_cur / self.T_i)) + / 2 + for base_lr in self.base_lrs + ] + + def step(self, epoch=None): + """Step could be called after every batch update + + Example: + >>> # xdoctest: +SKIP("Undefined vars") + >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) + >>> iters = len(dataloader) + >>> for epoch in range(20): + >>> for i, sample in enumerate(dataloader): + >>> inputs, labels = sample['inputs'], sample['labels'] + >>> optimizer.zero_grad() + >>> outputs = net(inputs) + >>> loss = criterion(outputs, labels) + >>> loss.backward() + >>> optimizer.step() + >>> scheduler.step(epoch + i / iters) + + This function can be called in an interleaved way. + + Example: + >>> # xdoctest: +SKIP("Undefined vars") + >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) + >>> for epoch in range(20): + >>> scheduler.step() + >>> scheduler.step(26) + >>> scheduler.step() # scheduler.step(27), instead of scheduler(20) + """ + + if epoch is None and self.last_epoch < 0: + epoch = 0 + + if epoch is None: + epoch = self.last_epoch + 1 + self.T_cur = self.T_cur + 1 + if self.T_cur >= self.T_i: + self.T_cur = self.T_cur - self.T_i + self.T_i = self.T_i * self.T_mult + else: + if epoch < 0: + raise ValueError(f"Expected non-negative epoch, but got {epoch}") + if epoch >= self.T_0: + if self.T_mult == 1: + self.T_cur = epoch % self.T_0 + else: + n = int( + math.log( + (epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult + ) + ) + self.T_cur = epoch - self.T_0 * (self.T_mult**n - 1) / ( + self.T_mult - 1 + ) + self.T_i = self.T_0 * self.T_mult ** (n) + else: + self.T_i = self.T_0 + self.T_cur = epoch + self.last_epoch = math.floor(epoch) + + with _enable_get_lr_call(self): + for i, data in enumerate(zip(self.optimizer.param_groups, self.get_lr())): + param_group, lr = data + param_group["lr"] = lr + + self._last_lr = [group["lr"] for group in self.optimizer.param_groups] + + +class _SchedulePhase(TypedDict): + end_step: float + start_lr: str + end_lr: str + start_momentum: str + end_momentum: str + + +class OneCycleLR(LRScheduler): + r"""Sets the learning rate of each parameter group according to the + 1cycle learning rate policy. The 1cycle policy anneals the learning + rate from an initial learning rate to some maximum learning rate and then + from that maximum learning rate to some minimum learning rate much lower + than the initial learning rate. + This policy was initially described in the paper `Super-Convergence: + Very Fast Training of Neural Networks Using Large Learning Rates`_. + + The 1cycle learning rate policy changes the learning rate after every batch. 
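+
+    .. note:: (editorial aside on the preceding ``CosineAnnealingWarmRestarts.step``)
+        With assumed values ``T_0=2, T_mult=2``, a call to ``step(7)`` takes the
+        closed-form branch: ``n = int(log2(7 / 2 * (2 - 1) + 1)) = 2``, so
+        ``T_cur = 7 - 2 * (2**2 - 1) / (2 - 1) = 1`` and ``T_i = 2 * 2**2 = 8``;
+        that is, epoch 7 sits one step past the restart at epoch 6.
+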
+ `step` should be called after a batch has been used for training. + + This scheduler is not chainable. + + Note also that the total number of steps in the cycle can be determined in one + of two ways (listed in order of precedence): + + #. A value for total_steps is explicitly provided. + #. A number of epochs (epochs) and a number of steps per epoch + (steps_per_epoch) are provided. + In this case, the number of total steps is inferred by + total_steps = epochs * steps_per_epoch + + You must either provide a value for total_steps or provide a value for both + epochs and steps_per_epoch. + + The default behaviour of this scheduler follows the fastai implementation of 1cycle, which + claims that "unpublished work has shown even better results by using only two phases". To + mimic the behaviour of the original paper instead, set ``three_phase=True``. + + Args: + optimizer (Optimizer): Wrapped optimizer. + max_lr (float or list): Upper learning rate boundaries in the cycle + for each parameter group. + total_steps (int): The total number of steps in the cycle. Note that + if a value is not provided here, then it must be inferred by providing + a value for epochs and steps_per_epoch. + Default: None + epochs (int): The number of epochs to train for. This is used along + with steps_per_epoch in order to infer the total number of steps in the cycle + if a value for total_steps is not provided. + Default: None + steps_per_epoch (int): The number of steps per epoch to train for. This is + used along with epochs in order to infer the total number of steps in the + cycle if a value for total_steps is not provided. + Default: None + pct_start (float): The percentage of the cycle (in number of steps) spent + increasing the learning rate. + Default: 0.3 + anneal_strategy (str): {'cos', 'linear'} + Specifies the annealing strategy: "cos" for cosine annealing, "linear" for + linear annealing. + Default: 'cos' + cycle_momentum (bool): If ``True``, momentum is cycled inversely + to learning rate between 'base_momentum' and 'max_momentum'. + Default: True + base_momentum (float or list): Lower momentum boundaries in the cycle + for each parameter group. Note that momentum is cycled inversely + to learning rate; at the peak of a cycle, momentum is + 'base_momentum' and learning rate is 'max_lr'. + Default: 0.85 + max_momentum (float or list): Upper momentum boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_momentum - base_momentum). + Note that momentum is cycled inversely + to learning rate; at the start of a cycle, momentum is 'max_momentum' + and learning rate is 'base_lr' + Default: 0.95 + div_factor (float): Determines the initial learning rate via + initial_lr = max_lr/div_factor + Default: 25 + final_div_factor (float): Determines the minimum learning rate via + min_lr = initial_lr/final_div_factor + Default: 1e4 + three_phase (bool): If ``True``, use a third phase of the schedule to annihilate the + learning rate according to 'final_div_factor' instead of modifying the second + phase (the first two phases will be symmetrical about the step indicated by + 'pct_start'). + last_epoch (int): The index of the last batch. This parameter is used when + resuming a training job. Since `step()` should be invoked after each + batch instead of after each epoch, this number represents the total + number of *batches* computed, not the total number of epochs computed. + When last_epoch=-1, the schedule is started from the beginning. 
+ Default: -1 + verbose (bool | str): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> data_loader = torch.utils.data.DataLoader(...) + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=len(data_loader), epochs=10) + >>> for epoch in range(10): + >>> for batch in data_loader: + >>> train_batch(...) + >>> optimizer.step() + >>> scheduler.step() + + + .. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates: + https://arxiv.org/abs/1708.07120 + """ + + def __init__( + self, + optimizer: Optimizer, + max_lr: Union[float, List[float]], + total_steps: Optional[int] = None, + epochs: Optional[int] = None, + steps_per_epoch: Optional[int] = None, + pct_start=0.3, + anneal_strategy: Literal["cos", "linear"] = "cos", + cycle_momentum=True, + base_momentum: Union[float, List[float]] = 0.85, + max_momentum: Union[float, List[float]] = 0.95, + div_factor=25.0, + final_div_factor=1e4, + three_phase=False, + last_epoch=-1, + verbose="deprecated", + ): + # Validate optimizer + if not isinstance(optimizer, Optimizer): + raise TypeError(f"{type(optimizer).__name__} is not an Optimizer") + self.optimizer = optimizer + + # Validate total_steps + if total_steps is not None: + if total_steps <= 0 or not isinstance(total_steps, int): + raise ValueError( + f"Expected positive integer total_steps, but got {total_steps}" + ) + self.total_steps = total_steps + elif epochs is not None and steps_per_epoch is not None: + if not isinstance(epochs, int) or epochs <= 0: + raise ValueError(f"Expected positive integer epochs, but got {epochs}") + if not isinstance(steps_per_epoch, int) or steps_per_epoch <= 0: + raise ValueError( + f"Expected positive integer steps_per_epoch, but got {steps_per_epoch}" + ) + self.total_steps = epochs * steps_per_epoch + else: + raise ValueError( + "You must define either total_steps OR (epochs AND steps_per_epoch)" + ) + + self._schedule_phases: List[_SchedulePhase] + if three_phase: + self._schedule_phases = [ + { + "end_step": float(pct_start * self.total_steps) - 1, + "start_lr": "initial_lr", + "end_lr": "max_lr", + "start_momentum": "max_momentum", + "end_momentum": "base_momentum", + }, + { + "end_step": float(2 * pct_start * self.total_steps) - 2, + "start_lr": "max_lr", + "end_lr": "initial_lr", + "start_momentum": "base_momentum", + "end_momentum": "max_momentum", + }, + { + "end_step": self.total_steps - 1, + "start_lr": "initial_lr", + "end_lr": "min_lr", + "start_momentum": "max_momentum", + "end_momentum": "max_momentum", + }, + ] + else: + self._schedule_phases = [ + { + "end_step": float(pct_start * self.total_steps) - 1, + "start_lr": "initial_lr", + "end_lr": "max_lr", + "start_momentum": "max_momentum", + "end_momentum": "base_momentum", + }, + { + "end_step": self.total_steps - 1, + "start_lr": "max_lr", + "end_lr": "min_lr", + "start_momentum": "base_momentum", + "end_momentum": "max_momentum", + }, + ] + + # Validate pct_start + if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float): + raise ValueError( + f"Expected float between 0 and 1 pct_start, but got {pct_start}" + ) + + # Validate anneal_strategy + if anneal_strategy not in ["cos", "linear"]: + raise ValueError( + f"anneal_strategy must be one of 'cos' or 'linear', 
instead got {anneal_strategy}" + ) + else: + self._anneal_func_type = anneal_strategy + + # Initialize learning rate variables + max_lrs = _format_param("max_lr", self.optimizer, max_lr) + if last_epoch == -1: + for idx, group in enumerate(self.optimizer.param_groups): + group["initial_lr"] = max_lrs[idx] / div_factor + group["max_lr"] = max_lrs[idx] + group["min_lr"] = group["initial_lr"] / final_div_factor + + # Initialize momentum variables + self.cycle_momentum = cycle_momentum + if self.cycle_momentum: + if ( + "momentum" not in self.optimizer.defaults + and "betas" not in self.optimizer.defaults + ): + raise ValueError( + "optimizer must support momentum or beta1 with `cycle_momentum` option enabled" + ) + self.use_beta1 = "betas" in self.optimizer.defaults + max_momentums = _format_param("max_momentum", optimizer, max_momentum) + base_momentums = _format_param("base_momentum", optimizer, base_momentum) + if last_epoch == -1: + for m_momentum, b_momentum, group in zip( + max_momentums, base_momentums, optimizer.param_groups + ): + if self.use_beta1: + group["betas"] = (m_momentum, *group["betas"][1:]) + else: + group["momentum"] = m_momentum + group["max_momentum"] = m_momentum + group["base_momentum"] = b_momentum + + super().__init__(optimizer, last_epoch, verbose) + + def _anneal_func(self, *args, **kwargs): + if hasattr(self, "_anneal_func_type"): + if self._anneal_func_type == "cos": + return self._annealing_cos(*args, **kwargs) + elif self._anneal_func_type == "linear": + return self._annealing_linear(*args, **kwargs) + else: + raise ValueError(f"Unknown _anneal_func_type: {self._anneal_func_type}") + else: + # For BC + return self.anneal_func(*args, **kwargs) # type: ignore[attr-defined] + + @staticmethod + def _annealing_cos(start, end, pct): + "Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0." + cos_out = math.cos(math.pi * pct) + 1 + return end + (start - end) / 2.0 * cos_out + + @staticmethod + def _annealing_linear(start, end, pct): + "Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0." + return (end - start) * pct + start + + def get_lr(self): + _warn_get_lr_called_within_step(self) + + lrs = [] + step_num = self.last_epoch + + if step_num > self.total_steps: + raise ValueError( + f"Tried to step {step_num} times. 
The specified number of total steps is {self.total_steps}" # noqa: UP032 + ) + + for group in self.optimizer.param_groups: + start_step = 0.0 + for i, phase in enumerate(self._schedule_phases): + end_step = phase["end_step"] + if step_num <= end_step or i == len(self._schedule_phases) - 1: + pct = (step_num - start_step) / (end_step - start_step) + computed_lr = self._anneal_func( + group[phase["start_lr"]], group[phase["end_lr"]], pct + ) + if self.cycle_momentum: + computed_momentum = self._anneal_func( + group[phase["start_momentum"]], + group[phase["end_momentum"]], + pct, + ) + break + start_step = phase["end_step"] + + lrs.append(computed_lr) # type: ignore[possibly-undefined] + if self.cycle_momentum: + if self.use_beta1: + group["betas"] = (computed_momentum, *group["betas"][1:]) # type: ignore[possibly-undefined] + else: + group[ + "momentum" + ] = computed_momentum # type: ignore[possibly-undefined] + + return lrs diff --git a/parrot/lib/python3.10/site-packages/torch/optim/nadam.py b/parrot/lib/python3.10/site-packages/torch/optim/nadam.py new file mode 100644 index 0000000000000000000000000000000000000000..cd2eeff92c058404ef7b6f78b160ea682612bed6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/nadam.py @@ -0,0 +1,639 @@ +# mypy: allow-untyped-defs +from typing import cast, List, Optional, Tuple, Union + +import torch +from torch import Tensor +from .optimizer import ( + _capturable_doc, + _default_to_fused_or_foreach, + _differentiable_doc, + _disable_dynamo_if_unsupported, + _dispatch_sqrt, + _foreach_doc, + _get_capturable_supported_devices, + _get_scalar_dtype, + _get_value, + _maximize_doc, + _stack_if_compiling, + _use_grad_for_differentiable, + _view_as_real, + Optimizer, + ParamsT, +) + +__all__ = ["NAdam", "nadam"] + + +class NAdam(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = 2e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0, + momentum_decay: float = 4e-3, + decoupled_weight_decay: bool = False, + *, + foreach: Optional[bool] = None, + maximize: bool = False, + capturable: bool = False, + differentiable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + if not 0.0 <= momentum_decay: + raise ValueError(f"Invalid momentum_decay value: {momentum_decay}") + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + momentum_decay=momentum_decay, + decoupled_weight_decay=decoupled_weight_decay, + maximize=maximize, + foreach=foreach, + capturable=capturable, + differentiable=differentiable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("maximize", False) + group.setdefault("foreach", None) + group.setdefault("capturable", False) + group.setdefault("differentiable", False) + group.setdefault("decoupled_weight_decay", False) + for p in group["params"]: + p_state = self.state.get(p, []) + if len(p_state) != 0: + if not torch.is_tensor(p_state["step"]): + step_val = float(p_state["step"]) + p_state["step"] = ( + torch.tensor( + step_val, 
dtype=_get_scalar_dtype(), device=p.device + ) + if group["capturable"] + else torch.tensor(step_val, dtype=_get_scalar_dtype()) + ) + if not torch.is_tensor(p_state["mu_product"]): + mu_prod_val = p_state["mu_product"] + p_state["mu_product"] = ( + torch.tensor( + mu_prod_val, dtype=_get_scalar_dtype(), device=p.device + ) + if group["capturable"] + else torch.tensor(mu_prod_val, dtype=_get_scalar_dtype()) + ) + + def _init_group( + self, + group, + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + mu_products, + state_steps, + ): + has_complex = False + for p in group["params"]: + if p.grad is not None: + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError("NAdam does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + # Lazy state initialization + if len(state) == 0: + # note(crcrpar): [special device hosting for step] + # Deliberately host `step` and `mu_product` on CPU if capturable is False. + # This is because kernel launches are costly on CUDA and XLA. + state["step"] = ( + torch.zeros((), dtype=_get_scalar_dtype(), device=p.device) + if group["capturable"] + else torch.tensor(0.0, dtype=_get_scalar_dtype()) + ) + state["mu_product"] = ( + torch.ones((), dtype=_get_scalar_dtype(), device=p.device) + if group["capturable"] + else torch.tensor(1.0, dtype=_get_scalar_dtype()) + ) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + mu_products.append(state["mu_product"]) + state_steps.append(state["step"]) + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad: List[Tensor] = [] + grads: List[Tensor] = [] + exp_avgs: List[Tensor] = [] + exp_avg_sqs: List[Tensor] = [] + mu_products: List[Tensor] = [] + state_steps: List[Tensor] = [] + beta1, beta2 = cast(Tuple[float, float], group["betas"]) + + has_complex = self._init_group( + group, + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + mu_products, + state_steps, + ) + + nadam( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + mu_products, + state_steps, + beta1=beta1, + beta2=beta2, + lr=group["lr"], + weight_decay=group["weight_decay"], + momentum_decay=group["momentum_decay"], + eps=group["eps"], + maximize=group["maximize"], + decoupled_weight_decay=group["decoupled_weight_decay"], + foreach=group["foreach"], + capturable=group["capturable"], + differentiable=group["differentiable"], + has_complex=has_complex, + ) + + return loss + + +NAdam.__doc__ = ( + r"""Implements NAdam algorithm. + + .. 
math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma_t \text{ (lr)}, \: \beta_1,\beta_2 \text{ (betas)}, + \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)} \\ + &\hspace{13mm} \: \lambda \text{ (weight decay)}, \:\psi \text{ (momentum decay)} \\ + &\hspace{13mm} \: \textit{decoupled\_weight\_decay}, \:\textit{maximize} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + v_0 \leftarrow 0 \text{ ( second moment)} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} \\ + &\hspace{5mm} \textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm}\textbf{if} \: \textit{decoupled\_weight\_decay} \\ + &\hspace{15mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\ + &\hspace{10mm}\textbf{else} \\ + &\hspace{15mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm} \mu_t \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{t \psi} \big) \\ + &\hspace{5mm} \mu_{t+1} \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{(t+1)\psi}\big)\\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{5mm}\widehat{m_t} \leftarrow \mu_{t+1} m_t/(1-\prod_{i=1}^{t+1}\mu_i)\\[-1.ex] + & \hspace{11mm} + (1-\mu_t) g_t /(1-\prod_{i=1}^{t} \mu_{i}) \\ + &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Incorporating Nesterov Momentum into Adam`_. + """ + + rf""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 2e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + momentum_decay (float, optional): momentum momentum_decay (default: 4e-3) + decoupled_weight_decay (bool, optional): whether to use decoupled weight + decay as in AdamW to obtain NAdamW (default: False) + {_foreach_doc} + {_maximize_doc} + {_capturable_doc} + {_differentiable_doc} + + .. _Incorporating Nesterov Momentum into Adam: + https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ + .. 
_Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + + """ +) + + +def _single_tensor_nadam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + mu_products: List[Tensor], + state_steps: List[Tensor], + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + momentum_decay: float, + eps: float, + decoupled_weight_decay: bool, + maximize: bool, + capturable: bool, + differentiable: bool, + has_complex: bool, +): + for i, param in enumerate(params): + grad = grads[i] if not maximize else -grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + mu_product = mu_products[i] + step_t = state_steps[i] + + if torch.is_complex(param): + param = torch.view_as_real(param) + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + exp_avg_sq = torch.view_as_real(exp_avg_sq) + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices() + assert ( + param.device.type == mu_product.device.type == step_t.device.type + and param.device.type in capturable_supported_devices + ), ( + f"If capturable=True, params, mu_products and state_steps must be " + f"on supported devices: {capturable_supported_devices}." + ) + + # update step + step_t += 1 + + if capturable: + step = step_t + else: + step = _get_value(step_t) + + bias_correction2 = 1 - beta2**step + + if weight_decay != 0: + if decoupled_weight_decay: + # Perform stepweight decay + param.mul_(1 - lr * weight_decay) + else: + grad = grad.add(param, alpha=weight_decay) + + # calculate the momentum cache \mu^{t} and \mu^{t+1} + mu = beta1 * (1.0 - 0.5 * (0.96 ** (step * momentum_decay))) + mu_next = beta1 * (1.0 - 0.5 * (0.96 ** ((step + 1) * momentum_decay))) + + # update mu_product + mu_product *= mu + + # decay the first and second moment running average coefficient + exp_avg.lerp_(grad, 1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + denom = exp_avg_sq.div(bias_correction2).sqrt() + + if differentiable or capturable: + denom = denom.add(eps) + # Make autograd track the operations + # by updating the grad and exp_avg directly and not using the + # scalar "value" argument of addcdiv. 
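+            # Editorial restatement (no new behavior): the two addcdiv calls
+            # below implement
+            #   param -= lr * (1 - mu) / (1 - mu_product)          * grad    / denom
+            #   param -= lr * mu_next / (1 - mu_product * mu_next) * exp_avg / denom
+            # i.e. the Nesterov-corrected first-moment estimate from the NAdam
+            # paper, split into a grad term and an exp_avg term so each can be
+            # bias-corrected by its own running mu product.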
+ mu_product_next = mu_product * mu_next + grad = grad * (-lr * (1.0 - mu) / (1.0 - mu_product)) + exp_avg = exp_avg * (-lr * mu_next / (1.0 - mu_product_next)) + param.addcdiv_(grad, denom) + param.addcdiv_(exp_avg, denom) + else: + mu_product_next = _get_value(mu_product) * mu_next + denom.add_(eps) + param.addcdiv_( + grad, denom, value=(-lr * (1.0 - mu) / (1.0 - _get_value(mu_product))) + ) + param.addcdiv_( + exp_avg, denom, value=(-lr * mu_next) / (1.0 - mu_product_next) + ) + + +def _multi_tensor_nadam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + mu_products: List[Tensor], + state_steps: List[Tensor], + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + momentum_decay: float, + eps: float, + decoupled_weight_decay: bool, + maximize: bool, + capturable: bool, + differentiable: bool, + has_complex: bool, +): + if len(params) == 0: + return + + assert not differentiable, "_foreach ops don't support autograd" + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices( + supports_xla=False + ) + assert all( + p.device.type == mp.device.type == step.device.type + and p.device.type in capturable_supported_devices + for p, mp, step in zip(params, mu_products, state_steps) + ), f"If capturable=True, params, mu_products, and state_steps must be on supported devices: {capturable_supported_devices}." + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps] + ) + for ( + grouped_params, + grouped_grads, + grouped_exp_avgs, + grouped_exp_avg_sqs, + grouped_mu_products, + grouped_state_steps, + ), _ in grouped_tensors.values(): + # handle complex + if has_complex: + _view_as_real( + grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs + ) + + if maximize: + grouped_grads = torch._foreach_neg(grouped_grads) # type: ignore[assignment] + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. 
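+        # Editorial note: for CPU state steps the branch below is semantically
+        # `step += 1.0 * 1.0` per tensor; wrapping the scalar into a CPU tensor
+        # once here avoids re-wrapping it on every iteration of the fallback loop.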
+ if grouped_state_steps[0].is_cpu: + torch._foreach_add_( + grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0 + ) + else: + torch._foreach_add_(grouped_state_steps, 1) + + if weight_decay != 0: + if decoupled_weight_decay: + # Perform stepweight decay + torch._foreach_mul_(grouped_params, 1 - lr * weight_decay) + else: + # Re-use the intermediate memory (grouped_grads) already allocated for maximize + if maximize: + torch._foreach_add_( + grouped_grads, grouped_params, alpha=weight_decay + ) + else: + grouped_grads = torch._foreach_add( # type: ignore[assignment] + grouped_grads, grouped_params, alpha=weight_decay + ) + + # Decay the first and second moment running average coefficient + torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1) + + torch._foreach_mul_(grouped_exp_avg_sqs, beta2) + torch._foreach_addcmul_( + grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2 + ) + + exp_avg_sq_sqrt = torch._foreach_sqrt(grouped_exp_avg_sqs) + + bias_correction_sqrt: Union[Tuple[Tensor, ...], List[Tensor]] + mus: Union[Tuple[Tensor, ...], List[Tensor]] + mu_nexts: Union[Tuple[Tensor, ...], List[Tensor]] + if capturable: + # mus will be beta1 * (1 - 0.5 * 0.96 ** (step * momentum_decay)) + exponent = torch._foreach_mul(grouped_state_steps, momentum_decay) + mus = torch._foreach_pow(0.96, exponent) + torch._foreach_mul_(mus, -0.5) + torch._foreach_add_(mus, 1.0) + torch._foreach_mul_(mus, beta1) + + # mu_nexts will be beta1 * (1 - 0.5 * 0.96 ** ((step + 1) * momentum_decay)) + torch._foreach_add_(exponent, momentum_decay) + mu_nexts = torch._foreach_pow(0.96, exponent) + torch._foreach_mul_(mu_nexts, -0.5) + torch._foreach_add_(mu_nexts, 1.0) + torch._foreach_mul_(mu_nexts, beta1) + + # save peak memory as we don't need exponent anymore + del exponent + + bias_correction_sqrt = torch._foreach_pow(beta2, grouped_state_steps) + # foreach_sub doesn't allow a scalar as the first arg + torch._foreach_sub_(bias_correction_sqrt, 1.0) + torch._foreach_neg_(bias_correction_sqrt) + torch._foreach_sqrt_(bias_correction_sqrt) + else: + bias_correction_sqrt = [ + _dispatch_sqrt(1 - beta2 ** _get_value(step)) + for step in grouped_state_steps + ] + mus = [ + beta1 * (1.0 - 0.5 * (0.96 ** (_get_value(step) * momentum_decay))) + for step in grouped_state_steps + ] + mu_nexts = [ + beta1 + * (1.0 - 0.5 * (0.96 ** ((_get_value(step) + 1) * momentum_decay))) + for step in grouped_state_steps + ] + + # update mu_products + torch._foreach_mul_(grouped_mu_products, mus) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + + # explicitly delete bias_correction refs to save memory + del bias_correction_sqrt + + if capturable: + # Build up the step_size multiplier for grad, reusing mus' memory + torch._foreach_sub_(mus, 1.0) + torch._foreach_mul_(mus, lr) + # foreach_sub doesn't allow a scalar as the first arg + denom = torch._foreach_sub(grouped_mu_products, 1.0) + torch._foreach_neg_(denom) + torch._foreach_div_(mus, denom) + # - lr * (1 - mu) / (1 - mu_product) + step_size_grads = mus + # explicitly delete denom to save memory + del denom + + # Build up the step_size multiplier for exp_avg, reusing mu_nexts' memory + denom = torch._foreach_mul(grouped_mu_products, mu_nexts) + torch._foreach_mul_(mu_nexts, lr) + # foreach_sub doesn't allow a scalar as the first arg, but it's okay because + # we need a negative here anyway + torch._foreach_sub_(denom, 1.0) + torch._foreach_div_(mu_nexts, denom) + # - lr * mu_next / (1 - mu_product 
* mu_next) + step_size_expavg = mu_nexts + # explicitly delete denom to save memory + del denom + + # we cannot inplace into step_size_grads cuz it is a list of ScalarTensors + # and mul'ing with grouped_grads will result in a list of bigger Tensors + numerator = torch._foreach_mul(step_size_grads, grouped_grads) + torch._foreach_addcmul_(numerator, step_size_expavg, grouped_exp_avgs) + + # finally, update params + torch._foreach_addcdiv_(grouped_params, numerator, exp_avg_sq_sqrt) + else: + step_size_grads = _stack_if_compiling( + [ + (_get_value(lr) * (1.0 - mu) / (1.0 - _get_value(mu_product))) * -1 + for mu_product, mu in zip(grouped_mu_products, mus) + ] + ) + step_size_expavg = _stack_if_compiling( + [ + ( + _get_value(lr) + * mu_next + / (1.0 - _get_value(mu_product) * mu_next) + ) + * -1 + for mu_product, mu_next in zip(grouped_mu_products, mu_nexts) + ] + ) + + torch._foreach_addcdiv_( + grouped_params, grouped_grads, exp_avg_sq_sqrt, step_size_grads # type: ignore[arg-type] + ) + torch._foreach_addcdiv_( + grouped_params, grouped_exp_avgs, exp_avg_sq_sqrt, step_size_expavg # type: ignore[arg-type] + ) + + +@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_nadam) +def nadam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + mu_products: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + decoupled_weight_decay: bool = False, + foreach: Optional[bool] = None, + capturable: bool = False, + differentiable: bool = False, + has_complex: bool = False, + maximize: bool = False, + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + momentum_decay: float, + eps: float, +): + r"""Functional API that performs NAdam algorithm computation. + + See :class:`~torch.optim.NAdam` for details. 
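+
+    A minimal illustrative call (editorial sketch; the tensors below are
+    assumed placeholder state, not upstream test values):
+
+    >>> # xdoctest: +SKIP
+    >>> p = torch.zeros(3, requires_grad=True)
+    >>> p.grad = torch.ones(3)
+    >>> nadam(
+    ...     [p], [p.grad],
+    ...     [torch.zeros(3)], [torch.zeros(3)],        # exp_avgs, exp_avg_sqs
+    ...     [torch.tensor(1.0)], [torch.tensor(0.0)],  # mu_products, state_steps
+    ...     beta1=0.9, beta2=0.999, lr=2e-3,
+    ...     weight_decay=0.0, momentum_decay=4e-3, eps=1e-8,
+    ... )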
+ """ + + if not all(isinstance(t, torch.Tensor) for t in state_steps): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + if not all(isinstance(t, torch.Tensor) for t in mu_products): + raise RuntimeError( + "API has changed, `mu_products` argument must contain a list of singleton tensors" + ) + + if foreach is None: + _, foreach = _default_to_fused_or_foreach( + params, differentiable, use_fused=False + ) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_nadam + else: + func = _single_tensor_nadam + + func( + params, + grads, + exp_avgs, + exp_avg_sqs, + mu_products, + state_steps, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + momentum_decay=momentum_decay, + maximize=maximize, + decoupled_weight_decay=decoupled_weight_decay, + eps=eps, + capturable=capturable, + differentiable=differentiable, + has_complex=has_complex, + ) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/optimizer.py b/parrot/lib/python3.10/site-packages/torch/optim/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..582dc2105a5a48918ed67f1fe6bacc5efb436278 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/optimizer.py @@ -0,0 +1,1047 @@ +# mypy: allow-untyped-defs +import functools +import math +import warnings +from collections import defaultdict, OrderedDict +from copy import deepcopy +from itertools import chain +from typing import ( + Any, + Callable, + cast, + DefaultDict, + Dict, + Hashable, + Iterable, + List, + Optional, + overload, + Set, + Tuple, + TypeVar, + Union, +) +from typing_extensions import ParamSpec, Self, TypeAlias + +import torch +import torch.utils.hooks as hooks +from torch._utils import is_compiling +from torch.utils._foreach_utils import ( + _get_foreach_kernels_supported_devices, + _get_fused_kernels_supported_devices, + _group_tensors_by_device_and_dtype, + Indices, +) +from torch.utils.hooks import RemovableHandle + +Args: TypeAlias = Tuple[Any, ...] +Kwargs: TypeAlias = Dict[str, Any] +StateDict: TypeAlias = Dict[str, Any] +TensorListList: TypeAlias = List[List[torch.Tensor]] +DeviceDict = Dict[Optional[torch.device], torch.Tensor] + + +GlobalOptimizerPreHook: TypeAlias = Callable[ + ["Optimizer", Args, Kwargs], Optional[Tuple[Args, Kwargs]] +] +GlobalOptimizerPostHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], None] + +__all__ = [ + "Optimizer", + "register_optimizer_step_pre_hook", + "register_optimizer_step_post_hook", +] +_global_optimizer_pre_hooks: Dict[int, GlobalOptimizerPreHook] = OrderedDict() +_global_optimizer_post_hooks: Dict[int, GlobalOptimizerPostHook] = OrderedDict() +_foreach_supported_types = [torch.Tensor, torch.nn.parameter.Parameter] + + +class _RequiredParameter: + """Singleton class representing a required parameter for an Optimizer.""" + + def __repr__(self) -> str: + return "" + + +required = _RequiredParameter() + + +def _use_grad_for_differentiable(func): + def _use_grad(self, *args, **kwargs): + import torch._dynamo + + prev_grad = torch.is_grad_enabled() + try: + # Note on graph break below: + # we need to graph break to ensure that aot respects the no_grad annotation. 
+ # This is important for perf because without this, functionalization will generate an epilogue + # which updates the mutated parameters of the optimizer which is *not* visible to inductor, as a result, + # inductor will allocate for every parameter in the model, which is horrible. + # With this, aot correctly sees that this is an inference graph, and functionalization will generate + # an epilogue which is appended to the graph, which *is* visible to inductor, as a result, inductor sees that + # step is in place and is able to avoid the extra allocation. + # In the future, we will either 1) continue to graph break on backward, so this graph break does not matter + # or 2) have a fully fused forward and backward graph, which will have no_grad by default, and we can remove this + # graph break to allow the fully fused fwd-bwd-optimizer graph to be compiled. + # see https://github.com/pytorch/pytorch/issues/104053 + torch.set_grad_enabled(self.defaults["differentiable"]) + torch._dynamo.graph_break() + ret = func(self, *args, **kwargs) + finally: + torch._dynamo.graph_break() + torch.set_grad_enabled(prev_grad) + return ret + + functools.update_wrapper(_use_grad, func) + return _use_grad + + +def _get_value(x): + # item is significantly faster than a cpu tensor in eager mode + if not torch.jit.is_scripting() and is_compiling(): + return x + else: + return x.item() if isinstance(x, torch.Tensor) else x + + +def _stack_if_compiling(x): + if not torch.jit.is_scripting() and is_compiling(): + return torch.stack(x) + else: + return x + + +def _dispatch_sqrt( + x: float, +): # float annotation is needed because of torchscript type inference + if not torch.jit.is_scripting() and isinstance(x, torch.Tensor): + return x.sqrt() + else: + return math.sqrt(x) + + +def _disable_dynamo_if_unsupported(single_tensor_fn=None): + # workaround for torchscript BC + # it requires all called functions to be in the + # global environment at the site at which the + # maybe_fallback closure is created + if single_tensor_fn: + globals()[single_tensor_fn.__name__] = single_tensor_fn + + def wrapper(func): + import inspect + + disabled_func = torch._disable_dynamo(func) + ps = inspect.signature(func).parameters + has_state_steps = True + try: + state_steps_ind = list(ps.keys()).index("state_steps") + except ValueError: + has_state_steps = False + + # Today, there are cases where we stack state steps + # and pass them as the value arg of foreach ops. + # Having state steps on cuda as the value arg is not supported in eager, + # but this only occurs in the rare case that the user explicitly deletes + # the capturable flag. If capturable=True, this is not a problem. + @functools.wraps(func) + def maybe_fallback(*args, **kwargs): + if is_compiling() and ( + not kwargs.get("capturable", False) + and has_state_steps + and (args[state_steps_ind] and args[state_steps_ind][0].is_cuda) + or ( + "state_steps" in kwargs + and kwargs["state_steps"] + and kwargs["state_steps"][0].is_cuda + ) + ): + return disabled_func(*args, **kwargs) + else: + return func(*args, **kwargs) + + return maybe_fallback + + return wrapper + + +# For any optimizer with a faster implementation, we attempt to default to the +# fastest + stablest whenever possible. For foreach, the requirements are to have +# native params all on CUDA. For fused, there's currently the additional requirement +# that the tensors' dtypes must be floating point. 
Neither alternative supports +# torch.jit.script nor differentiable, so we fall back to the single tensor +# implementation in those cases. +def _default_to_fused_or_foreach( + params: List[torch.Tensor], differentiable: bool, use_fused: bool = False +) -> Tuple[bool, bool]: + if torch.jit.is_scripting() or differentiable: + return False, False + + fused_supported_devices = _get_fused_kernels_supported_devices() + foreach_supported_devices = _get_foreach_kernels_supported_devices() + fused = use_fused and all( + p is None + or ( + type(p) in _foreach_supported_types + and p.device.type in fused_supported_devices + and torch.is_floating_point(p) + ) + for p in params + ) + foreach = not fused and all( + p is None + or ( + type(p) in _foreach_supported_types + and p.device.type in foreach_supported_devices + ) + for p in params + ) + return fused, foreach + + +def _view_as_real(params, *state_and_grads): + for i, p in enumerate(params): + if torch.is_complex(p): + params[i] = torch.view_as_real(params[i]) + for s in state_and_grads: + s[i] = torch.view_as_real(s[i]) + + +def _get_scalar_dtype(is_fused=None): + if is_fused: + return torch.float32 + return ( + torch.float64 if torch.get_default_dtype() == torch.float64 else torch.float32 + ) + + +def _get_capturable_supported_devices(supports_xla: bool = True) -> List[str]: + r"""Return the device type list that supports capturable optimizer.""" + capturable_supported_devices = ["cuda"] + if not torch.jit.is_scripting(): + capturable_supported_devices.append(torch._C._get_privateuse1_backend_name()) + if supports_xla: + capturable_supported_devices.append("xla") + return capturable_supported_devices + + +# Common doc strings among optimizers +_foreach_doc = r"""foreach (bool, optional): whether foreach implementation of optimizer + is used. If unspecified by the user (so foreach is None), we will try to use + foreach over the for-loop implementation on CUDA, since it is usually + significantly more performant. Note that the foreach implementation uses + ~ sizeof(params) more peak memory than the for-loop version due to the intermediates + being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer + parameters through the optimizer at a time or switch this flag to False (default: None)""" + +_fused_doc = r"""fused (bool, optional): whether the fused implementation is used. + Currently, `torch.float64`, `torch.float32`, `torch.float16`, and `torch.bfloat16` + are supported. (default: None) + + .. note:: The foreach and fused implementations are typically faster than the for-loop, + single-tensor implementation. Thus, if the user has not specified BOTH flags + (i.e., when foreach = fused = None), we will attempt defaulting to the foreach + implementation when the tensors are all on CUDA. For example, if the user specifies + True for fused but nothing for foreach, we will run the fused implementation. If + the user specifies False for foreach but nothing for fused (or False for fused but + nothing for foreach), we will run the for-loop implementation. If the user specifies + True for both foreach and fused, we will prioritize fused over foreach, as it is + typically faster. We attempt to use the fastest, so the hierarchy goes fused -> + foreach -> for-loop. 
HOWEVER, since the fused implementation is relatively new, + we want to give it sufficient bake-in time, so we default to foreach and NOT + fused when the user has not specified either flag.""" + +_capturable_doc = r"""capturable (bool, optional): whether this instance is safe to + capture in a CUDA graph. Passing True can impair ungraphed performance, + so if you don't intend to graph capture this instance, leave it False + (default: False)""" + +_differentiable_doc = r"""differentiable (bool, optional): whether autograd should + occur through the optimizer step in training. Otherwise, the step() + function runs in a torch.no_grad() context. Setting to True can impair + performance, so leave it False if you don't intend to run autograd + through this instance (default: False)""" + +_maximize_doc = r"""maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False)""" + + +def register_optimizer_step_pre_hook(hook: GlobalOptimizerPreHook) -> RemovableHandle: + r"""Register a pre hook common to all optimizers. The hook should have the following + signature:: + + hook(optimizer, args, kwargs) -> None or modified args and kwargs + + Args: + hook (Callable): A user defined hook which is registered on all optimizers. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(_global_optimizer_pre_hooks) + _global_optimizer_pre_hooks[handle.id] = hook + return handle + + +def register_optimizer_step_post_hook(hook: GlobalOptimizerPostHook) -> RemovableHandle: + r"""Register a post hook common to all optimizers. The hook should have the following + signature:: + + hook(optimizer, args, kwargs) -> None + + Args: + hook (Callable): A user defined hook which is registered on all optimizers. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(_global_optimizer_post_hooks) + _global_optimizer_post_hooks[handle.id] = hook + return handle + + +ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]] + +_P = ParamSpec("_P") +R = TypeVar("R") +T = TypeVar("T") + + +class Optimizer: + r"""Base class for all optimizers. + + .. warning:: + Parameters need to be specified as collections that have a deterministic + ordering that is consistent between runs. Examples of objects that don't + satisfy those properties are sets and iterators over values of dictionaries. + + Args: + params (iterable): an iterable of :class:`torch.Tensor` s or + :class:`dict` s. Specifies what Tensors should be optimized. + defaults: (dict): a dict containing default values of optimization + options (used when a parameter group doesn't specify them). 
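+
+    Example (editorial sketch; ``model`` is a placeholder module):
+
+        >>> # xdoctest: +SKIP
+        >>> optimizer = torch.optim.SGD([
+        ...     {"params": model.base.parameters(), "lr": 1e-2},
+        ...     {"params": model.head.parameters()},  # inherits the default lr
+        ... ], lr=1e-3, momentum=0.9)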
+ """ + + OptimizerPreHook: TypeAlias = Callable[[Self, Args, Kwargs], Optional[Tuple[Args, Kwargs]]] # type: ignore[misc] + OptimizerPostHook: TypeAlias = Callable[[Self, Args, Kwargs], None] # type: ignore[misc] + + _optimizer_step_pre_hooks: Dict[int, OptimizerPreHook] + _optimizer_step_post_hooks: Dict[int, OptimizerPostHook] + _optimizer_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]' + _optimizer_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]' + _optimizer_load_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]' + _optimizer_load_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]' + + def __init__(self, params: ParamsT, defaults: Dict[str, Any]) -> None: + torch._C._log_api_usage_once("python.optimizer") + self.defaults = defaults + self._optimizer_step_pre_hooks = OrderedDict() + self._optimizer_step_post_hooks = OrderedDict() + self._optimizer_state_dict_pre_hooks = OrderedDict() + self._optimizer_state_dict_post_hooks = OrderedDict() + self._optimizer_load_state_dict_pre_hooks = OrderedDict() + self._optimizer_load_state_dict_post_hooks = OrderedDict() + + self._patch_step_function() + + if isinstance(params, torch.Tensor): + raise TypeError( + "params argument given to the optimizer should be " + "an iterable of Tensors or dicts, but got " + torch.typename(params) + ) + + self.state: DefaultDict[torch.Tensor, Any] = defaultdict(dict) + self.param_groups: List[Dict[str, Any]] = [] + + param_groups = list(params) + if len(param_groups) == 0: + raise ValueError("optimizer got an empty parameter list") + if not isinstance(param_groups[0], dict): + param_groups = [{"params": param_groups}] + + for param_group in param_groups: + self.add_param_group(cast(dict, param_group)) + + # Allows _cuda_graph_capture_health_check to rig a poor man's TORCH_WARN_ONCE in python, + # which I don't think exists + # https://github.com/pytorch/pytorch/issues/72948 + self._warned_capturable_if_run_uncaptured = True + + def __getstate__(self) -> Dict[str, Any]: + return { + "defaults": self.defaults, + "state": self.state, + "param_groups": self.param_groups, + } + + def __setstate__(self, state: Dict[str, Any]) -> None: + self.__dict__.update(state) + if "_optimizer_step_pre_hooks" not in self.__dict__: + self._optimizer_step_pre_hooks = OrderedDict() + if "_optimizer_step_post_hooks" not in self.__dict__: + self._optimizer_step_post_hooks = OrderedDict() + if "_optimizer_state_dict_pre_hooks" not in self.__dict__: + self._optimizer_state_dict_pre_hooks = OrderedDict() + if "_optimizer_state_dict_post_hooks" not in self.__dict__: + self._optimizer_state_dict_post_hooks = OrderedDict() + if "_optimizer_load_state_dict_pre_hooks" not in self.__dict__: + self._optimizer_load_state_dict_pre_hooks = OrderedDict() + if "_optimizer_load_state_dict_post_hooks" not in self.__dict__: + self._optimizer_load_state_dict_post_hooks = OrderedDict() + self._patch_step_function() # To support multiprocessing pickle/unpickle + self.defaults.setdefault("differentiable", False) + + def __repr__(self) -> str: + format_string = self.__class__.__name__ + " (" + for i, group in enumerate(self.param_groups): + format_string += "\n" + format_string += f"Parameter Group {i}\n" + for key in sorted(group.keys()): + if key != "params": + format_string += f" {key}: {group[key]}\n" + format_string += ")" + return format_string + + # Currently needed by Adam and AdamW + def 
_cuda_graph_capture_health_check(self) -> None:
+        # Note [torch.compile x capturable]
+        # If we are compiling, we try to take the capturable path automatically by
+        # setting the flag to True during tracing. Due to this, we skip all the checks
+        # normally required for determining whether we can use CUDA graphs and
+        # shunt the responsibility to torch.inductor. This saves time during tracing
+        # since the checks are slow without sacrificing UX since inductor will warn
+        # later if CUDA graphs cannot be enabled, e.g.,
+        # https://github.com/pytorch/pytorch/blob/d3ba8901d8640eb16f88b2bfef9df7fa383d4b47/torch/_inductor/compile_fx.py#L390.
+        # Thus, when compiling, inductor will determine if cudagraphs
+        # can be enabled based on whether there is input mutation or CPU tensors.
+        if (
+            not is_compiling()
+            and torch.backends.cuda.is_built()
+            and torch.cuda.is_available()
+        ):
+            capturing = torch.cuda.is_current_stream_capturing()
+
+            if capturing and not all(
+                group["capturable"] for group in self.param_groups
+            ):
+                raise RuntimeError(
+                    "Attempting CUDA graph capture of step() for an instance of "
+                    + self.__class__.__name__
+                    + " but param_groups' capturable is False."
+                )
+
+            if (
+                (not getattr(self, "_warned_capturable_if_run_uncaptured", False))
+                and all(group["capturable"] for group in self.param_groups)
+                and (not capturing)
+            ):
+                warnings.warn(
+                    "This instance was constructed with capturable=True, or some of the param_groups came with capturable=True, "
+                    "but step() is running without CUDA graph capture. If you never intend to graph-capture this "
+                    "instance, capturable=True can impair performance, and you should set capturable=False."
+                )
+                self._warned_capturable_if_run_uncaptured = True
+
+    def _optimizer_step_code(self) -> None:
+        """Entry point for ``torch.profiler``.
+
+        When Python tracing is enabled, the profiler will hook into this
+        function at the CPython level to inspect the optimizer's parameters and
+        param groups. It is called after `step()` since many optimizers
+        lazily initialize state.
+
+        This is a workaround for the lack of a proper step hook on the optimizer,
+        and will be removed once such a hook exists.
+        """
+        pass
+
+    @staticmethod
+    def profile_hook_step(func: Callable[_P, R]) -> Callable[_P, R]:
+        @functools.wraps(func)
+        def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> R:
+            self, *_ = args
+            self = cast(Optimizer, self)
+            profile_name = f"Optimizer.step#{self.__class__.__name__}.step"
+            with torch.autograd.profiler.record_function(profile_name):
+                # call optimizer step pre hooks
+                for pre_hook in chain(
+                    _global_optimizer_pre_hooks.values(),
+                    self._optimizer_step_pre_hooks.values(),
+                ):
+                    result = pre_hook(self, args, kwargs)
+                    if result is not None:
+                        if isinstance(result, tuple) and len(result) == 2:
+                            args, kwargs = result  # type: ignore[assignment]
+                        else:
+                            raise RuntimeError(
+                                f"{func} must return None or a tuple of (new_args, new_kwargs), but got {result}."
+ ) + + out = func(*args, **kwargs) + self._optimizer_step_code() + + # call optimizer step post hooks + for post_hook in chain( + self._optimizer_step_post_hooks.values(), + _global_optimizer_post_hooks.values(), + ): + post_hook(self, args, kwargs) + + return out + + return wrapper + + @staticmethod + def _group_tensors_by_device_and_dtype( + tensorlistlist: TensorListList, + with_indices: bool = False, + ) -> Union[ + Dict[Tuple[None, None], Tuple[TensorListList, Indices]], + Dict[Tuple[torch.device, torch.dtype], Tuple[TensorListList, Indices]], + ]: + """Groups a list of lists of tensors by device and dtype. + Skips this step if we are compiling since this will occur during inductor lowering. + """ + if is_compiling(): + return {(None, None): (tensorlistlist, list(range(len(tensorlistlist[0]))))} + else: + return _group_tensors_by_device_and_dtype(tensorlistlist, with_indices) # type: ignore[return-value, arg-type] + + def _patch_step_function(self) -> None: + self._zero_grad_profile_name = ( + f"Optimizer.zero_grad#{self.__class__.__name__}.zero_grad" + ) + hooked = getattr(self.__class__.step, "hooked", None) + if not hooked: + self.__class__.step = self.profile_hook_step(self.__class__.step) # type: ignore[assignment] + self.__class__.step.hooked = True # type: ignore[attr-defined] + + def register_step_pre_hook(self, hook: OptimizerPreHook) -> RemovableHandle: + r"""Register an optimizer step pre hook which will be called before + optimizer step. It should have the following signature:: + + hook(optimizer, args, kwargs) -> None or modified args and kwargs + + The ``optimizer`` argument is the optimizer instance being used. If + args and kwargs are modified by the pre-hook, then the transformed + values are returned as a tuple containing the new_args and new_kwargs. + + Args: + hook (Callable): The user defined hook to be registered. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._optimizer_step_pre_hooks) + self._optimizer_step_pre_hooks[handle.id] = hook + return handle + + def register_step_post_hook(self, hook: OptimizerPostHook) -> RemovableHandle: + r"""Register an optimizer step post hook which will be called after optimizer step. + It should have the following signature:: + + hook(optimizer, args, kwargs) -> None + + The ``optimizer`` argument is the optimizer instance being used. + + Args: + hook (Callable): The user defined hook to be registered. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._optimizer_step_post_hooks) + self._optimizer_step_post_hooks[handle.id] = hook + return handle + + def register_state_dict_pre_hook( + self, hook: Callable[["Optimizer"], None], prepend: bool = False + ) -> RemovableHandle: + r"""Register a state dict pre-hook which will be called before + :meth:`~torch.optim.Optimizer.state_dict` is called. It should have the + following signature:: + + hook(optimizer) -> None + + The ``optimizer`` argument is the optimizer instance being used. + The hook will be called with argument ``self`` before calling ``state_dict`` on ``self``. + The registered hook can be used to perform pre-processing before the ``state_dict`` + call is made. + + Args: + hook (Callable): The user defined hook to be registered. 
+            prepend (bool): If True, the provided pre ``hook`` will be fired before
+                all the already registered pre-hooks on ``state_dict``. Otherwise,
+                the provided ``hook`` will be fired after all the already registered
+                pre-hooks. (default: False)
+
+        Returns:
+            :class:`torch.utils.hooks.RemovableHandle`:
+                a handle that can be used to remove the added hook by calling
+                ``handle.remove()``
+        """
+        handle = hooks.RemovableHandle(self._optimizer_state_dict_pre_hooks)
+        self._optimizer_state_dict_pre_hooks[handle.id] = hook
+        if prepend:
+            self._optimizer_state_dict_pre_hooks.move_to_end(handle.id, last=False)
+        return handle
+
+    def register_state_dict_post_hook(
+        self,
+        hook: Callable[["Optimizer", StateDict], Optional[StateDict]],
+        prepend: bool = False,
+    ) -> RemovableHandle:
+        r"""Register a state dict post-hook which will be called after
+        :meth:`~torch.optim.Optimizer.state_dict` is called. It should have the
+        following signature::
+
+            hook(optimizer, state_dict) -> state_dict or None
+
+        The hook will be called with arguments ``self`` and ``state_dict`` after generating
+        a ``state_dict`` on ``self``. The hook may modify the state_dict in place or optionally
+        return a new one. The registered hook can be used to perform post-processing
+        on the ``state_dict`` before it is returned.
+
+        Args:
+            hook (Callable): The user defined hook to be registered.
+            prepend (bool): If True, the provided post ``hook`` will be fired before
+                all the already registered post-hooks on ``state_dict``. Otherwise,
+                the provided ``hook`` will be fired after all the already registered
+                post-hooks. (default: False)
+
+        Returns:
+            :class:`torch.utils.hooks.RemovableHandle`:
+                a handle that can be used to remove the added hook by calling
+                ``handle.remove()``
+        """
+        handle = hooks.RemovableHandle(self._optimizer_state_dict_post_hooks)
+        self._optimizer_state_dict_post_hooks[handle.id] = hook
+        if prepend:
+            self._optimizer_state_dict_post_hooks.move_to_end(handle.id, last=False)
+        return handle
+
+    @torch._disable_dynamo
+    def state_dict(self) -> StateDict:
+        r"""Returns the state of the optimizer as a :class:`dict`.
+
+        It contains two entries:
+
+        * ``state``: a Dict holding current optimization state. Its content
+            differs between optimizer classes, but some common characteristics
+            hold. For example, state is saved per parameter, and the parameter
+            itself is NOT saved. ``state`` is a Dictionary mapping parameter ids
+            to a Dict with state corresponding to each parameter.
+        * ``param_groups``: a List containing all parameter groups where each
+            parameter group is a Dict. Each parameter group contains metadata
+            specific to the optimizer, such as learning rate and weight decay,
+            as well as a List of parameter IDs of the parameters in the group.
+
+        NOTE: The parameter IDs may look like indices but they are just IDs
+        associating state with param_group. When loading from a state_dict,
+        the optimizer will zip the param_group ``params`` (int IDs) and the
+        optimizer ``param_groups`` (actual ``nn.Parameter`` s) in order to
+        match state WITHOUT additional verification.
+
+        A returned state dict might look something like:
+
+        .. code-block:: text
+
+            {
+                'state': {
+                    0: {'momentum_buffer': tensor(...), ...},
+                    1: {'momentum_buffer': tensor(...), ...},
+                    2: {'momentum_buffer': tensor(...), ...},
+                    3: {'momentum_buffer': tensor(...), ...}
+                },
+                'param_groups': [
+                    {
+                        'lr': 0.01,
+                        'weight_decay': 0,
+                        ...
+                        'params': [0]
+                    },
+                    {
+                        'lr': 0.001,
+                        'weight_decay': 0.5,
+                        ...
+                        'params': [1, 2, 3]
+                    }
+                ]
+            }
+
+        """
+
+        for pre_hook in self._optimizer_state_dict_pre_hooks.values():
+            pre_hook(self)
+
+        # Save order indices instead of Tensors
+        param_mappings: Dict[int, int] = {}
+        start_index = 0
+
+        def pack_group(group: Dict[str, Any]) -> Dict[str, Any]:
+            nonlocal start_index
+            packed = {k: v for k, v in group.items() if k != "params"}
+            param_mappings.update(
+                {
+                    id(p): i
+                    for i, p in enumerate(group["params"], start_index)
+                    if id(p) not in param_mappings
+                }
+            )
+            packed["params"] = [param_mappings[id(p)] for p in group["params"]]
+            start_index += len(packed["params"])
+            return packed
+
+        param_groups = [pack_group(g) for g in self.param_groups]
+        # Remap state to use order indices as keys
+        packed_state = {
+            (param_mappings[id(k)] if isinstance(k, torch.Tensor) else k): v
+            for k, v in self.state.items()
+        }
+
+        state_dict = {
+            "state": packed_state,
+            "param_groups": param_groups,
+        }
+
+        for post_hook in self._optimizer_state_dict_post_hooks.values():
+            hook_result = post_hook(self, state_dict)
+            if hook_result is not None:
+                state_dict = hook_result
+        return state_dict
+
+    @staticmethod
+    def _process_value_according_to_param_policy(
+        param: torch.Tensor,
+        value: torch.Tensor,
+        param_id: int,
+        param_groups: List[Dict[Any, Any]],
+        key: Hashable = None,
+    ) -> torch.Tensor:
+        # Floating-point types are a bit special here. They are the only ones
+        # that are assumed to always match the type of params.
+        # Make sure state['step'] is not cast https://github.com/pytorch/pytorch/issues/74424
+        # UNLESS fused or capturable, see note [special device hosting for step]
+        fused = False
+        capturable = False
+        assert param_groups is not None
+        for pg in param_groups:
+            if param_id in pg["params"]:
+                fused = pg["fused"] if "fused" in pg else False
+                capturable = pg["capturable"] if "capturable" in pg else False
+                break
+        if key == "step":
+            if capturable or fused:
+                return value.to(dtype=torch.float32, device=param.device)
+            else:
+                return value
+        else:
+            if param.is_floating_point():
+                return value.to(dtype=param.dtype, device=param.device)
+            else:
+                return value.to(device=param.device)
+
+    def register_load_state_dict_pre_hook(
+        self,
+        hook: Callable[["Optimizer", StateDict], Optional[StateDict]],
+        prepend: bool = False,
+    ) -> RemovableHandle:
+        r"""Register a load_state_dict pre-hook which will be called before
+        :meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the
+        following signature::
+
+            hook(optimizer, state_dict) -> state_dict or None
+
+        The ``optimizer`` argument is the optimizer instance being used and the
+        ``state_dict`` argument is a shallow copy of the ``state_dict`` the user
+        passed in to ``load_state_dict``. The hook may modify the state_dict in place
+        or optionally return a new one. If a state_dict is returned, it will be
+        the one loaded into the optimizer.
+
+        The hook will be called with arguments ``self`` and ``state_dict`` before
+        calling ``load_state_dict`` on ``self``. The registered hook can be used to
+        perform pre-processing before the ``load_state_dict`` call is made.
+
+        Args:
+            hook (Callable): The user defined hook to be registered.
+            prepend (bool): If True, the provided pre ``hook`` will be fired before
+                all the already registered pre-hooks on ``load_state_dict``. Otherwise,
+                the provided ``hook`` will be fired after all the already registered
+                pre-hooks. (default: False)
+
+        Returns:
+            :class:`torch.utils.hooks.RemovableHandle`:
+                a handle that can be used to remove the added hook by calling
+                ``handle.remove()``
+        """
+        handle = hooks.RemovableHandle(self._optimizer_load_state_dict_pre_hooks)
+        self._optimizer_load_state_dict_pre_hooks[handle.id] = hook
+        if prepend:
+            self._optimizer_load_state_dict_pre_hooks.move_to_end(handle.id, last=False)
+        return handle
+
+    def register_load_state_dict_post_hook(
+        self, hook: Callable[["Optimizer"], None], prepend: bool = False
+    ) -> RemovableHandle:
+        r"""Register a load_state_dict post-hook which will be called after
+        :meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the
+        following signature::
+
+            hook(optimizer) -> None
+
+        The ``optimizer`` argument is the optimizer instance being used.
+
+        The hook will be called with argument ``self`` after calling
+        ``load_state_dict`` on ``self``. The registered hook can be used to
+        perform post-processing after ``load_state_dict`` has loaded the
+        ``state_dict``.
+
+        Args:
+            hook (Callable): The user defined hook to be registered.
+            prepend (bool): If True, the provided post ``hook`` will be fired before
+                all the already registered post-hooks on ``load_state_dict``. Otherwise,
+                the provided ``hook`` will be fired after all the already registered
+                post-hooks. (default: False)
+
+        Returns:
+            :class:`torch.utils.hooks.RemovableHandle`:
+                a handle that can be used to remove the added hook by calling
+                ``handle.remove()``
+        """
+        handle = hooks.RemovableHandle(self._optimizer_load_state_dict_post_hooks)
+        self._optimizer_load_state_dict_post_hooks[handle.id] = hook
+        if prepend:
+            self._optimizer_load_state_dict_post_hooks.move_to_end(handle.id, last=False)  # type: ignore[attr-defined]
+        return handle
+
+    @torch._disable_dynamo
+    def load_state_dict(self, state_dict: StateDict) -> None:
+        r"""Loads the optimizer state.
+
+        Args:
+            state_dict (dict): optimizer state. Should be an object returned
+                from a call to :meth:`state_dict`.
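+
+        A minimal round-trip sketch (``opt`` is assumed to be an optimizer
+        constructed over the same parameters in the same order)::
+
+            >>> torch.save(opt.state_dict(), "opt.pt")
+            >>> opt.load_state_dict(torch.load("opt.pt"))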
+ """ + # shallow copy, to be consistent with module API + state_dict = state_dict.copy() + + for pre_hook in self._optimizer_load_state_dict_pre_hooks.values(): + hook_result = pre_hook(self, state_dict) + if hook_result is not None: + state_dict = hook_result + + # Validate the state_dict + groups = self.param_groups + + # Deepcopy as we write into saved_groups later to update state + saved_groups = deepcopy(state_dict["param_groups"]) + + if len(groups) != len(saved_groups): + raise ValueError( + "loaded state dict has a different number of " "parameter groups" + ) + param_lens = (len(g["params"]) for g in groups) + saved_lens = (len(g["params"]) for g in saved_groups) + if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)): + raise ValueError( + "loaded state dict contains a parameter group " + "that doesn't match the size of optimizer's group" + ) + + # Update the state + id_map = dict( + zip( + chain.from_iterable(g["params"] for g in saved_groups), + chain.from_iterable(g["params"] for g in groups), + ) + ) + + def _cast(param, value, param_id=None, param_groups=None, key=None): + r"""Make a deep copy of value, casting all tensors to device of param.""" + if isinstance(value, torch.Tensor): + return Optimizer._process_value_according_to_param_policy( + param, value, param_id, param_groups, key + ) + elif isinstance(value, dict): + return { + k: _cast( + param, v, param_id=param_id, param_groups=param_groups, key=k + ) + for k, v in value.items() + } + elif isinstance(value, Iterable): + return type(value)(_cast(param, v, param_id=param_id, param_groups=param_groups) for v in value) # type: ignore[call-arg] + else: + return value + + # Copy state assigned to params (and cast tensors to appropriate types). + # State that is not assigned to params is copied as is (needed for + # backward compatibility). + state: DefaultDict[torch.Tensor, Dict[Any, Any]] = defaultdict(dict) + for k, v in state_dict["state"].items(): + if k in id_map: + param = id_map[k] + state[param] = _cast( + param, v, param_id=k, param_groups=state_dict["param_groups"] + ) + else: + state[k] = v + + # Update parameter groups, setting their 'params' value + def update_group( + group: Dict[str, Any], new_group: Dict[str, Any] + ) -> Dict[str, Any]: + new_group["params"] = group["params"] + return new_group + + param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)] + self.__setstate__({"state": state, "param_groups": param_groups}) + + for post_hook in self._optimizer_load_state_dict_post_hooks.values(): + post_hook(self) + + @torch._disable_dynamo + def zero_grad(self, set_to_none: bool = True) -> None: + r"""Resets the gradients of all optimized :class:`torch.Tensor` s. + + Args: + set_to_none (bool): instead of setting to zero, set the grads to None. + This will in general have lower memory footprint, and can modestly improve performance. + However, it changes certain behaviors. For example: + 1. When the user tries to access a gradient and perform manual ops on it, + a None attribute or a Tensor full of 0s will behave differently. + 2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s + are guaranteed to be None for params that did not receive a gradient. + 3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None + (in one case it does the step with a gradient of 0 and in the other it skips + the step altogether). 
+ """ + foreach = self.defaults.get("foreach", False) or self.defaults.get( + "fused", False + ) + + if not hasattr(self, "_zero_grad_profile_name"): + self._patch_step_function() + + per_device_and_dtype_grads: Optional[ + DefaultDict[torch.device, DefaultDict[torch.dtype, List[torch.Tensor]]] + ] + if foreach: + per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) + else: + per_device_and_dtype_grads = None + + with torch.autograd.profiler.record_function(self._zero_grad_profile_name): + for group in self.param_groups: + for p in group["params"]: + if p.grad is not None: + if set_to_none: + p.grad = None + else: + if p.grad.grad_fn is not None: + p.grad.detach_() + else: + p.grad.requires_grad_(False) + if not foreach or p.grad.is_sparse: + p.grad.zero_() + else: + assert per_device_and_dtype_grads is not None + per_device_and_dtype_grads[p.grad.device][ + p.grad.dtype + ].append(p.grad) + if foreach: + assert per_device_and_dtype_grads is not None + for per_dtype_grads in per_device_and_dtype_grads.values(): + for grads in per_dtype_grads.values(): + torch._foreach_zero_(grads) + + @overload + def step(self, closure: None = ...) -> None: + ... + + @overload + def step(self, closure: Callable[[], float]) -> float: + ... + + def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: + r"""Performs a single optimization step (parameter update). + + Args: + closure (Callable): A closure that reevaluates the model and + returns the loss. Optional for most optimizers. + + .. note:: + Unless otherwise specified, this function should not modify the + ``.grad`` field of the parameters. + """ + raise NotImplementedError + + @torch._disable_dynamo + def add_param_group(self, param_group: Dict[str, Any]) -> None: + r"""Add a param group to the :class:`Optimizer` s `param_groups`. + + This can be useful when fine tuning a pre-trained network as frozen layers can be made + trainable and added to the :class:`Optimizer` as training progresses. + + Args: + param_group (dict): Specifies what Tensors should be optimized along with group + specific optimization options. + """ + if not isinstance(param_group, dict): + raise TypeError(f"param_group must be a dict, but got {type(param_group)}") + + params = param_group["params"] + if isinstance(params, torch.Tensor): + param_group["params"] = [params] + elif isinstance(params, set): + raise TypeError( + "optimizer parameters need to be organized in ordered collections, but " + "the ordering of tensors in sets will change between runs. Please use a list instead." 
+ ) + else: + param_group["params"] = list(params) + + for param in param_group["params"]: + if not isinstance(param, torch.Tensor): + raise TypeError( + "optimizer can only optimize Tensors, " + "but one of the params is " + torch.typename(param) + ) + if not self.defaults.get("differentiable", None) and not ( + param.is_leaf or param.retains_grad + ): + raise ValueError("can't optimize a non-leaf Tensor") + + for name, default in self.defaults.items(): + if default is required and name not in param_group: + raise ValueError( + f"parameter group didn't specify a value of required optimization parameter {name}" + ) + else: + param_group.setdefault(name, default) + + params = param_group["params"] + if len(params) != len(set(params)): + warnings.warn( + "optimizer contains a parameter group with duplicate parameters; " + "in future, this will cause an error; " + "see github.com/pytorch/pytorch/issues/40967 for more information", + stacklevel=3, + ) + + param_set: Set[torch.Tensor] = set() + for group in self.param_groups: + param_set.update(set(group["params"])) + + if not param_set.isdisjoint(set(param_group["params"])): + raise ValueError("some parameters appear in more than one parameter group") + + self.param_groups.append(param_group) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/radam.py b/parrot/lib/python3.10/site-packages/torch/optim/radam.py new file mode 100644 index 0000000000000000000000000000000000000000..1ecf20ffde86d9a1e5e309c1f6bcd5c3f57de1c7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/radam.py @@ -0,0 +1,598 @@ +# mypy: allow-untyped-defs +from typing import cast, List, Optional, Tuple, Union + +import torch +from torch import Tensor + +from .optimizer import ( + _capturable_doc, + _default_to_fused_or_foreach, + _differentiable_doc, + _disable_dynamo_if_unsupported, + _dispatch_sqrt, + _foreach_doc, + _get_capturable_supported_devices, + _get_scalar_dtype, + _get_value, + _maximize_doc, + _use_grad_for_differentiable, + _view_as_real, + Optimizer, + ParamsT, +) + +__all__ = ["RAdam", "radam"] + + +class RAdam(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0, + decoupled_weight_decay: bool = False, + *, + foreach: Optional[bool] = None, + maximize: bool = False, + capturable: bool = False, + differentiable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + maximize=maximize, + foreach=foreach, + capturable=capturable, + decoupled_weight_decay=decoupled_weight_decay, + differentiable=differentiable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + group.setdefault("decoupled_weight_decay", False) + group.setdefault("capturable", False) + for p in group["params"]: + p_state = self.state.get(p, []) + if len(p_state) != 0 and 
not torch.is_tensor(p_state["step"]): + step_val = float(p_state["step"]) + p_state["step"] = ( + torch.tensor( + step_val, dtype=_get_scalar_dtype(), device=p.device + ) + if group["capturable"] + else torch.tensor(step_val, dtype=_get_scalar_dtype()) + ) + + def _init_group( + self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps + ): + has_complex = False + for p in group["params"]: + if p.grad is not None: + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError("RAdam does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + # Lazy state initialization + if len(state) == 0: + state["step"] = ( + torch.zeros((), dtype=_get_scalar_dtype(), device=p.device) + if group["capturable"] + else torch.tensor(0.0, dtype=_get_scalar_dtype()) + ) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + state_steps.append(state["step"]) + + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad: List[Tensor] = [] + grads: List[Tensor] = [] + exp_avgs: List[Tensor] = [] + exp_avg_sqs: List[Tensor] = [] + state_steps: List[Tensor] = [] + beta1, beta2 = cast(Tuple[float, float], group["betas"]) + + has_complex = self._init_group( + group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps + ) + + radam( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + state_steps, + beta1=beta1, + beta2=beta2, + lr=group["lr"], + weight_decay=group["weight_decay"], + eps=group["eps"], + maximize=group["maximize"], + foreach=group["foreach"], + capturable=group["capturable"], + differentiable=group["differentiable"], + decoupled_weight_decay=group["decoupled_weight_decay"], + has_complex=has_complex, + ) + + return loss + + +RAdam.__doc__ = ( + r"""Implements RAdam algorithm. + + .. 
math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \beta_1, \beta_2 + \text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta) \text{ (objective)}, \: + \lambda \text{ (weightdecay)}, \:\textit{maximize} \\ + &\hspace{13mm} \epsilon \text{ (epsilon)}, \textit{decoupled\_weight\_decay} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + v_0 \leftarrow 0 \text{ ( second moment)}, \\ + &\hspace{18mm} \rho_{\infty} \leftarrow 2/(1-\beta_2) -1 \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{6mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{12mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{6mm}\textbf{else} \\ + &\hspace{12mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{6mm} \theta_t \leftarrow \theta_{t-1} \\ + &\hspace{6mm} \textbf{if} \: \lambda \neq 0 \\ + &\hspace{12mm}\textbf{if} \: \textit{decoupled\_weight\_decay} \\ + &\hspace{18mm} \theta_t \leftarrow \theta_{t} - \gamma \lambda \theta_{t} \\ + &\hspace{12mm}\textbf{else} \\ + &\hspace{18mm} g_t \leftarrow g_t + \lambda \theta_{t} \\ + &\hspace{6mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{6mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{6mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ + &\hspace{6mm}\rho_t \leftarrow \rho_{\infty} - + 2 t \beta^t_2 /\big(1-\beta_2^t \big) \\[0.1.ex] + &\hspace{6mm}\textbf{if} \: \rho_t > 5 \\ + &\hspace{12mm} l_t \leftarrow \frac{\sqrt{ (1-\beta^t_2) }}{ \sqrt{v_t} +\epsilon } \\ + &\hspace{12mm} r_t \leftarrow + \sqrt{\frac{(\rho_t-4)(\rho_t-2)\rho_{\infty}}{(\rho_{\infty}-4)(\rho_{\infty}-2) \rho_t}} \\ + &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t} r_t l_t \\ + &\hspace{6mm}\textbf{else} \\ + &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t} \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `On the variance of the adaptive learning rate and beyond`_. + + This implementation provides an option to use either the original weight_decay implementation as in Adam + (where the weight_decay is applied to the gradient) or the one from AdamW (where weight_decay is applied + to the weight) through the decoupled_weight_decay option. When decoupled_weight_decay is set to False + (default), it uses the original Adam style weight decay, otherwise, it uses the AdamW style which + corresponds more closely to the `author's implementation`_ in the RAdam paper. Further information + about decoupled weight decay can be found in `Decoupled Weight Decay Regularization`_. + + """ + + rf""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + decoupled_weight_decay (bool, optional): whether to use decoupled weight + decay as in AdamW to obtain RAdamW (default: False) + {_foreach_doc} + {_maximize_doc} + {_differentiable_doc} + {_capturable_doc} + + .. 
_On the variance of the adaptive learning rate and beyond: + https://arxiv.org/abs/1908.03265 + .. _author's implementation: + https://github.com/LiyuanLucasLiu/RAdam + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + + """ +) + + +def _single_tensor_radam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + eps: float, + decoupled_weight_decay: bool, + differentiable: bool, + maximize: bool, + capturable: bool, + has_complex: bool, +): + for i, param in enumerate(params): + grad = grads[i] if not maximize else -grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + step_t = state_steps[i] + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices() + assert ( + param.device.type == step_t.device.type + and param.device.type in capturable_supported_devices + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." + + if torch.is_complex(param): + param = torch.view_as_real(param) + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + exp_avg_sq = torch.view_as_real(exp_avg_sq) + + # update step + step_t += 1 + step = step_t if capturable else _get_value(step_t) + + if weight_decay != 0: + if decoupled_weight_decay: + param.mul_(1 - lr * weight_decay) + else: + grad = grad.add(param, alpha=weight_decay) + + # Decay the first and second moment running average coefficient + exp_avg.lerp_(grad, 1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + bias_correction1 = 1 - beta1**step + bias_correction2 = 1 - beta2**step + + # correcting bias for the first moving moment + bias_corrected_exp_avg = exp_avg / bias_correction1 + + # maximum length of the approximated SMA + rho_inf = 2 / (1 - beta2) - 1 + # compute the length of the approximated SMA + rho_t = rho_inf - 2 * step * (beta2**step) / bias_correction2 + + def _compute_rect(): + return ( + (rho_t - 4) + * (rho_t - 2) + * rho_inf + / ((rho_inf - 4) * (rho_inf - 2) * rho_t) + ) ** 0.5 + + def _compute_adaptive_lr(): + exp_avg_sq_sqrt = exp_avg_sq.sqrt() + if differentiable: + exp_avg_sq_sqrt = exp_avg_sq_sqrt.add(eps) + else: + exp_avg_sq_sqrt = exp_avg_sq_sqrt.add_(eps) + + return (bias_correction2**0.5) / exp_avg_sq_sqrt + + # Compute the variance rectification term and update parameters accordingly + if capturable: + update = torch.where( + rho_t > 5.0, _compute_rect() * _compute_adaptive_lr(), 1.0 + ) + param.add_(bias_corrected_exp_avg * lr * update, alpha=-1.0) + else: + if rho_t > 5.0: + param.add_( + bias_corrected_exp_avg + * lr + * _compute_adaptive_lr() + * _compute_rect(), + alpha=-1.0, + ) + else: + param.add_(bias_corrected_exp_avg * lr, alpha=-1.0) + + +def _multi_tensor_radam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + eps: float, + decoupled_weight_decay: bool, + differentiable: bool, + maximize: bool, + capturable: bool, + has_complex: bool, +): + if len(params) == 0: + return + + assert not differentiable, "_foreach ops don't support autograd" + + # If compiling, the compiler will handle cudagraph checks, see note 
[torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices( + supports_xla=False + ) + assert all( + p.device.type == step.device.type + and p.device.type in capturable_supported_devices + for p, step in zip(params, state_steps) + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, exp_avgs, exp_avg_sqs, state_steps] + ) + for ( + grouped_params, + grouped_grads, + grouped_exp_avgs, + grouped_exp_avg_sqs, + grouped_state_steps, + ), _ in grouped_tensors.values(): + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. + if grouped_state_steps[0].is_cpu: + torch._foreach_add_( + grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0 + ) + else: + torch._foreach_add_(grouped_state_steps, 1) + + if has_complex: + _view_as_real( + grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs + ) + + if maximize: + grouped_grads = torch._foreach_neg(grouped_grads) # type: ignore[assignment] + + # maximum length of the approximated SMA + rho_inf = 2 / (1 - beta2) - 1 + # compute the length of the approximated SMA + bias_correction1: Union[Tuple[Tensor, ...], List[Tensor]] + bias_correction2: Union[Tuple[Tensor, ...], List[Tensor]] + rho_t_list: Union[Tuple[Tensor, ...], List[Tensor]] + if capturable: + bias_correction1 = torch._foreach_pow(beta2, grouped_state_steps) + torch._foreach_neg_(bias_correction1) + torch._foreach_add_(bias_correction1, 1) + bias_correction2 = torch._foreach_pow(beta2, grouped_state_steps) + torch._foreach_mul_(bias_correction2, grouped_state_steps) + torch._foreach_mul_(bias_correction2, 2) + torch._foreach_div_(bias_correction2, bias_correction1) + torch._foreach_neg_(bias_correction2) + torch._foreach_add_(bias_correction2, rho_inf) + rho_t_list = bias_correction2 + else: + rho_t_list = [ + rho_inf + - 2 + * _get_value(step) + * (beta2 ** _get_value(step)) + / (1 - beta2 ** _get_value(step)) + for step in grouped_state_steps + ] + + if weight_decay != 0: + if decoupled_weight_decay: + torch._foreach_mul_(grouped_params, 1 - lr * weight_decay) + else: + # Re-use the intermediate memory (grouped_grads) already allocated for maximize + if maximize: + torch._foreach_add_( + grouped_grads, grouped_params, alpha=weight_decay + ) + else: + grouped_grads = torch._foreach_add( # type: ignore[assignment] + grouped_grads, grouped_params, alpha=weight_decay + ) + + # Decay the first and second moment running average coefficient + torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1) + + torch._foreach_mul_(grouped_exp_avg_sqs, beta2) + torch._foreach_addcmul_( + grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2 + ) + + # Delete the local intermediate since it won't be used anymore to save on peak memory + del grouped_grads + + if capturable: + num = torch._foreach_sub(rho_t_list, 4) + sub2 = torch._foreach_sub(rho_t_list, 2) + torch._foreach_mul_(num, sub2) + del sub2 + torch._foreach_mul_(num, rho_inf) + rho_inf = (rho_inf - 4) * (rho_inf - 2) + denom = torch._foreach_mul(rho_t_list, rho_inf) + torch._foreach_div_(num, denom) + del denom + 
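+            # At this point ``num`` holds, for every step tensor, the squared
+            # rectification term (rho_t - 4) * (rho_t - 2) * rho_inf
+            # / ((rho_inf - 4) * (rho_inf - 2) * rho_t), with rho_inf at its
+            # original value; the in-place sqrt below turns it into r_t.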
torch._foreach_sqrt_(num) + + # TODO(mlazos): we should try and get a foreach_where op https://github.com/pytorch/pytorch/issues/117884 + rect = [ + torch.where(rho_t > 5.0, n, 0.0) for n, rho_t in zip(num, rho_t_list) + ] + del num + del rho_t_list + unrect_step_size = [torch.where(rect > 0, 0.0, 1.0) for rect in rect] + torch._foreach_mul_(unrect_step_size, lr) + + bias_correction1 = torch._foreach_pow(beta1, grouped_state_steps) + torch._foreach_neg_(bias_correction1) + torch._foreach_add_(bias_correction1, 1) + + torch._foreach_div_(unrect_step_size, bias_correction1) + torch._foreach_neg_(unrect_step_size) + + bias_correction2 = torch._foreach_pow(beta2, grouped_state_steps) + torch._foreach_neg_(bias_correction2) + torch._foreach_add_(bias_correction2, 1) + torch._foreach_sqrt_(bias_correction2) + torch._foreach_mul_(bias_correction2, lr) + torch._foreach_mul_(bias_correction2, rect) + del rect + torch._foreach_neg_(bias_correction2) + torch._foreach_div_(bias_correction2, bias_correction1) + del bias_correction1 + else: + rect = [ + _dispatch_sqrt( + (rho_t - 4) # type: ignore[arg-type] + * (rho_t - 2) + * rho_inf + / ((rho_inf - 4) * (rho_inf - 2) * rho_t) + ) + if rho_t > 5 + else 0 + for rho_t in rho_t_list + ] + unrectified = [0 if rect > 0 else 1.0 for rect in rect] + + bias_correction1 = [ + 1 - beta1 ** _get_value(step) for step in grouped_state_steps + ] + unrect_step_size = [ + (lr * rect / bc) * -1 for rect, bc in zip(unrectified, bias_correction1) + ] + bias_correction2 = [ + _dispatch_sqrt(1 - beta2 ** _get_value(step)) * (lr * rect / bc) * -1 + for step, rect, bc in zip(grouped_state_steps, rect, bias_correction1) + ] + + buffer = torch._foreach_sqrt(grouped_exp_avg_sqs) + torch._foreach_add_(buffer, eps) + torch._foreach_div_(buffer, bias_correction2) + torch._foreach_reciprocal_(buffer) + torch._foreach_add_(buffer, unrect_step_size) + + # Here, buffer = sqrt(1 - beta2^t) * rect_step_size / (sqrt(v) + eps) + unrect_step_size + torch._foreach_addcmul_(grouped_params, grouped_exp_avgs, buffer) + + +@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_radam) +def radam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + decoupled_weight_decay: bool = False, + foreach: Optional[bool] = None, + differentiable: bool = False, + capturable: bool = False, + has_complex: bool = False, + maximize: bool = False, + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + eps: float, +): + r"""Functional API that performs RAdam algorithm computation. + + See :class:`~torch.optim.RAdam` for details. 
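+
+    A hedged calling sketch (the state tensors are assumed to have been set up
+    the way :class:`~torch.optim.RAdam` initializes them, with ``state_steps``
+    holding singleton float tensors)::
+
+        >>> radam([param], [grad], [exp_avg], [exp_avg_sq], [step],
+        ...       beta1=0.9, beta2=0.999, lr=1e-3, weight_decay=0.0, eps=1e-8)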
+ """ + + if not all(isinstance(t, torch.Tensor) for t in state_steps): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + if foreach is None: + _, foreach = _default_to_fused_or_foreach( + params, differentiable, use_fused=False + ) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_radam + else: + func = _single_tensor_radam + + func( + params, + grads, + exp_avgs, + exp_avg_sqs, + state_steps, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + decoupled_weight_decay=decoupled_weight_decay, + differentiable=differentiable, + capturable=capturable, + has_complex=has_complex, + ) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/rmsprop.py b/parrot/lib/python3.10/site-packages/torch/optim/rmsprop.py new file mode 100644 index 0000000000000000000000000000000000000000..5311aa2fd6b8545503e9d92cd2932b6f8d8eb88a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/rmsprop.py @@ -0,0 +1,508 @@ +# mypy: allow-untyped-defs +from typing import List, Optional + +import torch +from torch import Tensor +from .optimizer import ( + _capturable_doc, + _default_to_fused_or_foreach, + _differentiable_doc, + _disable_dynamo_if_unsupported, + _foreach_doc, + _get_capturable_supported_devices, + _get_scalar_dtype, + _maximize_doc, + _use_grad_for_differentiable, + _view_as_real, + Optimizer, + ParamsT, +) + +__all__ = ["RMSprop", "rmsprop"] + + +class RMSprop(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = 1e-2, + alpha: float = 0.99, + eps: float = 1e-8, + weight_decay: float = 0, + momentum: float = 0, + centered=False, + capturable=False, + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= momentum: + raise ValueError(f"Invalid momentum value: {momentum}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + if not 0.0 <= alpha: + raise ValueError(f"Invalid alpha value: {alpha}") + + defaults = dict( + lr=lr, + momentum=momentum, + alpha=alpha, + eps=eps, + centered=centered, + weight_decay=weight_decay, + capturable=capturable, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("momentum", 0) + group.setdefault("centered", False) + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + group.setdefault("capturable", False) + for p in group["params"]: + p_state = self.state.get(p, []) + if len(p_state) != 0 and not torch.is_tensor(p_state["step"]): + step_val = float(p_state["step"]) + p_state["step"] = ( + torch.tensor( + step_val, dtype=_get_scalar_dtype(), device=p.device + ) + if group["capturable"] + else torch.tensor(step_val, dtype=_get_scalar_dtype()) + ) + + def _init_group( + self, + group, + params_with_grad, + grads, + square_avgs, + momentum_buffer_list, + grad_avgs, + state_steps, + ): + has_complex = False + for p in group["params"]: + if p.grad is None: + continue + has_complex |= torch.is_complex(p) + 
params_with_grad.append(p) + + if p.grad.is_sparse: + raise RuntimeError("RMSprop does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = ( + torch.zeros((), dtype=_get_scalar_dtype(), device=p.device) + if group["capturable"] + else torch.zeros((), dtype=_get_scalar_dtype()) + ) + state["square_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + if group["momentum"] > 0: + state["momentum_buffer"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + if group["centered"]: + state["grad_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + square_avgs.append(state["square_avg"]) + state_steps.append(state["step"]) + + if group["momentum"] > 0: + momentum_buffer_list.append(state["momentum_buffer"]) + if group["centered"]: + grad_avgs.append(state["grad_avg"]) + + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad: List[Tensor] = [] + grads: List[Tensor] = [] + square_avgs: List[Tensor] = [] + grad_avgs: List[Tensor] = [] + momentum_buffer_list: List[Tensor] = [] + state_steps: List[Tensor] = [] + + has_complex = self._init_group( + group, + params_with_grad, + grads, + square_avgs, + momentum_buffer_list, + grad_avgs, + state_steps, + ) + + rmsprop( + params_with_grad, + grads, + square_avgs, + grad_avgs, + momentum_buffer_list, + state_steps, + lr=group["lr"], + alpha=group["alpha"], + eps=group["eps"], + weight_decay=group["weight_decay"], + momentum=group["momentum"], + centered=group["centered"], + foreach=group["foreach"], + maximize=group["maximize"], + differentiable=group["differentiable"], + capturable=group["capturable"], + has_complex=has_complex, + ) + + return loss + + +RMSprop.__doc__ = ( + r"""Implements RMSprop algorithm. + + .. 
math::
+        \begin{aligned}
+            &\rule{110mm}{0.4pt} \\
+            &\textbf{input} : \alpha \text{ (alpha)},\: \gamma \text{ (lr)},
+                \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)} \\
+            &\hspace{13mm} \lambda \text{ (weight decay)},\: \mu \text{ (momentum)},\: centered\\
+            &\textbf{initialize} : v_0 \leftarrow 0 \text{ (square average)}, \:
+                \textbf{b}_0 \leftarrow 0 \text{ (buffer)}, \: g^{ave}_0 \leftarrow 0 \\[-1.ex]
+            &\rule{110mm}{0.4pt} \\
+            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
+            &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
+            &\hspace{5mm}if \: \lambda \neq 0 \\
+            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
+            &\hspace{5mm}v_t \leftarrow \alpha v_{t-1} + (1 - \alpha) g^2_t \hspace{8mm} \\
+            &\hspace{5mm} \tilde{v_t} \leftarrow v_t \\
+            &\hspace{5mm}if \: centered \\
+            &\hspace{10mm} g^{ave}_t \leftarrow g^{ave}_{t-1} \alpha + (1-\alpha) g_t \\
+            &\hspace{10mm} \tilde{v_t} \leftarrow \tilde{v_t} - \big(g^{ave}_{t} \big)^2 \\
+            &\hspace{5mm}if \: \mu > 0 \\
+            &\hspace{10mm} \textbf{b}_t \leftarrow \mu \textbf{b}_{t-1} +
+                g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \\
+            &\hspace{10mm} \theta_t \leftarrow \theta_{t-1} - \gamma \textbf{b}_t \\
+            &\hspace{5mm} else \\
+            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} -
+                \gamma g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \hspace{3mm} \\
+            &\rule{110mm}{0.4pt} \\[-1.ex]
+            &\bf{return} \: \theta_t \\[-1.ex]
+            &\rule{110mm}{0.4pt} \\[-1.ex]
+        \end{aligned}
+
+    For further details regarding the algorithm we refer to
+    `lecture notes <https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_
+    by G. Hinton, and to the centered version introduced in `Generating Sequences
+    With Recurrent Neural Networks <https://arxiv.org/abs/1308.0850>`_.
+    The implementation here takes the square root of the gradient average before
+    adding epsilon (note that TensorFlow interchanges these two operations). The effective
+    learning rate is thus :math:`\gamma/(\sqrt{v} + \epsilon)` where :math:`\gamma`
+    is the scheduled learning rate and :math:`v` is the weighted moving average
+    of the squared gradient.
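+
+    As a rough sketch of that ordering difference (``v`` and ``eps`` are
+    illustrative names only)::
+
+        >>> denom_here = v.sqrt() + eps   # this implementation: sqrt, then eps
+        >>> denom_tf = (v + eps).sqrt()   # TensorFlow-style: eps, then sqrt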
+ """ + + rf""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + momentum (float, optional): momentum factor (default: 0) + alpha (float, optional): smoothing constant (default: 0.99) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + centered (bool, optional) : if ``True``, compute the centered RMSProp, + the gradient is normalized by an estimation of its variance + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + {_foreach_doc} + {_maximize_doc} + {_capturable_doc} + {_differentiable_doc} + + """ +) + + +def _single_tensor_rmsprop( + params: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + grad_avgs: List[Tensor], + momentum_buffer_list: List[Tensor], + state_steps: List[Tensor], + *, + lr: float, + alpha: float, + eps: float, + weight_decay: float, + momentum: float, + centered: bool, + maximize: bool, + differentiable: bool, + capturable: bool, + has_complex: bool, +): + for i, param in enumerate(params): + step = state_steps[i] + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices() + assert ( + param.device.type == step.device.type + and param.device.type in capturable_supported_devices + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." + + grad = grads[i] + grad = grad if not maximize else -grad + square_avg = square_avgs[i] + + step += 1 + + if weight_decay != 0: + grad = grad.add(param, alpha=weight_decay) + + is_complex_param = torch.is_complex(param) + if is_complex_param: + param = torch.view_as_real(param) + grad = torch.view_as_real(grad) + square_avg = torch.view_as_real(square_avg) + + square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) + + if centered: + grad_avg = grad_avgs[i] + if is_complex_param: + grad_avg = torch.view_as_real(grad_avg) + grad_avg.lerp_(grad, 1 - alpha) + avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).sqrt_() + else: + avg = square_avg.sqrt() + + if differentiable: + avg = avg.add(eps) + else: + avg = avg.add_(eps) + + if momentum > 0: + buf = momentum_buffer_list[i] + if is_complex_param: + buf = torch.view_as_real(buf) + buf.mul_(momentum).addcdiv_(grad, avg) + param.add_(buf, alpha=-lr) + else: + param.addcdiv_(grad, avg, value=-lr) + + +def _multi_tensor_rmsprop( + params: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + grad_avgs: List[Tensor], + momentum_buffer_list: List[Tensor], + state_steps: List[Tensor], + *, + lr: float, + alpha: float, + eps: float, + weight_decay: float, + momentum: float, + centered: bool, + maximize: bool, + differentiable: bool, + capturable: bool, + has_complex: bool, +): + if len(params) == 0: + return + + assert not differentiable, "_foreach ops don't support autograd" + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices() + assert all( + p.device.type == step.device.type + and p.device.type in capturable_supported_devices + for p, step in zip(params, state_steps) + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." 
+ + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, square_avgs, grad_avgs, momentum_buffer_list, state_steps] + ) + for ( + ( + grouped_params, + grouped_grads, + grouped_square_avgs, + grouped_grad_avgs, + grouped_momentum_buffer_list, + grouped_state_steps, + ) + ), _ in grouped_tensors.values(): + if has_complex: + state_and_grads = [grouped_grads, grouped_square_avgs] + if momentum > 0: + state_and_grads.append(grouped_momentum_buffer_list) + if centered: + state_and_grads.append(grouped_grad_avgs) + _view_as_real(grouped_params, *state_and_grads) + + if maximize: + grouped_grads = torch._foreach_neg(grouped_grads) # type: ignore[assignment] + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. + if grouped_state_steps[0].is_cpu: + torch._foreach_add_( + grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0 + ) + else: + torch._foreach_add_(grouped_state_steps, 1) + + if weight_decay != 0: + # Re-use the intermediate memory (grouped_grads) already allocated for maximize + if maximize: + torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay) + else: + grouped_grads = torch._foreach_add( # type: ignore[assignment] + grouped_grads, grouped_params, alpha=weight_decay + ) + + torch._foreach_mul_(grouped_square_avgs, alpha) + torch._foreach_addcmul_( + grouped_square_avgs, grouped_grads, grouped_grads, value=1 - alpha + ) + + if centered: + torch._foreach_lerp_(grouped_grad_avgs, grouped_grads, 1 - alpha) + avg = torch._foreach_addcmul( + grouped_square_avgs, grouped_grad_avgs, grouped_grad_avgs, value=-1 + ) + torch._foreach_sqrt_(avg) + torch._foreach_add_(avg, eps) + else: + avg = torch._foreach_sqrt(grouped_square_avgs) + torch._foreach_add_(avg, eps) + + if momentum > 0: + torch._foreach_mul_(grouped_momentum_buffer_list, momentum) + torch._foreach_addcdiv_(grouped_momentum_buffer_list, grouped_grads, avg) + # If LR is a tensor, the else branch will internally call item() + # which will cause silent incorrectness if we are capturing + if capturable and isinstance(lr, torch.Tensor): + momentum_lr = torch._foreach_mul(grouped_momentum_buffer_list, -lr) + torch._foreach_add_(grouped_params, momentum_lr) + else: + torch._foreach_add_( + grouped_params, grouped_momentum_buffer_list, alpha=-lr + ) + else: + # If LR is a tensor, the else branch will internally call item() + # which will cause silent incorrectness if we are capturing + if capturable and isinstance(lr, torch.Tensor): + torch._foreach_div_(avg, -lr) + torch._foreach_addcdiv_(grouped_params, grouped_grads, avg) + else: + torch._foreach_addcdiv_(grouped_params, grouped_grads, avg, value=-lr) + + +@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_rmsprop) +def rmsprop( + params: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + grad_avgs: List[Tensor], + momentum_buffer_list: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + capturable: bool = False, + has_complex: bool = False, + *, + lr: float, + alpha: 
float, + eps: float, + weight_decay: float, + momentum: float, + centered: bool, +): + r"""Functional API that performs rmsprop algorithm computation. + See :class:`~torch.optim.RMSProp` for details. + """ + # this check is slow during compilation, so we skip it + # if it's strictly needed we can add this check back in dynamo + if not torch._utils.is_compiling() and not all( + isinstance(t, torch.Tensor) for t in state_steps + ): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + if foreach is None: + _, foreach = _default_to_fused_or_foreach( + params, differentiable, use_fused=False + ) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_rmsprop + else: + func = _single_tensor_rmsprop + + func( + params, + grads, + square_avgs, + grad_avgs, + momentum_buffer_list, + state_steps, + lr=lr, + alpha=alpha, + eps=eps, + weight_decay=weight_decay, + momentum=momentum, + centered=centered, + maximize=maximize, + capturable=capturable, + differentiable=differentiable, + has_complex=has_complex, + ) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/rprop.py b/parrot/lib/python3.10/site-packages/torch/optim/rprop.py new file mode 100644 index 0000000000000000000000000000000000000000..ae34865f1c154f1ceacd2a1e03cc95d53a2bc5b7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/rprop.py @@ -0,0 +1,452 @@ +# mypy: allow-untyped-defs +from typing import List, Optional, Tuple + +import torch +from torch import Tensor +from .optimizer import ( + _capturable_doc, + _default_to_fused_or_foreach, + _differentiable_doc, + _disable_dynamo_if_unsupported, + _foreach_doc, + _get_capturable_supported_devices, + _get_scalar_dtype, + _maximize_doc, + _use_grad_for_differentiable, + _view_as_real, + Optimizer, + ParamsT, +) + +__all__ = ["Rprop", "rprop"] + + +class Rprop(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = 1e-2, + etas: Tuple[float, float] = (0.5, 1.2), + step_sizes: Tuple[float, float] = (1e-6, 50), + *, + capturable: bool = False, + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 < etas[0] < 1.0 < etas[1]: + raise ValueError(f"Invalid eta values: {etas[0]}, {etas[1]}") + + defaults = dict( + lr=lr, + etas=etas, + step_sizes=step_sizes, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + capturable=capturable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + group.setdefault("capturable", False) + for p in group["params"]: + p_state = self.state.get(p, []) + if len(p_state) != 0 and not torch.is_tensor(p_state["step"]): + step_val = float(p_state["step"]) + p_state["step"] = ( + torch.tensor( + step_val, dtype=_get_scalar_dtype(), device=p.device + ) + if group["capturable"] + else torch.tensor(step_val, dtype=_get_scalar_dtype()) + ) + + def _init_group(self, group, params, grads, prevs, step_sizes, state_steps): + has_complex = False + for p in group["params"]: + if p.grad is None: + continue + has_complex |= torch.is_complex(p) + params.append(p) + grad = p.grad + if grad.is_sparse: + 
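# Rprop keeps a dense per-element step size and compares the signs of + # consecutive gradients elementwise, so sparse gradient layouts are unsupported.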
raise RuntimeError("Rprop does not support sparse gradients") + + grads.append(grad) + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = ( + torch.zeros((), dtype=_get_scalar_dtype(), device=p.device) + if group["capturable"] + else torch.zeros((), dtype=_get_scalar_dtype()) + ) + + state["prev"] = torch.zeros_like(p, memory_format=torch.preserve_format) + if p.dtype.is_complex: + # Complex Number should be as if they are two independent real numbers. + # Hence the step_size shouldn't be zero for imaginary part. + state["step_size"] = torch.full_like( + grad, complex(group["lr"], group["lr"]) + ) + else: + state["step_size"] = torch.full_like(grad, group["lr"]) + + prevs.append(state["prev"]) + step_sizes.append(state["step_size"]) + state_steps.append(state["step"]) + + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params: List[Tensor] = [] + grads: List[Tensor] = [] + prevs: List[Tensor] = [] + step_sizes: List[Tensor] = [] + state_steps: List[Tensor] = [] + + etaminus, etaplus = group["etas"] + step_size_min, step_size_max = group["step_sizes"] + foreach = group["foreach"] + maximize = group["maximize"] + + has_complex = self._init_group( + group, params, grads, prevs, step_sizes, state_steps + ) + + rprop( + params, + grads, + prevs, + step_sizes, + state_steps, + step_size_min=step_size_min, + step_size_max=step_size_max, + etaminus=etaminus, + etaplus=etaplus, + foreach=foreach, + maximize=maximize, + differentiable=group["differentiable"], + capturable=group["capturable"], + has_complex=has_complex, + ) + + return loss + + +Rprop.__doc__ = ( + r"""Implements the resilient backpropagation algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \theta_0 \in \mathbf{R}^d \text{ (params)},f(\theta) + \text{ (objective)}, \\ + &\hspace{13mm} \eta_{+/-} \text{ (etaplus, etaminus)}, \Gamma_{max/min} + \text{ (step sizes)} \\ + &\textbf{initialize} : g^0_{prev} \leftarrow 0, + \: \eta_0 \leftarrow \text{lr (learning rate)} \\ + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \textbf{for} \text{ } i = 0, 1, \ldots, d-1 \: \mathbf{do} \\ + &\hspace{10mm} \textbf{if} \: g^i_{prev} g^i_t > 0 \\ + &\hspace{15mm} \eta^i_t \leftarrow \mathrm{min}(\eta^i_{t-1} \eta_{+}, + \Gamma_{max}) \\ + &\hspace{10mm} \textbf{else if} \: g^i_{prev} g^i_t < 0 \\ + &\hspace{15mm} \eta^i_t \leftarrow \mathrm{max}(\eta^i_{t-1} \eta_{-}, + \Gamma_{min}) \\ + &\hspace{15mm} g^i_t \leftarrow 0 \\ + &\hspace{10mm} \textbf{else} \: \\ + &\hspace{15mm} \eta^i_t \leftarrow \eta^i_{t-1} \\ + &\hspace{5mm}\theta_t \leftarrow \theta_{t-1}- \eta_t \mathrm{sign}(g_t) \\ + &\hspace{5mm}g_{prev} \leftarrow g_t \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to the paper + `A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm + `_. 
+ """ + + rf""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + etas (Tuple[float, float], optional): pair of (etaminus, etaplus), that + are multiplicative increase and decrease factors + (default: (0.5, 1.2)) + step_sizes (Tuple[float, float], optional): a pair of minimal and + maximal allowed step sizes (default: (1e-6, 50)) + {_foreach_doc} + {_capturable_doc} + {_maximize_doc} + {_differentiable_doc} + + """ +) + + +def _single_tensor_rprop( + params: List[Tensor], + grads: List[Tensor], + prevs: List[Tensor], + step_sizes: List[Tensor], + state_steps: List[Tensor], + *, + step_size_min: float, + step_size_max: float, + etaminus: float, + etaplus: float, + maximize: bool, + capturable: bool, + differentiable: bool, + has_complex: bool, +): + for i, param in enumerate(params): + grad = grads[i] + grad = grad if not maximize else -grad + prev = prevs[i] + step_size = step_sizes[i] + step = state_steps[i] + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices() + assert ( + param.device.type == step.device.type + and param.device.type in capturable_supported_devices + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." + + step += 1 + + if torch.is_complex(param): + grad = torch.view_as_real(grad) + prev = torch.view_as_real(prev) + param = torch.view_as_real(param) + step_size = torch.view_as_real(step_size) + if differentiable: + sign = grad.mul(prev.clone()).sign() + else: + sign = grad.mul(prev).sign() + + if capturable: + sign.copy_(torch.where(sign.gt(0), etaplus, sign)) + sign.copy_(torch.where(sign.lt(0), etaminus, sign)) + sign.copy_(torch.where(sign.eq(0), 1, sign)) + else: + sign[sign.gt(0)] = etaplus + sign[sign.lt(0)] = etaminus + sign[sign.eq(0)] = 1 + + # update stepsizes with step size updates + step_size.mul_(sign).clamp_(step_size_min, step_size_max) + + # for dir<0, dfdx=0 + # for dir>=0 dfdx=dfdx + grad = grad.clone(memory_format=torch.preserve_format) + if capturable: + grad.copy_(torch.where(sign.eq(etaminus), 0, grad)) + else: + grad[sign.eq(etaminus)] = 0 + + # update parameters + param.addcmul_(grad.sign(), step_size, value=-1) + prev.copy_(grad) + + +def _multi_tensor_rprop( + params: List[Tensor], + grads: List[Tensor], + prevs: List[Tensor], + step_sizes: List[Tensor], + state_steps: List[Tensor], + *, + step_size_min: float, + step_size_max: float, + etaminus: float, + etaplus: float, + maximize: bool, + capturable: bool, + differentiable: bool, + has_complex: bool, +): + if len(params) == 0: + return + + assert not differentiable, "_foreach ops don't support autograd" + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + capturable_supported_devices = _get_capturable_supported_devices() + assert all( + p.device.type == step.device.type + and p.device.type in capturable_supported_devices + for p, step in zip(params, state_steps) + ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}." 
+ + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, prevs, step_sizes, state_steps] + ) + for ( + grouped_params, + grouped_grads, + grouped_prevs, + grouped_step_sizes, + grouped_state_steps, + ), _ in grouped_tensors.values(): + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. + if grouped_state_steps[0].is_cpu: + torch._foreach_add_( + grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0 + ) + else: + torch._foreach_add_(grouped_state_steps, 1) + + # Handle complex params + if has_complex: + _view_as_real( + grouped_params, grouped_grads, grouped_prevs, grouped_step_sizes + ) + + signs = torch._foreach_mul(grouped_grads, grouped_prevs) + if maximize: + torch._foreach_neg_(signs) + + # At the end of the step, grouped_prevs will contain the current grads, so we reuse + # grouped_prevs memory instead of creating a new buffer, but, for clarity, we reassign + # to keep referring to the buffer as grouped_grads. + torch._foreach_copy_(grouped_prevs, grouped_grads) + if maximize: + torch._foreach_neg_(grouped_prevs) + grouped_grads = grouped_prevs + + torch._foreach_sign_(signs) + if capturable: + for sign in signs: + sign.copy_(torch.where(sign.gt(0), etaplus, sign)) + sign.copy_(torch.where(sign.lt(0), etaminus, sign)) + sign.copy_(torch.where(sign.eq(0), 1, sign)) + else: + for sign in signs: + sign[sign.gt(0)] = etaplus + sign[sign.lt(0)] = etaminus + sign[sign.eq(0)] = 1 + + # update stepsizes with step size updates + torch._foreach_mul_(grouped_step_sizes, signs) + for step_size in grouped_step_sizes: + step_size.clamp_(step_size_min, step_size_max) + + # for dir<0, dfdx=0 + # for dir>=0 dfdx=dfdx + grouped_grads = list(grouped_grads) + for i in range(len(grouped_grads)): + grouped_grads[i].copy_( + torch.where(signs[i].eq(etaminus), 0, grouped_grads[i]) + ) + + # explicitly del signs as it's not used after here to save memory + del signs + + # update parameters + grad_signs = [grad.sign() for grad in grouped_grads] + torch._foreach_addcmul_( + grouped_params, grad_signs, grouped_step_sizes, value=-1 + ) + + # Logically, you may expect grouped_prevs to get updated to grouped_grads, but that's + # basically already happened since we've been using grouped_prevs' memory to store + # updated grouped_grads! + + +@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_rprop) +def rprop( + params: List[Tensor], + grads: List[Tensor], + prevs: List[Tensor], + step_sizes: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + capturable: bool = False, + maximize: bool = False, + differentiable: bool = False, + has_complex: bool = False, + *, + step_size_min: float, + step_size_max: float, + etaminus: float, + etaplus: float, +): + r"""Functional API that performs rprop algorithm computation. + + See :class:`~torch.optim.Rprop` for details. 
+ """ + # this check is slow during compilation, so we skip it + # if it's strictly needed we can add this check back in dynamo + if not torch._utils.is_compiling() and not all( + isinstance(t, torch.Tensor) for t in state_steps + ): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + if foreach is None: + _, foreach = _default_to_fused_or_foreach( + params, differentiable, use_fused=False + ) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_rprop + else: + func = _single_tensor_rprop + + func( + params, + grads, + prevs, + step_sizes, + state_steps, + step_size_min=step_size_min, + step_size_max=step_size_max, + etaminus=etaminus, + etaplus=etaplus, + capturable=capturable, + maximize=maximize, + differentiable=differentiable, + has_complex=has_complex, + ) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/sgd.py b/parrot/lib/python3.10/site-packages/torch/optim/sgd.py new file mode 100644 index 0000000000000000000000000000000000000000..8cf26cfcf95cfda8edd6a80c4ef20ff8b798f733 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/sgd.py @@ -0,0 +1,504 @@ +# mypy: allow-untyped-defs +from typing import List, Optional + +import torch +from torch import Tensor +from torch.utils._foreach_utils import _get_fused_kernels_supported_devices +from .optimizer import ( + _default_to_fused_or_foreach, + _differentiable_doc, + _foreach_doc, + _fused_doc, + _maximize_doc, + _use_grad_for_differentiable, + DeviceDict, + Optimizer, +) + +__all__ = ["SGD", "sgd"] + + +class SGD(Optimizer): + def __init__( + self, + params, + lr: float = 1e-3, + momentum: float = 0, + dampening: float = 0, + weight_decay: float = 0, + nesterov=False, + *, + maximize: bool = False, + foreach: Optional[bool] = None, + differentiable: bool = False, + fused: Optional[bool] = None, + ): + if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr}") + if momentum < 0.0: + raise ValueError(f"Invalid momentum value: {momentum}") + if weight_decay < 0.0: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + defaults = dict( + lr=lr, + momentum=momentum, + dampening=dampening, + weight_decay=weight_decay, + nesterov=nesterov, + maximize=maximize, + foreach=foreach, + differentiable=differentiable, + fused=fused, + ) + if nesterov and (momentum <= 0 or dampening != 0): + raise ValueError("Nesterov momentum requires a momentum and zero dampening") + super().__init__(params, defaults) + + if fused: + self._step_supports_amp_scaling = True + + fused_supported_devices = _get_fused_kernels_supported_devices() + if not all( + p.device.type in fused_supported_devices and torch.is_floating_point(p) + for pg in self.param_groups + for p in pg["params"] + ): + raise RuntimeError( + "`fused=True` requires all the params to be floating point Tensors of " + f"supported devices: {fused_supported_devices}." 
+ ) + if differentiable: + raise RuntimeError("`fused` does not support `differentiable`") + if foreach: + raise RuntimeError("`fused` and `foreach` cannot be `True` together.") + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("nesterov", False) + group.setdefault("maximize", False) + group.setdefault("foreach", None) + group.setdefault("differentiable", False) + group.setdefault("fused", False) + + def _init_group(self, group, params, grads, momentum_buffer_list): + has_sparse_grad = False + + for p in group["params"]: + if p.grad is not None: + params.append(p) + grads.append(p.grad) + if p.grad.is_sparse: + has_sparse_grad = True + + if group["momentum"] != 0: + state = self.state[p] + momentum_buffer_list.append(state.get("momentum_buffer")) + + return has_sparse_grad + + @_use_grad_for_differentiable + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params: List[Tensor] = [] + grads: List[Tensor] = [] + momentum_buffer_list: List[Optional[Tensor]] = [] + + has_sparse_grad = self._init_group( + group, params, grads, momentum_buffer_list + ) + + sgd( + params, + grads, + momentum_buffer_list, + weight_decay=group["weight_decay"], + momentum=group["momentum"], + lr=group["lr"], + dampening=group["dampening"], + nesterov=group["nesterov"], + maximize=group["maximize"], + has_sparse_grad=has_sparse_grad, + foreach=group["foreach"], + fused=group["fused"], + grad_scale=getattr(self, "grad_scale", None), + found_inf=getattr(self, "found_inf", None), + ) + + if group["momentum"] != 0: + # update momentum_buffers in state + for p, momentum_buffer in zip(params, momentum_buffer_list): + state = self.state[p] + state["momentum_buffer"] = momentum_buffer + + return loss + + +SGD.__doc__ = ( + r"""Implements stochastic gradient descent (optionally with momentum). + + .. 
math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta) + \text{ (objective)}, \: \lambda \text{ (weight decay)}, \\ + &\hspace{13mm} \:\mu \text{ (momentum)}, \:\tau \text{ (dampening)}, + \:\textit{ nesterov,}\:\textit{ maximize} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}\textbf{if} \: \mu \neq 0 \\ + &\hspace{10mm}\textbf{if} \: t > 1 \\ + &\hspace{15mm} \textbf{b}_t \leftarrow \mu \textbf{b}_{t-1} + (1-\tau) g_t \\ + &\hspace{10mm}\textbf{else} \\ + &\hspace{15mm} \textbf{b}_t \leftarrow g_t \\ + &\hspace{10mm}\textbf{if} \: \textit{nesterov} \\ + &\hspace{15mm} g_t \leftarrow g_{t} + \mu \textbf{b}_t \\ + &\hspace{10mm}\textbf{else} \\[-1.ex] + &\hspace{15mm} g_t \leftarrow \textbf{b}_t \\ + &\hspace{5mm}\textbf{if} \: \textit{maximize} \\ + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} + \gamma g_t \\[-1.ex] + &\hspace{5mm}\textbf{else} \\[-1.ex] + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma g_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + Nesterov momentum is based on the formula from + `On the importance of initialization and momentum in deep learning`__. + """ + + rf""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + momentum (float, optional): momentum factor (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + dampening (float, optional): dampening for momentum (default: 0) + nesterov (bool, optional): enables Nesterov momentum (default: False) + {_maximize_doc} + {_foreach_doc} + {_differentiable_doc} + {_fused_doc} + """ + + r""" + + Example: + >>> # xdoctest: +SKIP + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> optimizer.zero_grad() + >>> loss_fn(model(input), target).backward() + >>> optimizer.step() + + __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf + + .. note:: + The implementation of SGD with Momentum/Nesterov subtly differs from + Sutskever et al. and implementations in some other frameworks. + + Considering the specific case of Momentum, the update can be written as + + .. math:: + \begin{aligned} + v_{t+1} & = \mu * v_{t} + g_{t+1}, \\ + p_{t+1} & = p_{t} - \text{lr} * v_{t+1}, + \end{aligned} + + where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the + parameters, gradient, velocity, and momentum respectively. + + This is in contrast to Sutskever et al. and + other frameworks which employ an update of the form + + .. math:: + \begin{aligned} + v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\ + p_{t+1} & = p_{t} - v_{t+1}. + \end{aligned} + + The Nesterov version is analogously modified. + + Moreover, the initial value of the momentum buffer is set to the + gradient value at the first step. This is in contrast to some other + frameworks that initialize it to all zeros. 
+ + """ +) + + +def sgd( + params: List[Tensor], + d_p_list: List[Tensor], + momentum_buffer_list: List[Optional[Tensor]], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + has_sparse_grad: bool = False, + foreach: Optional[bool] = None, + fused: Optional[bool] = None, + grad_scale: Optional[Tensor] = None, + found_inf: Optional[Tensor] = None, + *, + weight_decay: float, + momentum: float, + lr: float, + dampening: float, + nesterov: bool, + maximize: bool, +): + r"""Functional API that performs SGD algorithm computation. + + See :class:`~torch.optim.SGD` for details. + """ + + # Respect when the user inputs False/True for foreach or fused. We only want to change + # the default when neither have been user-specified. Note that we default to foreach + # and pass False to use_fused. This is not a mistake--we want to give the fused impl + # bake-in time before making it the default, even if it is typically faster. + if foreach is None and fused is None: + # why must we be explicit about an if statement for torch.jit.is_scripting here? + # because JIT can't handle Optionals nor fancy conditionals when scripting + if not torch.jit.is_scripting(): + fused, foreach = _default_to_fused_or_foreach( + params, differentiable=False, use_fused=False + ) + else: + foreach = False + fused = False + if foreach is None: + foreach = False + if fused is None: + fused = False + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + if fused and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with fused optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_sgd + elif fused and not torch.jit.is_scripting(): + func = _fused_sgd + else: + func = _single_tensor_sgd + + func( + params, + d_p_list, + momentum_buffer_list, + weight_decay=weight_decay, + momentum=momentum, + lr=lr, + dampening=dampening, + nesterov=nesterov, + has_sparse_grad=has_sparse_grad, + maximize=maximize, + grad_scale=grad_scale, + found_inf=found_inf, + ) + + +def _single_tensor_sgd( + params: List[Tensor], + grads: List[Tensor], + momentum_buffer_list: List[Optional[Tensor]], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + weight_decay: float, + momentum: float, + lr: float, + dampening: float, + nesterov: bool, + maximize: bool, + has_sparse_grad: bool, +): + assert grad_scale is None and found_inf is None + + for i, param in enumerate(params): + grad = grads[i] if not maximize else -grads[i] + + if weight_decay != 0: + grad = grad.add(param, alpha=weight_decay) + + if momentum != 0: + buf = momentum_buffer_list[i] + + if buf is None: + buf = torch.clone(grad).detach() + momentum_buffer_list[i] = buf + else: + buf.mul_(momentum).add_(grad, alpha=1 - dampening) + + if nesterov: + grad = grad.add(buf, alpha=momentum) + else: + grad = buf + + param.add_(grad, alpha=-lr) + + +def _multi_tensor_sgd( + params: List[Tensor], + grads: List[Tensor], + momentum_buffer_list: List[Optional[Tensor]], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + weight_decay: float, + momentum: float, + lr: float, + dampening: float, + nesterov: bool, + maximize: bool, + has_sparse_grad: bool, +): + assert grad_scale is None and found_inf is None + + if len(params) == 0: + return + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + 
[params, grads, momentum_buffer_list], with_indices=True # type: ignore[list-item] + ) + for ( + device_params, + device_grads, + device_momentum_buffer_list, + ), indices in grouped_tensors.values(): + device_has_sparse_grad = has_sparse_grad and any( + grad.is_sparse for grad in device_grads + ) + + if maximize: + device_grads = torch._foreach_neg(device_grads) # type: ignore[assignment] + + if weight_decay != 0: + # Re-use the intermediate memory (device_grads) already allocated for maximize + if maximize: + torch._foreach_add_(device_grads, device_params, alpha=weight_decay) + else: + device_grads = torch._foreach_add( # type: ignore[assignment] + device_grads, device_params, alpha=weight_decay + ) + + if momentum != 0: + bufs = [] + + all_states_with_momentum_buffer = True + for i in range(len(device_momentum_buffer_list)): + if device_momentum_buffer_list[i] is None: + all_states_with_momentum_buffer = False + break + else: + bufs.append(device_momentum_buffer_list[i]) + + if all_states_with_momentum_buffer: + torch._foreach_mul_(bufs, momentum) + torch._foreach_add_(bufs, device_grads, alpha=1 - dampening) + else: + bufs = [] + for i in range(len(device_momentum_buffer_list)): + if device_momentum_buffer_list[i] is None: + buf = device_momentum_buffer_list[i] = momentum_buffer_list[ + indices[i] + ] = torch.clone(device_grads[i]).detach() + else: + buf = device_momentum_buffer_list[i] + buf.mul_(momentum).add_(device_grads[i], alpha=1 - dampening) + + bufs.append(buf) + + if nesterov: + torch._foreach_add_(device_grads, bufs, alpha=momentum) + else: + device_grads = bufs + + if not device_has_sparse_grad: + # handle internal item() call if lr is a tensor + if isinstance(lr, torch.Tensor) and torch._utils.is_compiling(): + grads_x_lr = torch._foreach_mul(device_grads, -lr) + torch._foreach_add_(device_params, grads_x_lr) + else: + torch._foreach_add_(device_params, device_grads, alpha=-lr) + else: + # foreach APIs don't support sparse + for i in range(len(device_params)): + device_params[i].add_(device_grads[i], alpha=-lr) + + +def _fused_sgd( + params: List[Tensor], + grads: List[Tensor], + momentum_buffer_list: List[Optional[Tensor]], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + weight_decay: float, + momentum: float, + lr: float, + dampening: float, + nesterov: bool, + maximize: bool, + has_sparse_grad: bool, +) -> None: + if not params: + return + if has_sparse_grad: + raise RuntimeError("`_fused_sgd` does not support sparse gradients") + grad_scale_dict: DeviceDict = ( + {grad_scale.device: grad_scale} if grad_scale is not None else {} + ) + found_inf_dict: DeviceDict = ( + {found_inf.device: found_inf} if found_inf is not None else {} + ) + + no_momentum_buffer = momentum == 0 + is_first_step = ( + all(t is None for t in momentum_buffer_list) and not no_momentum_buffer + ) + if is_first_step: + for i, g in enumerate(grads): + momentum_buffer_list[i] = torch.empty_like(g) + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, momentum_buffer_list], with_indices=False # type: ignore[list-item] + ) + for (device, _), ( + (device_params, device_grads, device_momentum_buffer_list), + _, + ) in grouped_tensors.items(): + device_grad_scale, device_found_inf = None, None + if grad_scale is not None: + device_grad_scale = grad_scale_dict.setdefault( + device, grad_scale.to(device) + ) + if found_inf_dict is not None and found_inf is not None: + device_found_inf = found_inf_dict.setdefault(device, found_inf.to(device)) + 
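# Dispatch one fused op per (device, dtype) bucket: it updates params and momentum + # buffers together and applies the grad_scale / found_inf tensors when provided.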
torch._fused_sgd_( + device_params, + device_grads, + [] if no_momentum_buffer else device_momentum_buffer_list, + weight_decay=weight_decay, + momentum=momentum, + lr=lr, + dampening=dampening, + nesterov=nesterov, + maximize=maximize, + is_first_step=is_first_step, + grad_scale=device_grad_scale, + found_inf=device_found_inf, + ) diff --git a/parrot/lib/python3.10/site-packages/torch/optim/sparse_adam.py b/parrot/lib/python3.10/site-packages/torch/optim/sparse_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..adb7c17629c236b8d4638c23e809350225a06c35 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/sparse_adam.py @@ -0,0 +1,181 @@ +# mypy: allow-untyped-defs +from typing import List, Tuple + +import torch +from torch import Tensor +from . import _functional as F +from .optimizer import _maximize_doc, Optimizer, ParamsT + +__all__ = ["SparseAdam"] + + +class SparseAdam(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + maximize: bool = False, + ): + if not 0.0 < lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 < eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + + defaults = dict(lr=lr, betas=betas, eps=eps, maximize=maximize) + super().__init__(params, defaults) + + sparse_params = [] + complex_params = [] + for index, param_group in enumerate(self.param_groups): + assert isinstance( + param_group, dict + ), f"param_groups must be a list of dicts, but got {type(param_group)}" + # given param group, convert given params to a list first before iterating + for d_index, d_param in enumerate(param_group["params"]): + if d_param.is_sparse: + sparse_params.append([index, d_index]) + if d_param.is_complex(): + complex_params.append([index, d_index]) + if sparse_params: + raise ValueError( + f"Sparse params at indices {sparse_params}: SparseAdam requires dense parameter tensors" + ) + if complex_params: + raise ValueError( + f"Complex params at indices {complex_params}: SparseAdam does not support complex parameters" + ) + + @torch.no_grad() + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad: List[Tensor] = [] + grads: List[Tensor] = [] + exp_avgs: List[Tensor] = [] + exp_avg_sqs: List[Tensor] = [] + state_steps: List[int] = [] + beta1, beta2 = group["betas"] + maximize = group.get("maximize", False) + + for p in group["params"]: + if p.grad is not None: + params_with_grad.append(p) + if not p.grad.is_sparse: + raise RuntimeError( + "SparseAdam does not support dense gradients, please consider Adam instead" + ) + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = 0 + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + + # update the steps for each param group update + state["step"] += 1 + # record the step after step update + state_steps.append(state["step"]) + + F.sparse_adam( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + state_steps, + eps=group["eps"], + beta1=beta1, + beta2=beta2, + lr=group["lr"], + maximize=maximize, + ) + + return loss + + +SparseAdam.__doc__ = rf"""SparseAdam implements a masked version of the Adam algorithm + suitable for sparse gradients. Currently, due to implementation constraints (explained + below), SparseAdam is only intended for a narrow subset of use cases, specifically + parameters of a dense layout with gradients of a sparse layout. This occurs in a + special case where the module backwards produces grads already in a sparse layout. + One example NN module that behaves as such is ``nn.Embedding(sparse=True)``. + + SparseAdam approximates the Adam algorithm by masking out the parameter and moment + updates corresponding to the zero values in the gradients. Whereas the Adam algorithm + will update the first moment, the second moment, and the parameters based on all values + of the gradients, SparseAdam only updates the moments and parameters corresponding + to the non-zero values of the gradients. + + A simplified way of thinking about the `intended` implementation is as such: + + 1. Create a mask of the non-zero values in the sparse gradients. For example, + if your gradient looks like [0, 5, 0, 0, 9], the mask would be [0, 1, 0, 0, 1]. + 2. Apply this mask over the running moments and do computation on only the + non-zero values. + 3. Apply this mask over the parameters and only apply an update on non-zero values. + + In actuality, we use sparse layout Tensors to optimize this approximation, which means the + more gradients that are masked by not being materialized, the more performant the optimization. + Since we rely on using sparse layout tensors, we infer that any materialized value in the + sparse layout is non-zero and we do NOT actually verify that all values are not zero! + It is important to not conflate a semantically sparse tensor (a tensor where many + of its values are zeros) with a sparse layout tensor (a tensor where ``.is_sparse`` + returns ``True``). The SparseAdam approximation is intended for `semantically` sparse + tensors and the sparse layout is only a implementation detail. A clearer implementation + would be to use MaskedTensors, but those are experimental. + + + .. 
note:: + + If you suspect your gradients are semantically sparse (but do not have sparse + layout), this variant may not be the best for you. Ideally, you want to avoid + materializing anything that is suspected to be sparse in the first place, since + needing to convert all your grads from dense layout to sparse layout may outweigh + the performance gain. Here, using Adam may be the best alternative, unless you + can easily rig up your module to output sparse grads similar to + ``nn.Embedding(sparse=True)``. If you insist on converting your grads, you can do + so by manually overriding your parameters' ``.grad`` fields with their sparse + equivalents before calling ``.step()``. + + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + {_maximize_doc} + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + + """ diff --git a/parrot/lib/python3.10/site-packages/torch/optim/swa_utils.py b/parrot/lib/python3.10/site-packages/torch/optim/swa_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..440897e6041e57469f84fee5de51a7be93a76f59 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/optim/swa_utils.py @@ -0,0 +1,451 @@ +# mypy: allow-untyped-defs +import itertools +import math +import warnings +from copy import deepcopy +from typing import Any, Callable, Iterable, List, Literal, Optional, Tuple, Union + +import torch +from torch import Tensor +from torch.nn import Module +from torch.optim.lr_scheduler import _format_param, LRScheduler +from torch.utils._foreach_utils import _get_foreach_kernels_supported_devices +from .optimizer import Optimizer + +__all__ = [ + "AveragedModel", + "update_bn", + "SWALR", + "get_ema_multi_avg_fn", + "get_swa_multi_avg_fn", + "get_ema_avg_fn", + "get_swa_avg_fn", +] + +from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype + +PARAM_LIST = Union[Tuple[Tensor, ...], List[Tensor]] + + +def get_ema_multi_avg_fn(decay=0.999): + @torch.no_grad() + def ema_update(ema_param_list: PARAM_LIST, current_param_list: PARAM_LIST, _): + # foreach lerp only handles float and complex + if torch.is_floating_point(ema_param_list[0]) or torch.is_complex( + ema_param_list[0] + ): + torch._foreach_lerp_(ema_param_list, current_param_list, 1 - decay) + else: + for p_ema, p_model in zip(ema_param_list, current_param_list): + p_ema.copy_(p_ema * decay + p_model * (1 - decay)) + + return ema_update + + +def get_swa_multi_avg_fn(): + @torch.no_grad() + def swa_update( + averaged_param_list: PARAM_LIST, + current_param_list: PARAM_LIST, + num_averaged: Union[Tensor, int], + ): + # foreach lerp only handles float and complex + if torch.is_floating_point(averaged_param_list[0]) or torch.is_complex( + averaged_param_list[0] + ): + torch._foreach_lerp_( + averaged_param_list, current_param_list, 1 / (num_averaged + 1) + ) + else: + diffs = torch._foreach_sub(current_param_list, averaged_param_list) + if isinstance(num_averaged, Tensor): + torch._foreach_addcdiv_( + averaged_param_list, + diffs, + [num_averaged + 1] * len(averaged_param_list), + ) + else: + torch._foreach_add_( + averaged_param_list, diffs, alpha=1.0 / (num_averaged + 1) + ) + + return swa_update + 
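Both factory functions above return in-place callbacks that ``AveragedModel.update_parameters`` (defined later in this file) invokes once per call. A minimal sketch of wiring up the EMA variant; the model, decay, and loop count are illustrative stand-ins, not part of this diff:

import torch
from torch.optim.swa_utils import AveragedModel, get_ema_multi_avg_fn

model = torch.nn.Linear(4, 2)  # stand-in model
ema_model = AveragedModel(model, multi_avg_fn=get_ema_multi_avg_fn(decay=0.99))

# The first update copies the parameters (n_averaged == 0); every later call
# applies ema_p <- 0.99 * ema_p + 0.01 * p with one _foreach_lerp_ per bucket.
for _ in range(3):  # stands in for training steps
    ema_model.update_parameters(model)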
+ +def get_ema_avg_fn(decay=0.999): + @torch.no_grad() + def ema_update(ema_param: Tensor, current_param: Tensor, num_averaged): + return decay * ema_param + (1 - decay) * current_param + + return ema_update + + +def get_swa_avg_fn(): + @torch.no_grad() + def swa_update( + averaged_param: Tensor, current_param: Tensor, num_averaged: Union[Tensor, int] + ): + return averaged_param + (current_param - averaged_param) / (num_averaged + 1) + + return swa_update + + +class AveragedModel(Module): + r"""Implements averaged model for Stochastic Weight Averaging (SWA) and + Exponential Moving Average (EMA). + + Stochastic Weight Averaging was proposed in `Averaging Weights Leads to + Wider Optima and Better Generalization`_ by Pavel Izmailov, Dmitrii + Podoprikhin, Timur Garipov, Dmitry Vetrov and Andrew Gordon Wilson + (UAI 2018). + + Exponential Moving Average is a variation of `Polyak averaging`_, + but using exponential weights instead of equal weights across iterations. + + AveragedModel class creates a copy of the provided module :attr:`model` + on the device :attr:`device` and allows to compute running averages of the + parameters of the :attr:`model`. + + Args: + model (torch.nn.Module): model to use with SWA/EMA + device (torch.device, optional): if provided, the averaged model will be + stored on the :attr:`device` + avg_fn (function, optional): the averaging function used to update + parameters; the function must take in the current value of the + :class:`AveragedModel` parameter, the current value of :attr:`model` + parameter, and the number of models already averaged; if None, + an equally weighted average is used (default: None) + multi_avg_fn (function, optional): the averaging function used to update + parameters inplace; the function must take in the current values of the + :class:`AveragedModel` parameters as a list, the current values of :attr:`model` + parameters as a list, and the number of models already averaged; if None, + an equally weighted average is used (default: None) + use_buffers (bool): if ``True``, it will compute running averages for + both the parameters and the buffers of the model. (default: ``False``) + + Example: + >>> # xdoctest: +SKIP("undefined variables") + >>> loader, optimizer, model, loss_fn = ... + >>> swa_model = torch.optim.swa_utils.AveragedModel(model) + >>> scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, + >>> T_max=300) + >>> swa_start = 160 + >>> swa_scheduler = SWALR(optimizer, swa_lr=0.05) + >>> for i in range(300): + >>> for input, target in loader: + >>> optimizer.zero_grad() + >>> loss_fn(model(input), target).backward() + >>> optimizer.step() + >>> if i > swa_start: + >>> swa_model.update_parameters(model) + >>> swa_scheduler.step() + >>> else: + >>> scheduler.step() + >>> + >>> # Update bn statistics for the swa_model at the end + >>> torch.optim.swa_utils.update_bn(loader, swa_model) + + You can also use custom averaging functions with the `avg_fn` or `multi_avg_fn` parameters. + If no averaging function is provided, the default is to compute + equally-weighted average of the weights (SWA). + + Example: + >>> # xdoctest: +SKIP("undefined variables") + >>> # Compute exponential moving averages of the weights and buffers + >>> ema_model = torch.optim.swa_utils.AveragedModel(model, + >>> torch.optim.swa_utils.get_ema_multi_avg_fn(0.9), use_buffers=True) + + .. note:: + When using SWA/EMA with models containing Batch Normalization you may + need to update the activation statistics for Batch Normalization. 
+ This can be done either by using the :meth:`torch.optim.swa_utils.update_bn` + or by setting :attr:`use_buffers` to `True`. The first approach updates the + statistics in a post-training step by passing data through the model. The + second does it during the parameter update phase by averaging all buffers. + Empirical evidence has shown that updating the statistics in normalization + layers increases accuracy, but you may wish to empirically test which + approach yields the best results in your problem. + + .. note:: + :attr:`avg_fn` and `multi_avg_fn` are not saved in the :meth:`state_dict` of the model. + + .. note:: + When :meth:`update_parameters` is called for the first time (i.e. + :attr:`n_averaged` is `0`) the parameters of `model` are copied + to the parameters of :class:`AveragedModel`. For every subsequent + call of :meth:`update_parameters` the function `avg_fn` is used + to update the parameters. + + .. _Averaging Weights Leads to Wider Optima and Better Generalization: + https://arxiv.org/abs/1803.05407 + .. _There Are Many Consistent Explanations of Unlabeled Data: Why You Should + Average: + https://arxiv.org/abs/1806.05594 + .. _SWALP: Stochastic Weight Averaging in Low-Precision Training: + https://arxiv.org/abs/1904.11943 + .. _Stochastic Weight Averaging in Parallel: Large-Batch Training That + Generalizes Well: + https://arxiv.org/abs/2001.02312 + .. _Polyak averaging: + https://paperswithcode.com/method/polyak-averaging + """ + n_averaged: Tensor + + def __init__( + self, + model: Module, + device: Optional[Union[int, torch.device]] = None, + avg_fn: Optional[Callable[[Tensor, Tensor, Union[Tensor, int]], Tensor]] = None, + multi_avg_fn: Optional[ + Callable[[PARAM_LIST, PARAM_LIST, Union[Tensor, int]], None] + ] = None, + use_buffers=False, + ): + super().__init__() + assert ( + avg_fn is None or multi_avg_fn is None + ), "Only one of avg_fn and multi_avg_fn should be provided" + self.module = deepcopy(model) + if device is not None: + self.module = self.module.to(device) + self.register_buffer( + "n_averaged", torch.tensor(0, dtype=torch.long, device=device) + ) + self.avg_fn = avg_fn + self.multi_avg_fn = multi_avg_fn + self.use_buffers = use_buffers + + def forward(self, *args, **kwargs): + return self.module(*args, **kwargs) + + def update_parameters(self, model: Module): + self_param = ( + itertools.chain(self.module.parameters(), self.module.buffers()) + if self.use_buffers + else self.parameters() + ) + model_param = ( + itertools.chain(model.parameters(), model.buffers()) + if self.use_buffers + else model.parameters() + ) + self_param_detached: List[Optional[Tensor]] = [] + model_param_detached: List[Optional[Tensor]] = [] + for p_averaged, p_model in zip(self_param, model_param): + p_model_ = p_model.detach().to(p_averaged.device) + self_param_detached.append(p_averaged.detach()) + model_param_detached.append(p_model_) + if self.n_averaged == 0: + p_averaged.detach().copy_(p_model_) + + if self.n_averaged > 0: + if self.multi_avg_fn is not None or self.avg_fn is None: + grouped_tensors = _group_tensors_by_device_and_dtype( + [self_param_detached, model_param_detached] + ) + for (device, _), ( + [self_params, model_params], + _, + ) in grouped_tensors.items(): + if self.multi_avg_fn: + self.multi_avg_fn( + self_params, model_params, self.n_averaged.to(device) # type: ignore[arg-type] + ) + elif ( + device is not None + and device.type in _get_foreach_kernels_supported_devices() + ): + multi_avg_fn = get_swa_multi_avg_fn() + multi_avg_fn( + self_params, 
model_params, self.n_averaged.to(device) + ) + else: + avg_fn = get_swa_avg_fn() + n_averaged = self.n_averaged.to(device) + for p_averaged, p_model in zip(self_params, model_params): # type: ignore[assignment] + p_averaged.copy_(avg_fn(p_averaged, p_model, n_averaged)) + else: + for p_averaged, p_model in zip( # type: ignore[assignment] + self_param_detached, model_param_detached + ): + n_averaged = self.n_averaged.to(p_averaged.device) + p_averaged.detach().copy_( + self.avg_fn(p_averaged.detach(), p_model, n_averaged) + ) + + if not self.use_buffers: + # If not apply running averages to the buffers, + # keep the buffers in sync with the source model. + for b_swa, b_model in zip(self.module.buffers(), model.buffers()): + b_swa.detach().copy_(b_model.detach().to(b_swa.device)) + self.n_averaged += 1 + + +@torch.no_grad() +def update_bn( + loader: Iterable[Any], + model: Module, + device: Optional[Union[int, torch.device]] = None, +): + r"""Updates BatchNorm running_mean, running_var buffers in the model. + + It performs one pass over data in `loader` to estimate the activation + statistics for BatchNorm layers in the model. + Args: + loader (torch.utils.data.DataLoader): dataset loader to compute the + activation statistics on. Each data batch should be either a + tensor, or a list/tuple whose first element is a tensor + containing data. + model (torch.nn.Module): model for which we seek to update BatchNorm + statistics. + device (torch.device, optional): If set, data will be transferred to + :attr:`device` before being passed into :attr:`model`. + + Example: + >>> # xdoctest: +SKIP("Undefined variables") + >>> loader, model = ... + >>> torch.optim.swa_utils.update_bn(loader, model) + + .. note:: + The `update_bn` utility assumes that each data batch in :attr:`loader` + is either a tensor or a list or tuple of tensors; in the latter case it + is assumed that :meth:`model.forward()` should be called on the first + element of the list or tuple corresponding to the data batch. + """ + momenta = {} + for module in model.modules(): + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + module.reset_running_stats() + momenta[module] = module.momentum + + if not momenta: + return + + was_training = model.training + model.train() + for module in momenta.keys(): + module.momentum = None + + for input in loader: + if isinstance(input, (list, tuple)): + input = input[0] + if device is not None: + input = input.to(device) + + model(input) + + for bn_module in momenta.keys(): + bn_module.momentum = momenta[bn_module] + model.train(was_training) + + +class SWALR(LRScheduler): + r"""Anneals the learning rate in each parameter group to a fixed value. + + This learning rate scheduler is meant to be used with Stochastic Weight + Averaging (SWA) method (see `torch.optim.swa_utils.AveragedModel`). + + Args: + optimizer (torch.optim.Optimizer): wrapped optimizer + swa_lrs (float or list): the learning rate value for all param groups + together or separately for each group. + annealing_epochs (int): number of epochs in the annealing phase + (default: 10) + annealing_strategy (str): "cos" or "linear"; specifies the annealing + strategy: "cos" for cosine annealing, "linear" for linear annealing + (default: "cos") + last_epoch (int): the index of the last epoch (default: -1) + + The :class:`SWALR` scheduler can be used together with other + schedulers to switch to a constant learning rate late in the training + as in the example below. 
+ + Example: + >>> # xdoctest: +SKIP("Undefined variables") + >>> loader, optimizer, model = ... + >>> lr_lambda = lambda epoch: 0.9 + >>> scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optimizer, + >>> lr_lambda=lr_lambda) + >>> swa_scheduler = torch.optim.swa_utils.SWALR(optimizer, + >>> anneal_strategy="linear", anneal_epochs=20, swa_lr=0.05) + >>> swa_start = 160 + >>> for i in range(300): + >>> for input, target in loader: + >>> optimizer.zero_grad() + >>> loss_fn(model(input), target).backward() + >>> optimizer.step() + >>> if i > swa_start: + >>> swa_scheduler.step() + >>> else: + >>> scheduler.step() + + .. _Averaging Weights Leads to Wider Optima and Better Generalization: + https://arxiv.org/abs/1803.05407 + """ + + def __init__( + self, + optimizer: Optimizer, + swa_lr: float, + anneal_epochs=10, + anneal_strategy: Literal["cos", "linear"] = "cos", + last_epoch=-1, + ): + swa_lrs = _format_param("swa_lr", optimizer, swa_lr) + for swa_lr, group in zip(swa_lrs, optimizer.param_groups): + group["swa_lr"] = swa_lr + if anneal_strategy not in ["cos", "linear"]: + raise ValueError( + "anneal_strategy must be one of 'cos' or 'linear', " + f"instead got {anneal_strategy}" + ) + elif anneal_strategy == "cos": + self.anneal_func = self._cosine_anneal + elif anneal_strategy == "linear": + self.anneal_func = self._linear_anneal + if not isinstance(anneal_epochs, int) or anneal_epochs < 0: + raise ValueError( + f"anneal_epochs must be greater than or equal to 0, got {anneal_epochs}" + ) + self.anneal_epochs = anneal_epochs + super().__init__(optimizer, last_epoch) + + @staticmethod + def _linear_anneal(t): + return t + + @staticmethod + def _cosine_anneal(t): + return (1 - math.cos(math.pi * t)) / 2 + + @staticmethod + def _get_initial_lr(lr, swa_lr, alpha): + if alpha == 1: + return swa_lr + return (lr - alpha * swa_lr) / (1 - alpha) + + def get_lr(self): + # `_get_lr_called_within_step` is only available within `_enable_get_lr_call`, + # so we ignore the type error here. See `LRScheduler.step()` for more details.
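+ # Each group's pre-anneal lr is recovered by inverting + # lr = alpha * swa_lr + (1 - alpha) * lr_0 at the previous step's alpha + # (see _get_initial_lr), then the current step's alpha is re-applied to move + # the group lr toward swa_lr.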
+ if not self._get_lr_called_within_step: # type: ignore[attr-defined] + warnings.warn( + "To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", + UserWarning, + ) + # Set in `LRScheduler._initial_step()` + step = self._step_count - 1 # type: ignore[attr-defined] + if self.anneal_epochs == 0: + step = max(1, step) + prev_t = max(0, min(1, (step - 1) / max(1, self.anneal_epochs))) + prev_alpha = self.anneal_func(prev_t) + prev_lrs = [ + self._get_initial_lr(group["lr"], group["swa_lr"], prev_alpha) + for group in self.optimizer.param_groups + ] + t = max(0, min(1, step / max(1, self.anneal_epochs))) + alpha = self.anneal_func(t) + return [ + group["swa_lr"] * alpha + lr * (1 - alpha) + for group, lr in zip(self.optimizer.param_groups, prev_lrs) + ] diff --git a/parrot/lib/python3.10/site-packages/torch/profiler/python_tracer.py b/parrot/lib/python3.10/site-packages/torch/profiler/python_tracer.py new file mode 100644 index 0000000000000000000000000000000000000000..b3e624911f95812a523d4dd927a74eec7fe5171b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/profiler/python_tracer.py @@ -0,0 +1,20 @@ +import os +import site +import sys +import typing + +import torch + + +def _prefix_regex() -> typing.List[str]: + raw_paths = ( + site.getsitepackages() + + sys.path + + [site.getuserbase()] + + [site.getusersitepackages()] + + [os.path.dirname(os.path.dirname(torch.__file__))] + ) + + path_prefixes = sorted({os.path.abspath(i) for i in raw_paths}, reverse=True) + assert all(isinstance(i, str) for i in path_prefixes) + return [i + os.sep for i in path_prefixes]
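``_prefix_regex`` returns absolute site/stdlib path prefixes, reverse-sorted so longer, more specific prefixes come first; the profiler's Python tracer uses them to recognize library frames. A small sketch of consuming its output; the frame path below is made up:

import os
from torch.profiler.python_tracer import _prefix_regex

prefixes = _prefix_regex()
# Hypothetical filename as it would appear on a profiled Python frame.
frame_file = os.path.abspath("/tmp/venv/lib/python3.10/site-packages/pkg/mod.py")
is_library_frame = any(frame_file.startswith(prefix) for prefix in prefixes)
print(is_library_frame)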