diff --git a/.gitattributes b/.gitattributes index 6b6bed12e1d89c4320fe14a70d8c65360825d334..9996c2e4a0fa88aa28fd28e0030ea2e75140880b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1656,3 +1656,5 @@ evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libcudart.60cfec8 evalkit_tf437/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libnvjpeg.70530407.so.11 filter=lfs diff=lfs merge=lfs -text evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libpng16.7f72a3c5.so.16 filter=lfs diff=lfs merge=lfs -text +evalkit_tf437/lib/python3.10/site-packages/__pycache__/markdown2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/INSTALLER b/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/METADATA b/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..3ac05cfd1077ba5664e98ecd1342f7c54360b936 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/METADATA @@ -0,0 +1,295 @@ +Metadata-Version: 2.3 +Name: annotated-types +Version: 0.7.0 +Summary: Reusable constraint types to use with typing.Annotated +Project-URL: Homepage, https://github.com/annotated-types/annotated-types 
+Project-URL: Source, https://github.com/annotated-types/annotated-types +Project-URL: Changelog, https://github.com/annotated-types/annotated-types/releases +Author-email: Adrian Garcia Badaracco <1755071+adriangb@users.noreply.github.com>, Samuel Colvin , Zac Hatfield-Dodds +License-File: LICENSE +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Console +Classifier: Environment :: MacOS X +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Unix +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Typing :: Typed +Requires-Python: >=3.8 +Requires-Dist: typing-extensions>=4.0.0; python_version < '3.9' +Description-Content-Type: text/markdown + +# annotated-types + +[![CI](https://github.com/annotated-types/annotated-types/workflows/CI/badge.svg?event=push)](https://github.com/annotated-types/annotated-types/actions?query=event%3Apush+branch%3Amain+workflow%3ACI) +[![pypi](https://img.shields.io/pypi/v/annotated-types.svg)](https://pypi.python.org/pypi/annotated-types) +[![versions](https://img.shields.io/pypi/pyversions/annotated-types.svg)](https://github.com/annotated-types/annotated-types) +[![license](https://img.shields.io/github/license/annotated-types/annotated-types.svg)](https://github.com/annotated-types/annotated-types/blob/main/LICENSE) + +[PEP-593](https://peps.python.org/pep-0593/) added `typing.Annotated` as a way of +adding context-specific metadata to existing types, and specifies that +`Annotated[T, x]` 
_should_ be treated as `T` by any tool or library without special +logic for `x`. + +This package provides metadata objects which can be used to represent common +constraints such as upper and lower bounds on scalar values and collection sizes, +a `Predicate` marker for runtime checks, and +descriptions of how we intend these metadata to be interpreted. In some cases, +we also note alternative representations which do not require this package. + +## Install + +```bash +pip install annotated-types +``` + +## Examples + +```python +from typing import Annotated +from annotated_types import Gt, Len, Predicate + +class MyClass: + age: Annotated[int, Gt(18)] # Valid: 19, 20, ... + # Invalid: 17, 18, "19", 19.0, ... + factors: list[Annotated[int, Predicate(is_prime)]] # Valid: 2, 3, 5, 7, 11, ... + # Invalid: 4, 8, -2, 5.0, "prime", ... + + my_list: Annotated[list[int], Len(0, 10)] # Valid: [], [10, 20, 30, 40, 50] + # Invalid: (1, 2), ["abc"], [0] * 20 +``` + +## Documentation + +_While `annotated-types` avoids runtime checks for performance, users should not +construct invalid combinations such as `MultipleOf("non-numeric")` or `Annotated[int, Len(3)]`. +Downstream implementors may choose to raise an error, emit a warning, silently ignore +a metadata item, etc., if the metadata objects described below are used with an +incompatible type - or for any other reason!_ + +### Gt, Ge, Lt, Le + +Express inclusive and/or exclusive bounds on orderable values - which may be numbers, +dates, times, strings, sets, etc. Note that the boundary value need not be of the +same type that was annotated, so long as they can be compared: `Annotated[int, Gt(1.5)]` +is fine, for example, and implies that the value is an integer x such that `x > 1.5`. + +We suggest that implementors may also interpret `functools.partial(operator.le, 1.5)` +as being equivalent to `Gt(1.5)`, for users who wish to avoid a runtime dependency on +the `annotated-types` package. 
+ +To be explicit, these types have the following meanings: + +* `Gt(x)` - value must be "Greater Than" `x` - equivalent to exclusive minimum +* `Ge(x)` - value must be "Greater than or Equal" to `x` - equivalent to inclusive minimum +* `Lt(x)` - value must be "Less Than" `x` - equivalent to exclusive maximum +* `Le(x)` - value must be "Less than or Equal" to `x` - equivalent to inclusive maximum + +### Interval + +`Interval(gt, ge, lt, le)` allows you to specify an upper and lower bound with a single +metadata object. `None` attributes should be ignored, and non-`None` attributes +treated as per the single bounds above. + +### MultipleOf + +`MultipleOf(multiple_of=x)` might be interpreted in two ways: + +1. Python semantics, implying `value % multiple_of == 0`, or +2. [JSONschema semantics](https://json-schema.org/draft/2020-12/json-schema-validation.html#rfc.section.6.2.1), + where `int(value / multiple_of) == value / multiple_of`. + +We encourage users to be aware of these two common interpretations and their +distinct behaviours, especially since very large or non-integer numbers make +it easy to cause silent data corruption due to floating-point imprecision. + +We encourage libraries to carefully document which interpretation they implement. + +### MinLen, MaxLen, Len + +`Len()` implies that `min_length <= len(value) <= max_length` - lower and upper bounds are inclusive. + +As well as `Len()` which can optionally include upper and lower bounds, we also +provide `MinLen(x)` and `MaxLen(y)` which are equivalent to `Len(min_length=x)` +and `Len(max_length=y)` respectively. + +`Len`, `MinLen`, and `MaxLen` may be used with any type which supports `len(value)`. 
+ +Examples of usage: + +* `Annotated[list, MaxLen(10)]` (or `Annotated[list, Len(max_length=10))`) - list must have a length of 10 or less +* `Annotated[str, MaxLen(10)]` - string must have a length of 10 or less +* `Annotated[list, MinLen(3))` (or `Annotated[list, Len(min_length=3))`) - list must have a length of 3 or more +* `Annotated[list, Len(4, 6)]` - list must have a length of 4, 5, or 6 +* `Annotated[list, Len(8, 8)]` - list must have a length of exactly 8 + +#### Changed in v0.4.0 + +* `min_inclusive` has been renamed to `min_length`, no change in meaning +* `max_exclusive` has been renamed to `max_length`, upper bound is now **inclusive** instead of **exclusive** +* The recommendation that slices are interpreted as `Len` has been removed due to ambiguity and different semantic + meaning of the upper bound in slices vs. `Len` + +See [issue #23](https://github.com/annotated-types/annotated-types/issues/23) for discussion. + +### Timezone + +`Timezone` can be used with a `datetime` or a `time` to express which timezones +are allowed. `Annotated[datetime, Timezone(None)]` must be a naive datetime. +`Timezone[...]` ([literal ellipsis](https://docs.python.org/3/library/constants.html#Ellipsis)) +expresses that any timezone-aware datetime is allowed. You may also pass a specific +timezone string or [`tzinfo`](https://docs.python.org/3/library/datetime.html#tzinfo-objects) +object such as `Timezone(timezone.utc)` or `Timezone("Africa/Abidjan")` to express that you only +allow a specific timezone, though we note that this is often a symptom of fragile design. + +#### Changed in v0.x.x + +* `Timezone` accepts [`tzinfo`](https://docs.python.org/3/library/datetime.html#tzinfo-objects) objects instead of + `timezone`, extending compatibility to [`zoneinfo`](https://docs.python.org/3/library/zoneinfo.html) and third party libraries. + +### Unit + +`Unit(unit: str)` expresses that the annotated numeric value is the magnitude of +a quantity with the specified unit. 
For example, `Annotated[float, Unit("m/s")]` +would be a float representing a velocity in meters per second. + +Please note that `annotated_types` itself makes no attempt to parse or validate +the unit string in any way. That is left entirely to downstream libraries, +such as [`pint`](https://pint.readthedocs.io) or +[`astropy.units`](https://docs.astropy.org/en/stable/units/). + +An example of how a library might use this metadata: + +```python +from annotated_types import Unit +from typing import Annotated, TypeVar, Callable, Any, get_origin, get_args + +# given a type annotated with a unit: +Meters = Annotated[float, Unit("m")] + + +# you can cast the annotation to a specific unit type with any +# callable that accepts a string and returns the desired type +T = TypeVar("T") +def cast_unit(tp: Any, unit_cls: Callable[[str], T]) -> T | None: + if get_origin(tp) is Annotated: + for arg in get_args(tp): + if isinstance(arg, Unit): + return unit_cls(arg.unit) + return None + + +# using `pint` +import pint +pint_unit = cast_unit(Meters, pint.Unit) + + +# using `astropy.units` +import astropy.units as u +astropy_unit = cast_unit(Meters, u.Unit) +``` + +### Predicate + +`Predicate(func: Callable)` expresses that `func(value)` is truthy for valid values. +Users should prefer the statically inspectable metadata above, but if you need +the full power and flexibility of arbitrary runtime predicates... here it is. 
+ +For some common constraints, we provide generic types: + +* `IsLower = Annotated[T, Predicate(str.islower)]` +* `IsUpper = Annotated[T, Predicate(str.isupper)]` +* `IsDigit = Annotated[T, Predicate(str.isdigit)]` +* `IsFinite = Annotated[T, Predicate(math.isfinite)]` +* `IsNotFinite = Annotated[T, Predicate(Not(math.isfinite))]` +* `IsNan = Annotated[T, Predicate(math.isnan)]` +* `IsNotNan = Annotated[T, Predicate(Not(math.isnan))]` +* `IsInfinite = Annotated[T, Predicate(math.isinf)]` +* `IsNotInfinite = Annotated[T, Predicate(Not(math.isinf))]` + +so that you can write e.g. `x: IsFinite[float] = 2.0` instead of the longer +(but exactly equivalent) `x: Annotated[float, Predicate(math.isfinite)] = 2.0`. + +Some libraries might have special logic to handle known or understandable predicates, +for example by checking for `str.isdigit` and using its presence to both call custom +logic to enforce digit-only strings, and customise some generated external schema. +Users are therefore encouraged to avoid indirection like `lambda s: s.lower()`, in +favor of introspectable methods such as `str.lower` or `re.compile("pattern").search`. + +To enable basic negation of commonly used predicates like `math.isnan` without introducing introspection that makes it impossible for implementers to introspect the predicate we provide a `Not` wrapper that simply negates the predicate in an introspectable manner. Several of the predicates listed above are created in this manner. + +We do not specify what behaviour should be expected for predicates that raise +an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently +skip invalid constraints, or statically raise an error; or it might try calling it +and then propagate or discard the resulting +`TypeError: descriptor 'isdigit' for 'str' objects doesn't apply to a 'int' object` +exception. We encourage libraries to document the behaviour they choose. 
+ +### Doc + +`doc()` can be used to add documentation information in `Annotated`, for function and method parameters, variables, class attributes, return types, and any place where `Annotated` can be used. + +It expects a value that can be statically analyzed, as the main use case is for static analysis, editors, documentation generators, and similar tools. + +It returns a `DocInfo` class with a single attribute `documentation` containing the value passed to `doc()`. + +This is the early adopter's alternative form of the [`typing-doc` proposal](https://github.com/tiangolo/fastapi/blob/typing-doc/typing_doc.md). + +### Integrating downstream types with `GroupedMetadata` + +Implementers may choose to provide a convenience wrapper that groups multiple pieces of metadata. +This can help reduce verbosity and cognitive overhead for users. +For example, an implementer like Pydantic might provide a `Field` or `Meta` type that accepts keyword arguments and transforms these into low-level metadata: + +```python +from dataclasses import dataclass +from typing import Iterator +from annotated_types import GroupedMetadata, Ge + +@dataclass +class Field(GroupedMetadata): + ge: int | None = None + description: str | None = None + + def __iter__(self) -> Iterator[object]: + # Iterating over a GroupedMetadata object should yield annotated-types + # constraint metadata objects which describe it as fully as possible, + # and may include other unknown objects too. + if self.ge is not None: + yield Ge(self.ge) + if self.description is not None: + yield Description(self.description) +``` + +Libraries consuming annotated-types constraints should check for `GroupedMetadata` and unpack it by iterating over the object and treating the results as if they had been "unpacked" in the `Annotated` type. 
The same logic should be applied to the [PEP 646 `Unpack` type](https://peps.python.org/pep-0646/), so that `Annotated[T, Field(...)]`, `Annotated[T, Unpack[Field(...)]]` and `Annotated[T, *Field(...)]` are all treated consistently. + +Libraries consuming annotated-types should also ignore any metadata they do not recongize that came from unpacking a `GroupedMetadata`, just like they ignore unrecognized metadata in `Annotated` itself. + +Our own `annotated_types.Interval` class is a `GroupedMetadata` which unpacks itself into `Gt`, `Lt`, etc., so this is not an abstract concern. Similarly, `annotated_types.Len` is a `GroupedMetadata` which unpacks itself into `MinLen` (optionally) and `MaxLen`. + +### Consuming metadata + +We intend to not be prescriptive as to _how_ the metadata and constraints are used, but as an example of how one might parse constraints from types annotations see our [implementation in `test_main.py`](https://github.com/annotated-types/annotated-types/blob/f59cf6d1b5255a0fe359b93896759a180bec30ae/tests/test_main.py#L94-L103). + +It is up to the implementer to determine how this metadata is used. +You could use the metadata for runtime type checking, for generating schemas or to generate example data, amongst other use cases. + +## Design & History + +This package was designed at the PyCon 2022 sprints by the maintainers of Pydantic +and Hypothesis, with the goal of making it as easy as possible for end-users to +provide more informative annotations for use by runtime libraries. + +It is deliberately minimal, and following PEP-593 allows considerable downstream +discretion in what (if anything!) they choose to support. Nonetheless, we expect +that staying simple and covering _only_ the most common use-cases will give users +and maintainers the best experience we can. If you'd like more constraints for your +types - follow our lead, by defining them and documenting them downstream! 
diff --git a/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/RECORD b/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..30741a49ce010ade63cae2348d105cbb525378f7 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/RECORD @@ -0,0 +1,11 @@ +annotated_types-0.7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +annotated_types-0.7.0.dist-info/METADATA,sha256=7ltqxksJJ0wCYFGBNIQCWTlWQGeAH0hRFdnK3CB895E,15046 +annotated_types-0.7.0.dist-info/RECORD,, +annotated_types-0.7.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +annotated_types-0.7.0.dist-info/WHEEL,sha256=zEMcRr9Kr03x1ozGwg5v9NQBKn3kndp6LSoSlVg-jhU,87 +annotated_types-0.7.0.dist-info/licenses/LICENSE,sha256=_hBJiEsaDZNCkB6I4H8ykl0ksxIdmXK2poBfuYJLCV0,1083 +annotated_types/__init__.py,sha256=RynLsRKUEGI0KimXydlD1fZEfEzWwDo0Uon3zOKhG1Q,13819 +annotated_types/__pycache__/__init__.cpython-310.pyc,, +annotated_types/__pycache__/test_cases.cpython-310.pyc,, +annotated_types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +annotated_types/test_cases.py,sha256=zHFX6EpcMbGJ8FzBYDbO56bPwx_DYIVSKbZM-4B3_lg,6421 diff --git a/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/REQUESTED b/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/WHEEL b/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..516596c76787b10928cbab24f22c0ea00433b15d --- /dev/null +++ 
b/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.24.2 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/licenses/LICENSE b/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d99323a9965f146d5b0888c4ca1bf0727e12b04f --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2022 the contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/__init__.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df789aa269637b37500fae43adbbe3a99c459499 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/ebay.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/ebay.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2a3248b56b8af7e625975cd9423c53e349b8a77 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/ebay.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/facebook.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/facebook.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5714c06bf831dbb17070c466a57c3d2ba86d5a35 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/facebook.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/weibo.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/weibo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9aef23a8e5db25a322a204dc9206c676d7bb36b Binary files /dev/null and 
b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/weibo.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/torch/_jit_internal.py b/evalkit_internvl/lib/python3.10/site-packages/torch/_jit_internal.py new file mode 100644 index 0000000000000000000000000000000000000000..be1b86f5c860179fc9301ea27a17093bf1f5a9ae --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/torch/_jit_internal.py @@ -0,0 +1,1510 @@ +""" +The weak_script annotation needs to be here instead of inside torch/jit/ so it +can be used in other places in torch/ (namely torch.nn) without running into +circular dependency problems +""" + +import ast +import builtins +import collections +import contextlib +import enum +import inspect +import io +import pickle +import sys +import threading +import types +import typing +import warnings +import weakref +from textwrap import dedent +from typing import ( # noqa: F401 + Any, + Callable, + Dict, + Final, + ForwardRef, + Generic, + get_args, # new in 3.8 + get_origin, # new in 3.8 + List, + Optional, + Tuple, + Type, + TypeVar, + Union, +) + +import torch + +# This is needed. `torch._jit_internal` is imported before `torch.distributed.__init__`. +# Explicitly ask to import `torch.distributed.__init__` first. +# Otherwise, "AttributeError: module 'torch' has no attribute 'distributed'" is raised. +import torch.distributed.rpc +import torch.package._mangling as package_mangling +from torch._awaits import _Await +from torch._C import _Await as CAwait, Future as CFuture +from torch._sources import fake_range, get_source_lines_and_file, parse_def +from torch.futures import Future + +IS_PY39_PLUS: Final[bool] = sys.version_info >= (3, 9) +IS_PY310_PLUS: Final[bool] = sys.version_info >= (3, 10) + +BuiltinUnionType: Union[Type, Tuple[Type, ...]] +if sys.version_info >= (3, 10): + # NOTE: IS_PY310_PLUS doesn't work with mypy. + # cf. 
https://mypy.readthedocs.io/en/stable/common_issues.html#python-version-and-system-platform-checks + BuiltinUnionType = types.UnionType +else: + BuiltinUnionType = () # trick: this makes isinstance short circuit. + +LockType: Type +try: + import _thread + + LockType = _thread.LockType +except ImportError: + import _dummy_thread + + LockType = _dummy_thread.LockType + +# Wrapper functions that can call either of 2 functions depending on a boolean +# argument +boolean_dispatched: "weakref.WeakKeyDictionary[Callable, Dict[str, Callable]]" = ( + weakref.WeakKeyDictionary() +) # noqa: T484 + + +FAKE_FILENAME_PREFIX = "__torch_jit_dataclass" + + +class SourceLoader: + def __init__(self): + self.content = {} + + def cache(self, fn, source): + self.content[fn] = source + + def get_source(self, fn): + return self.content.get(fn) + + +loader = SourceLoader() + + +def createResolutionCallbackFromEnv(lookup_base): + """ + Creates a resolution callback that will look up qualified names in an + environment, starting with `lookup_base` for the base of any qualified + names, then proceeding down the lookup chain with the resolved object. + + You should not use this directly, it should only be used from the other + createResolutionCallbackFrom* functions. + """ + + def lookupInModule(qualified_name, module): + if "." 
in qualified_name: + parts = qualified_name.split(".") + base = parts[0] + remaining_pieces = ".".join(parts[1:]) + module_value = getattr(module, base) + return lookupInModule(remaining_pieces, module_value) + else: + return getattr(module, qualified_name) + + def parseNestedExpr(expr, module) -> Tuple[Any, int]: + i = 0 + while i < len(expr) and expr[i] not in (",", "[", "]"): + i += 1 + + # Special case logic for the empty Tuple as a subscript (used + # in the type annotation `Tuple[()]`) + if expr[:i] == "()": + return (), i + + base = lookupInModule(expr[:i].strip(), module) + assert base is not None, f"Unresolvable type {expr[:i]}" + if i == len(expr) or expr[i] != "[": + return base, i + + assert expr[i] == "[" + parts = [] + while expr[i] != "]": + part_len = 0 + i += 1 + part, part_len = parseNestedExpr(expr[i:], module) + parts.append(part) + i += part_len + if len(parts) > 1: + return base[tuple(parts)], i + 1 + else: + return base[parts[0]], i + 1 + + def parseExpr(expr, module): + try: + value, len_parsed = parseNestedExpr(expr, module) + assert len_parsed == len( + expr + ), "whole expression was not parsed, falling back to c++ parser" + return value + except Exception: + """ + The python resolver fails in several cases in known unit tests, and is intended + to fall back gracefully to the c++ resolver in general. For example, python 2 style + annotations which are frequent in our unit tests often fail with types e.g. int not + resolvable from the calling frame. + """ + return None + + return lambda expr: parseExpr(expr, lookup_base) + + +def createResolutionCallbackFromFrame(frames_up: int = 0): + """ + Creates a function which, given a string variable name, + returns the value of the variable in the scope of the caller of + the function which called createResolutionCallbackFromFrame (by default). + + This is used to enable access in-scope Python variables inside + TorchScript fragments. 
+ + frames_up is number of additional frames to go up on the stack. + The default value is 0, which correspond to the frame of the caller + of createResolutionCallbackFromFrame. Also for example, if frames_up is set + to 1, then the frame of the caller's caller of createResolutionCallbackFromFrame + will be taken. + + For example, the following program prints 2:: + + def bar(): + cb = createResolutionCallbackFromFrame(1) + print(cb("foo")) + + def baz(): + foo = 2 + bar() + + baz() + """ + frame = inspect.currentframe() + i = 0 + while i < frames_up + 1: + assert frame is not None + frame = frame.f_back + i += 1 + + assert frame is not None + f_locals = frame.f_locals + f_globals = frame.f_globals + + class env: + def __getattr__(self, key): + if key in f_locals: + return f_locals[key] + elif key in f_globals: + return f_globals[key] + elif key in dir(builtins): + return getattr(builtins, key) + + return createResolutionCallbackFromEnv(env()) + + +def get_closure(fn): + """ + Get a dictionary of closed over variables from a function + """ + captures = {} + captures.update(fn.__globals__) + + for index, captured_name in enumerate(fn.__code__.co_freevars): + captures[captured_name] = fn.__closure__[index].cell_contents + + return captures + + +# [local resolution in python] +# Depending on where a variable is defined, and where it is used, we may +# or may not be able to recover its value when recursively compiling a +# script function. Remember in the general case, a module or function is +# first defined and then later scripted. This means we do not have a +# chance to capture the active frames when the function is defined. Hence any +# name resolution has to happen later on the created closure. The way +# python captures type annotations restricts what we can recover. The +# follow example illustrates the different cases: +# +# class MyGlobalClass: +# ... +# def my_local_scope(): +# @torch.jit.script +# class MyClass: +# ... 
+# @torch.jit.script +# class MyClassUsedAsVar: +# ... +# def eg(x: MyClass, y: MyGlobalClass): +# a_local_capture : Foo +# return MyClassUsedAsVar(x) +# +# MyGlobalClass is defined in the __globals__ dictionary of function +# 'eg', so it is always recoverable. my_local_scope introduces a new local +# variable scope in the function. Classes defined here are only visible as +# local variables. For the case of MyClassUsedAsVar, it is captured +# because it is used as a variable inside the body of the function, and we +# can resolve it using the captures returned from `get_closure`. However, +# the type annotations are not captured by the closure. In Python +# 3.0--3.9, the _value_ of MyClass and MyGlobalClass will be available as +# annotations on `eg``, but starting in Python 4.0, they will represented as +# strings and no longer present. Furthermore, since the body of `eg` does +# not reference those names, they do not appear in the list of closed over +# variables. In Python 2.x, type annotations are in comments, leading to a +# similar situation where their definitions are not available. We anticipate +# that most users will not run into this issue because their modules and +# functions will be defined at a global scope like MyGlobalClass. In cases +# where they are not, it is possible to work around issues by declaring the +# values global in the function. +# In Python 3.9 declaring class as global will make it invisible to +# `inspect.getsource`, see https://bugs.python.org/issue42666 . +# This could be worked around by manualy adding it to `global()` dictionary. 
+ + +def createResolutionCallbackFromClosure(fn): + """ + Create a resolutionCallback by introspecting the function instead of + looking up the stack for the enclosing scope + """ + closure = get_closure(fn) + + class closure_lookup: + # This is a class since `closure` is a dict and it's easier in + # `env_helper` if everything just works with `getattr` calls + def __getattr__(self, key): + if key in closure: + return closure[key] + elif hasattr(typing, key): + return getattr(typing, key) + elif hasattr(builtins, key): + return getattr(builtins, key) + return None + + return createResolutionCallbackFromEnv(closure_lookup()) + + +def can_compile_class(cls) -> bool: + # If any of the functions on a type don't have a code object, this type can't + # be compiled and is probably a builtin / bound from C + if is_ignored_fn(cls): + return False + + # Ignore the following list of built-in classes. + ignored_builtin_classes = (torch.nn.Module, tuple, list, Exception) + if issubclass(cls, ignored_builtin_classes): + return False + + names = cls.__dict__ + fns = [ + getattr(cls, name) + for name in names + if inspect.isroutine(getattr(cls, name, None)) + ] + has_code = [hasattr(fn, "__code__") for fn in fns] + return all(has_code) + + +def get_callable_argument_names(fn) -> List[str]: + """ + Gets names of all POSITIONAL_OR_KEYWORD arguments for callable `fn`. + Returns an empty list when other types of arguments are present. + + This is used by `torch.jit.trace` to assign meaningful argument names to + traced functions and modules. + + Args: + fn: A callable. + Returns: + Argument names: List[str] + """ + # inspect.signature may fail, give up in that case. + try: + callable_signature = inspect.signature(fn) + except Exception: + return [] + + argument_names = [] + for name, param in callable_signature.parameters.items(): + # All four other types of arguments do not map to individual values + # with a keyword as name. 
def get_annotation_str(annotation):
    """
    Convert an AST node containing a type annotation to the string present in the source
    that represents the same annotation.

    Returns None for node kinds not handled here; those are handled later by
    ScriptTypeParser.
    """
    if isinstance(annotation, ast.Name):
        return annotation.id
    elif isinstance(annotation, ast.Attribute):
        return ".".join([get_annotation_str(annotation.value), annotation.attr])
    elif isinstance(annotation, ast.Subscript):
        # In Python3.9+ subscript indices are not wrapped in ast.Index
        subscript_slice = annotation.slice if IS_PY39_PLUS else annotation.slice.value  # type: ignore[attr-defined]
        return f"{get_annotation_str(annotation.value)}[{get_annotation_str(subscript_slice)}]"
    elif isinstance(annotation, ast.Tuple):
        return ",".join([get_annotation_str(elt) for elt in annotation.elts])
    elif isinstance(annotation, ast.Constant):
        # ast.NameConstant has been a deprecated alias of ast.Constant since
        # Python 3.8 (accessing it warns on 3.12+); ast.parse only produces
        # ast.Constant, and NameConstant instances are Constant instances, so
        # checking Constant alone is behaviorally equivalent to the old
        # (Constant, NameConstant) pair.
        return f"{annotation.value}"

    # If an AST node is not handled here, it's probably handled in ScriptTypeParser.
    return None
+ src = loader.get_source(fn) + if src is None: + src = inspect.getsource(fn) + + # Gather a dictionary of parameter name -> type, skipping any parameters whose annotated + # types are strings. These are only understood by TorchScript in the context of a type annotation + # that refers to a class in its own definition, but trying to include a mapping for this in the result + # function would cause infinite recursion because the class is currently being compiled. + # In addition, there is logic in ScriptTypeParser to handle this. + signature = inspect.signature(fn) + name_to_type = { + name: parameter.annotation + for name, parameter in signature.parameters.items() + if parameter.annotation is not inspect.Parameter.empty + and not isinstance(parameter.annotation, str) + } + + # Then, get the literal type annotations from the function declaration + # by source inspection. This accounts for the case in which aliases are used + # to annotate the arguments (e.g device_t = torch.device, and then d: device_t). + # frontend.py cannot be used here because it includes _jit_internal, so use ast instead. + a = ast.parse(dedent(src)) + if len(a.body) != 1 or not isinstance(a.body[0], ast.FunctionDef): + raise RuntimeError(f"Expected {fn} to be a function") + f = a.body[0] + + # Prepare a dictionary of source annotation -> type, which will be the final result of this function, + # by using the parsed AST (f) to reconstruct source annotations as strings for each parameter and mapping + # them to the type object corresponding to the annotation via name_to_type using the parameter name. + annotation_to_type = {} + + for arg in f.args.args: + # Get the source type annotation string for this argument if possible. + arg_annotation_str = ( + get_annotation_str(arg.annotation) if arg.annotation else None + ) + + # If the argument has no annotation or get_annotation_str cannot convert it to a string, + # arg_annotation_str will be None. 
Skip this arg; ScriptTypeParser will probably handle + # this in the latter case. + if arg_annotation_str is None: + continue + + # Insert {arg_annotation_str: type} into annotation_to_type if possible. One reason arg_name may not + # be present in name_to_type is that the annotation itself is a string and not a type object + # (common for self-refential annotations in classes). Once again, let ScriptTypeParser handle this. + arg_name = arg.arg + if arg_name in name_to_type: + annotation_to_type[arg_annotation_str] = name_to_type[arg_name] + + # If there is a valid return annotation, include it in annotation_to_type. As with argument annotations, + # the literal annotation has to be convertible to a string by get_annotation_str, and the actual type + # of the annotation cannot be a string. + literal_return_annotation = get_annotation_str(f.returns) + valid_literal_annotation = literal_return_annotation is not None + return_annotation = signature.return_annotation + valid_return_annotation_type = ( + return_annotation is not inspect.Parameter.empty + and not isinstance(return_annotation, str) + ) + if valid_literal_annotation and valid_return_annotation_type: + annotation_to_type[literal_return_annotation] = return_annotation + + return annotation_to_type + + +def createResolutionCallbackForClassMethods(cls): + """ + This looks at all the methods defined in a class and pulls their closed-over + variables into a dictionary and uses that to resolve variables. 
+ """ + # cls is a type here, so `ismethod` is false since the methods on the type + # aren't bound to anything, so Python treats them as regular functions + fns = [ + getattr(cls, name) + for name in cls.__dict__ + if inspect.isroutine(getattr(cls, name)) + ] + # Skip built-ins, as they do not have global scope nor type hints + # Needed to support `enum.Enum` derived classes in Python-3.11 + # That adds `_new_member_` property which is an alias to `__new__` + fns = [fn for fn in fns if not inspect.isbuiltin(fn) and hasattr(fn, "__globals__")] + captures = {} + + for fn in fns: + captures.update(get_closure(fn)) + captures.update(get_type_hint_captures(fn)) + + def lookup_in_class(key): + if key in captures: + return captures[key] + else: + return getattr(builtins, key, None) + + return lookup_in_class + + +def boolean_dispatch( + arg_name, arg_index, default, if_true, if_false, module_name, func_name +): + """ + Dispatches to either of 2 script functions based on a boolean argument. + In TorchScript, the boolean argument must be constant so that the correct + function to use can be determined at compile time. 
+ """ + + def fn(*args, **kwargs): + dispatch_flag = default + if arg_name in kwargs: + dispatch_flag = kwargs[arg_name] + elif arg_index < len(args): + dispatch_flag = args[arg_index] + + if dispatch_flag: + return if_true(*args, **kwargs) + else: + return if_false(*args, **kwargs) + + if if_true.__doc__ is None and if_false.__doc__ is not None: + doc = if_false.__doc__ + if_true.__doc__ = doc + elif if_false.__doc__ is None and if_true.__doc__ is not None: + doc = if_true.__doc__ + if_false.__doc__ = doc + elif if_false.__doc__ is None and if_true.__doc__ is None: + # neither function has a docstring + doc = None + else: + raise RuntimeError("only one function can have a docstring") + fn.__doc__ = doc + + if module_name is not None: + fn.__module__ = module_name + if func_name is not None: + fn.__name__ = func_name + + boolean_dispatched[fn] = { + "if_true": if_true, + "if_false": if_false, + "index": arg_index, + "default": default, + "arg_name": arg_name, + } + return fn + + +class FunctionModifiers: + """ + Used to denote the behavior of a function in TorchScript. See export() and + ignore() for details. + """ + + UNUSED = "unused (ignored and replaced with raising of an exception)" + IGNORE = "ignore (leave as a call to Python, cannot be torch.jit.save'd)" + EXPORT = "export (compile this function even if nothing calls it)" + DEFAULT = "default (compile if called from a exported function / forward)" + COPY_TO_SCRIPT_WRAPPER = ( + "if this method is not scripted, copy the python method onto the scripted model" + ) + _DROP = "_drop (function is fully ignored, declaration can be unscriptable)" + + +def export(fn): + """ + This decorator indicates that a method on an ``nn.Module`` is used as an entry point into a + :class:`ScriptModule` and should be compiled. + + ``forward`` implicitly is assumed to be an entry point, so it does not need this decorator. 
+ Functions and methods called from ``forward`` are compiled as they are seen + by the compiler, so they do not need this decorator either. + + Example (using ``@torch.jit.export`` on a method): + + .. testcode:: + + import torch + import torch.nn as nn + + class MyModule(nn.Module): + def implicitly_compiled_method(self, x): + return x + 99 + + # `forward` is implicitly decorated with `@torch.jit.export`, + # so adding it here would have no effect + def forward(self, x): + return x + 10 + + @torch.jit.export + def another_forward(self, x): + # When the compiler sees this call, it will compile + # `implicitly_compiled_method` + return self.implicitly_compiled_method(x) + + def unused_method(self, x): + return x - 20 + + # `m` will contain compiled methods: + # `forward` + # `another_forward` + # `implicitly_compiled_method` + # `unused_method` will not be compiled since it was not called from + # any compiled methods and wasn't decorated with `@torch.jit.export` + m = torch.jit.script(MyModule()) + """ + fn._torchscript_modifier = FunctionModifiers.EXPORT + return fn + + +def unused(fn): + """ + This decorator indicates to the compiler that a function or method should + be ignored and replaced with the raising of an exception. This allows you + to leave code in your model that is not yet TorchScript compatible and still + export your model. 
+ + Example (using ``@torch.jit.unused`` on a method):: + + import torch + import torch.nn as nn + + class MyModule(nn.Module): + def __init__(self, use_memory_efficient): + super().__init__() + self.use_memory_efficient = use_memory_efficient + + @torch.jit.unused + def memory_efficient(self, x): + import pdb + pdb.set_trace() + return x + 10 + + def forward(self, x): + # Use not-yet-scriptable memory efficient mode + if self.use_memory_efficient: + return self.memory_efficient(x) + else: + return x + 10 + + m = torch.jit.script(MyModule(use_memory_efficient=False)) + m.save("m.pt") + + m = torch.jit.script(MyModule(use_memory_efficient=True)) + # exception raised + m(torch.rand(100)) + """ + if isinstance(fn, property): + prop = fn + setattr( # noqa: B010 + prop.fget, "_torchscript_modifier", FunctionModifiers.UNUSED + ) + + if prop.fset: + setattr( # noqa: B010 + prop.fset, "_torchscript_modifier", FunctionModifiers.UNUSED + ) + + return prop + + fn._torchscript_modifier = FunctionModifiers.UNUSED + return fn + + +# No op context manager from python side +class _IgnoreContextManager(contextlib.AbstractContextManager): + def __init__(self, **kwargs): + pass + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + pass + + +def ignore(drop=False, **kwargs): + """ + This decorator indicates to the compiler that a function or method should + be ignored and left as a Python function. This allows you to leave code in + your model that is not yet TorchScript compatible. If called from TorchScript, + ignored functions will dispatch the call to the Python interpreter. Models with ignored + functions cannot be exported; use :func:`@torch.jit.unused ` instead. 
+ + Example (using ``@torch.jit.ignore`` on a method):: + + import torch + import torch.nn as nn + + class MyModule(nn.Module): + @torch.jit.ignore + def debugger(self, x): + import pdb + pdb.set_trace() + + def forward(self, x): + x += 10 + # The compiler would normally try to compile `debugger`, + # but since it is `@ignore`d, it will be left as a call + # to Python + self.debugger(x) + return x + + m = torch.jit.script(MyModule()) + + # Error! The call `debugger` cannot be saved since it calls into Python + m.save("m.pt") + + Example (using ``@torch.jit.ignore(drop=True)`` on a method): + + .. testcode:: + + import torch + import torch.nn as nn + + class MyModule(nn.Module): + @torch.jit.ignore(drop=True) + def training_method(self, x): + import pdb + pdb.set_trace() + + def forward(self, x): + if self.training: + self.training_method(x) + return x + + m = torch.jit.script(MyModule()) + + # This is OK since `training_method` is not saved, the call is replaced + # with a `raise`. + m.save("m.pt") + + .. testcleanup:: + + import os + os.remove('m.pt') + """ + + if callable(drop): + # used without any args, so drop is actually a function + # @torch.jit.ignore + # def fn(...): + fn = drop + fn._torchscript_modifier = FunctionModifiers.IGNORE + return fn + + if not isinstance(drop, bool): + raise RuntimeError( + "Argument to @torch.jit.ignore must be a bool or " + f"a function but got {drop}" + ) + + # for backwards compat + drop_on_export = kwargs.pop("drop_on_export", None) + if drop_on_export: + warnings.warn( + "ignore(drop_on_export=True) has been deprecated. TorchScript will now drop the function " + "call on compilation. Use torch.jit.unused now. {}", + category=FutureWarning, + ) + + drop = drop_on_export + elif drop: + warnings.warn( + "ignore(True) has been deprecated. TorchScript will now drop the function " + "call on compilation. Use torch.jit.unused now. 
{}", + category=FutureWarning, + ) + + def decorator(fn): + if drop: + fn._torchscript_modifier = FunctionModifiers.UNUSED + else: + fn._torchscript_modifier = FunctionModifiers.IGNORE + return fn + + return decorator + + +def _drop(fn): + fn._torchscript_modifier = FunctionModifiers._DROP + return fn + + +def _copy_to_script_wrapper(fn): + fn._torchscript_modifier = FunctionModifiers.COPY_TO_SCRIPT_WRAPPER + return fn + + +def module_has_exports(mod): + for name in dir(mod): + if hasattr(mod, name): + item = getattr(mod, name) + if callable(item): + if get_torchscript_modifier(item) is FunctionModifiers.EXPORT: + return True + return False + + +# WARNING: should_drop is currently being used by our JIT code coverage plug-in to mark JIT'd code as covered. If you +# rename this function, please update references in tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py to +# allow JIT'd code to still be covered. +def should_drop(fn) -> bool: + attr = get_torchscript_modifier(fn) + if attr is None: + return False + return attr is FunctionModifiers.UNUSED or attr is FunctionModifiers._DROP + + +def is_ignored_fn(fn) -> bool: + mod = get_torchscript_modifier(fn) + return ( + mod is FunctionModifiers.UNUSED + or mod is FunctionModifiers.IGNORE + or mod is FunctionModifiers._DROP + ) + + +def _is_drop_fn(fn) -> bool: + mod = get_torchscript_modifier(fn) + return mod is FunctionModifiers._DROP + + +def is_static_fn(cls, fn) -> bool: + return isinstance(inspect.getattr_static(cls, fn, default=None), staticmethod) + + +def get_static_fn(cls, fn): + return inspect.getattr_static(cls, fn).__func__ + + +def get_torchscript_modifier(fn): + if not callable(fn): + return None + if hasattr(fn, "__func__"): + fn = fn.__func__ + return getattr(fn, "_torchscript_modifier", FunctionModifiers.DEFAULT) + + +def copy_torchscript_modifier(orig, new) -> None: + attr = get_torchscript_modifier(orig) + if attr is None: + return + new._torchscript_modifier = attr + + +# overloading 
def get_overload_no_implementation_error_message(kind, obj):
    """Build the error shown when an overload has declarations but no implementation.

    Args:
        kind: Human-readable kind of the overloaded object ("function"/"method").
        obj: The overloaded callable whose implementation is missing.
    Returns:
        A message pointing at the source location of `obj`, followed by an
        example of correct overload usage.
    """
    sourcelines, file_lineno, filename = get_source_lines_and_file(obj)
    return (
        f'Implementation for the {kind} "{_qualified_name(obj)}" is missing. Please make '
        f"sure a definition is provided and defined after all overload declarations.\n"
        # Use the actual filename: it was already computed above but the
        # message previously hard-coded "(unknown)", which made the error
        # useless for locating the declaration.
        f'File "{filename}", line {file_lineno}:\n'
        + "".join(sourcelines)
        + "\n"
        + _OVERLOAD_EXAMPLE
    )
def get_class_name_lineno(method) -> Tuple[str, int]:
    """Walk two frames up the stack and return (code object name, first line no).

    Used by the overload machinery: when the decorator runs, the frame two
    levels up is the class body being defined, so its code object carries the
    class name and its starting line.
    """
    frame = inspect.currentframe()

    # one for the get_class_name call, one for _overload_method call
    for _ in range(2):
        # narrow Optional[FrameType] for the type checker
        assert frame is not None
        frame = frame.f_back

    assert frame is not None  # same here
    return frame.f_code.co_name, frame.f_code.co_firstlineno
+# We get around this problem by looking at the stack frame and identifying +# the class name, and throwing an error whenever overloads are used +# when modules of the same name are in the same file + +# qualified_name => class name => list[overload_functions] +_overloaded_methods: Dict[str, Dict[str, List[Callable]]] = {} # noqa: T484 + + +# (qualified_name, class name) => class_fileno +_overloaded_method_class_fileno = {} + + +def _overload_method(func): + _check_overload_body(func) + qual_name = _qualified_name(func) + global _overloaded_methods + class_name_map = _overloaded_methods.get(qual_name, None) + if class_name_map is None: + class_name_map = {} + _overloaded_methods[qual_name] = class_name_map + + class_name, line_no = get_class_name_lineno(func) + method_overloads = class_name_map.get(class_name, None) + if method_overloads is None: + method_overloads = [] + class_name_map[class_name] = method_overloads + _overloaded_method_class_fileno[(qual_name, class_name)] = line_no + else: + existing_lineno = _overloaded_method_class_fileno[(qual_name, class_name)] + if existing_lineno != line_no: + raise RuntimeError( + "Cannot currently overload the same method name in two different" + " classes with the same name in the same module" + ) + + method_overloads.append(func) + return func + + +def _get_overloaded_methods(method, mod_class): + # TODO: __name__ not set for submodules in recursive script + if not hasattr(method, "__name__"): + return None + qual_name = _qualified_name(method) + class_name_map = _overloaded_methods.get(qual_name, None) + if class_name_map is None: + return None + overloads = class_name_map.get(mod_class.__name__, None) + if overloads is None: + return None + + method_line_no = get_source_lines_and_file(method)[1] + mod_class_fileno = get_source_lines_and_file(mod_class)[1] + mod_end_fileno = mod_class_fileno + len(get_source_lines_and_file(mod_class)[0]) + if not (method_line_no >= mod_class_fileno and method_line_no <= 
mod_end_fileno): + raise Exception( + "Overloads are not useable when a module is redeclared within the same file: " + + str(method) + ) + return overloads + + +def is_tuple(ann) -> bool: + if ann is Tuple: + raise_error_container_parameter_missing("Tuple") + + # For some reason Python 3.7 violates the Type[A, B].__origin__ == Type rule + if not hasattr(ann, "__module__"): + return False + + ann_origin = get_origin(ann) + if IS_PY39_PLUS and ann.__module__ == "builtins" and ann_origin is tuple: + return True + return ann.__module__ == "typing" and (ann_origin is Tuple or ann_origin is tuple) + + +def is_list(ann) -> bool: + if ann is List: + raise_error_container_parameter_missing("List") + + if not hasattr(ann, "__module__"): + return False + + ann_origin = get_origin(ann) + if IS_PY39_PLUS and ann.__module__ == "builtins" and ann_origin is list: + return True + return ann.__module__ == "typing" and (ann_origin is List or ann_origin is list) + + +def is_dict(ann) -> bool: + if ann is Dict: + raise_error_container_parameter_missing("Dict") + + if not hasattr(ann, "__module__"): + return False + + ann_origin = get_origin(ann) + if IS_PY39_PLUS and ann.__module__ == "builtins" and ann_origin is dict: + return True + return ann.__module__ == "typing" and (ann_origin is Dict or ann_origin is dict) + + +def is_union(ann): + if ann is Union: + raise_error_container_parameter_missing("Union") + + return isinstance(ann, BuiltinUnionType) or ( + hasattr(ann, "__module__") + and ann.__module__ == "typing" + and (get_origin(ann) is Union) + ) + + +def is_optional(ann): + if ann is Optional: + raise_error_container_parameter_missing("Optional") + + def is_optional_as_optional(ann): + return ( + hasattr(ann, "__module__") + and ann.__module__ == "typing" + and (get_origin(ann) is Optional) + ) + + def is_union_as_optional(ann): + ann_args = get_args(ann) + return len(ann_args) == 2 and (None in ann_args or type(None) in ann_args) + + return is_optional_as_optional(ann) or 
def is_final(ann) -> bool:
    """Return True if `ann` is a typing Final annotation (bare or subscripted)."""
    if ann.__module__ not in {"typing", "typing_extensions"}:
        return False
    # Subscripted Final[T] has Final as its origin; the bare special form is an
    # instance of Final's own type (typing._SpecialForm).
    return get_origin(ann) is Final or isinstance(ann, type(Final))
# Retrieves a fully-qualified name (module hierarchy + classname) for a given obj.
def _qualified_name(obj, mangle_name=True) -> str:
    """Return the fully-qualified TorchScript name of `obj`.

    Args:
        obj: A class, function, or enum member.
        mangle_name: When True, rewrite the module prefix into the `__torch__`
            namespace so it cannot collide with names of user values.
    Raises:
        RuntimeError: if a name/module cannot be determined, or the resulting
            name is not a valid identifier.
    """
    # This special case allows us to override the qualified name on a type.
    # It's currently used in conjunction with tracing, where we create a
    # fake module to filter only supported attributes. However, since this
    # new type is defined as a local class, we need a mechanism to override
    # its qualname so it appears correctly in the TorchScript system. Thus,
    # we set '_jit_override_qualname' with the original traced module's
    # qualified name, which is picked up here
    if hasattr(obj, "_jit_override_qualname"):
        return obj._jit_override_qualname
    # short-circuit in cases where the object already has a known qualified name
    if isinstance(obj, torch._C.ScriptFunction):
        return obj.qualified_name

    if getattr(obj, "__name__", None):
        name = obj.__name__
    # Enum classes do not have `__name__` attr, instead they have `name`.
    elif isinstance(obj, enum.Enum):
        name = obj.name
    else:
        raise RuntimeError("Could not get name of python class object")

    if name == "<lambda>":
        # BUGFIX: this previously compared against "" which can never match
        # (`name` is truthy at this point), so lambdas kept the invalid
        # identifier "<lambda>". Compare against the actual name CPython
        # assigns to lambdas and rewrite it to a valid identifier.
        name = "_lambda"

    module_name = obj.__module__

    # If the module is actually a torchbind module, then we should short circuit
    if module_name == "torch._classes":
        return obj.qualified_name

    # The Python docs are very clear that `__module__` can be None, but I can't
    # figure out when it actually would be.
    if module_name is None:
        raise RuntimeError(
            f"Could not get qualified name for class '{name}': "
            "__module__ can't be None."
        )

    # torch.package and TorchScript have separate mangling schemes to avoid
    # name collisions from multiple packages. To avoid them interfering with
    # each other, normalize the package mangling here.
    if package_mangling.is_mangled(module_name):
        module_name = module_name.replace("<", "_")
        module_name = module_name.replace(">", "_")

    # The PythonExceptionValue C++ class in torch/csrc/jit/python/python_sugared_value.h
    # does not need mangle the python class name.
    if mangle_name:
        # __main__ is a builtin module, so rewrite it to "__torch__".
        if module_name == "__main__":
            module_name = "__torch__"
        else:
            # Everything else gets a "__torch__" prefix to avoid name collisions
            # with the names of user values.
            module_name = "__torch__." + module_name

    if "." in name:
        raise RuntimeError(
            f"Could not get qualified name for class '{name}': "
            f"'{name}' is not a valid identifier"
        )

    return module_name + "." + name
+ # + # Normally, annotations are evaluted like this (during jit.script): + # 1. Load strings of python code into c++ and parse. + # 2. Get annotations as strings + # 3. Use the PythonResolver's resolution callback (rcb) to convert + # the string into a python object + # 4. We call into annotations.py:ann_to_type to convert python obj + # from step 3 into a type that torchscript understands. + # + # NamedTuples are more complicated, because it has sub-types. + # Normally, once we have the NamedTuple type object from #3, + # we can just look at the annotation literal values and use + # ann_to_type directly on them. + # + # But sometimes, users will annotate with string literals, e.g. + # x: 'int' + # This also happens with PEP563 (from __forward__ import annotations) + # + # These annotations appear in the annotation dict as ForwardRef('int'). + # + # Then, we need to convert the string into a python object. This + # requires having local context for custom objects or imported types. + # rcb() is what gives us this. So, we plumb rcb through the stack so + # it can be used in this context for the if block below. + # + # FAQ: + # - Why do we need this special handling for NamedTuple but string + # annotations work fine for normal types? Normally, we parse the + # string directly and then call rcb() directly from C++. + # - Why not use ForwardRef._evaluate? For that, we need globals() + # and locals() for the local context where the NamedTuple was defined. + # rcb is what lets us look up into these. So, basically rcb does the + # hard work for us. + if isinstance(field_type, ForwardRef) and rcb is not None: + rcb_type = rcb(field_type.__forward_arg__) + # rcb returns None if it can't find anything. + if rcb_type is None: + raise ValueError( + f"Unknown type annotation: '{field_type}' in NamedTuple {obj.__name__}." + f" Likely due to partial support for ForwardRef parameters in NamedTuples, see #95858." 
+ f" Issue occurred at {loc.highlight()}" + ) + field_type = rcb_type + the_type = torch.jit.annotations.ann_to_type(field_type, loc, rcb) + annotations.append(the_type) + else: + annotations.append(torch._C.TensorType.getInferred()) + return type(obj).__name__, obj._fields, annotations, defaults + + +def _create_named_tuple( + t, unqual_name: str, field_names: List[str], defaults: Tuple[Any, ...] +): + TupleType = collections.namedtuple(unqual_name, field_names, defaults=defaults) # type: ignore[call-arg, no-redef, misc] + return TupleType(*t) + + +@contextlib.contextmanager +def _disable_emit_hooks(): + hooks = torch._C._jit_get_emit_hooks() + torch._C._jit_set_emit_hooks(None, None) + try: + yield + finally: + torch._C._jit_set_emit_hooks(hooks[0], hooks[1]) + + +def _disable_emit_hooks_decorator(_DecoratorContextManager) -> None: # noqa: F811 + def __enter__(self) -> None: + self.hooks = torch._C._jit_get_emit_hooks() + torch._C._jit_set_emit_hooks(None, None) + + def __exit__(self, *args) -> None: + torch._C._jit_set_emit_hooks(self.hooks[0], self.hooks[1]) + + +def _is_exception(obj) -> bool: + if not inspect.isclass(obj): + return False + return issubclass(obj, Exception) + + +def raise_error_container_parameter_missing(target_type) -> None: + if target_type == "Dict": + raise RuntimeError( + "Attempted to use Dict without " + "contained types. Please add contained type, e.g. " + "Dict[int, int]" + ) + raise RuntimeError( + f"Attempted to use {target_type} without a " + "contained type. Please add a contained type, e.g. 
" + f"{target_type}[int]" + ) + + +def check_args_exist(target_type) -> None: + if target_type is List or target_type is list: + raise_error_container_parameter_missing("List") + elif target_type is Tuple or target_type is tuple: + raise_error_container_parameter_missing("Tuple") + elif target_type is Dict or target_type is dict: + raise_error_container_parameter_missing("Dict") + elif target_type is None or target_type is Optional: + raise_error_container_parameter_missing("Optional") + + +def check_empty_containers(obj) -> None: + if obj == [] or obj == {} or obj == (): + warnings.warn( + "The inner type of a container is lost when " + "calling torch.jit.isinstance in eager mode. For " + "example, List[int] would become list and " + "therefore falsely return True for List[float] or" + " List[str]." + ) + + +# supports List/Dict/Tuple and Optional types +# TODO support future +def container_checker(obj, target_type) -> bool: + origin_type = get_origin(target_type) + check_args_exist(target_type) + if origin_type is None: + return False + elif origin_type is list or origin_type is List: + check_empty_containers(obj) + if not isinstance(obj, list): + return False + arg_type = get_args(target_type)[0] + arg_origin = get_origin(arg_type) + for el in obj: + # check if nested container, ex: List[List[str]] + if arg_origin: # processes nested container, ex: List[List[str]] + if not container_checker(el, arg_type): + return False + elif not isinstance(el, arg_type): + return False + return True + elif origin_type is Dict or origin_type is dict: + check_empty_containers(obj) + if not isinstance(obj, dict): + return False + key_type = get_args(target_type)[0] + val_type = get_args(target_type)[1] + for key, val in obj.items(): + # check if keys are of right type + if not isinstance(key, key_type): + return False + val_origin = get_origin(val_type) + if val_origin: + if not container_checker(val, val_type): + return False + elif not isinstance(val, val_type): + return False 
+ return True + elif origin_type is Tuple or origin_type is tuple: + check_empty_containers(obj) + if not isinstance(obj, tuple): + return False + arg_types = get_args(target_type) + if len(obj) != len(arg_types): + return False + for el, el_type in zip(obj, arg_types): + el_origin = get_origin(el_type) + if el_origin: + if not container_checker(el, el_type): + return False + elif not isinstance(el, el_type): + return False + return True + elif origin_type is Union or issubclass( + origin_type, BuiltinUnionType + ): # also handles Optional + if obj is None: # check before recursion because None is always fine + return True + inner_types = get_args(target_type) + for t in inner_types: + t_origin = get_origin(t) + if t_origin: + return container_checker(obj, t) + elif isinstance(obj, t): + return True + return False + + +def _isinstance(obj, target_type) -> bool: + if isinstance(target_type, collections.abc.Container): + if not isinstance(target_type, tuple): + raise RuntimeError( + "The second argument to " + "`torch.jit.isinstance` must be a type " + "or a tuple of types" + ) + for t_type in target_type: + if _isinstance(obj, t_type): + return True + return False + + origin_type = get_origin(target_type) + if origin_type: + return container_checker(obj, target_type) + + # Check to handle non-typed optional origin returns as none instead + # of as optional in 3.7-3.8 + check_args_exist(target_type) + + # handle non-containers + return isinstance(obj, target_type) + + +class _TensorExtractor(pickle.Pickler): + def __init__(self, *args, tensors: List[torch.Tensor], **kwargs): + super().__init__(*args, **kwargs) + self.tensors = tensors + + def persistent_id(self, obj): + if isinstance(obj, torch.Tensor): + self.tensors.append(obj) + return "" + # Since we just want to extract tensors, we don't mind if an object is + # unpicklable if it doesn't contain tensors, as we can just ignore/skip + # it. 
To play it safe, we only do so for common objects that we're sure + # don't contain tensors. Feel free to add new types here. Note also that + # even if a type isn't listed here this won't block users, since they + # can just add a __getstate__ or __reduce__ method to their class. + if isinstance(obj, LockType): + return "" + # Futures and RRefs don't technically contain a value, they just offer + # the means to access a value. + if isinstance(obj, CFuture) or is_rref_instance(obj): + return "" + if isinstance(obj, CAwait): + return "" + if isinstance(obj, torch.cuda.Event): + return "" + if isinstance(obj, threading.Thread): + return "" + return None + + +def _extract_tensors(obj): + r""" + This function is exclusively called from C++. + See ``torch/csrc/jit/python/python_ivalue.h``. + + It extracts the tensors contained in the given object, through pickling. + """ + tensors: List[torch.Tensor] = [] + extractor = _TensorExtractor(io.BytesIO(), protocol=-1, tensors=tensors) + extractor.dump(obj) + return tensors + + +# In Python-3.11+ typed enums (i.e. IntEnum for example) retain number of base class methods in subclass +# that were previously dropped.
To preserve the behavior, explicitly drop them there + +if sys.version_info > (3, 10): + _drop(enum.Enum.__new__) + _drop(enum.Enum.__format__) + _drop(enum.Enum.__repr__) + _drop(enum.Enum.__str__) diff --git a/evalkit_internvl/lib/python3.10/site-packages/torch/_meta_registrations.py b/evalkit_internvl/lib/python3.10/site-packages/torch/_meta_registrations.py new file mode 100644 index 0000000000000000000000000000000000000000..23e0f8ed21387f8236008aeda82436d6e167ddcd --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/torch/_meta_registrations.py @@ -0,0 +1,6242 @@ +import math +from enum import Enum +from functools import partial +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch._prims_common as utils +from torch import SymBool, SymFloat, Tensor +from torch._decomp import ( + _add_op_to_registry, + _convert_out_params, + global_decomposition_table, + meta_table, +) +from torch._ops import OpOverload +from torch._prims import _prim_elementwise_meta, ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND +from torch._prims_common import ( + corresponding_complex_dtype, + corresponding_real_dtype, + elementwise_dtypes, + ELEMENTWISE_TYPE_PROMOTION_KIND, + IntLike, + make_contiguous_strides_for, + TensorLike, +) + +from torch._prims_common.wrappers import ( + _maybe_convert_to_dtype, + _maybe_resize_out, + _resize_output_check, + _safe_copy_out, + out_wrapper, +) +from torch._refs import _broadcast_shapes, _maybe_broadcast +from torch.utils import _pytree as pytree + + +aten = torch.ops.aten + +_meta_lib_dont_use_me_use_register_meta = torch.library.Library("aten", "IMPL", "Meta") + + +def register_meta(op): + def wrapper(fn): + fn = _convert_out_params(fn) + + def register(op): + _add_op_to_registry(meta_table, op, fn) + + pytree.tree_map_(register, op) + return fn + + return wrapper + + +def elementwise_meta( + *args, + type_promotion: ELEMENTWISE_TYPE_PROMOTION_KIND, +): + # Perform type promotion, as this is expected from 
prim_metafunction + _, result_dtype = utils.elementwise_dtypes( + *args, + type_promotion_kind=type_promotion, + ) + args = [_maybe_convert_to_dtype(x, result_dtype) for x in args] + + # Broadcast + args = _maybe_broadcast(*args) + + # Perform prim checks + return _prim_elementwise_meta( + *args, type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT + ) + + +def toRealValueType(dtype): + from_complex = { + torch.complex32: torch.half, + torch.cfloat: torch.float, + torch.cdouble: torch.double, + } + return from_complex.get(dtype, dtype) + + +def check_inplace_broadcast(self_shape, *args_shape): + broadcasted_shape = tuple(_broadcast_shapes(self_shape, *args_shape)) + torch._check( + broadcasted_shape == self_shape, + lambda: f"output with shape {self_shape} doesn't match the broadcast shape {broadcasted_shape}", + ) + + +@register_meta([aten.linspace, aten.logspace]) +@out_wrapper() +def meta_linspace_logspace( + start, + end, + steps, + base=None, + dtype=None, + device=None, + layout=torch.strided, + pin_memory=False, + requires_grad=False, +): + if isinstance(start, torch.Tensor): + torch._check( + start.dim() == 0, + lambda: "linspace only supports 0-dimensional start and end tensors", + ) + if isinstance(end, torch.Tensor): + torch._check( + end.dim() == 0, + lambda: "linspace only supports 0-dimensional start and end tensors", + ) + + if any(isinstance(arg, complex) for arg in (start, end, steps)): + default_complex_dtype = utils.corresponding_complex_dtype( + torch.get_default_dtype() + ) + if dtype is None: + dtype = default_complex_dtype + else: + torch._check( + utils.is_complex_dtype(dtype), + lambda: f"linspace(): inferred dtype {default_complex_dtype} can't be safely cast to passed dtype {dtype}", + ) + else: + dtype = dtype or torch.get_default_dtype() + assert isinstance(dtype, torch.dtype) + + # steps does not participate in the computation of the dtype + torch._check_type( + isinstance(steps, IntLike), + lambda: f"received an invalid 
combination of arguments - got \ +({type(start).__name__}, {type(end).__name__}, {type(steps).__name__})", + ) + assert isinstance(steps, IntLike) # for mypy + torch._check(steps >= 0, lambda: "number of steps must be non-negative") + + return torch.empty( + (steps,), # type: ignore[arg-type] + dtype=dtype, + layout=layout, + device="meta", + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_meta([aten.take.default, aten.take.out]) +@out_wrapper() +def meta_take(self, index): + # Type and device checks + torch._check( + index.dtype == torch.long, + lambda: f"take(): Expected a long tensor for index, but got {index.dtype}", + ) + # Index checks + torch._check_index( + not (self.numel() == 0 and index.numel() != 0), + lambda: "take(): tried to take from an empty tensor", + ) + return self.new_empty(index.shape) + + +@register_meta([aten.linalg_cross.default, aten.linalg_cross.out]) +@out_wrapper() +def linalg_cross(self, other, *, dim=-1): + x_d = self.ndim + y_d = other.ndim + torch._check( + x_d == y_d, + lambda: "linalg.cross: inputs must have the same number of dimensions.", + ) + torch._check( + self.size(dim) == 3 and other.size(dim) == 3, + lambda: ( + f"linalg.cross: inputs dimension {dim} must have length 3. 
" + f"Got {self.size(dim)} and {other.size(dim)}" + ), + ) + out_shape = _broadcast_shapes(self.shape, other.shape) + return self.new_empty(out_shape) + + +@register_meta(aten.linalg_matrix_exp) +@out_wrapper() +def linalg_matrix_exp(self): + squareCheckInputs(self, "linalg.matrix_exp") + checkFloatingOrComplex(self, "linalg.matrix_exp") + return torch.empty_like(self, memory_format=torch.contiguous_format) + + +@register_meta( + [aten.cummax.default, aten.cummax.out, aten.cummin.default, aten.cummin.out] +) +@out_wrapper("values", "indices") +def cummaxmin(self, dim): + values = torch.empty(self.shape, device=self.device, dtype=self.dtype) + indices = torch.empty(self.shape, device=self.device, dtype=torch.int64) + if self.numel() != 0 and self.ndim != 0: + # Checks that dim is within bounds + maybe_wrap_dim(dim, self.ndim) + return values, indices + + +@register_meta([aten.logcumsumexp.default, aten.logcumsumexp.out]) +@out_wrapper() +def logcumsumexp(self, dim): + # Checks that dim is within bounds + maybe_wrap_dim(dim, self.ndim) + return torch.empty_like(self).contiguous() + + +# Stride-related code from _exec_fft in aten/src/ATen/native/cuda/SpectralOps.cpp +def _exec_fft(out, self, out_sizes, dim, forward): + ndim = self.ndim + signal_ndim = len(dim) + batch_dims = ndim - signal_ndim + + # Permute dimensions so batch dimensions come first, and in stride order + dim_permute = list(range(ndim)) + + is_transformed_dim = [False for _ in range(ndim)] + for d in dim: + is_transformed_dim[d] = True + + # std::partition + left, right = [], [] + for d in dim_permute: + if not is_transformed_dim[d]: + left.append(d) + else: + right.append(d) + dim_permute = left + right + batch_end = len(left) + + self_strides = self.stride() + tmp = dim_permute[:batch_end] + tmp.sort(key=lambda x: self_strides[x], reverse=True) + dim_permute = tmp + dim_permute[batch_end:] + input = self.permute(dim_permute) + + # Collapse batch dimensions into a single dimension + batched_sizes = 
[-1] + list(input.shape[batch_dims:]) + input = input.reshape(batched_sizes) + + batch_size = input.size(0) + batched_sizes[0] = batch_size + batched_out_sizes = batched_sizes + for i in range(len(dim)): + batched_out_sizes[i + 1] = out_sizes[dim[i]] + out = out.reshape(batched_out_sizes) + + # Reshaping to original batch shape and inverting the dimension permutation + out_strides = [0 for _ in range(ndim)] + batch_numel = 1 + i = batch_dims - 1 + while i >= 0: + out_strides[dim_permute[i]] = batch_numel * out.stride(0) + batch_numel *= out_sizes[dim_permute[i]] + i -= 1 + for i in range(batch_dims, ndim): + out_strides[dim_permute[i]] = out.stride(1 + (i - batch_dims)) + return out.as_strided(out_sizes, out_strides, out.storage_offset()) + + +# See _fft_c2c_cufft in aten/src/ATen/native/cuda/SpectralOps.cpp +# and _fft_c2c_mkl in aten/src/ATen/native/mkl/SpectralOps.cpp +@register_meta([aten._fft_c2c.default, aten._fft_c2c.out]) +@out_wrapper() +def meta_fft_c2c(self, dim, normalization, forward): + assert self.dtype.is_complex + + out_sizes = self.shape + output = self.new_empty(out_sizes) + + if not dim: + return output + + sorted_dims = dim[:] + self_strides = self.stride() + sorted_dims.sort(key=lambda x: self_strides[x], reverse=True) + output = _exec_fft(output, self, out_sizes, sorted_dims, forward) + + return output + + +@register_meta([aten._fft_r2c.default, aten._fft_r2c.out]) +@out_wrapper() +def meta_fft_r2c(self, dim, normalization, onesided): + assert self.dtype.is_floating_point + output_sizes = list(self.size()) + + if onesided: + last_dim = dim[-1] + last_dim_halfsize = (output_sizes[last_dim] // 2) + 1 + output_sizes[last_dim] = last_dim_halfsize + + return self.new_empty( + output_sizes, dtype=utils.corresponding_complex_dtype(self.dtype) + ) + + +@register_meta(aten.randperm.generator_out) +def meta_randperm(n, *, generator=None, out): + return _maybe_resize_out(out, torch.Size([n])) + + +@register_meta(aten.randperm.default) +def 
meta_randperm_default( + n, *, dtype=torch.long, layout=None, device=None, pin_memory=None +): + return torch.empty( + n, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory + ) + + +@register_meta(aten.randint.default) +def meta_randint( + high, size, *, dtype=torch.long, layout=None, device=None, pin_memory=None +): + return torch.empty( + size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory + ) + + +@register_meta(aten.randint.low) +def meta_randint_low( + low, + high, + size, + *, + dtype=torch.long, + layout=None, + device=None, + pin_memory=None, +): + return torch.empty( + size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory + ) + + +@register_meta(aten.rand.default) +def meta_rand_default(size, *, dtype=None, layout=None, device=None, pin_memory=None): + return torch.empty( + size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory + ) + + +@register_meta([aten._fft_c2r.default, aten._fft_c2r.out]) +@out_wrapper() +def meta_fft_c2r(self, dim, normalization, lastdim): + assert self.dtype.is_complex + output_sizes = list(self.size()) + output_sizes[dim[-1]] = lastdim + return self.new_empty(output_sizes, dtype=toRealValueType(self.dtype)) + + +@register_meta(aten.copy_.default) +def meta_copy_(self, src, non_blocking=False): + # This code simulates the original decomp from inductor, + # which runs most of the meta checks that we care about. + # In theory, we should make this more robust by carefully + # auditing our C++ copy_() kernel and copying the checks here. 
+ + if torch._debug_has_internal_overlap(self) == 1: # 1 == MemOverlap::Yes + raise RuntimeError( + "more than one element of the written-to tensor refers to a single memory location" + ) + + if isinstance(src, Tensor): + intermediate = src.to(self, non_blocking) + if self.size() != intermediate.size(): + aten.expand_copy.default(intermediate, self.size()) + return self + + +def inferUnsqueezeGeometry(tensor, dim): + result_sizes = list(tensor.size()) + result_strides = list(tensor.stride()) + new_stride = 1 if dim >= tensor.dim() else result_sizes[dim] * result_strides[dim] + result_sizes.insert(dim, 1) + result_strides.insert(dim, new_stride) + return result_sizes, result_strides + + +@register_meta(aten.unsqueeze_.default) +def meta_unsqueeze_(self, dim): + dim = maybe_wrap_dim(dim, self.dim() + 1) + g_sizes, g_strides = inferUnsqueezeGeometry(self, dim) + self.as_strided_(g_sizes, g_strides) + return self + + +@register_meta(aten._sparse_semi_structured_linear) +def meta_sparse_structured_linear( + input: Tensor, + weight: Tensor, + _meta: Tensor, + bias: Optional[Tensor] = None, + _activation_opt: Optional[str] = None, +): + output_sizes = list(input.shape) + if bias is not None: + assert weight.size(0) == bias.size(0), "output size mismatch" + assert weight.size(1) == input.size(-1) / 2 + output_sizes[-1] = weight.size(0) + + # see: https://github.com/pytorch/pytorch/pull/114477#issuecomment-1830121375 + # We assume that we have already squashed the inputs into a 2-D tensor + # Then, as the output is transposed, we need to propagate the transposed + # stride information to the output tensor + assert len(input.shape) == 2, "we can only handle the squashed input case" + transposed_strides = (1, input.size(0)) + + output = input.new_empty( + output_sizes, + dtype=input.dtype if input.dtype != torch.int8 else torch.int32, + ).as_strided(output_sizes, transposed_strides) + + return output + + +@register_meta(aten._cslt_sparse_mm) +def meta__cslt_sparse_mm( + 
compressed_A: torch.Tensor, + dense_B: torch.Tensor, + bias: Optional[Tensor] = None, + alpha: Optional[Tensor] = None, + out_dtype: Optional[torch.dtype] = None, + transpose_result: bool = False, +): + assert dense_B.dtype in { + torch.float16, + torch.bfloat16, + torch.int8, + }, "_cslt_sparse_mm only supports fp16, bf16, and int8" + assert compressed_A.dtype == dense_B.dtype, "inputs must have the same dtype" + assert len(dense_B.shape) == 2, "_cslt_sparse_mm only supports 2d inputs" + + is_int8_input_type = compressed_A.dtype == torch.int8 + compression_factor = 10 if is_int8_input_type else 9 + k = dense_B.size(0) + n = dense_B.size(1) + m = (compressed_A.numel() * 16) // (compression_factor * k) + if bias is not None: + assert m == bias.size(0) + + if out_dtype is not None: + assert ( + is_int8_input_type and out_dtype == torch.float16 + ), "out_dtype is only supported for i8i8->fp16 matmul" + output_shape = (n, m) if transpose_result else (m, n) + result = dense_B.new_empty(output_shape, dtype=out_dtype) + return result + + +@register_meta(aten.index_reduce.default) +def meta_index_reduce( + self: Tensor, + dim: int, + index: Tensor, + source: torch.Tensor, + reduce: str, + *, + include_self: bool = True, +) -> Tensor: + return torch.empty_like(self, memory_format=torch.contiguous_format) + + +@register_meta(aten.index_reduce_.default) +def meta_index_reduce_( + self: Tensor, + dim: int, + index: Tensor, + source: torch.Tensor, + reduce: str, + *, + include_self: bool = True, +) -> Tensor: + return self + + +# Implementations below are taken from https://github.com/albanD/subclass_zoo/blob/main/python_meta_tensor.py +@out_wrapper() +@register_meta(aten.index_select.default) +def meta_index_select(self, dim, index): + result_size = list(self.size()) + if self.dim() > 0: + result_size[dim] = index.numel() + return self.new_empty(result_size) + + +@register_meta(aten.segment_reduce.default) +def meta_segment_reduce( + data: Tensor, + reduce: str, + *, + 
lengths: Optional[Tensor] = None, + indices: Optional[Tensor] = None, + offsets: Optional[Tensor] = None, + axis: int = 0, + unsafe: bool = False, + initial=None, +) -> Tensor: + if indices is not None: + raise NotImplementedError( + "segment_reduce(): indices based reduction is not supported yet." + ) + + def segment_reduce_lengths_tensor(lengths_shape): + return torch.empty( + lengths_shape + data.shape[axis + 1 :], + dtype=data.dtype, + device="meta", + memory_format=torch.contiguous_format, + ) + + if lengths is not None: + return segment_reduce_lengths_tensor(lengths.shape) + # FIXME should probably check that lengths and offset aren't both set, but + # the ATen implementation neglects this too + if offsets is not None: + # lengths == torch.diff(offsets) + lengths_shape = offsets.shape[:-1] + (offsets.shape[-1] - 1,) + return segment_reduce_lengths_tensor(lengths_shape) + raise RuntimeError("segment_reduce(): Either lengths or offsets must be defined.") + + +@register_meta([aten.max.default, aten.max.unary_out]) +@out_wrapper() +def meta_max(self): + return self.new_empty(()) + + +@register_meta(aten.max.dim) +def meta_max_dim(self, dim, keepdim=False): + dim = utils.reduction_dims(self.shape, (dim,)) + output_shape = _compute_reduction_shape(self, dim, keepdim) + return ( + self.new_empty(output_shape), + self.new_empty(output_shape, dtype=torch.long), + ) + + +@register_meta([aten.min.default, aten.min.unary_out]) +@out_wrapper() +def meta_min(self): + return self.new_empty(()) + + +@register_meta(aten.min.dim) +def meta_min_dim(self, dim, keepdim=False): + dim = utils.reduction_dims(self.shape, (dim,)) + output_shape = _compute_reduction_shape(self, dim, keepdim) + return ( + self.new_empty(output_shape), + self.new_empty(output_shape, dtype=torch.long), + ) + + +@register_meta(aten.angle.default) +def meta_angle(self): + if self.is_complex(): + result_dtype = corresponding_real_dtype(self.dtype) + else: + _, result_dtype = elementwise_dtypes( + self, + 
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + ) + return torch.empty_like(self, dtype=result_dtype) + + +@register_meta(aten.angle.out) +def meta_angle_out(self, out): + torch._resize_output_(out, self.size(), self.device) + return out.copy_(torch.angle(self)) + + +@register_meta(aten._assert_async.default) +def assert_async(val): + return + + +@register_meta(aten._assert_async.msg) +def assert_async_meta(val, assert_msg): + return + + +@register_meta(aten._make_dep_token.default) +def make_dep_token( + *, + dtype=None, + layout=None, + device=None, + pin_memory=None, + memory_format=None, +): + return torch.empty([], device="meta") + + +@register_meta(aten.sym_constrain_range.default) +def sym_constrain_range(size, min=None, max=None): + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import constrain_range + + if isinstance(size, (SymFloat, SymBool)): + raise ValueError("Constraining SymFloat or Symbool is nyi") + constrain_range(size, min=min, max=max) + + +@register_meta(aten._functional_sym_constrain_range.default) +def functional_sym_constrain_range(size, min=None, max=None, dep_token=None): + aten.sym_constrain_range(size, min=min, max=max) + return dep_token + + +@register_meta(aten.sym_constrain_range_for_size.default) +def sym_constrain_range_for_size(size, min=None, max=None): + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import _constrain_range_for_size + + if isinstance(size, (SymFloat, SymBool)): + raise ValueError("Constraining SymFloat or Symbool is nyi") + _constrain_range_for_size(size, min=min, max=max) + + +@register_meta(aten._functional_sym_constrain_range_for_size.default) +def functional_sym_constrain_range_for_size(size, min, max, dep_token): + aten.sym_constrain_range_for_size(size, min=min, max=max) + return dep_token + + +@register_meta(aten._functional_assert_async.msg) +def functional_assert_async_meta(val, assert_msg, dep_token): + 
return dep_token + + +# From aten/src/ATen/native/LinearAlgebraUtils.h +def squareCheckInputs(self: Tensor, f_name: str): + assert ( + self.dim() >= 2 + ), f"{f_name}: The input tensor must have at least 2 dimensions." + assert self.size(-1) == self.size( + -2 + ), f"{f_name}: A must be batches of square matrices, but they are {self.size(-2)} by {self.size(-1)} matrices" + + +# Validates input shapes and devices +# for linear solve methods (solve, cholesky_solve, lu_solve, triangular_solve) +# From aten/src/ATen/native/LinearAlgebraUtils.h +def linearSolveCheckInputs( + self: Tensor, + A: Tensor, + name: str, +): + torch._check( + self.device == A.device, + lambda: ( + f"Expected b and A to be on the same device, but found b on " + f"{self.device} and A on {A.device} instead." + ), + ) + + torch._check( + self.dtype == A.dtype, + lambda: ( + f"Expected b and A to have the same dtype, but found b of type " + f"{self.dtype} and A of type {A.dtype} instead." + ), + ) + + torch._check( + A.size(-1) == A.size(-2), + lambda: ( + f"A must be batches of square matrices, " + f"but they are {A.size(-2)} by {A.size(-1)} matrices" + ), + ) + + torch._check( + A.size(-1) == self.size(-2), + lambda: ( + f"Incompatible matrix sizes for {name}: each A " + f"matrix is {A.size(-1)} by {A.size(-1)}" + f" but each b matrix is {self.size(-2)} by {self.size(-1)}" + ), + ) + + +# From aten/src/ATen/native/LinearAlgebraUtils.h +def checkFloatingOrComplex( + t: Tensor, f_name: str, allow_low_precision_dtypes: bool = True +): + dtype = t.dtype + torch._check( + t.is_floating_point() or t.is_complex(), + lambda: f"{f_name}: Expected a floating point or complex tensor as input. Got {dtype}", + ) + if not allow_low_precision_dtypes: + torch._check( + dtype in (torch.float, torch.double, torch.cfloat, torch.cdouble), + lambda: f"{f_name}: Low precision dtypes not supported. 
Got {dtype}", + ) + + +# From aten/src/ATen/native/LinearAlgebraUtils.h +def checkIsMatrix(A: Tensor, f_name: str, arg_name: str = "A"): + torch._check( + A.dim() >= 2, + lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.", + ) + + +def checkInputsSolver( + A: Tensor, + B: Tensor, + left: bool, + f_name: str, +): + squareCheckInputs(A, f_name) + checkIsMatrix(B, f_name) + torch._check( + A.size(-2) == B.size(-2) if left else A.size(-1) == B.size(-1), + lambda: ( + f"{f_name}: Incompatible shapes of A and B for the equation " + f"{'AX = B' if left else 'XA = B'}" + f" ({A.size(-2)}x{A.size(-1)} and {B.size(-2)}x{B.size(-1)})" + ), + ) + + +def checkSameDevice( + fn_name: str, result: Tensor, input: Tensor, result_name: str = "result" +): + torch._check( + result.device == input.device, + lambda: ( + f"{fn_name}: Expected {result_name} and input tensors to be on the same device, but got " + f"{result_name} on {result.device} and input on {input.device}" + ), + ) + + +def checkUplo(UPLO: str): + UPLO_uppercase = UPLO.upper() + torch._check( + len(UPLO) == 1 and (UPLO_uppercase == "U" or UPLO_uppercase == "L"), + lambda: f"Expected UPLO argument to be 'L' or 'U', but got {UPLO}", + ) + + +@register_meta([aten._linalg_eigh.default, aten._linalg_eigh.eigenvalues]) +@out_wrapper("eigenvalues", "eigenvectors") +def meta__linalg_eigh( + A: Tensor, + UPLO: str = "L", + compute_v: bool = True, +): + squareCheckInputs(A, "linalg.eigh") + checkUplo(UPLO) + + shape = list(A.shape) + if compute_v: + vecs = A.new_empty(shape) + vecs.as_strided_(shape, make_contiguous_strides_for(shape, row_major=False)) + else: + vecs = A.new_empty([0]) + + shape.pop() + vals = A.new_empty(shape, dtype=toRealValueType(A.dtype)) + + return vals, vecs + + +def cloneBatchedColumnMajor(src: Tensor) -> Tensor: + return src.mT.clone(memory_format=torch.contiguous_format).transpose(-2, -1) + + +@register_meta(aten._cholesky_solve_helper) +@out_wrapper() +def 
_cholesky_solve_helper(self: Tensor, A: Tensor, upper: bool) -> Tensor: + return cloneBatchedColumnMajor(self) + + +@register_meta(aten.cholesky_solve) +@out_wrapper() +def cholesky_solve(self: Tensor, A: Tensor, upper: bool = False) -> Tensor: + torch._check( + self.ndim >= 2, + lambda: f"b should have at least 2 dimensions, but has {self.ndim} dimensions instead", + ) + torch._check( + A.ndim >= 2, + lambda: f"u should have at least 2 dimensions, but has {A.ndim} dimensions instead", + ) + self_broadcasted, A_broadcasted = _linalg_broadcast_batch_dims_name( + self, A, "cholesky_solve" + ) + return _cholesky_solve_helper(self_broadcasted, A_broadcasted, upper) + + +@register_meta(aten.cholesky) +@out_wrapper() +def cholesky(self: Tensor, upper: bool = False) -> Tensor: + if self.numel() == 0: + return torch.empty_like(self, memory_format=torch.legacy_contiguous_format) + squareCheckInputs(self, "cholesky") + return cloneBatchedColumnMajor(self) + + +@register_meta(aten.cholesky_inverse) +@out_wrapper() +def cholesky_inverse(self: Tensor, upper: bool = False) -> Tensor: + squareCheckInputs(self, "cholesky_inverse") + return cloneBatchedColumnMajor(self) + + +# From aten/src/ATen/native/BatchLinearAlgebra.cpp +@register_meta(aten.linalg_cholesky_ex.default) +def linalg_cholesky_ex(A: Tensor, upper: bool = False, check_errors: bool = False): + squareCheckInputs(A, "linalg.cholesky") + checkFloatingOrComplex(A, "linalg.cholesky") + + A_shape = A.shape + ndim = len(A_shape) + + # L + L_strides = make_contiguous_strides_for(A_shape, False) + L = A.new_empty(A_shape) + L.as_strided_(A_shape, L_strides) + + # infos + infos = A.new_empty(A_shape[0 : ndim - 2], dtype=torch.int32) + return L, infos + + +@register_meta( + [aten.linalg_householder_product.default, aten.linalg_householder_product.out] +) +@out_wrapper() +def linalg_householder_product(input: Tensor, tau: Tensor) -> Tensor: + torch._check( + input.ndim >= 2, + lambda: "torch.linalg.householder_product: input 
must have at least 2 dimensions.", + ) + torch._check( + input.size(-2) >= input.size(-1), + lambda: "torch.linalg.householder_product: input.shape[-2] must be greater than or equal to input.shape[-1]", + ) + torch._check( + input.size(-1) >= tau.size(-1), + lambda: "torch.linalg.householder_product: input.shape[-1] must be greater than or equal to tau.shape[-1]", + ) + + torch._check( + input.ndim - tau.ndim == 1, + lambda: ( + f"torch.linalg.householder_product: Expected tau to have one dimension less than input, " + f"but got tau.ndim equal to {tau.ndim} and input.ndim is equal to {input.ndim}" + ), + ) + if input.ndim > 2: + expected_batch_tau_shape = input.shape[:-2] + actual_batch_tau_shape = tau.shape[:-1] + torch._check( + actual_batch_tau_shape == expected_batch_tau_shape, + lambda: ( + f"torch.linalg.householder_product: Expected batch dimensions of tau to be " + f"equal to input.shape[:-2], but got {actual_batch_tau_shape}" + ), + ) + + torch._check( + tau.dtype == input.dtype, + lambda: ( + f"torch.linalg.householder_product: tau dtype {tau.dtype}" + f" does not match input dtype {input.dtype}" + ), + ) + checkSameDevice("torch.linalg.householder_product", tau, input, "tau") + + return torch.empty_strided( + size=input.shape, + stride=make_contiguous_strides_for(input.shape, row_major=False), + dtype=input.dtype, + device=input.device, + ) + + +# From aten/src/ATen/native/BatchLinearAlgebra.cpp +@register_meta(aten.linalg_inv_ex.default) +def linalg_inv_ex_meta(A: Tensor, check_errors: bool = False): + squareCheckInputs(A, "linalg.inv_ex") + checkFloatingOrComplex(A, "linalg.inv_ex", allow_low_precision_dtypes=False) + + L = A.new_empty(A.shape) + L.as_strided_(A.shape, make_contiguous_strides_for(A.shape, row_major=False)) + + infos = A.new_empty(A.shape[:-2], dtype=torch.int32) + return L, infos + + +@register_meta([aten.linalg_ldl_factor_ex.default, aten.linalg_ldl_factor_ex.out]) +@out_wrapper("LD", "pivots", "info") +def linalg_ldl_factor_ex_meta( 
+ self: Tensor, + *, + hermitian: bool = False, + check_errors: bool = False, +) -> Tuple[Tensor, Tensor, Tensor]: + squareCheckInputs(self, "torch.linalg.ldl_factor_ex") + checkFloatingOrComplex(self, "torch.linalg.ldl_factor_ex") + LD = torch.empty_strided( + size=self.shape, + stride=make_contiguous_strides_for(self.shape, row_major=False), + dtype=self.dtype, + device=self.device, + ) + pivots = self.new_empty(self.shape[:-1], dtype=torch.int) + info = self.new_empty(self.shape[:-2], dtype=torch.int) + return LD, pivots, info + + +@register_meta([aten.linalg_ldl_solve.default, aten.linalg_ldl_solve.out]) +@out_wrapper() +def linalg_ldl_solve_meta( + LD: Tensor, pivots: Tensor, B: Tensor, *, hermitian: bool = False +) -> Tensor: + squareCheckInputs(LD, "torch.linalg.ldl_solve") + checkFloatingOrComplex(LD, "torch.linalg.ldl_solve") + linearSolveCheckInputs(B, LD, "torch.linalg.ldl_solve") + torch._check( + B.ndim >= 2, + lambda: ( + f"torch.linalg.ldl_solve: Expected B to have at least 2 dimensions, " + f"but it has {B.ndim} dimensions instead" + ), + ) + expected_pivots_shape = LD.shape[:-1] + torch._check( + expected_pivots_shape == pivots.shape, + lambda: ( + f"torch.linalg.ldl_solve: Expected LD.shape[:-1] and pivots.shape to be the same, " + f"but got pivots with shape {pivots.shape} instead" + ), + ) + torch._check( + utils.is_integer_dtype(pivots.dtype), + lambda: f"torch.linalg.ldl_solve: Expected pivots to be integers. 
Got {pivots.dtype}", + ) + torch._check( + LD.dtype == B.dtype, + lambda: f"torch.linalg.ldl_solve: LD dtype {LD.dtype} does not match b dtype {B.dtype}", + ) + B_broadcast_size, _ = _linalg_broadcast_batch_dims(B, LD) + return torch.empty_strided( + size=B_broadcast_size, + stride=make_contiguous_strides_for(B_broadcast_size, row_major=False), + dtype=B.dtype, + device=B.device, + ) + + +@register_meta([aten.linalg_lu.default, aten.linalg_lu.out]) +@out_wrapper("P", "L", "U") +def linalg_lu_meta(A: Tensor, *, pivot: bool = True) -> Tuple[Tensor, Tensor, Tensor]: + torch._check( + A.ndim >= 2, + lambda: f"linalg.lu: Expected tensor with 2 or more dimensions. Got size: {A.shape} instead", + ) + + sizes = list(A.shape) + m = sizes[-2] + n = sizes[-1] + k = min(m, n) + + sizes[-1] = m + if pivot: + P = A.new_empty(sizes) + else: + P = A.new_empty([0]) + + sizes[-1] = k + L = A.new_empty(sizes) + + sizes[-2] = k + sizes[-1] = n + U = A.new_empty(sizes) + return P, L, U + + +@register_meta([aten.linalg_lu_factor_ex.default, aten.linalg_lu_factor_ex.out]) +@out_wrapper("LU", "pivots", "info") +def linalg_lu_factor_ex_meta( + A: Tensor, *, pivot: bool = True, check_errors: bool = False +) -> Tuple[Tensor, Tensor, Tensor]: + torch._check( + A.ndim >= 2, + lambda: f"torch.lu_factor: Expected tensor with 2 or more dimensions. 
Got size: {A.shape} instead", + ) + + sizes = list(A.shape) + m = sizes[-2] + n = sizes[-1] + + LU = torch.empty_strided( + size=sizes, + stride=make_contiguous_strides_for(sizes, row_major=False), + dtype=A.dtype, + device=A.device, + ) + + # Sets sizes to the size of pivots + sizes.pop() + sizes[-1] = min(m, n) + pivots = A.new_empty(sizes, dtype=torch.int) + + # Sets sizes to the size of info + sizes.pop() + info = A.new_empty(sizes, dtype=torch.int) + + return LU, pivots, info + + +@register_meta([aten.linalg_lu_solve.default, aten.linalg_lu_solve.out]) +@out_wrapper() +def linalg_lu_solve_meta( + LU: Tensor, + pivots: Tensor, + B: Tensor, + *, + left: bool = True, + adjoint: bool = False, +) -> Tensor: + # dtype + checkFloatingOrComplex(LU, "torch.linalg.lu_solve") + torch._check( + LU.dtype == B.dtype, + lambda: ( + f"linalg.lu_solve: Expected LU and B to have the same dtype, " + f"but found LU of type {LU.dtype} and B of type {B.dtype} instead" + ), + ) + torch._check( + pivots.dtype == torch.int, + lambda: "linalg.lu_solve: pivots should be a Tensor of scalar type torch.int32", + ) + + # matrix shapes + squareCheckInputs(LU, "torch.linalg.lu_solve") + checkInputsSolver(LU, B, left, "linalg.lu_solve") + torch._check( + LU.size(-1) == pivots.size(-1), + lambda: "linalg.lu_solve: Number of pivots per batch should be same as the dimension of the matrix", + ) + + # batches + torch._check( + LU.shape[:-1] == pivots.shape, + lambda: ( + f"linalg.lu_solve: Expected LU.shape[:-1] and pivots.shape to be the same, " + f"but got pivots with shape {pivots.shape} instead" + ), + ) + + B_broadcast_size, _ = _linalg_broadcast_batch_dims(B, LU) + + result = torch.empty_strided( + size=B_broadcast_size, + stride=make_contiguous_strides_for(B_broadcast_size, row_major=not left), + dtype=B.dtype, + device=B.device, + ) + + if result.numel() != 0 and not left: + if result.is_complex(): + result = result.conj() + + return result + + +@register_meta(aten.lu_unpack) 
+@out_wrapper("P", "L", "U") +def lu_unpack_meta( + LU: Tensor, + pivots: Tensor, + unpack_data: bool = True, + unpack_pivots: bool = True, +) -> Tuple[Tensor, Tensor, Tensor]: + torch._check( + LU.ndim >= 2, + lambda: f"torch.lu_unpack: Expected tensor with 2 or more dimensions. Got size: {LU.shape} instead", + ) + if unpack_pivots: + torch._check( + pivots.dtype == torch.int32, + lambda: ( + "torch.lu_unpack: LU_pivots is expected to be a contiguous tensor of torch.int32 dtype.\n" + "Note: this function is intended to be used with the output produced by torch.linalg.lu_factor" + ), + ) + sizes = list(LU.shape) + m = sizes[-2] + n = sizes[-1] + k = min(m, n) + sizes[-1] = m + if unpack_pivots: + P = LU.new_empty(sizes) + else: + P = LU.new_empty([0]) + if unpack_data: + sizes[-1] = k + L = LU.new_empty(sizes) + sizes[-2] = k + sizes[-1] = n + U = LU.new_empty(sizes) + else: + L = LU.new_empty([0]) + U = LU.new_empty([0]) + return P, L, U + + +# parse the "mode" param in linalg_qr: return a tuple of bools (compute_q, reduced) +def _parse_qr_mode(mode: str) -> Tuple[bool, bool]: + if mode == "reduced": + compute_q = True + reduced = True + elif mode == "complete": + compute_q = True + reduced = False + elif mode == "r": + compute_q = False + reduced = True # this is actually irrelevant in this mode + else: + torch._check( + False, + lambda: ( + f"qr received unrecognized mode '{mode}' " + f"but expected one of 'reduced' (default), 'r', or 'complete'" + ), + ) + return compute_q, reduced + + +@register_meta([aten.linalg_qr.default, aten.linalg_qr.out]) +@out_wrapper("Q", "R") +def linalg_qr_meta( + A: Tensor, + mode: str = "reduced", +) -> Tuple[Tensor, Tensor]: + checkIsMatrix(A, "linalg.qr") + checkFloatingOrComplex(A, "linalg.qr") + + compute_q, reduced_mode = _parse_qr_mode(mode) + + m = A.shape[-2] + n = A.shape[-1] + k = min(m, n) + + if compute_q: + Q_shape = list(A.shape) + Q_shape[-1] = k if reduced_mode else m + Q = A.new_empty(Q_shape) + 
Q.as_strided_(Q_shape, make_contiguous_strides_for(Q_shape, row_major=False)) + else: + Q = A.new_empty([0]) + + # For readability + R_shape = list(A.shape) + R_shape[-2] = k if reduced_mode or not compute_q else m + R = A.new_empty(R_shape) + R.as_strided_(R_shape, make_contiguous_strides_for(R_shape, row_major=False)) + return Q, R + + +@register_meta([aten._linalg_slogdet.default, aten._linalg_slogdet.sign]) +@out_wrapper("sign", "logabsdet", "LU", "pivots") +def _linalg_slogdet(A: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + squareCheckInputs(A, "linalg.slogdet") + checkFloatingOrComplex(A, "linalg.slogdet", False) + shape = A.shape + sign = A.new_empty(shape[:-2]) + logabsdet = A.new_empty(shape[:-2], dtype=toRealValueType(A.dtype)) + LU = torch.empty_strided( + size=shape, + stride=make_contiguous_strides_for(shape, False), + dtype=A.dtype, + device=A.device, + ) + pivots = A.new_empty(shape[:-1], dtype=torch.int32) + return sign, logabsdet, LU, pivots + + +# From aten/src/ATen/native/BatchLinearAlgebra.cpp +# NOTE: matching defaults in aten/src/ATen/native/native_functions.yaml +@register_meta(aten._linalg_svd.default) +def _linalg_svd_meta( + A: Tensor, + full_matrices: bool = False, + compute_uv: bool = True, + driver: Optional[str] = None, +): + checkIsMatrix(A, "linalg.svd") + checkFloatingOrComplex(A, "linalg.svd") + + batch_dims = list(A.shape[:-2]) + m = A.shape[-2] + n = A.shape[-1] + k = min(m, n) + + if compute_uv: + U_shape = batch_dims + [m, m if full_matrices else k] + U = A.new_empty(U_shape) + U.as_strided_(U_shape, make_contiguous_strides_for(U_shape, row_major=False)) + + V_shape = batch_dims + [n if full_matrices else k, n] + V = A.new_empty(V_shape) + # NB: This checks for CUDA since there is no way to check for cuSolver. + # Also, this might not work correctly on CPU when fake_device is not + # available as device_hint just defaults to CUDA in that case. See + # _linalg_svd meta in core. 
+ is_cuda = device_hint(A) == "cuda" + V.as_strided_(V_shape, make_contiguous_strides_for(V_shape, row_major=is_cuda)) + else: + # doesn't matter + U = A.new_empty([0]) + V = A.new_empty([0]) + + # S is always real, even when A is complex. + S = A.new_empty(batch_dims + [k], dtype=toRealValueType(A.dtype)) + return U, S, V + + +def _linalg_broadcast_batch_dims( + arg1: Tensor, arg2: Tensor +) -> Tuple[List[int], List[int]]: + # broadcast the batch dimensions of arg1 and arg2. + arg1_batch_sizes = arg1.shape[:-2] + arg2_batch_sizes = arg2.shape[:-2] + expand_batch_portion = _broadcast_shapes(arg1_batch_sizes, arg2_batch_sizes) + + arg1_expand_size = list(expand_batch_portion) + arg1_expand_size += [arg1.size(-2), arg1.size(-1)] + + arg2_expand_size = list(expand_batch_portion) + arg2_expand_size += [arg2.size(-2), arg2.size(-1)] + return arg1_expand_size, arg2_expand_size + + +def _linalg_broadcast_batch_dims_name( + arg1: Tensor, arg2: Tensor, name: Optional[str] +) -> Tuple[Tensor, Tensor]: + # If there's no name we assume we don't want to check the errors + if name: + linearSolveCheckInputs(arg1, arg2, name) + + arg1_expand_size, arg2_expand_size = _linalg_broadcast_batch_dims(arg1, arg2) + + arg1_broadcasted = ( + arg1 if arg1_expand_size == arg1.shape else arg1.expand(arg1_expand_size) + ) + arg2_broadcasted = ( + arg2 if arg2_expand_size == arg2.shape else arg2.expand(arg2_expand_size) + ) + return arg1_broadcasted, arg2_broadcasted + + +def linalg_solve_is_vector_rhs(input: Tensor, other: Tensor) -> bool: + expected_batched_rhs_shape = input.shape[:-1] + vector_case = other.ndim == 1 or ( + input.ndim - 1 == other.ndim and other.shape == expected_batched_rhs_shape + ) + return vector_case + + +@register_meta(aten._linalg_solve_ex) +def _linalg_solve_ex( + A: Tensor, + B: Tensor, + *, + left: bool = True, + check_errors: bool = False, + result: Optional[Tensor] = None, + LU: Optional[Tensor] = None, + pivots: Optional[Tensor] = None, + info: Optional[Tensor] = 
None, +) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + checkFloatingOrComplex(A, "linalg.solve") + torch._check( + A.dtype == B.dtype, + lambda: ( + f"linalg.solve: Expected A and B to have the same dtype, but found A of type " + f"{A.dtype} and B of type {B.dtype} instead" + ), + ) + vector_case = linalg_solve_is_vector_rhs(A, B) + B_ = B.unsqueeze(-1) if vector_case else B + checkInputsSolver(A, B_, left, "linalg.solve") + B_broad_shape, _ = _linalg_broadcast_batch_dims(B_, A) + torch._check( + left or not vector_case, + lambda: ( + "linalg.solve: Vector broadcasting of the left hand side is not supported for left=False. " + "In this case linalg.solve is equivalent to B / A.squeeze(-1)" + ), + ) + result_shape = B_broad_shape[:-1] if vector_case else B_broad_shape + result_ = torch.empty_strided( + size=result_shape, + stride=make_contiguous_strides_for(result_shape, not left), + dtype=B.dtype, + device=B.device, + ) + shape = A.shape + ndim = A.ndim + LU_ = torch.empty_strided( + size=shape, + stride=make_contiguous_strides_for(shape, False), + dtype=A.dtype, + device=A.device, + ) + pivots_ = A.new_empty(shape[:-1], dtype=torch.int32) + info_ = A.new_empty(shape[:-2], dtype=torch.int32) + out = (result, LU, pivots, info) + res = (result_, LU_, pivots_, info_) + if all(x is not None for x in out): + for r, o in zip(res, out): + # resize and copy operations are done in-place + _maybe_resize_out(o, r.shape) # type: ignore[arg-type] + # strides are not copied in out_wrapper + o.as_strided_(r.shape, r.stride()) # type: ignore[union-attr] + _safe_copy_out(copy_from=r, copy_to=o, exact_dtype=False) # type: ignore[arg-type] + return res + + +@register_meta([aten.linalg_solve_triangular.default, aten.linalg_solve_triangular.out]) +def linalg_solve_triangular_meta( + A: Tensor, + B: Tensor, + *, + upper: bool, + left: bool = True, + unitriangular: bool = False, + out: Optional[Tensor] = None, +) -> Tensor: + if out is None: + out = A.new_empty([0]) + assert isinstance(out, 
TensorLike) + checkInputsSolver(A, B, left, "linalg.solve_triangular") + B_, A_ = _linalg_broadcast_batch_dims_name(B, A, None) + avoid_copy_A = A_.transpose(-2, -1).is_contiguous() and A_.is_conj() + if avoid_copy_A: + out = _maybe_resize_out(out, B_.shape) + else: + # reimplementation of resize_output with result F-contig + if _resize_output_check(out, B_.shape): + out.resize_(B_.transpose(-2, -1).shape) + out.transpose_(-2, -1) + return out # type: ignore[return-value] + + +@register_meta(aten.triangular_solve) +@out_wrapper("solution", "cloned_coefficient") +def triangular_solve_meta( + self: Tensor, + A: Tensor, + upper: bool = True, + transpose: bool = False, + unitriangular: bool = False, +) -> Tuple[Tensor, Tensor]: + torch._check( + self.ndim >= 2, + lambda: ( + f"torch.triangular_solve: Expected b to have at least 2 dimensions, " + f"but it has {self.ndim} dimensions instead" + ), + ) + torch._check( + A.ndim >= 2, + lambda: ( + f"torch.triangular_solve: Expected A to have at least 2 dimensions, " + f"but it has {A.ndim} dimensions instead" + ), + ) + + linearSolveCheckInputs(self, A, "triangular_solve") + + if A.layout == torch.strided: + self_broadcast_size, A_broadcast_size = _linalg_broadcast_batch_dims(self, A) + solution = torch.empty_strided( + size=self_broadcast_size, + stride=make_contiguous_strides_for(self_broadcast_size, row_major=False), + dtype=self.dtype, + device=self.device, + ) + cloned_coefficient = torch.empty_strided( + size=A_broadcast_size, + stride=make_contiguous_strides_for(A_broadcast_size, row_major=False), + dtype=A.dtype, + device=A.device, + ) + elif A.layout == torch.sparse_csr or A.layout == torch.sparse_bsr: + solution = torch.empty_like(self) + cloned_coefficient = self.new_empty([0]) + else: + torch._check(False, lambda: "triangular_solve: Got an unexpected layout.") + return solution, cloned_coefficient + + +# From aten/src/ATen/native/LinearAlgebra.cpp +@register_meta(aten._linalg_det.default) +def 
_linalg_det_meta(A): + squareCheckInputs(A, "linalg.det") + checkFloatingOrComplex(A, "linalg.det") + + det = A.new_empty(A.shape[:-2]) + + LU = A.new_empty(A.shape) + LU.as_strided_(A.shape, make_contiguous_strides_for(A.shape, row_major=False)) + + pivots = A.new_empty(A.shape[:-1], dtype=torch.int32) + return det, LU, pivots + + +@register_meta(aten.ormqr) +@out_wrapper() +def ormqr( + input: Tensor, + tau: Tensor, + other: Tensor, + left: bool = True, + transpose: bool = False, +) -> Tensor: + torch._check( + input.ndim >= 2, lambda: "torch.ormqr: input must have at least 2 dimensions." + ) + torch._check( + other.ndim >= 2, lambda: "torch.ormqr: other must have at least 2 dimensions." + ) + + left_size_condition = -2 if left else -1 + torch._check( + other.shape[left_size_condition] >= tau.shape[-1], + lambda: f"torch.ormqr: other.shape[{left_size_condition}] must be greater than or equal to tau.shape[-1]", + ) + torch._check( + other.shape[left_size_condition] == input.shape[-2], + lambda: f"torch.ormqr: other.shape[{left_size_condition}] must be equal to input.shape[-2]", + ) + + torch._check( + tau.shape[-1] <= input.shape[-1], + lambda: "torch.ormqr: tau.shape[-1] must be less than or equal to input.shape[-1]", + ) + + torch._check( + input.ndim - tau.ndim == 1, + lambda: ( + f"torch.ormqr: Expected tau to have one dimension less than input, " + f"but got tau.ndim equal to {tau.ndim} and input.ndim is equal to {input.ndim}" + ), + ) + torch._check( + input.ndim == other.ndim, + lambda: ( + f"torch.ormqr: Expected other to have the same number of dimensions as input, " + f"but got other.ndim equal to {other.ndim} and input.ndim is equal to {input.ndim}" + ), + ) + + if input.ndim > 2: + expected_batch_shape = input.shape[:-2] + actual_batch_tau_shape = tau.shape[:-1] + torch._check( + actual_batch_tau_shape == expected_batch_shape, + lambda: ( + f"torch.ormqr: Expected batch dimensions of tau to be " + f"equal to input.shape[:-2], but got 
{actual_batch_tau_shape}" + ), + ) + + actual_batch_other_shape = other.shape[:-2] + torch._check( + actual_batch_other_shape == expected_batch_shape, + lambda: ( + f"torch.ormqr: Expected batch dimensions of other to be " + f"equal to input.shape[:-2], but got {actual_batch_other_shape}" + ), + ) + + torch._check( + tau.dtype == input.dtype, + lambda: ( + f"torch.ormqr: Expected input and tau to have the same dtype, " + f"but input has dtype {input.dtype} and tau has dtype {tau.dtype}" + ), + ) + torch._check( + other.dtype == input.dtype, + lambda: ( + f"torch.ormqr: Expected input and other to have the same dtype, " + f"but input has dtype {input.dtype} and other has dtype {other.dtype}" + ), + ) + + checkSameDevice("torch.ormqr", tau, input, "tau") + checkSameDevice("torch.ormqr", other, input, "other") + + return torch.empty_strided( + size=other.shape, + stride=make_contiguous_strides_for(other.shape, row_major=False), + dtype=other.dtype, + device=other.device, + ) + + +def _padding_check_valid_input(input, padding, *, dim): + torch._check( + len(padding) == 2 * dim, + lambda: f"padding size is expected to be {2 * dim}, but got: {len(padding)}", + ) + + input_dim = input.ndim + + is_batch_mode = input_dim == (dim + 2) + + valid_batch_mode = is_batch_mode + valid_non_batch_mode = not is_batch_mode + + if is_batch_mode: + # allow batch size of 0-dim. + for d in range(1, input_dim): + valid_batch_mode = valid_batch_mode and input.size(d) != 0 + else: + for d in range(0, input_dim): + valid_non_batch_mode = valid_non_batch_mode and input.size(d) != 0 + + # allow empty batch size but not other dimensions. 
+ torch._check( + valid_batch_mode or valid_non_batch_mode, + lambda: ( + f"Expected {dim + 1}D or {dim + 2}D (batch mode) tensor with possibly 0 batch size " + f"and other non-zero dimensions for input, but got: {input.shape}" + ), + ) + + +def _pad1d_common(input, padding, *, is_reflection): + dim_plane = 0 + dim_w = 1 + nbatch = 1 + + if input.ndim == 3: + nbatch = input.size(0) + dim_w += 1 + dim_plane += 1 + + _padding_check_valid_input(input, padding, dim=1) + + pad_l, pad_r = padding + + nplane = input.size(dim_plane) + input_w = input.size(dim_w) + output_w = input_w + pad_l + pad_r + + if is_reflection: + torch._check( + pad_l < input_w and pad_r < input_w, + lambda: ( + f"Argument #4: Padding size should be less than the corresponding input dimension, " + f"but got: padding ({pad_l}, {pad_r}) at dimension {dim_w} of input {input.shape}" + ), + ) + + torch._check( + output_w >= 1, + lambda: f"input (W: {input_w}) is too small. Calculated output W: {output_w}", + ) + + if input.ndim == 2: + return input.new_empty((nplane, output_w)) + else: + return input.new_empty((nbatch, nplane, output_w)) + + +@register_meta(aten.reflection_pad1d) +@out_wrapper() +def meta_reflection_pad1d(input, padding): + return _pad1d_common(input, padding, is_reflection=True) + + +@register_meta(aten.replication_pad1d) +@out_wrapper() +def meta_replication_pad1d(input, padding): + return _pad1d_common(input, padding, is_reflection=False) + + +def _pad1d_backward_common(grad_output, input, padding, *, is_reflection): + dim_w = 1 + if not is_reflection: + torch._check(len(padding) == 2, lambda: "padding size is expected to be 2") + + if input.ndim == 3: + dim_w += 1 + + pad_l, pad_r = padding + + input_w = input.size(dim_w) + output_w = input_w + pad_l + pad_r + + if is_reflection: + torch._check( + pad_l < input_w and pad_r < input_w, + lambda: ( + f"Argument #4: Padding size should be less than the corresponding input dimension, " + f"but got: padding ({pad_l}, {pad_r}) at 
dimension {dim_w} of input {input.shape}" + ), + ) + + torch._check( + output_w == grad_output.size(dim_w), + lambda: f"grad_output width unexpected. Expected: {output_w}, Got: {grad_output.size(dim_w)}", + ) + + return input.new_empty(input.shape) + + +@register_meta(aten.reflection_pad1d_backward) +@out_wrapper("grad_input") +def meta_reflection_pad1d_backward(grad_output, input, padding): + return _pad1d_backward_common(grad_output, input, padding, is_reflection=True) + + +@register_meta(aten.replication_pad1d_backward) +@out_wrapper("grad_input") +def meta_replication_pad1d_backward(grad_output, input, padding): + return _pad1d_backward_common(grad_output, input, padding, is_reflection=False) + + +def _pad2d_common(input, padding, *, is_reflection): + dim_w = 2 + dim_h = 1 + dim_slices = 0 + nbatch = 1 + + _padding_check_valid_input(input, padding, dim=2) + + ndim = input.ndim + if ndim == 4: + nbatch = input.size(0) + dim_w += 1 + dim_h += 1 + dim_slices += 1 + + pad_l, pad_r, pad_t, pad_b = padding + + nplane = input.size(dim_slices) + input_h = input.size(dim_h) + input_w = input.size(dim_w) + output_h = input_h + pad_t + pad_b + output_w = input_w + pad_l + pad_r + + if is_reflection: + torch._check( + pad_l < input_w and pad_r < input_w, + lambda: ( + f"Argument #4: Padding size should be less than the corresponding input dimension, " + f"but got: padding ({pad_l}, {pad_r}) at dimension {dim_w} of input {input.shape}" + ), + ) + torch._check( + pad_t < input_h and pad_b < input_h, + lambda: ( + f"Argument #6: Padding size should be less than the corresponding input dimension, " + f"but got: padding ({pad_t}, {pad_b}) at dimension {dim_h} of input {input.shape}" + ), + ) + + torch._check( + output_w >= 1 or output_h >= 1, + lambda: ( + f"input (H: {input_h} W: {input_w}) is too small. 
" + f"Calculated output H: {output_h} W: {output_w}" + ), + ) + + if input.ndim == 3: + return input.new_empty((nplane, output_h, output_w)) + else: + return input.new_empty((nbatch, nplane, output_h, output_w)) + + +@register_meta(aten.reflection_pad2d) +@out_wrapper() +def meta_reflection_pad2d(input, padding): + return _pad2d_common(input, padding, is_reflection=True) + + +@register_meta(aten.replication_pad2d) +@out_wrapper() +def meta_replication_pad2d(input, padding): + return _pad2d_common(input, padding, is_reflection=False) + + +@register_meta( + [ + aten.reflection_pad2d_backward.default, + aten.reflection_pad2d_backward.grad_input, + aten.replication_pad2d_backward.default, + aten.replication_pad2d_backward.grad_input, + ] +) +@out_wrapper("grad_input") +def meta_pad2d_backward(grad_output, self, padding): + dim_w = 2 + dim_h = 1 + dim_plane = 0 + nbatch = 1 + + self_shape = self.shape + if self.dim() == 4: + nbatch = self_shape[0] + dim_w += 1 + dim_h += 1 + dim_plane += 1 + + pad_l, pad_r, pad_t, pad_b = padding + + nplane = self_shape[dim_plane] + input_h = self_shape[dim_h] + input_w = self_shape[dim_w] + output_h = input_h + pad_t + pad_b + output_w = input_w + pad_l + pad_r + + torch._check( + output_w == grad_output.size(dim_w), + lambda: f"grad_output width unexpected. Expected: {output_w}, Got: {grad_output.size(dim_w)}", + ) + torch._check( + output_h == grad_output.size(dim_h), + lambda: f"grad_output height unexpected. 
Expected: {output_h}, Got: {grad_output.size(dim_h)}", + ) + return self.new_empty(self.shape) + + +def _pad3d_common(input, padding, *, is_reflection): + dim_w = 3 + dim_h = 2 + dim_d = 1 + dim_plane = 0 + + _padding_check_valid_input(input, padding, dim=3) + + batch_mode = input.ndim == 5 + if batch_mode: + nbatch = input.size(0) + dim_w += 1 + dim_h += 1 + dim_d += 1 + dim_plane += 1 + + pad_l, pad_r, pad_t, pad_b, pad_f, pad_bk = padding + + nplane = input.size(dim_plane) + input_d = input.size(dim_d) + input_h = input.size(dim_h) + input_w = input.size(dim_w) + output_d = input_d + pad_f + pad_bk + output_h = input_h + pad_t + pad_b + output_w = input_w + pad_l + pad_r + + if is_reflection: + torch._check( + pad_l < input_w and pad_r < input_w, + lambda: ( + f"Argument #4: Padding size should be less than the corresponding input dimension, " + f"but got: padding ({pad_l}, {pad_r}) at dimension {dim_w} of input {input.shape}" + ), + ) + torch._check( + pad_t < input_h and pad_b < input_h, + lambda: ( + f"Argument #6: Padding size should be less than the corresponding input dimension, " + f"but got: padding ({pad_t}, {pad_b}) at dimension {dim_h} of input {input.shape}" + ), + ) + torch._check( + pad_f < input_d and pad_bk < input_d, + lambda: ( + f"Argument #8: Padding size should be less than the corresponding input dimension, " + f"but got: padding ({pad_f}, {pad_bk}) at dimension {dim_d} of input {input.shape}" + ), + ) + + torch._check( + output_w >= 1 or output_h >= 1 or output_d >= 1, + lambda: ( + f"input (D: {input_d} H: {input_h} W: {input_w}) is too small. 
" + f"Calculated output D: {output_d} H: {output_h} W: {output_w}" + ), + ) + + if batch_mode: + return input.new_empty((nbatch, nplane, output_d, output_h, output_w)) + else: + return input.new_empty((nplane, output_d, output_h, output_w)) + + +@register_meta(aten.reflection_pad3d) +@out_wrapper() +def meta_reflection_pad3d(input, padding): + return _pad3d_common(input, padding, is_reflection=True) + + +@register_meta(aten.replication_pad3d) +@out_wrapper() +def meta_replication_pad3d(input, padding): + return _pad3d_common(input, padding, is_reflection=False) + + +@register_meta( + [ + aten.reflection_pad3d_backward.default, + aten.reflection_pad3d_backward.grad_input, + aten.replication_pad3d_backward.default, + aten.replication_pad3d_backward.grad_input, + ] +) +@out_wrapper("grad_input") +def meta_pad3d_backward(grad_output, input, padding): + torch._check(len(padding) == 6, lambda: "padding size is expected to be 6") + assert input.ndim > 3 + assert grad_output.ndim == input.ndim + + dim_w = 3 + dim_h = 2 + dim_d = 1 + + if input.ndim == 5: + dim_w += 1 + dim_h += 1 + dim_d += 1 + + pad_l, pad_r, pad_t, pad_b, pad_f, pad_bk = padding + + input_d = input.size(dim_d) + input_h = input.size(dim_h) + input_w = input.size(dim_w) + output_d = input_d + pad_f + pad_bk + output_h = input_h + pad_t + pad_b + output_w = input_w + pad_l + pad_r + + torch._check( + output_w == grad_output.size(dim_w), + lambda: f"grad_output width unexpected. Expected: {output_w}, Got: {grad_output.size(dim_w)}", + ) + torch._check( + output_h == grad_output.size(dim_h), + lambda: f"grad_output height unexpected. Expected: {output_h}, Got: {grad_output.size(dim_h)}", + ) + torch._check( + output_d == grad_output.size(dim_d), + lambda: f"grad_output depth unexpected. 
Expected: {output_d}, Got: {grad_output.size(dim_d)}", + ) + + return input.new_empty(input.shape) + + +@register_meta(aten._pdist_forward) +@out_wrapper() +def meta__pdist_forward(self: Tensor, p: float = 2) -> Tensor: + torch._check( + self.is_contiguous(), lambda: "_pdist_forward requires contiguous input" + ) + n = self.size(0) + if n <= 1: + return self.new_empty([0]).to(memory_format=torch.legacy_contiguous_format) # type: ignore[call-overload] + else: + return self.new_empty((n * (n - 1) // 2,)).to( + memory_format=torch.legacy_contiguous_format + ) # type: ignore[call-overload] + + +@register_meta(aten._pdist_backward) +@out_wrapper() +def meta__pdist_backward(grad: Tensor, self: Tensor, p: float, pdist: Tensor) -> Tensor: + torch._check( + self.is_contiguous(), lambda: "_pdist_backward requires self to be contiguous" + ) + torch._check( + pdist.is_contiguous(), lambda: "_pdist_backward requires pdist to be contiguous" + ) + return torch.empty_like(self, memory_format=torch.legacy_contiguous_format) + + +@register_meta([aten.baddbmm.default, aten.baddbmm.out]) +@out_wrapper() +def meta_baddbmm(self, batch1, batch2, *, beta=1, alpha=1): + dim1 = batch1.size(0) + dim2 = batch1.size(1) + dim3 = batch2.size(2) + self = self.expand((dim1, dim2, dim3)) + torch._check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor") + torch._check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor") + torch._check( + self.dtype == batch1.dtype == batch2.dtype, + lambda: f"Input dtypes must be the same, got: input: {self.dtype}, batch1: {batch1.dtype}, batch2: {batch2.dtype}", + ) + batch1_sizes = batch1.shape + batch2_sizes = batch2.shape + bs = batch1_sizes[0] + contraction_size = batch1_sizes[2] + torch._check( + batch2_sizes[0] == bs and batch2_sizes[1] == contraction_size, + lambda: ( + f"Expected size for first two dimensions of batch2 tensor to be: " + f"[{bs}, {contraction_size}] but got: [{batch2_sizes[0]}, {batch2_sizes[1]}]." 
+ ), + ) + return self.new_empty(self.size()) + + +@register_meta([aten.bernoulli.default, aten.bernoulli.out]) +@out_wrapper() +def meta_bernoulli(self, *, generator=None): + # https://github.com/pytorch/pytorch/issues/88612 + return torch.empty_like(self).contiguous() + + +@register_meta(aten.bernoulli_.float) +def meta_bernoulli_(self, p=0.5, generator=None): + return self + + +@register_meta(aten.bernoulli.p) +def meta_bernoulli_p(self, p=0.5, generator=None): + # https://github.com/pytorch/pytorch/issues/88612 + return torch.empty_like(self).contiguous() + + +@register_meta(aten._fused_moving_avg_obs_fq_helper.default) +def meta__fused_moving_avg_obs_fq_helper( + self, + observer_on, + fake_quant_on, + running_min, + running_max, + scale, + zero_point, + averaging_const, + quant_min, + quant_max, + ch_axis, + per_row_fake_quant=False, + symmetric_quant=False, +): + torch._check( + ch_axis < self.dim(), + lambda: "Error in fused_moving_avg_obs_fake_quant_cpu: ch_axis must be < self.dim()", + ) + mask = torch.empty_like(self, dtype=torch.bool) + return (torch.empty_like(self), mask) + + +@register_meta(aten.mm) +@out_wrapper() +def meta_mm(a, b): + torch._check(a.dim() == 2, lambda: "a must be 2D") + torch._check(b.dim() == 2, lambda: "b must be 2D") + N, M1 = a.shape + M2, P = b.shape + torch._check( + M1 == M2, + lambda: f"a and b must have same reduction dim, but got [{N}, {M1}] X [{M2}, {P}].", + ) + return a.new_empty(N, P) + + +def _compute_reduction_shape(self, dims, keepdim): + if keepdim: + return tuple(self.shape[i] if i not in dims else 1 for i in range(self.ndim)) + + return utils.compute_reduction_output_shape(self.shape, dims) + + +# FakeTensors (meta tensors with a device) will report device as meta +# when running meta kernels. 
Here, access the "fake device" of FakeTensor if it +# exists so meta kernels which have diverge per device will be more +# accurate when run with FakeTensors +def device_hint(tensor) -> "str": + if isinstance(tensor, torch._subclasses.FakeTensor): + return tensor.fake_device.type + else: + return "cuda" # default to cuda + + +def calc_conv_nd_return_shape( + input_tensor: torch.Tensor, + weight: torch.Tensor, + stride: Union[List[int], int], + padding: Union[List[int], int], + dilation: Union[List[int], int], + is_transposed: bool, + groups: int, + output_padding: Optional[Union[List[int], int]] = None, +): + def _formula(ln: int, p: int, d: int, k: int, s: int) -> int: + """ + Formula to apply to calculate the length of some dimension of the output + + See: https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html + + Args: + ln: length of the dimension + p: padding in that dim + d: dilation in that dim + k: kernel size in that dim + s: stride in that dim + Returns: + The output length + """ + return (ln + 2 * p - d * (k - 1) - 1) // s + 1 + + def _formula_transposed(ln: int, p: int, d: int, k: int, s: int, op: int) -> int: + """ + Formula to apply to calculate the length of some dimension of the output + if transposed convolution is used. 
+ See: https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html + + Args: + ln: length of the dimension + p: padding in that dim + d: dilation in that dim + k: kernel size in that dim + s: stride in that dim + op: output padding in that dim + + Returns: + The output length + """ + return (ln - 1) * s - 2 * p + d * (k - 1) + op + 1 + + kernel_size = weight.shape[2:] + dims = input_tensor.shape[2:] + if is_transposed: + out_channels = groups * weight.shape[1] + else: + out_channels = weight.shape[0] + if weight.shape[1] * groups != input_tensor.shape[1]: + raise RuntimeError("Invalid channel dimensions") + + ret_shape = [input_tensor.shape[0], out_channels] + if isinstance(stride, IntLike): + stride = [stride] * len(dims) + elif len(stride) == 1: + stride = [stride[0]] * len(dims) + + if isinstance(padding, IntLike): + padding = [padding] * len(dims) + elif len(padding) == 1: + padding = [padding[0]] * len(dims) + + if isinstance(dilation, IntLike): + dilation = [dilation] * len(dims) + elif len(dilation) == 1: + dilation = [dilation[0]] * len(dims) + + output_padding_list: Optional[List[int]] = None + if output_padding: + if isinstance(output_padding, IntLike): + output_padding_list = [output_padding] * len(dims) + elif len(output_padding) == 1: + output_padding_list = [output_padding[0]] * len(dims) + else: + output_padding_list = output_padding + + for i in range(len(dims)): + # If output_padding is present, we are dealing with a transposed convolution + if output_padding_list: + ret_shape.append( + _formula_transposed( + dims[i], + padding[i], + dilation[i], + kernel_size[i], + stride[i], + output_padding_list[i], + ) + ) + else: + ret_shape.append( + _formula(dims[i], padding[i], dilation[i], kernel_size[i], stride[i]) + ) + + return ret_shape + + +def is_channels_last(ten): + return torch._prims_common.suggest_memory_format(ten) == torch.channels_last + + +@register_meta(aten.convolution.default) +def meta_conv( + input_tensor: torch.Tensor, + 
weight: torch.Tensor, + bias: torch.Tensor, + stride: List[int], + padding: List[int], + dilation: List[int], + is_transposed: bool, + output_padding: List[int], + groups: int, +): + def pick_memory_format(): + if device_hint(input_tensor) == "cuda": + if is_channels_last(input_tensor) or is_channels_last(weight): + return torch.channels_last + else: + if is_channels_last(input_tensor): + return torch.channels_last + if input_tensor.is_contiguous(memory_format=torch.contiguous_format): + return torch.contiguous_format + elif input_tensor.is_contiguous(memory_format=torch.preserve_format): + return torch.preserve_format + + shape_out = calc_conv_nd_return_shape( + input_tensor, + weight, + stride, + padding, + dilation, + is_transposed, + groups, + output_padding if is_transposed else None, + ) + + out = input_tensor.new_empty(shape_out) + out = out.to(memory_format=pick_memory_format()) # type: ignore[call-overload] + return out + + +if torch._C._has_mkldnn: + _meta_lib_dont_use_me_use_register_meta_for_mkldnn = torch.library.Library( + "mkldnn", "IMPL", "Meta" + ) + + @register_meta(torch.ops.mkldnn._convolution_pointwise.default) + def meta_mkldnn_convolution_default( + input_tensor, + weight, + bias, + padding, + stride, + dilation, + groups, + attr, + scalars, + algorithm, + ): + shape_out = calc_conv_nd_return_shape( + input_tensor, weight, stride, padding, dilation, False, groups, [] + ) + out = input_tensor.new_empty(shape_out) + out_memory_format = torch.channels_last + out = out.to(memory_format=out_memory_format) # type: ignore[call-overload] + return out + + @register_meta(torch.ops.mkldnn._linear_pointwise.default) + def meta_linear_pointwise_default( + input_tensor, weight, bias, attr, scalars, algorithm + ): + return input_tensor.new_empty((*input_tensor.shape[:-1], weight.shape[0])) + + if torch._C.has_mkl: + _meta_lib_dont_use_me_use_register_meta_for_mkl = torch.library.Library( + "mkl", "IMPL", "Meta" + ) + + 
@register_meta(torch.ops.mkl._mkl_linear) + def meta_mkl_linear( + input_tensor, + packed_weight, + orig_weight, + bias, + batch_size, + ): + return input_tensor.new_empty( + (*input_tensor.shape[:-1], orig_weight.shape[0]) + ) + + _meta_lib_dont_use_me_use_register_meta_for_onednn = torch.library.Library( + "onednn", "IMPL", "Meta" + ) + + @register_meta(torch.ops.onednn.qconv2d_pointwise.default) + def meta_qconv2d_pointwise( + x, + x_scale, + x_zp, + w, # prepacked_weight + w_scale, + w_zp, + bias, + stride, + padding, + dilation, + groups, + output_scale, + output_zero_point, + output_dtype, + attr, + scalars, + algorithm, + ): + shape_out = calc_conv_nd_return_shape( + x, + w, + stride, + padding, + dilation, + False, + groups, + None, + ) + assert output_dtype in [torch.float32, torch.bfloat16] + out = x.new_empty(shape_out, dtype=output_dtype) + out = out.to(memory_format=torch.channels_last) + return out + + @register_meta(torch.ops.onednn.qlinear_pointwise.default) + def meta_qlinear_pointwise( + x, + x_scale, + x_zp, + w, + w_scale, + w_zp, + bias, + output_scale, + output_zero_point, + output_dtype, + post_op_name, + post_op_args, + post_op_algorithm, + ): + output_shape = list(x.shape) + # The weight has been transposed during the qlinear weight prepack process. 
+ output_shape[-1] = w.shape[1] + assert output_dtype in [torch.float32, torch.bfloat16] + out = x.new_empty(output_shape, dtype=output_dtype) + return out + + _meta_lib_dont_use_me_use_register_meta_for_quantized = torch.library.Library( + "quantized", "IMPL", "Meta" + ) + + @register_meta(torch.ops.quantized.max_pool2d) + def meta_quantized_max_pool2d( + input, + kernel_size, + stride=(), + padding=(0,), + dilation=(1,), + ceil_mode=False, + ): + ( + nInputPlane, + outputHeight, + outputWidth, + ) = max_pool2d_checks_and_compute_shape( + input, kernel_size, stride, padding, dilation, ceil_mode + ) + nbatch = input.size(-4) if input.dim() == 4 else 1 + memory_format = torch.channels_last + if input.dim() == 3: + size = [nInputPlane, outputHeight, outputWidth] + else: + size = [nbatch, nInputPlane, outputHeight, outputWidth] + return torch.empty( + size, + dtype=input.dtype, + device=input.device, + memory_format=memory_format, + ) + + +# from check_dim_size() in aten/src/ATen/TensorUtils.cpp. 
+def check_dim_size(tensor, dim, dim_size, size): + torch._check( + tensor.dim() == dim and tensor.shape[dim_size] == size, + lambda: f"Expected a tensor of dimension {dim} and tensor.size[{dim_size}] == {size}, " + + f"but got : dimension {tensor.dim()} and tensor.size[{dim_size}] = {tensor.shape[dim_size]}", + ) + + +@register_meta(aten.avg_pool2d.default) +def meta_avg_pool2d( + input, + kernel_size, + stride=(), + padding=(0,), + ceil_mode=False, + count_include_pad=True, + divisor_override=None, +): + def unpack(name, val): + torch._check( + len(val) in [1, 2], + lambda: f"avg_pool2d: {name} must either be a single int, or a tuple of two ints", + ) + H = val[0] + W = H if len(val) == 1 else val[1] + return H, W + + kH, kW = unpack("kernel_size", kernel_size) + torch._check( + len(stride) in [0, 1, 2], + lambda: "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints", + ) + if len(stride) == 0: + dH, dW = kH, kW + elif len(stride) == 1: + dH, dW = stride[0], stride[0] + else: + dH, dW = unpack("stride", stride) + + padH, padW = unpack("padding", padding) + + torch._check( + divisor_override is None or divisor_override != 0, + lambda: "divisor must be not zero", + ) + + nbatch = input.size(-4) if input.dim() == 4 else 1 + nInputPlane = input.size(-3) + inputHeight = input.size(-2) + inputWidth = input.size(-1) + + outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, 1, ceil_mode) + outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, 1, ceil_mode) + + memory_format = utils.suggest_memory_format(input) + pool2d_shape_check( + input, + kH, + kW, + dH, + dW, + padH, + padW, + 1, + 1, + nInputPlane, + inputHeight, + inputWidth, + outputHeight, + outputWidth, + memory_format, + ) + + if input.dim() == 3: + size = [nInputPlane, outputHeight, outputWidth] + else: + size = [nbatch, nInputPlane, outputHeight, outputWidth] + return torch.empty( + size, + dtype=input.dtype, + device=input.device, + memory_format=memory_format, + 
) + + +# from avg_pool2d_backward_shape_check() in aten/src/ATen/native/Pool.h. +def avg_pool2d_backward_shape_check( + input, + gradOutput, + nbatch, + kH, + kW, + dH, + dW, + padH, + padW, + nInputPlane, + inputHeight, + inputWidth, + outputHeight, + outputWidth, + mem_format, +): + pool2d_shape_check( + input, + kH, + kW, + dH, + dW, + padH, + padW, + 1, + 1, + nInputPlane, + inputHeight, + inputWidth, + outputHeight, + outputWidth, + mem_format, + ) + + ndim = input.dim() + nOutputPlane = nInputPlane + + check_dim_size(gradOutput, ndim, ndim - 3, nOutputPlane) + check_dim_size(gradOutput, ndim, ndim - 2, outputHeight) + check_dim_size(gradOutput, ndim, ndim - 1, outputWidth) + + +# Don't override the C++ registration. +@register_meta(aten.avg_pool2d_backward.default) +def meta_avg_pool2d_backward( + gradOutput_, + input, + kernel_size, + stride, + padding, + ceil_mode, + count_include_pad, + divisor_override, +): + # From aten/src/ATen/native/AveragePool2d.cpp structured kernel meta func. 
+ torch._check( + len(kernel_size) == 1 or len(kernel_size) == 2, + lambda: "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints", + ) + kH = kernel_size[0] + kW = kH if len(kernel_size) == 1 else kernel_size[1] + torch._check( + len(stride) == 0 or len(stride) == 1 or len(stride) == 2, + lambda: "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints", + ) + dH = kH if len(stride) == 0 else stride[0] + dW = kW if len(stride) == 0 else dH if len(stride) == 1 else stride[1] + torch._check( + len(padding) == 1 or len(padding) == 2, + lambda: "avg_pool2d: padding must either be a single int, or a tuple of two ints", + ) + padH = padding[0] + padW = padH if len(padding) == 1 else padding[1] + + torch._check( + divisor_override is None or divisor_override != 0, + lambda: "divisor must be not zero", + ) + + input_size = input.shape + nbatch = input_size[-4] if input.dim() == 4 else 1 + nInputPlane = input_size[-3] + inputHeight = input_size[-2] + inputWidth = input_size[-1] + + outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, 1, ceil_mode) + outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, 1, ceil_mode) + + mem_format = utils.suggest_memory_format(input) + + avg_pool2d_backward_shape_check( + input, + gradOutput_, + nbatch, + kH, + kW, + dH, + dW, + padH, + padW, + nInputPlane, + inputHeight, + inputWidth, + outputHeight, + outputWidth, + mem_format, + ) + + return torch.empty( + input_size, + dtype=input.dtype, + device=input.device, + memory_format=mem_format, + ) + + +@register_meta(aten.avg_pool3d) +@out_wrapper() +def meta_avg_pool3d( + input, + kernel_size, + stride=(), + padding=(0,), + ceil_mode=False, + count_include_pad=True, + divisor_override=None, +): + torch._check( + len(kernel_size) in (1, 3), + lambda: "avg_pool3d: kernel_size must be a single int, or a tuple of three ints", + ) + kT = kernel_size[0] + kH = kT if len(kernel_size) == 1 else kernel_size[1] + kW = kT if 
len(kernel_size) == 1 else kernel_size[2] + + torch._check( + not stride or len(stride) in (1, 3), + lambda: "avg_pool3d: stride must be omitted, a single int, or a tuple of three ints", + ) + dT = kT if not stride else stride[0] + dH = kH if not stride else (dT if len(stride) == 1 else stride[1]) + dW = kW if not stride else (dT if len(stride) == 1 else stride[2]) + + torch._check( + len(padding) in (1, 3), + lambda: "avg_pool3d: padding must be a single int, or a tuple of three ints", + ) + padT = padding[0] + padH = padT if len(padding) == 1 else padding[1] + padW = padT if len(padding) == 1 else padding[2] + + torch._check( + input.ndim in (4, 5), + lambda: "non-empty 4D or 5D (batch mode) tensor expected for input", + ) + + torch._check( + not divisor_override or divisor_override != 0, + lambda: "divisor must be not zero", + ) + + nbatch = input.size(0) + nslices = input.size(-4) + itime = input.size(-3) + iheight = input.size(-2) + iwidth = input.size(-1) + + otime = pooling_output_shape(itime, kT, padT, dT, 1, ceil_mode) + oheight = pooling_output_shape(iheight, kH, padH, dH, 1, ceil_mode) + owidth = pooling_output_shape(iwidth, kW, padW, dW, 1, ceil_mode) + + pool3d_shape_check( + input, + nslices, + kT, + kH, + kW, + dT, + dH, + dW, + padT, + padH, + padW, + 1, + 1, + 1, + itime, + iheight, + iwidth, + otime, + oheight, + owidth, + "avg_pool3d()", + check_input_size=True, + ) + + if input.ndim == 4: + return input.new_empty((nslices, otime, oheight, owidth)) + else: + return input.new_empty((nbatch, nslices, otime, oheight, owidth)) + + +@register_meta(aten.avg_pool3d_backward) +@out_wrapper("grad_input") +def meta_avg_pool3d_backward( + grad_output, + input, + kernel_size, + stride, + padding, + ceil_mode, + count_include_pad, + divisor_override, +): + torch._check( + len(kernel_size) in (1, 3), + lambda: "avg_pool3d: kernel_size must be a single int, or a tuple of three ints", + ) + kT = kernel_size[0] + kH = kT if len(kernel_size) == 1 else 
kernel_size[1] + kW = kT if len(kernel_size) == 1 else kernel_size[2] + + torch._check( + not stride or len(stride) in (1, 3), + lambda: "avg_pool3d: stride must be omitted, a single int, or a tuple of three ints", + ) + dT = kT if not stride else stride[0] + dH = kH if not stride else (dT if len(stride) == 1 else stride[1]) + dW = kW if not stride else (dT if len(stride) == 1 else stride[2]) + + torch._check( + len(padding) in (1, 3), + lambda: "avg_pool3d: padding must be a single int, or a tuple of three ints", + ) + padT = padding[0] + padH = padT if len(padding) == 1 else padding[1] + padW = padT if len(padding) == 1 else padding[2] + + torch._check( + input.ndim in (4, 5), + lambda: "non-empty 4D or 5D (batch mode) tensor expected for input", + ) + + torch._check( + not divisor_override or divisor_override != 0, + lambda: "divisor must be not zero", + ) + + nslices = input.size(-4) + itime = input.size(-3) + iheight = input.size(-2) + iwidth = input.size(-1) + + otime_for_shape_check = pooling_output_shape(itime, kT, padT, dT, 1, ceil_mode) + oheight_for_shape_check = pooling_output_shape(iheight, kH, padH, dH, 1, ceil_mode) + owidth_for_shape_check = pooling_output_shape(iwidth, kW, padW, dW, 1, ceil_mode) + + avg_pool3d_backward_shape_check( + input, + grad_output, + nslices, + kT, + kH, + kW, + dT, + dH, + dW, + padT, + padH, + padW, + itime, + iheight, + iwidth, + otime_for_shape_check, + oheight_for_shape_check, + owidth_for_shape_check, + "avg_pool3d_backward()", + ) + + return input.new_empty(input.shape) + + +@register_meta(aten._adaptive_avg_pool2d.default) +def meta_adaptive_avg_pool2d(self, output_size): + torch._check( + self.ndim == 3 or self.ndim == 4, + lambda: f"Expected 3D or 4D tensor, but got {self.shape}", + ) + output_shape = self.shape[:-2] + tuple(output_size) + memory_format = utils.suggest_memory_format(self) + # need to set memory_format to preserve the memory format of the input + # channel last input should have channel last output 
+ return torch.empty( + output_shape, + dtype=self.dtype, + device=self.device, + memory_format=memory_format, + ) + + +@register_meta(aten._adaptive_avg_pool3d.default) +def meta_adaptive_avg_pool3d(self, output_size): + torch._check( + self.ndim == 4 or self.ndim == 5, + lambda: f"Expected 4D or 5D tensor, but got {self.shape}", + ) + return self.new_empty(self.shape[:-3] + tuple(output_size)) + + +@register_meta(aten._adaptive_avg_pool2d_backward.default) +def meta__adaptive_avg_pool2d_backward(grad_out, self): + ndim = grad_out.ndim + for i in range(1, ndim): + torch._check( + grad_out.size(i) > 0, + lambda: f"adaptive_avg_pool2d_backward(): Expected grad_output to have non-zero \ + size for non-batch dimensions, {grad_out.shape} with dimension {i} being empty", + ) + torch._check( + ndim == 3 or ndim == 4, + lambda: f"adaptive_avg_pool2d_backward(): Expected 3D or 4D tensor, but got {self.shape}", + ) + torch._check( + self.dtype == grad_out.dtype, + lambda: f"expected dtype {self.dtype} for `grad_output` but got dtype {grad_out.dtype}", + ) + memory_format = torch.contiguous_format + if is_channels_last(self): + memory_format = torch.channels_last + return self.new_empty(self.shape).to(memory_format=memory_format) + + +@register_meta(aten._adaptive_avg_pool3d_backward) +@out_wrapper("grad_input") +def meta__adaptive_avg_pool3d_backward(grad_output, self): + _adaptive_pool_empty_output_check(grad_output, "adaptive_avg_pool3d_backward") + return torch.empty_like(self, memory_format=torch.legacy_contiguous_format) + + +def _adaptive_pool_empty_output_check(grad_output: Tensor, arg_name: str): + ndim = grad_output.ndim + for i in range(1, ndim): + torch._check( + grad_output.size(i) > 0, + lambda: ( + f"{arg_name}(): Expected grad_output to have non-zero size for non-batch dimensions, " + f"but grad_output has sizes {grad_output.shape} with dimension {i} being empty" + ), + ) + + +@register_meta(aten.adaptive_max_pool2d) +@out_wrapper("out", "indices") +def 
meta_adaptive_max_pool2d(input, output_size): + ndim = input.ndim + torch._check( + ndim in (3, 4), + lambda: f"adaptive_max_pool2d(): Expected 3D or 4D tensor, but got: {input.shape}", + ) + for i in range(1, ndim): + torch._check( + input.size(i) > 0, + lambda: ( + f"adaptive_max_pool2d(): Expected input to have non-zero size for non-batch dimensions, " + f"but input has sizes {input.shape} with dimension {i} being empty" + ), + ) + + torch._check( + len(output_size) == 2, + lambda: "adaptive_max_pool2d(): internal error: output_size.size() must be 2", + ) + + dimH = 1 + sizeB = 1 + sizeD = 0 + + if input.ndim == 4: + sizeB = input.size(0) + dimH += 1 + + sizeD = input.size(dimH - 1) + osizeH, osizeW = output_size + + if input.ndim == 3: + out_shape = (sizeD, osizeH, osizeW) + out = input.new_empty(out_shape) + indices = input.new_empty(out_shape, dtype=torch.int64) + return out, indices + else: + out_shape = (sizeB, sizeD, osizeH, osizeW) # type: ignore[assignment] + memory_format = utils.suggest_memory_format(input) + out = input.new_empty(out_shape).to(memory_format=memory_format) + indices = input.new_empty(out_shape, dtype=torch.int64).to( + memory_format=memory_format + ) + return out, indices + + +@register_meta(aten.adaptive_max_pool2d_backward) +@out_wrapper("grad_input") +def meta_adaptive_max_pool2d_backward(grad_output, input, indices): + ndim = grad_output.ndim + torch._check( + ndim in (3, 4), + lambda: f"adaptive_max_pooling2d_backward(): Expected 3D or 4D grad_output, but got: {grad_output.shape}", + ) + + _adaptive_pool_empty_output_check(grad_output, "adaptive_max_pool2d_backward") + + torch._check( + input.dtype == grad_output.dtype, + lambda: f"expected dtype {input.dtype} for `grad_output` but got dtype {grad_output.dtype}", + ) + + memory_format = utils.suggest_memory_format(input) + return input.new_empty(input.shape).to(memory_format=memory_format) + + +@register_meta(aten.adaptive_max_pool3d) +@out_wrapper("out", "indices") +def 
meta_adaptive_max_pool3d(input, output_size): + ndim = input.ndim + torch._check( + ndim in (4, 5), + lambda: f"adaptive_max_pool3d(): Expected 4D or 5D tensor, but got: {input.shape}", + ) + for i in range(1, ndim): + torch._check( + input.size(i) > 0, + lambda: ( + f"adaptive_max_pool3d(): Expected input to have non-zero size for non-batch dimensions, " + f"but input has sizes {input.shape} with dimension {i} being empty" + ), + ) + + torch._check( + len(output_size) == 3, + lambda: "adaptive_max_pool3d(): internal error: output_size.size() must be 3", + ) + + dimD = 0 + sizeB = 1 + sizeD = 0 + + if ndim == 5: + sizeB = input.size(0) + dimD += 1 + + sizeD = input.size(dimD) + osizeT, osizeH, osizeW = output_size + + if ndim == 4: + out_shape = (sizeD, osizeT, osizeH, osizeW) + else: + out_shape = (sizeB, sizeD, osizeT, osizeH, osizeW) # type: ignore[assignment] + + out = input.new_empty(out_shape) + indices = input.new_empty(out_shape, dtype=torch.int64) + + return out, indices + + +@register_meta(aten.adaptive_max_pool3d_backward) +@out_wrapper("grad_input") +def meta_adaptive_max_pool3d_backward(grad_output, input, indices): + _adaptive_pool_empty_output_check(grad_output, "adaptive_max_pool3d_backward") + return input.new_empty(input.shape) + + +@register_meta(aten.repeat_interleave.Tensor) +def meta_repeat_interleave_Tensor(repeats, output_size=None): + if output_size is None: + raise RuntimeError("cannot repeat_interleave a meta tensor without output_size") + return repeats.new_empty(output_size) + + +@register_meta([aten.complex.default, aten.complex.out]) +@out_wrapper() +def meta_complex(real, imag): + assert real.dtype.is_floating_point + assert imag.dtype.is_floating_point + out_shape = _broadcast_shapes(real.shape, imag.shape) + return real.new_empty(out_shape, dtype=corresponding_complex_dtype(real.dtype)) + + +@register_meta([aten.nonzero_static.default, aten.nonzero_static.out]) +@out_wrapper() +def nonzero_static(self, *, size: int, fill_value: int 
= -1): + return self.new_empty((size, self.dim()), dtype=torch.long) + + +@register_meta([aten.index.Tensor, aten._unsafe_index.Tensor]) +def meta_index_Tensor(self, indices): + torch._check(bool(indices), lambda: "at least one index must be provided") + # aten::index is the internal advanced indexing implementation + # checkIndexTensorTypes and expandTensors + result: List[Optional[Tensor]] = [] + for i, index in enumerate(indices): + if index is not None: + torch._check( + index.dtype in [torch.long, torch.int, torch.int8, torch.bool], + lambda: "tensors used as indices must be long, int, byte or bool tensors", + ) + if index.dtype in [torch.int8, torch.bool]: + nonzero = index.nonzero() + k = len(result) + torch._check_index( + k + index.ndim <= self.ndim, + lambda: f"too many indices for tensor of dimension {self.ndim}", + ) + for j in range(index.ndim): + torch._check_index( + index.shape[j] == self.shape[k + j], + lambda: f"The shape of the mask {index.shape} at index {i} " + f"does not match the shape of the indexed tensor {self.shape} at index {k + j}", + ) + result.append(nonzero.select(1, j)) + else: + result.append(index) + else: + result.append(index) + indices = result + torch._check( + len(indices) <= self.ndim, + lambda: f"too many indices for tensor of dimension {self.ndim} (got {len(indices)})", + ) + # expand_outplace + import torch._refs as refs # avoid import cycle in mypy + + indices = list(refs._maybe_broadcast(*indices)) + # add missing null tensors + while len(indices) < self.ndim: + indices.append(None) + + # hasContiguousSubspace + # true if all non-null tensors are adjacent + # See: + # https://numpy.org/doc/stable/user/basics.indexing.html#combining-advanced-and-basic-indexing + # https://stackoverflow.com/questions/53841497/why-does-numpy-mixed-basic-advanced-indexing-depend-on-slice-adjacency + state = 0 + has_contiguous_subspace = False + for index in indices: + if state == 0: + if index is not None: + state = 1 + elif state == 1: + 
if index is None: + state = 2 + else: + if index is not None: + break + else: + has_contiguous_subspace = True + + # transposeToFront + # This is the logic that causes the newly inserted dimensions to show up + # at the beginning of the tensor, if they're not contiguous + if not has_contiguous_subspace: + dims = [] + transposed_indices = [] + for i, index in enumerate(indices): + if index is not None: + dims.append(i) + transposed_indices.append(index) + for i, index in enumerate(indices): + if index is None: + dims.append(i) + transposed_indices.append(index) + self = self.permute(dims) + indices = transposed_indices + + # AdvancedIndex::AdvancedIndex + # Now we can assume the indices have contiguous subspace + # This is simplified from AdvancedIndex which goes to more effort + # to put the input and indices in a form so that TensorIterator can + # take them. If we write a ref for this, probably that logic should + # get implemented + before_shape: List[int] = [] + after_shape: List[int] = [] + replacement_shape: List[int] = [] + for dim, index in enumerate(indices): + if index is None: + if replacement_shape: + after_shape.append(self.shape[dim]) + else: + before_shape.append(self.shape[dim]) + else: + replacement_shape = list(index.shape) + return self.new_empty(before_shape + replacement_shape + after_shape) + + +@register_meta([aten.convolution_backward.default]) +def meta_convolution_backward( + grad_output_, + input_, + weight_, + bias_sizes_opt, + stride, + padding, + dilation, + transposed, + output_padding, + groups, + output_mask, +): + # High level logic taken from slow_conv3d_backward_cpu which should + # be representative of all convolution_backward impls + backend_grad_input = None + backend_grad_weight = None + backend_grad_bias = None + + if output_mask[0]: + backend_grad_input = grad_output_.new_empty(input_.size()) + if output_mask[1]: + backend_grad_weight = grad_output_.new_empty(weight_.size()) + if output_mask[2]: + backend_grad_bias = 
grad_output_.new_empty(bias_sizes_opt) + + return (backend_grad_input, backend_grad_weight, backend_grad_bias) + + +@register_meta([aten.addbmm.default, aten.addbmm.out]) +@out_wrapper() +def meta_addbmm(self, batch1, batch2, *, beta=1, alpha=1): + dim1 = batch1.size(1) + dim2 = batch2.size(2) + self = self.expand((dim1, dim2)) + torch._check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor") + torch._check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor") + torch._check( + batch1.size(0) == batch2.size(0), + lambda: f"batch1 and batch2 must have same number of batches, got {batch1.size(0)} and {batch2.size(0)}", + ) + torch._check( + batch1.size(2) == batch2.size(1), + lambda: ( + f"Incompatible matrix sizes for bmm ({batch1.size(1)}x{batch1.size(2)} " + f"and {batch2.size(1)}x{batch2.size(2)})" + ), + ) + torch._check( + self.size(0) == dim1 and self.size(1) == dim2, + lambda: "self tensor does not match matmul output shape", + ) + return self.new_empty(self.size()) + + +def register_meta_foreach(ops): + def wrapper(fn): + def register(op): + op_name = str(op).split(".")[1] + scalar_op = getattr(aten, op_name.replace("_foreach_", "")) + + _add_op_to_registry( + meta_table, + op, + partial( + fn, + _scalar_op=scalar_op, + ), + ) + + pytree.tree_map_(register, ops) + return fn + + return wrapper + + +@register_meta_foreach( + [ + aten._foreach_abs, + aten._foreach_acos, + aten._foreach_asin, + aten._foreach_atan, + aten._foreach_ceil, + aten._foreach_cos, + aten._foreach_cosh, + aten._foreach_erf, + aten._foreach_erfc, + aten._foreach_exp, + aten._foreach_expm1, + aten._foreach_frac, + aten._foreach_floor, + aten._foreach_lgamma, + aten._foreach_log, + aten._foreach_log10, + aten._foreach_log1p, + aten._foreach_log2, + aten._foreach_neg, + aten._foreach_reciprocal, + aten._foreach_round, + aten._foreach_sigmoid, + aten._foreach_sign, + aten._foreach_sin, + aten._foreach_sinh, + aten._foreach_sqrt, + aten._foreach_tan, + aten._foreach_tanh, + 
aten._foreach_trunc, + aten._foreach_zero, + aten._foreach_add, + aten._foreach_sub, + aten._foreach_mul, + aten._foreach_div, + aten._foreach_clamp_min, + aten._foreach_clamp_max, + aten._foreach_lerp, + ], +) +def _meta_foreach_out_of_place(*args, _scalar_op=None, **kwargs): + torch._check( + isinstance(args[0], list), + lambda: (f"The first argument must be List[Tensor], but got {type(args[0])}."), + ) + + nelem = len(args[0]) + torch._check( + nelem > 0, + lambda: ("Tensor list must have at least one tensor."), + ) + + nlists = 1 + for iarg, arg in enumerate(args[1:]): + if isinstance(arg, list): + nlists += 1 + torch._check( + len(arg) == nelem, + lambda: ( + f"self and argument-{iarg+2} must match in length, " + f"but got {nelem} and {len(arg)}." + ), + ) + elif isinstance(arg, Tensor): + torch._check( + arg.dim() == 0 and arg.numel() == 1, + lambda: ( + "scalar tensor expected to be 0 dim but it has " + f"{arg.dim()} dimensions and {arg.numel()} elements." + ), + ) + else: + break + + result = [] + for elem in range(nelem): + each_args = [args[i][elem] for i in range(nlists)] + result.append(_scalar_op(*each_args, *args[nlists:], **kwargs)) + + return result + + +@register_meta_foreach( + [ + aten._foreach_abs_, + aten._foreach_acos_, + aten._foreach_asin_, + aten._foreach_atan_, + aten._foreach_ceil_, + aten._foreach_cos_, + aten._foreach_cosh_, + aten._foreach_erf_, + aten._foreach_erfc_, + aten._foreach_exp_, + aten._foreach_expm1_, + aten._foreach_frac_, + aten._foreach_floor_, + aten._foreach_lgamma_, + aten._foreach_log_, + aten._foreach_log10_, + aten._foreach_log1p_, + aten._foreach_log2_, + aten._foreach_neg_, + aten._foreach_reciprocal_, + aten._foreach_round_, + aten._foreach_sigmoid_, + aten._foreach_sign_, + aten._foreach_sin_, + aten._foreach_sinh_, + aten._foreach_sqrt_, + aten._foreach_tan_, + aten._foreach_tanh_, + aten._foreach_trunc_, + aten._foreach_zero_, + aten._foreach_add_, + aten._foreach_sub_, + aten._foreach_mul_, + 
aten._foreach_div_, + aten._foreach_clamp_min_, + aten._foreach_clamp_max_, + aten._foreach_lerp_, + aten._foreach_copy_, + ] +) +def _meta_foreach_inplace(*args, _scalar_op=None, **kwargs): + _meta_foreach_out_of_place(*args, _scalar_op=_scalar_op, **kwargs) + return + + +@register_meta([aten._foreach_pow.ScalarAndTensor]) +def meta__foreach_pow_scalar_and_tensor(self, exponent): + # Only foreach_pow has a ScalarAndTensor method and needs special + # handling because it does not work with _meta_foreach_out_of_place. + torch._check( + isinstance(exponent, List), + lambda: f"exponent must be a tensor list but got {type(exponent)}", + ) + return [torch.empty_like(e) for e in exponent] + + +def _check_foreach_binop_tensor_lists(self, other): + torch._check( + isinstance(self, List) and isinstance(other, List), + lambda: ( + "The first two arguments of must be List[Tensor], " + f"but got {type(self)} and {type(other)}." + ), + ) + torch._check( + len(self) > 0 and len(self) == len(other), + lambda: ( + "self and other must be non-empty and match in length, " + f"but got {len(self)} and {len(other)}." + ), + ) + + +@register_meta( + [ + aten._foreach_maximum, + aten._foreach_minimum, + ] +) +def meta__foreach_binop_scalar(*args): + # aten.maximum(Tensor, Scalar) does not exist. + return _meta_foreach_out_of_place(*args, _scalar_op=aten.clamp_min) + + +@register_meta( + [ + aten._foreach_maximum_, + aten._foreach_minimum_, + ] +) +def meta__foreach_binop__scalar(*args): + # aten.maximum(Tensor, Scalar) does not exist + _meta_foreach_inplace(*args, _scalar_op=aten.clamp_min_) + return + + +@register_meta( + [ + aten._foreach_addcdiv.Scalar, + aten._foreach_addcmul.Scalar, + ] +) +def meta__foreach_addcop_scalar(self, tensor1, tensor2, scalar=1): + # forach_addcdiv and addcdiv have different signatures and + # cannot use _meta_foreach_out_of_place. 
+ torch._check( + all(isinstance(l, List) for l in [self, tensor1, tensor2]), + lambda: ( + "All arguments must be List[Tensor], " + f"but got {type(self)}, {type(tensor1)}, and {type(tensor2)}" + ), + ) + torch._check(len(self) > 0, lambda: "input tensor list must not be empty.") + torch._check( + len(self) == len(tensor1) and len(self) == len(tensor2), + lambda: "All input tensor lists must have the same length", + ) + + return [torch.empty_like(s) for s in self] + + +@register_meta([aten._foreach_addcdiv_.Tensor, aten._foreach_addcmul_.Tensor]) +def meta__foreach_addcop_tensor(self, tensor1, tensor2, scalars): + torch._check( + all(isinstance(l, List) for l in [self, tensor1, tensor2]) + and isinstance(scalars, torch.Tensor), + lambda: ( + "_foreach_addc*_ op expects arguments of type: List[Tensor], List[Tensor], List[Tensor], tensor, " + f"but got: {type(self)}, {type(tensor1)}, {type(tensor2)}, and {type(scalars)}" + ), + ) + torch._check(len(self) > 0, lambda: "input tensor list must not be empty.") + torch._check( + len(self) == len(tensor1) and len(self) == len(tensor2), + lambda: "All input tensor lists must have the same length", + ) + + +@register_meta( + [ + aten._foreach_addcdiv_.Scalar, + aten._foreach_addcmul_.Scalar, + ] +) +def meta__foreach_addcop__scalar(self, tensor1, tensor2, scalar=1): + torch._check( + all(isinstance(l, List) for l in [self, tensor1, tensor2]), + lambda: ( + "All arguments of _foreach_addc*_ must be List[Tensor], " + f"but got {type(self)}, {type(tensor1)}, and {type(tensor2)}" + ), + ) + torch._check(len(self) > 0, lambda: "input tensor list must not be empty.") + torch._check( + len(self) == len(tensor1) and len(self) == len(tensor2), + lambda: "All input tensor lists must have the same length", + ) + + +@register_meta([aten._fused_adam_.default]) +def meta__fused_adam_( + self, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + *, + lr, + beta1, + beta2, + weight_decay, + eps, + amsgrad, + maximize, + 
grad_scale=None, + found_inf=None, +): + for l in [self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]: + torch._check( + isinstance(l, List), + lambda: f"exponent must be a tensor list but got {type(l)}", + ) + + +@register_meta([aten._fused_adam.default]) +def meta__fused_adam( + self, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + *, + lr, + beta1, + beta2, + weight_decay, + eps, + amsgrad, + maximize, + grad_scale=None, + found_inf=None, +): + for l in [self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]: + torch._check( + isinstance(l, List), + lambda: f"exponent must be a tensor list but got {type(l)}", + ) + + def empty_like_list(tensor_list): + return [torch.empty_like(t) for t in tensor_list] + + return ( + empty_like_list(self), + empty_like_list(grads), + empty_like_list(exp_avgs), + empty_like_list(exp_avg_sqs), + empty_like_list(max_exp_avg_sqs), + ) + + +@register_meta([aten._int_mm]) +@out_wrapper() +def meta__int_mm(a, b): + torch._check(a.dim() == 2, lambda: "a must be a 2D tensor") + torch._check(b.dim() == 2, lambda: "b must be a 2D tensor") + torch._check( + a.dtype is torch.int8, + lambda: f"expected self to be int8, got {a.dtype}", + ) + torch._check( + b.dtype is torch.int8, + lambda: f"expected mat2 to be int8, got {b.dtype}", + ) + torch._check( + a.size(1) == b.size(0), + lambda: ( + f"Incompatible matrix sizes for _int_mm ({a.size(0)}x{a.size(1)} " + f"and {b.size(0)}x{b.size(1)})" + ), + ) + return a.new_empty((a.size(0), b.size(1)), dtype=torch.int32) + + +@register_meta([aten._convert_weight_to_int4pack]) +def meta__convert_weight_to_int4pack(w, inner_k_tiles): + torch._check(w.dim() == 2, lambda: "w must be a 2D tensor") + torch._check( + w.dtype is torch.int32, + lambda: f"expected w to be int32, got {w.dtype}", + ) + n = w.size(0) + k = w.size(1) + return w.new_empty( + ( + n // 8, + k // (inner_k_tiles * 16), + 32, + inner_k_tiles // 2, + ), + dtype=torch.int32, + ) + + 
@register_meta([aten._weight_int4pack_mm])
def meta__weight_int4pack_mm(x, w, q_group_size, q_scale_and_zeros):
    # Shape-only kernel for int4-packed matmul: each int32 row of `w`
    # unpacks to 8 output columns, so the result is (x.rows, w.rows * 8).
    torch._check(x.dim() == 2, lambda: "x must be a 2D tensor")
    torch._check(w.dim() == 4, lambda: "w must be a 4D tensor")
    torch._check(
        x.dtype is torch.bfloat16,
        lambda: f"expected x to be bf16, got {x.dtype}",
    )
    torch._check(
        w.dtype is torch.int32,
        lambda: f"expected w to be int32, got {w.dtype}",
    )
    return x.new_empty(x.size(0), w.size(0) * 8, dtype=x.dtype)


@register_meta(aten._cdist_forward.default)
def meta_cdist_forward(x1, x2, p, compute_mode):
    # Output shape: broadcast(batch(x1), batch(x2)) + [rows(x1), rows(x2)].
    torch._check(
        x1.dim() >= 2,
        lambda: f"cdist only supports at least 2D tensors, X1 got: {x1.dim()}D",
    )
    torch._check(
        x2.dim() >= 2,
        lambda: f"cdist only supports at least 2D tensors, X2 got: {x2.dim()}D",
    )
    torch._check(
        x1.size(-1) == x2.size(-1),
        lambda: f"X1 and X2 must have the same number of columns. X1: {x1.size(-1)} X2: {x2.size(-1)}",
    )
    # FIX: these two messages interpolated {x1.dtype}/{x2.dtype} without an
    # f-prefix, so the braces were printed literally.
    torch._check(
        utils.is_float_dtype(x1.dtype),
        lambda: f"cdist only supports floating-point dtypes, X1 got: {x1.dtype}",
    )
    torch._check(
        utils.is_float_dtype(x2.dtype),
        lambda: f"cdist only supports floating-point dtypes, X2 got: {x2.dtype}",
    )
    torch._check(p >= 0, lambda: "cdist only supports non-negative p values")
    torch._check(
        compute_mode in (None, 1, 2),
        lambda: f"possible modes: None, 1, 2, but was: {compute_mode}",
    )
    r1 = x1.size(-2)
    r2 = x2.size(-2)
    batch_tensor1 = x1.shape[:-2]
    batch_tensor2 = x2.shape[:-2]
    output_shape = list(torch.broadcast_shapes(batch_tensor1, batch_tensor2))
    output_shape.extend([r1, r2])
    return x1.new_empty(output_shape)


@register_meta(aten._cdist_backward)
@out_wrapper()
def meta_cdist_backward(grad, x1, x2, p, cdist):
    # Gradient has x1's (possibly batch-expanded) shape; degenerate sizes
    # short-circuit to zeros of x1's original shape.
    c1 = x1.shape[-1]
    r1 = x1.shape[-2]
    r2 = x2.shape[-2]
    batch_tensor1 = x1.shape[:-2]
    batch_tensor2 = x2.shape[:-2]
    expand_batch_portion = list(torch.broadcast_shapes(batch_tensor1, batch_tensor2))
    tensor1_expand_size = expand_batch_portion.copy()
    tensor1_expand_size.extend([r1, c1])
    batch_product = math.prod(expand_batch_portion)
    if r1 == 0 or r2 == 0 or c1 == 0 or batch_product == 0:
        return torch.zeros_like(x1)
    if tensor1_expand_size != list(x1.shape):
        x1 = x1.expand(tensor1_expand_size)
    return torch.empty_like(x1, memory_format=torch.contiguous_format)


# NB: This meta function accepts non-meta arguments!  When this behavior
# was originally introduced this was accidental, but it is now load bearing
# as people are using this so that they can conveniently test code involving
# embeddings (feeding CPU tensor inputs with meta device EmbeddingBag module)
@register_meta(aten._embedding_bag.default)
def meta_embedding_bag(
    weight,
    indices,
    offsets,
    scale_grad_by_freq=False,
    mode=0,
    sparse=False,
    per_sample_weights=None,
    include_last_offset=False,
    padding_idx=-1,
):
    torch._check(
        indices.dtype in (torch.long, torch.int),
        lambda: f"expected indices to be long or int, got {indices.dtype}",
    )
    torch._check(
        offsets.dtype in (torch.long, torch.int),
        lambda: f"expected offsets to be long or int, got {offsets.dtype}",
    )
    torch._check(
        utils.is_float_dtype(weight.dtype),
        lambda: f"expected weight to be floating point type, got {weight.dtype}",
    )

    num_bags = offsets.size(0)
    if include_last_offset:
        torch._check(
            num_bags >= 1,
            lambda: "include_last_offset: numBags should be at least 1",
        )
        num_bags -= 1

    output = weight.new_empty(num_bags, weight.size(1))
    MODE_SUM, MODE_MEAN, MODE_MAX = range(3)

    if per_sample_weights is not None:
        torch._check(
            mode == MODE_SUM,
            lambda: "embedding_bag: per_sample_weights only supported with mode='sum'",
        )
        torch._check(
            per_sample_weights.dtype == weight.dtype,
            lambda: f"expected weight ({weight.dtype}) and per_sample_weights ({per_sample_weights.dtype}) to have same dtype",
        )
        torch._check(
            per_sample_weights.ndim == 1,
            lambda: f"expected per_sample_weights to be 1D tensor, got {per_sample_weights.ndim}D",
        )
        torch._check(
            per_sample_weights.numel() == indices.numel(),
            lambda: (
                # FIX: closing paren after the numel() value was missing.
                f"expected per_sample_weights.numel() ({per_sample_weights.numel()}) "
                f"to be the same as indices.numel() ({indices.numel()})"
            ),
        )

    # Fast-path predicates mirror the CPU kernel's dispatch so that the
    # dtype/size of offset2bag matches the real kernel's outputs.
    def is_fast_path_index_select_scale(src, scale, output, padding_idx):
        return (
            is_fast_path_index_select(src, output, padding_idx) and scale.stride(0) == 1
        )

    def is_fast_path_index_select(src, output, padding_idx):
        return (
            (src.dtype == torch.float or src.dtype == torch.half)
            and src.stride(1) == 1
            and output.stride(1) == 1
            and padding_idx < 0
        )

    def is_fast_path(src, scale, output, padding_idx):
        if scale is not None:
            return is_fast_path_index_select_scale(src, scale, output, padding_idx)
        else:
            return is_fast_path_index_select(src, output, padding_idx)

    if device_hint(offsets) != "cpu":
        offset2bag = indices.new_empty(indices.size(0))
        bag_size = indices.new_empty(offsets.size())
        if mode == MODE_MAX:
            max_indices = indices.new_empty(num_bags, weight.size(1))
        else:
            max_indices = indices.new_empty(0)
    else:
        fast_path_sum = is_fast_path(weight, per_sample_weights, output, padding_idx)
        if mode in (MODE_MEAN, MODE_MAX) or not fast_path_sum:
            offset2bag = offsets.new_empty(indices.size(0))
        else:
            offset2bag = offsets.new_empty(0)
        bag_size = offsets.new_empty(num_bags)
        # This part of the logic comes from make_max_indices_out in EmbeddingBag.cpp
        numBags = offsets.shape[0]
        if mode == MODE_MAX:
            if include_last_offset:
                torch._check(
                    numBags >= 1,
                    lambda: "include_last_offset: numBags should be at least 1",
                )
                numBags -= 1
            max_indices = offsets.new_empty(numBags, weight.shape[1])
        else:
            max_indices = offsets.new_empty(bag_size.size())
    return output, offset2bag, bag_size, max_indices


@register_meta(aten._embedding_bag_forward_only.default)
def meta_embedding_bag_forward_only(weight, indices, offsets, *args):
    # Forward-only variant: on CPU the kernel returns an offsets-shaped
    # bag_size instead of the training-path one.
    output, offset2bag, bag_size, max_indices = meta_embedding_bag(
        weight, indices, offsets, *args
    )
    if device_hint(offsets) == "cpu":
        bag_size = offsets.new_empty(offsets.size())
    return output, offset2bag, bag_size, max_indices


def _get_reduction_dtype(input, dtype, promote_int_to_long=True):
    # if specified, dtype takes precedence
    if dtype:
        return dtype

    if input.dtype.is_floating_point or input.dtype.is_complex:
        return input.dtype
    elif promote_int_to_long:
        return torch.long

    return input.dtype


@register_meta([aten.nansum.default, aten.nansum.out])
@out_wrapper()
def meta_nansum(input, dims=None, keepdim=False, *, dtype=None):
    output_dtype = _get_reduction_dtype(input, dtype, promote_int_to_long=True)
    dims = utils.reduction_dims(input.shape, dims)
    output_shape = _compute_reduction_shape(input, dims, keepdim)
    return input.new_empty(output_shape, dtype=output_dtype)


@register_meta([aten.median.default, aten.nanmedian.default])
def meta_median(input):
    # Full reduction over every dimension.
    output_shape = utils.compute_reduction_output_shape(
        input.shape, tuple(range(input.dim()))
    )
    return input.new_empty(output_shape)


@register_meta(
    [
        aten.median.dim,
        aten.median.dim_values,
        aten.nanmedian.dim,
        aten.nanmedian.dim_values,
        aten.mode.default,
        aten.mode.values,
    ]
)
@out_wrapper("values", "indices")
def meta_median_mode_dim(input, dim=-1, keepdim=False):
    if device_hint(input) == "cuda":
        utils.alert_not_deterministic("median CUDA with indices output")
    dim = utils.reduction_dims(input.shape, (dim,))
    output_shape = _compute_reduction_shape(input, dim, keepdim)
    return (
        input.new_empty(output_shape),
        input.new_empty(output_shape, dtype=torch.long),
    )


@register_meta(aten.logical_not_.default)
def meta_logical_not_(self):
    return self


@register_meta(aten.repeat.default)
def meta_repeat(self, repeats):
    torch._check(
        len(repeats) >= self.dim(),
        lambda: "Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor",
    )
    # Add new leading dimensions to the tensor if the
    # number of target dimensions is larger than the
    # number of source dimensions.
    num_new_dimensions = len(repeats) - self.dim()
    padded_size = (1,) * num_new_dimensions + tuple(self.shape)
    target_size = [padded_size[i] * repeats[i] for i in range(len(repeats))]
    return self.new_empty(target_size)


@register_meta(aten.zero_.default)
def meta_zero_(self):
    return self


@register_meta(
    [
        aten.mul_.Scalar,
        aten.div_.Scalar,
        aten.mul_.Tensor,
        aten.div_.Tensor,
        aten.logical_and_.default,
        aten.logical_or_.default,
        aten.logical_xor_.default,
    ],
)
def meta_binop_inplace(self, other):
    # In-place binops only need the broadcast-compatibility check;
    # the result aliases self.
    if isinstance(other, torch.Tensor):
        check_inplace_broadcast(self.shape, other.shape)
    return self


@register_meta(
    [
        aten.add_.Scalar,
        aten.sub_.Scalar,
        aten.add_.Tensor,
        aten.sub_.Tensor,
    ],
)
def meta_binop_inplace_alpha(self, other, alpha=1):
    if isinstance(other, torch.Tensor):
        check_inplace_broadcast(self.shape, other.shape)
    return self


@register_meta([aten.round.default, aten.round.decimals])
def meta_round(self, **kwargs):
    return elementwise_meta(
        self, type_promotion=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )


def shift_dtype_check(fn_name, self, val):
    # Shared validation for << / >>: both operands must be integral.
    torch._check(
        utils.is_integer_dtype(self.dtype),
        lambda: f"{fn_name}: Expected input tensor to have an integral dtype. Got {self.dtype}",
    )
    if isinstance(val, torch.Tensor):
        torch._check(
            utils.is_integer_dtype(val.dtype),
            lambda: f"{fn_name}: Expected shift value to have an integral dtype. Got {val.dtype}",
        )
    else:
        torch._check(
            isinstance(val, IntLike),
            lambda: f"{fn_name}: Expected shift value to be an int. Got {val}",
        )


@register_meta([aten.__rshift__.Tensor, aten.__rshift__.Scalar])
def meta_rshifts(self, other):
    shift_dtype_check("rshift", self, other)
    return elementwise_meta(
        self, other, type_promotion=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )


@register_meta([aten.__lshift__.Tensor, aten.__lshift__.Scalar])
def meta_lshifts(self, other):
    shift_dtype_check("lshift", self, other)
    return elementwise_meta(
        self, other, type_promotion=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )


@register_meta(aten.zero.default)
def meta_zero(self):
    return self.new_empty(self.shape)


@register_meta([aten.fill_.Tensor, aten.fill_.Scalar])
def meta_fill_(self, val):
    return self


@register_meta([aten.fill.Tensor, aten.fill.Scalar])
def meta_fill(self, val):
    return torch.empty_like(self)


@register_meta(aten.relu_.default)
def meta_relu_(self):
    return self


@register_meta([aten.index_put.default, aten._unsafe_index_put.default])
def meta_index_put(self, indices, values, accumulate=False):
    return torch.empty_like(self)


@register_meta(aten.masked_fill_.Scalar)
def meta_masked_fill_(self, mask, value):
    check_inplace_broadcast(self.shape, mask.shape)
    return self


@register_meta(aten.masked_scatter_)
def meta_masked_scatter_(self, mask, source):
    torch._check(
        mask.dtype in (torch.bool, torch.uint8), lambda: "Mask must be bool or uint8"
    )
    # FIX: the second string interpolated {self.dtype}/{source.dtype}
    # without an f-prefix, so the braces were printed literally.
    torch._check(
        self.dtype == source.dtype,
        lambda: "masked_scatter: expected self and source to have same "
        f"dtypes but got {self.dtype} and {source.dtype}",
    )
    return self


@register_meta(aten.masked_scatter)
@out_wrapper()
def meta_masked_scatter(self, mask, source):
    self, mask = _maybe_broadcast(self, mask)
    output = torch.empty_like(self, memory_format=torch.contiguous_format)
    return meta_masked_scatter_(output, mask, source)


@register_meta(aten.masked_scatter_backward)
def meta_masked_scatter_backward(self, mask, sizes):
    return self.new_empty(sizes)


@register_meta(aten.index_put_.default)
def meta_index_put_(self, indices, values, accumulate=False):
    return self


@register_meta(aten.alias.default)
def meta_alias(self):
    return self.view(self.shape)


def common_meta_baddbmm_bmm(batch1, batch2, is_bmm, self_baddbmm=None):
    # Shared shape logic for bmm/baddbmm: (b, n, k) @ (b, k, m) -> (b, n, m).
    torch._check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
    torch._check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")

    batch1_sizes = batch1.size()
    batch2_sizes = batch2.size()

    bs = batch1_sizes[0]
    contraction_size = batch1_sizes[2]
    res_rows = batch1_sizes[1]
    res_cols = batch2_sizes[2]
    output_size = (bs, res_rows, res_cols)

    torch._check(
        batch2_sizes[0] == bs and batch2_sizes[1] == contraction_size,
        lambda: f"Expected size for first two dimensions of batch2 tensor to be: [{bs}"
        f", {contraction_size}] but got: [{batch2_sizes[0]}, {batch2_sizes[1]}].",
    )

    # TODO: handle out

    output = batch2.new_empty(output_size)

    if not is_bmm and self_baddbmm is not None:
        torch._check(self_baddbmm.dim() == 3, lambda: "self must be a 3D tensor")
        torch._check(
            self_baddbmm.size() == output_size,
            lambda: f"Expected an input tensor shape with shape {output_size} but got shape: {self_baddbmm.size()}",
        )

    return output


@register_meta(aten.bmm.default)
def meta_bmm(self, mat2):
    return common_meta_baddbmm_bmm(self, mat2, True)


def div_rtn(x, y):
    # Division rounded toward negative infinity (matches ATen's div_rtn).
    q = x // y
    r = x % y
    # WARNING: explicit bool conversion here is necessary;
    # would be fixed by SymBool
    if r != 0 and (bool(r < 0) != bool(y < 0)):
        q -= 1
    return q


def pooling_output_shape_pad_lr(
    inputSize, kernelSize, pad_l, pad_r, stride, dilation, ceil_mode
):
    # Standard pooling output-size formula with distinct left/right padding;
    # in ceil mode the last window must still start inside the padded input.
    outputSize = (
        div_rtn(
            inputSize
            + pad_l
            + pad_r
            - dilation * (kernelSize - 1)
            - 1
            + (stride - 1 if ceil_mode else 0),
            stride,
        )
        + 1
    )
    if ceil_mode:
        if (outputSize - 1) * stride >= inputSize + pad_l:
            outputSize -= 1
    return outputSize
def pooling_output_shape(inputSize, kernelSize, pad, stride, dilation, ceil_mode):
    # Symmetric-padding convenience wrapper over pooling_output_shape_pad_lr.
    torch._check(stride != 0, lambda: "stride should not be zero")
    torch._check(pad >= 0, lambda: f"pad must be non-negative, but got pad: {pad}")
    torch._check(
        pad <= kernelSize // 2,
        lambda: f"pad should be at most half of kernel size, but got pad={pad} and kernel_size={kernelSize}",
    )
    return pooling_output_shape_pad_lr(
        inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode
    )


def pool2d_shape_check(
    input,
    kH,
    kW,
    dH,
    dW,
    padH,
    padW,
    dilationH,
    dilationW,
    nInputPlane,
    inputHeight,
    inputWidth,
    outputHeight,
    outputWidth,
    memory_format,
):
    # Validates a 2d pooling configuration; mirrors ATen's pool2d_shape_check.
    ndim = input.dim()
    nOutputPlane = nInputPlane

    # FIX: the kernel/stride/dilation/channels_last messages below
    # interpolated values without an f-prefix, printing literal braces.
    torch._check(
        kW > 0 and kH > 0,
        lambda: f"kernel size should be greater than zero, but got kH: {kH}, kW: {kW}",
    )
    torch._check(
        dW > 0 and dH > 0,
        lambda: f"stride should be greater than zero, but got dH: {dH}, dW: {dW}",
    )
    torch._check(
        dilationH > 0 and dilationW > 0,
        lambda: f"dilation should be greater than zero, but got dilationH: {dilationH}, dilationW: {dilationW}",
    )

    valid_dims = input.size(1) != 0 and input.size(2) != 0

    if memory_format == torch.channels_last:
        torch._check(
            ndim == 4 and valid_dims and input.size(3) != 0,
            lambda: "Expected 4D (batch mode) tensor expected for input with channels_last layout"
            f" with optional 0 dim batch size for input, but got: {input.size()}",
        )
    else:
        torch._check(
            (ndim == 3 and input.size(0) != 0 and valid_dims)
            or (ndim == 4 and valid_dims and input.size(3) != 0),
            lambda: f"Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got: {input.size()}",
        )

    torch._check(
        kW // 2 >= padW and kH // 2 >= padH,
        lambda: "pad should be smaller than or equal to half of kernel size, but got "
        f"padW = {padW}, padH = {padH}, kW = {kW}, kH = {kH}",
    )

    torch._check(
        outputWidth >= 1 and outputHeight >= 1,
        lambda: f"Given input size: ({nInputPlane}x{inputHeight}x{inputWidth}). "
        f"Calculated output size: ({nOutputPlane}x{outputHeight}x{outputWidth}). "
        "Output size is too small",
    )


def pool3d_shape_check(
    input: Tensor,
    nslices: int,
    kT: int,
    kH: int,
    kW: int,
    dT: int,
    dH: int,
    dW: int,
    pT: int,
    pH: int,
    pW: int,
    dilationT: int,
    dilationH: int,
    dilationW: int,
    itime: int,
    iheight: int,
    iwidth: int,
    otime: int,
    oheight: int,
    owidth: int,
    fn_name: str,
    check_input_size: bool = False,
):
    # Validates a 3d pooling configuration; mirrors ATen's pool3d_shape_check.
    ndim = input.ndim

    torch._check(
        kT > 0 and kW > 0 and kH > 0,
        lambda: (
            f"kernel size should be greater than zero, but got "
            f"kT: {kT}, kH: {kH}, kW: {kW}"
        ),
    )
    torch._check(
        dT > 0 and dW > 0 and dH > 0,
        lambda: (
            f"stride should be greater than zero, but got "
            f"dT: {dT}, dH: {dH}, dW: {dW}"
        ),
    )
    torch._check(
        dilationT > 0 and dilationW > 0 and dilationH > 0,
        lambda: (
            f"dilation should be greater than zero, but got "
            f"dilationT: {dilationT}, dilationH: {dilationH}, dilationW: {dilationW}"
        ),
    )

    torch._check(
        ndim in (4, 5),
        lambda: f"{fn_name}: Expected 4D or 5D tensor for input, but got: {input.shape}",
    )

    for i in range(ndim):
        if ndim == 5 and i == 0:
            # size of batch-dim can be 0.
            continue
        torch._check(
            input.size(i) > 0,
            lambda: (
                f"{fn_name}: Expected input's non-batch dimensions to have positive length,"
                f" but input has a shape of {input.shape}"
                f" and non-batch dimension {input.size(i)} has length zero!"
            ),
        )

    if check_input_size:  # AveragePool3d
        torch._check(
            itime >= kT and iheight >= kH and iwidth >= kW,
            lambda: (
                f"input image (T: {itime} H: {iheight} W: {iwidth}) smaller than "
                f"kernel size (kT: {kT} kH: {kH} kW: {kW})"
            ),
        )

    torch._check(
        kT / 2 >= pT and kW / 2 >= pW and kH / 2 >= pH,
        lambda: (
            f"pad should be smaller than or equal to half of kernel size, but got "
            f"kT: {kT} kW: {kW} kH: {kH} padT: {pT} padW: {pW} padH: {pH}"
        ),
    )

    torch._check(
        otime >= 1 and owidth >= 1 and oheight >= 1,
        lambda: (
            f"Given input size: ({nslices}x{itime}x{iheight}x{iwidth}). "
            f"Calculated output size: ({nslices}x{otime}x{oheight}x{owidth}). "
            f"Output size is too small"
        ),
    )


def max_pool3d_backward_shape_check(
    input,
    grad_output,
    indices,
    nslices,
    kT,
    kH,
    kW,
    dT,
    dH,
    dW,
    pT,
    pH,
    pW,
    dilationT,
    dilationH,
    dilationW,
    itime,
    iheight,
    iwidth,
    otime,
    oheight,
    owidth,
    fn_name,
):
    # Backward validation = forward validation + grad_output/indices
    # must exactly match the forward output geometry.
    ndim = input.ndim

    pool3d_shape_check(
        input,
        nslices,
        kT,
        kH,
        kW,
        dT,
        dH,
        dW,
        pT,
        pH,
        pW,
        dilationT,
        dilationH,
        dilationW,
        itime,
        iheight,
        iwidth,
        otime,
        oheight,
        owidth,
        fn_name,
    )

    check_dim_size(grad_output, ndim, ndim - 4, nslices)
    check_dim_size(grad_output, ndim, ndim - 3, otime)
    check_dim_size(grad_output, ndim, ndim - 2, oheight)
    check_dim_size(grad_output, ndim, ndim - 1, owidth)

    check_dim_size(indices, ndim, ndim - 4, nslices)
    check_dim_size(indices, ndim, ndim - 3, otime)
    check_dim_size(indices, ndim, ndim - 2, oheight)
    check_dim_size(indices, ndim, ndim - 1, owidth)


def avg_pool3d_backward_shape_check(
    input: Tensor,
    grad_output: Tensor,
    nslices: int,
    kT: int,
    kH: int,
    kW: int,
    dT: int,
    dH: int,
    dW: int,
    pT: int,
    pH: int,
    pW: int,
    itime: int,
    iheight: int,
    iwidth: int,
    otime: int,
    oheight: int,
    owidth: int,
    fn_name: str,
):
    # avg_pool has no dilation: pass 1s and request the input-size check.
    ndim = input.ndim

    pool3d_shape_check(
        input,
        nslices,
        kT,
        kH,
        kW,
        dT,
        dH,
        dW,
        pT,
        pH,
        pW,
        1,
        1,
        1,
        itime,
        iheight,
        iwidth,
        otime,
        oheight,
        owidth,
        fn_name,
        True,
    )

    check_dim_size(grad_output, ndim, ndim - 4, nslices)
    check_dim_size(grad_output, ndim, ndim - 3, otime)
    check_dim_size(grad_output, ndim, ndim - 2, oheight)
    check_dim_size(grad_output, ndim, ndim - 1, owidth)


def max_pool2d_checks_and_compute_shape(
    input, kernel_size, stride, padding, dilation, ceil_mode
):
    # Reference: aten/src/ATen/native/DilatedMaxPool2d.cpp
    def unpack(name, val):
        torch._check(
            len(val) in [1, 2],
            lambda: f"max_pool2d: {name} must either be a single int, or a tuple of two ints",
        )
        H = val[0]
        W = H if len(val) == 1 else val[1]
        return H, W

    kH, kW = unpack("kernel_size", kernel_size)

    torch._check(
        len(stride) in [0, 1, 2],
        lambda: "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
    )
    if len(stride) == 0:
        dH, dW = kH, kW
    else:
        dH, dW = unpack("stride", stride)

    padH, padW = unpack("padding", padding)
    dilationH, dilationW = unpack("dilation", dilation)
    nInputPlane = input.size(-3)
    inputHeight = input.size(-2)
    inputWidth = input.size(-1)

    memory_format = utils.suggest_memory_format(input)
    if memory_format == torch.channels_last:
        torch._check(
            input.dim() == 4,
            lambda: "non-empty 4D (batch mode) tensor expected for input with channels_last layout",
        )
    elif memory_format == torch.contiguous_format:
        torch._check(
            input.dim() in [3, 4],
            lambda: "non-empty 3D or 4D (batch mode) tensor expected for input",
        )
    else:
        torch._check(
            False,
            lambda: "Unsupport memory format. Supports only ChannelsLast, Contiguous",
        )

    outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, dilationH, ceil_mode)
    outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, dilationW, ceil_mode)

    pool2d_shape_check(
        input,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        dilationH,
        dilationW,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        memory_format,
    )

    return nInputPlane, outputHeight, outputWidth


@register_meta(aten.max_pool2d_with_indices_backward.default)
def meta_max_pool2d_with_indices_backward(
    grad_output,
    self,
    kernel_size,
    stride,
    padding,
    dilation,
    ceil_mode,
    indices,
):
    (
        nInputPlane,
        outputHeight,
        outputWidth,
    ) = max_pool2d_checks_and_compute_shape(
        self, kernel_size, stride, padding, dilation, ceil_mode
    )

    torch._check(
        self.dtype == grad_output.dtype,
        lambda: f"Expected dtype {self.dtype} for `gradOutput` but got dtype {grad_output.dtype}",
    )

    nOutputPlane = nInputPlane
    ndim = self.ndim

    def _check_dim_size(t):
        check_dim_size(t, ndim, ndim - 3, nOutputPlane)
        check_dim_size(t, ndim, ndim - 2, outputHeight)
        check_dim_size(t, ndim, ndim - 1, outputWidth)

    _check_dim_size(grad_output)
    _check_dim_size(indices)

    memory_format = utils.suggest_memory_format(self)
    return torch.empty(
        self.shape,
        dtype=self.dtype,
        device=self.device,
        memory_format=memory_format,
    )


@register_meta(aten.max_pool2d_with_indices.default)
def meta_max_pool2d_with_indices(
    input, kernel_size, stride=(), padding=(0,), dilation=(1,), ceil_mode=False
):
    (
        nInputPlane,
        outputHeight,
        outputWidth,
    ) = max_pool2d_checks_and_compute_shape(
        input, kernel_size, stride, padding, dilation, ceil_mode
    )

    nbatch = input.size(-4) if input.dim() == 4 else 1
    memory_format = utils.suggest_memory_format(input)
    if input.dim() == 3:
        size = [nInputPlane, outputHeight, outputWidth]
    else:
        size = [nbatch, nInputPlane, outputHeight, outputWidth]
    return (
        torch.empty(
            size,
            dtype=input.dtype,
            device=input.device,
            memory_format=memory_format,
        ),
        torch.empty(
            size,
            dtype=torch.int64,
            device=input.device,
            memory_format=memory_format,
        ),
    )


@register_meta(aten.max_unpool2d)
@out_wrapper()
def meta_max_unpool2d(self_, indices, output_size):
    utils.alert_not_deterministic("max_unpooling2d_forward_out")

    torch._check(
        indices.dtype == torch.int64,
        lambda: f"elements in indices should be type int64 but got: {indices.dtype}",
    )
    torch._check(
        len(output_size) == 2,
        lambda: (
            f"There should be exactly two elements (height, width) in output_size, "
            f"but got {len(output_size)} elements."
        ),
    )

    oheight, owidth = output_size

    torch._check(
        self_.ndim in (3, 4),
        lambda: (
            f"Input to max_unpooling2d should be a 3d or 4d Tensor, "
            f"but got a tensor with {self_.ndim} dimensions."
        ),
    )
    torch._check(
        self_.shape == indices.shape,
        lambda: (
            f"Expected shape of indices to be same as that of the input tensor ({self_.shape}) "
            f"but got indices tensor with shape: {indices.shape}"
        ),
    )

    for i in range(1, self_.ndim):
        torch._check(
            self_.size(i) > 0,
            lambda: (
                f"max_unpooling2d(): "
                f"Expected input to have non-zero size for non-batch dimensions, "
                f"but got {self_.shape} with dimension {i} being empty."
            ),
        )

    self = self_.contiguous()

    if self_.ndim == 3:
        nchannels = self.size(0)
        result = self.new_empty((nchannels, oheight, owidth))
    else:
        nbatch = self.size(0)
        nchannels = self.size(1)
        result = self.new_empty((nbatch, nchannels, oheight, owidth))

    return result


def _max_unpooling3d_shape_check(input, indices, output_size, stride, padding, fn_name):
    torch._check(
        indices.dtype == torch.int64, lambda: "elements in indices should be type int64"
    )
    torch._check(
        input.ndim in (4, 5),
        lambda: f"Input to max_unpooling3d should be a 4d or 5d Tensor, but got a tensor with {input.ndim} dimensions.",
    )
    torch._check(
        len(output_size) == 3,
        lambda: (
            f"There should be exactly three elements (depth, height, width) in output_size, "
            f"but got {len(output_size)} elements."
        ),
    )
    torch._check(
        len(stride) == 3,
        lambda: f"There should be exactly three elements (depth, height, width) in stride, but got: {len(stride)} elements.",
    )
    torch._check(
        len(padding) == 3,
        lambda: f"There should be exactly three elements (depth, height, width) in padding, but got: {len(padding)} elements.",
    )
    torch._check(
        input.shape == indices.shape,
        lambda: (
            f"Expected shape of indices to be same as that of the input tensor ({input.shape}) "
            f"but got indices tensor with shape: {indices.shape}"
        ),
    )

    for i in range(1, input.ndim):
        torch._check(
            input.size(i) > 0,
            lambda: (
                f"{fn_name}: "
                f"Expected input to have non-zero size for non-batch dimensions, "
                f"but got {input.shape} with dimension {i} being empty."
            ),
        )

    torch._check(
        stride[0] > 0 and stride[1] > 0 and stride[2] > 0,
        lambda: f"strides should be greater than zero, but got stride: {stride}",
    )


@register_meta(aten.max_unpool3d)
@out_wrapper()
def meta_max_unpool3d(self_, indices, output_size, stride, padding):
    utils.alert_not_deterministic("max_unpooling3d_forward_out")

    _max_unpooling3d_shape_check(
        self_, indices, output_size, stride, padding, "max_unpooling3d()"
    )

    self = self_.contiguous()

    odepth, oheight, owidth = output_size

    if self_.ndim == 4:
        nchannels = self.size(0)
        result = self.new_empty((nchannels, odepth, oheight, owidth))
    else:
        nbatch = self.size(0)
        nchannels = self.size(1)
        result = self.new_empty((nbatch, nchannels, odepth, oheight, owidth))

    return result


@register_meta(aten.max_pool3d_with_indices)
@out_wrapper("out", "indices")
def meta_max_pool3d_with_indices(
    input,
    kernel_size,
    stride=(),
    padding=(0,),
    dilation=(1,),
    ceil_mode=False,
):
    # Scalar-or-triple expansion for each pooling parameter.
    torch._check(
        len(kernel_size) in (1, 3),
        lambda: "max_pool3d: kernel_size must either be a single int, or a tuple of three ints",
    )
    kT = kernel_size[0]
    kH = kT if len(kernel_size) == 1 else kernel_size[1]
    kW = kT if len(kernel_size) == 1 else kernel_size[2]

    torch._check(
        not stride or len(stride) in (1, 3),
        lambda: "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints",
    )
    dT = kT if not stride else stride[0]
    dH = kH if not stride else (dT if len(stride) == 1 else stride[1])
    dW = kW if not stride else (dT if len(stride) == 1 else stride[2])

    torch._check(
        len(padding) in (1, 3),
        lambda: "max_pool3d: padding must either be a single int, or a tuple of three ints",
    )
    pT = padding[0]
    pH = pT if len(padding) == 1 else padding[1]
    pW = pT if len(padding) == 1 else padding[2]

    torch._check(
        len(dilation) in (1, 3),
        lambda: "max_pool3d: dilation must be either a single int, or a tuple of three ints",
    )
    dilationT = dilation[0]
    dilationH = dilationT if len(dilation) == 1 else dilation[1]
    dilationW = dilationT if len(dilation) == 1 else dilation[2]

    torch._check(
        input.ndim in (4, 5),
        lambda: "non-empty 4D or 5D (batch mode) tensor expected for input",
    )

    nbatch = input.size(-5) if input.ndim == 5 else 1
    nslices = input.size(-4)
    itime = input.size(-3)
    iheight = input.size(-2)
    iwidth = input.size(-1)

    otime = pooling_output_shape(itime, kT, pT, dT, dilationT, ceil_mode)
    oheight = pooling_output_shape(iheight, kH, pH, dH, dilationH, ceil_mode)
    owidth = pooling_output_shape(iwidth, kW, pW, dW, dilationW, ceil_mode)

    pool3d_shape_check(
        input,
        nslices,
        kT,
        kH,
        kW,
        dT,
        dH,
        dW,
        pT,
        pH,
        pW,
        dilationT,
        dilationH,
        dilationW,
        itime,
        iheight,
        iwidth,
        otime,
        oheight,
        owidth,
        "max_pool3d_with_indices()",
    )

    channels_last = (
        input.ndim == 5 and utils.suggest_memory_format(input) == torch.channels_last_3d
    )
    if input.ndim == 4:
        # A 4D input can still be "channels-last" once a batch dim is added.
        input_channels_last_check = input.unsqueeze(0)
        channels_last = (
            not input_channels_last_check.is_contiguous()
        ) and input_channels_last_check.is_contiguous(
            memory_format=torch.channels_last_3d
        )
        out_shape = (nslices, otime, oheight, owidth)
    else:
        out_shape = (nbatch, nslices, otime, oheight, owidth)  # type: ignore[assignment]

    out = input.new_empty(out_shape)
    indices = input.new_empty(out_shape, dtype=torch.int64)

    if channels_last:
        out = out.to(memory_format=torch.channels_last_3d)
        indices = indices.to(memory_format=torch.channels_last_3d)

    return out, indices


@register_meta(aten.max_pool3d_with_indices_backward)
@out_wrapper("grad_input")
def meta_max_pool3d_with_indices_backward(
    grad_output,
    input,
    kernel_size,
    stride,
    padding,
    dilation,
    ceil_mode,
    indices,
):
    torch._check(
        len(kernel_size) in (1, 3),
        lambda: "max_pool3d: kernel_size must either be a single int, or a tuple of three ints",
    )
    kT = kernel_size[0]
    kH = kT if len(kernel_size) == 1 else kernel_size[1]
    kW = kT if len(kernel_size) == 1 else kernel_size[2]

    torch._check(
        not stride or len(stride) in (1, 3),
        lambda: "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints",
    )
    dT = kT if not stride else stride[0]
    dH = kH if not stride else (dT if len(stride) == 1 else stride[1])
    dW = kW if not stride else (dT if len(stride) == 1 else stride[2])

    torch._check(
        len(padding) in (1, 3),
        lambda: "max_pool3d: padding must either be a single int, or a tuple of three ints",
    )
    pT = padding[0]
    pH = pT if len(padding) == 1 else padding[1]
    pW = pT if len(padding) == 1 else padding[2]

    torch._check(
        len(dilation) in (1, 3),
        lambda: "max_pool3d: dilation must be either a single int, or a tuple of three ints",
    )
    dilationT = dilation[0]
    dilationH = dilationT if len(dilation) == 1 else dilation[1]
    dilationW = dilationT if len(dilation) == 1 else dilation[2]

    torch._check(
        input.ndim in (4, 5),
        lambda: "non-empty 4D or 5D (batch mode) tensor expected for input",
    )

    nslices = input.size(-4)
    itime = input.size(-3)
    iheight = input.size(-2)
    iwidth = input.size(-1)

    # Output geometry is taken from grad_output, then cross-checked.
    otime = grad_output.size(-3)
    oheight = grad_output.size(-2)
    owidth = grad_output.size(-1)

    max_pool3d_backward_shape_check(
        input,
        grad_output,
        indices,
        nslices,
        kT,
        kH,
        kW,
        dT,
        dH,
        dW,
        pT,
        pH,
        pW,
        dilationT,
        dilationH,
        dilationW,
        itime,
        iheight,
        iwidth,
        otime,
        oheight,
        owidth,
        "max_pool3d_with_indices_backward()",
    )

    channels_last = (
        input.ndim == 5 and utils.suggest_memory_format(input) == torch.channels_last_3d
    )
    if input.ndim == 4:
        input_channels_last_check = input.unsqueeze(0)
        channels_last = (
            not input_channels_last_check.is_contiguous()
        ) and input_channels_last_check.is_contiguous(
            memory_format=torch.channels_last_3d
        )

    grad_input = input.new_empty(input.shape)

    if channels_last:
        grad_input = grad_input.to(memory_format=torch.channels_last_3d)

    return grad_input


def check_grid_sampler_common(input: Tensor, grid: Tensor):
    # Checks shared by 2d and 3d grid_sampler variants.
    torch._check(
        input.device == grid.device,
        lambda: (
            f"grid_sampler(): expected input and grid to be on same device, but input "
            f"is on {input.device} and grid is on {grid.device}"
        ),
    )
    torch._check(
        input.layout == torch.strided and grid.layout == torch.strided,
        lambda: (
            f"grid_sampler(): expected input and grid to have torch.strided layout, but "
            f"input has {input.layout} and grid has {grid.layout}"
        ),
    )
    torch._check(
        input.shape[0] == grid.shape[0],
        lambda: (
            f"grid_sampler(): expected grid and input to have same batch size, but got "
            f"input with sizes {input.shape} and grid with sizes {grid.shape}"
        ),
    )
    torch._check(
        grid.shape[-1] == input.ndim - 2,
        lambda: (
            f"grid_sampler(): expected grid to have size {input.ndim - 2} in last "
            f"dimension, but got grid with sizes {grid.shape}"
        ),
    )

    for i in range(2, input.ndim):
        torch._check(
            input.shape[i] > 0,
            lambda: (
                f"grid_sampler(): expected input to have non-empty spatial dimensions, "
                f"but input has sizes {input.shape} with dimension {i} being empty"
            ),
        )


class GridSamplerInterpolation(Enum):
    BILINEAR = 0
    NEAREST = 1
    BICUBIC = 2


def check_grid_sampler_3d(input: Tensor, grid: Tensor, interpolation_mode: int):
    torch._check(
        input.ndim == 5 and input.ndim == grid.ndim,
        lambda: (
            f"grid_sampler(): expected 5D input and grid with same number of "
            f"dimensions, but got input with sizes {input.shape}"
            f" and grid with sizes {grid.shape}"
        ),
    )
    torch._check(
        not (
            input.ndim == 5
            and interpolation_mode == GridSamplerInterpolation.BICUBIC.value
        ),
        lambda: "grid_sampler(): bicubic interpolation only supports 4D input",
    )


@register_meta(aten.grid_sampler_2d_backward.default)
def grid_sampler_2d_backward_meta(
    grad_output,
    input,
    grid,
    interpolation_mode,
    padding_mode,
    align_corners,
    output_mask,
):
    # grad_input is only materialized when the caller asked for it.
    input_requires_grad = output_mask[0]
    if input_requires_grad:
        grad_input = torch.zeros_like(input, memory_format=torch.contiguous_format)
    else:
        grad_input = None
    grad_grid = torch.empty_like(grid, memory_format=torch.contiguous_format)
    return (grad_input, grad_grid)


@register_meta(aten.grid_sampler_3d)
@out_wrapper()
def grid_sampler_3d(
    input,
    grid,
    interpolation_mode,
    padding_mode,
    align_corners,
):
    check_grid_sampler_common(input, grid)
    check_grid_sampler_3d(input, grid, interpolation_mode)
    N = input.shape[0]
    C = input.shape[1]
    out_D = grid.shape[1]
    out_H = grid.shape[2]
    out_W = grid.shape[3]
    return input.new_empty((N, C, out_D, out_H, out_W))


@register_meta(aten.grid_sampler_3d_backward)
@out_wrapper("grad_input", "grad_grid")
def grid_sampler_3d_backward(
    grad_output,
    input,
    grid,
    interpolation_mode,
    padding_mode,
    align_corners,
    output_mask,
):
    check_grid_sampler_common(input, grid)
    check_grid_sampler_3d(input, grid, interpolation_mode)
    input_requires_grad = output_mask[0]
    if input_requires_grad:
        grad_input = torch.zeros_like(
            input, memory_format=torch.legacy_contiguous_format
        )
    else:
        grad_input = None
    grad_grid = torch.empty_like(grid, memory_format=torch.legacy_contiguous_format)
    return grad_input, grad_grid


@register_meta([aten.full.default])
def full(size, fill_value, *args, **kwargs):
    # Infer dtype from fill_value when the caller did not pass one.
    dtype = kwargs.get("dtype", None)
    if not dtype:
        dtype = utils.get_dtype(fill_value)
    kwargs["dtype"] = dtype
    return torch.empty(size, *args, **kwargs)


# zeros_like is special cased to work for sparse
@register_meta(aten.zeros_like.default)
def zeros_like(
    self,
    dtype=None,
    layout=None,
    device=None,
    pin_memory=None,
    memory_format=None,
):
    if layout == torch.sparse_coo:
        torch._check(
            memory_format is None,
            lambda: "memory format option is only supported by strided tensors",
        )

        res = torch.empty(
            0,
            dtype=self.dtype if dtype is None else dtype,
            layout=layout,
            device=self.device if device is None else device,
            pin_memory=pin_memory,
        )

        if self.is_sparse:
            res.sparse_resize_and_clear_(
                self.size(), self.sparse_dim(), self.dense_dim()
            )
        else:
            res.sparse_resize_and_clear_(self.size(), self.dim(), 0)

        res._coalesced_(True)
        return res
    res = aten.empty_like.default(
        self,
        dtype=dtype,
        layout=layout,
        device=device,
        pin_memory=pin_memory,
        memory_format=memory_format,
    )
    # device can be not "meta"
    res.fill_(0)
    return res


@register_meta(aten.select.int)
def meta_select(self, dim, index):
    ndim = self.dim()
    torch._check_index(
        ndim != 0,
        lambda: "select() cannot be applied to a 0-dim tensor.",
    )

    dim = dim if dim >= 0 else dim + ndim
    size = self.size(dim)

    torch._check_index(
        not (-index > size or index >= size),
        lambda: f"select(): index {index} out of range for tensor of size "
        f"{self.size()} at dimension {dim}",
    )

    index = index if index >= 0 else index + size

    new_size = list(self.size())
    new_stride = list(self.stride())

    # Selecting drops `dim`; the view starts at the selected slice.
    new_storage_offset = self.storage_offset() + index * new_stride[dim]
    del new_size[dim]
    del new_stride[dim]

    return self.as_strided(new_size, new_stride, new_storage_offset)


@register_meta(aten.select_scatter.default)
def meta_select_scatter(self, src, dim, index):
    return utils.clone_preserve_strides(self)


@register_meta(aten.slice_scatter.default)
def meta_slice_scatter(self, src, dim=0, start=None, end=None, step=1):
    return utils.clone_preserve_strides(self)


# TODO: Deduplicate this with canonicalize_dim
# NOTE(review): this definition is truncated at the chunk boundary; the
# remainder (presumably the wrap-around and return) lives past this chunk
# and is intentionally not reconstructed here — confirm against the full file.
def maybe_wrap_dim(dim: int, dim_post_expr: int, wrap_scalar: bool = True):
    if dim_post_expr <= 0:
        assert wrap_scalar
        dim_post_expr = 1
    min = -dim_post_expr
    max = dim_post_expr - 1
    assert not (dim < min or dim > max), f"dim {dim} out of bounds ({min}, {max})"
    if dim < 0:
        dim
+= dim_post_expr + return dim + + +def ensure_nonempty_size(t, dim): + return 1 if t.dim() == 0 else t.shape[dim] + + +# From aten/src/ATen/native/ScatterGatherChecks.h +def gather_shape_check(self, dim, index): + self_dims = max(self.dim(), 1) + index_dims = max(index.dim(), 1) + torch._check( + self_dims == index_dims, + lambda: "Index tensor must have the same number of dimensions as input tensor", + ) + for i in range(self_dims): + if i != dim: + torch._check( + ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i), + lambda: f"Size does not match at dimension {i} expected index {index.shape}" + + f" to be smaller than self {self.shape} apart from dimension {dim}", + ) + + +@register_meta(aten.gather.default) +def meta_gather(self, dim, index, sparse_grad=False): + wrapped_dim = maybe_wrap_dim(dim, self.dim()) + is_index_empty = index.numel() == 0 + if not is_index_empty: + torch._check( + index.dtype == torch.long, + lambda: f"gather(): Expected dtype int64 for index, but got {index.dtype}", + ) + gather_shape_check(self, wrapped_dim, index) + return self.new_empty(index.shape) + + +# From aten/src/ATen/native/TensorAdvancedIndexing.cpp +def get_operator_enum(reduce_, use_new_options=False): + if use_new_options: + if reduce_ == "sum": + return "REDUCE_ADD" + elif reduce_ == "prod": + return "REDUCE_MULTIPLY" + elif reduce_ == "mean": + return "REDUCE_MEAN" + elif reduce_ == "amax": + return "REDUCE_MAXIMUM" + elif reduce_ == "amin": + return "REDUCE_MINIMUM" + torch._check( + False, + lambda: "reduce argument must be either sum, prod, mean, amax or amin.", + ) + return + else: + if reduce_ == "add": + return "REDUCE_ADD" + elif reduce_ == "multiply": + return "REDUCE_MULTIPLY" + torch._check(False, lambda: "reduce argument must be either add or multiply.") + return + + +# From aten/src/ATen/native/ScatterGatherChecks.h +def scatter_gather_dtype_check(method_name, self, index, src_opt=None): + if index.numel() != 0: + torch._check( + index.dtype == 
torch.long, + lambda: f"{method_name}(): Expected dtype int64 for index", + ) + + if src_opt is not None: + torch._check( + self.dtype == src_opt.dtype, + lambda: f"{method_name}(): Expected self.dtype to be equal to src.dtype", + ) + + +def ensure_nonempty_dim(dim): + return max(dim, 1) + + +# From aten/src/ATen/native/ScatterGatherChecks.h +def scatter_shape_check(self, dim, index, src_opt=None): + if index.numel() == 0: + return + torch._check( + ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()), + lambda: "Index tensor must have the same number of dimensions as self tensor", + ) + + is_wrong_shape = False + self_dims = ensure_nonempty_dim(self.dim()) + + # Check: index.size(d) <= self.size(d) for all d != dim + for d in range(self_dims): + index_d_size = ensure_nonempty_size(index, d) + if d == dim: + continue + if index_d_size > ensure_nonempty_size(self, d): + is_wrong_shape = True + break + + # Check: index.size(d) <= src.size(d) for all d if src is Tensor + if not is_wrong_shape and src_opt is not None: + for d in range(self_dims): + index_d_size = ensure_nonempty_size(index, d) + if index_d_size > ensure_nonempty_size(src_opt, d): + is_wrong_shape = True + break + + if src_opt is not None: + torch._check( + ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()), + lambda: "Index tensor must have the same number of dimensions as self tensor", + ) + torch._check( + not is_wrong_shape, + lambda: f"Expected index {index.shape} to be smaller than self {self.shape}" + + f" apart from dimension {dim} and to be smaller than src {src_opt.shape}", + ) + else: + torch._check( + not is_wrong_shape, + lambda: f"Expected index {index.shape} to be smaller than self {self.shape}" + + f" apart from dimension {dim}", + ) + + +# From aten/src/ATen/native/TensorAdvancedIndexing.cpp +def scatter_meta_impl(self, dim, index, src=None, reduce_=None, use_new_options=False): + wrapped_dim = maybe_wrap_dim(dim, self.dim()) + 
scatter_gather_dtype_check("scatter", self, index, src) + scatter_shape_check(self, wrapped_dim, index, src) + if reduce_ is not None: + # Check if we have a valid reduce operator. + get_operator_enum(reduce_, use_new_options) + + +@register_meta(aten.scatter_add.default) +def meta_scatter_add(self, dim, index, src): + scatter_meta_impl(self, dim, index, src, "add") + return self.new_empty(self.shape) + + +@register_meta(aten.scatter_add_) +def meta_scatter_add_(self, dim, index, src): + scatter_meta_impl(self, dim, index, src, "add") + return self + + +@register_meta( + [ + aten.scatter.src, + aten.scatter.value, + aten.scatter.reduce, + aten.scatter.value_reduce, + ] +) +@out_wrapper() +def meta_scatter(self, dim, index, src_or_value, reduce=None): + src = src_or_value if isinstance(src_or_value, torch.Tensor) else None + scatter_meta_impl(self, dim, index, src, reduce) + return self.new_empty(self.shape) + + +@register_meta( + [ + aten.scatter_.src, + aten.scatter_.value, + aten.scatter_.reduce, + aten.scatter_.value_reduce, + ] +) +def meta_scatter_(self, dim, index, src_or_value, reduce=None): + src = src_or_value if isinstance(src_or_value, torch.Tensor) else None + scatter_meta_impl(self, dim, index, src, reduce) + return self + + +@register_meta( + [ + aten._scaled_dot_product_flash_attention, + ] +) +def meta__scaled_dot_product_flash( + query: Tensor, + key: Tensor, + value: Tensor, + dropout_p: float = 0.0, + is_causal: bool = False, + return_debug_mask: bool = False, + scale: Optional[float] = None, +): + batch_size = query.size(0) + num_heads = query.size(1) + max_seqlen_batch_q = query.size(2) + head_dim = query.size(3) + + max_seqlen_batch_k = key.size(2) + + if device_hint(query) == "cpu": + attention = torch.empty( + (batch_size, max_seqlen_batch_q, num_heads, head_dim), + dtype=query.dtype, + device=query.device, + ).transpose(1, 2) + logsumexp = torch.empty( + ( + batch_size, + max_seqlen_batch_q, + num_heads, + ), + dtype=torch.float, + 
device=query.device, + ).transpose(1, 2) + return ( + attention, + logsumexp, + torch.empty((), dtype=torch.int32, device="meta"), + torch.empty((), dtype=torch.int32, device="meta"), + 0, + 0, + torch.empty((), dtype=torch.long, device="meta"), + torch.empty((), dtype=torch.long, device="meta"), + torch.empty((), dtype=query.dtype, device=query.device), + ) + + # Cuda Path + query_t = query.transpose(1, 2) + attention = torch.empty_like(query_t).transpose(1, 2) + logsumexp = torch.empty( + (batch_size, num_heads, max_seqlen_batch_q), + dtype=torch.float, + device=query.device, + ) + + if return_debug_mask: + blocksize_c = 128 if head_dim > 64 else 256 + max_seqlen_k = math.ceil(max_seqlen_batch_q / blocksize_c) + if max_seqlen_batch_k <= 128: + max_seqlen_k = 128 + elif max_seqlen_batch_k <= 256: + max_seqlen_k = 256 + debug_mask = torch.empty( + (batch_size, num_heads, max_seqlen_batch_q, max_seqlen_k), + dtype=query.dtype, + device=query.device, + ) + else: + debug_mask = torch.empty(0, dtype=query.dtype, device=query.device) + + # Note [Seed and Offset]: device for seed and offset below depends on whether we are + # capturing or not, but at the time of tracing we don't know if we + # are going to use cudagraphs or not, so we return meta tensors here + # it's possible we'll need to have some special handling in inductor for sdpa + + return ( + attention, + logsumexp, + None, + None, + max_seqlen_batch_q, + max_seqlen_batch_k, + torch.empty((), dtype=torch.long, device="meta"), + torch.empty((), dtype=torch.long, device="meta"), + debug_mask, + ) + + +@register_meta( + [ + aten._scaled_dot_product_flash_attention_backward, + ] +) +def meta__scaled_dot_product_flash_backward( + grad_out: Tensor, + query: Tensor, + key: Tensor, + value: Tensor, + out: Tensor, + logsumexp: Tensor, + cum_seq_q: Tensor, + cum_seq_k: Tensor, + max_q: int, + max_k: int, + dropout_p: float, + is_causal: bool, + philox_seed: Tensor, + philox_offset: Tensor, + scale: Optional[float] = 
None, +): + if device_hint(query) != "cpu": + grad_q = torch.empty_like(query.transpose(1, 2)).transpose(1, 2) + grad_k = torch.empty_like(key.transpose(1, 2)).transpose(1, 2) + grad_v = torch.empty_like(value.transpose(1, 2)).transpose(1, 2) + return grad_q, grad_k, grad_v + + batch_size = query.size(0) + num_heads = query.size(1) + head_dim = query.size(3) + len_q = query.size(2) if device_hint(query) == "cpu" else max_q + len_k = key.size(2) if device_hint(query) == "cpu" else max_k + + grad_q = torch.empty_permuted( + (batch_size, num_heads, len_q, head_dim), + (0, 2, 1, 3), + dtype=query.dtype, + device=query.device, + ) + grad_k = torch.empty_permuted( + (batch_size, num_heads, len_k, head_dim), + (0, 2, 1, 3), + dtype=key.dtype, + device=key.device, + ) + grad_v = torch.empty_permuted( + (batch_size, num_heads, len_k, head_dim), + (0, 2, 1, 3), + dtype=value.dtype, + device=value.device, + ) + + return grad_q, grad_k, grad_v + + +@register_meta( + [ + aten._scaled_dot_product_efficient_attention, + ] +) +def meta__scaled_dot_product_efficient( + query: Tensor, + key: Tensor, + value: Tensor, + attn_bias: Optional[Tensor], + compute_log_sumexp: bool, + dropout_p=0.0, + is_causal: bool = False, + scale: Optional[float] = None, +): + query = query.transpose(1, 2) + key = key.transpose(1, 2) + value = value.transpose(1, 2) + + B = query.size(0) + M = query.size(1) + N = key.size(1) + num_heads = query.size(-2) + K = query.size(-1) + Kv = value.size(-1) + + res = torch.empty(B, M, num_heads, Kv, dtype=query.dtype, device=query.device) + + logsumexp_dim = math.ceil(M / 32) * 32 if compute_log_sumexp else 0 + logsum_exp = torch.empty( + (B, num_heads, logsumexp_dim), + dtype=torch.float, + device=query.device, + ) + + res = res.transpose(1, 2) + + # See Note [Seed and Offset]: + seed = torch.empty((), dtype=torch.long, device="meta") + offset = torch.empty((), dtype=torch.long, device="meta") + + return res, logsum_exp, seed, offset + + +@register_meta( + [ + 
aten._scaled_dot_product_efficient_attention_backward, + ] +) +def meta__scaled_dot_product_efficient_backward( + grad_out: Tensor, + query: Tensor, + key: Tensor, + value: Tensor, + attn_bias: Optional[Tensor], + out: Tensor, + logsumexp: Tensor, + philox_seed: Tensor, + philox_offset: Tensor, + dropout_p: float, + grad_input_mask: List[bool], + is_causal: bool = False, + scale: Optional[float] = None, +): + batch_size = query.size(0) + num_heads = query.size(1) + max_q = query.size(2) + head_dim = query.size(3) + head_dim_v = value.size(3) + + max_k = key.size(2) + + grad_q = torch.empty_permuted( + (batch_size, num_heads, max_q, head_dim), + (0, 2, 1, 3), + dtype=query.dtype, + device=query.device, + ) + grad_k = torch.empty_permuted( + (batch_size, num_heads, max_k, head_dim), + (0, 2, 1, 3), + dtype=key.dtype, + device=key.device, + ) + grad_v = torch.empty_permuted( + (batch_size, num_heads, max_k, head_dim_v), + (0, 2, 1, 3), + dtype=value.dtype, + device=value.device, + ) + grad_bias = None + if attn_bias is not None and grad_input_mask[3]: + lastDim = attn_bias.size(-1) + lastDimAligned = lastDim if lastDim % 16 == 0 else lastDim + 16 - lastDim % 16 + new_sizes = list(attn_bias.size()) + new_sizes[-1] = lastDimAligned + grad_bias = torch.empty( + new_sizes, dtype=attn_bias.dtype, device=attn_bias.device + ) + grad_bias = grad_bias[..., :lastDim] + + return grad_q, grad_k, grad_v, grad_bias + + +@register_meta( + [ + aten._flash_attention_forward, + ] +) +def meta__flash_attention_forward( + query: Tensor, + key: Tensor, + value: Tensor, + cum_seq_q: Optional[Tensor], + cum_seq_k: Optional[Tensor], + max_q: int, + max_k: int, + dropout_p: float, + is_causal: bool, + return_debug_mask: bool, + scale: Optional[float] = None, +): + batch_size = query.size(0) + max_seqlen_batch_q = query.size(1) + num_heads = query.size(2) + head_dim = query.size(3) + + max_seqlen_batch_k = key.size(1) + + # Cuda Path + attention = torch.empty_like(query) + logsumexp = 
torch.empty( + (batch_size, num_heads, max_seqlen_batch_q), + dtype=torch.float, + device=query.device, + ) + + if return_debug_mask: + blocksize_c = 128 if head_dim > 64 else 256 + max_seqlen_k = math.ceil(max_seqlen_batch_q / blocksize_c) + if max_seqlen_batch_k <= 128: + max_seqlen_k = 128 + elif max_seqlen_batch_k <= 256: + max_seqlen_k = 256 + debug_mask = torch.empty( + (batch_size, num_heads, max_seqlen_batch_q, max_seqlen_k), + dtype=query.dtype, + device=query.device, + ) + else: + debug_mask = torch.empty(0, dtype=query.dtype, device=query.device) + + # See Note [Seed and Offset]: + return ( + attention, + logsumexp, + torch.empty((), dtype=torch.long, device="meta"), + torch.empty((), dtype=torch.long, device="meta"), + debug_mask, + ) + + +@register_meta( + [ + aten._flash_attention_backward, + ] +) +def meta__flash_attention_backward( + grad_out: Tensor, + query: Tensor, + key: Tensor, + value: Tensor, + out: Tensor, + logsumexp: Tensor, + cum_seq_q: Tensor, + cum_seq_k: Tensor, + max_q: int, + max_k: int, + dropout_p: float, + is_causal: bool, + philox_seed: Tensor, + philox_offset: Tensor, + scale: Optional[float] = None, +): + grad_query = torch.empty_like(query) + grad_key = torch.empty_like(key) + grad_value = torch.empty_like(value) + + return grad_query, grad_key, grad_value + + +@register_meta( + [ + aten._efficient_attention_forward, + ] +) +def meta__efficient_attention_forward( + query: Tensor, + key: Tensor, + value: Tensor, + bias: Optional[Tensor], + cu_seqlens_q: Optional[Tensor], + cu_seqlens_k: Optional[Tensor], + max_seqlen_q: Optional[int], + dropout_p: float, + custom_mask_type: int, + compute_log_sumexp: bool = False, + scale: Optional[float] = None, + causal_diagonal: Optional[Tensor] = None, + seqlen_k: Optional[Tensor] = None, +): + B = query.size(0) + M = query.size(1) + N = key.size(1) + num_heads = query.size(-2) + K = query.size(-1) + Kv = value.size(-1) + + res = torch.empty(B, M, num_heads, Kv, dtype=query.dtype, 
device=query.device) + + logsumexp_dim = math.ceil(M / 32) * 32 if compute_log_sumexp else 0 + logsum_exp = torch.empty( + (B, num_heads, logsumexp_dim), + dtype=torch.float, + device=query.device, + ) + + # See Note [Seed and Offset]: + seed = torch.empty((), dtype=torch.long, device="meta") + offset = torch.empty((), dtype=torch.long, device="meta") + + return res, logsum_exp, seed, offset, M, N + + +@register_meta( + [ + aten._efficient_attention_backward, + ] +) +def meta__efficient_attention_backward( + grad_out: Tensor, + query: Tensor, + key: Tensor, + value: Tensor, + bias: Optional[Tensor], + cu_seqlens_q: Optional[Tensor], + cu_seqlens_k: Optional[Tensor], + max_seqlen_q: int, + max_seqlen_k: int, + logsumexp: Tensor, + dropout_p: float, + philox_seed: Tensor, + philox_offset: Tensor, + custom_mask_type: int, + bias_requires_grad: bool, + scale: Optional[float] = None, + num_splits_key: Optional[int] = None, +): + grad_query = torch.empty_like(query) + grad_key = torch.empty_like(key) + grad_value = torch.empty_like(value) + + if bias is not None: + lastDim = bias.size(-1) + lastDimAligned = lastDim if lastDim % 16 == 0 else lastDim + 16 - lastDim % 16 + new_sizes = list(bias.size()) + new_sizes[-1] = lastDimAligned + grad_bias = torch.empty(new_sizes, dtype=bias.dtype, device=bias.device) + grad_bias = grad_bias[..., :lastDim] + else: + grad_bias = torch.empty((), device=query.device) + + return grad_query, grad_key, grad_value, grad_bias + + +@register_meta([aten._scaled_mm.default]) +def meta_scaled_mm( + self: torch.Tensor, + mat2: torch.Tensor, + bias: Optional[torch.Tensor] = None, + out_dtype: Optional[torch.dtype] = None, + scale_a: Optional[torch.Tensor] = None, + scale_b: Optional[torch.Tensor] = None, + scale_result: Optional[torch.Tensor] = None, + use_fast_accum: bool = False, +): + def is_row_major(stride): + return stride[0] > stride[1] and stride[1] == 1 + + def is_col_major(shape, stride): + return stride[0] == 1 and stride[1] == shape[0] 
+ + def is_fp8_type(dtype): + return dtype in (torch.float8_e4m3fn, torch.float8_e5m2) + + torch._check( + self.dim() == 2 and mat2.dim() == 2, + lambda: f"Inputs must be 2D but got self.dim()={self.dim()} and mat2.dim()={mat2.dim()}", + ) + torch._check( + is_row_major(self.stride()), + lambda: "self must be row_major", + ) + torch._check( + is_col_major(mat2.shape, mat2.stride()), + lambda: "mat2 must be col_major", + ) + torch._check( + self.size(1) % 16 == 0, + lambda: f"Expected self.size(0) to be divisible by 16, but got self.size(1)={self.size(1)}", + ) + torch._check( + mat2.size(0) % 16 == 0 and mat2.size(1) % 16 == 0, + lambda: f"Expected both dimensions of mat2 to be divisble by 16 but got {mat2.shape}", + ) + torch._check( + is_fp8_type(self.dtype) and is_fp8_type(mat2.dtype), + lambda: f"Expected both inputs to be fp8 types but got self.dtype={self.dtype} and mat2.dtype={mat2.dtype}", + ) + _out_dtype = out_dtype if out_dtype is not None else self.dtype + return torch.empty( + self.size(0), mat2.size(1), dtype=_out_dtype, device=self.device + ), torch.empty((), dtype=torch.float32, device=self.device) + + +@register_meta([aten.scatter_reduce.two, aten.scatter_reduce.two_out]) +@out_wrapper() +def meta_scatter_reduce_two(self, dim, index, src, reduce, include_self=True): + scatter_meta_impl(self, dim, index, src, reduce, use_new_options=True) + return self.new_empty(self.shape) + + +@register_meta(aten.scatter_reduce_.two) +def meta_scatter_reduce__two(self, dim, index, src, reduce, include_self=True): + scatter_meta_impl(self, dim, index, src, reduce, use_new_options=True) + return self + + +@register_meta([aten.multinomial.default, aten.multinomial.out]) +@out_wrapper() +def meta_multinomial(input, num_samples, replacement=False, *, generator=None): + torch._check( + 0 < input.dim() <= 2, + lambda: f"The probabilty distributions dimensions must be 1 or 2, but got {input.dim()}", + ) + if input.dim() == 1: + return torch.empty(num_samples, 
dtype=torch.long, device=input.device) + return torch.empty( + input.size(0), num_samples, dtype=torch.long, device=input.device + ) + + +def multiply_integers(vs): + r = 1 + for v in vs: + r *= v + return r + + +def upsample_common_check(input_size, output_size, num_spatial_dims): + torch._check( + len(output_size) == num_spatial_dims, + lambda: f"It is expected output_size equals to {num_spatial_dims}, but got size {len(output_size)}", + ) + expected_input_dims = num_spatial_dims + 2 # N, C, ... + torch._check( + len(input_size) == expected_input_dims, + lambda: f"It is expected input_size equals to {expected_input_dims}, but got size {len(input_size)}", + ) + + torch._check( + all(s > 0 for s in input_size[2:]) and all(s > 0 for s in output_size), + lambda: f"Input and output sizes should be greater than 0, but got " + f"input size {input_size} and output size {output_size}", + ) + + nbatch, channels = input_size[:2] + return (nbatch, channels, *output_size) + + +@register_meta( + [aten.upsample_nearest1d.default, aten._upsample_nearest_exact1d.default] +) +def upsample_nearest1d(input, output_size, scales=None): + torch._check( + input.numel() != 0 or multiply_integers(input.size()[1:]), + lambda: f"Non-empty 3D data tensor expected but got a tensor with sizes {input.size()}", + ) + full_output_size = upsample_common_check( + input.size(), output_size, num_spatial_dims=1 + ) + return input.new_empty(full_output_size).to( + memory_format=utils.suggest_memory_format(input) + ) + + +@register_meta( + [aten.upsample_nearest2d.default, aten._upsample_nearest_exact2d.default] +) +def upsample_nearest2d(input, output_size, scales_h=None, scales_w=None): + torch._check( + input.numel() != 0 or multiply_integers(input.size()[1:]), + lambda: f"Non-empty 4D data tensor expected but got a tensor with sizes {input.size()}", + ) + full_output_size = upsample_common_check( + input.size(), output_size, num_spatial_dims=2 + ) + output = input.new_empty(full_output_size) + + # 
convert output to correct memory format, if necessary + memory_format = utils.suggest_memory_format(input) + + # following "heuristic: only use channels_last path when it's faster than the contiguous path" + _, n_channels, _, _ = input.shape + if input.device.type == "cuda" and n_channels < 4: + memory_format = torch.contiguous_format + + output = output.contiguous(memory_format=memory_format) + + return output + + +@register_meta( + [ + aten.upsample_nearest2d_backward.default, + aten._upsample_nearest_exact2d_backward.default, + ] +) +def upsample_nearest2d_backward( + grad_output: Tensor, + output_size: Sequence[Union[int, torch.SymInt]], + input_size: Sequence[Union[int, torch.SymInt]], + scales_h: Optional[float] = None, + scales_w: Optional[float] = None, +): + full_output_size = upsample_common_check( + input_size, output_size, num_spatial_dims=2 + ) + torch._check( + grad_output.ndim == 4, + lambda: f"Expected grad_output to be a tensor of dimension 4 but got: dimension {grad_output.ndim}", + ) + for i in range(4): + torch._check( + grad_output.size(i) == full_output_size[i], + lambda: ( + f"Expected grad_output to have the same shape as output;" + f" output.size({i}) = {full_output_size[i]}" + f" but got grad_output.size({i}) = {grad_output.size(i)}" + ), + ) + + return grad_output.new_empty(input_size).to( + memory_format=utils.suggest_memory_format(grad_output) + ) # type: ignore[call-overload] + + +@register_meta( + [aten.upsample_nearest3d.default, aten._upsample_nearest_exact3d.default] +) +def upsample_nearest3d(input, output_size, scales_d=None, scales_h=None, scales_w=None): + torch._check( + input.numel() != 0 or multiply_integers(input.size()[1:]), + lambda: f"Non-empty 5D data tensor expected but got a tensor with sizes {input.size()}", + ) + full_output_size = upsample_common_check( + input.size(), output_size, num_spatial_dims=3 + ) + return input.new_empty(full_output_size).to( + memory_format=utils.suggest_memory_format(input) + ) + + 
+@register_meta( + [ + aten.sort.default, + aten.sort.stable, + aten.sort.values, + aten.sort.values_stable, + ] +) +def meta_sort(self, stable=None, dim=-1, descending=False, values=None, indices=None): + v, i = torch.empty_like(self), torch.empty_like(self, dtype=torch.int64) + if values is not None and indices is not None: + assert isinstance(values, TensorLike) + assert isinstance(indices, TensorLike) + # Makes sure values and indices have the same strides. For cases where + # these have different shapes, like (5, 10, 5) and (0) in msort. + out_shape = v.shape + out_stride = v.stride() + values = _maybe_resize_out(values, out_shape) + indices = _maybe_resize_out(indices, out_shape) + values.as_strided_(out_shape, out_stride) + indices.as_strided_(out_shape, out_stride) + _safe_copy_out(copy_from=v, copy_to=values) # type: ignore[arg-type] + _safe_copy_out(copy_from=i, copy_to=indices) # type: ignore[arg-type] + return values, indices + return v, i + + +@register_meta(aten.argsort.stable) +def meta_argsort(self, *, stable, dim=-1, descending=False): + return meta_sort(self, stable=stable, dim=dim, descending=descending)[1] + + +def rnn_cell_checkSizes( + input_gates, hidden_gates, input_bias, hidden_bias, factor, prev_hidden +): + torch._check(input_gates.ndim == 2, lambda: f"{input_gates.ndim} != 2") + torch._check( + input_gates.shape == hidden_gates.shape, + lambda: f"{input_gates.shape} != {hidden_gates.shape}", + ) + gates_size = input_gates.size(1) + if input_bias is not None: + torch._check(input_bias.ndim == 1, lambda: f"{input_bias.ndim} != 1") + torch._check( + input_bias.numel() == gates_size, + lambda: f"{input_bias.numel()} != {gates_size}", + ) + torch._check( + input_bias.shape == hidden_bias.shape, + lambda: f"{input_bias.shape} != {hidden_bias.shape}", + ) + torch._check(prev_hidden.ndim == 2, lambda: f"{prev_hidden.ndim} != 2") + expected_prev_hidden_numel = input_gates.size(0) * gates_size // factor + torch._check( + prev_hidden.numel() == 
expected_prev_hidden_numel, + lambda: f"{prev_hidden.numel()} != {input_gates.size(0)} * {gates_size} // {factor} (aka {expected_prev_hidden_numel})", + ) + torch._check( + all( + x.device == input_gates.device + for x in [hidden_gates, input_bias, hidden_bias, prev_hidden] + ), + lambda: "expected all inputs to be same device", + ) + + +@register_meta(aten._thnn_fused_lstm_cell.default) +def _thnn_fused_lstm_cell_meta( + input_gates, hidden_gates, cx, input_bias=None, hidden_bias=None +): + rnn_cell_checkSizes(input_gates, hidden_gates, input_bias, hidden_bias, 4, cx) + workspace = torch.empty_like(input_gates, memory_format=torch.contiguous_format) + hy = torch.empty_like(cx, memory_format=torch.contiguous_format) + cy = torch.empty_like(cx, memory_format=torch.contiguous_format) + return (hy, cy, workspace) + + +@register_meta(aten._cudnn_rnn.default) +def _cudnn_rnn( + input, + weight, + weight_stride0, + weight_buf, + hx, + cx, + mode, + hidden_size, + proj_size, + num_layers, + batch_first, + dropout, + train, + bidirectional, + batch_sizes, + dropout_state, +): + is_input_packed = len(batch_sizes) != 0 + if is_input_packed: + seq_length = len(batch_sizes) + mini_batch = batch_sizes[0] + batch_sizes_sum = input.shape[0] + else: + seq_length = input.shape[1] if batch_first else input.shape[0] + mini_batch = input.shape[0] if batch_first else input.shape[1] + batch_sizes_sum = -1 + + num_directions = 2 if bidirectional else 1 + out_size = proj_size if proj_size != 0 else hidden_size + if is_input_packed: + out_shape = [batch_sizes_sum, out_size * num_directions] + else: + out_shape = ( + [mini_batch, seq_length, out_size * num_directions] + if batch_first + else [seq_length, mini_batch, out_size * num_directions] + ) + output = input.new_empty(out_shape) + + cell_shape = [num_layers * num_directions, mini_batch, hidden_size] + if cx is None: + cy = torch.empty(0, device=input.device) + else: + cy = cx.new_empty(cell_shape) + + hy = hx.new_empty([num_layers * 
num_directions, mini_batch, out_size]) + + # TODO: Query cudnnGetRNNTrainingReserveSize (expose to python) + reserve_shape = 0 if train else 0 + reserve = input.new_empty(reserve_shape, dtype=torch.uint8) + + return output, hy, cy, reserve, weight_buf + + +@register_meta(aten.mkldnn_rnn_layer.default) +def mkldnn_rnn_layer( + input, + w0, + w1, + w2, + w3, + hx_, + cx_, + reverse, + batch_sizes, + mode, + hidden_size, + num_layers, + has_biases, + bidirectional, + batch_first, + train, +): + seq_length = input.shape[1] if batch_first else input.shape[0] + mini_batch = input.shape[0] if batch_first else input.shape[1] + output_chanels = hidden_size + out_shape = ( + [mini_batch, seq_length, output_chanels] + if batch_first + else [seq_length, mini_batch, output_chanels] + ) + output = input.new_empty(out_shape) + if hx_ is None: + hy = torch.empty(0, device=input.device) + else: + hy = hx_.new_empty(hx_.shape) + if cx_ is None: + cy = torch.empty(0, device=input.device) + else: + cy = cx_.new_empty(cx_.shape) + workspace = torch.empty(0, device=input.device, dtype=torch.uint8) + return output, hy, cy, workspace + + +def zero_numel_check_dims(self, dim, fn_name): + if self.ndim == 0: + torch._check_index( + dim == 0 or dim == -1, + lambda: f"{fn_name}: Expected reduction dim -1 or 0 for scalar but got {dim}", + ) + else: + torch._check_index( + self.size(dim) != 0, + lambda: f"{fn_name}: Expected reduction dim {dim} to have non-zero size.", + ) + + +# From aten/src/ATen/native/ReduceOps.cpp +def check_argmax_argmin(name, self, dim): + if dim is not None: + dim = maybe_wrap_dim(dim, self.dim()) + zero_numel_check_dims(self, dim, name) + else: + torch._check( + self.numel() != 0, + lambda: f"{name}: Expected reduction dim to be specified for input.numel() == 0.", + ) + + +@register_meta([aten.argmax.default, aten.argmin.default]) +def argmax_argmin_meta(self, dim=None, keepdim=False): + check_argmax_argmin("argmax", self, dim) + dims = utils.reduction_dims(self.shape, 
(dim,) if dim is not None else None) + shape = _compute_reduction_shape(self, dims, keepdim) + return self.new_empty(shape, dtype=torch.int64) + + +@register_meta(aten.scalar_tensor.default) +def scalar_tensor(s, dtype=None, layout=None, device=None, pin_memory=None): + return torch.empty( + (), dtype=dtype, layout=layout, device=device, pin_memory=pin_memory + ) + + +@register_meta(aten.topk.default) +def topk_meta(self, k, dim=-1, largest=True, sorted=True): + # From aten/src/ATen/native/Sorting.cpp + dim = maybe_wrap_dim(dim, self.dim(), wrap_scalar=True) + torch._check( + k >= 0 and k <= (self.size(dim) if self.dim() > 0 else 1), + lambda: "selected index k out of range", + ) + sliceSize = 1 if self.dim() == 0 else self.size(dim) + torch._check(k >= 0 and k <= sliceSize, lambda: "k not in range for dimension") + + topKSize = list(self.shape) + if len(topKSize) > 0: + topKSize[dim] = k + return self.new_empty(topKSize), self.new_empty(topKSize, dtype=torch.int64) + + +legacy_contiguous_memory_format = torch.contiguous_format + + +# From aten/src/ATen/native/cuda/RNN.cu +def checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace): + defined_grad = grad_hy if grad_hy is not None else grad_cy + torch._check(defined_grad.dim() == 2, lambda: "") + exp_size = defined_grad.size() + if grad_hy is not None: + torch._check(grad_hy.size() == exp_size, lambda: "") + if grad_cy is not None: + torch._check(grad_cy.size() == exp_size, lambda: "") + torch._check(cx.size() == exp_size, lambda: "") + torch._check(cy.size() == exp_size, lambda: "") + torch._check(workspace.dim() == 2, lambda: "") + torch._check(workspace.numel() == exp_size[0] * exp_size[1] * 4, lambda: "") + + +# From aten/src/ATen/native/cuda/RNN.cu +@register_meta(aten._thnn_fused_lstm_cell_backward_impl.default) +def _thnn_fused_lstm_cell_backward_impl(grad_hy, grad_cy, cx, cy, workspace, has_bias): + if grad_hy is None and grad_cy is None: + return None, None, None + checkLSTMBackwardSizes(grad_hy, 
grad_cy, cx, cy, workspace) + grad_gates = torch.empty_like( + workspace, memory_format=legacy_contiguous_memory_format + ) + grad_cx = torch.empty_like(cx, memory_format=legacy_contiguous_memory_format) + grad_bias = grad_gates.sum(0, keepdim=False) if has_bias else None + return grad_gates, grad_cx, grad_bias + + +# From aten/src/ATen/native/mps/operations/Linear.mm +@register_meta(aten.linear_backward.default) +def linear_backward(input_, grad_output_, weight_, output_mask): + grad_input = None + grad_weight = None + grad_bias = None + if output_mask[0]: + grad_input = grad_output_.new_empty(input_.size()) + if output_mask[1] or output_mask[2]: + grad_weight = grad_output_.new_empty((grad_output_.size(-1), input_.size(-1))) + grad_bias = grad_output_.new_empty(grad_output_.size(-1)) + return (grad_input, grad_weight, grad_bias) + + +@register_meta(aten.pixel_shuffle.default) +def meta_pixel_shuffle(self, upscale_factor): + assert ( + len(self.shape) > 2 and self.shape[-3] % (upscale_factor * upscale_factor) == 0 + ), f"Invalid input shape for pixel_shuffle: {self.shape} with upscale_factor = {upscale_factor}" + + def is_channels_last(ten): + return torch._prims_common.suggest_memory_format(ten) == torch.channels_last + + def pick_memory_format(): + if is_channels_last(self): + if device_hint(self) == "cuda": + return torch.contiguous_format + else: + return torch.channels_last + elif self.is_contiguous(memory_format=torch.contiguous_format): + return torch.contiguous_format + elif self.is_contiguous(memory_format=torch.preserve_format): + return torch.preserve_format + + C = self.shape[-3] // (upscale_factor * upscale_factor) + Hr = self.shape[-2] * upscale_factor + Wr = self.shape[-1] * upscale_factor + out_shape = (*self.shape[:-3], C, Hr, Wr) + + out = self.new_empty(out_shape) + out = out.to(memory_format=pick_memory_format()) # type: ignore[call-overload] + return out + + +@register_meta(aten.mkldnn_rnn_layer_backward.default) +def 
@register_meta([aten.bucketize.Tensor, aten.bucketize.Tensor_out])
@out_wrapper()
def meta_bucketize(self, boundaries, *, out_int32=False, right=False):
    """Shape-only kernel for ``aten.bucketize``.

    The result has the same shape as ``self`` and holds bucket indices, so
    only the index dtype matters here: int32 when ``out_int32`` is set,
    int64 otherwise.  ``boundaries`` and ``right`` affect values, not shape.
    """
    if out_int32:
        index_dtype = torch.int32
    else:
        index_dtype = torch.int64
    result = torch.empty_like(self, dtype=index_dtype)
    return result.contiguous()
@register_meta(torch.ops.aten.transpose_)
def transpose_(self, dim0, dim1):
    """Meta kernel for in-place ``transpose_``.

    Swaps the sizes and strides of ``dim0`` and ``dim1`` via ``as_strided_``
    and returns ``self``.  Compressed-sparse layouts are rejected up front.
    """
    unsupported_layouts = (
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    )
    assert (
        self.layout not in unsupported_layouts
    ), f"torch.transpose_: in-place transposition is not supported for {self.layout} layout"

    rank = self.ndim
    dim0 = maybe_wrap_dim(dim0, rank)
    dim1 = maybe_wrap_dim(dim1, rank)

    # Transposing a dimension with itself is a no-op.
    if dim0 == dim1:
        return self

    new_size = list(self.size())
    new_stride = list(self.stride())
    new_size[dim0], new_size[dim1] = new_size[dim1], new_size[dim0]
    new_stride[dim0], new_stride[dim1] = new_stride[dim1], new_stride[dim0]

    self.as_strided_(new_size, new_stride)
    return self
def activate_meta():
    """Register the collected meta/decomposition functions as Meta-key kernels.

    Walks the global decomposition tables, picks the most specific entry per
    op, and installs it via ``OpOverload.py_impl(DispatchKey.Meta)`` — except
    for op categories that must not get a Python meta kernel (see the branch
    comments below).
    """
    # op overload -> chosen meta/decomp function.
    activate_meta_table = {}

    # For a given op, we pick the most specific decomp function from
    # global_decomp_table in the precedence order of meta > post_autograd > pre_autograd
    # NOTE(review): `type` shadows the builtin; left as-is (doc-only change).
    for type in ["meta", "post_autograd", "pre_autograd"]:
        registry = global_decomposition_table[type]

        for opo in registry:
            # First (highest-precedence) table to supply an op wins.
            if opo not in activate_meta_table:
                activate_meta_table[opo] = registry[opo]

    for op_overload, fn in activate_meta_table.items():
        # Don't register meta for HigherOrderOp's decomp.
        # We can reconsider this in the future, but in general,
        # the way you do a meta for a HigherOrderOp is different from
        # OpOverload.
        if isinstance(op_overload, torch._ops.HigherOrderOperator):
            continue
        assert isinstance(op_overload, OpOverload)

        op_overload.py_impl(torch._C.DispatchKey.Meta)(fn)

        if torch._C._dispatch_has_kernel_for_dispatch_key(
            op_overload.name(), "CompositeImplicitAutograd"
        ):
            # Internally, we shouldn't be registering meta kernels for any operators that
            # have CompositeImplicitAutograd kernels.
            # Instead, we should be letting those decompositions run, and writing meta kernels
            # only for the base operators.
            if op_overload in global_decomposition_table["meta"]:
                raise RuntimeError(
                    f"{op_overload} is a CompositeImplicitAutograd op, we shouldn't "
                    "register meta function for it. Instead, we should let the decomposition run and write "
                    "meta kernels for the base operators."
                )
            pass
        elif op_overload.is_view:
            # Attempting to register a python meta kernel for a view operator.
            # We shouldn't do this, because the output will report as not having aliased storages.
            # All view ops have meta kernels in C++ today, so we should use those instead.
            pass
        elif op_overload.name() in {
            "aten::empty_strided",  # causing infinite recursion, test_meta.py
            "aten::clone",  # causing infinite recursion
            "aten::_to_copy",  # causing infinite recursion, test_serialization.py -k test_tensor_subclass_getstate_overwrite  # noqa: B950
            "aten::copy_",  # Exception not raised, test_torch.py -k test_storage_meta_errors_cpu_int64  # noqa: B950
            "aten::constant_pad_nd",  # requires_grad mismatch, test_ops.py -k test_fake_crossref_backward_amp_istft_cuda_float32  # noqa: B950
            "aten::rot90",  # requires_grad mismatch! test_ops.py -k test_fake_crossref_backward_amp_rot90_cuda_float32  # noqa: B950
            "aten::as_strided_scatter",  # requires_grad mismatch, test_ops.py -k test_fake_crossref_backward_no_amp_as_strided_scatter_cuda_float32  # noqa: B950
        }:
            pass
        else:
            # Route namespaced ops to their dedicated backend libraries; the
            # generic library is the fallback for everything else.
            if "mkldnn::" in op_overload.name():
                _meta_lib_dont_use_me_use_register_meta_for_mkldnn.impl(op_overload, fn)
            elif "mkl::" in op_overload.name():
                _meta_lib_dont_use_me_use_register_meta_for_mkl.impl(op_overload, fn)
            elif "onednn::" in op_overload.name():
                _meta_lib_dont_use_me_use_register_meta_for_onednn.impl(op_overload, fn)
            elif "quantized::" in op_overload.name():
                _meta_lib_dont_use_me_use_register_meta_for_quantized.impl(
                    op_overload, fn
                )
            else:
                _meta_lib_dont_use_me_use_register_meta.impl(op_overload, fn)


# Perform the registrations at import time.
activate_meta()
def normalize_source_lines(sourcelines: List[str]) -> List[str]:
    """
    Re-indent a function's source lines so everything sits at or inside the
    indentation level of its ``def`` line.

    This lets comments and continued string literals that were captured at a
    shallower indentation (e.g. by ``inspect.getsourcelines``) parse cleanly.
    Lines are re-anchored to the ``def``'s leading whitespace; lines that
    already carry that exact prefix are left unchanged.

    Args:
        sourcelines: function source code, split into lines ('\\n' kept).
    Returns:
        A list of source lines aligned to the ``def`` line's indentation.
    """
    # Locate the first line whose stripped text begins with "def".
    def_index = next(
        (i for i, line in enumerate(sourcelines) if line.lstrip().startswith("def")),
        None,
    )

    # Lambdas have no "def" line; hand the source back untouched.  JIT-ing a
    # lambda errors later in `parse_def()`, so nothing more is needed here.
    if def_index is None:
        return sourcelines

    def_line = sourcelines[def_index]
    # Everything before the "def" token is the target indentation.
    indent = def_line.split("def")[0]

    def realign(line: str) -> str:
        # Drop one copy of the target indent (if present) and re-apply it,
        # which pushes under-indented lines in without double-indenting the
        # already-aligned ones.
        if line.startswith(indent):
            line = line[len(indent) :]
        return indent + line

    before = [realign(line) for line in sourcelines[:def_index]]
    after = [realign(line) for line in sourcelines[def_index + 1 :]]
    return before + [def_line] + after
@dataclasses.dataclass
class __PrinterOptions:
    # Mutable singleton holding the tensor-printing knobs (see
    # `set_printoptions` for the meaning of each field).
    precision: int = 4
    threshold: float = 1000
    edgeitems: int = 3
    linewidth: int = 80
    sci_mode: Optional[bool] = None


PRINT_OPTS = __PrinterOptions()


# We could use **kwargs, but this will give better docs
def set_printoptions(
    precision=None,
    threshold=None,
    edgeitems=None,
    linewidth=None,
    profile=None,
    sci_mode=None,
):
    r"""Set options for printing. Items shamelessly taken from NumPy

    Args:
        precision: Number of digits of precision for floating point output
            (default = 4).
        threshold: Total number of array elements which trigger summarization
            rather than full `repr` (default = 1000).
        edgeitems: Number of array items in summary at beginning and end of
            each dimension (default = 3).
        linewidth: The number of characters per line for the purpose of
            inserting line breaks (default = 80). Thresholded matrices will
            ignore this parameter.
        profile: Sane defaults for pretty printing. Can override with any of
            the above options. (any one of `default`, `short`, `full`)
        sci_mode: Enable (True) or disable (False) scientific notation. If
            None (default) is specified, the value is defined by
            `torch._tensor_str._Formatter`. This value is automatically chosen
            by the framework.

    Example::

        >>> # Limit the precision of elements
        >>> torch.set_printoptions(precision=2)
        >>> torch.tensor([1.12345])
        tensor([1.12])
        >>> # Restore defaults
        >>> torch.set_printoptions(profile='default')
    """
    presets = {
        "default": {"precision": 4, "threshold": 1000, "edgeitems": 3, "linewidth": 80},
        "short": {"precision": 2, "threshold": 1000, "edgeitems": 2, "linewidth": 80},
        "full": {"precision": 4, "threshold": inf, "edgeitems": 3, "linewidth": 80},
    }
    if profile is not None:
        # Unknown profile strings are silently ignored, matching the
        # historical if/elif behavior.
        for field_name, preset_value in presets.get(profile, {}).items():
            setattr(PRINT_OPTS, field_name, preset_value)

    # Explicit keyword arguments override whatever the profile set.
    overrides = (
        ("precision", precision),
        ("threshold", threshold),
        ("edgeitems", edgeitems),
        ("linewidth", linewidth),
    )
    for field_name, override in overrides:
        if override is not None:
            setattr(PRINT_OPTS, field_name, override)
    # NB: sci_mode is assigned unconditionally — omitting it resets the
    # option to "auto" (None) rather than leaving it untouched.
    PRINT_OPTS.sci_mode = sci_mode


def get_printoptions() -> Dict[str, Any]:
    r"""Return the current print options as a dictionary that can be passed
    back to :func:`set_printoptions` as ``**kwargs``.
    """
    return dataclasses.asdict(PRINT_OPTS)
def tensor_totype(t):
    """Cast ``t`` to a wide float dtype for safe range arithmetic."""
    # MPS has no float64 support, so stay at float32 there; elsewhere use
    # double so min/max/ratio computations below don't overflow (HalfTensor
    # overflows at 1e8).
    dtype = torch.float if t.is_mps else torch.double
    return t.to(dtype=dtype)


class _Formatter:
    """Computes a per-tensor element format (width, fixed vs. scientific).

    One instance is built per tensor (or per real/imag component) and then
    ``format`` is applied to each scalar during string assembly.
    """

    def __init__(self, tensor):
        self.floating_dtype = tensor.dtype.is_floating_point
        # int_mode: all (float) values are integral, so print "1." style.
        self.int_mode = True
        self.sci_mode = False
        self.max_width = 1

        with torch.no_grad():
            tensor_view = tensor.reshape(-1)

        if not self.floating_dtype:
            # Integer/bool/etc.: width is just the longest str() of a value.
            for value in tensor_view:
                value_str = f"{value}"
                self.max_width = max(self.max_width, len(value_str))

        else:
            # Only finite nonzero values participate in range analysis;
            # zeros/inf/nan don't influence fixed-vs-scientific choice.
            nonzero_finite_vals = torch.masked_select(
                tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0)
            )

            if nonzero_finite_vals.numel() == 0:
                # no valid number, do nothing
                return

            # Convert to double for easy calculation. HalfTensor overflows with 1e8, and there's no div() on CPU.
            nonzero_finite_abs = tensor_totype(nonzero_finite_vals.abs())
            nonzero_finite_min = tensor_totype(nonzero_finite_abs.min())
            nonzero_finite_max = tensor_totype(nonzero_finite_abs.max())

            # Any fractional value disables int_mode.
            for value in nonzero_finite_vals:
                if value != torch.ceil(value):
                    self.int_mode = False
                    break

            if self.int_mode:
                # in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites
                # to indicate that the tensor is of floating type. add 1 to the len to account for this.
                if (
                    nonzero_finite_max / nonzero_finite_min > 1000.0
                    or nonzero_finite_max > 1.0e8
                ):
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = f"{value:.0f}"
                        # +1 for the trailing "." appended in format().
                        self.max_width = max(self.max_width, len(value_str) + 1)
            else:
                # Check if scientific representation should be used.
                if (
                    nonzero_finite_max / nonzero_finite_min > 1000.0
                    or nonzero_finite_max > 1.0e8
                    or nonzero_finite_min < 1.0e-4
                ):
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))

        # A user-forced sci_mode always wins over the heuristic above.
        if PRINT_OPTS.sci_mode is not None:
            self.sci_mode = PRINT_OPTS.sci_mode

    def width(self):
        """Column width every formatted value is right-padded to."""
        return self.max_width

    def format(self, value):
        """Format one Python scalar according to the chosen mode, right-aligned."""
        if self.floating_dtype:
            if self.sci_mode:
                ret = f"{{:{self.max_width}.{PRINT_OPTS.precision}e}}".format(value)
            elif self.int_mode:
                ret = f"{value:.0f}"
                # Trailing "." marks the value as floating point; inf/nan
                # already read as non-integers so they skip it.
                if not (math.isinf(value) or math.isnan(value)):
                    ret += "."
            else:
                ret = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
        else:
            ret = f"{value}"
        return (self.max_width - len(ret)) * " " + ret
# formatter2 is only used for printing complex tensors.
# For complex tensors, formatter1 and formatter2 are the formatters for tensor.real
# and tensor.imag respectively
def _tensor_str_with_formatter(self, indent, summarize, formatter1, formatter2=None):
    """Recursively render ``self`` as a nested bracketed string.

    0-d tensors delegate to ``_scalar_str``, 1-d to ``_vector_str``; higher
    ranks recurse over the leading dimension, optionally summarizing with
    "..." when the dimension exceeds ``2 * PRINT_OPTS.edgeitems``.
    """
    dim = self.dim()

    if dim == 0:
        return _scalar_str(self, formatter1, formatter2)

    if dim == 1:
        return _vector_str(self, indent, summarize, formatter1, formatter2)

    if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        # Keep only the first/last `edgeitems` slices with an ellipsis between.
        slices = (
            [
                _tensor_str_with_formatter(
                    self[i], indent + 1, summarize, formatter1, formatter2
                )
                for i in range(0, PRINT_OPTS.edgeitems)
            ]
            + ["..."]
            + [
                _tensor_str_with_formatter(
                    self[i], indent + 1, summarize, formatter1, formatter2
                )
                for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))
            ]
        )
    else:
        slices = [
            _tensor_str_with_formatter(
                self[i], indent + 1, summarize, formatter1, formatter2
            )
            for i in range(0, self.size(0))
        ]

    # Separate slices with one blank line per remaining dimension, indented
    # one column past the opening bracket.
    tensor_str = ("," + "\n" * (dim - 1) + " " * (indent + 1)).join(slices)
    return "[" + tensor_str + "]"
"[]" + + if self.has_names(): + # There are two main codepaths (possibly more) that tensor printing goes through: + # - tensor data can fit comfortably on screen + # - tensor data needs to be summarized + # Some of the codepaths don't fully support named tensors, so we send in + # an unnamed tensor to the formatting code as a workaround. + self = self.rename(None) + + summarize = self.numel() > PRINT_OPTS.threshold + + if self._is_zerotensor(): + self = self.clone() + + # handle the negative bit + if self.is_neg(): + self = self.resolve_neg() + + if self.dtype in [ + torch.float16, + torch.bfloat16, + torch.float8_e5m2, + torch.float8_e5m2fnuz, + torch.float8_e4m3fn, + torch.float8_e4m3fnuz, + ]: + self = self.float() + + if self.dtype is torch.complex32: + self = self.cfloat() + + if self.dtype.is_complex: + # handle the conjugate bit + self = self.resolve_conj() + real_formatter = _Formatter( + get_summarized_data(self.real) if summarize else self.real + ) + imag_formatter = _Formatter( + get_summarized_data(self.imag) if summarize else self.imag + ) + return _tensor_str_with_formatter( + self, indent, summarize, real_formatter, imag_formatter + ) + else: + formatter = _Formatter(get_summarized_data(self) if summarize else self) + return _tensor_str_with_formatter(self, indent, summarize, formatter) + + +def _add_suffixes(tensor_str, suffixes, indent, force_newline): + tensor_strs = [tensor_str] + last_line_len = len(tensor_str) - tensor_str.rfind("\n") + 1 + for suffix in suffixes: + suffix_len = len(suffix) + if force_newline or last_line_len + suffix_len + 2 > PRINT_OPTS.linewidth: + tensor_strs.append(",\n" + " " * indent + suffix) + last_line_len = indent + suffix_len + force_newline = False + else: + tensor_strs.append(", " + suffix) + last_line_len += suffix_len + 2 + tensor_strs.append(")") + return "".join(tensor_strs) + + +def get_summarized_data(self): + dim = self.dim() + if dim == 0: + return self + if dim == 1: + if self.size(0) > 2 * 
def _str_intern(inp, *, tensor_contents=None):
    """Build the full ``repr`` string for ``inp``.

    Dispatches over tensor kind (functorch-wrapped, sparse COO, compressed
    sparse, quantized, nested, functional, meta/fake, dense) to produce the
    data string, then appends suffixes (device, size, dtype, grad_fn, ...).
    ``tensor_contents`` overrides the rendered data when provided.
    """
    if torch._C._functorch.is_functorch_wrapped_tensor(inp):
        return _functorch_wrapper_str_intern(inp, tensor_contents=tensor_contents)
    is_plain_tensor = type(inp) is torch.Tensor or type(inp) is torch.nn.Parameter
    if inp.is_nested:
        prefix = "nested_tensor("
    elif is_plain_tensor:
        prefix = "tensor("
    else:
        # Subclasses print under their own class name.
        prefix = f"{type(inp).__name__}("
    indent = len(prefix)
    suffixes = []
    custom_contents_provided = tensor_contents is not None
    if custom_contents_provided:
        tensor_str = tensor_contents

    # This is used to extract the primal value and thus disable the forward AD
    # within this function.
    # TODO(albanD) This needs to be updated when more than one level is supported
    self, tangent = torch.autograd.forward_ad.unpack_dual(inp)

    # Note [Print tensor device]:
    # A general logic here is we only print device when it doesn't match
    # the device specified in default tensor type.
    # Currently torch.set_default_tensor_type() only supports CPU/CUDA, thus
    # torch._C._get_default_device() only returns either cpu or cuda.
    # In other cases, we don't have a way to set them as default yet,
    # and we should always print out device for them.
    if (
        self.device.type != torch._C._get_default_device()
        or (
            self.device.type == "cuda"
            and torch.cuda.current_device() != self.device.index
        )
        or (self.device.type == "mps")
    ):
        suffixes.append("device='" + str(self.device) + "'")

    # Tensor printing performs tensor operations like slice, indexing, etc to make it in a
    # representable format. These operations on ipu/xla/lazy/mtia tensor results in compilations. Hence,
    # to avoid compilations, copying the tensor to cpu before printing.
    if self.device.type in ["xla", "lazy", "ipu", "mtia"]:
        self = self.to("cpu")

    # TODO: add an API to map real -> complex dtypes
    _default_complex_dtype = (
        torch.cdouble if torch.get_default_dtype() == torch.double else torch.cfloat
    )
    # dtype suffix is omitted when the dtype is inferable from the values.
    has_default_dtype = self.dtype in (
        torch.get_default_dtype(),
        _default_complex_dtype,
        torch.int64,
        torch.bool,
    )
    if self.is_sparse:
        # Sparse COO: render indices and values as two stacked sub-tensors.
        suffixes.append("size=" + str(tuple(self.shape)))
        from torch._subclasses.fake_tensor import FakeTensor

        if not self.is_meta and not isinstance(self, FakeTensor):
            suffixes.append("nnz=" + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        if not custom_contents_provided:
            indices_prefix = "indices=tensor("
            indices = self._indices().detach()
            indices_str = _tensor_str(indices, indent + len(indices_prefix))
            if indices.numel() == 0:
                indices_str += ", size=" + str(tuple(indices.shape))
            values_prefix = "values=tensor("
            values = self._values().detach()
            values_str = _tensor_str(values, indent + len(values_prefix))
            if values.numel() == 0:
                values_str += ", size=" + str(tuple(values.shape))
            tensor_str = (
                indices_prefix
                + indices_str
                + "),\n"
                + " " * indent
                + values_prefix
                + values_str
                + ")"
            )
    elif self.layout in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }:
        # Compressed sparse: render compressed/plain index tensors plus values.
        suffixes.append("size=" + str(tuple(self.shape)))
        suffixes.append("nnz=" + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        if not custom_contents_provided:
            compressed_indices_method, plain_indices_method = {
                torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
                torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
                torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
                torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
            }[self.layout]
            if self.layout in {torch.sparse_csr, torch.sparse_bsr}:
                cdimname, pdimname = "row", "column"
            else:
                cdimname, pdimname = "column", "row"
            compressed_indices_prefix = f"c{cdimname[:3]}_indices=tensor("
            compressed_indices = compressed_indices_method(self).detach()
            compressed_indices_str = _tensor_str(
                compressed_indices, indent + len(compressed_indices_prefix)
            )
            if compressed_indices.numel() == 0:
                compressed_indices_str += ", size=" + str(
                    tuple(compressed_indices.shape)
                )
            plain_indices_prefix = f"{pdimname[:3]}_indices=tensor("
            plain_indices = plain_indices_method(self).detach()
            plain_indices_str = _tensor_str(
                plain_indices, indent + len(plain_indices_prefix)
            )
            if plain_indices.numel() == 0:
                plain_indices_str += ", size=" + str(tuple(plain_indices.shape))
            values_prefix = "values=tensor("
            values = self.values().detach()
            values_str = _tensor_str(values, indent + len(values_prefix))
            if values.numel() == 0:
                values_str += ", size=" + str(tuple(values.shape))
            tensor_str = (
                compressed_indices_prefix
                + compressed_indices_str
                + "),\n"
                + " " * indent
                + plain_indices_prefix
                + plain_indices_str
                + "),\n"
                + " " * indent
                + values_prefix
                + values_str
                + ")"
            )
    elif self.is_quantized:
        # Quantized: print the dequantized values plus quantization params.
        suffixes.append("size=" + str(tuple(self.shape)))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        suffixes.append("quantization_scheme=" + str(self.qscheme()))
        if (
            self.qscheme() == torch.per_tensor_affine
            or self.qscheme() == torch.per_tensor_symmetric
        ):
            suffixes.append("scale=" + str(self.q_scale()))
            suffixes.append("zero_point=" + str(self.q_zero_point()))
        elif (
            self.qscheme() == torch.per_channel_affine
            or self.qscheme() == torch.per_channel_symmetric
            or self.qscheme() == torch.per_channel_affine_float_qparams
        ):
            suffixes.append("scale=" + str(self.q_per_channel_scales()))
            suffixes.append("zero_point=" + str(self.q_per_channel_zero_points()))
            suffixes.append("axis=" + str(self.q_per_channel_axis()))
        if not custom_contents_provided:
            tensor_str = _tensor_str(self.dequantize(), indent)
    elif self.is_nested:
        if not custom_contents_provided:
            # Render each component tensor's repr, indented inside brackets.
            def indented_str(s, indent):
                return "\n".join(f"  {line}" for line in s.split("\n"))

            strs = ",\n".join(
                indented_str(str(t), indent + 1)
                for t in torch.ops.aten.unbind.int(self, 0)
            )
            tensor_str = f"[\n{strs}\n]"
    elif torch._is_functional_tensor(self):
        prefix = "_to_functional_tensor("
        tensor_str = repr(torch._from_functional_tensor(self))
    else:
        # Circular import problem, so we import it here
        from torch._subclasses.fake_tensor import FakeTensor

        if self.is_meta or isinstance(self, FakeTensor):
            suffixes.append("size=" + str(tuple(self.shape)))
            if self.dtype != torch.get_default_dtype():
                suffixes.append("dtype=" + str(self.dtype))
            # TODO: This implies that ellipses is valid syntax for allocating
            # a meta tensor or FakeTensor, which it could be, but it isn't right now
            if not custom_contents_provided:
                tensor_str = "..."
        else:
            if self.numel() == 0 and not self.is_sparse:
                # Explicitly print the shape if it is not (0,), to match NumPy behavior
                if self.dim() != 1:
                    suffixes.append("size=" + str(tuple(self.shape)))

                # In an empty tensor, there are no elements to infer if the dtype
                # should be int64, so it must be shown explicitly.
                if self.dtype != torch.get_default_dtype():
                    suffixes.append("dtype=" + str(self.dtype))
                if not custom_contents_provided:
                    tensor_str = "[]"
            else:
                if not PRINT_OPTS.edgeitems:
                    suffixes.append("size=" + str(tuple(self.shape)))

                if not has_default_dtype:
                    suffixes.append("dtype=" + str(self.dtype))

                if not custom_contents_provided:
                    if self.layout != torch.strided:
                        tensor_str = _tensor_str(self.to_dense(), indent)
                    else:
                        tensor_str = _tensor_str(self, indent)

        if self.layout != torch.strided:
            suffixes.append("layout=" + str(self.layout))

    # Use inp here to get the original grad_fn and not the one generated by the forward grad
    # unpacking.
    grad_fn_name = None
    try:
        grad_fn = inp.grad_fn
    except RuntimeError:
        # Accessing the grad_fn calls rebasing logic which would cause an error
        # if that tensor is a view created in no-grad mode modified in-place in
        # no-grad mode. See: https://github.com/pytorch/pytorch/issues/99968
        grad_fn_name = "Invalid"

    if grad_fn_name is None and grad_fn is not None:
        grad_fn_name = type(grad_fn).__name__
        if grad_fn_name == "CppFunction":
            grad_fn_name = grad_fn.name().rsplit("::", 1)[-1]

    if grad_fn_name is not None:
        suffixes.append(f"grad_fn=<{grad_fn_name}>")
    elif inp.requires_grad:
        suffixes.append("requires_grad=True")

    if self.has_names():
        suffixes.append(f"names={self.names}")

    if tangent is not None:
        suffixes.append(f"tangent={tangent}")

    string_repr = _add_suffixes(
        prefix + tensor_str, suffixes, indent, force_newline=self.is_sparse
    )

    # Check if this instance is flagged as a parameter and change the repr accordingly.
    # Unfortunately, this function has to be aware of this detail.
    # NB: This is currently skipped for plain tensor parameters to maintain BC. In the future,
    # this should be done for those as well to produce a valid repr.
    if isinstance(self, torch.nn.Parameter) and not is_plain_tensor:
        string_repr = f"Parameter({string_repr})"

    return string_repr


def _functorch_wrapper_str_intern(tensor, *, tensor_contents=None):
    """Build the repr for a functorch-wrapped tensor (batched / grad-tracking
    / functional), showing the wrapper level and the unwrapped value."""
    level = torch._C._functorch.maybe_get_level(tensor)
    assert level != -1

    if torch._C._functorch.is_functionaltensor(tensor):
        # Since we're unwrapping the FunctionalTensorWrapper, we need to make sure
        # that it's up to date first
        torch._sync(tensor)

    value = torch._C._functorch.get_unwrapped(tensor)
    value_repr = repr(value)

    indented_value_repr = textwrap.indent(value_repr, " " * 4)
    if torch._C._functorch.is_batchedtensor(tensor):
        bdim = torch._C._functorch.maybe_get_bdim(tensor)
        assert bdim != -1
        return (
            f"BatchedTensor(lvl={level}, bdim={bdim}, value=\n"
            f"{indented_value_repr}\n"
            f")"
        )
    if torch._C._functorch.is_gradtrackingtensor(tensor):
        return (
            f"GradTrackingTensor(lvl={level}, value=\n" f"{indented_value_repr}\n" f")"
        )
    if torch._C._functorch.is_functionaltensor(tensor):
        return f"FunctionalTensor(lvl={level}, value=\\\n{value_repr})"

    raise ValueError("We don't know how to print this, please file us an issue")


def _str(self, *, tensor_contents=None):
    """Public entry point for tensor repr: renders with autograd, dispatch
    modes, and functorch disabled so printing itself has no side effects."""
    with torch.no_grad(), torch.utils._python_dispatch._disable_current_modes():
        # RAII-style guard; holding the object keeps functorch disabled for
        # the duration of this frame.
        guard = torch._C._DisableFuncTorch()
        return _str_intern(self, tensor_contents=tensor_contents)
{argname: description}. + Input: + (' weight (Tensor): a weight tensor\n' + + ' Some optional description') + Output: { + 'weight': \ + 'weight (Tensor): a weight tensor\n Some optional description' + } + """ + # Split on exactly 4 spaces after a newline + regx = re.compile(r"\n\s{4}(?!\s)") + kwargs = [section.strip() for section in regx.split(desc)] + kwargs = [section for section in kwargs if len(section) > 0] + return {desc.split(" ")[0]: desc for desc in kwargs} + + +def merge_dicts(*dicts): + return {x: d[x] for d in dicts for x in d} + + +common_args = parse_kwargs( + """ + input (Tensor): the input tensor. + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned tensor. Default: ``torch.preserve_format``. +""" +) + +reduceops_common_args = merge_dicts( + common_args, + parse_kwargs( + """ + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +""" + ), +) + +multi_dim_common = merge_dicts( + reduceops_common_args, + parse_kwargs( + """ + dim (int or tuple of ints): the dimension or dimensions to reduce. +""" + ), + { + "keepdim_details": """ +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +output tensor having 1 (or ``len(dim)``) fewer dimension(s). +""" + }, + { + "opt_dim": """ + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. 
+""" + }, +) + +single_dim_common = merge_dicts( + reduceops_common_args, + parse_kwargs( + """ + dim (int): the dimension to reduce. +""" + ), + { + "keepdim_details": """If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. +Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in +the output tensor having 1 fewer dimension than :attr:`input`.""" + }, +) + +factory_common_args = merge_dicts( + common_args, + parse_kwargs( + """ + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + check_invariants (bool, optional): If sparse tensor invariants are checked. + Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, + initially False. +""" + ), + { + "sparse_factory_device_note": """\ +.. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. 
If, however, the + argument is specified the input Tensors will be converted to the + given device and in turn determine the device of the constructed + sparse tensor.""" + }, +) + +factory_like_common_args = parse_kwargs( + """ + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. +""" +) + +factory_data_common_args = parse_kwargs( + """ + data (array_like): Initial data for the tensor. Can be a list, tuple, + NumPy ``ndarray``, scalar, and other types. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, infers data type from :attr:`data`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. 
+ pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. +""" +) + +tf32_notes = { + "tf32_note": """This operator supports :ref:`TensorFloat32`.""" +} + +rocm_fp16_notes = { + "rocm_fp16_note": """On certain ROCm devices, when using float16 inputs this module will use \ +:ref:`different precision` for backward.""" +} + +reproducibility_notes = { + "forward_reproducibility_note": """This operation may behave nondeterministically when given tensors on \ +a CUDA device. See :doc:`/notes/randomness` for more information.""", + "backward_reproducibility_note": """This operation may produce nondeterministic gradients when given tensors on \ +a CUDA device. See :doc:`/notes/randomness` for more information.""", + "cudnn_reproducibility_note": """In some circumstances when given tensors on a CUDA device \ +and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is \ +undesirable, you can try to make the operation deterministic (potentially at \ +a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. \ +See :doc:`/notes/randomness` for more information.""", +} + +sparse_support_notes = { + "sparse_beta_warning": """ +.. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request.""", +} + +add_docstr( + torch.abs, + r""" +abs(input, *, out=None) -> Tensor + +Computes the absolute value of each element in :attr:`input`. + +.. 
math:: + \text{out}_{i} = |\text{input}_{i}| +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.abs(torch.tensor([-1, -2, 3])) + tensor([ 1, 2, 3]) +""".format( + **common_args + ), +) + +add_docstr( + torch.absolute, + r""" +absolute(input, *, out=None) -> Tensor + +Alias for :func:`torch.abs` +""", +) + +add_docstr( + torch.acos, + r""" +acos(input, *, out=None) -> Tensor + +Computes the inverse cosine of each element in :attr:`input`. + +.. math:: + \text{out}_{i} = \cos^{-1}(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.3348, -0.5889, 0.2005, -0.1584]) + >>> torch.acos(a) + tensor([ 1.2294, 2.2004, 1.3690, 1.7298]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arccos, + r""" +arccos(input, *, out=None) -> Tensor + +Alias for :func:`torch.acos`. +""", +) + +add_docstr( + torch.acosh, + r""" +acosh(input, *, out=None) -> Tensor + +Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \cosh^{-1}(\text{input}_{i}) + +Note: + The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range + will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`. +""" + + r""" +Args: + {input} + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.randn(4).uniform_(1, 2) + >>> a + tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ]) + >>> torch.acosh(a) + tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arccosh, + r""" +arccosh(input, *, out=None) -> Tensor + +Alias for :func:`torch.acosh`. +""", +) + +add_docstr( + torch.index_add, + r""" +index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor + +See :meth:`~Tensor.index_add_` for function description. 
+""", +) + +add_docstr( + torch.index_copy, + r""" +index_copy(input, dim, index, source, *, out=None) -> Tensor + +See :meth:`~Tensor.index_add_` for function description. +""", +) + +add_docstr( + torch.index_reduce, + r""" +index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor + +See :meth:`~Tensor.index_reduce_` for function description. +""", +) + +add_docstr( + torch.add, + r""" +add(input, other, *, alpha=1, out=None) -> Tensor + +Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`. + +.. math:: + \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i +""" + + r""" + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer, float, and complex inputs. + +Args: + {input} + other (Tensor or Number): the tensor or number to add to :attr:`input`. + +Keyword arguments: + alpha (Number): the multiplier for :attr:`other`. + {out} + +Examples:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.0202, 1.0985, 1.3506, -0.6056]) + >>> torch.add(a, 20) + tensor([ 20.0202, 21.0985, 21.3506, 19.3944]) + + >>> b = torch.randn(4) + >>> b + tensor([-0.9732, -0.3497, 0.6245, 0.4022]) + >>> c = torch.randn(4, 1) + >>> c + tensor([[ 0.3743], + [-1.7724], + [-0.5811], + [-0.8017]]) + >>> torch.add(b, c, alpha=10) + tensor([[ 2.7695, 3.3930, 4.3672, 4.1450], + [-18.6971, -18.0736, -17.0994, -17.3216], + [ -6.7845, -6.1610, -5.1868, -5.4090], + [ -8.9902, -8.3667, -7.3925, -7.6147]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.addbmm, + r""" +addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + +Performs a batch matrix-matrix product of matrices stored +in :attr:`batch1` and :attr:`batch2`, +with a reduced add step (all matrix multiplications get accumulated +along the first dimension). +:attr:`input` is added to the final result. + +:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the +same number of matrices. 
+ +If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a +:math:`(b \times m \times p)` tensor, :attr:`input` must be +:ref:`broadcastable ` with a :math:`(n \times p)` tensor +and :attr:`out` will be a :math:`(n \times p)` tensor. + +.. math:: + out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) + +If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +it will not be propagated. +""" + + r""" +For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` +must be real numbers, otherwise they should be integers. + +{tf32_note} + +{rocm_fp16_note} + +Args: + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + +Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + input (Tensor): matrix to be added + alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) + {out} + +Example:: + + >>> M = torch.randn(3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.addbmm(M, batch1, batch2) + tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], + [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], + [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) +""".format( + **common_args, **tf32_notes, **rocm_fp16_notes + ), +) + +add_docstr( + torch.addcdiv, + r""" +addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + +Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`, +multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`. + +.. warning:: + Integer division with addcdiv is no longer supported, and in a future + release addcdiv will perform a true division of tensor1 and tensor2. 
+ The historic addcdiv behavior can be implemented as + (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype) + for integer inputs and as (input + value * tensor1 / tensor2) for float inputs. + The future addcdiv behavior is just the latter implementation: + (input + value * tensor1 / tensor2), for all dtypes. + +.. math:: + \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i} +""" + + r""" + +The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be +:ref:`broadcastable `. + +For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be +a real number, otherwise an integer. + +Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the numerator tensor + tensor2 (Tensor): the denominator tensor + +Keyword args: + value (Number, optional): multiplier for :math:`\text{{tensor1}} / \text{{tensor2}}` + {out} + +Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcdiv(t, t1, t2, value=0.1) + tensor([[-0.2312, -3.6496, 0.1312], + [-1.0428, 3.4292, -0.1030], + [-0.5369, -0.9829, 0.0430]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.addcmul, + r""" +addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + +Performs the element-wise multiplication of :attr:`tensor1` +by :attr:`tensor2`, multiplies the result by the scalar :attr:`value` +and adds it to :attr:`input`. + +.. math:: + \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i +""" + + r""" +The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be +:ref:`broadcastable `. + +For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be +a real number, otherwise an integer. 
+ +Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the tensor to be multiplied + tensor2 (Tensor): the tensor to be multiplied + +Keyword args: + value (Number, optional): multiplier for :math:`tensor1 .* tensor2` + {out} + +Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcmul(t, t1, t2, value=0.1) + tensor([[-0.8635, -0.6391, 1.6174], + [-0.7617, -0.5879, 1.7388], + [-0.8353, -0.6249, 1.6511]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.addmm, + r""" +addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + +Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. +The matrix :attr:`input` is added to the final result. + +If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a +:math:`(m \times p)` tensor, then :attr:`input` must be +:ref:`broadcastable ` with a :math:`(n \times p)` tensor +and :attr:`out` will be a :math:`(n \times p)` tensor. + +:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between +:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + +.. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + +If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +it will not be propagated. +""" + + r""" +For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and +:attr:`alpha` must be real numbers, otherwise they should be integers. + +This operation has support for arguments with :ref:`sparse layouts`. If +:attr:`input` is sparse the result will have the same layout and if :attr:`out` +is provided it must have the same layout as :attr:`input`. 
+ +{sparse_beta_warning} + +{tf32_note} + +{rocm_fp16_note} + +Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + +Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + {out} + +Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) +""".format( + **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes + ), +) + +add_docstr( + torch.adjoint, + r""" +adjoint(Tensor) -> Tensor +Returns a view of the tensor conjugated and with the last two dimensions transposed. + +``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and +to ``x.transpose(-2, -1)`` for real tensors. + +Example:: + >>> x = torch.arange(4, dtype=torch.float) + >>> A = torch.complex(x, x).reshape(2, 2) + >>> A + tensor([[0.+0.j, 1.+1.j], + [2.+2.j, 3.+3.j]]) + >>> A.adjoint() + tensor([[0.-0.j, 2.-2.j], + [1.-1.j, 3.-3.j]]) + >>> (A.adjoint() == A.mH).all() + tensor(True) +""", +) + +add_docstr( + torch.sspaddmm, + r""" +sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + +Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor +:attr:`mat2`, then adds the sparse tensor :attr:`input` to the result. + +Note: This function is equivalent to :func:`torch.addmm`, except +:attr:`input` and :attr:`mat1` are sparse. 
+ +Args: + input (Tensor): a sparse matrix to be added + mat1 (Tensor): a sparse matrix to be matrix multiplied + mat2 (Tensor): a dense matrix to be matrix multiplied + +Keyword args: + beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + {out} +""".format( + **common_args + ), +) + +add_docstr( + torch.smm, + r""" +smm(input, mat) -> Tensor + +Performs a matrix multiplication of the sparse matrix :attr:`input` +with the dense matrix :attr:`mat`. + +Args: + input (Tensor): a sparse matrix to be matrix multiplied + mat (Tensor): a dense matrix to be matrix multiplied +""", +) + +add_docstr( + torch.addmv, + r""" +addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor + +Performs a matrix-vector product of the matrix :attr:`mat` and +the vector :attr:`vec`. +The vector :attr:`input` is added to the final result. + +If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of +size `m`, then :attr:`input` must be +:ref:`broadcastable ` with a 1-D tensor of size `n` and +:attr:`out` will be 1-D tensor of size `n`. + +:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between +:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. + +.. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) + +If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +it will not be propagated. +""" + + r""" +For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and +:attr:`alpha` must be real numbers, otherwise they should be integers. 
+ +Args: + input (Tensor): vector to be added + mat (Tensor): matrix to be matrix multiplied + vec (Tensor): vector to be matrix multiplied + +Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) + {out} + +Example:: + + >>> M = torch.randn(2) + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.addmv(M, mat, vec) + tensor([-0.3768, -5.5565]) +""".format( + **common_args + ), +) + +add_docstr( + torch.addr, + r""" +addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor + +Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` +and adds it to the matrix :attr:`input`. + +Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the +outer product between :attr:`vec1` and :attr:`vec2` and the added matrix +:attr:`input` respectively. + +.. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) + +If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +it will not be propagated. +""" + + r""" +If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector +of size `m`, then :attr:`input` must be +:ref:`broadcastable ` with a matrix of size +:math:`(n \times m)` and :attr:`out` will be a matrix of size +:math:`(n \times m)`. + +Args: + input (Tensor): matrix to be added + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + +Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{{vec1}} \otimes \text{{vec2}}` (:math:`\alpha`) + {out} + +Example:: + + >>> vec1 = torch.arange(1., 4.) + >>> vec2 = torch.arange(1., 3.) 
+ >>> M = torch.zeros(3, 2) + >>> torch.addr(M, vec1, vec2) + tensor([[ 1., 2.], + [ 2., 4.], + [ 3., 6.]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.allclose, + r""" +allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool + +This function checks if :attr:`input` and :attr:`other` satisfy the condition: + +.. math:: + \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert +""" + + r""" +elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to +`numpy.allclose `_ + +Args: + input (Tensor): first tensor to compare + other (Tensor): second tensor to compare + atol (float, optional): absolute tolerance. Default: 1e-08 + rtol (float, optional): relative tolerance. Default: 1e-05 + equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False`` + +Example:: + + >>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08])) + False + >>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09])) + True + >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')])) + False + >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True) + True +""", +) + +add_docstr( + torch.all, + r""" +all(input) -> Tensor + +Tests if all elements in :attr:`input` evaluate to `True`. + +.. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + +Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.all(a) + tensor(False, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.all(a) + tensor(False) + +.. 
function:: all(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + +For each row of :attr:`input` in the given dimension :attr:`dim`, +returns `True` if all elements in the row evaluate to `True` and `False` otherwise. + +{keepdim_details} + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + {out} + +Example:: + + >>> a = torch.rand(4, 2).bool() + >>> a + tensor([[True, True], + [True, False], + [True, True], + [True, True]], dtype=torch.bool) + >>> torch.all(a, dim=1) + tensor([ True, False, True, True], dtype=torch.bool) + >>> torch.all(a, dim=0) + tensor([ True, False], dtype=torch.bool) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.any, + r""" +any(input) -> Tensor + +Tests if any element in :attr:`input` evaluates to `True`. + +.. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + +Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.any(a) + tensor(True, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.any(a) + tensor(True) + +.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + +For each row of :attr:`input` in the given dimension :attr:`dim`, +returns `True` if any element in the row evaluate to `True` and `False` otherwise. + +{keepdim_details} + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4, 2) < 0 + >>> a + tensor([[ True, True], + [False, True], + [ True, True], + [False, False]]) + >>> torch.any(a, 1) + tensor([ True, True, True, False]) + >>> torch.any(a, 0) + tensor([True, True]) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.angle, + r""" +angle(input, *, out=None) -> Tensor + +Computes the element-wise angle (in radians) of the given :attr:`input` tensor. + +.. 
math:: + \text{out}_{i} = angle(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +.. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers, + zero for non-negative real numbers, and propagates NaNs. Previously + the function would return zero for all real numbers and not propagate + floating-point NaNs. + +Example:: + + >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159 + tensor([ 135., 135, -45]) +""".format( + **common_args + ), +) + +add_docstr( + torch.as_strided, + r""" +as_strided(input, size, stride, storage_offset=None) -> Tensor + +Create a view of an existing `torch.Tensor` :attr:`input` with specified +:attr:`size`, :attr:`stride` and :attr:`storage_offset`. + +.. warning:: + Prefer using other view functions, like :meth:`torch.Tensor.expand`, + to setting a view's strides manually with `as_strided`, as this + function's behavior depends on the implementation of a tensor's storage. + The constructed view of the storage must only refer to elements within + the storage or a runtime error will be thrown, and if the view is + "overlapped" (with multiple indices referring to the same element in + memory) its behavior is undefined. + +Args: + {input} + size (tuple or ints): the shape of the output tensor + stride (tuple or ints): the stride of the output tensor + storage_offset (int, optional): the offset in the underlying storage of the output tensor. + If ``None``, the storage_offset of the output tensor will match the input tensor. 
+ +Example:: + + >>> x = torch.randn(3, 3) + >>> x + tensor([[ 0.9039, 0.6291, 1.0795], + [ 0.1586, 2.1939, -0.4900], + [-0.1909, -0.7503, 1.9355]]) + >>> t = torch.as_strided(x, (2, 2), (1, 2)) + >>> t + tensor([[0.9039, 1.0795], + [0.6291, 0.1586]]) + >>> t = torch.as_strided(x, (2, 2), (1, 2), 1) + tensor([[0.6291, 0.1586], + [1.0795, 2.1939]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.as_tensor, + r""" +as_tensor(data, dtype=None, device=None) -> Tensor + +Converts :attr:`data` into a tensor, sharing data and preserving autograd +history if possible. + +If :attr:`data` is already a tensor with the requested dtype and device +then :attr:`data` itself is returned, but if :attr:`data` is a +tensor with a different dtype or device then it's copied as if using +`data.to(dtype=dtype, device=device)`. + +If :attr:`data` is a NumPy array (an ndarray) with the same dtype and device then a +tensor is constructed using :func:`torch.from_numpy`. + +.. seealso:: + + :func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`). + + +Args: + {data} + {dtype} + device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor + then the device of data is used. If None and data is not a tensor then + the result tensor is constructed on the current device. + + +Example:: + + >>> a = numpy.array([1, 2, 3]) + >>> t = torch.as_tensor(a) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([-1, 2, 3]) + + >>> a = numpy.array([1, 2, 3]) + >>> t = torch.as_tensor(a, device=torch.device('cuda')) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([1, 2, 3]) +""".format( + **factory_data_common_args + ), +) + +add_docstr( + torch.asin, + r""" +asin(input, *, out=None) -> Tensor + +Returns a new tensor with the arcsine of the elements of :attr:`input`. + +.. 
math:: + \text{out}_{i} = \sin^{-1}(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.5962, 1.4985, -0.4396, 1.4525]) + >>> torch.asin(a) + tensor([-0.6387, nan, -0.4552, nan]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arcsin, + r""" +arcsin(input, *, out=None) -> Tensor + +Alias for :func:`torch.asin`. +""", +) + +add_docstr( + torch.asinh, + r""" +asinh(input, *, out=None) -> Tensor + +Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \sinh^{-1}(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ]) + >>> torch.asinh(a) + tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arcsinh, + r""" +arcsinh(input, *, out=None) -> Tensor + +Alias for :func:`torch.asinh`. +""", +) + +add_docstr( + torch.atan, + r""" +atan(input, *, out=None) -> Tensor + +Returns a new tensor with the arctangent of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \tan^{-1}(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.2341, 0.2539, -0.6256, -0.6448]) + >>> torch.atan(a) + tensor([ 0.2299, 0.2487, -0.5591, -0.5727]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arctan, + r""" +arctan(input, *, out=None) -> Tensor + +Alias for :func:`torch.atan`. +""", +) + +add_docstr( + torch.atan2, + r""" +atan2(input, other, *, out=None) -> Tensor + +Element-wise arctangent of :math:`\text{{input}}_{{i}} / \text{{other}}_{{i}}` +with consideration of the quadrant. Returns a new tensor with the signed angles +in radians between vector :math:`(\text{{other}}_{{i}}, \text{{input}}_{{i}})` +and vector :math:`(1, 0)`. 
(Note that :math:`\text{{other}}_{{i}}`, the second +parameter, is the x-coordinate, while :math:`\text{{input}}_{{i}}`, the first +parameter, is the y-coordinate.) + +The shapes of ``input`` and ``other`` must be +:ref:`broadcastable <broadcasting-semantics>`. + +Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.9041, 0.0196, -0.3108, -2.4423]) + >>> torch.atan2(a, torch.randn(4)) + tensor([ 0.9833, 0.0811, -1.9743, -1.4151]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arctan2, + r""" +arctan2(input, other, *, out=None) -> Tensor +Alias for :func:`torch.atan2`. +""", +) + +add_docstr( + torch.atanh, + r""" +atanh(input, *, out=None) -> Tensor + +Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`. + +Note: + The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range + will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is + mapped to `+/-INF` respectively. + +.. math:: + \text{out}_{i} = \tanh^{-1}(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.randn(4).uniform_(-1, 1) + >>> a + tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ]) + >>> torch.atanh(a) + tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arctanh, + r""" +arctanh(input, *, out=None) -> Tensor + +Alias for :func:`torch.atanh`. +""", +) + +add_docstr( + torch.asarray, + r""" +asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor + +Converts :attr:`obj` to a tensor. + +:attr:`obj` can be one of: + +1. a tensor +2. a NumPy array or a NumPy scalar +3. a DLPack capsule +4. an object that implements Python's buffer protocol +5. a scalar +6.
a sequence of scalars + +When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will, +by default, not require a gradient, have the same datatype as :attr:`obj`, be on the +same device, and share memory with it. These properties can be controlled with the +:attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments. +If the returned tensor is of a different datatype, on a different device, or a copy is +requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad` +is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is +also a tensor with an autograd history then the returned tensor will have the same history. + +When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's +buffer protocol then the buffer is interpreted as an array of bytes grouped according to +the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is +passed then the default floating point datatype is used, instead.) The returned tensor +will have the specified datatype (or default floating point datatype if none is specified) +and, by default, be on the CPU device and share memory with the buffer. + +When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on +the CPU and that doesn't share its memory (i.e. ``copy=True``). By default datatype will +be the PyTorch datatype corresponding to the NumPy's scalar's datatype. + +When :attr:`obj` is none of the above but a scalar, or a sequence of scalars then the +returned tensor will, by default, infer its datatype from the scalar values, be on the +current default device, and not share its memory. + +.. seealso:: + + :func:`torch.tensor` creates a tensor that always copies the data from the input object. + :func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays. 
+ :func:`torch.frombuffer` creates a tensor that always shares memory from objects that + implement the buffer protocol. + :func:`torch.from_dlpack` creates a tensor that always shares memory from + DLPack capsules. + +Args: + obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's + buffer protocol, scalar, or sequence of scalars. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor. + Default: ``None``, which causes the datatype of the returned tensor to be + inferred from :attr:`obj`. + copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`. + Default: ``None``, which causes the returned tensor to share memory with :attr:`obj` + whenever possible. If ``True`` then the returned tensor does not share its memory. + If ``False`` then the returned tensor shares its memory with :attr:`obj` and an + error is thrown if it cannot. + device (:class:`torch.device`, optional): the device of the returned tensor. + Default: ``None``, which causes the device of :attr:`obj` to be used. Or, if + :attr:`obj` is a Python sequence, the current default device will be used. + requires_grad (bool, optional): whether the returned tensor requires grad. + Default: ``False``, which causes the returned tensor not to require a gradient. + If ``True``, then the returned tensor will require a gradient, and if :attr:`obj` + is also a tensor with an autograd history then the returned tensor will have + the same history. 
+ +Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> # Shares memory with tensor 'a' + >>> b = torch.asarray(a) + >>> a.data_ptr() == b.data_ptr() + True + >>> # Forces memory copy + >>> c = torch.asarray(a, copy=True) + >>> a.data_ptr() == c.data_ptr() + False + + >>> a = torch.tensor([1., 2., 3.], requires_grad=True) + >>> b = a + 2 + >>> b + tensor([3., 4., 5.], grad_fn=<AddBackward0>) + >>> # Shares memory with tensor 'b', with no grad + >>> c = torch.asarray(b) + >>> c + tensor([3., 4., 5.]) + >>> # Shares memory with tensor 'b', retaining autograd history + >>> d = torch.asarray(b, requires_grad=True) + >>> d + tensor([3., 4., 5.], grad_fn=<AddBackward0>) + + >>> array = numpy.array([1, 2, 3]) + >>> # Shares memory with array 'array' + >>> t1 = torch.asarray(array) + >>> array.__array_interface__['data'][0] == t1.data_ptr() + True + >>> # Copies memory due to dtype mismatch + >>> t2 = torch.asarray(array, dtype=torch.float32) + >>> array.__array_interface__['data'][0] == t2.data_ptr() + False + + >>> scalar = numpy.float64(0.5) + >>> torch.asarray(scalar) + tensor(0.5000, dtype=torch.float64) +""", +) + +add_docstr( + torch.baddbmm, + r""" +baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + +Performs a batch matrix-matrix product of matrices in :attr:`batch1` +and :attr:`batch2`. +:attr:`input` is added to the final result. + +:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same +number of matrices. + +If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a +:math:`(b \times m \times p)` tensor, then :attr:`input` must be +:ref:`broadcastable <broadcasting-semantics>` with a +:math:`(b \times n \times p)` tensor and :attr:`out` will be a +:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the +same as the scaling factors used in :meth:`torch.addbmm`. + +..
math:: + \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) + +If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +it will not be propagated. +""" + + r""" +For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and +:attr:`alpha` must be real numbers, otherwise they should be integers. + +{tf32_note} + +{rocm_fp16_note} + +Args: + input (Tensor): the tensor to be added + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + +Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{{batch1}} \mathbin{{@}} \text{{batch2}}` (:math:`\alpha`) + {out} + +Example:: + + >>> M = torch.randn(10, 3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.baddbmm(M, batch1, batch2).size() + torch.Size([10, 3, 5]) +""".format( + **common_args, **tf32_notes, **rocm_fp16_notes + ), +) + +add_docstr( + torch.bernoulli, + r""" +bernoulli(input, *, generator=None, out=None) -> Tensor + +Draws binary random numbers (0 or 1) from a Bernoulli distribution. + +The :attr:`input` tensor should be a tensor containing probabilities +to be used for drawing the binary random number. +Hence, all values in :attr:`input` have to be in the range: +:math:`0 \leq \text{input}_i \leq 1`. + +The :math:`\text{i}^{th}` element of the output tensor will draw a +value :math:`1` according to the :math:`\text{i}^{th}` probability value given +in :attr:`input`. + +.. math:: + \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i}) +""" + + r""" +The returned :attr:`out` tensor only has values 0 or 1 and is of the same +shape as :attr:`input`. + +:attr:`out` can have integral ``dtype``, but :attr:`input` must have floating +point ``dtype``. 
+ +Args: + input (Tensor): the input tensor of probability values for the Bernoulli distribution + +Keyword args: + {generator} + {out} + +Example:: + + >>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1] + >>> a + tensor([[ 0.1737, 0.0950, 0.3609], + [ 0.7148, 0.0289, 0.2676], + [ 0.9456, 0.8937, 0.7202]]) + >>> torch.bernoulli(a) + tensor([[ 1., 0., 0.], + [ 0., 0., 0.], + [ 1., 1., 1.]]) + + >>> a = torch.ones(3, 3) # probability of drawing "1" is 1 + >>> torch.bernoulli(a) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.]]) + >>> a = torch.zeros(3, 3) # probability of drawing "1" is 0 + >>> torch.bernoulli(a) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.], + [ 0., 0., 0.]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.bincount, + r""" +bincount(input, weights=None, minlength=0) -> Tensor + +Count the frequency of each value in an array of non-negative ints. + +The number of bins (size 1) is one larger than the largest value in +:attr:`input` unless :attr:`input` is empty, in which case the result is a +tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least +:attr:`minlength` and if :attr:`input` is empty, then the result is tensor of size +:attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``, +``out[n] += weights[i]`` if :attr:`weights` is specified else +``out[n] += 1``. + +Note: + {backward_reproducibility_note} + +Arguments: + input (Tensor): 1-d int tensor + weights (Tensor): optional, weight for each value in the input tensor. + Should be of same size as input tensor. + minlength (int): optional, minimum number of bins. Should be non-negative. 
+ +Returns: + output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if + :attr:`input` is non-empty, else ``Size(0)`` + +Example:: + + >>> input = torch.randint(0, 8, (5,), dtype=torch.int64) + >>> weights = torch.linspace(0, 1, steps=5) + >>> input, weights + (tensor([4, 3, 6, 3, 4]), + tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000])) + + >>> torch.bincount(input) + tensor([0, 0, 0, 2, 2, 0, 1]) + + >>> input.bincount(weights) + tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000]) +""".format( + **reproducibility_notes + ), +) + +add_docstr( + torch.bitwise_not, + r""" +bitwise_not(input, *, out=None) -> Tensor + +Computes the bitwise NOT of the given input tensor. The input tensor must be of +integral or Boolean types. For bool tensors, it computes the logical NOT. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8)) + tensor([ 0, 1, -4], dtype=torch.int8) +""".format( + **common_args + ), +) + +add_docstr( + torch.bmm, + r""" +bmm(input, mat2, *, out=None) -> Tensor + +Performs a batch matrix-matrix product of matrices stored in :attr:`input` +and :attr:`mat2`. + +:attr:`input` and :attr:`mat2` must be 3-D tensors each containing +the same number of matrices. + +If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a +:math:`(b \times m \times p)` tensor, :attr:`out` will be a +:math:`(b \times n \times p)` tensor. + +.. math:: + \text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i +""" + + r""" +{tf32_note} + +{rocm_fp16_note} + +.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`. + For broadcasting matrix products, see :func:`torch.matmul`.
+ +Args: + input (Tensor): the first batch of matrices to be multiplied + mat2 (Tensor): the second batch of matrices to be multiplied + +Keyword Args: + {out} + +Example:: + + >>> input = torch.randn(10, 3, 4) + >>> mat2 = torch.randn(10, 4, 5) + >>> res = torch.bmm(input, mat2) + >>> res.size() + torch.Size([10, 3, 5]) +""".format( + **common_args, **tf32_notes, **rocm_fp16_notes + ), +) + +add_docstr( + torch.bitwise_and, + r""" +bitwise_and(input, other, *, out=None) -> Tensor + +Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of +integral or Boolean types. For bool tensors, it computes the logical AND. + +Args: + input: the first input tensor + other: the second input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([1, 0, 3], dtype=torch.int8) + >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ False, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.bitwise_or, + r""" +bitwise_or(input, other, *, out=None) -> Tensor + +Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of +integral or Boolean types. For bool tensors, it computes the logical OR. + +Args: + input: the first input tensor + other: the second input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -2, 3], dtype=torch.int8) + >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.bitwise_xor, + r""" +bitwise_xor(input, other, *, out=None) -> Tensor + +Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of +integral or Boolean types. 
For bool tensors, it computes the logical XOR. + +Args: + input: the first input tensor + other: the second input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 0], dtype=torch.int8) + >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, False, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.bitwise_left_shift, + r""" +bitwise_left_shift(input, other, *, out=None) -> Tensor + +Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits. +The input tensor must be of integral type. This operator supports +:ref:`broadcasting to a common shape <broadcasting-semantics>` and +:ref:`type promotion <type-promotion-doc>`. + +The operation applied is: + +.. math:: + \text{{out}}_i = \text{{input}}_i << \text{{other}}_i + +Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 24], dtype=torch.int8) +""".format( + **common_args + ), +) + +add_docstr( + torch.bitwise_right_shift, + r""" +bitwise_right_shift(input, other, *, out=None) -> Tensor + +Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits. +The input tensor must be of integral type. This operator supports +:ref:`broadcasting to a common shape <broadcasting-semantics>` and +:ref:`type promotion <type-promotion-doc>`. +In any case, if the value of the right operand is negative or is greater +or equal to the number of bits in the promoted left operand, the behavior is undefined. + +The operation applied is: + +..
math:: + \text{{out}}_i = \text{{input}}_i >> \text{{other}}_i + +Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -7, 3], dtype=torch.int8) +""".format( + **common_args + ), +) + +add_docstr( + torch.broadcast_to, + r""" +broadcast_to(input, shape) -> Tensor + +Broadcasts :attr:`input` to the shape :attr:`shape`. +Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details. + +Args: + {input} + shape (list, tuple, or :class:`torch.Size`): the new shape. + +Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> torch.broadcast_to(x, (3, 3)) + tensor([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.stack, + r""" +stack(tensors, dim=0, *, out=None) -> Tensor + +Concatenates a sequence of tensors along a new dimension. + +All tensors need to be of the same size. + +.. seealso:: + + :func:`torch.cat` concatenates the given sequence along an existing dimension. + +Arguments: + tensors (sequence of Tensors): sequence of tensors to concatenate + dim (int): dimension to insert. Has to be between 0 and the number + of dimensions of concatenated tensors (inclusive) + +Keyword args: + {out} +""".format( + **common_args + ), +) + +add_docstr( + torch.hstack, + r""" +hstack(tensors, *, out=None) -> Tensor + +Stack tensors in sequence horizontally (column wise). + +This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors.
+ +Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.hstack((a,b)) + tensor([1, 2, 3, 4, 5, 6]) + >>> a = torch.tensor([[1],[2],[3]]) + >>> b = torch.tensor([[4],[5],[6]]) + >>> torch.hstack((a,b)) + tensor([[1, 4], + [2, 5], + [3, 6]]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.vstack, + r""" +vstack(tensors, *, out=None) -> Tensor + +Stack tensors in sequence vertically (row wise). + +This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`. + +Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.vstack((a,b)) + tensor([[1, 2, 3], + [4, 5, 6]]) + >>> a = torch.tensor([[1],[2],[3]]) + >>> b = torch.tensor([[4],[5],[6]]) + >>> torch.vstack((a,b)) + tensor([[1], + [2], + [3], + [4], + [5], + [6]]) + + +""".format( + **common_args + ), +) + +add_docstr( + torch.dstack, + r""" +dstack(tensors, *, out=None) -> Tensor + +Stack tensors in sequence depthwise (along third axis). + +This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`. 
+ +Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.dstack((a,b)) + tensor([[[1, 4], + [2, 5], + [3, 6]]]) + >>> a = torch.tensor([[1],[2],[3]]) + >>> b = torch.tensor([[4],[5],[6]]) + >>> torch.dstack((a,b)) + tensor([[[1, 4]], + [[2, 5]], + [[3, 6]]]) + + +""".format( + **common_args + ), +) + +add_docstr( + torch.tensor_split, + r""" +tensor_split(input, indices_or_sections, dim=0) -> List of Tensors + +Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`, +along dimension :attr:`dim` according to the indices or number of sections specified +by :attr:`indices_or_sections`. This function is based on NumPy's +:func:`numpy.array_split`. + +Args: + input (Tensor): the tensor to split + indices_or_sections (Tensor, int or list or tuple of ints): + If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor + with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`. + If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each + section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input` + is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)` + sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will + have size :code:`int(input.size(dim) / n)`. + + If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long + tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices + in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0` + would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`. + + If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional + long tensor on the CPU. 
+ + dim (int, optional): dimension along which to split the tensor. Default: ``0`` + +Example:: + + >>> x = torch.arange(8) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7])) + + >>> x = torch.arange(7) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6])) + >>> torch.tensor_split(x, (1, 6)) + (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6])) + + >>> x = torch.arange(14).reshape(2, 7) + >>> x + tensor([[ 0, 1, 2, 3, 4, 5, 6], + [ 7, 8, 9, 10, 11, 12, 13]]) + >>> torch.tensor_split(x, 3, dim=1) + (tensor([[0, 1, 2], + [7, 8, 9]]), + tensor([[ 3, 4], + [10, 11]]), + tensor([[ 5, 6], + [12, 13]])) + >>> torch.tensor_split(x, (1, 6), dim=1) + (tensor([[0], + [7]]), + tensor([[ 1, 2, 3, 4, 5], + [ 8, 9, 10, 11, 12]]), + tensor([[ 6], + [13]])) +""", +) + +add_docstr( + torch.chunk, + r""" +chunk(input, chunks, dim=0) -> List of Tensors + +Attempts to split a tensor into the specified number of chunks. Each chunk is a view of +the input tensor. + + +.. note:: + + This function may return fewer than the specified number of chunks! + +.. seealso:: + + :func:`torch.tensor_split` a function that always returns exactly the specified number of chunks + +If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`, +all returned chunks will be the same size. +If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`, +all returned chunks will be the same size, except the last one. +If such division is not possible, this function may return fewer +than the specified number of chunks. 
+ +Arguments: + input (Tensor): the tensor to split + chunks (int): number of chunks to return + dim (int): dimension along which to split the tensor + +Example: + >>> torch.arange(11).chunk(6) + (tensor([0, 1]), + tensor([2, 3]), + tensor([4, 5]), + tensor([6, 7]), + tensor([8, 9]), + tensor([10])) + >>> torch.arange(12).chunk(6) + (tensor([0, 1]), + tensor([2, 3]), + tensor([4, 5]), + tensor([6, 7]), + tensor([8, 9]), + tensor([10, 11])) + >>> torch.arange(13).chunk(6) + (tensor([0, 1, 2]), + tensor([3, 4, 5]), + tensor([6, 7, 8]), + tensor([ 9, 10, 11]), + tensor([12])) +""", +) + +add_docstr( + torch.unsafe_chunk, + r""" +unsafe_chunk(input, chunks, dim=0) -> List of Tensors + +Works like :func:`torch.chunk` but without enforcing the autograd restrictions +on inplace modification of the outputs. + +.. warning:: + This function is safe to use as long as only the input, or only the outputs + are modified inplace after calling this function. It is user's + responsibility to ensure that is the case. If both the input and one or more + of the outputs are modified inplace, gradients computed by autograd will be + silently incorrect. +""", +) + +add_docstr( + torch.unsafe_split, + r""" +unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors + +Works like :func:`torch.split` but without enforcing the autograd restrictions +on inplace modification of the outputs. + +.. warning:: + This function is safe to use as long as only the input, or only the outputs + are modified inplace after calling this function. It is user's + responsibility to ensure that is the case. If both the input and one or more + of the outputs are modified inplace, gradients computed by autograd will be + silently incorrect. +""", +) + +add_docstr( + torch.hsplit, + r""" +hsplit(input, indices_or_sections) -> List of Tensors + +Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors +horizontally according to :attr:`indices_or_sections`. 
Each split is a view of +:attr:`input`. + +If :attr:`input` is one dimensional this is equivalent to calling +torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is +zero), and if :attr:`input` has two or more dimensions it's equivalent to calling +torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1), +except that if :attr:`indices_or_sections` is an integer it must evenly divide +the split dimension or a runtime error will be thrown. + +This function is based on NumPy's :func:`numpy.hsplit`. + +Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + +Example:: + >>> t = torch.arange(16.0).reshape(4,4) + >>> t + tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> torch.hsplit(t, 2) + (tensor([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [12., 13.]]), + tensor([[ 2., 3.], + [ 6., 7.], + [10., 11.], + [14., 15.]])) + >>> torch.hsplit(t, [3, 6]) + (tensor([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + tensor([[ 3.], + [ 7.], + [11.], + [15.]]), + tensor([], size=(4, 0))) + +""", +) + +add_docstr( + torch.vsplit, + r""" +vsplit(input, indices_or_sections) -> List of Tensors + +Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors +vertically according to :attr:`indices_or_sections`. Each split is a view of +:attr:`input`. + +This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0) +(the split dimension is 0), except that if :attr:`indices_or_sections` is an integer +it must evenly divide the split dimension or a runtime error will be thrown. + +This function is based on NumPy's :func:`numpy.vsplit`. + +Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. 
+ +Example:: + >>> t = torch.arange(16.0).reshape(4,4) + >>> t + tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> torch.vsplit(t, 2) + (tensor([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), + tensor([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])) + >>> torch.vsplit(t, [3, 6]) + (tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), + tensor([[12., 13., 14., 15.]]), + tensor([], size=(0, 4))) + +""", +) + +add_docstr( + torch.dsplit, + r""" +dsplit(input, indices_or_sections) -> List of Tensors + +Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors +depthwise according to :attr:`indices_or_sections`. Each split is a view of +:attr:`input`. + +This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2) +(the split dimension is 2), except that if :attr:`indices_or_sections` is an integer +it must evenly divide the split dimension or a runtime error will be thrown. + +This function is based on NumPy's :func:`numpy.dsplit`. + +Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + +Example:: + >>> t = torch.arange(16.0).reshape(2, 2, 4) + >>> t + tensor([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) + >>> torch.dsplit(t, 2) + (tensor([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), + tensor([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])) + + >>> torch.dsplit(t, [3, 6]) + (tensor([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + tensor([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + tensor([], size=(2, 2, 0))) + +""", +) + +add_docstr( + torch.can_cast, + r""" +can_cast(from, to) -> bool + +Determines if a type conversion is allowed under PyTorch casting rules +described in the type promotion :ref:`documentation `. + +Args: + from (dtype): The original :class:`torch.dtype`. 
+ to (dtype): The target :class:`torch.dtype`. + +Example:: + + >>> torch.can_cast(torch.double, torch.float) + True + >>> torch.can_cast(torch.float, torch.int) + False +""", +) + +add_docstr( + torch.corrcoef, + r""" +corrcoef(input) -> Tensor + +Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix, +where rows are the variables and columns are the observations. + +.. note:: + + The correlation coefficient matrix R is computed using the covariance matrix C as given by + :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }` + +.. note:: + + Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1. + The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation. + +Args: + input (Tensor): A 2D matrix containing multiple variables and observations, or a + Scalar or 1D vector representing a single variable. + +Returns: + (Tensor) The correlation coefficient matrix of the variables. + +.. seealso:: + + :func:`torch.cov` covariance matrix. + +Example:: + + >>> x = torch.tensor([[0, 1, 2], [2, 1, 0]]) + >>> torch.corrcoef(x) + tensor([[ 1., -1.], + [-1., 1.]]) + >>> x = torch.randn(2, 4) + >>> x + tensor([[-0.2678, -0.0908, -0.3766, 0.2780], + [-0.5812, 0.1535, 0.2387, 0.2350]]) + >>> torch.corrcoef(x) + tensor([[1.0000, 0.3582], + [0.3582, 1.0000]]) + >>> torch.corrcoef(x[0]) + tensor(1.) +""", +) + +add_docstr( + torch.cov, + r""" +cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor + +Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are +the variables and columns are the observations. + +A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains +the variance of each variable (covariance of a variable with itself). 
By definition, if :attr:`input` represents +a single variable (Scalar or 1D) then its variance is returned. + +The sample covariance of the variables :math:`x` and :math:`y` is given by: + +.. math:: + \text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{\max(0,~N~-~\delta N)} + +where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively, and +:math:`\delta N` is the :attr:`correction`. + +If :attr:`fweights` and/or :attr:`aweights` are provided, the weighted covariance +is calculated, which is given by: + +.. math:: + \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)} + {\max(0,~\sum^{N}_{i = 1}w_i~-~\frac{\sum^{N}_{i = 1}w_ia_i}{\sum^{N}_{i = 1}w_i}~\delta N)} + +where :math:`w` denotes :attr:`fweights` or :attr:`aweights` (``f`` and ``a`` for brevity) based on whichever is +provided, or :math:`w = f \times a` if both are provided, and +:math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable. If not +provided, ``f`` and/or ``a`` can be seen as a :math:`\mathbb{1}` vector of appropriate size. + +Args: + input (Tensor): A 2D matrix containing multiple variables and observations, or a + Scalar or 1D vector representing a single variable. + +Keyword Args: + correction (int, optional): difference between the sample size and sample degrees of freedom. + Defaults to Bessel's correction, ``correction = 1`` which returns the unbiased estimate, + even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0`` + will return the simple average. Defaults to ``1``. + fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of + times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`. + Must have integral dtype. Ignored if ``None``. Defaults to ``None``. 
+ aweights (tensor, optional): A Scalar or 1D array of observation vector weights. + These relative weights are typically large for observations considered “important” and smaller for + observations considered less “important”. Its numel must equal the number of columns of :attr:`input`. + Must have floating point dtype. Ignored if ``None``. Defaults to ``None``. + +Returns: + (Tensor) The covariance matrix of the variables. + +.. seealso:: + + :func:`torch.corrcoef` normalized covariance matrix. + +Example:: + >>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T + >>> x + tensor([[0, 1, 2], + [2, 1, 0]]) + >>> torch.cov(x) + tensor([[ 1., -1.], + [-1., 1.]]) + >>> torch.cov(x, correction=0) + tensor([[ 0.6667, -0.6667], + [-0.6667, 0.6667]]) + >>> fw = torch.randint(1, 10, (3,)) + >>> fw + tensor([1, 6, 9]) + >>> aw = torch.rand(3) + >>> aw + tensor([0.4282, 0.0255, 0.4144]) + >>> torch.cov(x, fweights=fw, aweights=aw) + tensor([[ 0.4169, -0.4169], + [-0.4169, 0.4169]]) +""", +) + +add_docstr( + torch.cat, + r""" +cat(tensors, dim=0, *, out=None) -> Tensor + +Concatenates the given sequence of :attr:`seq` tensors in the given dimension. +All tensors must either have the same shape (except in the concatenating +dimension) or be empty. + +:func:`torch.cat` can be seen as an inverse operation for :func:`torch.split` +and :func:`torch.chunk`. + +:func:`torch.cat` can be best understood via examples. + +.. seealso:: + + :func:`torch.stack` concatenates the given sequence along a new dimension. + +Args: + tensors (sequence of Tensors): any python sequence of tensors of the same type. + Non-empty tensors provided must have the same shape, except in the + cat dimension. 
+ dim (int, optional): the dimension over which the tensors are concatenated + +Keyword args: + {out} + +Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497]]) + >>> torch.cat((x, x, x), 0) + tensor([[ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497], + [ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497], + [ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497]]) + >>> torch.cat((x, x, x), 1) + tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580, + -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034, + -0.5790, 0.1497]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.concat, + r""" +concat(tensors, dim=0, *, out=None) -> Tensor + +Alias of :func:`torch.cat`. +""", +) + +add_docstr( + torch.concatenate, + r""" +concatenate(tensors, axis=0, out=None) -> Tensor + +Alias of :func:`torch.cat`. +""", +) + +add_docstr( + torch.ceil, + r""" +ceil(input, *, out=None) -> Tensor + +Returns a new tensor with the ceil of the elements of :attr:`input`, +the smallest integer greater than or equal to each element. + +For integer inputs, follows the array-api convention of returning a +copy of the input tensor. + +.. math:: + \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + >>> torch.ceil(a) + tensor([-0., -1., -1., 1.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.real, + r""" +real(input) -> Tensor + +Returns a new tensor containing real values of the :attr:`self` tensor. +The returned tensor and :attr:`self` share the same underlying storage. 
+ +Args: + {input} + +Example:: + + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)]) + >>> x.real + tensor([ 0.3100, -0.5445, -1.6492, -0.0638]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.imag, + r""" +imag(input) -> Tensor + +Returns a new tensor containing imaginary values of the :attr:`self` tensor. +The returned tensor and :attr:`self` share the same underlying storage. + +.. warning:: + :func:`imag` is only supported for tensors with complex dtypes. + +Args: + {input} + +Example:: + + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)]) + >>> x.imag + tensor([ 0.3553, -0.7896, -0.0633, -0.8119]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.view_as_real, + r""" +view_as_real(input) -> Tensor + +Returns a view of :attr:`input` as a real tensor. For an input complex tensor of +:attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new +real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2 +represents the real and imaginary components of complex numbers. + +.. warning:: + :func:`view_as_real` is only supported for tensors with ``complex dtypes``. + +Args: + {input} + +Example:: + + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)]) + >>> torch.view_as_real(x) + tensor([[ 0.4737, -0.3839], + [-0.2098, -0.6699], + [ 0.3470, -0.9451], + [-0.5174, -1.3136]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.view_as_complex, + r""" +view_as_complex(input) -> Tensor + +Returns a view of :attr:`input` as a complex tensor. 
For an input complex +tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a +new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi` where the last +dimension of the input tensor is expected to represent the real and imaginary +components of complex numbers. + +.. warning:: + :func:`view_as_complex` is only supported for tensors with + :class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is + expected to have the last dimension of :attr:`size` 2. In addition, the + tensor must have a `stride` of 1 for its last dimension. The strides of all + other dimensions must be even numbers. + +Args: + {input} + +Example:: + + >>> x=torch.randn(4, 2) + >>> x + tensor([[ 1.6116, -0.5772], + [-1.4606, -0.9120], + [ 0.0786, -1.7497], + [-0.6561, -1.6623]]) + >>> torch.view_as_complex(x) + tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)]) +""".format( + **common_args + ), +) + +add_docstr( + torch.reciprocal, + r""" +reciprocal(input, *, out=None) -> Tensor + +Returns a new tensor with the reciprocal of the elements of :attr:`input` + +.. math:: + \text{out}_{i} = \frac{1}{\text{input}_{i}} + +.. note:: + Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral + inputs to reciprocal are automatically :ref:`promoted ` to + the default scalar type. +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.4595, -2.1219, -1.4314, 0.7298]) + >>> torch.reciprocal(a) + tensor([-2.1763, -0.4713, -0.6986, 1.3702]) +""".format( + **common_args + ), +) + +add_docstr( + torch.cholesky, + r""" +cholesky(input, upper=False, *, out=None) -> Tensor + +Computes the Cholesky decomposition of a symmetric positive-definite +matrix :math:`A` or for batches of symmetric positive-definite matrices. + +If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and +the decomposition has the form: + +.. 
math:: + + A = U^TU + +If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and +the decomposition has the form: + +.. math:: + + A = LL^T + +If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite +matrices, then the returned tensor will be composed of upper-triangular Cholesky factors +of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned +tensor will be composed of lower-triangular Cholesky factors of each of the individual +matrices. + +.. warning:: + + :func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky` + and will be removed in a future PyTorch release. + + ``L = torch.cholesky(A)`` should be replaced with + + .. code:: python + + L = torch.linalg.cholesky(A) + + ``U = torch.cholesky(A, upper=True)`` should be replaced with + + .. code:: python + + U = torch.linalg.cholesky(A).mH + + This transform will produce equivalent results for all valid (symmetric positive definite) inputs. + +Args: + input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more + batch dimensions consisting of symmetric positive-definite matrices. + upper (bool, optional): flag that indicates whether to return a + upper or lower triangular matrix. 
Default: ``False`` + +Keyword args: + out (Tensor, optional): the output matrix + +Example:: + + >>> a = torch.randn(3, 3) + >>> a = a @ a.mT + 1e-3 # make symmetric positive-definite + >>> l = torch.cholesky(a) + >>> a + tensor([[ 2.4112, -0.7486, 1.4551], + [-0.7486, 1.3544, 0.1294], + [ 1.4551, 0.1294, 1.6724]]) + >>> l + tensor([[ 1.5528, 0.0000, 0.0000], + [-0.4821, 1.0592, 0.0000], + [ 0.9371, 0.5487, 0.7023]]) + >>> l @ l.mT + tensor([[ 2.4112, -0.7486, 1.4551], + [-0.7486, 1.3544, 0.1294], + [ 1.4551, 0.1294, 1.6724]]) + >>> a = torch.randn(3, 2, 2) # Example for batched input + >>> a = a @ a.mT + 1e-03 # make symmetric positive-definite + >>> l = torch.cholesky(a) + >>> z = l @ l.mT + >>> torch.dist(z, a) + tensor(2.3842e-07) +""", +) + +add_docstr( + torch.cholesky_solve, + r""" +cholesky_solve(B, L, upper=False, *, out=None) -> Tensor + +Computes the solution of a system of linear equations with complex Hermitian +or real symmetric positive-definite lhs given its Cholesky decomposition. + +Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix, +and :math:`L` its Cholesky decomposition such that: + +.. math:: + + A = LL^{\text{H}} + +where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, +and the transpose when :math:`L` is real-valued. + +Returns the solution :math:`X` of the following linear system: + +.. math:: + + AX = B + +Supports inputs of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :math:`A` or :math:`B` is a batch of matrices +then the output has the same batch dimensions. + +Args: + B (Tensor): right-hand side tensor of shape `(*, n, k)` + where :math:`*` is zero or more batch dimensions + L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions + consisting of lower or upper triangular Cholesky decompositions of + symmetric or Hermitian positive-definite matrices. 
+ upper (bool, optional): flag that indicates whether :math:`L` is lower triangular + or upper triangular. Default: ``False``. + +Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Example:: + + >>> A = torch.randn(3, 3) + >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix + >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition + >>> B = torch.randn(3, 2) + >>> torch.cholesky_solve(B, L) + tensor([[ -8.1625, 19.6097], + [ -5.8398, 14.2387], + [ -4.3771, 10.4173]]) + >>> A.inverse() @ B + tensor([[ -8.1626, 19.6097], + [ -5.8398, 14.2387], + [ -4.3771, 10.4173]]) + + >>> A = torch.randn(3, 2, 2, dtype=torch.complex64) + >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices + >>> L = torch.linalg.cholesky(A) + >>> B = torch.randn(2, 1, dtype=torch.complex64) + >>> X = torch.cholesky_solve(B, L) + >>> torch.dist(X, A.inverse() @ B) + tensor(1.6881e-5) +""", +) + +add_docstr( + torch.cholesky_inverse, + r""" +cholesky_inverse(L, upper=False, *, out=None) -> Tensor + +Computes the inverse of a complex Hermitian or real symmetric +positive-definite matrix given its Cholesky decomposition. + +Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix, +and :math:`L` its Cholesky decomposition such that: + +.. math:: + + A = LL^{\text{H}} + +where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, +and the transpose when :math:`L` is real-valued. + +Computes the inverse matrix :math:`A^{-1}`. + +Supports input of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :math:`A` is a batch of matrices +then the output has the same batch dimensions. + +Args: + L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions + consisting of lower or upper triangular Cholesky decompositions of + symmetric or Hermitian positive-definite matrices. 
+ upper (bool, optional): flag that indicates whether :math:`L` is lower triangular + or upper triangular. Default: ``False`` + +Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Example:: + + >>> A = torch.randn(3, 3) + >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix + >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition + >>> torch.cholesky_inverse(L) + tensor([[ 1.9314, 1.2251, -0.0889], + [ 1.2251, 2.4439, 0.2122], + [-0.0889, 0.2122, 0.1412]]) + >>> A.inverse() + tensor([[ 1.9314, 1.2251, -0.0889], + [ 1.2251, 2.4439, 0.2122], + [-0.0889, 0.2122, 0.1412]]) + + >>> A = torch.randn(3, 2, 2, dtype=torch.complex64) + >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices + >>> L = torch.linalg.cholesky(A) + >>> torch.dist(torch.inverse(A), torch.cholesky_inverse(L)) + tensor(5.6358e-7) +""", +) + +add_docstr( + torch.clone, + r""" +clone(input, *, memory_format=torch.preserve_format) -> Tensor + +Returns a copy of :attr:`input`. + +.. note:: + + This function is differentiable, so gradients will flow back from the + result of this operation to :attr:`input`. To create a tensor without an + autograd relationship to :attr:`input` see :meth:`~Tensor.detach`. + +Args: + {input} + +Keyword args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr( + torch.clamp, + r""" +clamp(input, min=None, max=None, *, out=None) -> Tensor + +Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`. +Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns: + +.. math:: + y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i) + +If :attr:`min` is ``None``, there is no lower bound. +Or, if :attr:`max` is ``None`` there is no upper bound. +""" + + r""" + +.. 
note:: + If :attr:`min` is greater than :attr:`max` :func:`torch.clamp(..., min, max) ` + sets all elements in :attr:`input` to the value of :attr:`max`. + +Args: + {input} + min (Number or Tensor, optional): lower-bound of the range to be clamped to + max (Number or Tensor, optional): upper-bound of the range to be clamped to + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-1.7120, 0.1734, -0.0478, -0.0922]) + >>> torch.clamp(a, min=-0.5, max=0.5) + tensor([-0.5000, 0.1734, -0.0478, -0.0922]) + + >>> min = torch.linspace(-1, 1, steps=4) + >>> torch.clamp(a, min=min) + tensor([-1.0000, 0.1734, 0.3333, 1.0000]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.clip, + r""" +clip(input, min=None, max=None, *, out=None) -> Tensor + +Alias for :func:`torch.clamp`. +""", +) + +add_docstr( + torch.column_stack, + r""" +column_stack(tensors, *, out=None) -> Tensor + +Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`. + +Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t`` +in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally. + +Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.column_stack((a, b)) + tensor([[1, 4], + [2, 5], + [3, 6]]) + >>> a = torch.arange(5) + >>> b = torch.arange(10).reshape(5, 2) + >>> torch.column_stack((a, b, b)) + tensor([[0, 0, 1, 0, 1], + [1, 2, 3, 2, 3], + [2, 4, 5, 4, 5], + [3, 6, 7, 6, 7], + [4, 8, 9, 8, 9]]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.complex, + r""" +complex(real, imag, *, out=None) -> Tensor + +Constructs a complex tensor with its real part equal to :attr:`real` and its +imaginary part equal to :attr:`imag`. + +Args: + real (Tensor): The real part of the complex tensor. Must be half, float or double. 
+ imag (Tensor): The imaginary part of the complex tensor. Must be same dtype + as :attr:`real`. + +Keyword args: + out (Tensor): If the inputs are ``torch.float32``, must be + ``torch.complex64``. If the inputs are ``torch.float64``, must be + ``torch.complex128``. + +Example:: + + >>> real = torch.tensor([1, 2], dtype=torch.float32) + >>> imag = torch.tensor([3, 4], dtype=torch.float32) + >>> z = torch.complex(real, imag) + >>> z + tensor([(1.+3.j), (2.+4.j)]) + >>> z.dtype + torch.complex64 + +""", +) + +add_docstr( + torch.polar, + r""" +polar(abs, angle, *, out=None) -> Tensor + +Constructs a complex tensor whose elements are Cartesian coordinates +corresponding to the polar coordinates with absolute value :attr:`abs` and angle +:attr:`angle`. + +.. math:: + \text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j + +.. note:: + `torch.polar` is similar to + `std::polar `_ + and does not compute the polar decomposition + of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do. + The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is + infinite. + +""" + + r""" +Args: + abs (Tensor): The absolute value the complex tensor. Must be float or double. + angle (Tensor): The angle of the complex tensor. Must be same dtype as + :attr:`abs`. + +Keyword args: + out (Tensor): If the inputs are ``torch.float32``, must be + ``torch.complex64``. If the inputs are ``torch.float64``, must be + ``torch.complex128``. + +Example:: + + >>> import numpy as np + >>> abs = torch.tensor([1, 2], dtype=torch.float64) + >>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64) + >>> z = torch.polar(abs, angle) + >>> z + tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128) +""", +) + +add_docstr( + torch.conj_physical, + r""" +conj_physical(input, *, out=None) -> Tensor + +Computes the element-wise conjugate of the given :attr:`input` tensor. 
+If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`. + +.. note:: + This performs the conjugate operation regardless of the fact conjugate bit is set or not. + +.. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of + non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical` + when :attr:`input` is of non-complex dtype to be compatible with this change. + +.. math:: + \text{out}_{i} = conj(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])) + tensor([-1 - 1j, -2 - 2j, 3 + 3j]) +""".format( + **common_args + ), +) + +add_docstr( + torch.conj, + r""" +conj(input) -> Tensor + +Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype, +this function just returns :attr:`input`. + +.. note:: + :func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized + at any time using :func:`torch.resolve_conj`. + +.. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of + non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical` + when :attr:`input` is of non-complex dtype to be compatible with this change. + +Args: + {input} + +Example:: + + >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) + >>> x.is_conj() + False + >>> y = torch.conj(x) + >>> y.is_conj() + True +""".format( + **common_args + ), +) + +add_docstr( + torch.resolve_conj, + r""" +resolve_conj(input) -> Tensor + +Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`, +else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`. 
+ +Args: + {input} + +Example:: + + >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) + >>> y = x.conj() + >>> y.is_conj() + True + >>> z = y.resolve_conj() + >>> z + tensor([-1 - 1j, -2 - 2j, 3 + 3j]) + >>> z.is_conj() + False +""".format( + **common_args + ), +) + +add_docstr( + torch.resolve_neg, + r""" +resolve_neg(input) -> Tensor + +Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`, +else returns :attr:`input`. The output tensor will always have its negative bit set to `False`. + +Args: + {input} + +Example:: + + >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) + >>> y = x.conj() + >>> z = y.imag + >>> z.is_neg() + True + >>> out = z.resolve_neg() + >>> out + tensor([-1., -2., 3.]) + >>> out.is_neg() + False +""".format( + **common_args + ), +) + +add_docstr( + torch.copysign, + r""" +copysign(input, other, *, out=None) -> Tensor + +Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise. + +.. math:: + \text{out}_{i} = \begin{cases} + -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\ + |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\ + \end{cases} +""" + + r""" + +Supports :ref:`broadcasting to a common shape `, +and integer and float inputs. + +Args: + input (Tensor): magnitudes. + other (Tensor or Number): contains value(s) whose signbit(s) are + applied to the magnitudes in :attr:`input`. 
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(5)
+ >>> a
+ tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244])
+ >>> torch.copysign(a, 1)
+ tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
+ >>> a = torch.randn(4, 4)
+ >>> a
+ tensor([[ 0.7079, 0.2778, -1.0249, 0.5719],
+ [-0.0059, -0.2600, -0.4475, -1.3948],
+ [ 0.3667, -0.9567, -2.5757, -0.1751],
+ [ 0.2046, -0.0742, 0.2998, -0.1054]])
+ >>> b = torch.randn(4)
+ >>> b
+ tensor([ 0.2373, 0.3120, 0.3190, -1.1128])
+ >>> torch.copysign(a, b)
+ tensor([[ 0.7079, 0.2778, 1.0249, -0.5719],
+ [ 0.0059, 0.2600, 0.4475, -1.3948],
+ [ 0.3667, 0.9567, 2.5757, -0.1751],
+ [ 0.2046, 0.0742, 0.2998, -0.1054]])
+ >>> a = torch.tensor([1.])
+ >>> b = torch.tensor([-0.])
+ >>> torch.copysign(a, b)
+ tensor([-1.])
+
+.. note::
+ copysign handles signed zeros. If the other argument has a negative zero (-0),
+ the corresponding output value will be negative.
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.cos,
+ r"""
+cos(input, *, out=None) -> Tensor
+
+Returns a new tensor with the cosine of the elements of :attr:`input`.
+
+.. math::
+ \text{out}_{i} = \cos(\text{input}_{i})
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([ 1.4309, 1.2706, -0.8562, 0.9796])
+ >>> torch.cos(a)
+ tensor([ 0.1395, 0.2957, 0.6553, 0.5574])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.cosh,
+ r"""
+cosh(input, *, out=None) -> Tensor
+
+Returns a new tensor with the hyperbolic cosine of the elements of
+:attr:`input`.
+
+.. math::
+ \text{out}_{i} = \cosh(\text{input}_{i})
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([ 0.1632, 1.1835, -0.6979, -0.7325])
+ >>> torch.cosh(a)
+ tensor([ 1.0133, 1.7860, 1.2536, 1.2805])
+
+.. 
note:: + When :attr:`input` is on the CPU, the implementation of torch.cosh may use + the Sleef library, which rounds very large results to infinity or negative + infinity. See `here `_ for details. +""".format( + **common_args + ), +) + +add_docstr( + torch.cross, + r""" +cross(input, other, dim=None, *, out=None) -> Tensor + + +Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input` +and :attr:`other`. + +Supports input of float, double, cfloat and cdouble dtypes. Also supports batches +of vectors, for which it computes the product along the dimension :attr:`dim`. +In this case, the output has the same batch dimensions as the inputs. + +.. warning:: + If :attr:`dim` is not given, it defaults to the first dimension found + with the size 3. Note that this might be unexpected. + + This behavior is deprecated and will be changed to match that of :func:`torch.linalg.cross` + in a future release. + +.. seealso:: + :func:`torch.linalg.cross` which has dim=-1 as default. + + +Args: + {input} + other (Tensor): the second input tensor + dim (int, optional): the dimension to take the cross-product in. 
+ +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4, 3) + >>> a + tensor([[-0.3956, 1.1455, 1.6895], + [-0.5849, 1.3672, 0.3599], + [-1.1626, 0.7180, -0.0521], + [-0.1339, 0.9902, -2.0225]]) + >>> b = torch.randn(4, 3) + >>> b + tensor([[-0.0257, -1.4725, -1.2251], + [-1.1479, -0.7005, -1.9757], + [-1.3904, 0.3726, -1.1836], + [-0.9688, -0.7153, 0.2159]]) + >>> torch.cross(a, b, dim=1) + tensor([[ 1.0844, -0.5281, 0.6120], + [-2.4490, -1.5687, 1.9792], + [-0.8304, -1.3037, 0.5650], + [-1.2329, 1.9883, 1.0551]]) + >>> torch.cross(a, b) + tensor([[ 1.0844, -0.5281, 0.6120], + [-2.4490, -1.5687, 1.9792], + [-0.8304, -1.3037, 0.5650], + [-1.2329, 1.9883, 1.0551]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.logcumsumexp, + r""" +logcumsumexp(input, dim, *, out=None) -> Tensor +Returns the logarithm of the cumulative summation of the exponentiation of +elements of :attr:`input` in the dimension :attr:`dim`. + +For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is + + .. math:: + \text{{logcumsumexp}}(x)_{{ij}} = \log \sum\limits_{{j=0}}^{{i}} \exp(x_{{ij}}) + +Args: + {input} + dim (int): the dimension to do the operation over + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(10) + >>> torch.logcumsumexp(a, dim=0) + tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811, + 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])) +""".format( + **reduceops_common_args + ), +) + +add_docstr( + torch.cummax, + r""" +cummax(input, dim, *, out=None) -> (Tensor, LongTensor) +Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of +elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index +location of each maximum value found in the dimension :attr:`dim`. + +.. 
math::
+ y_i = max(x_1, x_2, x_3, \dots, x_i)
+
+Args:
+ {input}
+ dim (int): the dimension to do the operation over
+
+Keyword args:
+ out (tuple, optional): the result tuple of two output tensors (values, indices)
+
+Example::
+
+ >>> a = torch.randn(10)
+ >>> a
+ tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
+ 1.9946, -0.8209])
+ >>> torch.cummax(a, dim=0)
+ torch.return_types.cummax(
+ values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
+ 1.9946, 1.9946]),
+ indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
+""".format(
+ **reduceops_common_args
+ ),
+)
+
+add_docstr(
+ torch.cummin,
+ r"""
+cummin(input, dim, *, out=None) -> (Tensor, LongTensor)
+Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
+elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
+location of each minimum value found in the dimension :attr:`dim`.
+
+.. math::
+ y_i = min(x_1, x_2, x_3, \dots, x_i)
+
+Args:
+ {input}
+ dim (int): the dimension to do the operation over
+
+Keyword args:
+ out (tuple, optional): the result tuple of two output tensors (values, indices)
+
+Example::
+
+ >>> a = torch.randn(10)
+ >>> a
+ tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
+ 0.9165, 1.6684])
+ >>> torch.cummin(a, dim=0)
+ torch.return_types.cummin(
+ values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
+ -1.3298, -1.3298]),
+ indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
+""".format(
+ **reduceops_common_args
+ ),
+)
+
+add_docstr(
+ torch.cumprod,
+ r"""
+cumprod(input, dim, *, dtype=None, out=None) -> Tensor
+
+Returns the cumulative product of elements of :attr:`input` in the dimension
+:attr:`dim`.
+
+For example, if :attr:`input` is a vector of size N, the result will also be
+a vector of size N, with elements.
+
+.. 
math:: + y_i = x_1 \times x_2\times x_3\times \dots \times x_i + +Args: + {input} + dim (int): the dimension to do the operation over + +Keyword args: + {dtype} + {out} + +Example:: + + >>> a = torch.randn(10) + >>> a + tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126, + -0.2129, -0.4206, 0.1968]) + >>> torch.cumprod(a, dim=0) + tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065, + 0.0014, -0.0006, -0.0001]) + + >>> a[5] = 0.0 + >>> torch.cumprod(a, dim=0) + tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000, + 0.0000, -0.0000, -0.0000]) +""".format( + **reduceops_common_args + ), +) + +add_docstr( + torch.cumsum, + r""" +cumsum(input, dim, *, dtype=None, out=None) -> Tensor + +Returns the cumulative sum of elements of :attr:`input` in the dimension +:attr:`dim`. + +For example, if :attr:`input` is a vector of size N, the result will also be +a vector of size N, with elements. + +.. math:: + y_i = x_1 + x_2 + x_3 + \dots + x_i + +Args: + {input} + dim (int): the dimension to do the operation over + +Keyword args: + {dtype} + {out} + +Example:: + + >>> a = torch.randn(10) + >>> a + tensor([-0.8286, -0.4890, 0.5155, 0.8443, 0.1865, -0.1752, -2.0595, + 0.1850, -1.1571, -0.4243]) + >>> torch.cumsum(a, dim=0) + tensor([-0.8286, -1.3175, -0.8020, 0.0423, 0.2289, 0.0537, -2.0058, + -1.8209, -2.9780, -3.4022]) +""".format( + **reduceops_common_args + ), +) + +add_docstr( + torch.count_nonzero, + r""" +count_nonzero(input, dim=None) -> Tensor + +Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`. +If no dim is specified then all non-zeros in the tensor are counted. + +Args: + {input} + dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros. 
+ +Example:: + + >>> x = torch.zeros(3,3) + >>> x[torch.randn(3,3) > 0.5] = 1 + >>> x + tensor([[0., 1., 1.], + [0., 0., 0.], + [0., 0., 1.]]) + >>> torch.count_nonzero(x) + tensor(3) + >>> torch.count_nonzero(x, dim=0) + tensor([0, 1, 2]) +""".format( + **reduceops_common_args + ), +) + +add_docstr( + torch.dequantize, + r""" +dequantize(tensor) -> Tensor + +Returns an fp32 Tensor by dequantizing a quantized Tensor + +Args: + tensor (Tensor): A quantized Tensor + +.. function:: dequantize(tensors) -> sequence of Tensors + :noindex: + +Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors + +Args: + tensors (sequence of Tensors): A list of quantized Tensors +""", +) + +add_docstr( + torch.diag, + r""" +diag(input, diagonal=0, *, out=None) -> Tensor + +- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor + with the elements of :attr:`input` as the diagonal. +- If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with + the diagonal elements of :attr:`input`. + +The argument :attr:`diagonal` controls which diagonal to consider: + +- If :attr:`diagonal` = 0, it is the main diagonal. +- If :attr:`diagonal` > 0, it is above the main diagonal. +- If :attr:`diagonal` < 0, it is below the main diagonal. + +Args: + {input} + diagonal (int, optional): the diagonal to consider + +Keyword args: + {out} + +.. seealso:: + + :func:`torch.diagonal` always returns the diagonal of its input. + + :func:`torch.diagflat` always constructs a tensor with diagonal elements + specified by the input. 
+ +Examples: + +Get the square matrix where the input vector is the diagonal:: + + >>> a = torch.randn(3) + >>> a + tensor([ 0.5950,-0.0872, 2.3298]) + >>> torch.diag(a) + tensor([[ 0.5950, 0.0000, 0.0000], + [ 0.0000,-0.0872, 0.0000], + [ 0.0000, 0.0000, 2.3298]]) + >>> torch.diag(a, 1) + tensor([[ 0.0000, 0.5950, 0.0000, 0.0000], + [ 0.0000, 0.0000,-0.0872, 0.0000], + [ 0.0000, 0.0000, 0.0000, 2.3298], + [ 0.0000, 0.0000, 0.0000, 0.0000]]) + +Get the k-th diagonal of a given matrix:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-0.4264, 0.0255,-0.1064], + [ 0.8795,-0.2429, 0.1374], + [ 0.1029,-0.6482,-1.6300]]) + >>> torch.diag(a, 0) + tensor([-0.4264,-0.2429,-1.6300]) + >>> torch.diag(a, 1) + tensor([ 0.0255, 0.1374]) +""".format( + **common_args + ), +) + +add_docstr( + torch.diag_embed, + r""" +diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor + +Creates a tensor whose diagonals of certain 2D planes (specified by +:attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`. +To facilitate creating batched diagonal matrices, the 2D planes formed by +the last two dimensions of the returned tensor are chosen by default. + +The argument :attr:`offset` controls which diagonal to consider: + +- If :attr:`offset` = 0, it is the main diagonal. +- If :attr:`offset` > 0, it is above the main diagonal. +- If :attr:`offset` < 0, it is below the main diagonal. + +The size of the new matrix will be calculated to make the specified diagonal +of the size of the last input dimension. +Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1` +and :attr:`dim2` matters. Exchanging them is equivalent to changing the +sign of :attr:`offset`. + +Applying :meth:`torch.diagonal` to the output of this function with +the same arguments yields a matrix identical to input. However, +:meth:`torch.diagonal` has different default dimensions, so those +need to be explicitly specified. + +Args: + {input} Must be at least 1-dimensional. 
+ offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: -2. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: -1. + +Example:: + + >>> a = torch.randn(2, 3) + >>> torch.diag_embed(a) + tensor([[[ 1.5410, 0.0000, 0.0000], + [ 0.0000, -0.2934, 0.0000], + [ 0.0000, 0.0000, -2.1788]], + + [[ 0.5684, 0.0000, 0.0000], + [ 0.0000, -1.0845, 0.0000], + [ 0.0000, 0.0000, -1.3986]]]) + + >>> torch.diag_embed(a, offset=1, dim1=0, dim2=2) + tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000], + [ 0.0000, 0.5684, 0.0000, 0.0000]], + + [[ 0.0000, 0.0000, -0.2934, 0.0000], + [ 0.0000, 0.0000, -1.0845, 0.0000]], + + [[ 0.0000, 0.0000, 0.0000, -2.1788], + [ 0.0000, 0.0000, 0.0000, -1.3986]], + + [[ 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000]]]) +""".format( + **common_args + ), +) + + +add_docstr( + torch.diagflat, + r""" +diagflat(input, offset=0) -> Tensor + +- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor + with the elements of :attr:`input` as the diagonal. +- If :attr:`input` is a tensor with more than one dimension, then returns a + 2-D tensor with diagonal elements equal to a flattened :attr:`input`. + +The argument :attr:`offset` controls which diagonal to consider: + +- If :attr:`offset` = 0, it is the main diagonal. +- If :attr:`offset` > 0, it is above the main diagonal. +- If :attr:`offset` < 0, it is below the main diagonal. + +Args: + {input} + offset (int, optional): the diagonal to consider. Default: 0 (main + diagonal). 
+ +Examples:: + + >>> a = torch.randn(3) + >>> a + tensor([-0.2956, -0.9068, 0.1695]) + >>> torch.diagflat(a) + tensor([[-0.2956, 0.0000, 0.0000], + [ 0.0000, -0.9068, 0.0000], + [ 0.0000, 0.0000, 0.1695]]) + >>> torch.diagflat(a, 1) + tensor([[ 0.0000, -0.2956, 0.0000, 0.0000], + [ 0.0000, 0.0000, -0.9068, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.1695], + [ 0.0000, 0.0000, 0.0000, 0.0000]]) + + >>> a = torch.randn(2, 2) + >>> a + tensor([[ 0.2094, -0.3018], + [-0.1516, 1.9342]]) + >>> torch.diagflat(a) + tensor([[ 0.2094, 0.0000, 0.0000, 0.0000], + [ 0.0000, -0.3018, 0.0000, 0.0000], + [ 0.0000, 0.0000, -0.1516, 0.0000], + [ 0.0000, 0.0000, 0.0000, 1.9342]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.diagonal, + r""" +diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor + +Returns a partial view of :attr:`input` with the its diagonal elements +with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension +at the end of the shape. + +The argument :attr:`offset` controls which diagonal to consider: + +- If :attr:`offset` = 0, it is the main diagonal. +- If :attr:`offset` > 0, it is above the main diagonal. +- If :attr:`offset` < 0, it is below the main diagonal. + +Applying :meth:`torch.diag_embed` to the output of this function with +the same arguments yields a diagonal matrix with the diagonal entries +of the input. However, :meth:`torch.diag_embed` has different default +dimensions, so those need to be explicitly specified. + +Args: + {input} Must be at least 2-dimensional. + offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: 0. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: 1. + +.. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1. 
+ +Examples:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-1.0854, 1.1431, -0.1752], + [ 0.8536, -0.0905, 0.0360], + [ 0.6927, -0.3735, -0.4945]]) + + + >>> torch.diagonal(a, 0) + tensor([-1.0854, -0.0905, -0.4945]) + + + >>> torch.diagonal(a, 1) + tensor([ 1.1431, 0.0360]) + + + >>> x = torch.randn(2, 5, 4, 2) + >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2) + tensor([[[-1.2631, 0.3755, -1.5977, -1.8172], + [-1.1065, 1.0401, -0.2235, -0.7938]], + + [[-1.7325, -0.3081, 0.6166, 0.2335], + [ 1.0500, 0.7336, -0.3836, -1.1015]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.diagonal_scatter, + r""" +diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor + +Embeds the values of the :attr:`src` tensor into :attr:`input` along +the diagonal elements of :attr:`input`, with respect to :attr:`dim1` +and :attr:`dim2`. + +This function returns a tensor with fresh storage; it does not +return a view. + +The argument :attr:`offset` controls which diagonal to consider: + +- If :attr:`offset` = 0, it is the main diagonal. +- If :attr:`offset` > 0, it is above the main diagonal. +- If :attr:`offset` < 0, it is below the main diagonal. + +Args: + {input} Must be at least 2-dimensional. + src (Tensor): the tensor to embed into :attr:`input`. + offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: 0. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: 1. + +.. note:: + + :attr:`src` must be of the proper size in order to be embedded + into :attr:`input`. 
Specifically, it should have the same shape as + ``torch.diagonal(input, offset, dim1, dim2)`` + +Examples:: + + >>> a = torch.zeros(3, 3) + >>> a + tensor([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]) + + >>> torch.diagonal_scatter(a, torch.ones(3), 0) + tensor([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + >>> torch.diagonal_scatter(a, torch.ones(2), 1) + tensor([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.as_strided_scatter, + r""" +as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor + +Embeds the values of the :attr:`src` tensor into :attr:`input` along +the elements corresponding to the result of calling +input.as_strided(size, stride, storage_offset). + +This function returns a tensor with fresh storage; it does not +return a view. + +Args: + {input} + size (tuple or ints): the shape of the output tensor + stride (tuple or ints): the stride of the output tensor + storage_offset (int, optional): the offset in the underlying storage of the output tensor + +.. note:: + + :attr:`src` must be of the proper size in order to be embedded + into :attr:`input`. Specifically, it should have the same shape as + `torch.as_strided(input, size, stride, storage_offset)` + +Example:: + + >>> a = torch.arange(4).reshape(2, 2) + 1 + >>> a + tensor([[1, 2], + [3, 4]]) + >>> b = torch.zeros(3, 3) + >>> b + tensor([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]) + >>> torch.as_strided_scatter(b, a, (2, 2), (1, 2)) + tensor([[1., 3., 2.], + [4., 0., 0.], + [0., 0., 0.]]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.diff, + r""" +diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor + +Computes the n-th forward difference along the given dimension. + +The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order +differences are calculated by using :func:`torch.diff` recursively. 
+ +Args: + input (Tensor): the tensor to compute the differences on + n (int, optional): the number of times to recursively compute the difference + dim (int, optional): the dimension to compute the difference along. + Default is the last dimension. + prepend, append (Tensor, optional): values to prepend or append to + :attr:`input` along :attr:`dim` before computing the difference. + Their dimensions must be equivalent to that of input, and their shapes + must match input's shape except on :attr:`dim`. + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([1, 3, 2]) + >>> torch.diff(a) + tensor([ 2, -1]) + >>> b = torch.tensor([4, 5]) + >>> torch.diff(a, append=b) + tensor([ 2, -1, 2, 1]) + >>> c = torch.tensor([[1, 2, 3], [3, 4, 5]]) + >>> torch.diff(c, dim=0) + tensor([[2, 2, 2]]) + >>> torch.diff(c, dim=1) + tensor([[1, 1], + [1, 1]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.digamma, + r""" +digamma(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.digamma`. +""", +) + +add_docstr( + torch.dist, + r""" +dist(input, other, p=2) -> Tensor + +Returns the p-norm of (:attr:`input` - :attr:`other`) + +The shapes of :attr:`input` and :attr:`other` must be +:ref:`broadcastable `. + +Args: + {input} + other (Tensor): the Right-hand-side input tensor + p (float, optional): the norm to be computed + +Example:: + + >>> x = torch.randn(4) + >>> x + tensor([-1.5393, -0.8675, 0.5916, 1.6321]) + >>> y = torch.randn(4) + >>> y + tensor([ 0.0967, -1.0511, 0.6295, 0.8360]) + >>> torch.dist(x, y, 3.5) + tensor(1.6727) + >>> torch.dist(x, y, 3) + tensor(1.6973) + >>> torch.dist(x, y, 0) + tensor(4.) + >>> torch.dist(x, y, 1) + tensor(2.6537) +""".format( + **common_args + ), +) + +add_docstr( + torch.div, + r""" +div(input, other, *, rounding_mode=None, out=None) -> Tensor + +Divides each element of the input ``input`` by the corresponding element of +:attr:`other`. + +.. 
math:: + \text{{out}}_i = \frac{{\text{{input}}_i}}{{\text{{other}}_i}} + +.. note:: + By default, this performs a "true" division like Python 3. + See the :attr:`rounding_mode` argument for floor division. + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer, float, and complex inputs. +Always promotes integer types to the default scalar type. + +Args: + input (Tensor): the dividend + other (Tensor or Number): the divisor + +Keyword args: + rounding_mode (str, optional): Type of rounding applied to the result: + + * None - default behavior. Performs no rounding and, if both :attr:`input` and + :attr:`other` are integer types, promotes the inputs to the default scalar type. + Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``. + * ``"trunc"`` - rounds the results of the division towards zero. + Equivalent to C-style integer division. + * ``"floor"`` - rounds the results of the division down. + Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``. + + {out} + +Examples:: + + >>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637]) + >>> torch.div(x, 0.5) + tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274]) + + >>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917], + ... [ 0.1815, -1.0111, 0.9805, -1.5923], + ... [ 0.1062, 1.4581, 0.7759, -1.2344], + ... 
[-0.1830, -0.0313, 1.1908, -1.4757]]) + >>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308]) + >>> torch.div(a, b) + tensor([[-0.4620, -6.6051, 0.5676, 1.2639], + [ 0.2260, -3.4509, -1.2086, 6.8990], + [ 0.1322, 4.9764, -0.9564, 5.3484], + [-0.2278, -0.1068, -1.4678, 6.3938]]) + + >>> torch.div(a, b, rounding_mode='trunc') + tensor([[-0., -6., 0., 1.], + [ 0., -3., -1., 6.], + [ 0., 4., -0., 5.], + [-0., -0., -1., 6.]]) + + >>> torch.div(a, b, rounding_mode='floor') + tensor([[-1., -7., 0., 1.], + [ 0., -4., -2., 6.], + [ 0., 4., -1., 5.], + [-1., -1., -2., 6.]]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.divide, + r""" +divide(input, other, *, rounding_mode=None, out=None) -> Tensor + +Alias for :func:`torch.div`. +""", +) + +add_docstr( + torch.dot, + r""" +dot(input, other, *, out=None) -> Tensor + +Computes the dot product of two 1D tensors. + +.. note:: + + Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product + of two 1D tensors with the same number of elements. + +Args: + input (Tensor): first tensor in the dot product, must be 1D. + other (Tensor): second tensor in the dot product, must be 1D. + +Keyword args: + {out} + +Example:: + + >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1])) + tensor(7) +""".format( + **common_args + ), +) + +add_docstr( + torch.vdot, + r""" +vdot(input, other, *, out=None) -> Tensor + +Computes the dot product of two 1D vectors along a dimension. + +In symbols, this function computes + +.. math:: + + \sum_{i=1}^n \overline{x_i}y_i. + +where :math:`\overline{x_i}` denotes the conjugate for complex +vectors, and it is the identity for real vectors. + +.. note:: + + Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product + of two 1D tensors with the same number of elements. + +.. seealso:: + + :func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension. 
+ +Args: + input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex. + other (Tensor): second tensor in the dot product, must be 1D. + +Keyword args: +""" + + rf""" +.. note:: {common_args["out"]} +""" + + r""" + +Example:: + + >>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1])) + tensor(7) + >>> a = torch.tensor((1 +2j, 3 - 1j)) + >>> b = torch.tensor((2 +1j, 4 - 0j)) + >>> torch.vdot(a, b) + tensor([16.+1.j]) + >>> torch.vdot(b, a) + tensor([16.-1.j]) +""", +) + +add_docstr( + torch.eq, + r""" +eq(input, other, *, out=None) -> Tensor + +Computes element-wise equality + +The second argument can be a number or a tensor whose shape is +:ref:`broadcastable ` with the first argument. + +Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + +Keyword args: + {out} + +Returns: + A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere + +Example:: + + >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[ True, False], + [False, True]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.equal, + r""" +equal(input, other) -> bool + +``True`` if two tensors have the same size and elements, ``False`` otherwise. + +Example:: + + >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2])) + True +""", +) + +add_docstr( + torch.erf, + r""" +erf(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.erf`. +""", +) + +add_docstr( + torch.erfc, + r""" +erfc(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.erfc`. +""", +) + +add_docstr( + torch.erfinv, + r""" +erfinv(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.erfinv`. +""", +) + +add_docstr( + torch.exp, + r""" +exp(input, *, out=None) -> Tensor + +Returns a new tensor with the exponential of the elements +of the input tensor :attr:`input`. + +.. 
math:: + y_{i} = e^{x_{i}} +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.exp(torch.tensor([0, math.log(2.)])) + tensor([ 1., 2.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.exp2, + r""" +exp2(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.exp2`. +""", +) + +add_docstr( + torch.expm1, + r""" +expm1(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.expm1`. +""", +) + +add_docstr( + torch.eye, + r""" +eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. + +Args: + n (int): the number of rows + m (int, optional): the number of columns with default being :attr:`n` + +Keyword arguments: + {out} + {dtype} + {layout} + {device} + {requires_grad} + +Returns: + Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere + +Example:: + + >>> torch.eye(3) + tensor([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.floor, + r""" +floor(input, *, out=None) -> Tensor + +Returns a new tensor with the floor of the elements of :attr:`input`, +the largest integer less than or equal to each element. + +For integer inputs, follows the array-api convention of returning a +copy of the input tensor. + +.. math:: + \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.8166, 1.5308, -0.2530, -0.2091]) + >>> torch.floor(a) + tensor([-1., 1., -1., -1.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.floor_divide, + r""" +floor_divide(input, other, *, out=None) -> Tensor + +.. note:: + + Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed + truncation division. To restore the previous behavior use + :func:`torch.div` with ``rounding_mode='trunc'``. 
+ +Computes :attr:`input` divided by :attr:`other`, elementwise, and floors +the result. + +.. math:: + \text{{out}}_i = \text{floor} \left( \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right) + +""" + + r""" + +Supports broadcasting to a common shape, type promotion, and integer and float inputs. + +Args: + input (Tensor or Number): the dividend + other (Tensor or Number): the divisor + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([4.0, 3.0]) + >>> b = torch.tensor([2.0, 2.0]) + >>> torch.floor_divide(a, b) + tensor([2.0, 1.0]) + >>> torch.floor_divide(a, 1.4) + tensor([2.0, 2.0]) +""".format( + **common_args + ), +) + +add_docstr( + torch.fmod, + r""" +fmod(input, other, *, out=None) -> Tensor + +Applies C++'s `std::fmod `_ entrywise. +The result has the same sign as the dividend :attr:`input` and its absolute value +is less than that of :attr:`other`. + +This function may be defined in terms of :func:`torch.div` as + +.. code:: python + + torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer and float inputs. + +.. note:: + + When the divisor is zero, returns ``NaN`` for floating point dtypes + on both CPU and GPU; raises ``RuntimeError`` for integer division by + zero on CPU; Integer division by zero on GPU may return any value. + +.. note:: + + Complex inputs are not supported. In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + +.. seealso:: + + :func:`torch.remainder` which implements Python's modulus operator. + This one is defined using division rounding down the result. 
+ +Args: + input (Tensor): the dividend + other (Tensor or Scalar): the divisor + +Keyword args: + {out} + +Example:: + + >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([-1., -0., -1., 1., 0., 1.]) + >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.frac, + r""" +frac(input, *, out=None) -> Tensor + +Computes the fractional portion of each element in :attr:`input`. + +.. math:: + \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i}) + +Example:: + + >>> torch.frac(torch.tensor([1, 2.5, -3.2])) + tensor([ 0.0000, 0.5000, -0.2000]) +""", +) + +add_docstr( + torch.frexp, + r""" +frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent) + +Decomposes :attr:`input` into mantissa and exponent tensors +such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`. + +The range of mantissa is the open interval (-1, 1). + +Supports float inputs. + +Args: + input (Tensor): the input tensor + + +Keyword args: + out (tuple, optional): the output tensors + +Example:: + + >>> x = torch.arange(9.) + >>> mantissa, exponent = torch.frexp(x) + >>> mantissa + tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000]) + >>> exponent + tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32) + >>> torch.ldexp(mantissa, exponent) + tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.]) +""", +) + +add_docstr( + torch.from_numpy, + r""" +from_numpy(ndarray) -> Tensor + +Creates a :class:`Tensor` from a :class:`numpy.ndarray`. + +The returned tensor and :attr:`ndarray` share the same memory. Modifications to +the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned +tensor is not resizable. 
+ +It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``, +``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``, +``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``, +and ``bool``. + +.. warning:: + Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior. + +Example:: + + >>> a = numpy.array([1, 2, 3]) + >>> t = torch.from_numpy(a) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([-1, 2, 3]) +""", +) + +add_docstr( + torch.frombuffer, + r""" +frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor + +Creates a 1-dimensional :class:`Tensor` from an object that implements +the Python buffer protocol. + +Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of +the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count` +elements. + +Note that either of the following must be true: + +1. :attr:`count` is a positive non-zero number, and the total number of bytes +in the buffer is less than :attr:`offset` plus :attr:`count` times the size +(in bytes) of :attr:`dtype`. + +2. :attr:`count` is negative, and the length (number of bytes) of the buffer +subtracted by the :attr:`offset` is a multiple of the size (in bytes) of +:attr:`dtype`. + +The returned tensor and buffer share the same memory. Modifications to +the tensor will be reflected in the buffer and vice versa. The returned +tensor is not resizable. + +.. note:: + This function increments the reference count for the object that + owns the shared memory. Therefore, such memory will not be deallocated + before the returned tensor goes out of scope. + +.. warning:: + This function's behavior is undefined when passed an object implementing + the buffer protocol whose data is not on the CPU. Doing so is likely to + cause a segmentation fault. + +.. 
warning:: + This function does not try to infer the :attr:`dtype` (hence, it is not + optional). Passing a different :attr:`dtype` than its source may result + in unexpected behavior. + +Args: + buffer (object): a Python object that exposes the buffer interface. + +Keyword args: + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + count (int, optional): the number of desired elements to be read. + If negative, all the elements (until the end of the buffer) will be + read. Default: -1. + offset (int, optional): the number of bytes to skip at the start of + the buffer. Default: 0. + {requires_grad} + +Example:: + + >>> import array + >>> a = array.array('i', [1, 2, 3]) + >>> t = torch.frombuffer(a, dtype=torch.int32) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([-1, 2, 3]) + + >>> # Interprets the signed char bytes as 32-bit integers. + >>> # Each 4 signed char elements will be interpreted as + >>> # 1 signed 32-bit integer. + >>> import array + >>> a = array.array('b', [-1, 0, 0, 0]) + >>> torch.frombuffer(a, dtype=torch.int32) + tensor([255], dtype=torch.int32) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.from_file, + r""" +from_file(filename, shared=None, size=0, *, dtype=None, layout=None, device=None, pin_memory=False) + +Creates a CPU tensor with a storage backed by a memory-mapped file. + +If ``shared`` is True, then memory is shared between processes. All changes are written to the file. +If ``shared`` is False, then changes to the tensor do not affect the file. + +``size`` is the number of elements in the Tensor. If ``shared`` is ``False``, then the file must contain +at least ``size * sizeof(dtype)`` bytes. If ``shared`` is ``True`` the file will be created if needed. + +.. note:: + Only CPU tensors can be mapped to files. + +.. note:: + For now, tensors with storages backed by a memory-mapped file cannot be created in pinned memory. 
+ + +Args: + filename (str): file name to map + shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the + underlying `mmap(2) call `_) + size (int): number of elements in the tensor + +Keyword args: + {dtype} + {layout} + {device} + {pin_memory} + +Example:: + >>> t = torch.randn(2, 5, dtype=torch.float64) + >>> t.numpy().tofile('storage.pt') + >>> t_mapped = torch.from_file('storage.pt', shared=False, size=10, dtype=torch.float64) + """.format( + **factory_common_args + ), +) + +add_docstr( + torch.flatten, + r""" +flatten(input, start_dim=0, end_dim=-1) -> Tensor + +Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim` +are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened. +The order of elements in :attr:`input` is unchanged. + +Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view, +or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can +be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the +flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned. + +.. note:: + Flattening a zero-dimensional tensor will return a one-dimensional view. + +Args: + {input} + start_dim (int): the first dim to flatten + end_dim (int): the last dim to flatten + +Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... [[5, 6], + ... [7, 8]]]) + >>> torch.flatten(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) + >>> torch.flatten(t, start_dim=1) + tensor([[1, 2, 3, 4], + [5, 6, 7, 8]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.unflatten, + r""" +unflatten(input, dim, sizes) -> Tensor + +Expands a dimension of the input tensor over multiple dimensions. + +.. 
seealso:: + + :func:`torch.flatten` the inverse of this function. It coalesces several dimensions into one. + +Args: + {input} + dim (int): Dimension to be unflattened, specified as an index into + ``input.shape``. + sizes (Tuple[int]): New shape of the unflattened dimension. + One of its elements can be `-1` in which case the corresponding output + dimension is inferred. Otherwise, the product of ``sizes`` *must* + equal ``input.shape[dim]``. + +Returns: + A View of input with the specified dimension unflattened. + +Examples:: + >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape + torch.Size([3, 2, 2, 1]) + >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape + torch.Size([3, 2, 2, 1]) + >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape + torch.Size([5, 2, 2, 3, 1, 1, 3]) +""".format( + **common_args + ), +) + +add_docstr( + torch.gather, + r""" +gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor + +Gathers values along an axis specified by `dim`. + +For a 3-D tensor the output is specified by:: + + out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0 + out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1 + out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2 + +:attr:`input` and :attr:`index` must have the same number of dimensions. +It is also required that ``index.size(d) <= input.size(d)`` for all +dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`. +Note that ``input`` and ``index`` do not broadcast against each other. + +Args: + input (Tensor): the source tensor + dim (int): the axis along which to index + index (LongTensor): the indices of elements to gather + +Keyword arguments: + sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor. 
+ out (Tensor, optional): the destination tensor + +Example:: + + >>> t = torch.tensor([[1, 2], [3, 4]]) + >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]])) + tensor([[ 1, 1], + [ 4, 3]]) +""", +) + + +add_docstr( + torch.gcd, + r""" +gcd(input, other, *, out=None) -> Tensor + +Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`. + +Both :attr:`input` and :attr:`other` must have integer types. + +.. note:: + This defines :math:`gcd(0, 0) = 0`. + +Args: + {input} + other (Tensor): the second input tensor + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.tensor([5, 10, 15]) + >>> b = torch.tensor([3, 4, 5]) + >>> torch.gcd(a, b) + tensor([1, 2, 5]) + >>> c = torch.tensor([3]) + >>> torch.gcd(a, c) + tensor([1, 1, 3]) +""".format( + **common_args + ), +) + +add_docstr( + torch.ge, + r""" +ge(input, other, *, out=None) -> Tensor + +Computes :math:`\text{input} \geq \text{other}` element-wise. +""" + + r""" + +The second argument can be a number or a tensor whose shape is +:ref:`broadcastable ` with the first argument. + +Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + +Keyword args: + {out} + +Returns: + A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere + +Example:: + + >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[True, True], [False, True]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.greater_equal, + r""" +greater_equal(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.ge`. 
+""", +) + +add_docstr( + torch.gradient, + r""" +gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + +Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in +one or more dimensions using the `second-order accurate central differences method +`_ and +either first or second order estimates at the boundaries. + +The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not +specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates +to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional +:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and +:math:`g(1, 2, 3)\ == input[1, 2, 3]`. + +When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. +This is detailed in the "Keyword Arguments" section below. + +The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is +accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be +improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative +is estimated using `Taylor’s theorem with remainder `_. +Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring +it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + +.. math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + +Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + +.. 
math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + +.. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + +The value of each partial derivative at the boundary points is computed differently. See edge_order below. + +Args: + input (``Tensor``): the tensor that represents the values of the function + +Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. 
+ +Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. 
+ >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + +""", +) + +add_docstr( + torch.geqrf, + r""" +geqrf(input, *, out=None) -> (Tensor, Tensor) + +This is a low-level function for calling LAPACK's geqrf directly. This function +returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_ . + +Computes a QR decomposition of :attr:`input`. +Both `Q` and `R` matrices are stored in the same output tensor `a`. +The elements of `R` are stored on and above the diagonal. +Elementary reflectors (or Householder vectors) implicitly defining matrix `Q` +are stored below the diagonal. +The results of this function can be used together with :func:`torch.linalg.householder_product` +to obtain the `Q` matrix or +with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix, +for an efficient matrix-matrix multiplication. + +See `LAPACK documentation for geqrf`_ for further details. + +.. note:: + See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq` + with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition. 
+ +Args: + input (Tensor): the input matrix + +Keyword args: + out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`. + +.. _LAPACK documentation for geqrf: + http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html + +""", +) + +add_docstr( + torch.inner, + r""" +inner(input, other, *, out=None) -> Tensor + +Computes the dot product for 1D tensors. For higher dimensions, sums the product +of elements from :attr:`input` and :attr:`other` along their last dimension. + +.. note:: + + If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent + to `torch.mul(input, other)`. + + If both :attr:`input` and :attr:`other` are non-scalars, the size of their last + dimension must match and the result is equivalent to `torch.tensordot(input, + other, dims=([-1], [-1]))` + +Args: + input (Tensor): First input tensor + other (Tensor): Second input tensor + +Keyword args: + out (Tensor, optional): Optional output tensor to write result into. The output + shape is `input.shape[:-1] + other.shape[:-1]`. 
+ +Example:: + + # Dot product + >>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1])) + tensor(7) + + # Multidimensional input tensors + >>> a = torch.randn(2, 3) + >>> a + tensor([[0.8173, 1.0874, 1.1784], + [0.3279, 0.1234, 2.7894]]) + >>> b = torch.randn(2, 4, 3) + >>> b + tensor([[[-0.4682, -0.7159, 0.1506], + [ 0.4034, -0.3657, 1.0387], + [ 0.9892, -0.6684, 0.1774], + [ 0.9482, 1.3261, 0.3917]], + + [[ 0.4537, 0.7493, 1.1724], + [ 0.2291, 0.5749, -0.2267], + [-0.7920, 0.3607, -0.3701], + [ 1.3666, -0.5850, -1.7242]]]) + >>> torch.inner(a, b) + tensor([[[-0.9837, 1.1560, 0.2907, 2.6785], + [ 2.5671, 0.5452, -0.6912, -1.5509]], + + [[ 0.1782, 2.9843, 0.7366, 1.5672], + [ 3.5115, -0.4864, -1.2476, -4.4337]]]) + + # Scalar input + >>> torch.inner(a, torch.tensor(2)) + tensor([[1.6347, 2.1748, 2.3567], + [0.6558, 0.2469, 5.5787]]) +""", +) + +add_docstr( + torch.outer, + r""" +outer(input, vec2, *, out=None) -> Tensor + +Outer product of :attr:`input` and :attr:`vec2`. +If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of +size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`. + +.. note:: This function does not :ref:`broadcast `. + +Args: + input (Tensor): 1-D input vector + vec2 (Tensor): 1-D input vector + +Keyword args: + out (Tensor, optional): optional output matrix + +Example:: + + >>> v1 = torch.arange(1., 5.) + >>> v2 = torch.arange(1., 4.) + >>> torch.outer(v1, v2) + tensor([[ 1., 2., 3.], + [ 2., 4., 6.], + [ 3., 6., 9.], + [ 4., 8., 12.]]) +""", +) + +add_docstr( + torch.ger, + r""" +ger(input, vec2, *, out=None) -> Tensor + +Alias of :func:`torch.outer`. + +.. warning:: + This function is deprecated and will be removed in a future PyTorch release. + Use :func:`torch.outer` instead. +""", +) + +add_docstr( + torch.get_default_dtype, + r""" +get_default_dtype() -> torch.dtype + +Get the current default floating point :class:`torch.dtype`. 
+ +Example:: + + >>> torch.get_default_dtype() # initial default for floating point is torch.float32 + torch.float32 + >>> torch.set_default_dtype(torch.float64) + >>> torch.get_default_dtype() # default is now changed to torch.float64 + torch.float64 + >>> torch.set_default_tensor_type(torch.FloatTensor) # setting tensor type also affects this + >>> torch.get_default_dtype() # changed to torch.float32, the dtype for torch.FloatTensor + torch.float32 + +""", +) + +add_docstr( + torch.get_num_threads, + r""" +get_num_threads() -> int + +Returns the number of threads used for parallelizing CPU operations +""", +) + +add_docstr( + torch.get_num_interop_threads, + r""" +get_num_interop_threads() -> int + +Returns the number of threads used for inter-op parallelism on CPU +(e.g. in JIT interpreter) +""", +) + +add_docstr( + torch.gt, + r""" +gt(input, other, *, out=None) -> Tensor + +Computes :math:`\text{input} > \text{other}` element-wise. +""" + + r""" + +The second argument can be a number or a tensor whose shape is +:ref:`broadcastable ` with the first argument. + +Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + +Keyword args: + {out} + +Returns: + A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere + +Example:: + + >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, True], [False, False]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.greater, + r""" +greater(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.gt`. +""", +) + +add_docstr( + torch.histc, + r""" +histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor + +Computes the histogram of a tensor. + +The elements are sorted into equal width bins between :attr:`min` and +:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and +maximum values of the data are used. 
+ +Elements lower than min and higher than max and ``NaN`` elements are ignored. + +Args: + {input} + bins (int): number of histogram bins + min (Scalar): lower end of the range (inclusive) + max (Scalar): upper end of the range (inclusive) + +Keyword args: + {out} + +Returns: + Tensor: Histogram represented as a tensor + +Example:: + + >>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3) + tensor([ 0., 2., 1., 0.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.histogram, + r""" +histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor) + +Computes a histogram of the values in a tensor. + +:attr:`bins` can be an integer or a 1D tensor. + +If :attr:`bins` is an int, it specifies the number of equal-width bins. +By default, the lower and upper range of the bins is determined by the +minimum and maximum elements of the input tensor. The :attr:`range` +argument can be provided to specify a range for the bins. + +If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges +including the rightmost edge. It should contain at least 2 elements +and its elements should be increasing. + +Args: + {input} + bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor, + defines the sequence of bin edges including the rightmost edge. + +Keyword args: + range (tuple of float): Defines the range of the bins. + weight (Tensor): If provided, weight should have the same shape as input. Each value in + input contributes its associated weight towards its bin's result. + density (bool): If False, the result will contain the count (or total weight) in each bin. + If True, the result is the value of the probability density function over the bins, + normalized such that the integral over the range of the bins is 1. + {out} (tuple, optional): The result tuple of two output tensors (hist, bin_edges). + +Returns: + hist (Tensor): 1D Tensor containing the values of the histogram. 
+ bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins. + +Example:: + + >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.])) + (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) + >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True) + (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) +""".format( + **common_args + ), +) + +add_docstr( + torch.histogramdd, + r""" +histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[]) + +Computes a multi-dimensional histogram of the values in a tensor. + +Interprets the elements of an input tensor whose innermost dimension has size N +as a collection of N-dimensional points. Maps each of the points into a set of +N-dimensional bins and returns the number of points (or total weight) in each bin. + +:attr:`input` must be a tensor with at least 2 dimensions. +If input has shape (M, N), each of its M rows defines a point in N-dimensional space. +If input has three or more dimensions, all but the last dimension are flattened. + +Each dimension is independently associated with its own strictly increasing sequence +of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D +tensors. Alternatively, bin edges may be constructed automatically by passing a +sequence of integers specifying the number of equal-width bins in each dimension. 
+ +For each N-dimensional point in input: + - Each of its coordinates is binned independently among the bin edges + corresponding to its dimension + - Binning results are combined to identify the N-dimensional bin (if any) + into which the point falls + - If the point falls into a bin, the bin's count (or total weight) is incremented + - Points which do not fall into any bin do not contribute to the output + +:attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int. + +If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences +of bin edges. Each 1D tensor should contain a strictly increasing sequence with at +least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying +the left and right edges of all bins. Every bin is exclusive of its left edge. Only +the rightmost bin is inclusive of its right edge. + +If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins +in each dimension. By default, the leftmost and rightmost bin edges in each dimension +are determined by the minimum and maximum elements of the input tensor in the +corresponding dimension. The :attr:`range` argument can be provided to manually +specify the leftmost and rightmost bin edges in each dimension. + +If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions. + +.. note:: + See also :func:`torch.histogram`, which specifically computes 1D histograms. + While :func:`torch.histogramdd` infers the dimensionality of its bins and + binned values from the shape of :attr:`input`, :func:`torch.histogram` + accepts and flattens :attr:`input` of any shape. + +Args: + {input} + bins: Tensor[], int[], or int. + If Tensor[], defines the sequences of bin edges. + If int[], defines the number of equal-width bins in each dimension. + If int, defines the number of equal-width bins for all dimensions. 
+Keyword args: + range (sequence of float): Defines the leftmost and rightmost bin edges + in each dimension. + weight (Tensor): By default, each value in the input has weight 1. If a weight + tensor is passed, each N-dimensional coordinate in input + contributes its associated weight towards its bin's result. + The weight tensor should have the same shape as the :attr:`input` + tensor excluding its innermost dimension N. + density (bool): If False (default), the result will contain the count (or total weight) + in each bin. If True, each count (weight) is divided by the total count + (total weight), then divided by the volume of its associated bin. +Returns: + hist (Tensor): N-dimensional Tensor containing the values of the histogram. + bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges. + +Example:: + >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3], + ... weight=torch.tensor([1., 2., 4., 8.])) + torch.return_types.histogramdd( + hist=tensor([[0., 1., 0.], + [2., 0., 0.], + [4., 0., 8.]]), + bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]), + tensor([0.0000, 0.6667, 1.3333, 2.0000]))) + + >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2], + ... range=[0., 1., 0., 1.], density=True) + torch.return_types.histogramdd( + hist=tensor([[2., 0.], + [0., 2.]]), + bin_edges=(tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]))) + +""".format( + **common_args + ), +) +# TODO: Fix via https://github.com/pytorch/pytorch/issues/75798 +torch.histogramdd.__module__ = "torch" + +add_docstr( + torch.hypot, + r""" +hypot(input, other, *, out=None) -> Tensor + +Given the legs of a right triangle, return its hypotenuse. + +.. math:: + \text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}} + +The shapes of ``input`` and ``other`` must be +:ref:`broadcastable `. 
+""" + + r""" +Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> a = torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])) + tensor([5.0000, 5.6569, 6.4031]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.i0, + r""" +i0(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.i0`. +""", +) + +add_docstr( + torch.igamma, + r""" +igamma(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.special.gammainc`. +""", +) + +add_docstr( + torch.igammac, + r""" +igammac(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.special.gammaincc`. +""", +) + +add_docstr( + torch.index_select, + r""" +index_select(input, dim, index, *, out=None) -> Tensor + +Returns a new tensor which indexes the :attr:`input` tensor along dimension +:attr:`dim` using the entries in :attr:`index` which is a `LongTensor`. + +The returned tensor has the same number of dimensions as the original tensor +(:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length +of :attr:`index`; other dimensions have the same size as in the original tensor. + +.. note:: The returned tensor does **not** use the same storage as the original + tensor. If :attr:`out` has a different shape than expected, we + silently change it to the correct shape, reallocating the underlying + storage if necessary. 
+ +Args: + {input} + dim (int): the dimension in which we index + index (IntTensor or LongTensor): the 1-D tensor containing the indices to index + +Keyword args: + {out} + +Example:: + + >>> x = torch.randn(3, 4) + >>> x + tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], + [-0.4664, 0.2647, -0.1228, -1.1068], + [-1.1734, -0.6571, 0.7230, -0.6004]]) + >>> indices = torch.tensor([0, 2]) + >>> torch.index_select(x, 0, indices) + tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], + [-1.1734, -0.6571, 0.7230, -0.6004]]) + >>> torch.index_select(x, 1, indices) + tensor([[ 0.1427, -0.5414], + [-0.4664, -0.1228], + [-1.1734, 0.7230]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.inverse, + r""" +inverse(input, *, out=None) -> Tensor + +Alias for :func:`torch.linalg.inv` +""", +) + +add_docstr( + torch.isin, + r""" +isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor + +Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns +a boolean tensor of the same shape as :attr:`elements` that is True for elements +in :attr:`test_elements` and False otherwise. + +.. note:: + One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both. + +Args: + elements (Tensor or Scalar): Input elements + test_elements (Tensor or Scalar): Values against which to test for each input element + assume_unique (bool, optional): If True, assumes both :attr:`elements` and + :attr:`test_elements` contain unique elements, which can speed up the + calculation. Default: False + invert (bool, optional): If True, inverts the boolean return tensor, resulting in True + values for elements *not* in :attr:`test_elements`. 
Default: False + +Returns: + A boolean tensor of the same shape as :attr:`elements` that is True for elements in + :attr:`test_elements` and False otherwise + +Example: + >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3])) + tensor([[False, True], + [ True, False]]) +""", +) + +add_docstr( + torch.isinf, + r""" +isinf(input) -> Tensor + +Tests if each element of :attr:`input` is infinite +(positive or negative infinity) or not. + +.. note:: + Complex values are infinite when their real or imaginary part is + infinite. + +Args: + {input} + +Returns: + A boolean tensor that is True where :attr:`input` is infinite and False elsewhere + +Example:: + + >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')])) + tensor([False, True, False, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.isposinf, + r""" +isposinf(input, *, out=None) -> Tensor +Tests if each element of :attr:`input` is positive infinity or not. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([-float('inf'), float('inf'), 1.2]) + >>> torch.isposinf(a) + tensor([False, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.isneginf, + r""" +isneginf(input, *, out=None) -> Tensor +Tests if each element of :attr:`input` is negative infinity or not. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([-float('inf'), float('inf'), 1.2]) + >>> torch.isneginf(a) + tensor([ True, False, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.isclose, + r""" +isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor + +Returns a new tensor with boolean elements representing if each element of +:attr:`input` is "close" to the corresponding element of :attr:`other`. +Closeness is defined as: + +.. 
math:: + \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert +""" + + r""" + +where :attr:`input` and :attr:`other` are finite. Where :attr:`input` +and/or :attr:`other` are nonfinite they are close if and only if +they are equal, with NaNs being considered equal to each other when +:attr:`equal_nan` is True. + +Args: + input (Tensor): first tensor to compare + other (Tensor): second tensor to compare + atol (float, optional): absolute tolerance. Default: 1e-08 + rtol (float, optional): relative tolerance. Default: 1e-05 + equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False`` + +Examples:: + + >>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4))) + tensor([ True, False, False]) + >>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5) + tensor([True, True]) +""", +) + +add_docstr( + torch.isfinite, + r""" +isfinite(input) -> Tensor + +Returns a new tensor with boolean elements representing if each element is `finite` or not. + +Real values are finite when they are not NaN, negative infinity, or infinity. +Complex values are finite when both their real and imaginary parts are finite. + +Args: + {input} + +Returns: + A boolean tensor that is True where :attr:`input` is finite and False elsewhere + +Example:: + + >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')])) + tensor([True, False, True, False, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.isnan, + r""" +isnan(input) -> Tensor + +Returns a new tensor with boolean elements representing if each element of :attr:`input` +is NaN or not. Complex values are considered NaN when either their real +and/or imaginary part is NaN. 
+ +Arguments: + {input} + +Returns: + A boolean tensor that is True where :attr:`input` is NaN and False elsewhere + +Example:: + + >>> torch.isnan(torch.tensor([1, float('nan'), 2])) + tensor([False, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.isreal, + r""" +isreal(input) -> Tensor + +Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not. +All real-valued types are considered real. Complex values are considered real when their imaginary part is 0. + +Arguments: + {input} + +Returns: + A boolean tensor that is True where :attr:`input` is real and False elsewhere + +Example:: + + >>> torch.isreal(torch.tensor([1, 1+1j, 2+0j])) + tensor([True, False, True]) +""".format( + **common_args + ), +) + +add_docstr( + torch.is_floating_point, + r""" +is_floating_point(input) -> (bool) + +Returns True if the data type of :attr:`input` is a floating point data type i.e., +one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``. + +Args: + {input} +""".format( + **common_args + ), +) + +add_docstr( + torch.is_complex, + r""" +is_complex(input) -> (bool) + +Returns True if the data type of :attr:`input` is a complex data type i.e., +one of ``torch.complex64``, and ``torch.complex128``. + +Args: + {input} +""".format( + **common_args + ), +) + +add_docstr( + torch.is_grad_enabled, + r""" +is_grad_enabled() -> (bool) + +Returns True if grad mode is currently enabled. +""".format( + **common_args + ), +) + +add_docstr( + torch.is_inference_mode_enabled, + r""" +is_inference_mode_enabled() -> (bool) + +Returns True if inference mode is currently enabled. +""".format( + **common_args + ), +) + +add_docstr( + torch.is_inference, + r""" +is_inference(input) -> (bool) + +Returns True if :attr:`input` is an inference tensor. + +A non-view tensor is an inference tensor if and only if it was +allocated during inference mode. 
A view tensor is an inference +tensor if and only if the tensor it is a view of is an inference tensor. + +For details on inference mode please see +`Inference Mode `_. + +Args: + {input} +""".format( + **common_args + ), +) + +add_docstr( + torch.is_conj, + r""" +is_conj(input) -> (bool) + +Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`. + +Args: + {input} +""".format( + **common_args + ), +) + +add_docstr( + torch.is_nonzero, + r""" +is_nonzero(input) -> (bool) + +Returns True if the :attr:`input` is a single element tensor which is not equal to zero +after type conversions. +i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or +``torch.tensor([False])``. +Throws a ``RuntimeError`` if ``torch.numel() != 1`` (even in case +of sparse tensors). + +Args: + {input} + +Examples:: + + >>> torch.is_nonzero(torch.tensor([0.])) + False + >>> torch.is_nonzero(torch.tensor([1.5])) + True + >>> torch.is_nonzero(torch.tensor([False])) + False + >>> torch.is_nonzero(torch.tensor([3])) + True + >>> torch.is_nonzero(torch.tensor([1, 3, 5])) + Traceback (most recent call last): + ... + RuntimeError: bool value of Tensor with more than one value is ambiguous + >>> torch.is_nonzero(torch.tensor([])) + Traceback (most recent call last): + ... + RuntimeError: bool value of Tensor with no values is ambiguous +""".format( + **common_args + ), +) + +add_docstr( + torch.kron, + r""" +kron(input, other, *, out=None) -> Tensor + +Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`. + +If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a +:math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a +:math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries: + +.. 
math:: + (\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} = + \text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n}, + +where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`. +If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions. + +Supports real-valued and complex-valued inputs. + +.. note:: + This function generalizes the typical definition of the Kronecker product for two matrices to two tensors, + as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a + :math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix: + + .. math:: + \mathbf{A} \otimes \mathbf{B}=\begin{bmatrix} + a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\ + \vdots & \ddots & \vdots \\ + a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix} + + where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`. + +Arguments: + input (Tensor) + other (Tensor) + +Keyword args: + out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None`` + +Examples:: + + >>> mat1 = torch.eye(2) + >>> mat2 = torch.ones(2, 2) + >>> torch.kron(mat1, mat2) + tensor([[1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.]]) + + >>> mat1 = torch.eye(2) + >>> mat2 = torch.arange(1, 5).reshape(2, 2) + >>> torch.kron(mat1, mat2) + tensor([[1., 2., 0., 0.], + [3., 4., 0., 0.], + [0., 0., 1., 2.], + [0., 0., 3., 4.]]) +""", +) + +add_docstr( + torch.kthvalue, + r""" +kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor) + +Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th +smallest element of each row of the :attr:`input` tensor in the given dimension +:attr:`dim`. And ``indices`` is the index location of each element found. + +If :attr:`dim` is not given, the last dimension of the `input` is chosen. 
+ +If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors +are the same size as :attr:`input`, except in the dimension :attr:`dim` where +they are of size 1. Otherwise, :attr:`dim` is squeezed +(see :func:`torch.squeeze`), resulting in both the :attr:`values` and +:attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor. + +.. note:: + When :attr:`input` is a CUDA tensor and there are multiple valid + :attr:`k` th values, this function may nondeterministically return + :attr:`indices` for any of them. + +Args: + {input} + k (int): k for the k-th smallest element + dim (int, optional): the dimension to find the kth value along + {keepdim} + +Keyword args: + out (tuple, optional): the output tuple of (Tensor, LongTensor) + can be optionally given to be used as output buffers + +Example:: + + >>> x = torch.arange(1., 6.) + >>> x + tensor([ 1., 2., 3., 4., 5.]) + >>> torch.kthvalue(x, 4) + torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3)) + + >>> x=torch.arange(1.,7.).resize_(2,3) + >>> x + tensor([[ 1., 2., 3.], + [ 4., 5., 6.]]) + >>> torch.kthvalue(x, 2, 0, True) + torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]])) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.lcm, + r""" +lcm(input, other, *, out=None) -> Tensor + +Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`. + +Both :attr:`input` and :attr:`other` must have integer types. + +.. note:: + This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`. 
+ +Args: + {input} + other (Tensor): the second input tensor + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.tensor([5, 10, 15]) + >>> b = torch.tensor([3, 4, 5]) + >>> torch.lcm(a, b) + tensor([15, 20, 15]) + >>> c = torch.tensor([3]) + >>> torch.lcm(a, c) + tensor([15, 30, 15]) +""".format( + **common_args + ), +) + +add_docstr( + torch.ldexp, + r""" +ldexp(input, other, *, out=None) -> Tensor + +Multiplies :attr:`input` by 2 ** :attr:`other`. + +.. math:: + \text{{out}}_i = \text{{input}}_i * 2^\text{{other}}_i +""" + + r""" + +Typically this function is used to construct floating point numbers by multiplying +mantissas in :attr:`input` with integral powers of two created from the exponents +in :attr:`other`. + +Args: + {input} + other (Tensor): a tensor of exponents, typically integers. + +Keyword args: + {out} + +Example:: + + >>> torch.ldexp(torch.tensor([1.]), torch.tensor([1])) + tensor([2.]) + >>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])) + tensor([ 2., 4., 8., 16.]) + + +""".format( + **common_args + ), +) + +add_docstr( + torch.le, + r""" +le(input, other, *, out=None) -> Tensor + +Computes :math:`\text{input} \leq \text{other}` element-wise. +""" + + r""" + +The second argument can be a number or a tensor whose shape is +:ref:`broadcastable ` with the first argument. + +Args: + input (Tensor): the tensor to compare + other (Tensor or Scalar): the tensor or value to compare + +Keyword args: + {out} + +Returns: + A boolean tensor that is True where :attr:`input` is less than or equal to + :attr:`other` and False elsewhere + +Example:: + + >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[True, False], [True, True]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.less_equal, + r""" +less_equal(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.le`. 
+""", +) + +add_docstr( + torch.lerp, + r""" +lerp(input, end, weight, *, out=None) + +Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based +on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor. + +.. math:: + \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i) +""" + + r""" +The shapes of :attr:`start` and :attr:`end` must be +:ref:`broadcastable `. If :attr:`weight` is a tensor, then +the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable `. + +Args: + input (Tensor): the tensor with the starting points + end (Tensor): the tensor with the ending points + weight (float or tensor): the weight for the interpolation formula + +Keyword args: + {out} + +Example:: + + >>> start = torch.arange(1., 5.) + >>> end = torch.empty(4).fill_(10) + >>> start + tensor([ 1., 2., 3., 4.]) + >>> end + tensor([ 10., 10., 10., 10.]) + >>> torch.lerp(start, end, 0.5) + tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) + >>> torch.lerp(start, end, torch.full_like(start, 0.5)) + tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) +""".format( + **common_args + ), +) + +add_docstr( + torch.lgamma, + r""" +lgamma(input, *, out=None) -> Tensor + +Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`. + +.. math:: + \text{out}_{i} = \ln |\Gamma(\text{input}_{i})| +""" + + """ +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.arange(0.5, 2, 0.5) + >>> torch.lgamma(a) + tensor([ 0.5724, 0.0000, -0.1208]) +""".format( + **common_args + ), +) + +add_docstr( + torch.linspace, + r""" +linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly +spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are: + +.. 
math:: + (\text{start}, + \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \ldots, + \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \text{end}) +""" + + """ + +From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. + +Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + +Keyword arguments: + {out} + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + {layout} + {device} + {requires_grad} + + +Example:: + + >>> torch.linspace(3, 10, steps=5) + tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) + >>> torch.linspace(-10, 10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=1) + tensor([-10.]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.log, + r""" +log(input, *, out=None) -> Tensor + +Returns a new tensor with the natural logarithm of the elements +of :attr:`input`. + +.. math:: + y_{i} = \log_{e} (x_{i}) +""" + + r""" + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.rand(5) * 5 + >>> a + tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739]) + >>> torch.log(a) + tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204]) +""".format( + **common_args + ), +) + +add_docstr( + torch.log10, + r""" +log10(input, *, out=None) -> Tensor + +Returns a new tensor with the logarithm to the base 10 of the elements +of :attr:`input`. + +.. 
math:: + y_{i} = \log_{10} (x_{i}) +""" + + r""" + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.rand(5) + >>> a + tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251]) + + + >>> torch.log10(a) + tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.log1p, + r""" +log1p(input, *, out=None) -> Tensor + +Returns a new tensor with the natural logarithm of (1 + :attr:`input`). + +.. math:: + y_i = \log_{e} (x_i + 1) +""" + + r""" +.. note:: This function is more accurate than :func:`torch.log` for small + values of :attr:`input` + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(5) + >>> a + tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492]) + >>> torch.log1p(a) + tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225]) +""".format( + **common_args + ), +) + +add_docstr( + torch.log2, + r""" +log2(input, *, out=None) -> Tensor + +Returns a new tensor with the logarithm to the base 2 of the elements +of :attr:`input`. + +.. math:: + y_{i} = \log_{2} (x_{i}) +""" + + r""" + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.rand(5) + >>> a + tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490]) + + + >>> torch.log2(a) + tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.logaddexp, + r""" +logaddexp(input, other, *, out=None) -> Tensor + +Logarithm of the sum of exponentiations of the inputs. + +Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful +in statistics where the calculated probabilities of events may be so small as to +exceed the range of normal floating point numbers. In such cases the logarithm +of the calculated probability is stored. This function allows adding +probabilities stored in such a fashion. + +This op should be disambiguated with :func:`torch.logsumexp` which performs a +reduction on a single tensor. 
+ +Args: + {input} + other (Tensor): the second input tensor + +Keyword arguments: + {out} + +Example:: + + >>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3])) + tensor([-0.3069, -0.6867, -0.8731]) + >>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3])) + tensor([-1., -2., -3.]) + >>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3])) + tensor([1.1269e+00, 2.0000e+03, 3.0000e+04]) +""".format( + **common_args + ), +) + +add_docstr( + torch.logaddexp2, + r""" +logaddexp2(input, other, *, out=None) -> Tensor + +Logarithm of the sum of exponentiations of the inputs in base-2. + +Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See +:func:`torch.logaddexp` for more details. + +Args: + {input} + other (Tensor): the second input tensor + +Keyword arguments: + {out} +""".format( + **common_args + ), +) + +add_docstr( + torch.xlogy, + r""" +xlogy(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.special.xlogy`. +""", +) + +add_docstr( + torch.logical_and, + r""" +logical_and(input, other, *, out=None) -> Tensor + +Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are +treated as ``True``. 
+ +Args: + {input} + other (Tensor): the tensor to compute AND with + +Keyword args: + {out} + +Example:: + + >>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False])) + tensor([ True, False, False]) + >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) + >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) + >>> torch.logical_and(a, b) + tensor([False, False, True, False]) + >>> torch.logical_and(a.double(), b.double()) + tensor([False, False, True, False]) + >>> torch.logical_and(a.double(), b) + tensor([False, False, True, False]) + >>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool)) + tensor([False, False, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.logical_not, + r""" +logical_not(input, *, out=None) -> Tensor + +Computes the element-wise logical NOT of the given input tensor. If not specified, the output tensor will have the bool +dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.logical_not(torch.tensor([True, False])) + tensor([False, True]) + >>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)) + tensor([ True, False, False]) + >>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double)) + tensor([ True, False, False]) + >>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16)) + tensor([1, 0, 0], dtype=torch.int16) +""".format( + **common_args + ), +) + +add_docstr( + torch.logical_or, + r""" +logical_or(input, other, *, out=None) -> Tensor + +Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are +treated as ``True``. 
+ +Args: + {input} + other (Tensor): the tensor to compute OR with + +Keyword args: + {out} + +Example:: + + >>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False])) + tensor([ True, False, True]) + >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) + >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) + >>> torch.logical_or(a, b) + tensor([ True, True, True, False]) + >>> torch.logical_or(a.double(), b.double()) + tensor([ True, True, True, False]) + >>> torch.logical_or(a.double(), b) + tensor([ True, True, True, False]) + >>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool)) + tensor([ True, True, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.logical_xor, + r""" +logical_xor(input, other, *, out=None) -> Tensor + +Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are +treated as ``True``. + +Args: + {input} + other (Tensor): the tensor to compute XOR with + +Keyword args: + {out} + +Example:: + + >>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False])) + tensor([False, False, True]) + >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) + >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) + >>> torch.logical_xor(a, b) + tensor([ True, True, False, False]) + >>> torch.logical_xor(a.double(), b.double()) + tensor([ True, True, False, False]) + >>> torch.logical_xor(a.double(), b) + tensor([ True, True, False, False]) + >>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool)) + tensor([ True, True, False, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.logspace, + """ +logspace(start, end, steps, base=10.0, *, \ + out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +""" + + r""" + +Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly +spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to 
+:math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale +with base :attr:`base`. That is, the values are: + +.. math:: + (\text{base}^{\text{start}}, + \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \ldots, + \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \text{base}^{\text{end}}) +""" + + """ + + +From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. + +Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + base (float, optional): base of the logarithm function. Default: ``10.0``. + +Keyword arguments: + {out} + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.logspace(start=-10, end=10, steps=5) + tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) + >>> torch.logspace(start=0.1, end=1.0, steps=5) + tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) + >>> torch.logspace(start=0.1, end=1.0, steps=1) + tensor([1.2589]) + >>> torch.logspace(start=2, end=2, steps=1, base=2) + tensor([4.0]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.logsumexp, + r""" +logsumexp(input, dim, keepdim=False, *, out=None) + +Returns the log of summed exponentials of each row of the :attr:`input` +tensor in the given dimension :attr:`dim`. The computation is numerically +stabilized. + +For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is + + .. 
math:: + \text{{logsumexp}}(x)_{{i}} = \log \sum_j \exp(x_{{ij}}) + +{keepdim_details} + +Args: + {input} + {opt_dim} + {keepdim} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(3, 3) + >>> torch.logsumexp(a, 1) + tensor([1.4907, 1.0593, 1.5696]) + >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1))) + tensor(1.6859e-07) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.lt, + r""" +lt(input, other, *, out=None) -> Tensor + +Computes :math:`\text{input} < \text{other}` element-wise. +""" + + r""" + +The second argument can be a number or a tensor whose shape is +:ref:`broadcastable ` with the first argument. + +Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + +Keyword args: + {out} + +Returns: + A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere + +Example:: + + >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, False], [True, False]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.lu_unpack, + r""" +lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor) + +Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices. + +.. seealso:: + + :func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient + than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`. + +Args: + LU_data (Tensor): the packed LU factorization data + LU_pivots (Tensor): the packed LU factorization pivots + unpack_data (bool): flag indicating if the data should be unpacked. + If ``False``, then the returned ``L`` and ``U`` are empty tensors. + Default: ``True`` + unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``. + If ``False``, then the returned ``P`` is an empty tensor. 
+ Default: ``True`` + +Keyword args: + out (tuple, optional): output tuple of three tensors. Ignored if `None`. + +Returns: + A namedtuple ``(P, L, U)`` + +Examples:: + + >>> A = torch.randn(2, 3, 3) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> P, L, U = torch.lu_unpack(LU, pivots) + >>> # We can recover A from the factorization + >>> A_ = P @ L @ U + >>> torch.allclose(A, A_) + True + + >>> # LU factorization of a rectangular matrix: + >>> A = torch.randn(2, 3, 2) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> P, L, U = torch.lu_unpack(LU, pivots) + >>> # P, L, U are the same as returned by linalg.lu + >>> P_, L_, U_ = torch.linalg.lu(A) + >>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_) + True + +""".format( + **common_args + ), +) + +add_docstr( + torch.less, + r""" +less(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.lt`. +""", +) + +add_docstr( + torch.lu_solve, + r""" +lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor + +Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted +LU factorization of A from :func:`~linalg.lu_factor`. + +This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`. + +.. warning:: + + :func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`. + :func:`torch.lu_solve` will be removed in a future PyTorch release. + ``X = torch.lu_solve(B, LU, pivots)`` should be replaced with + + .. code:: python + + X = linalg.lu_solve(LU, pivots, B) + +Arguments: + b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*` + is zero or more batch dimensions. + LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`, + where :math:`*` is zero or more batch dimensions. + LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`, + where :math:`*` is zero or more batch dimensions. 
+ The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of + :attr:`LU_data`. + +Keyword args: + {out} + +Example:: + + >>> A = torch.randn(2, 3, 3) + >>> b = torch.randn(2, 3, 1) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> x = torch.lu_solve(b, LU, pivots) + >>> torch.dist(A @ x, b) + tensor(1.00000e-07 * + 2.8312) +""".format( + **common_args + ), +) + +add_docstr( + torch.masked_select, + r""" +masked_select(input, mask, *, out=None) -> Tensor + +Returns a new 1-D tensor which indexes the :attr:`input` tensor according to +the boolean mask :attr:`mask` which is a `BoolTensor`. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need +to match, but they must be :ref:`broadcastable `. + +.. note:: The returned tensor does **not** use the same storage + as the original tensor + +Args: + {input} + mask (BoolTensor): the tensor containing the binary mask to index with + +Keyword args: + {out} + +Example:: + + >>> x = torch.randn(3, 4) + >>> x + tensor([[ 0.3552, -2.3825, -0.8297, 0.3477], + [-1.2035, 1.2252, 0.5002, 0.6248], + [ 0.1307, -2.0608, 0.1244, 2.0139]]) + >>> mask = x.ge(0.5) + >>> mask + tensor([[False, False, False, False], + [False, True, True, True], + [False, False, False, True]]) + >>> torch.masked_select(x, mask) + tensor([ 1.2252, 0.5002, 0.6248, 2.0139]) +""".format( + **common_args + ), +) + +add_docstr( + torch.matrix_power, + r""" +matrix_power(input, n, *, out=None) -> Tensor + +Alias for :func:`torch.linalg.matrix_power` +""", +) + +add_docstr( + torch.matrix_exp, + r""" +matrix_exp(A) -> Tensor + +Alias for :func:`torch.linalg.matrix_exp`. +""", +) + +add_docstr( + torch.max, + r""" +max(input) -> Tensor + +Returns the maximum value of all elements in the ``input`` tensor. + +.. 
warning:: + This function produces deterministic (sub)gradients unlike ``max(dim=0)`` + +Args: + {input} + +Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6763, 0.7445, -2.2369]]) + >>> torch.max(a) + tensor(0.7445) + +.. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + +Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum +value of each row of the :attr:`input` tensor in the given dimension +:attr:`dim`. And ``indices`` is the index location of each maximum value found +(argmax). + +If ``keepdim`` is ``True``, the output tensors are of the same size +as ``input`` except in the dimension ``dim`` where they are of size 1. +Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting +in the output tensors having 1 fewer dimension than ``input``. + +.. note:: If there are multiple maximal values in a reduced row then + the indices of the first maximal value are returned. + +Args: + {input} + {dim} + {keepdim} Default: ``False``. + +Keyword args: + out (tuple, optional): the result tuple of two output tensors (max, max_indices) + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-1.2360, -0.2942, -0.1222, 0.8475], + [ 1.1949, -1.1127, -2.2379, -0.6702], + [ 1.5717, -0.9207, 0.1297, -1.8768], + [-0.6172, 1.0036, -0.6060, -0.2432]]) + >>> torch.max(a, 1) + torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1])) + +.. function:: max(input, other, *, out=None) -> Tensor + :noindex: + +See :func:`torch.maximum`. + +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.maximum, + r""" +maximum(input, other, *, out=None) -> Tensor + +Computes the element-wise maximum of :attr:`input` and :attr:`other`. + +.. note:: + If one of the elements being compared is a NaN, then that element is returned. + :func:`maximum` is not supported for tensors with complex dtypes. 
+ +Args: + {input} + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor((1, 2, -1)) + >>> b = torch.tensor((3, 0, 4)) + >>> torch.maximum(a, b) + tensor([3, 2, 4]) +""".format( + **common_args + ), +) + +add_docstr( + torch.fmax, + r""" +fmax(input, other, *, out=None) -> Tensor + +Computes the element-wise maximum of :attr:`input` and :attr:`other`. + +This is like :func:`torch.maximum` except it handles NaNs differently: +if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum. +Only if both elements are NaN is NaN propagated. + +This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function. + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer and floating-point inputs. + +Args: + {input} + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')]) + >>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')]) + >>> torch.fmax(a, b) + tensor([9.7000, 0.5000, 3.1000, nan]) +""".format( + **common_args + ), +) + +add_docstr( + torch.amax, + r""" +amax(input, dim, keepdim=False, *, out=None) -> Tensor + +Returns the maximum value of each slice of the :attr:`input` tensor in the given +dimension(s) :attr:`dim`. + +.. note:: + The difference between ``max``/``min`` and ``amax``/``amin`` is: + - ``amax``/``amin`` supports reducing on multiple dimensions, + - ``amax``/``amin`` does not return indices, + - ``amax``/``amin`` evenly distributes gradient between equal values, + while ``max(dim)``/``min(dim)`` propagates gradient only to a single + index in the source tensor. 
+ +{keepdim_details} + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.8177, 1.4878, -0.2491, 0.9130], + [-0.7158, 1.1775, 2.0992, 0.4817], + [-0.0053, 0.0164, -1.3738, -0.0507], + [ 1.9700, 1.1106, -1.0318, -1.0816]]) + >>> torch.amax(a, 1) + tensor([1.4878, 2.0992, 0.0164, 1.9700]) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.argmax, + r""" +argmax(input) -> LongTensor + +Returns the indices of the maximum value of all elements in the :attr:`input` tensor. + +This is the second value returned by :meth:`torch.max`. See its +documentation for the exact semantics of this method. + +.. note:: If there are multiple maximal values then the indices of the first maximal value are returned. + +Args: + {input} + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 1.3398, 0.2663, -0.2686, 0.2450], + [-0.7401, -0.8805, -0.3402, -1.1936], + [ 0.4907, -1.3948, -1.0691, -0.3132], + [-1.6092, 0.5419, -0.2993, 0.3195]]) + >>> torch.argmax(a) + tensor(0) + +.. function:: argmax(input, dim, keepdim=False) -> LongTensor + :noindex: + +Returns the indices of the maximum values of a tensor across a dimension. + +This is the second value returned by :meth:`torch.max`. See its +documentation for the exact semantics of this method. + +Args: + {input} + {dim} If ``None``, the argmax of the flattened input is returned. + {keepdim} + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 1.3398, 0.2663, -0.2686, 0.2450], + [-0.7401, -0.8805, -0.3402, -1.1936], + [ 0.4907, -1.3948, -1.0691, -0.3132], + [-1.6092, 0.5419, -0.2993, 0.3195]]) + >>> torch.argmax(a, dim=1) + tensor([ 0, 2, 0, 1]) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.argwhere, + r""" +argwhere(input) -> Tensor + +Returns a tensor containing the indices of all non-zero elements of +:attr:`input`. Each row in the result contains the indices of a non-zero +element in :attr:`input`. 
The result is sorted lexicographically, with +the last index changing the fastest (C-style). + +If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor +:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of +non-zero elements in the :attr:`input` tensor. + +.. note:: + This function is similar to NumPy's `argwhere`. + + When :attr:`input` is on CUDA, this function causes host-device synchronization. + +Args: + {input} + +Example:: + + >>> t = torch.tensor([1, 0, 1]) + >>> torch.argwhere(t) + tensor([[0], + [2]]) + >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]]) + >>> torch.argwhere(t) + tensor([[0, 0], + [0, 2], + [1, 1], + [1, 2]]) +""", +) + +add_docstr( + torch.mean, + r""" +mean(input, *, dtype=None) -> Tensor + +Returns the mean value of all elements in the :attr:`input` tensor. + +Args: + {input} + +Keyword args: + {dtype} + +Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.2294, -0.5481, 1.3288]]) + >>> torch.mean(a) + tensor(0.3367) + +.. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor + :noindex: + +Returns the mean value of each row of the :attr:`input` tensor in the given +dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, +reduce over all of them. + +{keepdim_details} + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + {dtype} + {out} + +.. seealso:: + + :func:`torch.nanmean` computes the mean value of `non-NaN` elements. 
+ +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.3841, 0.6320, 0.4254, -0.7384], + [-0.9644, 1.0131, -0.6549, -1.4279], + [-0.2951, -1.3350, -0.7694, 0.5600], + [ 1.0842, -0.9580, 0.3623, 0.2343]]) + >>> torch.mean(a, 1) + tensor([-0.0163, -0.5085, -0.4599, 0.1807]) + >>> torch.mean(a, 1, True) + tensor([[-0.0163], + [-0.5085], + [-0.4599], + [ 0.1807]]) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.nanmean, + r""" +nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor + +Computes the mean of all `non-NaN` elements along the specified dimensions. + +This function is identical to :func:`torch.mean` when there are no `NaN` values +in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will +propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the +`NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`). + +{keepdim_details} + +Args: + {input} + {opt_dim} + {keepdim} + +Keyword args: + {dtype} + {out} + +.. seealso:: + + :func:`torch.mean` computes the mean value, propagating `NaN`. + +Example:: + + >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]]) + >>> x.mean() + tensor(nan) + >>> x.nanmean() + tensor(1.8000) + >>> x.mean(dim=0) + tensor([ nan, 1.5000, 2.5000]) + >>> x.nanmean(dim=0) + tensor([1.0000, 1.5000, 2.5000]) + + # If all elements in the reduced dimensions are NaN then the result is NaN + >>> torch.tensor([torch.nan]).nanmean() + tensor(nan) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.median, + r""" +median(input) -> Tensor + +Returns the median of the values in :attr:`input`. + +.. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements. In this case the lower of the two medians is returned. To + compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead. + +.. 
warning:: + This function produces deterministic (sub)gradients unlike ``median(dim=0)`` + +Args: + {input} + +Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 1.5219, -1.5212, 0.2202]]) + >>> torch.median(a) + tensor(0.2202) + +.. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + +Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` +in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`. + +By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + +If :attr:`keepdim` is ``True``, the output tensors are of the same size +as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. +Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in +the outputs tensor having 1 fewer dimension than :attr:`input`. + +.. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements in the dimension :attr:`dim`. In this case the lower of the + two medians is returned. To compute the mean of both medians in + :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead. + +.. warning:: + ``indices`` does not necessarily contain the first occurrence of each + median value found, unless it is unique. + The exact implementation details are device-specific. + Do not expect the same result when run on CPU and GPU in general. + For the same reason do not expect the gradients to be deterministic. + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. 
+ +Example:: + + >>> a = torch.randn(4, 5) + >>> a + tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131], + [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270], + [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488], + [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]]) + >>> torch.median(a, 1) + torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3])) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.nanmedian, + r""" +nanmedian(input) -> Tensor + +Returns the median of the values in :attr:`input`, ignoring ``NaN`` values. + +This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`. +When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``, +while this function will return the median of the non-``NaN`` elements in :attr:`input`. +If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``. + +Args: + {input} + +Example:: + + >>> a = torch.tensor([1, float('nan'), 3, 2]) + >>> a.median() + tensor(nan) + >>> a.nanmedian() + tensor(2.) + +.. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + +Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` +in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values +found in the dimension :attr:`dim`. + +This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has +one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the +median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too. 
+ +Args: + {input} + {dim} + {keepdim} + +Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. + +Example:: + + >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]]) + >>> a + tensor([[2., 3., 1.], + [nan, 1., nan]]) + >>> a.median(0) + torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1])) + >>> a.nanmedian(0) + torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0])) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.quantile, + r""" +quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor + +Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`. + +To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location +of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with +indices ``i`` and ``j`` in the sorted order, result is computed according to the given +:attr:`interpolation` method as follows: + +- ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index. +- ``lower``: ``a``. +- ``higher``: ``b``. +- ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions). +- ``midpoint``: ``(a + b) / 2``. + +If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size +equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction. + +.. note:: + By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation. + +Args: + {input} + q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1]. 
+ {dim} + {keepdim} + +Keyword arguments: + interpolation (str): interpolation method to use when the desired quantile lies between two data points. + Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. + Default is ``linear``. + {out} + +Example:: + + >>> a = torch.randn(2, 3) + >>> a + tensor([[ 0.0795, -1.2117, 0.9765], + [ 1.1707, 0.6706, 0.4884]]) + >>> q = torch.tensor([0.25, 0.5, 0.75]) + >>> torch.quantile(a, q, dim=1, keepdim=True) + tensor([[[-0.5661], + [ 0.5795]], + + [[ 0.0795], + [ 0.6706]], + + [[ 0.5280], + [ 0.9206]]]) + >>> torch.quantile(a, q, dim=1, keepdim=True).shape + torch.Size([3, 2, 1]) + >>> a = torch.arange(4.) + >>> a + tensor([0., 1., 2., 3.]) + >>> torch.quantile(a, 0.6, interpolation='linear') + tensor(1.8000) + >>> torch.quantile(a, 0.6, interpolation='lower') + tensor(1.) + >>> torch.quantile(a, 0.6, interpolation='higher') + tensor(2.) + >>> torch.quantile(a, 0.6, interpolation='midpoint') + tensor(1.5000) + >>> torch.quantile(a, 0.6, interpolation='nearest') + tensor(2.) + >>> torch.quantile(a, 0.4, interpolation='nearest') + tensor(1.) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.nanquantile, + r""" +nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor + +This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values, +computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did +not exist. If all values in a reduced row are ``NaN`` then the quantiles for +that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`. + +Args: + {input} + q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1] + {dim} + {keepdim} + +Keyword arguments: + interpolation (str): interpolation method to use when the desired quantile lies between two data points. + Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. + Default is ``linear``. 
+ {out} + +Example:: + + >>> t = torch.tensor([float('nan'), 1, 2]) + >>> t.quantile(0.5) + tensor(nan) + >>> t.nanquantile(0.5) + tensor(1.5000) + >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]]) + >>> t + tensor([[nan, nan], + [1., 2.]]) + >>> t.nanquantile(0.5, dim=0) + tensor([1., 2.]) + >>> t.nanquantile(0.5, dim=1) + tensor([ nan, 1.5000]) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.min, + r""" +min(input) -> Tensor + +Returns the minimum value of all elements in the :attr:`input` tensor. + +.. warning:: + This function produces deterministic (sub)gradients unlike ``min(dim=0)`` + +Args: + {input} + +Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6750, 1.0857, 1.7197]]) + >>> torch.min(a) + tensor(0.6750) + +.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + +Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum +value of each row of the :attr:`input` tensor in the given dimension +:attr:`dim`. And ``indices`` is the index location of each minimum value found +(argmin). + +If :attr:`keepdim` is ``True``, the output tensors are of the same size as +:attr:`input` except in the dimension :attr:`dim` where they are of size 1. +Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in +the output tensors having 1 fewer dimension than :attr:`input`. + +.. note:: If there are multiple minimal values in a reduced row then + the indices of the first minimal value are returned. 
+ +Args: + {input} + {dim} + {keepdim} + +Keyword args: + out (tuple, optional): the tuple of two output tensors (min, min_indices) + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.6248, 1.1334, -1.1899, -0.2803], + [-1.4644, -0.2635, -0.3651, 0.6134], + [ 0.2457, 0.0384, 1.0128, 0.7015], + [-0.1153, 2.9849, 2.1458, 0.5788]]) + >>> torch.min(a, 1) + torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0])) + +.. function:: min(input, other, *, out=None) -> Tensor + :noindex: + +See :func:`torch.minimum`. +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.minimum, + r""" +minimum(input, other, *, out=None) -> Tensor + +Computes the element-wise minimum of :attr:`input` and :attr:`other`. + +.. note:: + If one of the elements being compared is a NaN, then that element is returned. + :func:`minimum` is not supported for tensors with complex dtypes. + +Args: + {input} + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor((1, 2, -1)) + >>> b = torch.tensor((3, 0, 4)) + >>> torch.minimum(a, b) + tensor([1, 0, -1]) +""".format( + **common_args + ), +) + +add_docstr( + torch.fmin, + r""" +fmin(input, other, *, out=None) -> Tensor + +Computes the element-wise minimum of :attr:`input` and :attr:`other`. + +This is like :func:`torch.minimum` except it handles NaNs differently: +if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum. +Only if both elements are NaN is NaN propagated. + +This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function. + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer and floating-point inputs. 
+ +Args: + {input} + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')]) + >>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')]) + >>> torch.fmin(a, b) + tensor([-9.3000, 0.1000, 2.1000, nan]) +""".format( + **common_args + ), +) + +add_docstr( + torch.amin, + r""" +amin(input, dim, keepdim=False, *, out=None) -> Tensor + +Returns the minimum value of each slice of the :attr:`input` tensor in the given +dimension(s) :attr:`dim`. + +.. note:: + The difference between ``max``/``min`` and ``amax``/``amin`` is: + - ``amax``/``amin`` supports reducing on multiple dimensions, + - ``amax``/``amin`` does not return indices, + - ``amax``/``amin`` evenly distributes gradient between equal values, + while ``max(dim)``/``min(dim)`` propagates gradient only to a single + index in the source tensor. + +{keepdim_details} + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.6451, -0.4866, 0.2987, -1.3312], + [-0.5744, 1.2980, 1.8397, -0.2713], + [ 0.9128, 0.9214, -1.7268, -0.2995], + [ 0.9023, 0.4853, 0.9075, -1.6165]]) + >>> torch.amin(a, 1) + tensor([-1.3312, -0.5744, -1.7268, -1.6165]) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.aminmax, + r""" +aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max) + +Computes the minimum and maximum values of the :attr:`input` tensor. + +Args: + input (Tensor): + The input tensor + +Keyword Args: + dim (Optional[int]): + The dimension along which to compute the values. If `None`, + computes the values over the entire :attr:`input` tensor. + Default is `None`. + keepdim (bool): + If `True`, the reduced dimensions will be kept in the output + tensor as dimensions with size 1 for broadcasting, otherwise + they will be removed, as if calling (:func:`torch.squeeze`). + Default is `False`. 
+ out (Optional[Tuple[Tensor, Tensor]]): + Optional tensors on which to write the result. Must have the same + shape and dtype as the expected output. + Default is `None`. + +Returns: + A named tuple `(min, max)` containing the minimum and maximum values. + +Raises: + RuntimeError + If any of the dimensions to compute the values over has size 0. + +.. note:: + NaN values are propagated to the output if at least one value is NaN. + +.. seealso:: + :func:`torch.amin` computes just the minimum value + :func:`torch.amax` computes just the maximum value + +Example:: + + >>> torch.aminmax(torch.tensor([1, -3, 5])) + torch.return_types.aminmax( + min=tensor(-3), + max=tensor(5)) + + >>> # aminmax propagates NaNs + >>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan])) + torch.return_types.aminmax( + min=tensor(nan), + max=tensor(nan)) + + >>> t = torch.arange(10).view(2, 5) + >>> t + tensor([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> t.aminmax(dim=0, keepdim=True) + torch.return_types.aminmax( + min=tensor([[0, 1, 2, 3, 4]]), + max=tensor([[5, 6, 7, 8, 9]])) +""", +) + +add_docstr( + torch.argmin, + r""" +argmin(input, dim=None, keepdim=False) -> LongTensor + +Returns the indices of the minimum value(s) of the flattened tensor or along a dimension + +This is the second value returned by :meth:`torch.min`. See its +documentation for the exact semantics of this method. + +.. note:: If there are multiple minimal values then the indices of the first minimal value are returned. + +Args: + {input} + {dim} If ``None``, the argmin of the flattened input is returned. 
+ {keepdim} + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.1139, 0.2254, -0.1381, 0.3687], + [ 1.0100, -1.1975, -0.0102, -0.4732], + [-0.9240, 0.1207, -0.7506, -1.0213], + [ 1.7809, -1.2960, 0.9384, 0.1438]]) + >>> torch.argmin(a) + tensor(13) + >>> torch.argmin(a, dim=1) + tensor([ 2, 1, 3, 1]) + >>> torch.argmin(a, dim=1, keepdim=True) + tensor([[2], + [1], + [3], + [1]]) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.mm, + r""" +mm(input, mat2, *, out=None) -> Tensor + +Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`. + +If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a +:math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor. + +.. note:: This function does not :ref:`broadcast `. + For broadcasting matrix products, see :func:`torch.matmul`. + +Supports strided and sparse 2-D tensors as inputs, autograd with +respect to strided inputs. + +This operation has support for arguments with :ref:`sparse layouts`. +If :attr:`out` is provided it's layout will be used. Otherwise, the result +layout will be deduced from that of :attr:`input`. + +{sparse_beta_warning} + +{tf32_note} + +{rocm_fp16_note} + +Args: + input (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + +Keyword args: + {out} + +Example:: + + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.mm(mat1, mat2) + tensor([[ 0.4851, 0.5037, -0.3633], + [-0.0760, -3.6705, 2.4784]]) +""".format( + **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes + ), +) + +add_docstr( + torch.hspmm, + r""" +hspmm(mat1, mat2, *, out=None) -> Tensor + +Performs a matrix multiplication of a :ref:`sparse COO matrix +` :attr:`mat1` and a strided matrix :attr:`mat2`. The +result is a (1 + 1)-dimensional :ref:`hybrid COO matrix +`. 
+ +Args: + mat1 (Tensor): the first sparse matrix to be matrix multiplied + mat2 (Tensor): the second strided matrix to be matrix multiplied + +Keyword args: + {out} +""".format( + **common_args + ), +) + +add_docstr( + torch.matmul, + r""" +matmul(input, other, *, out=None) -> Tensor + +Matrix product of two tensors. + +The behavior depends on the dimensionality of the tensors as follows: + +- If both tensors are 1-dimensional, the dot product (scalar) is returned. +- If both arguments are 2-dimensional, the matrix-matrix product is returned. +- If the first argument is 1-dimensional and the second argument is 2-dimensional, + a 1 is prepended to its dimension for the purpose of the matrix multiply. + After the matrix multiply, the prepended dimension is removed. +- If the first argument is 2-dimensional and the second argument is 1-dimensional, + the matrix-vector product is returned. +- If both arguments are at least 1-dimensional and at least one argument is + N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first + argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the + batched matrix multiply and removed after. If the second argument is 1-dimensional, a + 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. + The non-matrix (i.e. batch) dimensions are :ref:`broadcasted ` (and thus + must be broadcastable). For example, if :attr:`input` is a + :math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)` + tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor. + + Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs + are broadcastable, and not the matrix dimensions. 
For example, if :attr:`input` is a + :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)` + tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the + matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor. + +This operation has support for arguments with :ref:`sparse layouts`. In particular the +matrix-matrix (both arguments 2-dimensional) supports sparse arguments with the same restrictions +as :func:`torch.mm` + +{sparse_beta_warning} + +{tf32_note} + +{rocm_fp16_note} + +.. note:: + + The 1-dimensional dot product version of this function does not support an :attr:`out` parameter. + +Arguments: + input (Tensor): the first tensor to be multiplied + other (Tensor): the second tensor to be multiplied + +Keyword args: + {out} + +Example:: + + >>> # vector x vector + >>> tensor1 = torch.randn(3) + >>> tensor2 = torch.randn(3) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([]) + >>> # matrix x vector + >>> tensor1 = torch.randn(3, 4) + >>> tensor2 = torch.randn(4) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([3]) + >>> # batched matrix x broadcasted vector + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(4) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3]) + >>> # batched matrix x batched matrix + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(10, 4, 5) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3, 5]) + >>> # batched matrix x broadcasted matrix + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(4, 5) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3, 5]) + +""".format( + **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes + ), +) + +add_docstr( + torch.mode, + r""" +mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + +Returns a namedtuple ``(values, indices)`` where ``values`` is 
the mode +value of each row of the :attr:`input` tensor in the given dimension +:attr:`dim`, i.e. a value which appears most often +in that row, and ``indices`` is the index location of each mode value found. + +By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + +If :attr:`keepdim` is ``True``, the output tensors are of the same size as +:attr:`input` except in the dimension :attr:`dim` where they are of size 1. +Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting +in the output tensors having 1 fewer dimension than :attr:`input`. + +.. note:: This function is not defined for ``torch.cuda.Tensor`` yet. + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + out (tuple, optional): the result tuple of two output tensors (values, indices) + +Example:: + + >>> a = torch.randint(10, (5,)) + >>> a + tensor([6, 5, 1, 0, 2]) + >>> b = a + (torch.randn(50, 1) * 5).long() + >>> torch.mode(b, 0) + torch.return_types.mode(values=tensor([6, 5, 1, 0, 2]), indices=tensor([2, 2, 2, 2, 2])) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.mul, + r""" +mul(input, other, *, out=None) -> Tensor + +Multiplies :attr:`input` by :attr:`other`. + + +.. math:: + \text{out}_i = \text{input}_i \times \text{other}_i +""" + + r""" + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer, float, and complex inputs. + +Args: + {input} + other (Tensor or Number) - the tensor or number to multiply input by. 
+ +Keyword args: + {out} + +Examples:: + + >>> a = torch.randn(3) + >>> a + tensor([ 0.2015, -0.4255, 2.6087]) + >>> torch.mul(a, 100) + tensor([ 20.1494, -42.5491, 260.8663]) + + >>> b = torch.randn(4, 1) + >>> b + tensor([[ 1.1207], + [-0.3137], + [ 0.0700], + [ 0.8378]]) + >>> c = torch.randn(1, 4) + >>> c + tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]]) + >>> torch.mul(b, c) + tensor([[ 0.5767, 0.1363, -0.5877, 2.5083], + [-0.1614, -0.0382, 0.1645, -0.7021], + [ 0.0360, 0.0085, -0.0367, 0.1567], + [ 0.4312, 0.1019, -0.4394, 1.8753]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.multiply, + r""" +multiply(input, other, *, out=None) + +Alias for :func:`torch.mul`. +""", +) + +add_docstr( + torch.multinomial, + r""" +multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor + +Returns a tensor where each row contains :attr:`num_samples` indices sampled +from the multinomial (a stricter definition would be multivariate, +refer to torch.distributions.multinomial.Multinomial for more details) +probability distribution located in the corresponding row +of tensor :attr:`input`. + +.. note:: + The rows of :attr:`input` do not need to sum to one (in which case we use + the values as weights), but must be non-negative, finite and have + a non-zero sum. + +Indices are ordered from left to right according to when each was sampled +(first samples are placed in first column). + +If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`. + +If :attr:`input` is a matrix with `m` rows, :attr:`out` is an matrix of shape +:math:`(m \times \text{{num\_samples}})`. + +If replacement is ``True``, samples are drawn with replacement. + +If not, they are drawn without replacement, which means that when a +sample index is drawn for a row, it cannot be drawn again for that row. + +.. 
note:: + When drawn without replacement, :attr:`num_samples` must be lower than + number of non-zero elements in :attr:`input` (or the min number of non-zero + elements in each row of :attr:`input` if it is a matrix). + +Args: + input (Tensor): the input tensor containing probabilities + num_samples (int): number of samples to draw + replacement (bool, optional): whether to draw with replacement or not + +Keyword args: + {generator} + {out} + +Example:: + + >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights + >>> torch.multinomial(weights, 2) + tensor([1, 2]) + >>> torch.multinomial(weights, 4) # ERROR! + RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False, + not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320 + >>> torch.multinomial(weights, 4, replacement=True) + tensor([ 2, 1, 1, 1]) +""".format( + **common_args + ), +) + +add_docstr( + torch.mv, + r""" +mv(input, vec, *, out=None) -> Tensor + +Performs a matrix-vector product of the matrix :attr:`input` and the vector +:attr:`vec`. + +If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of +size :math:`m`, :attr:`out` will be 1-D of size :math:`n`. + +.. note:: This function does not :ref:`broadcast `. + +Args: + input (Tensor): matrix to be multiplied + vec (Tensor): vector to be multiplied + +Keyword args: + {out} + +Example:: + + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.mv(mat, vec) + tensor([ 1.0404, -0.6361]) +""".format( + **common_args + ), +) + +add_docstr( + torch.mvlgamma, + r""" +mvlgamma(input, p, *, out=None) -> Tensor + +Alias for :func:`torch.special.multigammaln`. +""", +) + +add_docstr( + torch.movedim, + r""" +movedim(input, source, destination) -> Tensor + +Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source` +to the position(s) in :attr:`destination`. 
+ +Other dimensions of :attr:`input` that are not explicitly moved remain in +their original order and appear at the positions not specified in :attr:`destination`. + +Args: + {input} + source (int or tuple of ints): Original positions of the dims to move. These must be unique. + destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique. + +Examples:: + + >>> t = torch.randn(3,2,1) + >>> t + tensor([[[-0.3362], + [-0.8437]], + + [[-0.9627], + [ 0.1727]], + + [[ 0.5173], + [-0.1398]]]) + >>> torch.movedim(t, 1, 0).shape + torch.Size([2, 3, 1]) + >>> torch.movedim(t, 1, 0) + tensor([[[-0.3362], + [-0.9627], + [ 0.5173]], + + [[-0.8437], + [ 0.1727], + [-0.1398]]]) + >>> torch.movedim(t, (1, 2), (0, 1)).shape + torch.Size([2, 1, 3]) + >>> torch.movedim(t, (1, 2), (0, 1)) + tensor([[[-0.3362, -0.9627, 0.5173]], + + [[-0.8437, 0.1727, -0.1398]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.moveaxis, + r""" +moveaxis(input, source, destination) -> Tensor + +Alias for :func:`torch.movedim`. + +This function is equivalent to NumPy's moveaxis function. + +Examples:: + + >>> t = torch.randn(3,2,1) + >>> t + tensor([[[-0.3362], + [-0.8437]], + + [[-0.9627], + [ 0.1727]], + + [[ 0.5173], + [-0.1398]]]) + >>> torch.moveaxis(t, 1, 0).shape + torch.Size([2, 3, 1]) + >>> torch.moveaxis(t, 1, 0) + tensor([[[-0.3362], + [-0.9627], + [ 0.5173]], + + [[-0.8437], + [ 0.1727], + [-0.1398]]]) + >>> torch.moveaxis(t, (1, 2), (0, 1)).shape + torch.Size([2, 1, 3]) + >>> torch.moveaxis(t, (1, 2), (0, 1)) + tensor([[[-0.3362, -0.9627, 0.5173]], + + [[-0.8437, 0.1727, -0.1398]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.swapdims, + r""" +swapdims(input, dim0, dim1) -> Tensor + +Alias for :func:`torch.transpose`. + +This function is equivalent to NumPy's swapaxes function. 
+ +Examples:: + + >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> torch.swapdims(x, 0, 1) + tensor([[[0, 1], + [4, 5]], + + [[2, 3], + [6, 7]]]) + >>> torch.swapdims(x, 0, 2) + tensor([[[0, 4], + [2, 6]], + + [[1, 5], + [3, 7]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.swapaxes, + r""" +swapaxes(input, axis0, axis1) -> Tensor + +Alias for :func:`torch.transpose`. + +This function is equivalent to NumPy's swapaxes function. + +Examples:: + + >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> torch.swapaxes(x, 0, 1) + tensor([[[0, 1], + [4, 5]], + + [[2, 3], + [6, 7]]]) + >>> torch.swapaxes(x, 0, 2) + tensor([[[0, 4], + [2, 6]], + + [[1, 5], + [3, 7]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.narrow, + r""" +narrow(input, dim, start, length) -> Tensor + +Returns a new tensor that is a narrowed version of :attr:`input` tensor. The +dimension :attr:`dim` is input from :attr:`start` to ``start + length``. The +returned tensor and :attr:`input` tensor share the same underlying storage. + +Args: + input (Tensor): the tensor to narrow + dim (int): the dimension along which to narrow + start (int or Tensor): index of the element to start the narrowed dimension + from. Can be negative, which means indexing from the end of `dim`. 
If + `Tensor`, it must be an 0-dim integral `Tensor` (bools not allowed) + length (int): length of the narrowed dimension, must be weakly positive + +Example:: + + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> torch.narrow(x, 0, 0, 2) + tensor([[ 1, 2, 3], + [ 4, 5, 6]]) + >>> torch.narrow(x, 1, 1, 2) + tensor([[ 2, 3], + [ 5, 6], + [ 8, 9]]) + >>> torch.narrow(x, -1, torch.tensor(-1), 1) + tensor([[3], + [6], + [9]]) +""", +) + +add_docstr( + torch.narrow_copy, + r""" +narrow_copy(input, dim, start, length, *, out=None) -> Tensor + +Same as :meth:`Tensor.narrow` except this returns a copy rather +than shared storage. This is primarily for sparse tensors, which +do not have a shared-storage narrow method. + +Args: + input (Tensor): the tensor to narrow + dim (int): the dimension along which to narrow + start (int): index of the element to start the narrowed dimension from. Can + be negative, which means indexing from the end of `dim` + length (int): length of the narrowed dimension, must be weakly positive + +Keyword args: + {out} + +Example:: + + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> torch.narrow_copy(x, 0, 0, 2) + tensor([[ 1, 2, 3], + [ 4, 5, 6]]) + >>> torch.narrow_copy(x, 1, 1, 2) + tensor([[ 2, 3], + [ 5, 6], + [ 8, 9]]) + >>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2) + >>> torch.narrow_copy(s, 0, 0, 1) + tensor(indices=tensor([[0, 0], + [0, 1]]), + values=tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]), + size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo) + +.. seealso:: + + :func:`torch.narrow` for a non copy variant + +""".format( + **common_args + ), +) + +add_docstr( + torch.nan_to_num, + r""" +nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor + +Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input` +with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively. 
+By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the +greatest finite value representable by :attr:`input`'s dtype, and negative infinity +is replaced with the least finite value representable by :attr:`input`'s dtype. + +Args: + {input} + nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero. + posinf (Number, optional): if a Number, the value to replace positive infinity values with. + If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype. + Default is None. + neginf (Number, optional): if a Number, the value to replace negative infinity values with. + If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype. + Default is None. + +Keyword args: + {out} + +Example:: + + >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14]) + >>> torch.nan_to_num(x) + tensor([ 0.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00]) + >>> torch.nan_to_num(x, nan=2.0) + tensor([ 2.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00]) + >>> torch.nan_to_num(x, nan=2.0, posinf=1.0) + tensor([ 2.0000e+00, 1.0000e+00, -3.4028e+38, 3.1400e+00]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.ne, + r""" +ne(input, other, *, out=None) -> Tensor + +Computes :math:`\text{input} \neq \text{other}` element-wise. +""" + + r""" + +The second argument can be a number or a tensor whose shape is +:ref:`broadcastable ` with the first argument. 
+ +Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + +Keyword args: + {out} + +Returns: + A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere + +Example:: + + >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, True], [True, False]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.not_equal, + r""" +not_equal(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.ne`. +""", +) + +add_docstr( + torch.neg, + r""" +neg(input, *, out=None) -> Tensor + +Returns a new tensor with the negative of the elements of :attr:`input`. + +.. math:: + \text{out} = -1 \times \text{input} +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(5) + >>> a + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) + >>> torch.neg(a) + tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940]) +""".format( + **common_args + ), +) + +add_docstr( + torch.negative, + r""" +negative(input, *, out=None) -> Tensor + +Alias for :func:`torch.neg` +""", +) + +add_docstr( + torch.nextafter, + r""" +nextafter(input, other, *, out=None) -> Tensor + +Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise. + +The shapes of ``input`` and ``other`` must be +:ref:`broadcastable `. + +Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> eps = torch.finfo(torch.float32).eps + >>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps]) + tensor([True, True]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.nonzero, + r""" +nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors + +.. note:: + :func:`torch.nonzero(..., as_tuple=False) ` (default) returns a + 2-D tensor where each row is the index for a nonzero value. 
+    :func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
+    index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
+    gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
+    contains nonzero indices for a certain dimension.
+
+    See below for more details on the two behaviors.
+
+    When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
+    host-device synchronization.
+ +Example:: + + >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1])) + tensor([[ 0], + [ 1], + [ 2], + [ 4]]) + >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.0, 0.0, 1.2, 0.0], + ... [0.0, 0.0, 0.0,-0.4]])) + tensor([[ 0, 0], + [ 1, 1], + [ 2, 2], + [ 3, 3]]) + >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True) + (tensor([0, 1, 2, 4]),) + >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.0, 0.0, 1.2, 0.0], + ... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True) + (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3])) + >>> torch.nonzero(torch.tensor(5), as_tuple=True) + (tensor([0]),) +""".format( + **common_args + ), +) + +add_docstr( + torch.normal, + r""" +normal(mean, std, *, generator=None, out=None) -> Tensor + +Returns a tensor of random numbers drawn from separate normal distributions +whose mean and standard deviation are given. + +The :attr:`mean` is a tensor with the mean of +each output element's normal distribution + +The :attr:`std` is a tensor with the standard deviation of +each output element's normal distribution + +The shapes of :attr:`mean` and :attr:`std` don't need to match, but the +total number of elements in each tensor need to be the same. + +.. note:: When the shapes do not match, the shape of :attr:`mean` + is used as the shape for the returned output tensor + +.. note:: When :attr:`std` is a CUDA tensor, this function synchronizes + its device with the CPU. + +Args: + mean (Tensor): the tensor of per-element means + std (Tensor): the tensor of per-element standard deviations + +Keyword args: + {generator} + {out} + +Example:: + + >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1)) + tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134, + 8.0505, 8.1408, 9.0563, 10.0566]) + +.. function:: normal(mean=0.0, std, *, out=None) -> Tensor + :noindex: + +Similar to the function above, but the means are shared among all drawn +elements. 
+ +Args: + mean (float, optional): the mean for all distributions + std (Tensor): the tensor of per-element standard deviations + +Keyword args: + {out} + +Example:: + + >>> torch.normal(mean=0.5, std=torch.arange(1., 6.)) + tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303]) + +.. function:: normal(mean, std=1.0, *, out=None) -> Tensor + :noindex: + +Similar to the function above, but the standard deviations are shared among +all drawn elements. + +Args: + mean (Tensor): the tensor of per-element means + std (float, optional): the standard deviation for all distributions + +Keyword args: + out (Tensor, optional): the output tensor + +Example:: + + >>> torch.normal(mean=torch.arange(1., 6.)) + tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361]) + +.. function:: normal(mean, std, size, *, out=None) -> Tensor + :noindex: + +Similar to the function above, but the means and standard deviations are shared +among all drawn elements. The resulting tensor has size given by :attr:`size`. + +Args: + mean (float): the mean for all distributions + std (float): the standard deviation for all distributions + size (int...): a sequence of integers defining the shape of the output tensor. + +Keyword args: + {out} + +Example:: + + >>> torch.normal(2, 3, size=(1, 4)) + tensor([[-1.3987, -1.9544, 3.6048, 0.7909]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.numel, + r""" +numel(input) -> int + +Returns the total number of elements in the :attr:`input` tensor. + +Args: + {input} + +Example:: + + >>> a = torch.randn(1, 2, 3, 4, 5) + >>> torch.numel(a) + 120 + >>> a = torch.zeros(4,4) + >>> torch.numel(a) + 16 + +""".format( + **common_args + ), +) + +add_docstr( + torch.ones, + r""" +ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Returns a tensor filled with the scalar value `1`, with the shape defined +by the variable argument :attr:`size`. 
+ +Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + +Keyword arguments: + {out} + {dtype} + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.ones(2, 3) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) + + >>> torch.ones(5) + tensor([ 1., 1., 1., 1., 1.]) + +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.ones_like, + r""" +ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + +Returns a tensor filled with the scalar value `1`, with the same size as +:attr:`input`. ``torch.ones_like(input)`` is equivalent to +``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + +.. warning:: + As of 0.4, this function does not support an :attr:`out` keyword. As an alternative, + the old ``torch.ones_like(input, out=output)`` is equivalent to + ``torch.ones(input.size(), out=output)``. + +Args: + {input} + +Keyword arguments: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} + +Example:: + + >>> input = torch.empty(2, 3) + >>> torch.ones_like(input) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.orgqr, + r""" +orgqr(input, tau) -> Tensor + +Alias for :func:`torch.linalg.householder_product`. +""", +) + +add_docstr( + torch.ormqr, + r""" +ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor + +Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix. + +Multiplies a :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`, +where `Q` is represented using Householder reflectors `(input, tau)`. +See `Representation of Orthogonal or Unitary Matrices`_ for further details. 
+ +If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`. +When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`. +It has size :math:`n \times n` otherwise. +If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op. + +Supports inputs of float, double, cfloat and cdouble dtypes. +Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions. + +.. seealso:: + :func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q` + from the QR decomposition. + +.. note:: + This function supports backward but it is only fast when ``(input, tau)`` do not require gradients + and/or ``tau.size(-1)`` is very small. + `` + +Args: + input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions + and `mn` equals to `m` or `n` depending on the :attr:`left`. + tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions. + other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions. + left (bool): controls the order of multiplication. + transpose (bool): controls whether the matrix `Q` is conjugate transposed or not. + +Keyword args: + out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`. + +.. _Representation of Orthogonal or Unitary Matrices: + https://www.netlib.org/lapack/lug/node128.html +""", +) + +add_docstr( + torch.permute, + r""" +permute(input, dims) -> Tensor + +Returns a view of the original tensor :attr:`input` with its dimensions permuted. 
+ +Args: + {input} + dims (tuple of int): The desired ordering of dimensions + +Example: + >>> x = torch.randn(2, 3, 5) + >>> x.size() + torch.Size([2, 3, 5]) + >>> torch.permute(x, (2, 0, 1)).size() + torch.Size([5, 2, 3]) +""".format( + **common_args + ), +) + +add_docstr( + torch.poisson, + r""" +poisson(input, generator=None) -> Tensor + +Returns a tensor of the same size as :attr:`input` with each element +sampled from a Poisson distribution with rate parameter given by the corresponding +element in :attr:`input` i.e., + +.. math:: + \text{{out}}_i \sim \text{{Poisson}}(\text{{input}}_i) + +:attr:`input` must be non-negative. + +Args: + input (Tensor): the input tensor containing the rates of the Poisson distribution + +Keyword args: + {generator} + +Example:: + + >>> rates = torch.rand(4, 4) * 5 # rate parameter between 0 and 5 + >>> torch.poisson(rates) + tensor([[9., 1., 3., 5.], + [8., 6., 6., 0.], + [0., 4., 5., 3.], + [2., 1., 4., 2.]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.polygamma, + r""" +polygamma(n, input, *, out=None) -> Tensor + +Alias for :func:`torch.special.polygamma`. +""", +) + +add_docstr( + torch.positive, + r""" +positive(input) -> Tensor + +Returns :attr:`input`. +Throws a runtime error if :attr:`input` is a bool tensor. +""" + + r""" +Args: + {input} + +Example:: + + >>> t = torch.randn(5) + >>> t + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) + >>> torch.positive(t) + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) +""".format( + **common_args + ), +) + +add_docstr( + torch.pow, + r""" +pow(input, exponent, *, out=None) -> Tensor + +Takes the power of each element in :attr:`input` with :attr:`exponent` and +returns a tensor with the result. + +:attr:`exponent` can be either a single ``float`` number or a `Tensor` +with the same number of elements as :attr:`input`. + +When :attr:`exponent` is a scalar value, the operation applied is: + +.. 
math:: + \text{out}_i = x_i ^ \text{exponent} + +When :attr:`exponent` is a tensor, the operation applied is: + +.. math:: + \text{out}_i = x_i ^ {\text{exponent}_i} +""" + + r""" +When :attr:`exponent` is a tensor, the shapes of :attr:`input` +and :attr:`exponent` must be :ref:`broadcastable `. + +Args: + {input} + exponent (float or tensor): the exponent value + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.4331, 1.2475, 0.6834, -0.2791]) + >>> torch.pow(a, 2) + tensor([ 0.1875, 1.5561, 0.4670, 0.0779]) + >>> exp = torch.arange(1., 5.) + + >>> a = torch.arange(1., 5.) + >>> a + tensor([ 1., 2., 3., 4.]) + >>> exp + tensor([ 1., 2., 3., 4.]) + >>> torch.pow(a, exp) + tensor([ 1., 4., 27., 256.]) + +.. function:: pow(self, exponent, *, out=None) -> Tensor + :noindex: + +:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor. +The returned tensor :attr:`out` is of the same shape as :attr:`exponent` + +The operation applied is: + +.. math:: + \text{{out}}_i = \text{{self}} ^ {{\text{{exponent}}_i}} + +Args: + self (float): the scalar base value for the power operation + exponent (Tensor): the exponent tensor + +Keyword args: + {out} + +Example:: + + >>> exp = torch.arange(1., 5.) + >>> base = 2 + >>> torch.pow(base, exp) + tensor([ 2., 4., 8., 16.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.float_power, + r""" +float_power(input, exponent, *, out=None) -> Tensor + +Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision. +If neither input is complex returns a ``torch.float64`` tensor, +and if one or more inputs is complex returns a ``torch.complex128`` tensor. + +.. note:: + This function always computes in double precision, unlike :func:`torch.pow`, + which implements more typical :ref:`type promotion `. 
+ This is useful when the computation needs to be performed in a wider or more precise dtype, + or the results of the computation may contain fractional values not representable in the input dtypes, + like when an integer base is raised to a negative integer exponent. + +Args: + input (Tensor or Number): the base value(s) + exponent (Tensor or Number): the exponent value(s) + +Keyword args: + {out} + +Example:: + + >>> a = torch.randint(10, (4,)) + >>> a + tensor([6, 4, 7, 1]) + >>> torch.float_power(a, 2) + tensor([36., 16., 49., 1.], dtype=torch.float64) + + >>> a = torch.arange(1, 5) + >>> a + tensor([ 1, 2, 3, 4]) + >>> exp = torch.tensor([2, -3, 4, -5]) + >>> exp + tensor([ 2, -3, 4, -5]) + >>> torch.float_power(a, exp) + tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64) +""".format( + **common_args + ), +) + +add_docstr( + torch.prod, + r""" +prod(input, *, dtype=None) -> Tensor + +Returns the product of all elements in the :attr:`input` tensor. + +Args: + {input} + +Keyword args: + {dtype} + +Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[-0.8020, 0.5428, -1.5854]]) + >>> torch.prod(a) + tensor(0.6902) + +.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + +Returns the product of each row of the :attr:`input` tensor in the given +dimension :attr:`dim`. + +{keepdim_details} + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + {dtype} + +Example:: + + >>> a = torch.randn(4, 2) + >>> a + tensor([[ 0.5261, -0.3837], + [ 1.1857, -0.2498], + [-1.1646, 0.0705], + [ 1.1131, -1.0629]]) + >>> torch.prod(a, 1) + tensor([-0.2018, -0.2962, -0.0821, -1.1831]) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.promote_types, + r""" +promote_types(type1, type2) -> dtype + +Returns the :class:`torch.dtype` with the smallest size and scalar kind that is +not smaller nor of lower kind than either `type1` or `type2`. 
See type promotion +:ref:`documentation ` for more information on the type +promotion logic. + +Args: + type1 (:class:`torch.dtype`) + type2 (:class:`torch.dtype`) + +Example:: + + >>> torch.promote_types(torch.int32, torch.float32) + torch.float32 + >>> torch.promote_types(torch.uint8, torch.long) + torch.long +""", +) + +add_docstr( + torch.qr, + r""" +qr(input, some=True, *, out=None) -> (Tensor, Tensor) + +Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`, +and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R` +with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and +:math:`R` being an upper triangular matrix or batch of upper triangular matrices. + +If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization. +Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization. + +.. warning:: + + :func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr` + and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been + replaced with a string parameter :attr:`mode`. + + ``Q, R = torch.qr(A)`` should be replaced with + + .. code:: python + + Q, R = torch.linalg.qr(A) + + ``Q, R = torch.qr(A, some=False)`` should be replaced with + + .. code:: python + + Q, R = torch.linalg.qr(A, mode="complete") + +.. warning:: + If you plan to backpropagate through QR, note that the current backward implementation + is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))` + columns of :attr:`input` are linearly independent. + This behavior will probably change once QR supports pivoting. + +.. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs, + and may produce different (valid) decompositions on different device types + or different platforms. 
+ +Args: + input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more + batch dimensions consisting of matrices of dimension :math:`m \times n`. + some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for + complete QR decomposition. If `k = min(m, n)` then: + + * ``some=True`` : returns `(Q, R)` with dimensions (m, k), (k, n) (default) + + * ``'some=False'``: returns `(Q, R)` with dimensions (m, m), (m, n) + +Keyword args: + out (tuple, optional): tuple of `Q` and `R` tensors. + The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above. + +Example:: + + >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]]) + >>> q, r = torch.qr(a) + >>> q + tensor([[-0.8571, 0.3943, 0.3314], + [-0.4286, -0.9029, -0.0343], + [ 0.2857, -0.1714, 0.9429]]) + >>> r + tensor([[ -14.0000, -21.0000, 14.0000], + [ 0.0000, -175.0000, 70.0000], + [ 0.0000, 0.0000, -35.0000]]) + >>> torch.mm(q, r).round() + tensor([[ 12., -51., 4.], + [ 6., 167., -68.], + [ -4., 24., -41.]]) + >>> torch.mm(q.t(), q).round() + tensor([[ 1., 0., 0.], + [ 0., 1., -0.], + [ 0., -0., 1.]]) + >>> a = torch.randn(3, 4, 5) + >>> q, r = torch.qr(a, some=False) + >>> torch.allclose(torch.matmul(q, r), a) + True + >>> torch.allclose(torch.matmul(q.mT, q), torch.eye(5)) + True +""", +) + +add_docstr( + torch.rad2deg, + r""" +rad2deg(input, *, out=None) -> Tensor + +Returns a new tensor with each of the elements of :attr:`input` +converted from angles in radians to degrees. 
+ +Args: + {input} + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]]) + >>> torch.rad2deg(a) + tensor([[ 180.0233, -180.0233], + [ 359.9894, -359.9894], + [ 89.9544, -89.9544]]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.deg2rad, + r""" +deg2rad(input, *, out=None) -> Tensor + +Returns a new tensor with each of the elements of :attr:`input` +converted from angles in degrees to radians. + +Args: + {input} + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]]) + >>> torch.deg2rad(a) + tensor([[ 3.1416, -3.1416], + [ 6.2832, -6.2832], + [ 1.5708, -1.5708]]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.heaviside, + r""" +heaviside(input, values, *, out=None) -> Tensor + +Computes the Heaviside step function for each element in :attr:`input`. +The Heaviside step function is defined as: + +.. math:: + \text{{heaviside}}(input, values) = \begin{cases} + 0, & \text{if input < 0}\\ + values, & \text{if input == 0}\\ + 1, & \text{if input > 0} + \end{cases} +""" + + r""" + +Args: + {input} + values (Tensor): The values to use where :attr:`input` is zero. + +Keyword arguments: + {out} + +Example:: + + >>> input = torch.tensor([-1.5, 0, 2.0]) + >>> values = torch.tensor([0.5]) + >>> torch.heaviside(input, values) + tensor([0.0000, 0.5000, 1.0000]) + >>> values = torch.tensor([1.2, -2.0, 3.5]) + >>> torch.heaviside(input, values) + tensor([0., -2., 1.]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.rand, + """ +rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, \ +requires_grad=False, pin_memory=False) -> Tensor +""" + + r""" +Returns a tensor filled with random numbers from a uniform distribution +on the interval :math:`[0, 1)` + +The shape of the tensor is defined by the variable argument :attr:`size`. 
+ +Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + +Keyword args: + {generator} + {out} + {dtype} + {layout} + {device} + {requires_grad} + {pin_memory} + +Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.rand_like, + r""" +rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + +Returns a tensor with the same size as :attr:`input` that is filled with +random numbers from a uniform distribution on the interval :math:`[0, 1)`. +``torch.rand_like(input)`` is equivalent to +``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + +Args: + {input} + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} + +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.randint, + """ +randint(low=0, high, size, \\*, generator=None, out=None, \ +dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Returns a tensor filled with random integers generated uniformly +between :attr:`low` (inclusive) and :attr:`high` (exclusive). + +The shape of the tensor is defined by the variable argument :attr:`size`. + +.. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + +Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. + +Keyword args: + {generator} + {out} + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. 
Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + + +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.randint_like, + """ +randint_like(input, low=0, high, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \ +memory_format=torch.preserve_format) -> Tensor + +Returns a tensor with the same shape as Tensor :attr:`input` filled with +random integers generated uniformly between :attr:`low` (inclusive) and +:attr:`high` (exclusive). + +.. note: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + +Args: + {input} + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} + +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.randn, + """ +randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, \ +pin_memory=False) -> Tensor +""" + + r""" + +Returns a tensor filled with random numbers from a normal distribution +with mean `0` and variance `1` (also called the standard normal +distribution). + +.. math:: + \text{{out}}_{{i}} \sim \mathcal{{N}}(0, 1) + +The shape of the tensor is defined by the variable argument :attr:`size`. + +Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. 
+ +Keyword args: + {generator} + {out} + {dtype} + {layout} + {device} + {requires_grad} + {pin_memory} + +Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.randn_like, + r""" +randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + +Returns a tensor with the same size as :attr:`input` that is filled with +random numbers from a normal distribution with mean 0 and variance 1. +``torch.randn_like(input)`` is equivalent to +``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + +Args: + {input} + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} + +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.randperm, + """ +randperm(n, *, generator=None, out=None, dtype=torch.int64,layout=torch.strided, \ +device=None, requires_grad=False, pin_memory=False) -> Tensor +""" + + r""" +Returns a random permutation of integers from ``0`` to ``n - 1``. + +Args: + n (int): the upper bound (exclusive) + +Keyword args: + {generator} + {out} + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: ``torch.int64``. + {layout} + {device} + {requires_grad} + {pin_memory} + +Example:: + + >>> torch.randperm(4) + tensor([2, 1, 0, 3]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.tensor, + r""" +tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor + +Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`. + +.. 
warning:: + + When working with tensors prefer using :func:`torch.Tensor.clone`, + :func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for + readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to + ``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)`` + is equivalent to ``t.clone().detach().requires_grad_(True)``. + +.. seealso:: + + :func:`torch.as_tensor` preserves autograd history and avoids copies where possible. + :func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array. + +Args: + {data} + +Keyword args: + {dtype} + device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor + then the device of data is used. If None and data is not a tensor then + the result tensor is constructed on the current device. + {requires_grad} + {pin_memory} + + +Example:: + + >>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]) + tensor([[ 0.1000, 1.2000], + [ 2.2000, 3.1000], + [ 4.9000, 5.2000]]) + + >>> torch.tensor([0, 1]) # Type inference on data + tensor([ 0, 1]) + + >>> torch.tensor([[0.11111, 0.222222, 0.3333333]], + ... dtype=torch.float64, + ... device=torch.device('cuda:0')) # creates a double tensor on a CUDA device + tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0') + + >>> torch.tensor(3.14159) # Create a zero-dimensional (scalar) tensor + tensor(3.1416) + + >>> torch.tensor([]) # Create an empty tensor (of size (0,)) + tensor([]) +""".format( + **factory_data_common_args + ), +) + +add_docstr( + torch.range, + r""" +range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1` +with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is +the gap between two values in the tensor. + +.. 
math:: + \text{out}_{i+1} = \text{out}_i + \text{step}. +""" + + r""" +.. warning:: + This function is deprecated and will be removed in a future release because its behavior is inconsistent with + Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end). + +Args: + start (float): the starting value for the set of points. Default: ``0``. + end (float): the ending value for the set of points + step (float): the gap between each pair of adjacent points. Default: ``1``. + +Keyword args: + {out} + {dtype} If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.range(1, 4) + tensor([ 1., 2., 3., 4.]) + >>> torch.range(1, 4, 0.5) + tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.arange, + r""" +arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` +with values from the interval ``[start, end)`` taken with common difference +:attr:`step` beginning from `start`. + +Note that non-integer :attr:`step` is subject to floating point rounding errors when +comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` +in such cases. + +.. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} +""" + + r""" +Args: + start (Number): the starting value for the set of points. Default: ``0``. + end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. 
+ +Keyword args: + {out} + {dtype} If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.ravel, + r""" +ravel(input) -> Tensor + +Return a contiguous flattened tensor. A copy is made only if needed. + +Args: + {input} + +Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... [[5, 6], + ... [7, 8]]]) + >>> torch.ravel(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) +""".format( + **common_args + ), +) + +add_docstr( + torch.remainder, + r""" +remainder(input, other, *, out=None) -> Tensor + +Computes +`Python's modulus operation `_ +entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value +is less than that of :attr:`other`. + +It may also be defined in terms of :func:`torch.div` as + +.. code:: python + + torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer and float inputs. + +.. note:: + Complex inputs are not supported. In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + See :func:`torch.fmod` for how division by zero is handled. + +.. seealso:: + + :func:`torch.fmod` which implements C++'s `std::fmod `_. + This one is defined in terms of division rounding towards zero. 
+ +Args: + input (Tensor or Scalar): the dividend + other (Tensor or Scalar): the divisor + +Keyword args: + {out} + +Example:: + + >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([ 1., 0., 1., 1., 0., 1.]) + >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ]) +""".format( + **common_args + ), +) + +add_docstr( + torch.renorm, + r""" +renorm(input, p, dim, maxnorm, *, out=None) -> Tensor + +Returns a tensor where each sub-tensor of :attr:`input` along dimension +:attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower +than the value :attr:`maxnorm` + +.. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged + +Args: + {input} + p (float): the power for the norm computation + dim (int): the dimension to slice over to get the sub-tensors + maxnorm (float): the maximum norm to keep each sub-tensor under + +Keyword args: + {out} + +Example:: + + >>> x = torch.ones(3, 3) + >>> x[1].fill_(2) + tensor([ 2., 2., 2.]) + >>> x[2].fill_(3) + tensor([ 3., 3., 3.]) + >>> x + tensor([[ 1., 1., 1.], + [ 2., 2., 2.], + [ 3., 3., 3.]]) + >>> torch.renorm(x, 1, 0, 5) + tensor([[ 1.0000, 1.0000, 1.0000], + [ 1.6667, 1.6667, 1.6667], + [ 1.6667, 1.6667, 1.6667]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.reshape, + r""" +reshape(input, shape) -> Tensor + +Returns a tensor with the same data and number of elements as :attr:`input`, +but with the specified shape. When possible, the returned tensor will be a view +of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs +with compatible strides can be reshaped without copying, but you should not +depend on the copying vs. viewing behavior. + +See :meth:`torch.Tensor.view` on when it is possible to return a view. + +A single dimension may be -1, in which case it's inferred from the remaining +dimensions and the number of elements in :attr:`input`. 
+ +Args: + input (Tensor): the tensor to be reshaped + shape (tuple of int): the new shape + +Example:: + + >>> a = torch.arange(4.) + >>> torch.reshape(a, (2, 2)) + tensor([[ 0., 1.], + [ 2., 3.]]) + >>> b = torch.tensor([[0, 1], [2, 3]]) + >>> torch.reshape(b, (-1,)) + tensor([ 0, 1, 2, 3]) +""", +) + + +add_docstr( + torch.result_type, + r""" +result_type(tensor1, tensor2) -> dtype + +Returns the :class:`torch.dtype` that would result from performing an arithmetic +operation on the provided input tensors. See type promotion :ref:`documentation ` +for more information on the type promotion logic. + +Args: + tensor1 (Tensor or Number): an input tensor or number + tensor2 (Tensor or Number): an input tensor or number + +Example:: + + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0) + torch.float32 + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1)) + torch.uint8 +""", +) + +add_docstr( + torch.row_stack, + r""" +row_stack(tensors, *, out=None) -> Tensor + +Alias of :func:`torch.vstack`. +""", +) + +add_docstr( + torch.round, + r""" +round(input, *, decimals=0, out=None) -> Tensor + +Rounds elements of :attr:`input` to the nearest integer. + +For integer inputs, follows the array-api convention of returning a +copy of the input tensor. +The return type of output is same as that of input's dtype. + +.. note:: + This function implements the "round half to even" to + break ties when a number is equidistant from two + integers (e.g. `round(2.5)` is 2). + + When the :attr:\`decimals\` argument is specified the + algorithm used is similar to NumPy's `around`. This + algorithm is fast but inexact and it can easily + overflow for low precision dtypes. + Eg. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`. + +.. seealso:: + :func:`torch.ceil`, which rounds up. + :func:`torch.floor`, which rounds down. + :func:`torch.trunc`, which rounds towards zero. 
+ +Args: + {input} + decimals (int): Number of decimal places to round to (default: 0). + If decimals is negative, it specifies the number of positions + to the left of the decimal point. + +Keyword args: + {out} + +Example:: + + >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7))) + tensor([ 5., -2., 9., -8.]) + + >>> # Values equidistant from two integers are rounded towards the + >>> # the nearest even value (zero is treated as even) + >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5])) + tensor([-0., 0., 2., 2.]) + + >>> # A positive decimals argument rounds to the to that decimal place + >>> torch.round(torch.tensor([0.1234567]), decimals=3) + tensor([0.1230]) + + >>> # A negative decimals argument rounds to the left of the decimal + >>> torch.round(torch.tensor([1200.1234567]), decimals=-3) + tensor([1000.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.rsqrt, + r""" +rsqrt(input, *, out=None) -> Tensor + +Returns a new tensor with the reciprocal of the square-root of each of +the elements of :attr:`input`. + +.. 
math:: + \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}} +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.0370, 0.2970, 1.5420, -0.9105]) + >>> torch.rsqrt(a) + tensor([ nan, 1.8351, 0.8053, nan]) +""".format( + **common_args + ), +) + +add_docstr( + torch.scatter, + r""" +scatter(input, dim, index, src) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.scatter_` +""", +) + +add_docstr( + torch.scatter_add, + r""" +scatter_add(input, dim, index, src) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.scatter_add_` +""", +) + +add_docstr( + torch.scatter_reduce, + r""" +scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.scatter_reduce_` +""", +) + +add_docstr( + torch.select, + r""" +select(input, dim, index) -> Tensor + +Slices the :attr:`input` tensor along the selected dimension at the given index. +This function returns a view of the original tensor with the given dimension removed. + +.. note:: If :attr:`input` is a sparse tensor and returning a view of + the tensor is not possible, a RuntimeError exception is + raised. In this is the case, consider using + :func:`torch.select_copy` function. + +Args: + {input} + dim (int): the dimension to slice + index (int): the index to select with + +.. note:: + + :meth:`select` is equivalent to slicing. For example, + ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and + ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``. +""".format( + **common_args + ), +) + +add_docstr( + torch.select_scatter, + r""" +select_scatter(input, src, dim, index) -> Tensor + +Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index. +This function returns a tensor with fresh storage; it does not create a view. 
+ + +Args: + {input} + src (Tensor): The tensor to embed into :attr:`input` + dim (int): the dimension to insert the slice into. + index (int): the index to select with + +.. note:: + + :attr:`src` must be of the proper size in order to be embedded + into :attr:`input`. Specifically, it should have the same shape as + ``torch.select(input, dim, index)`` + +Example:: + + >>> a = torch.zeros(2, 2) + >>> b = torch.ones(2) + >>> a.select_scatter(b, 0, 0) + tensor([[1., 1.], + [0., 0.]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.slice_scatter, + r""" +slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor + +Embeds the values of the :attr:`src` tensor into :attr:`input` at the given +dimension. +This function returns a tensor with fresh storage; it does not create a view. + + +Args: + {input} + src (Tensor): The tensor to embed into :attr:`input` + dim (int): the dimension to insert the slice into + start (Optional[int]): the start index of where to insert the slice + end (Optional[int]): the end index of where to insert the slice + step (int): the how many elements to skip in + +Example:: + + >>> a = torch.zeros(8, 8) + >>> b = torch.ones(2, 8) + >>> a.slice_scatter(b, start=6) + tensor([[0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [1., 1., 1., 1., 1., 1., 1., 1.], + [1., 1., 1., 1., 1., 1., 1., 1.]]) + + >>> b = torch.ones(8, 2) + >>> a.slice_scatter(b, dim=1, start=2, end=6, step=2) + tensor([[0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.set_flush_denormal, + r""" 
+set_flush_denormal(mode) -> bool + +Disables denormal floating numbers on CPU. + +Returns ``True`` if your system supports flushing denormal numbers and it +successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal` +is only supported on x86 architectures supporting SSE3. + +Args: + mode (bool): Controls whether to enable flush denormal mode or not + +Example:: + + >>> torch.set_flush_denormal(True) + True + >>> torch.tensor([1e-323], dtype=torch.float64) + tensor([ 0.], dtype=torch.float64) + >>> torch.set_flush_denormal(False) + True + >>> torch.tensor([1e-323], dtype=torch.float64) + tensor(9.88131e-324 * + [ 1.0000], dtype=torch.float64) +""", +) + +add_docstr( + torch.set_num_threads, + r""" +set_num_threads(int) + +Sets the number of threads used for intraop parallelism on CPU. + +.. warning:: + To ensure that the correct number of threads is used, set_num_threads + must be called before running eager, JIT or autograd code. +""", +) + +add_docstr( + torch.set_num_interop_threads, + r""" +set_num_interop_threads(int) + +Sets the number of threads used for interop parallelism +(e.g. in JIT interpreter) on CPU. + +.. warning:: + Can only be called once and before any inter-op parallel work + is started (e.g. JIT execution). +""", +) + +add_docstr( + torch.sigmoid, + r""" +sigmoid(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.expit`. +""", +) + +add_docstr( + torch.logit, + r""" +logit(input, eps=None, *, out=None) -> Tensor + +Alias for :func:`torch.special.logit`. +""", +) + +add_docstr( + torch.sign, + r""" +sign(input, *, out=None) -> Tensor + +Returns a new tensor with the signs of the elements of :attr:`input`. + +.. 
+.. math::
+    \text{out}_{i} = \begin{cases}
+    0 & |\text{input}_i| == 0 \\
+    \frac{\text{input}_i}{|\text{input}_i|} & \text{otherwise}
+    \end{cases}
math:: + \text{out}_{i} = \sin(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.5461, 0.1347, -2.7266, -0.2746]) + >>> torch.sin(a) + tensor([-0.5194, 0.1343, -0.4032, -0.2711]) +""".format( + **common_args + ), +) + +add_docstr( + torch.sinc, + r""" +sinc(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.sinc`. +""", +) + +add_docstr( + torch.sinh, + r""" +sinh(input, *, out=None) -> Tensor + +Returns a new tensor with the hyperbolic sine of the elements of +:attr:`input`. + +.. math:: + \text{out}_{i} = \sinh(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.5380, -0.8632, -0.1265, 0.9399]) + >>> torch.sinh(a) + tensor([ 0.5644, -0.9744, -0.1268, 1.0845]) + +.. note:: + When :attr:`input` is on the CPU, the implementation of torch.sinh may use + the Sleef library, which rounds very large results to infinity or negative + infinity. See `here `_ for details. +""".format( + **common_args + ), +) + +add_docstr( + torch.sort, + r""" +sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor) + +Sorts the elements of the :attr:`input` tensor along a given dimension +in ascending order by value. + +If :attr:`dim` is not given, the last dimension of the `input` is chosen. + +If :attr:`descending` is ``True`` then the elements are sorted in descending +order by value. + +If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving +the order of equivalent elements. + +A namedtuple of (values, indices) is returned, where the `values` are the +sorted values and `indices` are the indices of the elements in the original +`input` tensor. 
+ +Args: + {input} + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): makes the sorting routine stable, which guarantees that the order + of equivalent elements is preserved. + +Keyword args: + out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can + be optionally given to be used as output buffers + +Example:: + + >>> x = torch.randn(3, 4) + >>> sorted, indices = torch.sort(x) + >>> sorted + tensor([[-0.2162, 0.0608, 0.6719, 2.3332], + [-0.5793, 0.0061, 0.6058, 0.9497], + [-0.5071, 0.3343, 0.9553, 1.0960]]) + >>> indices + tensor([[ 1, 0, 2, 3], + [ 3, 1, 0, 2], + [ 0, 3, 1, 2]]) + + >>> sorted, indices = torch.sort(x, 0) + >>> sorted + tensor([[-0.5071, -0.2162, 0.6719, -0.5793], + [ 0.0608, 0.0061, 0.9497, 0.3343], + [ 0.6058, 0.9553, 1.0960, 2.3332]]) + >>> indices + tensor([[ 2, 0, 0, 1], + [ 0, 1, 1, 2], + [ 1, 2, 2, 0]]) + >>> x = torch.tensor([0, 1] * 9) + >>> x.sort() + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1])) + >>> x.sort(stable=True) + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17])) +""".format( + **common_args + ), +) + +add_docstr( + torch.argsort, + r""" +argsort(input, dim=-1, descending=False, stable=False) -> Tensor + +Returns the indices that sort a tensor along a given dimension in ascending +order by value. + +This is the second value returned by :meth:`torch.sort`. See its documentation +for the exact semantics of this method. + +If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving +the order of equivalent elements. If ``False``, the relative order of values +which compare equal is not guaranteed. ``True`` is slower. 
+ +Args: + {input} + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): controls the relative order of equivalent elements + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0785, 1.5267, -0.8521, 0.4065], + [ 0.1598, 0.0788, -0.0745, -1.2700], + [ 1.2208, 1.0722, -0.7064, 1.2564], + [ 0.0669, -0.2318, -0.8229, -0.9280]]) + + + >>> torch.argsort(a, dim=1) + tensor([[2, 0, 3, 1], + [3, 2, 1, 0], + [2, 1, 0, 3], + [3, 2, 1, 0]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.msort, + r""" +msort(input, *, out=None) -> Tensor + +Sorts the elements of the :attr:`input` tensor along its first dimension +in ascending order by value. + +.. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`. + See also :func:`torch.sort`. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> t = torch.randn(3, 4) + >>> t + tensor([[-0.1321, 0.4370, -1.2631, -1.1289], + [-2.0527, -1.1250, 0.2275, 0.3077], + [-0.0881, -0.1259, -0.5495, 1.0284]]) + >>> torch.msort(t) + tensor([[-2.0527, -1.1250, -1.2631, -1.1289], + [-0.1321, -0.1259, -0.5495, 0.3077], + [-0.0881, 0.4370, 0.2275, 1.0284]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.sparse_compressed_tensor, + r"""sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, """ + r"""*, dtype=None, layout=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + +Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR, +CSC, BSR, or BSC - ` with specified values at +the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse +matrix multiplication operations in Compressed Sparse format are +typically faster than that for sparse tensors in COO format. Make you +have a look at :ref:`the note on the data type of the indices +`. 
+ +{sparse_factory_device_note} + +Args: + compressed_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, compressed_dim_size + 1)``. The last element of + each batch is the number of non-zero elements or blocks. This + tensor encodes the index in ``values`` and ``plain_indices`` + depending on where the given compressed dimension (row or + column) starts. Each successive number in the tensor + subtracted by the number before it denotes the number of + elements or blocks in a given compressed dimension. + plain_indices (array_like): Plain dimension (column or row) + co-ordinates of each element or block in values. (B+1)-dimensional + tensor with the same length as values. + + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types. that + represents a (1+K)-dimensional (for CSR and CSC layouts) or + (1+2+K)-dimensional tensor (for BSR and BSC layouts) where + ``K`` is the number of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * + blocksize[1], *densesize)`` where ``blocksize[0] == + blocksize[1] == 1`` for CSR and CSC formats. If not provided, + the size will be inferred as the minimum size big enough to + hold all non-zero elements or blocks. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + layout (:class:`torch.layout`, required): the desired layout of + returned tensor: :attr:`torch.sparse_csr`, + :attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or + :attr:`torch.sparse_bsc`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_tensor_type`). 
:attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + {requires_grad} + {check_invariants} + +Example:: + >>> compressed_indices = [0, 2, 4] + >>> plain_indices = [0, 1, 0, 1] + >>> values = [1, 2, 3, 4] + >>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64), + ... torch.tensor(plain_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr) + tensor(crow_indices=tensor([0, 2, 4]), + col_indices=tensor([0, 1, 0, 1]), + values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, + dtype=torch.float64, layout=torch.sparse_csr) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.sparse_csr_tensor, + r"""sparse_csr_tensor(crow_indices, col_indices, values, size=None, """ + r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + +Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) ` with specified +values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations +in CSR format are typically faster than that for sparse tensors in COO format. Make you have a look +at :ref:`the note on the data type of the indices `. + +{sparse_factory_device_note} + +Args: + crow_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, nrows + 1)``. The last element of each batch + is the number of non-zeros. This tensor encodes the index in + values and col_indices depending on where the given row + starts. Each successive number in the tensor subtracted by the + number before it denotes the number of elements in a given + row. + col_indices (array_like): Column co-ordinates of each element in + values. (B+1)-dimensional tensor with the same length + as values. + values (array_list): Initial values for the tensor. 
Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types that + represents a (1+K)-dimensional tensor where ``K`` is the number + of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If + not provided, the size will be inferred as the minimum size + big enough to hold all non-zero elements. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_tensor_type`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + {requires_grad} + {check_invariants} + +Example:: + >>> crow_indices = [0, 2, 4] + >>> col_indices = [0, 1, 0, 1] + >>> values = [1, 2, 3, 4] + >>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64), + ... torch.tensor(col_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(crow_indices=tensor([0, 2, 4]), + col_indices=tensor([0, 1, 0, 1]), + values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, + dtype=torch.float64, layout=torch.sparse_csr) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.sparse_csc_tensor, + r"""sparse_csc_tensor(ccol_indices, row_indices, values, size=None, """ + r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + +Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column) +` with specified values at the given +:attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix +multiplication operations in CSC format are typically faster than that +for sparse tensors in COO format. Make you have a look at :ref:`the +note on the data type of the indices `. 
+ +{sparse_factory_device_note} + +Args: + ccol_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, ncols + 1)``. The last element of each batch + is the number of non-zeros. This tensor encodes the index in + values and row_indices depending on where the given column + starts. Each successive number in the tensor subtracted by the + number before it denotes the number of elements in a given + column. + row_indices (array_like): Row co-ordinates of each element in + values. (B+1)-dimensional tensor with the same length as + values. + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types that + represents a (1+K)-dimensional tensor where ``K`` is the number + of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If + not provided, the size will be inferred as the minimum size + big enough to hold all non-zero elements. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_tensor_type`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + {requires_grad} + {check_invariants} + +Example:: + >>> ccol_indices = [0, 2, 4] + >>> row_indices = [0, 1, 0, 1] + >>> values = [1, 2, 3, 4] + >>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64), + ... torch.tensor(row_indices, dtype=torch.int64), + ... 
torch.tensor(values), dtype=torch.double) + tensor(ccol_indices=tensor([0, 2, 4]), + row_indices=tensor([0, 1, 0, 1]), + values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, + dtype=torch.float64, layout=torch.sparse_csc) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.sparse_bsr_tensor, + r"""sparse_bsr_tensor(crow_indices, col_indices, values, size=None, """ + r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + +Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)) +` with specified 2-dimensional blocks at the given +:attr:`crow_indices` and :attr:`col_indices`. Sparse matrix +multiplication operations in BSR format are typically faster than that +for sparse tensors in COO format. Make you have a look at :ref:`the +note on the data type of the indices `. + +{sparse_factory_device_note} + +Args: + crow_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, nrowblocks + 1)``. The last element of each + batch is the number of non-zeros. This tensor encodes the + block index in values and col_indices depending on where the + given row block starts. Each successive number in the tensor + subtracted by the number before it denotes the number of + blocks in a given row. + col_indices (array_like): Column block co-ordinates of each block + in values. (B+1)-dimensional tensor with the same length as + values. + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types that + represents a (1 + 2 + K)-dimensional tensor where ``K`` is the + number of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * + blocksize[1], *densesize)`` where ``blocksize == + values.shape[1:3]``. If not provided, the size will be + inferred as the minimum size big enough to hold all non-zero + blocks. 
+ +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_tensor_type`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + {requires_grad} + {check_invariants} + +Example:: + >>> crow_indices = [0, 1, 2] + >>> col_indices = [0, 1] + >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + >>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64), + ... torch.tensor(col_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(crow_indices=tensor([0, 1, 2]), + col_indices=tensor([0, 1]), + values=tensor([[[1., 2.], + [3., 4.]], + [[5., 6.], + [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64, + layout=torch.sparse_bsr) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.sparse_bsc_tensor, + r"""sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, """ + r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + +Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse +Column)) ` with specified 2-dimensional blocks at the +given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix +multiplication operations in BSC format are typically faster than that +for sparse tensors in COO format. Make you have a look at :ref:`the +note on the data type of the indices `. + +{sparse_factory_device_note} + +Args: + ccol_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, ncolblocks + 1)``. The last element of each + batch is the number of non-zeros. This tensor encodes the + index in values and row_indices depending on where the given + column starts. 
Each successive number in the tensor subtracted + by the number before it denotes the number of elements in a + given column. + row_indices (array_like): Row block co-ordinates of each block in + values. (B+1)-dimensional tensor with the same length + as values. + values (array_list): Initial blocks for the tensor. Can be a list, + tuple, NumPy ``ndarray``, and other types that + represents a (1 + 2 + K)-dimensional tensor where ``K`` is the + number of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * + blocksize[1], *densesize)`` If not provided, the size will be + inferred as the minimum size big enough to hold all non-zero + blocks. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_tensor_type`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + {requires_grad} + {check_invariants} + +Example:: + >>> ccol_indices = [0, 1, 2] + >>> row_indices = [0, 1] + >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64), + ... torch.tensor(row_indices, dtype=torch.int64), + ... 
torch.tensor(values), dtype=torch.double) + tensor(ccol_indices=tensor([0, 1, 2]), + row_indices=tensor([0, 1]), + values=tensor([[[1., 2.], + [3., 4.]], + [[5., 6.], + [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64, + layout=torch.sparse_bsc) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.sparse_coo_tensor, + r"""sparse_coo_tensor(indices, values, size=None, """ + r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None, is_coalesced=None) -> Tensor + +Constructs a :ref:`sparse tensor in COO(rdinate) format +` with specified values at the given +:attr:`indices`. + +.. note:: + + This function returns an :ref:`uncoalesced tensor + ` when :attr:`is_coalesced` is + unspecified or ``None``. + +{sparse_factory_device_note} + +Args: + indices (array_like): Initial data for the tensor. Can be a list, tuple, + NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor` + internally. The indices are the coordinates of the non-zero values in the matrix, and thus + should be two-dimensional where the first dimension is the number of tensor dimensions and + the second dimension is the number of non-zero values. + values (array_like): Initial values for the tensor. Can be a list, tuple, + NumPy ``ndarray``, scalar, and other types. + size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not + provided the size will be inferred as the minimum size big enough to hold all non-zero + elements. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if None, infers data type from :attr:`values`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if None, uses the current device for the default tensor type + (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. 
+ {requires_grad} + {check_invariants} + is_coalesced (bool, optional): When``True``, the caller is + responsible for providing tensor indices that correspond to a + coalesced tensor. If the :attr:`check_invariants` flag is + False, no error will be raised if the prerequisites are not + met and this will lead to silently incorrect results. To force + coalescion please use :meth:`coalesce` on the resulting + Tensor. + Default: None: except for trivial cases (e.g. nnz < 2) the + resulting Tensor has is_coalesced set to ``False```. + +Example:: + + >>> i = torch.tensor([[0, 1, 1], + ... [2, 0, 2]]) + >>> v = torch.tensor([3, 4, 5], dtype=torch.float32) + >>> torch.sparse_coo_tensor(i, v, [2, 4]) + tensor(indices=tensor([[0, 1, 1], + [2, 0, 2]]), + values=tensor([3., 4., 5.]), + size=(2, 4), nnz=3, layout=torch.sparse_coo) + + >>> torch.sparse_coo_tensor(i, v) # Shape inference + tensor(indices=tensor([[0, 1, 1], + [2, 0, 2]]), + values=tensor([3., 4., 5.]), + size=(2, 3), nnz=3, layout=torch.sparse_coo) + + >>> torch.sparse_coo_tensor(i, v, [2, 4], + ... dtype=torch.float64, + ... device=torch.device('cuda:0')) + tensor(indices=tensor([[0, 1, 1], + [2, 0, 2]]), + values=tensor([3., 4., 5.]), + device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64, + layout=torch.sparse_coo) + + # Create an empty sparse tensor with the following invariants: + # 1. sparse_dim + dense_dim = len(SparseTensor.shape) + # 2. SparseTensor._indices().shape = (sparse_dim, nnz) + # 3. 
SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:]) + # + # For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and + # sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0)) + >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1]) + tensor(indices=tensor([], size=(1, 0)), + values=tensor([], size=(0,)), + size=(1,), nnz=0, layout=torch.sparse_coo) + + # and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and + # sparse_dim = 1 + >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2]) + tensor(indices=tensor([], size=(1, 0)), + values=tensor([], size=(0, 2)), + size=(1, 2), nnz=0, layout=torch.sparse_coo) + +.. _torch.sparse: https://pytorch.org/docs/stable/sparse.html +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.sqrt, + r""" +sqrt(input, *, out=None) -> Tensor + +Returns a new tensor with the square-root of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \sqrt{\text{input}_{i}} +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-2.0755, 1.0226, 0.0831, 0.4806]) + >>> torch.sqrt(a) + tensor([ nan, 1.0112, 0.2883, 0.6933]) +""".format( + **common_args + ), +) + +add_docstr( + torch.square, + r""" +square(input, *, out=None) -> Tensor + +Returns a new tensor with the square of the elements of :attr:`input`. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-2.0755, 1.0226, 0.0831, 0.4806]) + >>> torch.square(a) + tensor([ 4.3077, 1.0457, 0.0069, 0.2310]) +""".format( + **common_args + ), +) + +add_docstr( + torch.squeeze, + r""" +squeeze(input, dim=None) -> Tensor + +Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed. 
+ +For example, if `input` is of shape: +:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()` +will be of shape: :math:`(A \times B \times C \times D)`. + +When :attr:`dim` is given, a squeeze operation is done only in the given +dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`, +``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)`` +will squeeze the tensor to the shape :math:`(A \times B)`. + +.. note:: The returned tensor shares the storage with the input tensor, + so changing the contents of one will change the contents of the other. + +.. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)` + will also remove the batch dimension, which can lead to unexpected + errors. Consider specifying only the dims you wish to be squeezed. + +Args: + {input} + dim (int or tuple of ints, optional): if given, the input will be squeezed + only in the specified dimensions. + + .. versionchanged:: 2.0 + :attr:`dim` now accepts tuples of dimensions. + +Example:: + + >>> x = torch.zeros(2, 1, 2, 1, 2) + >>> x.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x) + >>> y.size() + torch.Size([2, 2, 2]) + >>> y = torch.squeeze(x, 0) + >>> y.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x, 1) + >>> y.size() + torch.Size([2, 2, 1, 2]) + >>> y = torch.squeeze(x, (1, 2, 3)) + torch.Size([2, 2, 2]) +""".format( + **common_args + ), +) + +add_docstr( + torch.std, + r""" +std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + +Calculates the standard deviation over the dimensions specified by :attr:`dim`. +:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to +reduce over all dimensions. + +The standard deviation (:math:`\sigma`) is calculated as + +.. 
math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + +where :math:`x` is the sample set of elements, :math:`\bar{x}` is the +sample mean, :math:`N` is the number of samples and :math:`\delta N` is +the :attr:`correction`. +""" + + r""" + +{keepdim_details} + +Args: + {input} + {dim} + +Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + {keepdim} + {out} + +Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std(a, dim=1, keepdim=True) + tensor([[1.0311], + [0.7477], + [1.2204], + [0.9087]]) + +.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.std_mean, + r""" +std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + +Calculates the standard deviation and mean over the dimensions specified by +:attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or +``None`` to reduce over all dimensions. + +The standard deviation (:math:`\sigma`) is calculated as + +.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + +where :math:`x` is the sample set of elements, :math:`\bar{x}` is the +sample mean, :math:`N` is the number of samples and :math:`\delta N` is +the :attr:`correction`. + +""" + + r""" + +{keepdim_details} + +Args: + {input} + {opt_dim} + +Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. 
versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + {keepdim} + {out} + +Returns: + A tuple (std, mean) containing the standard deviation and mean. + +Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std_mean(a, dim=0, keepdim=True) + (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + +.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.sub, + r""" +sub(input, other, *, alpha=1, out=None) -> Tensor + +Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`. + +.. math:: + \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i +""" + + r""" + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer, float, and complex inputs. + +Args: + {input} + other (Tensor or Number): the tensor or number to subtract from :attr:`input`. + +Keyword args: + alpha (Number): the multiplier for :attr:`other`. + {out} + +Example:: + + >>> a = torch.tensor((1, 2)) + >>> b = torch.tensor((0, 1)) + >>> torch.sub(a, b, alpha=2) + tensor([1, 0]) +""".format( + **common_args + ), +) + +add_docstr( + torch.subtract, + r""" +subtract(input, other, *, alpha=1, out=None) -> Tensor + +Alias for :func:`torch.sub`. +""", +) + +add_docstr( + torch.sum, + r""" +sum(input, *, dtype=None) -> Tensor + +Returns the sum of all elements in the :attr:`input` tensor. + +Args: + {input} + +Keyword args: + {dtype} + +Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.1133, -0.9567, 0.2958]]) + >>> torch.sum(a) + tensor(-0.5475) + +.. 
function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + +Returns the sum of each row of the :attr:`input` tensor in the given +dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, +reduce over all of them. + +{keepdim_details} + +Args: + {input} + {opt_dim} + {keepdim} + +Keyword args: + {dtype} + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0569, -0.2475, 0.0737, -0.3429], + [-0.2993, 0.9138, 0.9337, -1.6864], + [ 0.1132, 0.7892, -0.1003, 0.5688], + [ 0.3637, -0.9906, -0.4752, -1.5197]]) + >>> torch.sum(a, 1) + tensor([-0.4598, -0.1381, 1.3708, -2.6217]) + >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6) + >>> torch.sum(b, (2, 1)) + tensor([ 435., 1335., 2235., 3135.]) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.nansum, + r""" +nansum(input, *, dtype=None) -> Tensor + +Returns the sum of all elements, treating Not a Numbers (NaNs) as zero. + +Args: + {input} + +Keyword args: + {dtype} + +Example:: + + >>> a = torch.tensor([1., 2., float('nan'), 4.]) + >>> torch.nansum(a) + tensor(7.) + +.. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + +Returns the sum of each row of the :attr:`input` tensor in the given +dimension :attr:`dim`, treating Not a Numbers (NaNs) as zero. +If :attr:`dim` is a list of dimensions, reduce over all of them. + +{keepdim_details} + +Args: + {input} + {opt_dim} + {keepdim} + +Keyword args: + {dtype} + +Example:: + + >>> torch.nansum(torch.tensor([1., float("nan")])) + 1.0 + >>> a = torch.tensor([[1, 2], [3., float("nan")]]) + >>> torch.nansum(a) + tensor(6.) + >>> torch.nansum(a, dim=0) + tensor([4., 2.]) + >>> torch.nansum(a, dim=1) + tensor([3., 3.]) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.svd, + r""" +svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor) + +Computes the singular value decomposition of either a matrix or batch of +matrices :attr:`input`. 
The singular value decomposition is represented as a +namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`. +where :math:`V^{\text{H}}` is the transpose of `V` for real inputs, +and the conjugate transpose of `V` for complex inputs. +If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also +batched with the same batch dimensions as :attr:`input`. + +If :attr:`some` is `True` (default), the method returns the reduced singular +value decomposition. In this case, if the last two dimensions of :attr:`input` are +`m` and `n`, then the returned `U` and `V` matrices will contain only +`min(n, m)` orthonormal columns. + +If :attr:`compute_uv` is `False`, the returned `U` and `V` will be +zero-filled matrices of shape `(m, m)` and `(n, n)` +respectively, and the same device as :attr:`input`. The argument :attr:`some` +has no effect when :attr:`compute_uv` is `False`. + +Supports :attr:`input` of float, double, cfloat and cdouble data types. +The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will +always be real-valued, even if :attr:`input` is complex. + +.. warning:: + + :func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd` + and will be removed in a future PyTorch release. + + ``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with + + .. code:: python + + U, S, Vh = torch.linalg.svd(A, full_matrices=not some) + V = Vh.mH + + ``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with + + .. code:: python + + S = torch.linalg.svdvals(A) + +.. note:: Differences with :func:`torch.linalg.svd`: + + * :attr:`some` is the opposite of + :func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that + default value for both is `True`, so the default behavior is + effectively the opposite. + * :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns + `Vh`, that is, :math:`V^{\text{H}}`. 
+ * If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled + tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns + empty tensors. + +.. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices, + then the singular values of each matrix in the batch are returned in descending order. + +.. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`. + +.. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]` + and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors + can be arbitrary bases of the corresponding subspaces. + +.. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd` + (a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously, + on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243 + and later, and MAGMA's routine `gesdd` on earlier versions of CUDA. + +.. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will + be represented as a column-major matrix (i.e. Fortran-contiguous). + +.. warning:: The gradients with respect to `U` and `V` will only be finite when the input does not + have zero nor repeated singular values. + +.. warning:: If the distance between any two singular values is close to zero, the gradients with respect to + `U` and `V` will be numerically unstable, as they depends on + :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix + has small singular values, as these gradients also depend on `S⁻¹`. + +.. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique, + as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column. 
+ The same happens when :attr:`input` has repeated singular values, where one may multiply + the columns of the spanning subspace in `U` and `V` by a rotation matrix + and `the resulting vectors will span the same subspace`_. + Different platforms, like NumPy, or inputs on different device types, + may produce different `U` and `V` tensors. + +Args: + input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more + batch dimensions consisting of `(m, n)` matrices. + some (bool, optional): controls whether to compute the reduced or full decomposition, and + consequently, the shape of returned `U` and `V`. Default: `True`. + compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`. + +Keyword args: + out (tuple, optional): the output tuple of tensors + +Example:: + + >>> a = torch.randn(5, 3) + >>> a + tensor([[ 0.2364, -0.7752, 0.6372], + [ 1.7201, 0.7394, -0.0504], + [-0.3371, -1.0584, 0.5296], + [ 0.3550, -0.4022, 1.5569], + [ 0.2445, -0.0158, 1.1414]]) + >>> u, s, v = torch.svd(a) + >>> u + tensor([[ 0.4027, 0.0287, 0.5434], + [-0.1946, 0.8833, 0.3679], + [ 0.4296, -0.2890, 0.5261], + [ 0.6604, 0.2717, -0.2618], + [ 0.4234, 0.2481, -0.4733]]) + >>> s + tensor([2.3289, 2.0315, 0.7806]) + >>> v + tensor([[-0.0199, 0.8766, 0.4809], + [-0.5080, 0.4054, -0.7600], + [ 0.8611, 0.2594, -0.4373]]) + >>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t())) + tensor(8.6531e-07) + >>> a_big = torch.randn(7, 5, 3) + >>> u, s, v = torch.svd(a_big) + >>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT)) + tensor(2.6503e-06) + +.. _the resulting vectors will span the same subspace: + (https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD) +""", +) + + +add_docstr( + torch.t, + r""" +t(input) -> Tensor + +Expects :attr:`input` to be <= 2-D tensor and transposes dimensions 0 +and 1. + +0-D and 1-D tensors are returned as is. 
When input is a 2-D tensor this +is equivalent to ``transpose(input, 0, 1)``. + +Args: + {input} + +Example:: + + >>> x = torch.randn(()) + >>> x + tensor(0.1995) + >>> torch.t(x) + tensor(0.1995) + >>> x = torch.randn(3) + >>> x + tensor([ 2.4320, -0.4608, 0.7702]) + >>> torch.t(x) + tensor([ 2.4320, -0.4608, 0.7702]) + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.4875, 0.9158, -0.5872], + [ 0.3938, -0.6929, 0.6932]]) + >>> torch.t(x) + tensor([[ 0.4875, 0.3938], + [ 0.9158, -0.6929], + [-0.5872, 0.6932]]) + +See also :func:`torch.transpose`. +""".format( + **common_args + ), +) + +add_docstr( + torch.flip, + r""" +flip(input, dims) -> Tensor + +Reverse the order of an n-D tensor along given axis in dims. + +.. note:: + `torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`, + which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, + `torch.flip` is expected to be slower than `np.flip`. + +Args: + {input} + dims (a list or tuple): axis to flip on + +Example:: + + >>> x = torch.arange(8).view(2, 2, 2) + >>> x + tensor([[[ 0, 1], + [ 2, 3]], + + [[ 4, 5], + [ 6, 7]]]) + >>> torch.flip(x, [0, 1]) + tensor([[[ 6, 7], + [ 4, 5]], + + [[ 2, 3], + [ 0, 1]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.fliplr, + r""" +fliplr(input) -> Tensor + +Flip tensor in the left/right direction, returning a new tensor. + +Flip the entries in each row in the left/right direction. +Columns are preserved, but appear in a different order than before. + +Note: + Requires the tensor to be at least 2-D. + +.. note:: + `torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`, + which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, + `torch.fliplr` is expected to be slower than `np.fliplr`. + +Args: + input (Tensor): Must be at least 2-dimensional. 
+ +Example:: + + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.fliplr(x) + tensor([[1, 0], + [3, 2]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.flipud, + r""" +flipud(input) -> Tensor + +Flip tensor in the up/down direction, returning a new tensor. + +Flip the entries in each column in the up/down direction. +Rows are preserved, but appear in a different order than before. + +Note: + Requires the tensor to be at least 1-D. + +.. note:: + `torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`, + which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, + `torch.flipud` is expected to be slower than `np.flipud`. + +Args: + input (Tensor): Must be at least 1-dimensional. + +Example:: + + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.flipud(x) + tensor([[2, 3], + [0, 1]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.roll, + r""" +roll(input, shifts, dims=None) -> Tensor + +Roll the tensor :attr:`input` along the given dimension(s). Elements that are +shifted beyond the last position are re-introduced at the first position. If +:attr:`dims` is `None`, the tensor will be flattened before rolling and then +restored to the original shape. + +Args: + {input} + shifts (int or tuple of ints): The number of places by which the elements + of the tensor are shifted. 
If shifts is a tuple, dims must be a tuple of + the same size, and each dimension will be rolled by the corresponding + value + dims (int or tuple of ints): Axis along which to roll + +Example:: + + >>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2) + >>> x + tensor([[1, 2], + [3, 4], + [5, 6], + [7, 8]]) + >>> torch.roll(x, 1) + tensor([[8, 1], + [2, 3], + [4, 5], + [6, 7]]) + >>> torch.roll(x, 1, 0) + tensor([[7, 8], + [1, 2], + [3, 4], + [5, 6]]) + >>> torch.roll(x, -1, 0) + tensor([[3, 4], + [5, 6], + [7, 8], + [1, 2]]) + >>> torch.roll(x, shifts=(2, 1), dims=(0, 1)) + tensor([[6, 5], + [8, 7], + [2, 1], + [4, 3]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.rot90, + r""" +rot90(input, k=1, dims=[0,1]) -> Tensor + +Rotate an n-D tensor by 90 degrees in the plane specified by dims axis. +Rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0. + +Args: + {input} + k (int): number of times to rotate. Default value is 1 + dims (a list or tuple): axis to rotate. Default value is [0, 1] + +Example:: + + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.rot90(x, 1, [0, 1]) + tensor([[1, 3], + [0, 2]]) + + >>> x = torch.arange(8).view(2, 2, 2) + >>> x + tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> torch.rot90(x, 1, [1, 2]) + tensor([[[1, 3], + [0, 2]], + + [[5, 7], + [4, 6]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.take, + r""" +take(input, index) -> Tensor + +Returns a new tensor with the elements of :attr:`input` at the given indices. +The input tensor is treated as if it were viewed as a 1-D tensor. The result +takes the same shape as the indices. + +Args: + {input} + index (LongTensor): the indices into tensor + +Example:: + + >>> src = torch.tensor([[4, 3, 5], + ... 
[6, 7, 8]]) + >>> torch.take(src, torch.tensor([0, 2, 5])) + tensor([ 4, 5, 8]) +""".format( + **common_args + ), +) + +add_docstr( + torch.take_along_dim, + r""" +take_along_dim(input, indices, dim=None, *, out=None) -> Tensor + +Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`. + +If :attr:`dim` is None, the input array is treated as if it has been flattened to 1d. + +Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`, +are designed to work with this function. See the examples below. + +.. note:: + This function is similar to NumPy's `take_along_axis`. + See also :func:`torch.gather`. + +Args: + {input} + indices (tensor): the indices into :attr:`input`. Must have long dtype. + dim (int, optional): dimension to select along. + +Keyword args: + {out} + +Example:: + + >>> t = torch.tensor([[10, 30, 20], [60, 40, 50]]) + >>> max_idx = torch.argmax(t) + >>> torch.take_along_dim(t, max_idx) + tensor([60]) + >>> sorted_idx = torch.argsort(t, dim=1) + >>> torch.take_along_dim(t, sorted_idx, dim=1) + tensor([[10, 20, 30], + [40, 50, 60]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.tan, + r""" +tan(input, *, out=None) -> Tensor + +Returns a new tensor with the tangent of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \tan(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-1.2027, -1.7687, 0.4412, -1.3856]) + >>> torch.tan(a) + tensor([-2.5930, 4.9859, 0.4722, -5.3366]) +""".format( + **common_args + ), +) + +add_docstr( + torch.tanh, + r""" +tanh(input, *, out=None) -> Tensor + +Returns a new tensor with the hyperbolic tangent of the elements +of :attr:`input`. + +.. 
math:: + \text{out}_{i} = \tanh(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.8986, -0.7279, 1.1745, 0.2611]) + >>> torch.tanh(a) + tensor([ 0.7156, -0.6218, 0.8257, 0.2553]) +""".format( + **common_args + ), +) + +add_docstr( + # torch.softmax doc str. Point this to torch.nn.functional.softmax + torch.softmax, + r""" +softmax(input, dim, *, dtype=None) -> Tensor + +Alias for :func:`torch.nn.functional.softmax`. +""", +) + +add_docstr( + torch.topk, + r""" +topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor) + +Returns the :attr:`k` largest elements of the given :attr:`input` tensor along +a given dimension. + +If :attr:`dim` is not given, the last dimension of the `input` is chosen. + +If :attr:`largest` is ``False`` then the `k` smallest elements are returned. + +A namedtuple of `(values, indices)` is returned with the `values` and +`indices` of the largest `k` elements of each row of the `input` tensor in the +given dimension `dim`. + +The boolean option :attr:`sorted` if ``True``, will make sure that the returned +`k` elements are themselves sorted + +Args: + {input} + k (int): the k in "top-k" + dim (int, optional): the dimension to sort along + largest (bool, optional): controls whether to return largest or + smallest elements + sorted (bool, optional): controls whether to return the elements + in sorted order + +Keyword args: + out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be + optionally given to be used as output buffers + +Example:: + + >>> x = torch.arange(1., 6.) + >>> x + tensor([ 1., 2., 3., 4., 5.]) + >>> torch.topk(x, 3) + torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2])) +""".format( + **common_args + ), +) + +add_docstr( + torch.trace, + r""" +trace(input) -> Tensor + +Returns the sum of the elements of the diagonal of the input 2-D matrix. 
+ +Example:: + + >>> x = torch.arange(1., 10.).view(3, 3) + >>> x + tensor([[ 1., 2., 3.], + [ 4., 5., 6.], + [ 7., 8., 9.]]) + >>> torch.trace(x) + tensor(15.) +""", +) + +add_docstr( + torch.transpose, + r""" +transpose(input, dim0, dim1) -> Tensor + +Returns a tensor that is a transposed version of :attr:`input`. +The given dimensions :attr:`dim0` and :attr:`dim1` are swapped. + +If :attr:`input` is a strided tensor then the resulting :attr:`out` +tensor shares its underlying storage with the :attr:`input` tensor, so +changing the content of one would change the content of the other. + +If :attr:`input` is a :ref:`sparse tensor ` then the +resulting :attr:`out` tensor *does not* share the underlying storage +with the :attr:`input` tensor. + +If :attr:`input` is a :ref:`sparse tensor ` with compressed +layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments +:attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must +both be sparse dimensions. The batch dimensions of a sparse tensor are the +dimensions preceding the sparse dimensions. + +.. note:: + Transpositions which interchange the sparse dimensions of a `SparseCSR` + or `SparseCSC` layout tensor will result in the layout changing between + the two options. Transposition of the sparse dimensions of a ` SparseBSR` + or `SparseBSC` layout tensor will likewise generate a result with the + opposite layout. + + +Args: + {input} + dim0 (int): the first dimension to be transposed + dim1 (int): the second dimension to be transposed + +Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 1.0028, -0.9893, 0.5809], + [-0.1669, 0.7299, 0.4942]]) + >>> torch.transpose(x, 0, 1) + tensor([[ 1.0028, -0.1669], + [-0.9893, 0.7299], + [ 0.5809, 0.4942]]) + +See also :func:`torch.t`. 
+""".format( + **common_args + ), +) + +add_docstr( + torch.triangular_solve, + r""" +triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor) + +Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A` +and multiple right-hand sides :math:`b`. + +In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular +(or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal. + +`torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are +batches of 2D matrices. If the inputs are batches, then returns +batched outputs `X` + +If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and +:attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned, +the result may contain `NaN` s. + +Supports input of float, double, cfloat and cdouble data types. + +.. warning:: + + :func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular` + and will be removed in a future PyTorch release. + :func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a + copy of one of the inputs. + + ``X = torch.triangular_solve(B, A).solution`` should be replaced with + + .. code:: python + + X = torch.linalg.solve_triangular(A, B) + +Args: + b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where + :math:`*` is zero of more batch dimensions + A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)` + where :math:`*` is zero or more batch dimensions + upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``. + transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``, + and `op(A) = A` if it is ``False``. Default: ``False``. + unitriangular (bool, optional): whether :math:`A` is unit triangular. 
+ If True, the diagonal elements of :math:`A` are assumed to be + 1 and not referenced from :math:`A`. Default: ``False``. + +Keyword args: + out ((Tensor, Tensor), optional): tuple of two tensors to write + the output to. Ignored if `None`. Default: `None`. + +Returns: + A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient` + is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b` + (or whatever variant of the system of equations, depending on the keyword arguments.) + +Examples:: + + >>> A = torch.randn(2, 2).triu() + >>> A + tensor([[ 1.1527, -1.0753], + [ 0.0000, 0.7986]]) + >>> b = torch.randn(2, 3) + >>> b + tensor([[-0.0210, 2.3513, -1.5492], + [ 1.5429, 0.7403, -1.0243]]) + >>> torch.triangular_solve(b, A) + torch.return_types.triangular_solve( + solution=tensor([[ 1.7841, 2.9046, -2.5405], + [ 1.9320, 0.9270, -1.2826]]), + cloned_coefficient=tensor([[ 1.1527, -1.0753], + [ 0.0000, 0.7986]])) +""", +) + +add_docstr( + torch.tril, + r""" +tril(input, diagonal=0, *, out=None) -> Tensor + +Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices +:attr:`input`, the other elements of the result tensor :attr:`out` are set to 0. + +The lower triangular part of the matrix is defined as the elements on and +below the diagonal. + +The argument :attr:`diagonal` controls which diagonal to consider. If +:attr:`diagonal` = 0, all elements on and below the main diagonal are +retained. A positive value includes just as many diagonals above the main +diagonal, and similarly a negative value excludes just as many diagonals below +the main diagonal. The main diagonal are the set of indices +:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where +:math:`d_{1}, d_{2}` are the dimensions of the matrix. 
+""" + + r""" +Args: + {input} + diagonal (int, optional): the diagonal to consider + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-1.0813, -0.8619, 0.7105], + [ 0.0935, 0.1380, 2.2112], + [-0.3409, -0.9828, 0.0289]]) + >>> torch.tril(a) + tensor([[-1.0813, 0.0000, 0.0000], + [ 0.0935, 0.1380, 0.0000], + [-0.3409, -0.9828, 0.0289]]) + + >>> b = torch.randn(4, 6) + >>> b + tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461], + [ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145], + [ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864], + [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]]) + >>> torch.tril(b, diagonal=1) + tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000], + [ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000], + [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]]) + >>> torch.tril(b, diagonal=-1) + tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000], + [-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]]) +""".format( + **common_args + ), +) + +# docstr is split in two parts to avoid format mis-captureing :math: braces '{}' +# as common args. +add_docstr( + torch.tril_indices, + r""" +tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor + +Returns the indices of the lower triangular part of a :attr:`row`-by- +:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row +coordinates of all indices and the second row contains column coordinates. +Indices are ordered based on rows and then columns. + +The lower triangular part of the matrix is defined as the elements on and +below the diagonal. + +The argument :attr:`offset` controls which diagonal to consider. If +:attr:`offset` = 0, all elements on and below the main diagonal are +retained. 
A positive value includes just as many diagonals above the main +diagonal, and similarly a negative value excludes just as many diagonals below +the main diagonal. The main diagonal are the set of indices +:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` +where :math:`d_{1}, d_{2}` are the dimensions of the matrix. + +.. note:: + When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to + prevent overflow during calculation. +""" + + r""" +Args: + row (``int``): number of rows in the 2-D matrix. + col (``int``): number of columns in the 2-D matrix. + offset (``int``): diagonal offset from the main diagonal. + Default: if not provided, 0. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, ``torch.long``. + {device} + layout (:class:`torch.layout`, optional): currently only support ``torch.strided``. + +Example:: + + >>> a = torch.tril_indices(3, 3) + >>> a + tensor([[0, 1, 1, 2, 2, 2], + [0, 0, 1, 0, 1, 2]]) + + >>> a = torch.tril_indices(4, 3, -1) + >>> a + tensor([[1, 2, 2, 3, 3, 3], + [0, 0, 1, 0, 1, 2]]) + + >>> a = torch.tril_indices(4, 3, 1) + >>> a + tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3], + [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.triu, + r""" +triu(input, diagonal=0, *, out=None) -> Tensor + +Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices +:attr:`input`, the other elements of the result tensor :attr:`out` are set to 0. + +The upper triangular part of the matrix is defined as the elements on and +above the diagonal. + +The argument :attr:`diagonal` controls which diagonal to consider. If +:attr:`diagonal` = 0, all elements on and above the main diagonal are +retained. A positive value excludes just as many diagonals above the main +diagonal, and similarly a negative value includes just as many diagonals below +the main diagonal. 
The main diagonal are the set of indices +:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where +:math:`d_{1}, d_{2}` are the dimensions of the matrix. +""" + + r""" +Args: + {input} + diagonal (int, optional): the diagonal to consider + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[ 0.2309, 0.5207, 2.0049], + [ 0.2072, -1.0680, 0.6602], + [ 0.3480, -0.5211, -0.4573]]) + >>> torch.triu(a) + tensor([[ 0.2309, 0.5207, 2.0049], + [ 0.0000, -1.0680, 0.6602], + [ 0.0000, 0.0000, -0.4573]]) + >>> torch.triu(a, diagonal=1) + tensor([[ 0.0000, 0.5207, 2.0049], + [ 0.0000, 0.0000, 0.6602], + [ 0.0000, 0.0000, 0.0000]]) + >>> torch.triu(a, diagonal=-1) + tensor([[ 0.2309, 0.5207, 2.0049], + [ 0.2072, -1.0680, 0.6602], + [ 0.0000, -0.5211, -0.4573]]) + + >>> b = torch.randn(4, 6) + >>> b + tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], + [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857], + [ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410], + [-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]]) + >>> torch.triu(b, diagonal=1) + tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], + [ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857], + [ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]]) + >>> torch.triu(b, diagonal=-1) + tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], + [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857], + [ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410], + [ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]]) +""".format( + **common_args + ), +) + +# docstr is split in two parts to avoid format mis-capturing :math: braces '{}' +# as common args. 
+add_docstr( + torch.triu_indices, + r""" +triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor + +Returns the indices of the upper triangular part of a :attr:`row` by +:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row +coordinates of all indices and the second row contains column coordinates. +Indices are ordered based on rows and then columns. + +The upper triangular part of the matrix is defined as the elements on and +above the diagonal. + +The argument :attr:`offset` controls which diagonal to consider. If +:attr:`offset` = 0, all elements on and above the main diagonal are +retained. A positive value excludes just as many diagonals above the main +diagonal, and similarly a negative value includes just as many diagonals below +the main diagonal. The main diagonal are the set of indices +:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` +where :math:`d_{1}, d_{2}` are the dimensions of the matrix. + +.. note:: + When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to + prevent overflow during calculation. +""" + + r""" +Args: + row (``int``): number of rows in the 2-D matrix. + col (``int``): number of columns in the 2-D matrix. + offset (``int``): diagonal offset from the main diagonal. + Default: if not provided, 0. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, ``torch.long``. + {device} + layout (:class:`torch.layout`, optional): currently only support ``torch.strided``. 
+ +Example:: + + >>> a = torch.triu_indices(3, 3) + >>> a + tensor([[0, 0, 0, 1, 1, 2], + [0, 1, 2, 1, 2, 2]]) + + >>> a = torch.triu_indices(4, 3, -1) + >>> a + tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3], + [0, 1, 2, 0, 1, 2, 1, 2, 2]]) + + >>> a = torch.triu_indices(4, 3, 1) + >>> a + tensor([[0, 0, 1], + [1, 2, 2]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.true_divide, + r""" +true_divide(dividend, divisor, *, out) -> Tensor + +Alias for :func:`torch.div` with ``rounding_mode=None``. +""", +) + +add_docstr( + torch.trunc, + r""" +trunc(input, *, out=None) -> Tensor + +Returns a new tensor with the truncated integer values of +the elements of :attr:`input`. + +For integer inputs, follows the array-api convention of returning a +copy of the input tensor. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 3.4742, 0.5466, -0.8008, -0.9079]) + >>> torch.trunc(a) + tensor([ 3., 0., -0., -0.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.fake_quantize_per_tensor_affine, + r""" +fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor + +Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`, +:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`. + +.. 
math:: + \text{output} = ( + min( + \text{quant\_max}, + max( + \text{quant\_min}, + \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point} + ) + ) - \text{zero\_point} + ) \times \text{scale} + +Args: + input (Tensor): the input value(s), ``torch.float32`` tensor + scale (double scalar or ``float32`` Tensor): quantization scale + zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point + quant_min (int64): lower bound of the quantized domain + quant_max (int64): upper bound of the quantized domain + +Returns: + Tensor: A newly fake_quantized ``torch.float32`` tensor + +Example:: + + >>> x = torch.randn(4) + >>> x + tensor([ 0.0552, 0.9730, 0.3973, -1.0780]) + >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255) + tensor([0.1000, 1.0000, 0.4000, 0.0000]) + >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255) + tensor([0.1000, 1.0000, 0.4000, 0.0000]) +""", +) + +add_docstr( + torch.fake_quantize_per_channel_affine, + r""" +fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor + +Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`, +:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`. + +.. 
math:: + \text{output} = ( + min( + \text{quant\_max}, + max( + \text{quant\_min}, + \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point} + ) + ) - \text{zero\_point} + ) \times \text{scale} + +Args: + input (Tensor): the input value(s), in ``torch.float32`` + scale (Tensor): quantization scale, per channel in ``torch.float32`` + zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32`` + axis (int32): channel axis + quant_min (int64): lower bound of the quantized domain + quant_max (int64): upper bound of the quantized domain + +Returns: + Tensor: A newly fake_quantized per channel ``torch.float32`` tensor + +Example:: + + >>> x = torch.randn(2, 2, 2) + >>> x + tensor([[[-0.2525, -0.0466], + [ 0.3491, -0.2168]], + + [[-0.5906, 1.6258], + [ 0.6444, -0.0542]]]) + >>> scales = (torch.randn(2) + 1) * 0.05 + >>> scales + tensor([0.0475, 0.0486]) + >>> zero_points = torch.zeros(2).to(torch.int32) + >>> zero_points + tensor([0, 0]) + >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255) + tensor([[[0.0000, 0.0000], + [0.3405, 0.0000]], + + [[0.0000, 1.6134], + [0.6323, 0.0000]]]) +""", +) + +add_docstr( + torch.fix, + r""" +fix(input, *, out=None) -> Tensor + +Alias for :func:`torch.trunc` +""", +) + +add_docstr( + torch.unsqueeze, + r""" +unsqueeze(input, dim) -> Tensor + +Returns a new tensor with a dimension of size one inserted at the +specified position. + +The returned tensor shares the same underlying data with this tensor. + +A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)`` +can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze` +applied at :attr:`dim` = ``dim + input.dim() + 1``. 
+ +Args: + {input} + dim (int): the index at which to insert the singleton dimension + +Example:: + + >>> x = torch.tensor([1, 2, 3, 4]) + >>> torch.unsqueeze(x, 0) + tensor([[ 1, 2, 3, 4]]) + >>> torch.unsqueeze(x, 1) + tensor([[ 1], + [ 2], + [ 3], + [ 4]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.var, + r""" +var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + +Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` +can be a single dimension, list of dimensions, or ``None`` to reduce over all +dimensions. + +The variance (:math:`\sigma^2`) is calculated as + +.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + +where :math:`x` is the sample set of elements, :math:`\bar{x}` is the +sample mean, :math:`N` is the number of samples and :math:`\delta N` is +the :attr:`correction`. +""" + + r""" + +{keepdim_details} + +Args: + {input} + {opt_dim} + +Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + {keepdim} + {out} + +Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var(a, dim=1, keepdim=True) + tensor([[1.0631], + [0.5590], + [1.4893], + [0.8258]]) + +.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.var_mean, + r""" +var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + +Calculates the variance and mean over the dimensions specified by :attr:`dim`. 
+:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to +reduce over all dimensions. + +The variance (:math:`\sigma^2`) is calculated as + +.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + +where :math:`x` is the sample set of elements, :math:`\bar{x}` is the +sample mean, :math:`N` is the number of samples and :math:`\delta N` is +the :attr:`correction`. +""" + + r""" + +{keepdim_details} + +Args: + {input} + {opt_dim} + +Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + {keepdim} + {out} + +Returns: + A tuple (var, mean) containing the variance and mean. + +Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var_mean(a, dim=0, keepdim=True) + (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + +.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.zeros, + r""" +zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Returns a tensor filled with the scalar value `0`, with the shape defined +by the variable argument :attr:`size`. + +Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. 
+ +Keyword args: + {out} + {dtype} + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.zeros(2, 3) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) + + >>> torch.zeros(5) + tensor([ 0., 0., 0., 0., 0.]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.zeros_like, + r""" +zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + +Returns a tensor filled with the scalar value `0`, with the same size as +:attr:`input`. ``torch.zeros_like(input)`` is equivalent to +``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + +.. warning:: + As of 0.4, this function does not support an :attr:`out` keyword. As an alternative, + the old ``torch.zeros_like(input, out=output)`` is equivalent to + ``torch.zeros(input.size(), out=output)``. + +Args: + {input} + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} + +Example:: + + >>> input = torch.empty(2, 3) + >>> torch.zeros_like(input) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.empty, + """ +empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, \ +memory_format=torch.contiguous_format) -> Tensor + +Returns a tensor filled with uninitialized data. The shape of the tensor is +defined by the variable argument :attr:`size`. + +.. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + +Args: + size (int...): a sequence of integers defining the shape of the output tensor. 
+ Can be a variable number of arguments or a collection like a list or tuple. + +Keyword args: + {out} + {dtype} + {layout} + {device} + {requires_grad} + {pin_memory} + {memory_format} + +Example:: + + >>> torch.empty((2,3), dtype=torch.int64) + tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13], + [ 7.5751e+18, 7.1428e+18, 7.5955e+18]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.empty_like, + r""" +empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + +Returns an uninitialized tensor with the same size as :attr:`input`. +``torch.empty_like(input)`` is equivalent to +``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + +.. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + +Args: + {input} + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} + +Example:: + + >>> a=torch.empty((2,3), dtype=torch.int32, device = 'cuda') + >>> torch.empty_like(a) + tensor([[0, 0, 0], + [0, 0, 0]], device='cuda:0', dtype=torch.int32) +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.empty_strided, + r""" +empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor + +Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data. + +.. warning:: + If the constructed tensor is "overlapped" (with multiple indices referring to the same element + in memory) its behavior is undefined. + +.. 
note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + +Args: + size (tuple of int): the shape of the output tensor + stride (tuple of int): the strides of the output tensor + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {pin_memory} + +Example:: + + >>> a = torch.empty_strided((2, 3), (1, 2)) + >>> a + tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07], + [0.0000e+00, 0.0000e+00, 3.0705e-41]]) + >>> a.stride() + (1, 2) + >>> a.size() + torch.Size([2, 3]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.empty_permuted, + r""" +empty_permuted(size, physical_layout, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor + +Creates an uninitialized, non-overlapping and dense tensor with the +specified :attr:`size`, with :attr:`physical_layout` specifying how the +dimensions are physically laid out in memory (each logical dimension is listed +from outermost to innermost). :attr:`physical_layout` is a generalization +of NCHW/NHWC notation: if each dimension is assigned a number according to +what order they occur in size (N=0, C=1, H=2, W=3), then NCHW is ``(0, 1, 2, 3)`` +while NHWC is ``(0, 2, 3, 1)``. Equivalently, the strides of the output +tensor ``t`` are such that ``t.stride(physical_layout[i]) == contiguous_strides[i]`` +(notably, this function is *not* equivalent to ``torch.empty(size).permute(physical_layout)``). + +Unlike :func:`torch.empty_strided`, this is guaranteed to produce a dense +tensor with no overlaps. If possible, prefer using this function over +:func:`torch.empty_strided` or manual use of :func:`torch.as_strided`. + +.. 
note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + +Args: + size (tuple of int): the shape of the output tensor + physical_layout (tuple of int): the ordering of dimensions physically in memory + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {pin_memory} + +Examples: + + >>> torch.empty((2, 3, 5, 7)).stride() + (105, 35, 7, 1) + >>> torch.empty_permuted((2, 3, 5, 7), (0, 1, 2, 3)).stride() + (105, 35, 7, 1) + >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride() + (105, 1, 21, 3) + >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).stride() + (105, 1, 21, 3) + >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).dim_order() + (0, 2, 3, 1) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.full, + r""" +full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The +tensor's dtype is inferred from :attr:`fill_value`. + +Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + fill_value (Scalar): the value to fill the output tensor with. 
+ +Keyword args: + {out} + {dtype} + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.full((2, 3), 3.141592) + tensor([[ 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.full_like, + """ +full_like(input, fill_value, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \ +memory_format=torch.preserve_format) -> Tensor + +Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`. +``torch.full_like(input, fill_value)`` is equivalent to +``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``. + +Args: + {input} + fill_value: the number to fill the output tensor with. + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.det, + r""" +det(input) -> Tensor + +Alias for :func:`torch.linalg.det` +""", +) + +add_docstr( + torch.where, + r""" +where(condition, input, other, *, out=None) -> Tensor + +Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. + +The operation is defined as: + +.. math:: + \text{out}_i = \begin{cases} + \text{input}_i & \text{if } \text{condition}_i \\ + \text{other}_i & \text{otherwise} \\ + \end{cases} +""" + + r""" +.. note:: + The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. 
+ +Arguments: + condition (BoolTensor): When True (nonzero), yield input, otherwise yield other + input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices + where :attr:`condition` is ``True`` + other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices + where :attr:`condition` is ``False`` + +Keyword args: + {out} + +Returns: + Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` + +Example:: + + >>> x = torch.randn(3, 2) + >>> y = torch.ones(3, 2) + >>> x + tensor([[-0.4620, 0.3139], + [ 0.3898, -0.7197], + [ 0.0478, -0.1657]]) + >>> torch.where(x > 0, 1.0, 0.0) + tensor([[0., 1.], + [1., 0.], + [1., 0.]]) + >>> torch.where(x > 0, x, y) + tensor([[ 1.0000, 0.3139], + [ 0.3898, 1.0000], + [ 0.0478, 1.0000]]) + >>> x = torch.randn(2, 2, dtype=torch.double) + >>> x + tensor([[ 1.0779, 0.0383], + [-0.8785, -1.1089]], dtype=torch.float64) + >>> torch.where(x > 0, x, 0.) + tensor([[1.0779, 0.0383], + [0.0000, 0.0000]], dtype=torch.float64) + +.. function:: where(condition) -> tuple of LongTensor + :noindex: + +``torch.where(condition)`` is identical to +``torch.nonzero(condition, as_tuple=True)``. + +.. note:: + See also :func:`torch.nonzero`. +""".format( + **common_args + ), +) + +add_docstr( + torch.logdet, + r""" +logdet(input) -> Tensor + +Calculates log determinant of a square matrix or batches of square matrices. + +It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has +a negative determinant. + +.. note:: + Backward through :meth:`logdet` internally uses SVD results when :attr:`input` + is not invertible. In this case, double backward through :meth:`logdet` will + be unstable in when :attr:`input` doesn't have distinct singular values. See + :func:`torch.linalg.svd` for details. + +.. seealso:: + + :func:`torch.linalg.slogdet` computes the sign (resp. 
angle) and natural logarithm of the + absolute value of the determinant of real-valued (resp. complex) square matrices. + +Arguments: + input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more + batch dimensions. + +Example:: + + >>> A = torch.randn(3, 3) + >>> torch.det(A) + tensor(0.2611) + >>> torch.logdet(A) + tensor(-1.3430) + >>> A + tensor([[[ 0.9254, -0.6213], + [-0.5787, 1.6843]], + + [[ 0.3242, -0.9665], + [ 0.4539, -0.0887]], + + [[ 1.1336, -0.4025], + [-0.7089, 0.9032]]]) + >>> A.det() + tensor([1.1990, 0.4099, 0.7386]) + >>> A.det().log() + tensor([ 0.1815, -0.8917, -0.3031]) +""", +) + +add_docstr( + torch.slogdet, + r""" +slogdet(input) -> (Tensor, Tensor) + +Alias for :func:`torch.linalg.slogdet` +""", +) + +add_docstr( + torch.pinverse, + r""" +pinverse(input, rcond=1e-15) -> Tensor + +Alias for :func:`torch.linalg.pinv` +""", +) + +add_docstr( + torch.hann_window, + """ +hann_window(window_length, periodic=True, *, dtype=None, \ +layout=torch.strided, device=None, requires_grad=False) -> Tensor +""" + + r""" +Hann window function. + +.. math:: + w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] = + \sin^2 \left( \frac{\pi n}{N - 1} \right), + +where :math:`N` is the full window size. + +The input :attr:`window_length` is a positive integer controlling the +returned window size. :attr:`periodic` flag determines whether the returned +window trims off the last duplicate value from the symmetric window and is +ready to be used as a periodic window with functions like +:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in +above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have +``torch.hann_window(L, periodic=True)`` equal to +``torch.hann_window(L + 1, periodic=False)[:-1])``. + +.. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. 
+""" + + r""" +Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + +Keyword args: + {dtype} Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + {device} + {requires_grad} + +Returns: + Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window + +""".format( + **factory_common_args + ), +) + + +add_docstr( + torch.hamming_window, + """ +hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, \ +layout=torch.strided, device=None, requires_grad=False) -> Tensor +""" + + r""" +Hamming window function. + +.. math:: + w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right), + +where :math:`N` is the full window size. + +The input :attr:`window_length` is a positive integer controlling the +returned window size. :attr:`periodic` flag determines whether the returned +window trims off the last duplicate value from the symmetric window and is +ready to be used as a periodic window with functions like +:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in +above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have +``torch.hamming_window(L, periodic=True)`` equal to +``torch.hamming_window(L + 1, periodic=False)[:-1])``. + +.. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + +.. note:: + This is a generalized version of :meth:`torch.hann_window`. +""" + + r""" +Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. 
+ alpha (float, optional): The coefficient :math:`\alpha` in the equation above + beta (float, optional): The coefficient :math:`\beta` in the equation above + +Keyword args: + {dtype} Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + {device} + {requires_grad} + +Returns: + Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window. + +""".format( + **factory_common_args + ), +) + + +add_docstr( + torch.bartlett_window, + """ +bartlett_window(window_length, periodic=True, *, dtype=None, \ +layout=torch.strided, device=None, requires_grad=False) -> Tensor +""" + + r""" +Bartlett window function. + +.. math:: + w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases} + \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\ + 2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\ + \end{cases}, + +where :math:`N` is the full window size. + +The input :attr:`window_length` is a positive integer controlling the +returned window size. :attr:`periodic` flag determines whether the returned +window trims off the last duplicate value from the symmetric window and is +ready to be used as a periodic window with functions like +:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in +above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have +``torch.bartlett_window(L, periodic=True)`` equal to +``torch.bartlett_window(L + 1, periodic=False)[:-1])``. + +.. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. +""" + + r""" +Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + +Keyword args: + {dtype} Only floating point types are supported. 
+ layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + {device} + {requires_grad} + +Returns: + Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window + +""".format( + **factory_common_args + ), +) + + +add_docstr( + torch.blackman_window, + """ +blackman_window(window_length, periodic=True, *, dtype=None, \ +layout=torch.strided, device=None, requires_grad=False) -> Tensor +""" + + r""" +Blackman window function. + +.. math:: + w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right) + +where :math:`N` is the full window size. + +The input :attr:`window_length` is a positive integer controlling the +returned window size. :attr:`periodic` flag determines whether the returned +window trims off the last duplicate value from the symmetric window and is +ready to be used as a periodic window with functions like +:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in +above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have +``torch.blackman_window(L, periodic=True)`` equal to +``torch.blackman_window(L + 1, periodic=False)[:-1])``. + +.. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. +""" + + r""" +Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + +Keyword args: + {dtype} Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. 
+ {device} + {requires_grad} + +Returns: + Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window + +""".format( + **factory_common_args + ), +) + + +add_docstr( + torch.kaiser_window, + """ +kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, \ +layout=torch.strided, device=None, requires_grad=False) -> Tensor +""" + + r""" +Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`. + +Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and +``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True, +where ``L`` is the :attr:`window_length`. This function computes: + +.. math:: + out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta ) + +Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling +``torch.kaiser_window(L + 1, B, periodic=False)[:-1])``. +The :attr:`periodic` argument is intended as a helpful shorthand +to produce a periodic window as input to functions like :func:`torch.stft`. + +.. note:: + If :attr:`window_length` is one, then the returned window is a single element tensor containing a one. + +""" + + r""" +Args: + window_length (int): length of the window. + periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis. + If False, returns a symmetric window suitable for use in filter design. + beta (float, optional): shape parameter for the window. + +Keyword args: + {dtype} + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + {device} + {requires_grad} + +""".format( + **factory_common_args + ), +) + + +add_docstr( + torch.vander, + """ +vander(x, N=None, increasing=False) -> Tensor +""" + + r""" +Generates a Vandermonde matrix. 
+ +The columns of the output matrix are elementwise powers of the input vector :math:`x^{{(N-1)}}, x^{{(N-2)}}, ..., x^0`. +If increasing is True, the order of the columns is reversed :math:`x^0, x^1, ..., x^{{(N-1)}}`. Such a +matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde. + +Arguments: + x (Tensor): 1-D input tensor. + N (int, optional): Number of columns in the output. If N is not specified, + a square array is returned :math:`(N = len(x))`. + increasing (bool, optional): Order of the powers of the columns. If True, + the powers increase from left to right, if False (the default) they are reversed. + +Returns: + Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{{(N-1)}}`, + the second :math:`x^{{(N-2)}}` and so forth. If increasing is True, the columns + are :math:`x^0, x^1, ..., x^{{(N-1)}}`. + +Example:: + + >>> x = torch.tensor([1, 2, 3, 5]) + >>> torch.vander(x) + tensor([[ 1, 1, 1, 1], + [ 8, 4, 2, 1], + [ 27, 9, 3, 1], + [125, 25, 5, 1]]) + >>> torch.vander(x, N=3) + tensor([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + >>> torch.vander(x, N=3, increasing=True) + tensor([[ 1, 1, 1], + [ 1, 2, 4], + [ 1, 3, 9], + [ 1, 5, 25]]) + +""".format( + **factory_common_args + ), +) + + +add_docstr( + torch.unbind, + r""" +unbind(input, dim=0) -> seq + +Removes a tensor dimension. + +Returns a tuple of all slices along a given dimension, already without it. + +Arguments: + input (Tensor): the tensor to unbind + dim (int): dimension to remove + +Example:: + + >>> torch.unbind(torch.tensor([[1, 2, 3], + >>> [4, 5, 6], + >>> [7, 8, 9]])) + (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9])) +""", +) + + +add_docstr( + torch.combinations, + r""" +combinations(input, r=2, with_replacement=False) -> seq + +Compute combinations of length :math:`r` of the given tensor. 
The behavior is similar to +python's `itertools.combinations` when `with_replacement` is set to `False`, and +`itertools.combinations_with_replacement` when `with_replacement` is set to `True`. + +Arguments: + input (Tensor): 1D vector. + r (int, optional): number of elements to combine + with_replacement (bool, optional): whether to allow duplication in combination + +Returns: + Tensor: A tensor equivalent to converting all the input tensors into lists, do + `itertools.combinations` or `itertools.combinations_with_replacement` on these + lists, and finally convert the resulting list into tensor. + +Example:: + + >>> a = [1, 2, 3] + >>> list(itertools.combinations(a, r=2)) + [(1, 2), (1, 3), (2, 3)] + >>> list(itertools.combinations(a, r=3)) + [(1, 2, 3)] + >>> list(itertools.combinations_with_replacement(a, r=2)) + [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)] + >>> tensor_a = torch.tensor(a) + >>> torch.combinations(tensor_a) + tensor([[1, 2], + [1, 3], + [2, 3]]) + >>> torch.combinations(tensor_a, r=3) + tensor([[1, 2, 3]]) + >>> torch.combinations(tensor_a, with_replacement=True) + tensor([[1, 1], + [1, 2], + [1, 3], + [2, 2], + [2, 3], + [3, 3]]) + +""", +) + +add_docstr( + torch.trapezoid, + r""" +trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor + +Computes the `trapezoidal rule `_ along +:attr:`dim`. By default the spacing between elements is assumed to be 1, but +:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be +used to specify arbitrary spacing along :attr:`dim`. + + +Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`, +the default computation is + +.. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1}) + \end{aligned} + +When :attr:`dx` is specified the computation becomes + +.. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1}) + \end{aligned} + +effectively multiplying the result by :attr:`dx`. 
When :attr:`x` is specified, +assuming :attr:`x` is also a one-dimensional tensor with +elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes + +.. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1}) + \end{aligned} + +When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed. +The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x` +and :attr:`y`, the function computes the difference between consecutive elements along +dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have +the same shape as the original tensors except their lengths along the dimension :attr:`dim` is reduced by 1. +After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule. +See the examples below for details. + +.. note:: + The trapezoidal rule is a technique for approximating the definite integral of a function + by averaging its left and right Riemann sums. The approximation becomes more accurate as + the resolution of the partition increases. + +Arguments: + y (Tensor): Values to use when computing the trapezoidal rule. + x (Tensor): If specified, defines spacing between values as specified above. + +Keyword arguments: + dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx` + are specified then this defaults to 1. Effectively multiplies the result by its value. + dim (int): The dimension along which to compute the trapezoidal rule. + The last (inner-most) dimension by default. 
+
+Examples::
+
+    >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
+    >>> y = torch.tensor([1, 5, 10])
+    >>> torch.trapezoid(y)
+    tensor(10.5)
+
+    >>> # Computes the same trapezoidal rule directly to verify
+    >>> (1 + 10 + 10) / 2
+    10.5
+
+    >>> # Computes the trapezoidal rule in 1D with constant spacing of 2
+    >>> # NOTE: the result is the same as before, but multiplied by 2
+    >>> torch.trapezoid(y, dx=2)
+    21.0
+
+    >>> # Computes the trapezoidal rule in 1D with arbitrary spacing
+    >>> x = torch.tensor([1, 3, 6])
+    >>> torch.trapezoid(y, x)
+    28.5
+
+    >>> # Computes the same trapezoidal rule directly to verify
+    >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
+    28.5
+
+    >>> # Computes the trapezoidal rule for each row of a 3x3 matrix
+    >>> y = torch.arange(9).reshape(3, 3)
+    tensor([[0, 1, 2],
+            [3, 4, 5],
+            [6, 7, 8]])
+    >>> torch.trapezoid(y)
+    tensor([ 2., 8., 14.])
+
+    >>> # Computes the trapezoidal rule for each column of the matrix
+    >>> torch.trapezoid(y, dim=0)
+    tensor([ 6., 8., 10.])
+
+    >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
+    >>> # with the same arbitrary spacing
+    >>> y = torch.ones(3, 3)
+    >>> x = torch.tensor([1, 3, 6])
+    >>> torch.trapezoid(y, x)
+    tensor([5., 5., 5.])
+
+    >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
+    >>> # with different arbitrary spacing per row
+    >>> y = torch.ones(3, 3)
+    >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
+    >>> torch.trapezoid(y, x)
+    tensor([2., 4., 6.])
+""",
+)
+
+add_docstr(
+    torch.trapz,
+    r"""
+trapz(y, x, *, dim=-1) -> Tensor
+
+Alias for :func:`torch.trapezoid`.
+""",
+)
+
+add_docstr(
+    torch.cumulative_trapezoid,
+    r"""
+cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
+
+Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
+along :attr:`dim`. 
By default the spacing between elements is assumed to be 1, but
+:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
+used to specify arbitrary spacing along :attr:`dim`.
+
+For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
+and this function is that, :func:`torch.trapezoid` returns a value for each integration,
+whereas this function returns a cumulative value for every spacing within the integration. This
+is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.
+
+Arguments:
+    y (Tensor): Values to use when computing the trapezoidal rule.
+    x (Tensor): If specified, defines spacing between values as specified above.
+
+Keyword arguments:
+    dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
+        is specified then this defaults to 1. Effectively multiplies the result by its value.
+    dim (int): The dimension along which to compute the trapezoidal rule.
+        The last (inner-most) dimension by default.
+
+Examples::
+
+    >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1. 
+ >>> y = torch.tensor([1, 5, 10]) + >>> torch.cumulative_trapezoid(y) + tensor([3., 10.5]) + + >>> # Computes the same trapezoidal rule directly up to each element to verify + >>> (1 + 5) / 2 + 3.0 + >>> (1 + 10 + 10) / 2 + 10.5 + + >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2 + >>> # NOTE: the result is the same as before, but multiplied by 2 + >>> torch.cumulative_trapezoid(y, dx=2) + tensor([6., 21.]) + + >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing + >>> x = torch.tensor([1, 3, 6]) + >>> torch.cumulative_trapezoid(y, x) + tensor([6., 28.5]) + + >>> # Computes the same trapezoidal rule directly up to each element to verify + >>> ((3 - 1) * (1 + 5)) / 2 + 6.0 + >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2 + 28.5 + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix + >>> y = torch.arange(9).reshape(3, 3) + tensor([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> torch.cumulative_trapezoid(y) + tensor([[ 0.5, 2.], + [ 3.5, 8.], + [ 6.5, 14.]]) + + >>> # Cumulatively computes the trapezoidal rule for each column of the matrix + >>> torch.cumulative_trapezoid(y, dim=0) + tensor([[ 1.5, 2.5, 3.5], + [ 6.0, 8.0, 10.0]]) + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with the same arbitrary spacing + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([1, 3, 6]) + >>> torch.cumulative_trapezoid(y, x) + tensor([[2., 5.], + [2., 5.], + [2., 5.]]) + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with different arbitrary spacing per row + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]]) + >>> torch.cumulative_trapezoid(y, x) + tensor([[1., 2.], + [2., 4.], + [3., 6.]]) +""", +) + +add_docstr( + torch.repeat_interleave, + r""" +repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor + +Repeat elements of a tensor. + +.. 
warning:: + + This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``. + +Args: + {input} + repeats (Tensor or int): The number of repetitions for each element. + repeats is broadcasted to fit the shape of the given axis. + dim (int, optional): The dimension along which to repeat values. + By default, use the flattened input array, and return a flat output + array. + +Keyword args: + output_size (int, optional): Total output size for the given axis + ( e.g. sum of repeats). If given, it will avoid stream synchronization + needed to calculate output shape of the tensor. + +Returns: + Tensor: Repeated tensor which has the same shape as input, except along the given axis. + +Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.repeat_interleave(2) + tensor([1, 1, 2, 2, 3, 3]) + >>> y = torch.tensor([[1, 2], [3, 4]]) + >>> torch.repeat_interleave(y, 2) + tensor([1, 1, 2, 2, 3, 3, 4, 4]) + >>> torch.repeat_interleave(y, 3, dim=1) + tensor([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0) + tensor([[1, 2], + [3, 4], + [3, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3) + tensor([[1, 2], + [3, 4], + [3, 4]]) + +If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be +`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times, +`1` appears `n2` times, `2` appears `n3` times, etc. + +.. function:: repeat_interleave(repeats, *) -> Tensor + :noindex: + +Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc. + +Args: + repeats (Tensor): The number of repetitions for each element. + +Returns: + Tensor: Repeated tensor of size `sum(repeats)`. + +Example:: + + >>> torch.repeat_interleave(torch.tensor([1, 2, 3])) + tensor([0, 1, 1, 2, 2, 2]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.tile, + r""" +tile(input, dims) -> Tensor + +Constructs a tensor by repeating the elements of :attr:`input`. 
+The :attr:`dims` argument specifies the number of repetitions +in each dimension. + +If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then +ones are prepended to :attr:`dims` until all dimensions are specified. +For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims` +is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2). + +Analogously, if :attr:`input` has fewer dimensions than :attr:`dims` +specifies, then :attr:`input` is treated as if it were unsqueezed at +dimension zero until it has as many dimensions as :attr:`dims` specifies. +For example, if :attr:`input` has shape (4, 2) and :attr:`dims` +is (3, 3, 2, 2), then :attr:`input` is treated as if it had the +shape (1, 1, 4, 2). + +.. note:: + + This function is similar to NumPy's tile function. + +Args: + input (Tensor): the tensor whose elements to repeat. + dims (tuple): the number of repetitions per dimension. + +Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.tile((2,)) + tensor([1, 2, 3, 1, 2, 3]) + >>> y = torch.tensor([[1, 2], [3, 4]]) + >>> torch.tile(y, (2, 2)) + tensor([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) +""", +) + +add_docstr( + torch.quantize_per_tensor, + r""" +quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor + +Converts a float tensor to a quantized tensor with given scale and zero point. + +Arguments: + input (Tensor): float tensor or list of tensors to quantize + scale (float or Tensor): scale to apply in quantization formula + zero_point (int or Tensor): offset in integer value that maps to float zero + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` + +Returns: + Tensor: A newly quantized tensor or list of quantized tensors. 
+ +Example:: + + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr() + tensor([ 0, 10, 20, 30], dtype=torch.uint8) + >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])], + >>> torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8) + (tensor([-1., 0.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10), + tensor([-2., 2.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20)) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10) +""", +) + +add_docstr( + torch.quantize_per_tensor_dynamic, + r""" +quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor + +Converts a float tensor to a quantized tensor with scale and zero_point calculated +dynamically based on the input. + +Arguments: + input (Tensor): float tensor or list of tensors to quantize + dtype (:class:`torch.dtype`): the desired data type of returned tensor. 
+ Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8`` + reduce_range (bool): a flag to indicate whether to reduce the range of quantized + data by 1 bit, it's required to avoid instruction overflow for some hardwares + +Returns: + Tensor: A newly (dynamically) quantized tensor + +Example:: + + >>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False) + >>> print(t) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941, + zero_point=85) + >>> t.int_repr() + tensor([ 0, 85, 170, 255], dtype=torch.uint8) +""", +) + +add_docstr( + torch.quantize_per_channel, + r""" +quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor + +Converts a float tensor to a per-channel quantized tensor with given scales and zero points. + +Arguments: + input (Tensor): float tensor to quantize + scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)`` + zero_points (int): integer 1D tensor of offset to use, size should match ``input.size(axis)`` + axis (int): dimension on which apply per-channel quantization + dtype (:class:`torch.dtype`): the desired data type of returned tensor. 
+ Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` + +Returns: + Tensor: A newly quantized tensor + +Example:: + + >>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]]) + >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8) + tensor([[-1., 0.], + [ 1., 2.]], size=(2, 2), dtype=torch.quint8, + quantization_scheme=torch.per_channel_affine, + scale=tensor([0.1000, 0.0100], dtype=torch.float64), + zero_point=tensor([10, 0]), axis=0) + >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr() + tensor([[ 0, 10], + [100, 200]], dtype=torch.uint8) +""", +) + + +add_docstr( + torch.quantized_batch_norm, + r""" +quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor + +Applies batch normalization on a 4D (NCHW) quantized tensor. + +.. math:: + + y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + +Arguments: + input (Tensor): quantized tensor + weight (Tensor): float tensor that corresponds to the gamma, size C + bias (Tensor): float tensor that corresponds to the beta, size C + mean (Tensor): float mean value in batch normalization, size C + var (Tensor): float tensor for variance, size C + eps (float): a value added to the denominator for numerical stability. + output_scale (float): output quantized tensor scale + output_zero_point (int): output quantized tensor zero_point + +Returns: + Tensor: A quantized tensor with batch normalization applied. 
+ +Example:: + + >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8) + >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2) + tensor([[[[-0.2000, -0.2000], + [ 1.6000, -0.2000]], + + [[-0.4000, -0.4000], + [-0.4000, 0.6000]]], + + + [[[-0.2000, -0.2000], + [-0.2000, -0.2000]], + + [[ 0.6000, -0.4000], + [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2) +""", +) + + +add_docstr( + torch.quantized_max_pool1d, + r""" +quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor + +Applies a 1D max pooling over an input quantized tensor composed of several input planes. + +Arguments: + input (Tensor): quantized tensor + kernel_size (list of int): the size of the sliding window + stride (``list of int``, optional): the stride of the sliding window + padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2 + dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1 + ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape. + Defaults to False. + + +Returns: + Tensor: A quantized tensor with max_pool1d applied. + +Example:: + + >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8) + >>> torch.quantized_max_pool1d(qx, [2]) + tensor([[0.0000], + [1.5000]], size=(2, 1), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3) +""", +) + + +add_docstr( + torch.quantized_max_pool2d, + r""" +quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor + +Applies a 2D max pooling over an input quantized tensor composed of several input planes. 
+ +Arguments: + input (Tensor): quantized tensor + kernel_size (``list of int``): the size of the sliding window + stride (``list of int``, optional): the stride of the sliding window + padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2 + dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1 + ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape. + Defaults to False. + + +Returns: + Tensor: A quantized tensor with max_pool2d applied. + +Example:: + + >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8) + >>> torch.quantized_max_pool2d(qx, [2,2]) + tensor([[[[1.5000]], + + [[1.5000]]], + + + [[[0.0000]], + + [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3) +""", +) + + +add_docstr( + torch.Generator, + r""" +Generator(device='cpu') -> Generator + +Creates and returns a generator object that manages the state of the algorithm which +produces pseudo random numbers. Used as a keyword argument in many :ref:`inplace-random-sampling` +functions. + +Arguments: + device (:class:`torch.device`, optional): the desired device for the generator. + +Returns: + Generator: An torch.Generator object. + +Example:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> g_cpu = torch.Generator() + >>> g_cuda = torch.Generator(device='cuda') +""", +) + + +add_docstr( + torch.Generator.set_state, + r""" +Generator.set_state(new_state) -> void + +Sets the Generator state. + +Arguments: + new_state (torch.ByteTensor): The desired state. + +Example:: + + >>> g_cpu = torch.Generator() + >>> g_cpu_other = torch.Generator() + >>> g_cpu.set_state(g_cpu_other.get_state()) +""", +) + + +add_docstr( + torch.Generator.get_state, + r""" +Generator.get_state() -> Tensor + +Returns the Generator state as a ``torch.ByteTensor``. 
+ +Returns: + Tensor: A ``torch.ByteTensor`` which contains all the necessary bits + to restore a Generator to a specific point in time. + +Example:: + + >>> g_cpu = torch.Generator() + >>> g_cpu.get_state() +""", +) + + +add_docstr( + torch.Generator.manual_seed, + r""" +Generator.manual_seed(seed) -> Generator + +Sets the seed for generating random numbers. Returns a `torch.Generator` object. Any 32-bit integer is a valid seed. + +Arguments: + seed (int): The desired seed. Value must be within the inclusive range + `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError + is raised. Negative inputs are remapped to positive values with the formula + `0xffff_ffff_ffff_ffff + seed`. + +Returns: + Generator: An torch.Generator object. + +Example:: + + >>> g_cpu = torch.Generator() + >>> g_cpu.manual_seed(2147483647) +""", +) + + +add_docstr( + torch.Generator.initial_seed, + r""" +Generator.initial_seed() -> int + +Returns the initial seed for generating random numbers. + +Example:: + + >>> g_cpu = torch.Generator() + >>> g_cpu.initial_seed() + 2147483647 +""", +) + + +add_docstr( + torch.Generator.seed, + r""" +Generator.seed() -> int + +Gets a non-deterministic random number from std::random_device or the current +time and uses it to seed a Generator. + +Example:: + + >>> g_cpu = torch.Generator() + >>> g_cpu.seed() + 1516516984916 +""", +) + + +add_docstr( + torch.Generator.device, + r""" +Generator.device -> device + +Gets the current device of the generator. + +Example:: + + >>> g_cpu = torch.Generator() + >>> g_cpu.device + device(type='cpu') +""", +) + +add_docstr( + torch._assert_async, + r""" +_assert_async(tensor) -> void + +Asynchronously assert that the contents of tensor are nonzero. For CPU tensors, +this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for +CUDA tensors, we DO NOT synchronize and you may only find out the assertion +failed at a later CUDA kernel launch. 
Asynchronous assertion can be helpful for +testing invariants in CUDA tensors without giving up performance. This function +is NOT intended to be used for regular error checking, as it will trash your CUDA +context if the assert fails (forcing you to restart your PyTorch process.) + +Args: + tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero + elements (including False for boolean tensors) cause an assertion failure + to be raised. +""", +) + +add_docstr( + torch.searchsorted, + r""" +searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side='left', out=None, sorter=None) -> Tensor + +Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the +corresponding values in :attr:`values` were inserted before the indices, when sorted, the order +of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved. +Return a new tensor with the same size as :attr:`values`. More formally, +the returned index satisfies the following rules: + +.. list-table:: + :widths: 12 10 78 + :header-rows: 1 + + * - :attr:`sorted_sequence` + - :attr:`right` + - *returned index satisfies* + * - 1-D + - False + - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]`` + * - 1-D + - True + - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]`` + * - N-D + - False + - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]`` + * - N-D + - True + - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]`` + +Args: + sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost* + dimension unless :attr:`sorter` is provided, in which case the sequence does not + need to be sorted + values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). + +Keyword args: + out_int32 (bool, optional): indicate the output data type. 
torch.int32 if True, torch.int64 otherwise.
+                              Default value is False, i.e. default output data type is torch.int64.
+    right (bool, optional): if False, return the first suitable location that is found. If True, return the
+                            last such index. If no suitable index found, return 0 for non-numerical value
+                            (eg. nan, inf) or the size of *innermost* dimension within :attr:`sorted_sequence`
+                            (one past the last index of the *innermost* dimension). In other words, if False,
+                            gets the lower bound index for each value in :attr:`values` on the corresponding
+                            *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
+                            bound index instead. Default value is False. :attr:`side` does the same and is
+                            preferred. It will error if :attr:`side` is set to "left" while this is True.
+    side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
+                          and "right" corresponds to True for :attr:`right`. It will error if this is set to
+                          "left" while :attr:`right` is True.
+    out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided. 
+ sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted + :attr:`sorted_sequence` containing a sequence of indices that sort it in the + ascending order on the innermost dimension + + +Example:: + + >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]]) + >>> sorted_sequence + tensor([[ 1, 3, 5, 7, 9], + [ 2, 4, 6, 8, 10]]) + >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]]) + >>> values + tensor([[3, 6, 9], + [3, 6, 9]]) + >>> torch.searchsorted(sorted_sequence, values) + tensor([[1, 3, 4], + [1, 2, 4]]) + >>> torch.searchsorted(sorted_sequence, values, side='right') + tensor([[2, 3, 5], + [1, 3, 4]]) + + >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9]) + >>> sorted_sequence_1d + tensor([1, 3, 5, 7, 9]) + >>> torch.searchsorted(sorted_sequence_1d, values) + tensor([[1, 3, 4], + [1, 3, 4]]) +""", +) + +add_docstr( + torch.bucketize, + r""" +bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor + +Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the +boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size +as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that +this behavior is opposite the behavior of +`numpy.digitize `_. +More formally, the returned index satisfies the following rules: + +.. list-table:: + :widths: 15 85 + :header-rows: 1 + + * - :attr:`right` + - *returned index satisfies* + * - False + - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]`` + * - True + - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]`` + +Args: + input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). + boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined. + +Keyword args: + out_int32 (bool, optional): indicate the output data type. 
torch.int32 if True, torch.int64 otherwise. + Default value is False, i.e. default output data type is torch.int64. + right (bool, optional): if False, return the first suitable location that is found. If True, return the + last such index. If no suitable index found, return 0 for non-numerical value + (eg. nan, inf) or the size of :attr:`boundaries` (one pass the last index). + In other words, if False, gets the lower bound index for each value in :attr:`input` + from :attr:`boundaries`. If True, gets the upper bound index instead. + Default value is False. + out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided. + + +Example:: + + >>> boundaries = torch.tensor([1, 3, 5, 7, 9]) + >>> boundaries + tensor([1, 3, 5, 7, 9]) + >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]]) + >>> v + tensor([[3, 6, 9], + [3, 6, 9]]) + >>> torch.bucketize(v, boundaries) + tensor([[1, 3, 4], + [1, 3, 4]]) + >>> torch.bucketize(v, boundaries, right=True) + tensor([[2, 3, 5], + [2, 3, 5]]) +""", +) + +add_docstr( + torch.view_as_real_copy, + r""" +Performs the same operation as :func:`torch.view_as_real`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.view_as_complex_copy, + r""" +Performs the same operation as :func:`torch.view_as_complex`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.as_strided_copy, + r""" +Performs the same operation as :func:`torch.as_strided`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.diagonal_copy, + r""" +Performs the same operation as :func:`torch.diagonal`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.expand_copy, + r""" +Performs the same operation as :func:`torch.expand`, but all output tensors +are freshly created instead of aliasing the input. 
+""", +) + +add_docstr( + torch.permute_copy, + r""" +Performs the same operation as :func:`torch.permute`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.select_copy, + r""" +Performs the same operation as :func:`torch.select`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.detach_copy, + r""" +Performs the same operation as :func:`torch.detach`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.slice_copy, + r""" +Performs the same operation as :func:`torch.slice`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.split_copy, + r""" +Performs the same operation as :func:`torch.split`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.split_with_sizes_copy, + r""" +Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.squeeze_copy, + r""" +Performs the same operation as :func:`torch.squeeze`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.t_copy, + r""" +Performs the same operation as :func:`torch.t`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.transpose_copy, + r""" +Performs the same operation as :func:`torch.transpose`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.unsqueeze_copy, + r""" +Performs the same operation as :func:`torch.unsqueeze`, but all output tensors +are freshly created instead of aliasing the input. 
+""", +) + +add_docstr( + torch.indices_copy, + r""" +Performs the same operation as :func:`torch.indices`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.values_copy, + r""" +Performs the same operation as :func:`torch.values`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.crow_indices_copy, + r""" +Performs the same operation as :func:`torch.crow_indices`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.col_indices_copy, + r""" +Performs the same operation as :func:`torch.col_indices`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.unbind_copy, + r""" +Performs the same operation as :func:`torch.unbind`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.view_copy, + r""" +Performs the same operation as :func:`torch.view`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.unfold_copy, + r""" +Performs the same operation as :func:`torch.unfold`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.alias_copy, + r""" +Performs the same operation as :func:`torch.alias`, but all output tensors +are freshly created instead of aliasing the input. 
+""", +) + +for unary_base_func_name in ( + "exp", + "sqrt", + "abs", + "acos", + "asin", + "atan", + "ceil", + "cos", + "cosh", + "erf", + "erfc", + "expm1", + "floor", + "log", + "log10", + "log1p", + "log2", + "neg", + "tan", + "tanh", + "sin", + "sinh", + "round", + "lgamma", + "frac", + "reciprocal", + "sigmoid", + "trunc", + "zero", +): + unary_foreach_func_name = f"_foreach_{unary_base_func_name}" + if hasattr(torch, unary_foreach_func_name): + add_docstr( + getattr(torch, unary_foreach_func_name), + rf""" +{unary_foreach_func_name}(self: List[Tensor]) -> List[Tensor] + +Apply :func:`torch.{unary_base_func_name}` to each Tensor of the input list. + """, + ) + unary_inplace_foreach_func_name = f"{unary_foreach_func_name}_" + if hasattr(torch, unary_inplace_foreach_func_name): + add_docstr( + getattr(torch, unary_inplace_foreach_func_name), + rf""" +{unary_inplace_foreach_func_name}(self: List[Tensor]) -> None + +Apply :func:`torch.{unary_base_func_name}` to each Tensor of the input list. 
+ """, + ) diff --git a/evalkit_internvl/lib/python3.10/site-packages/torch/functional.py b/evalkit_internvl/lib/python3.10/site-packages/torch/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..a6c124177a0c61d409c7a58b0a165fc1d89ca4dd --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/torch/functional.py @@ -0,0 +1,1978 @@ +from typing import ( + List, Tuple, Optional, Union, Any, Sequence, TYPE_CHECKING +) +import operator +import itertools + +import torch +from torch._C import _add_docstr +import torch.nn.functional as F +from ._lowrank import svd_lowrank, pca_lowrank +from .overrides import ( + has_torch_function, has_torch_function_unary, has_torch_function_variadic, + handle_torch_function) +from ._jit_internal import boolean_dispatch +from ._jit_internal import _overload as overload + +Tensor = torch.Tensor +from torch import _VF + +__all__ = [ + 'atleast_1d', + 'atleast_2d', + 'atleast_3d', + 'align_tensors', + 'broadcast_shapes', + 'broadcast_tensors', + 'cartesian_prod', + 'block_diag', + 'cdist', + 'chain_matmul', + 'einsum', + 'istft', + 'lu', + 'norm', + 'meshgrid', + 'pca_lowrank', + 'split', + 'stft', + 'svd_lowrank', + 'tensordot', + 'unique', + 'unique_consecutive', + 'unravel_index', +] + + +def broadcast_tensors(*tensors): + r"""broadcast_tensors(*tensors) -> List of Tensors + + Broadcasts the given tensors according to :ref:`broadcasting-semantics`. + + Args: + *tensors: any number of tensors of the same type + + .. warning:: + + More than one element of a broadcasted tensor may refer to a single + memory location. As a result, in-place operations (especially ones that + are vectorized) may result in incorrect behavior. If you need to write + to the tensors, please clone them first. 
+ + Example:: + + >>> x = torch.arange(3).view(1, 3) + >>> y = torch.arange(2).view(2, 1) + >>> a, b = torch.broadcast_tensors(x, y) + >>> a.size() + torch.Size([2, 3]) + >>> a + tensor([[0, 1, 2], + [0, 1, 2]]) + """ + # This wrapper exists to support variadic args. + if has_torch_function(tensors): + return handle_torch_function(broadcast_tensors, tensors, *tensors) + return _VF.broadcast_tensors(tensors) # type: ignore[attr-defined] + + +def broadcast_shapes(*shapes): + r"""broadcast_shapes(*shapes) -> Size + + Similar to :func:`broadcast_tensors` but for shapes. + + This is equivalent to + ``torch.broadcast_tensors(*map(torch.empty, shapes))[0].shape`` + but avoids the need create to intermediate tensors. This is useful for + broadcasting tensors of common batch shape but different rightmost shape, + e.g. to broadcast mean vectors with covariance matrices. + + Example:: + + >>> torch.broadcast_shapes((2,), (3, 1), (1, 1, 1)) + torch.Size([1, 3, 2]) + + Args: + \*shapes (torch.Size): Shapes of tensors. + + Returns: + shape (torch.Size): A shape compatible with all input shapes. + + Raises: + RuntimeError: If shapes are incompatible. + """ + # This wrapper exists to support variadic args. + # TODO Move this to C++ once the jit has better support for torch.Size. 
+ if not torch.jit.is_tracing(): + max_len = 0 + for shape in shapes: + if isinstance(shape, (int, torch.SymInt)): + if max_len < 1: + max_len = 1 + elif isinstance(shape, (tuple, list)): + s = len(shape) + if max_len < s: + max_len = s + result = [1] * max_len + for shape in shapes: + if isinstance(shape, (int, torch.SymInt)): + shape = (shape,) + if isinstance(shape, (tuple, list)): + for i in range(-1, -1 - len(shape), -1): + if shape[i] < 0: + raise RuntimeError(f"Trying to create tensor with negative dimension ({shape[i]}): ({shape[i]})") + if shape[i] == 1 or shape[i] == result[i]: + continue + if result[i] != 1: + raise RuntimeError("Shape mismatch: objects cannot be broadcast to a single shape") + result[i] = shape[i] + else: + raise RuntimeError("Input shapes should be of type ints, a tuple of ints, or a list of ints, got ", shape) + return torch.Size(result) + else: + # with implementation above, torch.jit.trace hardcodes the sizes which makes subsequent replays fail + with torch.no_grad(): + scalar = torch.zeros((), device="cpu") + tensors = [scalar.expand(shape) for shape in shapes] + tensors = broadcast_tensors(*tensors) + return tensors[0].shape + + +def split( + tensor: Tensor, split_size_or_sections: Union[int, List[int]], dim: int = 0 +) -> Tuple[Tensor, ...]: + r"""Splits the tensor into chunks. Each chunk is a view of the original tensor. + + If :attr:`split_size_or_sections` is an integer type, then :attr:`tensor` will + be split into equally sized chunks (if possible). Last chunk will be smaller if + the tensor size along the given dimension :attr:`dim` is not divisible by + :attr:`split_size`. + + If :attr:`split_size_or_sections` is a list, then :attr:`tensor` will be split + into ``len(split_size_or_sections)`` chunks with sizes in :attr:`dim` according + to :attr:`split_size_or_sections`. + + Args: + tensor (Tensor): tensor to split. 
+ split_size_or_sections (int) or (list(int)): size of a single chunk or + list of sizes for each chunk + dim (int): dimension along which to split the tensor. + + Example:: + + >>> a = torch.arange(10).reshape(5, 2) + >>> a + tensor([[0, 1], + [2, 3], + [4, 5], + [6, 7], + [8, 9]]) + >>> torch.split(a, 2) + (tensor([[0, 1], + [2, 3]]), + tensor([[4, 5], + [6, 7]]), + tensor([[8, 9]])) + >>> torch.split(a, [1, 4]) + (tensor([[0, 1]]), + tensor([[2, 3], + [4, 5], + [6, 7], + [8, 9]])) + """ + if has_torch_function_unary(tensor): + return handle_torch_function( + split, (tensor,), tensor, split_size_or_sections, dim=dim) + # Overwriting reason: + # This dispatches to two ATen functions depending on the type of + # split_size_or_sections. The branching code is in _tensor.py, which we + # call here. + return tensor.split(split_size_or_sections, dim) + + +def einsum(*args: Any) -> Tensor: + r"""einsum(equation, *operands) -> Tensor + + Sums the product of the elements of the input :attr:`operands` along dimensions specified using a notation + based on the Einstein summation convention. + + Einsum allows computing many common multi-dimensional linear algebraic array operations by representing them + in a short-hand format based on the Einstein summation convention, given by :attr:`equation`. The details of + this format are described below, but the general idea is to label every dimension of the input :attr:`operands` + with some subscript and define which subscripts are part of the output. The output is then computed by summing + the product of the elements of the :attr:`operands` along the dimensions whose subscripts are not part of the + output. For example, matrix multiplication can be computed using einsum as `torch.einsum("ij,jk->ik", A, B)`. + Here, j is the summation subscript and i and k the output subscripts (see section below for more details on why). 
+ + Equation: + + The :attr:`equation` string specifies the subscripts (letters in `[a-zA-Z]`) for each dimension of + the input :attr:`operands` in the same order as the dimensions, separating subscripts for each operand by a + comma (','), e.g. `'ij,jk'` specify subscripts for two 2D operands. The dimensions labeled with the same subscript + must be broadcastable, that is, their size must either match or be `1`. The exception is if a subscript is + repeated for the same input operand, in which case the dimensions labeled with this subscript for this operand + must match in size and the operand will be replaced by its diagonal along these dimensions. The subscripts that + appear exactly once in the :attr:`equation` will be part of the output, sorted in increasing alphabetical order. + The output is computed by multiplying the input :attr:`operands` element-wise, with their dimensions aligned based + on the subscripts, and then summing out the dimensions whose subscripts are not part of the output. + + Optionally, the output subscripts can be explicitly defined by adding an arrow ('->') at the end of the equation + followed by the subscripts for the output. For instance, the following equation computes the transpose of a + matrix multiplication: 'ij,jk->ki'. The output subscripts must appear at least once for some input operand and + at most once for the output. + + Ellipsis ('...') can be used in place of subscripts to broadcast the dimensions covered by the ellipsis. + Each input operand may contain at most one ellipsis which will cover the dimensions not covered by subscripts, + e.g. for an input operand with 5 dimensions, the ellipsis in the equation `'ab...c'` cover the third and fourth + dimensions. The ellipsis does not need to cover the same number of dimensions across the :attr:`operands` but the + 'shape' of the ellipsis (the size of the dimensions covered by them) must broadcast together. 
If the output is not + explicitly defined with the arrow ('->') notation, the ellipsis will come first in the output (left-most dimensions), + before the subscript labels that appear exactly once for the input operands. e.g. the following equation implements + batch matrix multiplication `'...ij,...jk'`. + + A few final notes: the equation may contain whitespaces between the different elements (subscripts, ellipsis, + arrow and comma) but something like `'. . .'` is not valid. An empty string `''` is valid for scalar operands. + + .. note:: + + ``torch.einsum`` handles ellipsis ('...') differently from NumPy in that it allows dimensions + covered by the ellipsis to be summed over, that is, ellipsis are not required to be part of the output. + + .. note:: + + This function uses opt_einsum (https://optimized-einsum.readthedocs.io/en/stable/) to speed up computation or to + consume less memory by optimizing contraction order. This optimization occurs when there are at least three + inputs, since the order does not matter otherwise. Note that finding _the_ optimal path is an NP-hard problem, + thus, opt_einsum relies on different heuristics to achieve near-optimal results. If opt_einsum is not available, + the default order is to contract from left to right. + + To bypass this default behavior, add the following line to disable the usage of opt_einsum and skip path + calculation: `torch.backends.opt_einsum.enabled = False` + + To specify which strategy you'd like for opt_einsum to compute the contraction path, add the following line: + `torch.backends.opt_einsum.strategy = 'auto'`. The default strategy is 'auto', and we also support 'greedy' and + 'optimal'. Disclaimer that the runtime of 'optimal' is factorial in the number of inputs! See more details in + the opt_einsum documentation (https://optimized-einsum.readthedocs.io/en/stable/path_finding.html). + + .. note:: + + As of PyTorch 1.10 :func:`torch.einsum` also supports the sublist format (see examples below). 
In this format, + subscripts for each operand are specified by sublists, list of integers in the range [0, 52). These sublists + follow their operands, and an extra sublist can appear at the end of the input to specify the output's + subscripts., e.g. `torch.einsum(op1, sublist1, op2, sublist2, ..., [subslist_out])`. Python's `Ellipsis` object + may be provided in a sublist to enable broadcasting as described in the Equation section above. + + Args: + equation (str): The subscripts for the Einstein summation. + operands (List[Tensor]): The tensors to compute the Einstein summation of. + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> # trace + >>> torch.einsum('ii', torch.randn(4, 4)) + tensor(-1.2104) + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> # diagonal + >>> torch.einsum('ii->i', torch.randn(4, 4)) + tensor([-0.1034, 0.7952, -0.2433, 0.4545]) + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> # outer product + >>> x = torch.randn(5) + >>> y = torch.randn(4) + >>> torch.einsum('i,j->ij', x, y) + tensor([[ 0.1156, -0.2897, -0.3918, 0.4963], + [-0.3744, 0.9381, 1.2685, -1.6070], + [ 0.7208, -1.8058, -2.4419, 3.0936], + [ 0.1713, -0.4291, -0.5802, 0.7350], + [ 0.5704, -1.4290, -1.9323, 2.4480]]) + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> # batch matrix multiplication + >>> As = torch.randn(3, 2, 5) + >>> Bs = torch.randn(3, 5, 4) + >>> torch.einsum('bij,bjk->bik', As, Bs) + tensor([[[-1.0564, -1.5904, 3.2023, 3.1271], + [-1.6706, -0.8097, -0.8025, -2.1183]], + + [[ 4.2239, 0.3107, -0.5756, -0.2354], + [-1.4558, -0.3460, 1.5087, -0.8530]], + + [[ 2.8153, 1.8787, -4.3839, -1.2112], + [ 0.3728, -2.1131, 0.0921, 0.8305]]]) + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> # with sublist format and ellipsis + >>> torch.einsum(As, [..., 0, 1], Bs, [..., 1, 2], [..., 0, 2]) + tensor([[[-1.0564, -1.5904, 3.2023, 3.1271], + [-1.6706, -0.8097, -0.8025, -2.1183]], + + [[ 4.2239, 0.3107, -0.5756, 
-0.2354], + [-1.4558, -0.3460, 1.5087, -0.8530]], + + [[ 2.8153, 1.8787, -4.3839, -1.2112], + [ 0.3728, -2.1131, 0.0921, 0.8305]]]) + + >>> # batch permute + >>> A = torch.randn(2, 3, 4, 5) + >>> torch.einsum('...ij->...ji', A).shape + torch.Size([2, 3, 5, 4]) + + >>> # equivalent to torch.nn.functional.bilinear + >>> A = torch.randn(3, 5, 4) + >>> l = torch.randn(2, 5) + >>> r = torch.randn(2, 4) + >>> torch.einsum('bn,anm,bm->ba', l, A, r) + tensor([[-0.3430, -5.2405, 0.4494], + [ 0.3311, 5.5201, -3.0356]]) + """ + import torch.backends.opt_einsum as opt_einsum + # This wrapper exists to support variadic args. + if len(args) < 2: + raise ValueError('einsum(): must specify the equation string and at least one operand, ' + 'or at least one operand and its subscripts list') + + equation = None + operands = None + + if isinstance(args[0], torch.Tensor): + # Convert the subscript list format which is an interleaving of operand and its subscripts + # list with an optional output subscripts list at the end (see documentation for more details on this) + # to the equation string format by creating the equation string from the subscripts list and grouping the + # input operands into a tensorlist (List[Tensor]). + def parse_subscript(n: int) -> str: + if n == Ellipsis: + return '...' 
+ if n >= 0 and n < 26: + return chr(ord('A') + n) + if n >= 26 and n < 52: + return chr(ord('a') + n - 26) + raise ValueError('einsum(): subscript in subscript list is not within the valid range [0, 52)') + + # Parse subscripts for input operands + equation = ','.join(''.join(parse_subscript(s) for s in l) for l in args[1::2]) + + # Parse optional output subscripts (provided when the number of arguments is odd) + if len(args) % 2 == 1: + equation += '->' + ''.join(parse_subscript(s) for s in args[-1]) + operands = args[:-1:2] + else: + operands = args[::2] + else: + equation = args[0] + operands = args[1:] + + if has_torch_function(operands): + return handle_torch_function(einsum, operands, equation, *operands) + + if len(operands) == 1 and isinstance(operands[0], (list, tuple)): + # the old interface of passing the operands as one list argument + _operands = operands[0] + # recurse incase operands contains value that has torch function + # in the original implementation this line is omitted + return einsum(equation, *_operands) + + if len(operands) <= 2 or not opt_einsum.enabled: + # the path for contracting 0 or 1 time(s) is already optimized + # or the user has disabled using opt_einsum + return _VF.einsum(equation, operands) # type: ignore[attr-defined] + + path = None + if opt_einsum.is_available(): + _opt_einsum = opt_einsum.get_opt_einsum() + tupled_path = _opt_einsum.contract_path(equation, *operands, optimize=opt_einsum.strategy)[0] + # flatten path for dispatching to C++ + path = [item for pair in tupled_path for item in pair] + return _VF.einsum(equation, operands, path=path) # type: ignore[attr-defined] + + +# This wrapper exists to support variadic args. 
+if TYPE_CHECKING: + # The JIT doesn't understand Union, so only add type annotation for mypy + def meshgrid(*tensors: Union[Tensor, List[Tensor]], + indexing: Optional[str] = None) -> Tuple[Tensor, ...]: + return _meshgrid(*tensors, indexing=indexing) +else: + def meshgrid(*tensors, indexing: Optional[str] = None) -> Tuple[Tensor, ...]: + r"""Creates grids of coordinates specified by the 1D inputs in `attr`:tensors. + + This is helpful when you want to visualize data over some + range of inputs. See below for a plotting example. + + Given :math:`N` 1D tensors :math:`T_0 \ldots T_{N-1}` as + inputs with corresponding sizes :math:`S_0 \ldots S_{N-1}`, + this creates :math:`N` N-dimensional tensors :math:`G_0 \ldots + G_{N-1}`, each with shape :math:`(S_0, ..., S_{N-1})` where + the output :math:`G_i` is constructed by expanding :math:`T_i` + to the result shape. + + .. note:: + 0D inputs are treated equivalently to 1D inputs of a + single element. + + .. warning:: + `torch.meshgrid(*tensors)` currently has the same behavior + as calling `numpy.meshgrid(*arrays, indexing='ij')`. + + In the future `torch.meshgrid` will transition to + `indexing='xy'` as the default. + + https://github.com/pytorch/pytorch/issues/50276 tracks + this issue with the goal of migrating to NumPy's behavior. + + .. seealso:: + + :func:`torch.cartesian_prod` has the same effect but it + collects the data in a tensor of vectors. + + Args: + tensors (list of Tensor): list of scalars or 1 dimensional tensors. Scalars will be + treated as tensors of size :math:`(1,)` automatically + + indexing: (str, optional): the indexing mode, either "xy" + or "ij", defaults to "ij". See warning for future changes. + + If "xy" is selected, the first dimension corresponds + to the cardinality of the second input and the second + dimension corresponds to the cardinality of the first + input. + + If "ij" is selected, the dimensions are in the same + order as the cardinality of the inputs. 
+ + Returns: + seq (sequence of Tensors): If the input has :math:`N` + tensors of size :math:`S_0 \ldots S_{N-1}``, then the + output will also have :math:`N` tensors, where each tensor + is of shape :math:`(S_0, ..., S_{N-1})`. + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> y = torch.tensor([4, 5, 6]) + + Observe the element-wise pairings across the grid, (1, 4), + (1, 5), ..., (3, 6). This is the same thing as the + cartesian product. + >>> grid_x, grid_y = torch.meshgrid(x, y, indexing='ij') + >>> grid_x + tensor([[1, 1, 1], + [2, 2, 2], + [3, 3, 3]]) + >>> grid_y + tensor([[4, 5, 6], + [4, 5, 6], + [4, 5, 6]]) + + This correspondence can be seen when these grids are + stacked properly. + >>> torch.equal(torch.cat(tuple(torch.dstack([grid_x, grid_y]))), + ... torch.cartesian_prod(x, y)) + True + + `torch.meshgrid` is commonly used to produce a grid for + plotting. + >>> # xdoctest: +REQUIRES(module:matplotlib) + >>> # xdoctest: +REQUIRES(env:DOCTEST_SHOW) + >>> import matplotlib.pyplot as plt + >>> xs = torch.linspace(-5, 5, steps=100) + >>> ys = torch.linspace(-5, 5, steps=100) + >>> x, y = torch.meshgrid(xs, ys, indexing='xy') + >>> z = torch.sin(torch.sqrt(x * x + y * y)) + >>> ax = plt.axes(projection='3d') + >>> ax.plot_surface(x.numpy(), y.numpy(), z.numpy()) + >>> plt.show() + + .. image:: ../_static/img/meshgrid.png + :width: 512 + + """ + return _meshgrid(*tensors, indexing=indexing) + + +def _meshgrid(*tensors, indexing: Optional[str]): + if has_torch_function(tensors): + return handle_torch_function(meshgrid, tensors, *tensors, indexing=indexing) + if len(tensors) == 1 and isinstance(tensors[0], (list, tuple)): + # the old interface of passing the operands as one list argument + tensors = tensors[0] # type: ignore[assignment] + + # Continue allowing call of old method that takes no indexing + # kwarg for forward compatibility reasons. + # + # Remove this two weeks after landing. 
+ kwargs = {} if indexing is None else {'indexing': indexing} + return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] + + +def stft(input: Tensor, n_fft: int, hop_length: Optional[int] = None, + win_length: Optional[int] = None, window: Optional[Tensor] = None, + center: bool = True, pad_mode: str = 'reflect', normalized: bool = False, + onesided: Optional[bool] = None, + return_complex: Optional[bool] = None) -> Tensor: + r"""Short-time Fourier transform (STFT). + + .. warning:: + From version 1.8.0, :attr:`return_complex` must always be given + explicitly for real inputs and `return_complex=False` has been + deprecated. Strongly prefer `return_complex=True` as in a future + pytorch release, this function will only return complex tensors. + + Note that :func:`torch.view_as_real` can be used to recover a real + tensor with an extra last dimension for real and imaginary components. + + .. warning:: + From version 2.1, a warning will be provided if a :attr:`window` is + not specified. In a future release, this attribute will be required. + Not providing a window currently defaults to using a rectangular window, + which may result in undesirable artifacts. Consider using tapered windows, + such as :func:`torch.hann_window`. + + The STFT computes the Fourier transform of short overlapping windows of the + input. This giving frequency components of the signal as they change over + time. The interface of this function is modeled after (but *not* a drop-in + replacement for) librosa_ stft function. + + .. _librosa: https://librosa.org/doc/latest/generated/librosa.stft.html + + Ignoring the optional batch dimension, this method computes the following + expression: + + .. 
math:: + X[\omega, m] = \sum_{k = 0}^{\text{win\_length-1}}% + \text{window}[k]\ \text{input}[m \times \text{hop\_length} + k]\ % + \exp\left(- j \frac{2 \pi \cdot \omega k}{\text{n\_fft}}\right), + + where :math:`m` is the index of the sliding window, and :math:`\omega` is + the frequency :math:`0 \leq \omega < \text{n\_fft}` for ``onesided=False``, + or :math:`0 \leq \omega < \lfloor \text{n\_fft} / 2 \rfloor + 1` for ``onesided=True``. + + * :attr:`input` must be either a 1-D time sequence or a 2-D batch of time + sequences. + + * If :attr:`hop_length` is ``None`` (default), it is treated as equal to + ``floor(n_fft / 4)``. + + * If :attr:`win_length` is ``None`` (default), it is treated as equal to + :attr:`n_fft`. + + * :attr:`window` can be a 1-D tensor of size :attr:`win_length`, e.g., from + :meth:`torch.hann_window`. If :attr:`window` is ``None`` (default), it is + treated as if having :math:`1` everywhere in the window. If + :math:`\text{win\_length} < \text{n\_fft}`, :attr:`window` will be padded on + both sides to length :attr:`n_fft` before being applied. + + * If :attr:`center` is ``True`` (default), :attr:`input` will be padded on + both sides so that the :math:`t`-th frame is centered at time + :math:`t \times \text{hop\_length}`. Otherwise, the :math:`t`-th frame + begins at time :math:`t \times \text{hop\_length}`. + + * :attr:`pad_mode` determines the padding method used on :attr:`input` when + :attr:`center` is ``True``. See :meth:`torch.nn.functional.pad` for + all available options. Default is ``"reflect"``. + + * If :attr:`onesided` is ``True`` (default for real input), only values for + :math:`\omega` in :math:`\left[0, 1, 2, \dots, \left\lfloor + \frac{\text{n\_fft}}{2} \right\rfloor + 1\right]` are returned because + the real-to-complex Fourier transform satisfies the conjugate symmetry, + i.e., :math:`X[m, \omega] = X[m, \text{n\_fft} - \omega]^*`. 
+ Note if the input or window tensors are complex, then :attr:`onesided` + output is not possible. + + * If :attr:`normalized` is ``True`` (default is ``False``), the function + returns the normalized STFT results, i.e., multiplied by :math:`(\text{frame\_length})^{-0.5}`. + + * If :attr:`return_complex` is ``True`` (default if input is complex), the + return is a ``input.dim() + 1`` dimensional complex tensor. If ``False``, + the output is a ``input.dim() + 2`` dimensional real tensor where the last + dimension represents the real and imaginary components. + + Returns either a complex tensor of size :math:`(* \times N \times T)` if + :attr:`return_complex` is true, or a real tensor of size :math:`(* \times N + \times T \times 2)`. Where :math:`*` is the optional batch size of + :attr:`input`, :math:`N` is the number of frequencies where STFT is applied + and :math:`T` is the total number of frames used. + + .. warning:: + This function changed signature at version 0.4.1. Calling with the + previous signature may cause error or return incorrect result. + + Args: + input (Tensor): the input tensor of shape `(B?, L)` where `B?` is an optional + batch dimension + n_fft (int): size of Fourier transform + hop_length (int, optional): the distance between neighboring sliding window + frames. Default: ``None`` (treated as equal to ``floor(n_fft / 4)``) + win_length (int, optional): the size of window frame and STFT filter. + Default: ``None`` (treated as equal to :attr:`n_fft`) + window (Tensor, optional): the optional window function. + Shape must be 1d and `<= n_fft` + Default: ``None`` (treated as window of all :math:`1` s) + center (bool, optional): whether to pad :attr:`input` on both sides so + that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. + Default: ``True`` + pad_mode (str, optional): controls the padding method used when + :attr:`center` is ``True``. 
Default: ``"reflect"`` + normalized (bool, optional): controls whether to return the normalized STFT results + Default: ``False`` + onesided (bool, optional): controls whether to return half of results to + avoid redundancy for real inputs. + Default: ``True`` for real :attr:`input` and :attr:`window`, ``False`` otherwise. + return_complex (bool, optional): whether to return a complex tensor, or + a real tensor with an extra last dimension for the real and + imaginary components. + + .. versionchanged:: 2.0 + ``return_complex`` is now a required argument for real inputs, + as the default is being transitioned to ``True``. + + .. deprecated:: 2.0 + ``return_complex=False`` is deprecated, instead use ``return_complex=True`` + Note that calling :func:`torch.view_as_real` on the output will + recover the deprecated output format. + + Returns: + Tensor: A tensor containing the STFT result with shape `(B?, N, T, C?)` where + - `B?` is an optional batch dimnsion from the input + - `N` is the number of frequency samples, `(n_fft // 2) + 1` for + `onesided=True`, or otherwise `n_fft`. + - `T` is the number of frames, `1 + L // hop_length` + for `center=True`, or `1 + (L - n_fft) // hop_length` otherwise. + - `C?` is an optional length-2 dimension of real and imaginary + components, present when `return_complex=False`. + + """ + if has_torch_function_unary(input): + return handle_torch_function( + stft, (input,), input, n_fft, hop_length=hop_length, win_length=win_length, + window=window, center=center, pad_mode=pad_mode, normalized=normalized, + onesided=onesided, return_complex=return_complex) + # NOTE: Do not edit. 
This code will be removed once the forward-compatibility + # period is over for PR #73432 + if center: + signal_dim = input.dim() + extended_shape = [1] * (3 - signal_dim) + list(input.size()) + pad = int(n_fft // 2) + input = F.pad(input.view(extended_shape), [pad, pad], pad_mode) + input = input.view(input.shape[-signal_dim:]) + return _VF.stft(input, n_fft, hop_length, win_length, window, # type: ignore[attr-defined] + normalized, onesided, return_complex) + + +istft = _add_docstr( + torch.istft, + "istft(input, n_fft, hop_length=None, win_length=None, window=None, center=True, " + "normalized=False, onesided=None, length=None, return_complex=False) -> Tensor:\n" + r""" +Inverse short time Fourier Transform. This is expected to be the inverse of :func:`~torch.stft`. + +.. warning:: + From version 2.1, a warning will be provided if a :attr:`window` is + not specified. In a future release, this attribute will be required. + Please provide the same window used in the stft call. + +It has the same parameters (+ additional optional parameter of :attr:`length`) and it should return the +least squares estimation of the original signal. The algorithm will check using the NOLA condition ( +nonzero overlap). + +Important consideration in the parameters :attr:`window` and :attr:`center` so that the envelop +created by the summation of all the windows is never zero at certain point in time. Specifically, +:math:`\sum_{t=-\infty}^{\infty} |w|^2[n-t\times hop\_length] \cancel{=} 0`. + +Since :func:`~torch.stft` discards elements at the end of the signal if they do not fit in a frame, +``istft`` may return a shorter signal than the original signal (can occur if :attr:`center` is False +since the signal isn't padded). If `length` is given in the arguments and is longer than expected, +``istft`` will pad zeros to the end of the returned signal. + +If :attr:`center` is ``True``, then there will be padding e.g. ``'constant'``, ``'reflect'``, etc. 
+Left padding can be trimmed off exactly because they can be calculated but right padding cannot be +calculated without additional information. + +Example: Suppose the last window is: +``[17, 18, 0, 0, 0]`` vs ``[18, 0, 0, 0, 0]`` + +The :attr:`n_fft`, :attr:`hop_length`, :attr:`win_length` are all the same which prevents the calculation +of right padding. These additional values could be zeros or a reflection of the signal so providing +:attr:`length` could be useful. If :attr:`length` is ``None`` then padding will be aggressively removed +(some loss of signal). + +[1] D. W. Griffin and J. S. Lim, "Signal estimation from modified short-time Fourier transform," +IEEE Trans. ASSP, vol.32, no.2, pp.236-243, Apr. 1984. + +Args: + input (Tensor): The input tensor. Expected to be in the format of :func:`~torch.stft`, + output. That is a complex tensor of shape `(B?, N, T)` where + + - `B?` is an optional batch dimension + - `N` is the number of frequency samples, `(n_fft // 2) + 1` + for onesided input, or otherwise `n_fft`. + - `T` is the number of frames, `1 + length // hop_length` for centered stft, + or `1 + (length - n_fft) // hop_length` otherwise. + + .. versionchanged:: 2.0 + Real datatype inputs are no longer supported. Input must now have a + complex datatype, as returned by ``stft(..., return_complex=True)``. + n_fft (int): Size of Fourier transform + hop_length (Optional[int]): The distance between neighboring sliding window frames. + (Default: ``n_fft // 4``) + win_length (Optional[int]): The size of window frame and STFT filter. (Default: ``n_fft``) + window (Optional[torch.Tensor]): The optional window function. + Shape must be 1d and `<= n_fft` + (Default: ``torch.ones(win_length)``) + center (bool): Whether :attr:`input` was padded on both sides so that the :math:`t`-th frame is + centered at time :math:`t \times \text{hop\_length}`. + (Default: ``True``) + normalized (bool): Whether the STFT was normalized. 
(Default: ``False``) + onesided (Optional[bool]): Whether the STFT was onesided. + (Default: ``True`` if `n_fft != fft_size` in the input size) + length (Optional[int]): The amount to trim the signal by (i.e. the + original signal length). Defaults to `(T - 1) * hop_length` for + centered stft, or `n_fft + (T - 1) * hop_length` otherwise, where `T` + is the number of input frames. + return_complex (Optional[bool]): + Whether the output should be complex, or if the input should be + assumed to derive from a real signal and window. + Note that this is incompatible with ``onesided=True``. + (Default: ``False``) + +Returns: + Tensor: Least squares estimation of the original signal of shape `(B?, length)` where + `B?` is an optional batch dimension from the input tensor. +""") + + +if TYPE_CHECKING: + # These _impl functions return a variable number of tensors as output with + # __torch_function__; tuple unpacking is done already rather than being + # done by the caller of the _impl function + _unique_impl_out = Any +else: + _unique_impl_out = Tuple[Tensor, Tensor, Tensor] + + +def _unique_impl(input: Tensor, sorted: bool = True, + return_inverse: bool = False, return_counts: bool = False, + dim: Optional[int] = None) -> _unique_impl_out: + r"""unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None) -> Tuple[Tensor, Tensor, Tensor] + + Returns the unique elements of the input tensor. + + .. note:: This function is different from :func:`torch.unique_consecutive` in the sense that + this function also eliminates non-consecutive duplicate values. + + .. note:: Currently in the CUDA implementation and the CPU implementation, + `torch.unique` always sort the tensor at the beginning regardless of the `sort` argument. + Sorting could be slow, so if your input tensor is already sorted, it is recommended to use + :func:`torch.unique_consecutive` which avoids the sorting. 
+ + Args: + input (Tensor): the input tensor + sorted (bool): Whether to sort the unique elements in ascending order + before returning as output. + return_inverse (bool): Whether to also return the indices for where + elements in the original input ended up in the returned unique list. + return_counts (bool): Whether to also return the counts for each unique + element. + dim (int, optional): the dimension to operate upon. If ``None``, the + unique of the flattened input is returned. Otherwise, each of the + tensors indexed by the given dimension is treated as one of the + elements to apply the unique operation upon. See examples for more + details. Default: ``None`` + + Returns: + (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing + + - **output** (*Tensor*): the output list of unique scalar elements. + - **inverse_indices** (*Tensor*): (optional) if + :attr:`return_inverse` is True, there will be an additional + returned tensor (same shape as input) representing the indices + for where elements in the original input map to in the output; + otherwise, this function will only return a single tensor. + - **counts** (*Tensor*): (optional) if + :attr:`return_counts` is True, there will be an additional + returned tensor (same shape as output or output.size(dim), + if dim was specified) representing the number of occurrences + for each unique value or tensor. + + Example:: + + >>> output = torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long)) + >>> output + tensor([1, 2, 3]) + + >>> output, inverse_indices = torch.unique( + ... torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_inverse=True) + >>> output + tensor([1, 2, 3]) + >>> inverse_indices + tensor([0, 2, 1, 2]) + + >>> output, inverse_indices = torch.unique( + ... 
torch.tensor([[1, 3], [2, 3]], dtype=torch.long), sorted=True, return_inverse=True) + >>> output + tensor([1, 2, 3]) + >>> inverse_indices + tensor([[0, 2], + [1, 2]]) + + >>> a = torch.tensor([ + ... [ + ... [1, 1, 0, 0], + ... [1, 1, 0, 0], + ... [0, 0, 1, 1], + ... ], + ... [ + ... [0, 0, 1, 1], + ... [0, 0, 1, 1], + ... [1, 1, 1, 1], + ... ], + ... [ + ... [1, 1, 0, 0], + ... [1, 1, 0, 0], + ... [0, 0, 1, 1], + ... ], + ... ]) + + >>> # If we call `torch.unique(a, dim=0)`, each of the tensors `a[idx, :, :]` + >>> # will be compared. We can see that `a[0, :, :]` and `a[2, :, :]` match + >>> # each other, so one of them will be removed. + >>> (a[0, :, :] == a[2, :, :]).all() + tensor(True) + >>> a_unique_dim0 = torch.unique(a, dim=0) + >>> a_unique_dim0 + tensor([[[0, 0, 1, 1], + [0, 0, 1, 1], + [1, 1, 1, 1]], + [[1, 1, 0, 0], + [1, 1, 0, 0], + [0, 0, 1, 1]]]) + + >>> # Notice which sub-tensors from `a` match with the sub-tensors from + >>> # `a_unique_dim0`: + >>> (a_unique_dim0[0, :, :] == a[1, :, :]).all() + tensor(True) + >>> (a_unique_dim0[1, :, :] == a[0, :, :]).all() + tensor(True) + + >>> # For `torch.unique(a, dim=1)`, each of the tensors `a[:, idx, :]` are + >>> # compared. `a[:, 0, :]` and `a[:, 1, :]` match each other, so one of + >>> # them will be removed. + >>> (a[:, 0, :] == a[:, 1, :]).all() + tensor(True) + >>> torch.unique(a, dim=1) + tensor([[[0, 0, 1, 1], + [1, 1, 0, 0]], + [[1, 1, 1, 1], + [0, 0, 1, 1]], + [[0, 0, 1, 1], + [1, 1, 0, 0]]]) + + >>> # For `torch.unique(a, dim=2)`, the tensors `a[:, :, idx]` are compared. + >>> # `a[:, :, 0]` and `a[:, :, 1]` match each other. Also, `a[:, :, 2]` and + >>> # `a[:, :, 3]` match each other as well. So in this case, two of the + >>> # sub-tensors will be removed. 
+ >>> (a[:, :, 0] == a[:, :, 1]).all() + tensor(True) + >>> (a[:, :, 2] == a[:, :, 3]).all() + tensor(True) + >>> torch.unique(a, dim=2) + tensor([[[0, 1], + [0, 1], + [1, 0]], + [[1, 0], + [1, 0], + [1, 1]], + [[0, 1], + [0, 1], + [1, 0]]]) + """ + if has_torch_function_unary(input): + return handle_torch_function( + unique, (input,), input, sorted=sorted, return_inverse=return_inverse, + return_counts=return_counts, dim=dim) + + if dim is not None: + output, inverse_indices, counts = _VF.unique_dim( + input, + dim, + sorted=sorted, + return_inverse=return_inverse, + return_counts=return_counts, + ) + else: + output, inverse_indices, counts = torch._unique2( + input, + sorted=sorted, + return_inverse=return_inverse, + return_counts=return_counts, + ) + return output, inverse_indices, counts + + +def _unique_consecutive_impl(input: Tensor, return_inverse: bool = False, + return_counts: bool = False, + dim: Optional[int] = None) -> _unique_impl_out: + r"""Eliminates all but the first element from every consecutive group of equivalent elements. + + .. note:: This function is different from :func:`torch.unique` in the sense that this function + only eliminates consecutive duplicate values. This semantics is similar to `std::unique` + in C++. + + Args: + input (Tensor): the input tensor + return_inverse (bool): Whether to also return the indices for where + elements in the original input ended up in the returned unique list. + return_counts (bool): Whether to also return the counts for each unique + element. + dim (int): the dimension to apply unique. If ``None``, the unique of the + flattened input is returned. default: ``None`` + + Returns: + (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing + + - **output** (*Tensor*): the output list of unique scalar elements. 
+ - **inverse_indices** (*Tensor*): (optional) if + :attr:`return_inverse` is True, there will be an additional + returned tensor (same shape as input) representing the indices + for where elements in the original input map to in the output; + otherwise, this function will only return a single tensor. + - **counts** (*Tensor*): (optional) if + :attr:`return_counts` is True, there will be an additional + returned tensor (same shape as output or output.size(dim), + if dim was specified) representing the number of occurrences + for each unique value or tensor. + + Example:: + + >>> x = torch.tensor([1, 1, 2, 2, 3, 1, 1, 2]) + >>> output = torch.unique_consecutive(x) + >>> output + tensor([1, 2, 3, 1, 2]) + + >>> output, inverse_indices = torch.unique_consecutive(x, return_inverse=True) + >>> output + tensor([1, 2, 3, 1, 2]) + >>> inverse_indices + tensor([0, 0, 1, 1, 2, 3, 3, 4]) + + >>> output, counts = torch.unique_consecutive(x, return_counts=True) + >>> output + tensor([1, 2, 3, 1, 2]) + >>> counts + tensor([2, 2, 1, 2, 1]) + """ + if has_torch_function_unary(input): + return handle_torch_function( + unique_consecutive, (input,), input, return_inverse=return_inverse, + return_counts=return_counts, dim=dim) + output, inverse_indices, counts = _VF.unique_consecutive( # type: ignore[attr-defined] + input, return_inverse=return_inverse, return_counts=return_counts, dim=dim) + return output, inverse_indices, counts + + +def _return_counts(input, sorted=True, return_inverse=False, return_counts=False, dim=None): + # type: (Tensor, bool, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor] + + if has_torch_function_unary(input): + return _unique_impl(input, sorted, return_inverse, return_counts, dim) + + output, _, counts = _unique_impl(input, sorted, return_inverse, return_counts, dim) + return output, counts + + +def _return_output(input, sorted=True, return_inverse=False, return_counts=False, dim=None): + # type: (Tensor, bool, bool, bool, Optional[int]) -> Tensor + + 
if has_torch_function_unary(input): + return _unique_impl(input, sorted, return_inverse, return_counts, dim) + + output, _, _ = _unique_impl(input, sorted, return_inverse, return_counts, dim) + return output + + +def _return_inverse(input, sorted=True, return_inverse=False, return_counts=False, dim=None): + # type: (Tensor, bool, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor] + + if has_torch_function_unary(input): + return _unique_impl(input, sorted, return_inverse, return_counts, dim) + + output, inverse_indices, _ = _unique_impl(input, sorted, return_inverse, return_counts, dim) + return output, inverse_indices + + +_return_inverse_false = boolean_dispatch( + arg_name='return_counts', + arg_index=3, + default=False, + if_true=_return_counts, + if_false=_return_output, + module_name=__name__, + func_name='unique') + +_return_inverse_true = boolean_dispatch( + arg_name='return_counts', + arg_index=3, + default=False, + if_true=_unique_impl, + if_false=_return_inverse, + module_name=__name__, + func_name='unique') + +# The return type of unique depends on `return_inverse`, and `return_counts` so in order to +# resolve the output type in TorchScript we need to statically know the value of both parameters + +unique = boolean_dispatch( + arg_name='return_inverse', + arg_index=2, + default=False, + if_true=_return_inverse_true, + if_false=_return_inverse_false, + module_name=__name__, + func_name='unique') +unique.__doc__ = _unique_impl.__doc__ + + +def _consecutive_return_counts(input, return_inverse=False, return_counts=False, dim=None): + # type: (Tensor, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor] + + if has_torch_function_unary(input): + return _unique_consecutive_impl(input, return_inverse, return_counts, dim) + + output, _, counts = _unique_consecutive_impl(input, return_inverse, return_counts, dim) + return output, counts + + +def _consecutive_return_output(input, return_inverse=False, return_counts=False, dim=None): + # type: (Tensor, bool, bool, 
Optional[int]) -> Tensor + + if has_torch_function_unary(input): + return _unique_consecutive_impl(input, return_inverse, return_counts, dim) + + output, _, _ = _unique_consecutive_impl(input, return_inverse, return_counts, dim) + return output + + +def _consecutive_return_inverse(input, return_inverse=False, return_counts=False, dim=None): + # type: (Tensor, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor] + + if has_torch_function_unary(input): + return _unique_consecutive_impl(input, return_inverse, return_counts, dim) + + output, inverse_indices, _ = _unique_consecutive_impl(input, return_inverse, return_counts, dim) + return output, inverse_indices + + +_consecutive_return_inverse_false = boolean_dispatch( + arg_name='return_counts', + arg_index=1, + default=False, + if_true=_consecutive_return_counts, + if_false=_consecutive_return_output, + module_name=__name__, + func_name='unique_consecutive') + +_consecutive_return_inverse_true = boolean_dispatch( + arg_name='return_counts', + arg_index=1, + default=False, + if_true=_unique_consecutive_impl, + if_false=_consecutive_return_inverse, + module_name=__name__, + func_name='unique_consecutive') + +# The return type of unique depends on `return_inverse`, and `return_counts` so in order to +# resolve the output type in TorchScript we need to statically know the value of both parameters + +unique_consecutive = boolean_dispatch( + arg_name='return_inverse', + arg_index=2, + default=False, + if_true=_consecutive_return_inverse_true, + if_false=_consecutive_return_inverse_false, + module_name=__name__, + func_name='unique_consecutive') +unique_consecutive.__doc__ = _unique_consecutive_impl.__doc__ + +if TYPE_CHECKING: + pass + # There's no good way to use this type annotation without breaking JIT + # overloads. So leave untyped for mypy for now. 
+else: + @overload + def tensordot(a, b, dims: int = 2, out: Optional[torch.Tensor] = None): + pass + + @overload # noqa: F811 + def tensordot(a, b, dims: Tuple[List[int], List[int]], out: Optional[torch.Tensor] = None): # noqa: F811 + pass + + @overload # noqa: F811 + def tensordot(a, b, dims: List[List[int]], out: Optional[torch.Tensor] = None): # noqa: F811 + pass + + @overload # noqa: F811 + def tensordot(a, b, dims: torch.Tensor, out: Optional[torch.Tensor] = None): # noqa: F811 + pass + + +def tensordot(a, b, dims=2, out: Optional[torch.Tensor] = None): # noqa: F811 + r"""Returns a contraction of a and b over multiple dimensions. + + :attr:`tensordot` implements a generalized matrix product. + + Args: + a (Tensor): Left tensor to contract + b (Tensor): Right tensor to contract + dims (int or Tuple[List[int], List[int]] or List[List[int]] containing two lists or Tensor): number of dimensions to + contract or explicit lists of dimensions for :attr:`a` and + :attr:`b` respectively + + When called with a non-negative integer argument :attr:`dims` = :math:`d`, and + the number of dimensions of :attr:`a` and :attr:`b` is :math:`m` and :math:`n`, + respectively, :func:`~torch.tensordot` computes + + .. math:: + r_{i_0,...,i_{m-d}, i_d,...,i_n} + = \sum_{k_0,...,k_{d-1}} a_{i_0,...,i_{m-d},k_0,...,k_{d-1}} \times b_{k_0,...,k_{d-1}, i_d,...,i_n}. + + When called with :attr:`dims` of the list form, the given dimensions will be contracted + in place of the last :math:`d` of :attr:`a` and the first :math:`d` of :math:`b`. The sizes + in these dimensions must match, but :func:`~torch.tensordot` will deal with broadcasted + dimensions. 
+ + Examples:: + + >>> a = torch.arange(60.).reshape(3, 4, 5) + >>> b = torch.arange(24.).reshape(4, 3, 2) + >>> torch.tensordot(a, b, dims=([1, 0], [0, 1])) + tensor([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> a = torch.randn(3, 4, 5, device='cuda') + >>> b = torch.randn(4, 5, 6, device='cuda') + >>> c = torch.tensordot(a, b, dims=2).cpu() + tensor([[ 8.3504, -2.5436, 6.2922, 2.7556, -1.0732, 3.2741], + [ 3.3161, 0.0704, 5.0187, -0.4079, -4.3126, 4.8744], + [ 0.8223, 3.9445, 3.2168, -0.2400, 3.4117, 1.7780]]) + + >>> a = torch.randn(3, 5, 4, 6) + >>> b = torch.randn(6, 4, 5, 3) + >>> torch.tensordot(a, b, dims=([2, 1, 3], [1, 2, 0])) + tensor([[ 7.7193, -2.4867, -10.3204], + [ 1.5513, -14.4737, -6.5113], + [ -0.2850, 4.2573, -3.5997]]) + """ + if has_torch_function_variadic(a, b): + return handle_torch_function(tensordot, (a, b), a, b, dims=dims, out=out) + + if not isinstance(dims, (tuple, list, torch.Tensor, int, torch.SymInt)): + raise RuntimeError("tensordot expects dims to be int or " + + "Tuple[List[int], List[int]] or " + + "List[List[int]] containing two lists, but got " + + f"dims={dims}") + + dims_a: List[int] = [] + dims_b: List[int] = [] + + if isinstance(dims, (tuple, list)): + dims_a, dims_b = dims + + if isinstance(dims, torch.Tensor): + num_elements = dims.numel() + if num_elements > 1: + assert dims.size()[0] == 2 + dims_a = torch.jit.annotate(List[int], dims[0].tolist()) + dims_b = torch.jit.annotate(List[int], dims[1].tolist()) + else: + dims_val = int(dims.item()) + if dims_val < 0: + raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}") + dims_a = list(range(-dims_val, 0)) + dims_b = list(range(dims_val)) + + if isinstance(dims, (int, torch.SymInt)): + if dims < 0: + raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}") + if dims > min(a.dim(), b.dim()): + raise RuntimeError(f"tensordot expects dims < 
ndim_a or ndim_b, but got dims={dims}") + dims_a = list(range(-dims, 0)) + dims_b = list(range(dims)) + + if out is None: + return _VF.tensordot(a, b, dims_a, dims_b) # type: ignore[attr-defined] + else: + return _VF.tensordot(a, b, dims_a, dims_b, out=out) # type: ignore[attr-defined] + + +def cartesian_prod(*tensors: Tensor) -> Tensor: + """Do cartesian product of the given sequence of tensors. The behavior is similar to + python's `itertools.product`. + + Args: + *tensors: any number of 1 dimensional tensors. + + Returns: + Tensor: A tensor equivalent to converting all the input tensors into lists, + do `itertools.product` on these lists, and finally convert the resulting list + into tensor. + + Example:: + + >>> import itertools + >>> a = [1, 2, 3] + >>> b = [4, 5] + >>> list(itertools.product(a, b)) + [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)] + >>> tensor_a = torch.tensor(a) + >>> tensor_b = torch.tensor(b) + >>> torch.cartesian_prod(tensor_a, tensor_b) + tensor([[1, 4], + [1, 5], + [2, 4], + [2, 5], + [3, 4], + [3, 5]]) + """ + # This wrapper exists to support variadic args. + if has_torch_function(tensors): + return handle_torch_function(cartesian_prod, tensors, *tensors) + return _VF.cartesian_prod(tensors) # type: ignore[attr-defined] + + +def block_diag(*tensors): + """Create a block diagonal matrix from provided tensors. + + Args: + *tensors: One or more tensors with 0, 1, or 2 dimensions. + + Returns: + Tensor: A 2 dimensional tensor with all the input tensors arranged in + order such that their upper left and lower right corners are + diagonally adjacent. All other elements are set to 0. 
+ + Example:: + + >>> import torch + >>> A = torch.tensor([[0, 1], [1, 0]]) + >>> B = torch.tensor([[3, 4, 5], [6, 7, 8]]) + >>> C = torch.tensor(7) + >>> D = torch.tensor([1, 2, 3]) + >>> E = torch.tensor([[4], [5], [6]]) + >>> torch.block_diag(A, B, C, D, E) + tensor([[0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 3, 4, 5, 0, 0, 0, 0, 0], + [0, 0, 6, 7, 8, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 7, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 2, 3, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 4], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 5], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 6]]) + """ + # This wrapper exists to support variadic args. + if has_torch_function(tensors): + return handle_torch_function(block_diag, tensors, *tensors) + return torch._C._VariableFunctions.block_diag(tensors) # type: ignore[attr-defined] + + +def cdist(x1, x2, p=2., compute_mode='use_mm_for_euclid_dist_if_necessary'): + # type: (Tensor, Tensor, float, str) -> (Tensor) + r"""Computes batched the p-norm distance between each pair of the two collections of row vectors. + + Args: + x1 (Tensor): input tensor of shape :math:`B \times P \times M`. + x2 (Tensor): input tensor of shape :math:`B \times R \times M`. + p: p value for the p-norm distance to calculate between each vector pair + :math:`\in [0, \infty]`. + compute_mode: + 'use_mm_for_euclid_dist_if_necessary' - will use matrix multiplication approach to calculate + euclidean distance (p = 2) if P > 25 or R > 25 + 'use_mm_for_euclid_dist' - will always use matrix multiplication approach to calculate + euclidean distance (p = 2) + 'donot_use_mm_for_euclid_dist' - will never use matrix multiplication approach to calculate + euclidean distance (p = 2) + Default: use_mm_for_euclid_dist_if_necessary. + + If x1 has shape :math:`B \times P \times M` and x2 has shape :math:`B \times R \times M` then the + output will have shape :math:`B \times P \times R`. 
+ + This function is equivalent to `scipy.spatial.distance.cdist(input,'minkowski', p=p)` + if :math:`p \in (0, \infty)`. When :math:`p = 0` it is equivalent to + `scipy.spatial.distance.cdist(input, 'hamming') * M`. When :math:`p = \infty`, the closest + scipy function is `scipy.spatial.distance.cdist(xn, lambda x, y: np.abs(x - y).max())`. + + Example: + + >>> a = torch.tensor([[0.9041, 0.0196], [-0.3108, -2.4423], [-0.4821, 1.059]]) + >>> a + tensor([[ 0.9041, 0.0196], + [-0.3108, -2.4423], + [-0.4821, 1.0590]]) + >>> b = torch.tensor([[-2.1763, -0.4713], [-0.6986, 1.3702]]) + >>> b + tensor([[-2.1763, -0.4713], + [-0.6986, 1.3702]]) + >>> torch.cdist(a, b, p=2) + tensor([[3.1193, 2.0959], + [2.7138, 3.8322], + [2.2830, 0.3791]]) + """ + if has_torch_function_variadic(x1, x2): + return handle_torch_function( + cdist, (x1, x2), x1, x2, p=p, compute_mode=compute_mode) + if compute_mode == 'use_mm_for_euclid_dist_if_necessary': + return _VF.cdist(x1, x2, p, None) # type: ignore[attr-defined] + elif compute_mode == 'use_mm_for_euclid_dist': + return _VF.cdist(x1, x2, p, 1) # type: ignore[attr-defined] + elif compute_mode == 'donot_use_mm_for_euclid_dist': + return _VF.cdist(x1, x2, p, 2) # type: ignore[attr-defined] + else: + raise ValueError(f"{compute_mode} is not a valid value for compute_mode") + + +def atleast_1d(*tensors): + r""" + Returns a 1-dimensional view of each input tensor with zero dimensions. + Input tensors with one or more dimensions are returned as-is. + + Args: + input (Tensor or list of Tensors) + + Returns: + output (Tensor or tuple of Tensors) + + Example:: + + >>> x = torch.arange(2) + >>> x + tensor([0, 1]) + >>> torch.atleast_1d(x) + tensor([0, 1]) + >>> x = torch.tensor(1.) + >>> x + tensor(1.) + >>> torch.atleast_1d(x) + tensor([1.]) + >>> x = torch.tensor(0.5) + >>> y = torch.tensor(1.) + >>> torch.atleast_1d((x, y)) + (tensor([0.5000]), tensor([1.])) + """ + # This wrapper exists to support variadic args. 
+ if has_torch_function(tensors): + return handle_torch_function(atleast_1d, tensors, *tensors) + if len(tensors) == 1: + tensors = tensors[0] + return _VF.atleast_1d(tensors) # type: ignore[attr-defined] + + +def atleast_2d(*tensors): + r""" + Returns a 2-dimensional view of each input tensor with zero dimensions. + Input tensors with two or more dimensions are returned as-is. + + Args: + input (Tensor or list of Tensors) + + Returns: + output (Tensor or tuple of Tensors) + + Example:: + + >>> x = torch.tensor(1.) + >>> x + tensor(1.) + >>> torch.atleast_2d(x) + tensor([[1.]]) + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.atleast_2d(x) + tensor([[0, 1], + [2, 3]]) + >>> x = torch.tensor(0.5) + >>> y = torch.tensor(1.) + >>> torch.atleast_2d((x, y)) + (tensor([[0.5000]]), tensor([[1.]])) + """ + # This wrapper exists to support variadic args. + if has_torch_function(tensors): + return handle_torch_function(atleast_2d, tensors, *tensors) + if len(tensors) == 1: + tensors = tensors[0] + return _VF.atleast_2d(tensors) # type: ignore[attr-defined] + + +def atleast_3d(*tensors): + r""" + Returns a 3-dimensional view of each input tensor with zero dimensions. + Input tensors with three or more dimensions are returned as-is. + + Args: + input (Tensor or list of Tensors) + + Returns: + output (Tensor or tuple of Tensors) + + Example: + + >>> x = torch.tensor(0.5) + >>> x + tensor(0.5000) + >>> torch.atleast_3d(x) + tensor([[[0.5000]]]) + >>> y = torch.arange(4).view(2, 2) + >>> y + tensor([[0, 1], + [2, 3]]) + >>> torch.atleast_3d(y) + tensor([[[0], + [1]], + + [[2], + [3]]]) + >>> x = torch.tensor(1).view(1, 1, 1) + >>> x + tensor([[[1]]]) + >>> torch.atleast_3d(x) + tensor([[[1]]]) + >>> x = torch.tensor(0.5) + >>> y = torch.tensor(1.) + >>> torch.atleast_3d((x, y)) + (tensor([[[0.5000]]]), tensor([[[1.]]])) + """ + # This wrapper exists to support variadic args. 
+ if has_torch_function(tensors): + return handle_torch_function(atleast_3d, tensors, *tensors) + if len(tensors) == 1: + tensors = tensors[0] + return _VF.atleast_3d(tensors) # type: ignore[attr-defined] + + +if TYPE_CHECKING: + pass + # There's no good way to use this type annotation; cannot rename norm() to + # _norm_impl() in a way that doesn't break JIT overloads. So leave untyped + # for mypy for now. + # def norm(input: Tensor, + # p: Optional[Union[str, Number]] = "fro", + # dim: Optional[Union[int, List[int]]] = None, + # keepdim: bool = False, + # out: Optional[Tensor] = None, + # dtype: _dtype = None) -> Tensor: + # return _norm_impl(input, p, dim, keepdim, out, dtype) +else: + # TODO: type dim as BroadcastingList when + # https://github.com/pytorch/pytorch/issues/33782 is fixed + @overload + def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): + # type: (Tensor, str, Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor + pass + + @overload # noqa: F811 + def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: F811 + # type: (Tensor, Optional[number], Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor + pass + + @overload # noqa: F811 + def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: F811 + # type: (Tensor, Optional[number], Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor + pass + + @overload # noqa: F811 + def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: F811 + # type: (Tensor, str, Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor + pass + + +def norm(input, p: Optional[Union[float, str]] = "fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: F811 + r"""Returns the matrix norm or vector norm of a given tensor. + + .. warning:: + + torch.norm is deprecated and may be removed in a future PyTorch release. 
+ Its documentation and behavior may be incorrect, and it is no longer + actively maintained. + + Use :func:`torch.linalg.vector_norm` when computing vector norms and + :func:`torch.linalg.matrix_norm` when computing matrix norms. + For a function with a similar behavior as this one see :func:`torch.linalg.norm`. + Note, however, the signature for these functions is slightly different than the + signature for ``torch.norm``. + + Args: + input (Tensor): The input tensor. Its data type must be either a floating + point or complex type. For complex inputs, the norm is calculated using the + absolute value of each element. If the input is complex and neither + :attr:`dtype` nor :attr:`out` is specified, the result's data type will + be the corresponding floating point type (e.g. float if :attr:`input` is + complexfloat). + + p (int, float, inf, -inf, 'fro', 'nuc', optional): the order of norm. Default: ``'fro'`` + The following norms can be calculated: + + ====== ============== ========================== + ord matrix norm vector norm + ====== ============== ========================== + 'fro' Frobenius norm -- + 'nuc' nuclear norm -- + Number -- sum(abs(x)**ord)**(1./ord) + ====== ============== ========================== + + The vector norm can be calculated across any number of dimensions. + The corresponding dimensions of :attr:`input` are flattened into + one dimension, and the norm is calculated on the flattened + dimension. + + Frobenius norm produces the same result as ``p=2`` in all cases + except when :attr:`dim` is a list of three or more dims, in which + case Frobenius norm throws an error. + + Nuclear norm can only be calculated across exactly two dimensions. + + dim (int, tuple of ints, list of ints, optional): + Specifies which dimension or dimensions of :attr:`input` to + calculate the norm across. If :attr:`dim` is ``None``, the norm will + be calculated across all dimensions of :attr:`input`. 
If the norm + type indicated by :attr:`p` does not support the specified number of + dimensions, an error will occur. + keepdim (bool, optional): whether the output tensors have :attr:`dim` + retained or not. Ignored if :attr:`dim` = ``None`` and + :attr:`out` = ``None``. Default: ``False`` + out (Tensor, optional): the output tensor. Ignored if + :attr:`dim` = ``None`` and :attr:`out` = ``None``. + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. If specified, the input tensor is casted to + :attr:`dtype` while performing the operation. Default: None. + + .. note:: + Even though ``p='fro'`` supports any number of dimensions, the true + mathematical definition of Frobenius norm only applies to tensors with + exactly two dimensions. :func:`torch.linalg.matrix_norm` with ``ord='fro'`` + aligns with the mathematical definition, since it can only be applied across + exactly two dimensions. + + Example:: + + >>> import torch + >>> a = torch.arange(9, dtype= torch.float) - 4 + >>> b = a.reshape((3, 3)) + >>> torch.norm(a) + tensor(7.7460) + >>> torch.norm(b) + tensor(7.7460) + >>> torch.norm(a, float('inf')) + tensor(4.) + >>> torch.norm(b, float('inf')) + tensor(4.) + >>> c = torch.tensor([[ 1, 2, 3], [-1, 1, 4]] , dtype=torch.float) + >>> torch.norm(c, dim=0) + tensor([1.4142, 2.2361, 5.0000]) + >>> torch.norm(c, dim=1) + tensor([3.7417, 4.2426]) + >>> torch.norm(c, p=1, dim=1) + tensor([6., 6.]) + >>> d = torch.arange(8, dtype=torch.float).reshape(2, 2, 2) + >>> torch.norm(d, dim=(1, 2)) + tensor([ 3.7417, 11.2250]) + >>> torch.norm(d[0, :, :]), torch.norm(d[1, :, :]) + (tensor(3.7417), tensor(11.2250)) + """ + + if has_torch_function_unary(input): + return handle_torch_function( + norm, (input,), input, p=p, dim=dim, keepdim=keepdim, out=out, dtype=dtype) + + # NB. All the repeated code and weird python is to please TorchScript. 
+ # For a more compact implementation see the relevant function in `_refs/__init__.py` + + # We don't do this for MPS or sparse tensors + if input.layout == torch.strided and input.device.type in \ + ("cpu", "cuda", "meta", torch.utils.backend_registration._privateuse1_backend_name): + if dim is not None: + if isinstance(dim, (int, torch.SymInt)): + _dim = [dim] + else: + _dim = dim + else: + _dim = None # type: ignore[assignment] + + if isinstance(p, str): + if p == "fro" and (dim is None or isinstance(dim, (int, torch.SymInt)) or len(dim) <= 2): + if out is None: + return torch.linalg.vector_norm(input, 2, _dim, keepdim, dtype=dtype) + else: + return torch.linalg.vector_norm(input, 2, _dim, keepdim, dtype=dtype, out=out) + + # Here we either call the nuclear norm, or we call matrix_norm with some arguments + # that will throw an error + if _dim is None: + _dim = list(range(input.ndim)) + if out is None: + return torch.linalg.matrix_norm(input, p, _dim, keepdim, dtype=dtype) + else: + return torch.linalg.matrix_norm(input, p, _dim, keepdim, dtype=dtype, out=out) + else: + # NB. p should be Union[str, number], not Optional! 
+ _p = 2.0 if p is None else p + if out is None: + return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype) + else: + return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype, out=out) + + ndim = input.dim() + + # catch default case + if dim is None and out is None and dtype is None and p is not None: + if isinstance(p, str): + if p == "fro": + return _VF.frobenius_norm(input, dim=(), keepdim=keepdim) + if not isinstance(p, str): + _dim = [i for i in range(ndim)] # noqa: C416 TODO: rewrite as list(range(m)) + return _VF.norm(input, p, dim=_dim, keepdim=keepdim) # type: ignore[attr-defined] + + # TODO: when https://github.com/pytorch/pytorch/issues/33782 is fixed + # remove the overloads where dim is an int and replace with BraodcastingList1 + # and remove next four lines, replace _dim with dim + if dim is not None: + if isinstance(dim, (int, torch.SymInt)): + _dim = [dim] + else: + _dim = dim + else: + _dim = None # type: ignore[assignment] + + if isinstance(p, str): + if p == "fro": + if dtype is not None: + raise ValueError("dtype argument is not supported in frobenius norm") + + if _dim is None: + _dim = list(range(ndim)) + if out is None: + return _VF.frobenius_norm(input, _dim, keepdim=keepdim) # type: ignore[arg-type] + else: + return _VF.frobenius_norm(input, _dim, keepdim=keepdim, out=out) # type: ignore[arg-type] + elif p == "nuc": + if dtype is not None: + raise ValueError("dtype argument is not supported in nuclear norm") + if _dim is None: + if out is None: + return _VF.nuclear_norm(input, keepdim=keepdim) # type: ignore[arg-type] + else: + return _VF.nuclear_norm(input, keepdim=keepdim, out=out) # type: ignore[arg-type] + else: + if out is None: + return _VF.nuclear_norm(input, _dim, keepdim=keepdim) # type: ignore[arg-type] + else: + return _VF.nuclear_norm(input, _dim, keepdim=keepdim, out=out) # type: ignore[arg-type] + raise RuntimeError(f"only valid string values are 'fro' and 'nuc', found {p}") + else: + if _dim is 
None: + _dim = list(range(ndim)) + + if out is None: + if dtype is None: + return _VF.norm(input, p, _dim, keepdim=keepdim) # type: ignore[attr-defined] + else: + return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype) # type: ignore[attr-defined] + else: + if dtype is None: + return _VF.norm(input, p, _dim, keepdim=keepdim, out=out) # type: ignore[attr-defined] + else: + return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype, out=out) # type: ignore[attr-defined] + +def unravel_index(indices: Tensor, shape: Union[int, Sequence[int], torch.Size]) -> List[Tensor]: + r"""Converts a tensor of flat indices into a tuple of coordinate tensors that + index into an arbitrary tensor of the specified shape. + + Args: + indices (Tensor): An integer tensor containing indices into the + flattened version of an arbitrary tensor of shape :attr:`shape`. + All elements must be in the range ``[0, prod(shape) - 1]``. + + shape (int, sequence of ints, or torch.Size): The shape of the arbitrary + tensor. All elements must be non-negative. + + Returns: + tuple of Tensors: Each ``i``-th tensor in the ouput corresponds with + dimension ``i`` of :attr:`shape`. Each tensor has the same shape as + ``indices`` and contains one index into dimension ``i`` for each of the + flat indices given by ``indices``. 
+ + Example:: + + >>> import torch + >>> torch.unravel_index(torch.tensor(4), (3, 2)) + (tensor(2), + tensor(0)) + + >>> torch.unravel_index(torch.tensor([4, 1]), (3, 2)) + (tensor([2, 0]), + tensor([0, 1])) + + >>> torch.unravel_index(torch.tensor([0, 1, 2, 3, 4, 5]), (3, 2)) + (tensor([0, 0, 1, 1, 2, 2]), + tensor([0, 1, 0, 1, 0, 1])) + + >>> torch.unravel_index(torch.tensor([1234, 5678]), (10, 10, 10, 10)) + (tensor([1, 5]), + tensor([2, 6]), + tensor([3, 7]), + tensor([4, 8])) + + >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (10, 10, 10, 10)) + (tensor([[1], [5]]), + tensor([[2], [6]]), + tensor([[3], [7]]), + tensor([[4], [8]])) + + >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (100, 100)) + (tensor([[12], [56]]), + tensor([[34], [78]])) + """ + if has_torch_function_unary(indices): + return handle_torch_function( + unravel_index, (indices,), indices, shape=shape) + res_tensor = _unravel_index(indices, shape) + return res_tensor.unbind(-1) + +def _unravel_index(indices: Tensor, shape: Union[int, Sequence[int]]) -> Tensor: + torch._check_type( + not indices.is_complex() and not indices.is_floating_point() and not indices.dtype == torch.bool, + lambda: f"expected 'indices' to be integer dtype, but got {indices.dtype}") + + torch._check_type( + isinstance(shape, (int, torch.SymInt, Sequence)), + lambda: f"expected 'shape' to be int or sequence of ints, but got {type(shape)}") + + if isinstance(shape, (int, torch.SymInt)): + shape = torch.Size([shape]) + else: + for dim in shape: + torch._check_type( + isinstance(dim, (int, torch.SymInt)), + lambda: f"expected 'shape' sequence to only contain ints, but got {type(dim)}") + shape = torch.Size(shape) + + torch._check_value( + all(dim >= 0 for dim in shape), + lambda: f"'shape' cannot have negative values, but got {tuple(shape)}") + + coefs = list(reversed(list(itertools.accumulate(reversed(shape[1:] + torch.Size([1])), func=operator.mul)))) + return indices.unsqueeze(-1).floor_divide( + 
torch.tensor(coefs, device=indices.device, dtype=torch.int64) + ) % torch.tensor(shape, device=indices.device, dtype=torch.int64) + +def chain_matmul(*matrices, out=None): + r"""Returns the matrix product of the :math:`N` 2-D tensors. This product is efficiently computed + using the matrix chain order algorithm which selects the order in which incurs the lowest cost in terms + of arithmetic operations (`[CLRS]`_). Note that since this is a function to compute the product, :math:`N` + needs to be greater than or equal to 2; if equal to 2 then a trivial matrix-matrix product is returned. + If :math:`N` is 1, then this is a no-op - the original matrix is returned as is. + + .. warning:: + + :func:`torch.chain_matmul` is deprecated and will be removed in a future PyTorch release. + Use :func:`torch.linalg.multi_dot` instead, which accepts a list of two or more tensors + rather than multiple arguments. + + Args: + matrices (Tensors...): a sequence of 2 or more 2-D tensors whose product is to be determined. + out (Tensor, optional): the output tensor. Ignored if :attr:`out` = ``None``. + + Returns: + Tensor: if the :math:`i^{th}` tensor was of dimensions :math:`p_{i} \times p_{i + 1}`, then the product + would be of dimensions :math:`p_{1} \times p_{N + 1}`. + + Example:: + + >>> # xdoctest: +SKIP + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> a = torch.randn(3, 4) + >>> b = torch.randn(4, 5) + >>> c = torch.randn(5, 6) + >>> d = torch.randn(6, 7) + >>> # will raise a deprecation warning + >>> torch.chain_matmul(a, b, c, d) + tensor([[ -2.3375, -3.9790, -4.1119, -6.6577, 9.5609, -11.5095, -3.2614], + [ 21.4038, 3.3378, -8.4982, -5.2457, -10.2561, -2.4684, 2.7163], + [ -0.9647, -5.8917, -2.3213, -5.2284, 12.8615, -12.2816, -2.5095]]) + + .. _`[CLRS]`: https://mitpress.mit.edu/books/introduction-algorithms-third-edition + """ + # This wrapper exists to support variadic args. 
+ if has_torch_function(matrices): + return handle_torch_function(chain_matmul, matrices, *matrices) + + if out is None: + return _VF.chain_matmul(matrices) # type: ignore[attr-defined] + else: + return _VF.chain_matmul(matrices, out=out) # type: ignore[attr-defined] + + +def _lu_impl(A, pivot=True, get_infos=False, out=None): + # type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor] + r"""Computes the LU factorization of a matrix or batches of matrices + :attr:`A`. Returns a tuple containing the LU factorization and + pivots of :attr:`A`. Pivoting is done if :attr:`pivot` is set to + ``True``. + + .. warning:: + + :func:`torch.lu` is deprecated in favor of :func:`torch.linalg.lu_factor` + and :func:`torch.linalg.lu_factor_ex`. :func:`torch.lu` will be removed in a + future PyTorch release. + ``LU, pivots, info = torch.lu(A, compute_pivots)`` should be replaced with + + .. code:: python + + LU, pivots = torch.linalg.lu_factor(A, compute_pivots) + + ``LU, pivots, info = torch.lu(A, compute_pivots, get_infos=True)`` should be replaced with + + .. code:: python + + LU, pivots, info = torch.linalg.lu_factor_ex(A, compute_pivots) + + .. note:: + * The returned permutation matrix for every matrix in the batch is + represented by a 1-indexed vector of size ``min(A.shape[-2], A.shape[-1])``. + ``pivots[i] == j`` represents that in the ``i``-th step of the algorithm, + the ``i``-th row was permuted with the ``j-1``-th row. + * LU factorization with :attr:`pivot` = ``False`` is not available + for CPU, and attempting to do so will throw an error. However, + LU factorization with :attr:`pivot` = ``False`` is available for + CUDA. + * This function does not check if the factorization was successful + or not if :attr:`get_infos` is ``True`` since the status of the + factorization is present in the third element of the return tuple. 
+ * In the case of batches of square matrices with size less or equal + to 32 on a CUDA device, the LU factorization is repeated for + singular matrices due to the bug in the MAGMA library + (see magma issue 13). + * ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`. + + .. warning:: + The gradients of this function will only be finite when :attr:`A` is full rank. + This is because the LU decomposition is just differentiable at full rank matrices. + Furthermore, if :attr:`A` is close to not being full rank, + the gradient will be numerically unstable as it depends on the computation of :math:`L^{-1}` and :math:`U^{-1}`. + + Args: + A (Tensor): the tensor to factor of size :math:`(*, m, n)` + pivot (bool, optional): controls whether pivoting is done. Default: ``True`` + get_infos (bool, optional): if set to ``True``, returns an info IntTensor. + Default: ``False`` + out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``, + then the elements in the tuple are Tensor, IntTensor, + and IntTensor. If :attr:`get_infos` is ``False``, then the + elements in the tuple are Tensor, IntTensor. Default: ``None`` + + Returns: + (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing + + - **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)` + + - **pivots** (*IntTensor*): the pivots of size :math:`(*, \text{min}(m, n))`. + ``pivots`` stores all the intermediate transpositions of rows. + The final permutation ``perm`` could be reconstructed by + applying ``swap(perm[i], perm[pivots[i] - 1])`` for ``i = 0, ..., pivots.size(-1) - 1``, + where ``perm`` is initially the identity permutation of :math:`m` elements + (essentially this is what :func:`torch.lu_unpack` is doing). 
+ + - **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of + size :math:`(*)` where non-zero values indicate whether factorization for the matrix or + each minibatch has succeeded or failed + + Example:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> A = torch.randn(2, 3, 3) + >>> A_LU, pivots = torch.lu(A) + >>> A_LU + tensor([[[ 1.3506, 2.5558, -0.0816], + [ 0.1684, 1.1551, 0.1940], + [ 0.1193, 0.6189, -0.5497]], + + [[ 0.4526, 1.2526, -0.3285], + [-0.7988, 0.7175, -0.9701], + [ 0.2634, -0.9255, -0.3459]]]) + >>> pivots + tensor([[ 3, 3, 3], + [ 3, 3, 3]], dtype=torch.int32) + >>> A_LU, pivots, info = torch.lu(A, get_infos=True) + >>> if info.nonzero().size(0) == 0: + ... print('LU factorization succeeded for all samples!') + LU factorization succeeded for all samples! + """ + # If get_infos is True, then we don't need to check for errors and vice versa + return torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos)) + +if TYPE_CHECKING: + _ListOrSeq = Sequence[Tensor] +else: + _ListOrSeq = List[Tensor] + + +def _check_list_size(out_len: int, get_infos: bool, out: _ListOrSeq) -> None: + get_infos_int = 1 if get_infos else 0 + if out_len - get_infos_int != 2: + raise TypeError(f"expected tuple of {2 + int(get_infos)} elements but got {out_len}") + if not isinstance(out, (tuple, list)): + raise TypeError(f"argument 'out' must be tuple of Tensors, not {type(out).__name__}") + + +def _lu_with_infos(A, pivot=True, get_infos=False, out=None): + # type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor, Tensor]]) -> Tuple[Tensor, Tensor, Tensor] + if has_torch_function_unary(A): + return handle_torch_function( + lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out) + result = _lu_impl(A, pivot, get_infos, out) + if out is not None: + _check_list_size(len(out), get_infos, out) + for i in range(len(out)): + out[i].resize_as_(result[i]).copy_(result[i]) + 
return out + else: + return result # A_LU, pivots, infos + + +def _lu_no_infos(A, pivot=True, get_infos=False, out=None): + # type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tensor] + # need to check for torch_function here so that we exit if + if has_torch_function_unary(A): + return handle_torch_function( + lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out) + result = _lu_impl(A, pivot, get_infos, out) + if out is not None: + _check_list_size(len(out), get_infos, out) + for i in range(len(out)): + out[i].resize_as_(result[i]).copy_(result[i]) + return out + else: + return result[0], result[1] # A_LU, pivots + +# The return type of lu depends on `get_infos`, so in order to resolve the output type +# of lu in TorchScript we need to statically know the value of `get_infos` +lu = boolean_dispatch( + arg_name='get_infos', + arg_index=2, + default=False, + if_true=_lu_with_infos, + if_false=_lu_no_infos, + module_name=__name__, + func_name='lu') +lu.__doc__ = _lu_impl.__doc__ + + +def align_tensors(*tensors): + raise RuntimeError('`align_tensors` not yet implemented.') diff --git a/evalkit_internvl/lib/python3.10/site-packages/torch/overrides.py b/evalkit_internvl/lib/python3.10/site-packages/torch/overrides.py new file mode 100644 index 0000000000000000000000000000000000000000..3084bf0668258ab625793db82a126c9d4573e36c --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/torch/overrides.py @@ -0,0 +1,1958 @@ +""" +Python implementation of ``__torch_function__`` + +While most of the torch API and handling for ``__torch_function__`` happens +at the C++ level, some of the torch API is written in Python so we need +python-level handling for ``__torch_function__`` overrides as well. The main +developer-facing functionality in this file are handle_torch_function and +has_torch_function. See torch/functional.py and test/test_overrides.py +for usage examples. 
+ +Note +---- +heavily inspired by NumPy's ``__array_function__`` (see: +https://github.com/pytorch/pytorch/issues/24015 and +https://www.numpy.org/neps/nep-0018-array-function-protocol.html +) + +If changing this file in a way that can affect ``__torch_function__`` overhead, +please report the benchmarks in ``benchmarks/overrides_benchmark``. See the +instructions in the ``README.md`` in that directory. +""" + +import __future__ # noqa: F404 + +import collections +import functools +import types +import warnings +from typing import Dict, Set, List, Any, Callable, Iterable, Type, Tuple +from functools import wraps +import contextlib + +import torch +from torch._C import ( + _has_torch_function, _has_torch_function_unary, + _has_torch_function_variadic, _add_docstr, + _push_on_torch_function_stack, _pop_torch_function_stack, _get_function_stack_at, _len_torch_function_stack, + _is_torch_function_mode_enabled) + +__all__ = [ + "get_ignored_functions", + "get_overridable_functions", + "get_testing_overrides", + "handle_torch_function", + "has_torch_function", + "resolve_name", + "is_tensor_like", + "is_tensor_method_or_property", + "wrap_torch_function", + "enable_reentrant_dispatch", +] + + +def _disable_user_warnings( + func: Callable, regex: str = '.*is deprecated, please use.*', module: str = 'torch') -> Callable: + """ + Decorator that temporarily disables ``UserWarning``s for the given ``module`` if the warning message matches the + given ``regex`` pattern. + + Arguments + --------- + func : function + Function to disable the warnings for. + regex : str + A regex pattern compilable by ``re.compile``. This is used to match the ``UserWarning`` message. + module : str + The python module to which the filtering should be restricted. + + Returns + ------- + function + The wrapped function. 
+ """ + + @wraps(func) + def wrapper(*args, **kwargs): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=UserWarning, message=regex, module=module) + return func(*args, **kwargs) + return wrapper + + +@functools.lru_cache(None) +@_disable_user_warnings +def get_ignored_functions() -> Set[Callable]: + """ + Return public functions that cannot be overridden by ``__torch_function__``. + + Returns + ------- + Set[Callable] + A tuple of functions that are publicly available in the torch API but cannot + be overridden with ``__torch_function__``. Mostly this is because none of the + arguments of these functions are tensors or tensor-likes. + + Examples + -------- + >>> torch.Tensor.as_subclass in torch.overrides.get_ignored_functions() + True + >>> torch.add in torch.overrides.get_ignored_functions() + False + """ + Tensor = torch.Tensor + return { + torch.typename, + torch.is_tensor, + torch.is_storage, + torch.set_default_tensor_type, + torch.set_default_device, + torch.set_rng_state, + torch.get_rng_state, + torch.manual_seed, + torch.initial_seed, + torch.seed, + torch.save, + torch.load, + torch.set_printoptions, + torch.fork, + torch.get_default_dtype, + torch.get_num_interop_threads, + torch.get_num_threads, + torch.init_num_threads, + torch.import_ir_module, + torch.import_ir_module_from_buffer, + torch.is_anomaly_enabled, + torch.is_anomaly_check_nan_enabled, + torch.is_grad_enabled, + torch.merge_type_from_type_comment, + torch.parse_ir, + torch.parse_schema, + torch.parse_type_comment, + torch.set_anomaly_enabled, + torch.set_flush_denormal, + torch.set_num_interop_threads, + torch.set_num_threads, + torch.wait, + torch.as_tensor, + torch.from_numpy, + torch.get_device, + torch.tensor, + torch.default_generator, + torch.has_cuda, + torch.has_cudnn, + torch.has_lapack, + torch.device, + torch.dtype, + torch.finfo, + torch.has_mkl, + torch.has_mps, + torch.has_mkldnn, + torch.has_openmp, + torch.iinfo, + torch.memory_format, + 
torch.qscheme, + torch.set_grad_enabled, + torch.no_grad, + torch.enable_grad, + torch.inference_mode, + torch.is_inference_mode_enabled, + torch.layout, + torch.align_tensors, + torch.arange, + torch.as_strided, + torch.bartlett_window, + torch.blackman_window, + torch.broadcast_shapes, + torch.can_cast, + torch.compile, + torch.cudnn_affine_grid_generator, + torch.cudnn_batch_norm, + torch.cudnn_convolution, + torch.cudnn_convolution_transpose, + torch.cudnn_convolution_relu, + torch.cudnn_convolution_add_relu, + torch.cudnn_grid_sampler, + torch.cudnn_is_acceptable, + torch.empty, + torch.empty_permuted, + torch.empty_strided, + torch.empty_quantized, + torch.export.dynamic_dim, + torch.export.export, + torch.export.load, + torch.export.register_dataclass, + torch.export.save, + torch.eye, + torch.fft.fftfreq, + torch.fft.rfftfreq, + torch.from_file, + torch.full, + torch.fill, + torch.hamming_window, + torch.hann_window, + torch.kaiser_window, + torch.linspace, + torch.logspace, + torch.mkldnn_adaptive_avg_pool2d, + torch.mkldnn_convolution, + torch.mkldnn_max_pool2d, + torch.mkldnn_max_pool3d, + torch.mkldnn_linear_backward_weights, + torch.mkldnn_rnn_layer, + torch.normal, + torch.ones, + torch.promote_types, + torch.rand, + torch.randn, + torch.randint, + torch.randperm, + torch.range, + torch.result_type, + torch.scalar_tensor, + torch.sparse_coo_tensor, + torch.sparse_compressed_tensor, + torch.sparse_csr_tensor, + torch.sparse_csc_tensor, + torch.sparse_bsr_tensor, + torch.sparse_bsc_tensor, + torch.sym_constrain_range, + torch.sym_constrain_range_for_size, + torch.tril_indices, + torch.triu_indices, + torch.vander, + torch.zeros, + torch._jit_internal.boolean_dispatch, + torch.nn.functional.assert_int_or_pair, + torch.nn.functional.upsample, + torch.nn.functional.upsample_bilinear, + torch.nn.functional.upsample_nearest, + torch.nn.functional.has_torch_function, + torch.nn.functional.has_torch_function_unary, + 
torch.nn.functional.has_torch_function_variadic, + torch.nn.functional.handle_torch_function, + torch.nn.functional.sigmoid, + torch.nn.functional.hardsigmoid, + torch.nn.functional.tanh, + torch.nn.functional._canonical_mask, + torch.nn.functional._none_or_dtype, + # Doesn't actually take or return tensor arguments + torch.nn.init.calculate_gain, + # These are deprecated; don't test them + torch.nn.init.uniform, + torch.nn.init.normal, + torch.nn.init.constant, + torch.nn.init.eye, + torch.nn.init.dirac, + torch.nn.init.xavier_uniform, + torch.nn.init.xavier_normal, + torch.nn.init.kaiming_uniform, + torch.nn.init.kaiming_normal, + torch.nn.init.orthogonal, + torch.nn.init.sparse, + torch.nested.to_padded_tensor, + has_torch_function, + handle_torch_function, + torch.set_autocast_enabled, + torch.is_autocast_enabled, + torch.clear_autocast_cache, + torch.set_autocast_cpu_enabled, + torch.is_autocast_cpu_enabled, + torch.set_autocast_xla_enabled, + torch.is_autocast_xla_enabled, + torch.set_autocast_ipu_enabled, + torch.is_autocast_ipu_enabled, + torch.set_autocast_cpu_dtype, + torch.get_autocast_cpu_dtype, + torch.set_autocast_ipu_dtype, + torch.get_autocast_ipu_dtype, + torch.get_autocast_gpu_dtype, + torch.set_autocast_gpu_dtype, + torch.get_autocast_xla_dtype, + torch.set_autocast_xla_dtype, + torch.autocast_increment_nesting, + torch.autocast_decrement_nesting, + torch.is_autocast_cache_enabled, + torch.set_autocast_cache_enabled, + torch.nn.functional.hardswish, + torch.is_vulkan_available, + torch.are_deterministic_algorithms_enabled, + torch.use_deterministic_algorithms, + torch.is_deterministic_algorithms_warn_only_enabled, + torch.set_deterministic_debug_mode, + torch.get_deterministic_debug_mode, + torch.set_float32_matmul_precision, + torch.get_float32_matmul_precision, + torch.unify_type_list, + torch.is_warn_always_enabled, + torch.set_warn_always, + torch.vitals_enabled, + torch.set_vital, + torch.read_vitals, + torch.vmap, + torch.cond, + 
torch.frombuffer, + torch.asarray, + torch._functional_sym_constrain_range, + torch._make_dep_token, + Tensor.__delitem__, + Tensor.__dir__, + Tensor.__getattribute__, + Tensor.__init__, + Tensor.__iter__, + Tensor.__init_subclass__, + Tensor.__delattr__, + Tensor.__setattr__, + Tensor.__torch_function__, + Tensor.__torch_dispatch__, + Tensor.__new__, + Tensor.__class__, + Tensor.__subclasshook__, + Tensor.__hash__, + Tensor.as_subclass, + Tensor.eig, + Tensor.lstsq, + Tensor.reinforce, + Tensor.new, + Tensor.new_tensor, + Tensor.new_empty, + Tensor.new_empty_strided, + Tensor.new_zeros, + Tensor.new_ones, + Tensor.new_full, + Tensor._make_subclass, + Tensor.solve, + Tensor.symeig, + Tensor.stride, + Tensor.unflatten, + Tensor.to_sparse_coo, + Tensor.to_sparse_csr, + Tensor.to_sparse_csc, + Tensor.to_sparse_bsr, + Tensor.to_sparse_bsc, + Tensor._to_sparse, + Tensor._to_sparse_csr, + Tensor._to_sparse_csc, + Tensor._to_sparse_bsr, + Tensor._to_sparse_bsc, + Tensor._typed_storage, + Tensor._reduce_ex_internal, + Tensor._fix_weakref, + Tensor._view_func, + Tensor._view_func_unsafe, + Tensor._make_wrapper_subclass, + Tensor._python_dispatch.__get__, + Tensor._has_symbolic_sizes_strides.__get__, + Tensor._conj, + Tensor._conj_physical, + Tensor._neg_view, + Tensor._is_zerotensor, + Tensor._is_all_true, + Tensor._is_any_true, + Tensor._addmm_activation, + Tensor.to_padded_tensor, + } + + +@functools.lru_cache(None) +def get_default_nowrap_functions() -> Set[Callable]: + """ + Return public functions that do not wrap in a subclass when invoked by + the default ``Tensor.__torch_function__`` that preserves subclasses. Typically, + these functions represent field accesses (i.e., retrieving a Tensor that + is stored somewhere on the Tensor) as opposed to computation. 
Users of + these functions expect object identity to be preserved over multiple accesses + (e.g., ``a.grad is a.grad``) which cannot be upheld if we're wrapping on + the fly every time (furthermore, the tensor stored here might already be + the subclass, in which case wrapping really ought not to happen). + + Not ALL property accessors have this property; for example ``Tensor.T`` actually + just creates a new transposed tensor on the fly, and so we SHOULD interpose on + these calls (you need to check the implementation of the function to see if + this is the case or not). Additionally, if a property accessor doesn't return a Tensor, + it doesn't have to be on this list (though it is harmless if it is). + """ + Tensor = torch.Tensor + return { + Tensor._base.__get__, + Tensor.grad.__get__, + Tensor._grad.__get__, + } + + +@functools.lru_cache(None) +@_disable_user_warnings +def get_testing_overrides() -> Dict[Callable, Callable]: + """Return a dict containing dummy overrides for all overridable functions + + Returns + ------- + Dict[Callable, Callable] + A dictionary that maps overridable functions in the PyTorch API to + lambda functions that have the same signature as the real function + and unconditionally return -1. These lambda functions are useful + for testing API coverage for a type that defines ``__torch_function__``. + + Examples + -------- + >>> import inspect + >>> my_add = torch.overrides.get_testing_overrides()[torch.add] + >>> inspect.signature(my_add) + + """ + # Every function in the PyTorchAPI that can be overriden needs an entry + # in this dict. + # + # Optimally we would use inspect to get the function signature and define + # the lambda function procedurally but that is blocked by generating + # function signatures for native kernels that can be consumed by inspect. + # See Issue #28233. 
+ Tensor = torch.Tensor + ret: Dict[Callable, Callable] = { + torch.abs: lambda input, out=None: -1, + torch.absolute: lambda input, out=None: -1, + torch.adaptive_avg_pool1d: lambda input, output_size: -1, + torch.adaptive_max_pool1d: lambda inputs, output_size: -1, + torch.acos: lambda input, out=None: -1, + torch.adjoint: lambda input: -1, + torch.arccos: lambda input, out=None: -1, + torch.acosh: lambda input, out=None: -1, + torch.arccosh: lambda input, out=None: -1, + torch.add: lambda input, other, out=None: -1, + torch.addbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1, + torch.addcdiv: lambda input, tensor1, tensor2, value=1, out=None: -1, + torch.addcmul: lambda input, tensor1, tensor2, value=1, out=None: -1, + torch.addmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1, + torch.addmv: lambda input, mat, vec, beta=1, alpha=1, out=None: -1, + torch.addr: lambda input, vec1, vec2, beta=1, alpha=1, out=None: -1, + torch.affine_grid_generator: lambda theta, size, align_corners: -1, + torch.all: lambda input, dim=None: -1, + torch.allclose: lambda input, other, trol=1e-05, atol=1e-08, equal_nan=False: -1, + torch.alpha_dropout: lambda input, p, train, inplace=False: -1, + torch.amax: lambda input, dim=None: -1, + torch.amin: lambda input, dim=None: -1, + torch.aminmax: lambda input, dim=None, keepdim=False, out=None: -1, + torch.angle: lambda input, out=None: -1, + torch.any: lambda input, dim=None, keepdim=False, out=None: -1, + torch.argmax: lambda input: -1, + torch.argmin: lambda input: -1, + torch.argsort: lambda input, dim=None: -1, + torch.asin: lambda input, out=None: -1, + torch._assert_async: lambda input, msg: -1, + torch.arcsin: lambda input, out=None: -1, + torch.asinh: lambda input, out=None: -1, + torch.arcsinh: lambda input, out=None: -1, + torch.atan: lambda input, out=None: -1, + torch.arctan: lambda input, out=None: -1, + torch.atan2: lambda input, other, out=None: -1, + torch.arctan2: lambda input, other, out=None: 
-1, + torch.atanh: lambda input, out=None: -1, + torch.arctanh: lambda input, out=None: -1, + torch.atleast_1d: lambda *tensors: -1, + torch.atleast_2d: lambda *tensors: -1, + torch.atleast_3d: lambda *tensors: -1, + torch.avg_pool1d: lambda input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True: -1, + torch.baddbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1, + torch.batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled: -1, + torch.batch_norm_backward_elemt: lambda grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count_tensor: -1, + torch.batch_norm_backward_reduce: lambda grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g: -1, + torch.batch_norm_elemt: lambda input, weight, bias, mean, invstd, eps: -1, + torch.batch_norm_gather_stats: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1, + torch.batch_norm_gather_stats_with_counts: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1, + torch.batch_norm_stats: lambda input, eps: -1, + torch.batch_norm_update_stats: lambda input, running_mean, running_var, momentum: -1, + torch.bernoulli: lambda input, generator=None, out=None: -1, + torch.bilinear: lambda input1, input2, weight, bias: -1, + torch.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None, reduce=None, + reduction='mean', pos_weight=None: -1), + torch.bincount: lambda input, weights=None, minlength=0: -1, + torch.binomial: lambda count, prob, generator=None: -1, + torch.bitwise_and: lambda input, other, out=None: -1, + torch.bitwise_not: lambda input, out=None: -1, + torch.bitwise_or: lambda input, other, out=None: -1, + torch.bitwise_xor: lambda input, other, out=None: -1, + torch.bitwise_left_shift: lambda input, other, out=None: -1, + torch.bitwise_right_shift: lambda input, other, out=None: -1, + torch.block_diag: lambda *tensors: -1, + 
torch.bmm: lambda input, mat2, out=None: -1, + torch.broadcast_tensors: lambda *tensors: -1, + torch.broadcast_to: lambda self, size: -1, + torch.bucketize: lambda input, boundaries, out_int32=False, right=False, out=None: -1, + torch.cartesian_prod: lambda *tensors: -1, + torch.cat: lambda tensors, dim=0, out=None: -1, + torch.concat: lambda tensors, dim=0, out=None: -1, # alias for torch.cat + torch.concatenate: lambda tensors, dim=0, out=None: -1, # alias for torch.concatenate + torch.cdist: lambda x1, x2, p=2.0, compute_mode='use_mm_for_euclid_dist_if_necessary': -1, + torch.ceil: lambda input, out=None: -1, + torch.celu: lambda input, alpha=1., inplace=False: -1, + torch.chain_matmul: lambda *matrices, out=None: -1, + torch.channel_shuffle: lambda input, groups : -1, + torch.cholesky: lambda input, upper=False, out=None: -1, + torch.linalg.cholesky: lambda input, out=None: -1, + torch.linalg.cholesky_ex: lambda input, check_errors=False, out=None: -1, + torch.cholesky_inverse: lambda input, upper=False, out=None: -1, + torch.cholesky_solve: lambda input1, input2, upper=False, out=None: -1, + torch.choose_qparams_optimized: lambda input, numel, n_bins, ratio, bit_width: -1, + torch.chunk: lambda input, chunks, dim=0: -1, + torch.clamp: lambda input, min=None, max=None, out=None: -1, + torch.clip: lambda input, min=None, max=None, out=None: -1, + torch.clamp_min: lambda input, min, out=None: -1, + torch.clamp_max: lambda input, max, out=None: -1, + torch.column_stack: lambda tensors, out=None: -1, + torch.cov: lambda input, correction=1, fweights=None, aweights=None: -1, + torch.clone: lambda input: -1, + torch.combinations: lambda input, r=2, with_replacement=False: -1, + torch.complex: lambda real, imag: -1, + torch.copysign: lambda input, other, out=None: -1, + torch.polar: lambda abs, ang: -1, + torch.linalg.cond: lambda input, ord=None: -1, + torch.conj: lambda input, out=None: -1, + torch.conj_physical: lambda input, out=None: -1, + torch.resolve_conj: 
lambda input, out=None: -1, + torch.resolve_neg: lambda input, out=None: -1, + torch.constant_pad_nd: lambda input, pad, value=0: -1, + torch.conv1d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1, + torch.conv2d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1, + torch.conv3d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1, + torch.convolution: lambda input, weight, bias, stride, padding, dilation, transposed, output_adding, groups: -1, + torch.conv_tbc: lambda input, weight, bias, pad=0: -1, + torch.conv_transpose1d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1, + torch.conv_transpose2d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1, + torch.conv_transpose3d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1, + torch.corrcoef: lambda input: -1, + torch.cos: lambda input, out=None: -1, + torch.cosine_embedding_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1, + torch.cosh: lambda input, out=None: -1, + torch.cosine_similarity: lambda x1, x2, dim=1, eps=1e-8: -1, + torch.count_nonzero: lambda input: -1, + torch.cross: lambda input, other, dim=None, out=None: -1, + torch.linalg.cross: lambda input, other, dim=-1, out=None: -1, + torch.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean', + zero_infinity=False: -1), + torch.cummax: lambda input, dim, out=None: -1, + torch.cummin: lambda input, dim, out=None: -1, + torch.cumprod: lambda input, dim, out=None, dtype=None: -1, + torch.cumsum: lambda input, dim, out=None, dtype=None: -1, + torch.cumulative_trapezoid: lambda y, x=None, dim=-1: -1, + torch.logcumsumexp: lambda input, dim, out=None: -1, + torch.deg2rad: lambda input, out=None: -1, + torch.dequantize: lambda input: -1, + 
torch.det: lambda input: -1, + torch.linalg.det: lambda input: -1, # alias for torch.det # type: ignore[attr-defined] + torch.detach: lambda input: -1, + torch.diag: lambda input, diagonal=0, out=None: -1, + torch.diag_embed: lambda input, diagonal=0, out=None: -1, + torch.diagflat: lambda input, offset=0: -1, + torch.diff: lambda input, n=1, dim=-1, prepend=None, append=None, out=None: -1, + torch.diagonal: lambda input, offset=0, dim1=0, dim2=1: -1, + torch.linalg.diagonal: lambda input, offset=0, dim1=-2, dim2=-1: -1, + torch.diagonal_scatter: lambda input, src, offset=0, dim1=0, dim2=1: -1, + torch.as_strided_scatter: lambda self, src, size, stride, storage_offset=None: -1, + torch.digamma: lambda input, out=None: -1, + torch.dist: lambda input, other, p=2: -1, + torch.div: lambda input, other, rounding_mode=None, out=None: -1, + torch.divide: lambda input, other, rounding_mode=None, out=None: -1, + torch.dot: lambda input, other, out=None: -1, + torch.dropout: lambda input, p, train, inplace=False: -1, + torch.dsmm: lambda input, mat2: -1, + torch.hsmm: lambda mat1, mat2: -1, + torch.dsplit: lambda input, indices_or_sections: -1, + torch.dstack: lambda tensors, out=None: -1, + torch.linalg.eig: lambda input, out=None: -1, + torch.linalg.eigvals: lambda input, out=None: -1, + torch.linalg.eigh: lambda input, UPLO="L", out=None: -1, + torch.linalg.eigvalsh: lambda input, UPLO="L", out=None: -1, + torch.einsum: lambda equation, *operands: -1, + torch.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, + sparse=False: -1), + torch.embedding_bag: (lambda input, weight, offsets, max_norm=None, norm_type=2, scale_grad_by_freq=False, + mode='mean', sparse=False, per_sample_weights=None, padding_idx=None: -1), + torch.empty_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1, + torch.eq: lambda input, other, out=None: -1, + torch.equal: lambda input, other: -1, + torch.erf: lambda 
input, out=None: -1, + torch.erfc: lambda input, out=None: -1, + torch.erfinv: lambda input, out=None: -1, + torch.exp: lambda input, out=None: -1, + torch.exp2: lambda input, out=None: -1, + torch.expm1: lambda input, out=None: -1, + torch.fake_quantize_per_channel_affine: lambda input, scale, zero_point, axis, quant_min, quant_max: -1, + torch.fake_quantize_per_tensor_affine: lambda input, scale, zero_point, quant_min, quant_max: -1, + torch.fused_moving_avg_obs_fake_quant: (lambda x, observer_on, fake_quant_on, averaging_const, running_min, + running_max, scale, zero_point, quant_min, quant_max, ch_axis, + per_row_fake_quant=False, symmetric_quant=False: -1), + torch.fbgemm_linear_fp16_weight: lambda input, packed_weight, bias: -1, + torch.fbgemm_linear_fp16_weight_fp32_activation: lambda input, packed_weight, bias: -1, + torch.fbgemm_linear_int8_weight: lambda input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias: -1, + torch.fbgemm_linear_int8_weight_fp32_activation: (lambda input, weight, packed, col_offsets, weight_scale, + weight_zero_point, bias: -1), + torch.fbgemm_linear_quantize_weight: lambda input: -1, + torch.fbgemm_pack_gemm_matrix_fp16: lambda input: -1, + torch.fbgemm_pack_quantized_matrix: lambda input, a, b: -1, + torch.feature_alpha_dropout: lambda input, p, train: -1, + torch.feature_dropout: lambda input, p, train: -1, + torch.fft.ifft: lambda input, n=None, dim=-1, norm=None: -1, + torch.fft.rfft: lambda input, n=None, dim=-1, norm=None: -1, + torch.fft.irfft: lambda input, n=None, dim=-1, norm=None: -1, + torch.fft.hfft: lambda input, n=None, dim=-1, norm=None: -1, + torch.fft.ihfft: lambda input, n=None, dim=-1, norm=None: -1, + torch.fft.hfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1, + torch.fft.ihfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1, + torch.fft.hfftn: lambda input, s=None, dim=-1, norm=None: -1, + torch.fft.ihfftn: lambda input, s=None, dim=-1, norm=None: -1, + torch.fft.fftn: lambda 
input, s=None, dim=None, norm=None: -1, + torch.fft.ifftn: lambda input, s=None, dim=None, norm=None: -1, + torch.fft.rfftn: lambda input, s=None, dim=None, norm=None: -1, + torch.fft.irfftn: lambda input, s=None, dim=None, norm=None: -1, + torch.fft.fft2: lambda input, s=None, dim=(-2, -1), norm=None: -1, + torch.fft.ifft2: lambda input, s=None, dim=(-2, -1), norm=None: -1, + torch.fft.rfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1, + torch.fft.irfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1, + torch.fft.fftshift: lambda input, dim=None: -1, + torch.fft.ifftshift: lambda input, dim=None: -1, + torch.fft.fft: lambda input, n=None, dim=-1, norm=None: -1, + torch.fix: lambda input, out=None: -1, + torch.flatten: lambda input, start_dim=0, end_dim=-1: -1, + torch.flip: lambda input, dims: -1, + torch.fliplr: lambda input: -1, + torch.flipud: lambda input: -1, + torch.frobenius_norm: lambda input, dim=None, keepdim=False, out=None: -1, + torch.floor: lambda input, out=None: -1, + torch.floor_divide: lambda input, other: -1, + torch.float_power: lambda input, exponent, out=None: -1, + torch.fmod: lambda input, other, out=None: -1, + torch.frac: lambda input, out=None: -1, + torch.frexp: lambda input, out=None: -1, + torch.full_like: lambda input, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1, + torch._functional_assert_async: lambda input, msg, dep_token: -1, + torch.lu_unpack: lambda LU_data, LU_pivots, unpack_data=True, unpack_pivots=True: -1, + torch.gather: lambda input, dim, index, out=None, sparse_grad=False: -1, + torch.gcd: lambda input, other, out=None: -1, + torch.ge: lambda input, other, out=None: -1, + torch.greater_equal: lambda input, other, out=None: -1, + torch.geqrf: lambda input, out=None: -1, + torch.i0: lambda input, out=None: -1, + torch.inner: lambda input, other, out=None: -1, + torch.outer: lambda input, vec2, out=None: -1, + torch.ger: lambda input, vec2, out=None: -1, # alias 
for torch.outer + torch.gradient: lambda input, spacing=None, dim=None, edge_order=1: -1, + torch.grid_sampler: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1, + torch.grid_sampler_2d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1, + torch.grid_sampler_3d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1, + torch.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05, cudnn_enabled=True: -1, + torch.gru: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1, + torch.gru_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1, + torch.gt: lambda input, other, out=None: -1, + torch.greater: lambda input, other, out=None: -1, + torch.hardshrink: lambda input, lambd=0.5: -1, + torch.heaviside: lambda input, values, out=None: -1, + torch.hinge_embedding_loss: lambda input, target, margin=1.0, size_average=None, reduce=None, reduction='mean': -1, + torch.histc: lambda input, bins=100, min=0, max=0, out=None: -1, + torch.histogram: lambda input, bins=100, min=None, max=None, weight=None, density=False, out=None: -1, + torch.histogramdd: lambda input, bins, range=None, weight=None, density=False: -1, + torch.linalg.householder_product: lambda input, tau: -1, + torch.hspmm: lambda mat1, mat2, out=None: -1, + torch.hsplit: lambda input, indices_or_sections: -1, + torch.hstack: lambda tensors, out=None: -1, + torch.hypot: lambda input, other, out=None: -1, + torch.igamma: lambda input, other, out=None: -1, + torch.igammac: lambda input, other, out=None: -1, + torch.imag: lambda input, out=None: -1, + torch.index_add: lambda input, dim, index, source: -1, + torch.index_copy: lambda input, dim, index, source: -1, + torch.index_put: lambda input, indices, values, accumulate=False: -1, + torch.index_select: lambda input, dim, index, out=None: -1, + torch.index_fill: lambda input, dim, index, value: -1, + torch.index_reduce: lambda input, dim, 
index, source, reduce, include_input=True: -1, + torch.isfinite: lambda tensor: -1, + torch.isin: lambda e, te, assume_unique=False, invert=False: -1, + torch.isinf: lambda tensor: -1, + torch.isreal: lambda tensor: -1, + torch.isposinf: lambda input, out=None: -1, + torch.isneginf: lambda input, out=None: -1, + torch.instance_norm: (lambda input, running_mean, running_var, weight, bias, use_input_stats, momentum, eps, + cudnn_enabled: -1), + torch.int_repr: lambda input: -1, + torch.inverse: lambda input, out=None: -1, + torch.linalg.inv: lambda input, out=None: -1, + torch.linalg.inv_ex: lambda input, check_errors=False, out=None: -1, + torch.is_complex: lambda input: -1, + torch.is_conj: lambda input: -1, + torch.is_neg: lambda input: -1, + torch.is_distributed: lambda input: -1, + torch.is_inference: lambda input: -1, + torch.is_floating_point: lambda input: -1, + torch.is_nonzero: lambda input: -1, + torch.is_same_size: lambda input, other: -1, + torch.is_signed: lambda input: -1, + torch.isclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1, + torch.isnan: lambda input: -1, + torch.istft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True, + normalized=False, onesided=None, length=None, return_complex=False: -1), + torch.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1, + torch.kron: lambda input, other: -1, + torch.kthvalue: lambda input, k, dim=None, keepdim=False, out=None: -1, + torch.linalg.ldl_factor_ex: lambda input, hermitian=False, check_errors=False, out=None: -1, + torch.linalg.ldl_factor: lambda input, hermitian=False, out=None: -1, + torch.linalg.ldl_solve: lambda LD, pivots, B, hermitian=False, out=None: -1, + torch.layer_norm: lambda input, normalized_shape, weight=None, bias=None, esp=1e-05, cudnn_enabled=True: -1, + torch.lcm: lambda input, other, out=None: -1, + torch.ldexp: lambda input, other, out=None: -1, + torch.le: lambda input, other, 
out=None: -1, + torch.less_equal: lambda input, other, out=None: -1, + torch.lerp: lambda input, end, weight, out=None: -1, + torch.lgamma: lambda input, out=None: -1, + torch.lobpcg: lambda input, k=None, B=None, X=None, n=None, iK=None, niter=None, tol=None, largest=None, method=None, + tracker=None, ortho_iparams=None, ortho_fparams=None, ortho_bparams=None: -1, + torch.log: lambda input, out=None: -1, + torch.log_softmax: lambda input, dim, dtype=None: -1, + torch.log10: lambda input, out=None: -1, + torch.log1p: lambda input, out=None: -1, + torch.log2: lambda input, out=None: -1, + torch.logaddexp: lambda input, other, out=None: -1, + torch.logaddexp2: lambda input, other, out=None: -1, + torch.logdet: lambda input: -1, + torch.xlogy: lambda x, y, out=None: -1, + torch.logical_and: lambda input, other, out=None: -1, + torch.logical_not: lambda input, out=None: -1, + torch.logical_or: lambda input, other, out=None: -1, + torch.logical_xor: lambda input, other, out=None: -1, + torch.logit: lambda input, eps=None: -1, + torch.logsumexp: lambda input, names, keepdim=False, out=None: -1, + torch.lstm: lambda data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional: -1, + torch.lstm_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1, + torch.lt: lambda input, other, out=None: -1, + torch.less: lambda input, other, out=None: -1, + torch.lu: lambda A, pivot=True, get_infos=False, out=None: -1, + torch.lu_solve: lambda b, LU_data, LU_pivots, out=None: -1, + torch.margin_ranking_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1, # type: ignore[attr-defined] # noqa: B950 + torch.masked_fill: lambda input, mask, value: -1, + torch.masked_scatter: lambda input, mask, source: -1, + torch.masked_select: lambda input, mask, out=None: -1, + torch.matmul: lambda input, other, out=None: -1, + torch.linalg.lu: lambda input, pivot=True, out=None: -1, + torch.linalg.lu_factor: lambda input, 
pivot=True, out=None: -1, + torch.linalg.lu_factor_ex: lambda input, pivot=True, check_errors=False, out=None: -1, + torch.linalg.lu_solve: lambda LU, pivots, B, left=True, adjoint=False, out=None: -1, + torch.linalg.matmul: lambda input, other, out=None: -1, # alias for torch.matmul + torch.matrix_power: lambda input, n: -1, + torch.linalg.matrix_power: lambda input, n, out=None: -1, + torch.linalg.matrix_rank: lambda input, tol=None, hermitian=False: -1, + torch.linalg.multi_dot: lambda tensors, out=None: -1, + torch.matrix_exp: lambda input: -1, + torch.linalg.matrix_exp: lambda input: -1, + torch.max: lambda input, out=None: -1, + torch.maximum: lambda input, other, out=None: -1, + torch.fmax: lambda input, other, out=None: -1, + torch.max_pool1d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1, + torch.max_pool2d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1, + torch.max_pool3d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1, + torch.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1, + return_indices=False, ceil_mode=False: -1), + torch.mean: lambda input, dim=None: -1, + torch.nanmean: lambda input, dim=None, keepdim=False, dtype=None, out=None: -1, + torch.median: lambda input, dim=None: -1, + torch.nanmedian: lambda input, dim=None: -1, + torch.meshgrid: lambda *tensors, **kwargs: -1, + torch.min: lambda input, out=None: -1, + torch.minimum: lambda input, other, out=None: -1, + torch.fmin: lambda input, other, out=None: -1, + torch.miopen_batch_norm: (lambda input, weight, bias, running_mean, running_var, training, + exponential_average_factor, epsilon: -1), + torch.miopen_convolution: lambda input, weight, bias, padding, stride, dilation, groups, benchmark, deterministic: -1, + torch.miopen_convolution_add_relu: lambda input, weight, z, alpha, bias, stride, padding, dilation, groups: -1, + 
torch.miopen_convolution_relu: lambda input, weight, bias, stride, padding, dilation, groups: -1, + torch.miopen_convolution_transpose: (lambda input, weight, bias, padding, output_padding, stride, dilation, + groups, benchmark, deterministic: -1), + torch.miopen_depthwise_convolution: (lambda input, weight, bias, padding, stride, dilation, groups, benchmark, + deterministic: -1), + torch.miopen_rnn: (lambda input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, + dropout, train, bidirectional, batch_sizes, dropout_state: -1), + torch.mm: lambda input, mat2, out=None: -1, + torch.mode: lambda input, dim=-1, keepdim=False, out=None: -1, + torch.movedim: lambda input, source, destination: -1, + torch.moveaxis: lambda input, source, destination: -1, + torch.msort: lambda input, descending=False, out=None: -1, + torch.mul: lambda input, other, out=None: -1, + torch.multiply: lambda input, other, out=None: -1, + torch.multinomial: lambda input, num_samples, replacement=False, out=None: -1, + torch.mv: lambda input, vec, out=None: -1, + torch.mvlgamma: lambda input, p: -1, + torch.narrow: lambda input, dim, start, length: -1, + torch.nan_to_num: lambda input, nan=0.0, posinf=None, neginf=None, out=None: -1, + torch.native_batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps: -1, + torch._native_batch_norm_legit: lambda input, weight, bias, training, momentum, eps: -1, + torch.native_dropout: lambda input, p, train: -1, + torch.native_layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1, + torch.native_group_norm: lambda input, weight, bias, N, C, HxW, group, eps: -1, + torch.native_norm: lambda input, p=2, dim=None, keepdim=False, dtype=None: -1, + torch.native_channel_shuffle: lambda input, groups : -1, + torch.ne: lambda input, other, out=None: -1, + torch.not_equal: lambda input, other, out=None: -1, + torch.neg: lambda input, out=None: -1, + torch.negative: lambda input, 
out=None: -1, + torch.nextafter: lambda input, other, out=None: -1, + torch.nn.functional.adaptive_avg_pool2d: lambda input, output_size: -1, + torch.nn.functional.adaptive_avg_pool3d: lambda input, output_size: -1, + torch.nn.functional.adaptive_max_pool1d: lambda input, output_size, return_indices=False: -1, + torch.nn.functional.adaptive_max_pool1d_with_indices: lambda input, output_size, return_indices=False: -1, + torch.nn.functional.adaptive_max_pool2d: lambda input, output_size, return_indices=False: -1, + torch.nn.functional.adaptive_max_pool2d_with_indices: lambda input, output_size, return_indices=False: -1, + torch.nn.functional.adaptive_max_pool3d: lambda input, output_size, return_indices=False: -1, + torch.nn.functional.adaptive_max_pool3d_with_indices: lambda input, output_size, return_indices=False: -1, + torch.nn.functional.affine_grid: lambda theta, size, align_corners=None: -1, + torch.nn.functional.alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1, + torch.nn.functional.avg_pool2d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False, + count_include_pad=True, divisor_override=None: -1), + torch.nn.functional.avg_pool3d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False, + count_include_pad=True, divisor_override=None: -1), + torch.nn.functional.batch_norm: (lambda input, running_mean, running_var, weight=None, bias=None, training=False, + momentum=0.1, eps=1e-05: -1), + torch.nn.functional.bilinear: lambda input1, input2, weight, bias=None: -1, + torch.nn.functional.binary_cross_entropy: (lambda input, target, weight=None, size_average=None, reduce=None, + reduction="mean": -1), + torch.nn.functional.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None, + reduce=None, reduction="mean", pos_weight=None: -1), + torch.nn.functional.celu: lambda input, alpha=1.0, inplace=False: -1, + torch.nn.functional.cosine_embedding_loss: (lambda input1, input2, target, 
margin=0, size_average=None, + reduce=None, reduction='mean': -1), + torch.nn.functional.cross_entropy: (lambda input, target, weight=None, size_average=None, ignore_index=-100, + reduce=None, reduction="mean", label_smoothing=0.0: -1), + torch.nn.functional.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0, + reduction='mean', zero_infinity=False: -1), + torch.nn.functional.dropout: lambda input, p=0.5, training=True, inplace=False: -1, + torch.nn.functional.dropout1d: lambda input, p=0.5, training=True, inplace=False: -1, + torch.nn.functional.dropout2d: lambda input, p=0.5, training=True, inplace=False: -1, + torch.nn.functional.dropout3d: lambda input, p=0.5, training=True, inplace=False: -1, + torch.nn.functional.elu: lambda input, alpha=1.0, inplace=False: -1, + torch.nn.functional.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0, + scale_grad_by_freq=False, sparse=False: -1), + torch.nn.functional.embedding_bag: (lambda input, weight, offsets=None, max_norm=None, norm_type=2, + scale_grad_by_freq=False, mode='mean', sparse=False, per_sample_weights=None, + include_last_offset=False, padding_idx=None: -1), + torch.nn.functional.feature_alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1, + torch.nn.functional.fold: lambda input, output_size, kernel_size, dilation=1, padding=0, stride=1: -1, + torch.nn.functional.fractional_max_pool2d: (lambda input, kernel_size, output_size=None, output_ratio=None, + return_indices=False, _random_samples=None: -1), + torch.nn.functional.fractional_max_pool2d_with_indices: ( + lambda input, kernel_size, output_size=None, output_ratio=None, return_indices=False, + _random_samples=None: -1), + torch.nn.functional.fractional_max_pool3d: (lambda input, kernel_size, output_size=None, output_ratio=None, + return_indices=False, _random_samples=None: -1), + torch.nn.functional.fractional_max_pool3d_with_indices: ( + lambda input, kernel_size, 
output_size=None, output_ratio=None, return_indices=False, + _random_samples=None: -1), + torch.nn.functional.gaussian_nll_loss: lambda input, target, var, full=False, eps=1e-06, reduction='mean': -1, + torch.nn.functional.gelu: lambda input, approximate='none': -1, + torch.nn.functional.glu: lambda input, dim=-1: -1, + torch.nn.functional.grid_sample: lambda input, grid, mode='bilinear', padding_mode='zeros', align_corners=None: -1, + torch.nn.functional.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05: -1, + torch.nn.functional.gumbel_softmax: lambda logits, tau=1, hard=False, eps=1e-10, dim=-1: -1, + torch.nn.functional.hardshrink: lambda input, lambd=0.5: -1, + torch.nn.functional.hardtanh: lambda input, min_val=-1., max_val=1., inplace=False: -1, + torch.nn.functional.hinge_embedding_loss: (lambda input, target, margin=1.0, size_average=None, reduce=None, + reduction='mean': -1), + torch.nn.functional.instance_norm: (lambda input, running_mean=None, running_var=None, weight=None, bias=None, + use_input_stats=True, momentum=0.1, eps=1e-05: -1), + torch.nn.functional.interpolate: (lambda input, size=None, scale_factor=None, mode='nearest', align_corners=None, + recompute_scale_factor=None, antialias=False: -1), + torch.nn.functional.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1, + torch.nn.functional.l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1, + torch.nn.functional.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1, + torch.nn.functional.leaky_relu: lambda input, negative_slope=0.01, inplace=False: -1, + torch.nn.functional.linear: lambda input, weight, bias=None: -1, + torch.nn.functional.local_response_norm: lambda input, size, alpha=0.0001, beta=0.75, k=1.0: -1, + torch.nn.functional.log_softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1, + torch.nn.functional.logsigmoid: lambda input: -1, + 
torch.nn.functional.lp_pool1d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1, + torch.nn.functional.lp_pool2d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1, + torch.nn.functional.margin_ranking_loss: (lambda input1, input2, target, margin=0, size_average=None, + reduce=None, reduction='mean': -1), + torch.nn.functional.max_pool1d: (lambda input, kernel_size, stride=None, padding=0, dilation=1, + ceil_mode=False, return_indices=False: -1), + torch.nn.functional.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1, + return_indices=False, ceil_mode=False: -1), + torch.nn.functional.max_pool2d: (lambda input, kernel_size, stride=None, padding=0, dilation=1, + ceil_mode=False, return_indices=False: -1), + torch.nn.functional.max_pool2d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1, + return_indices=False, ceil_mode=False: -1), + torch.nn.functional.max_pool3d: (lambda input, kernel_size, stride=None, padding=0, dilation=1, + return_indices=False, ceil_mode=False: -1), + torch.nn.functional.max_pool3d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1, + return_indices=False, ceil_mode=False: -1), + torch.nn.functional.max_unpool1d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1, + torch.nn.functional.max_unpool2d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1, + torch.nn.functional.max_unpool3d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1, + torch.nn.functional.mse_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1, + torch.nn.functional.multi_head_attention_forward: ( + lambda query, key, value, embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v, + add_zero_attn, dropout_p, out_proj_weight, out_proj_bias, training=True, key_padding_mask=None, + need_weights=True, 
attn_mask=None, use_separate_proj_weight=False, q_proj_weight=None, k_proj_weight=None, + v_proj_weight=None, static_k=None, static_v=None, average_attn_weights=None, is_causal=False: -1), + torch.nn.functional.multi_margin_loss: (lambda input, target, p=1, margin=1.0, weight=None, size_average=None, + reduce=None, reduction='mean': -1), + torch.nn.functional.multilabel_margin_loss: (lambda input, target, size_average=None, reduce=None, + reduction='mean': -1), + torch.nn.functional.multilabel_soft_margin_loss: (lambda input, target, weight=None, size_average=None, + reduce=None, reduction='mean': -1), + torch.nn.functional.nll_loss: (lambda input, target, weight=None, size_average=None, ignore_index=-100, + reduce=None, reduction='mean': -1), + torch.nn.functional.normalize: lambda input, p=2, dim=1, eps=1e-12, out=None: -1, + torch.nn.functional.one_hot: lambda tensor, num_classes=-1: -1, + torch.nn.functional.pad: lambda input, pad, mode='constant', value=0: -1, + torch.nn.functional.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1, + torch.nn.functional.poisson_nll_loss: (lambda input, target, log_input=True, full=False, size_average=None, + eps=1e-08, reduce=None, reduction='mean': -1), + torch.nn.functional.prelu: lambda input, weight: -1, + torch.nn.functional.relu: lambda input, inplace=False: -1, + torch.nn.functional.relu6: lambda input, inplace=False: -1, + torch.nn.functional.rrelu: lambda input, lower=0.125, upper=0.3333333333333333, training=False, inplace=False: -1, + torch.nn.functional.selu: lambda input, inplace=False: -1, + torch.nn.functional.silu: lambda input, inplace=False: -1, + torch.nn.functional.mish: lambda input, inplace=False: -1, + torch.nn.functional.scaled_dot_product_attention: lambda query, key, value, attn_mask=None, dropout_p=0.0: -1, + torch.nn.functional.smooth_l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean', beta=1.: -1, + torch.nn.functional.huber_loss: lambda input, 
target, reduction='mean', delta=1.: -1, + torch.nn.functional.soft_margin_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1, + torch.nn.functional.softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1, + torch.nn.functional.softmin: lambda input, dim=None, _stacklevel=3, dtype=None: -1, + torch.nn.functional.softplus: lambda input, beta=1, threshold=20: -1, + torch.nn.functional.softshrink: lambda input, lambd=0.5: -1, + torch.nn.functional.softsign: lambda input: -1, + torch.nn.functional.tanhshrink: lambda input: -1, + torch.nn.functional.threshold: lambda input, threshold, value, inplace=False: -1, + torch.nn.functional.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06, + swap=False, size_average=None, reduce=None, reduction='mean': -1), + torch.nn.functional.triplet_margin_with_distance_loss: (lambda anchor, positive, negative, *, + distance_function=None, margin=1.0, + swap=False, reduction='mean': -1), + torch.nn.functional.unfold: lambda input, kernel_size, dilation=1, padding=0, stride=1: -1, + torch.nn.init.uniform_: lambda tensor, a=0., b=1., generator=None: -1, + torch.nn.init.normal_: lambda tensor, mean=0., std=1., generator=None: -1, + torch.nn.init.constant_: lambda tensor, val: -1, + torch.nn.init.kaiming_uniform_: lambda tensor, a=0, mode='fan_in', nonlinearity='leaky_relu', generator=None: -1, + torch.nonzero: lambda input, as_tuple=False: -1, + torch.nonzero_static: lambda input, *, size, fill_value=-1: -1, + torch.argwhere: lambda input: -1, + torch.norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1, + torch.linalg.norm: lambda input, ord=None, dim=None, keepdim=False, out=None, dtype=None: -1, + torch.linalg.vector_norm: lambda input, ord=2, dim=None, keepdim=False, out=None, dtype=None: -1, + torch.linalg.matrix_norm: lambda input, ord='fro', dim=(-2, -1), keepdim=False, out=None, dtype=None: -1, + torch.norm_except_dim: lambda v, pow=2, 
dim=0: -1, + torch.nuclear_norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1, + torch.numel: lambda input: -1, + torch.orgqr: lambda input, tau: -1, + torch.ormqr: lambda input, input2, input3, left=True, transpose=False: -1, + torch.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1, + torch.permute: lambda self, dim: -1, + torch.pca_lowrank: lambda input, q=None, center=True, niter=2: -1, + torch.pdist: lambda input, p=2: -1, + torch.pinverse: lambda input, rcond=1e-15: -1, + torch.linalg.pinv: lambda input, rcond=1e-15, hermitian=False: -1, + torch.pixel_shuffle: lambda input, upscale_factor: -1, + torch.pixel_unshuffle: lambda input, downscale_factor: -1, + torch.poisson: lambda input, generator=None: -1, + torch.poisson_nll_loss: lambda input, target, log_input, full, eps, reduction: -1, + torch.polygamma: lambda input, n, out=None: -1, + torch.positive: lambda input, out=None: -1, + torch.prelu: lambda input, weight: -1, + torch.ones_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1, + torch.pow: lambda input, exponent, out=None: -1, + torch.prod: lambda input, dtype=None: -1, + torch.put: lambda input, index, source, accumulate=False: -1, + torch.q_per_channel_axis: lambda input: -1, + torch.q_per_channel_scales: lambda input: -1, + torch.q_per_channel_zero_points: lambda input: -1, + torch.q_scale: lambda input: -1, + torch.q_zero_point: lambda input: -1, + torch.qr: lambda input, some=True, out=None: -1, + torch.linalg.qr: lambda input, mode='reduced', out=None: -1, + torch.quantile: lambda input, q, dim=None, keepdim=False, interpolation='linear', out=None: -1, + torch.nanquantile: lambda input, q, dim=None, keepdim=False, interpolation='linear', out=None: -1, + torch.quantize_per_channel: lambda input, scales, zero_points, axis, dtype: -1, + torch.quantize_per_tensor: lambda input, scale, zero_point, dtype: -1, + torch.quantize_per_tensor_dynamic: lambda input, dtype, 
reduce_range: -1, + torch.quantized_batch_norm: lambda input, weight, bias, mean, var, eps, output_scale, output_zero_point: -1, + torch.quantized_gru_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, + col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1), + + torch.quantized_lstm_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, + col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1), + torch.quantized_max_pool1d: (lambda input, kernel_size, stride=tuple(), padding=(0,), + dilation=(1,), ceil_mode=False: -1), + torch.quantized_max_pool2d: (lambda input, kernel_size, stride=tuple(), padding=(0, 0), + dilation=(1, 1), ceil_mode=False: -1), + torch.quantized_max_pool3d: (lambda input, kernel_size, stride=tuple(), padding=(0, 0, 0), + dilation=(1, 1, 1), ceil_mode=False: -1), + torch.quantized_rnn_relu_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, + col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1), + torch.quantized_rnn_tanh_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, + col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1), + torch.rad2deg: lambda input, out=None: -1, + torch.rand_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1, + torch.randint_like: lambda input, high, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1, + torch.randn_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1, + torch.ravel: lambda input: -1, + torch.real: lambda input, out=None: -1, + torch.vdot: lambda input, other, out=None: -1, + torch.linalg.vecdot: lambda input, other, dim=-1, out=None: -1, + torch.view_as_real: lambda input: -1, + torch.view_as_complex: lambda input: -1, + torch.reciprocal: lambda input, out=None: -1, + torch.relu: lambda input, inplace=False: -1, + 
torch.remainder: lambda input, other, out=None: -1, + torch.renorm: lambda input, p, dim, maxnorm, out=None: -1, + torch.repeat_interleave: lambda input, dim=None: -1, + torch.reshape: lambda input, shape: -1, + torch.rnn_relu: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1, + torch.rnn_relu_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1, + torch.rnn_tanh: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1, + torch.rnn_tanh_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1, + torch.roll: lambda input, shifts, dims=None: -1, + torch.rot90: lambda input, k=1, dims=(0, 1): -1, + torch.round: lambda input, out=None: -1, + torch.row_stack: lambda tensors, out=None: -1, # alias for torch.vstack + torch._rowwise_prune: (lambda weight, mask, compressed_indices_dtype: -1), + torch.rrelu: lambda input, lower=1. / 8, upper=1. / 3, training=False, inplace=False: -1, + torch.rsqrt: lambda input, out=None: -1, + torch.rsub: lambda input, other, alpha=1: -1, + torch.saddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1, + torch.scatter: lambda input, dim, index, src: -1, + torch.scatter_add: lambda input, dim, index, src: -1, + torch.scatter_reduce: lambda input, dim, index, src, reduce, include_self=True: -1, + torch.searchsorted: lambda sorted_sequence, input, out_int32=False, right=False, out=None: -1, + torch._segment_reduce: lambda data, reduce="max", lengths=None, indices=None, offsets=None, axis=0, unsafe=False: -1, + torch.select: lambda input, dim, index: -1, + torch.select_scatter: lambda input, src, dim, index: -1, + torch.slice_scatter: lambda input, src, dim=0, start=None, end=None, step=1: -1, + torch.selu: lambda input, inplace=False: -1, + torch.sigmoid: lambda input, out=None: -1, + torch.sign: lambda input, out=None: -1, + torch.signbit: lambda input, out=None: -1, + torch.sgn: lambda input, out=None: -1, + torch.sin: lambda input, 
out=None: -1, + torch.sinc: lambda input, out=None: -1, + torch.sinh: lambda input, out=None: -1, + torch.slogdet: lambda input: -1, + torch.linalg.slogdet: lambda input: -1, + torch.smm: lambda input, mat2: -1, + torch.spmm: lambda input, mat2: -1, + torch.softmax: lambda input, dim, dtype=None: -1, + torch.linalg.solve: lambda A, B, left=True, out=None: -1, + torch.linalg.solve_ex: lambda A, B, left=True, check_errors=False, out=None: -1, + torch.sort: lambda input, dim=-1, descending=False, *, stable=False, out=None: -1, + torch.split: lambda tensor, split_size_or_sections, dim=0: -1, + torch.split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1, + torch.sqrt: lambda input, out=None: -1, + torch.square: lambda input, out=None: -1, + torch.squeeze: lambda input, dim=None, out=None: -1, + torch.sspaddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1, + torch.stack: lambda tensors, dim=0, out=None: -1, + torch.std: lambda input, dim=None: -1, + torch.std_mean: lambda input, dim=None: -1, + torch.stft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True, + pad_mode='reflect', normalized=False, onesided=True, return_complex=None: -1), + torch.sub: lambda input, other, out=None: -1, + torch.subtract: lambda input, other, out=None: -1, + torch.sum: lambda input, dim=None: -1, + torch.sym_float: lambda input: -1, + torch.sym_int: lambda input: -1, + torch.sym_max: lambda a, b: -1, + torch.sym_min: lambda a, b: -1, + torch.sym_not: lambda input: -1, + torch.sym_ite: lambda a, b, c: -1, + torch.sym_sqrt: lambda input: -1, + torch.nansum: lambda input, dim=None: -1, + torch.svd: lambda input, some=True, compute_uv=True, out=None: -1, + torch.svd_lowrank: lambda input, q=6, niter=2, M=None: -1, + torch.linalg.svd: lambda input, full_matrices=True, out=None: -1, + torch.linalg.svdvals: lambda input, out=None: -1, + torch.swapaxes: lambda input, dim0, dim1: -1, + torch.swapdims: lambda input, axis0, axis1: -1, + 
torch.special.airy_ai: lambda input: -1, + torch.special.bessel_j0: lambda input: -1, + torch.special.bessel_j1: lambda input: -1, + torch.special.bessel_y0: lambda input: -1, + torch.special.bessel_y1: lambda input: -1, + torch.special.chebyshev_polynomial_t: lambda input, n, out=None: -1, + torch.special.chebyshev_polynomial_u: lambda input, n, out=None: -1, + torch.special.chebyshev_polynomial_v: lambda input, n, out=None: -1, + torch.special.chebyshev_polynomial_w: lambda input, n, out=None: -1, + torch.special.digamma: lambda input: -1, + torch.special.entr: lambda input: -1, + torch.special.erf: lambda input: -1, + torch.special.erfc: lambda input: -1, + torch.special.erfcx: lambda input: -1, + torch.special.erfinv: lambda input: -1, + torch.special.exp2: lambda input: -1, + torch.special.expit: lambda input: -1, + torch.special.expm1: lambda input: -1, + torch.special.gammainc: lambda input, other, out=None: -1, + torch.special.gammaincc: lambda input, other, out=None: -1, + torch.special.gammaln: lambda input: -1, + torch.special.hermite_polynomial_h: lambda input, n, out=None: -1, + torch.special.hermite_polynomial_he: lambda input, n, out=None: -1, + torch.special.i0: lambda input: -1, + torch.special.i0e: lambda input: -1, + torch.special.i1: lambda input: -1, + torch.special.i1e: lambda input: -1, + torch.special.laguerre_polynomial_l: lambda input, n, out=None: -1, + torch.special.legendre_polynomial_p: lambda input, n, out=None: -1, + torch.special.log1p: lambda input: -1, + torch.special.log_ndtr: lambda input: -1, + torch.special.log_softmax: lambda input, dim, dtype=None: -1, + torch.special.logit: lambda input: -1, + torch.special.logsumexp: lambda input, dim, keepdim=False, out=None: -1, + torch.special.modified_bessel_i0: lambda input: -1, + torch.special.modified_bessel_i1: lambda input: -1, + torch.special.modified_bessel_k0: lambda input: -1, + torch.special.modified_bessel_k1: lambda input: -1, + torch.special.multigammaln: lambda input, p: 
-1, + torch.special.ndtr: lambda input: -1, + torch.special.ndtri: lambda input: -1, + torch.special.polygamma: lambda input, n, out=None: -1, + torch.special.psi: lambda input: -1, + torch.special.round: lambda input: -1, + torch.special.scaled_modified_bessel_k0: lambda input: -1, + torch.special.scaled_modified_bessel_k1: lambda input: -1, + torch.special.shifted_chebyshev_polynomial_t: lambda input, n, out=None: -1, + torch.special.shifted_chebyshev_polynomial_u: lambda input, n, out=None: -1, + torch.special.shifted_chebyshev_polynomial_v: lambda input, n, out=None: -1, + torch.special.shifted_chebyshev_polynomial_w: lambda input, n, out=None: -1, + torch.special.sinc: lambda input: -1, + torch.special.softmax: lambda input, dim, dtype=None: -1, + torch.special.spherical_bessel_j0: lambda input: -1, + torch.special.xlog1py: lambda input, other, out=None: -1, + torch.special.xlogy: lambda input, other, out=None: -1, + torch.special.zeta: lambda self, other, out=None: -1, + torch.t: lambda input: -1, + torch.take: lambda input, index: -1, + torch.take_along_dim: lambda input, indices, dim=None, out=None: -1, + torch.tan: lambda input, out=None: -1, + torch.tanh: lambda input, out=None: -1, + torch.linalg.tensorinv: lambda a, ind=2: -1, + torch.linalg.tensorsolve: lambda a, b, dims=None: -1, + torch.tensordot: lambda a, b, dims=2, out=None: -1, + torch.tensor_split: lambda input, indices_or_sections, dim=0: -1, + torch.threshold: lambda input, threshold, value, inplace=False: -1, + torch.tile: lambda input, dims: -1, + torch.topk: lambda input, k, dim=-1, descending=False, out=None: -1, + torch.trace: lambda input: -1, + torch.transpose: lambda input, dim0, dim1: -1, + torch.trapz: lambda y, x=None, dim=-1: -1, + torch.trapezoid: lambda y, x=None, dim=-1: -1, + torch.triangular_solve: lambda input, A, upper=True, transpose=False, unitriangular=False: -1, + torch.linalg.solve_triangular: lambda input, B, upper, left=True, unitriangular=False: -1, + torch.tril: 
lambda input, diagonal=0, out=None: -1, + torch.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06, swap=False, + + size_average=None, reduce=None, reduction='mean': -1), + torch.triu: lambda input, diagonal=0, out=None: -1, + torch.true_divide: lambda input, other: -1, + torch.trunc: lambda input, out=None: -1, + torch.unbind: lambda input, dim=0: -1, + torch.unflatten: lambda input, dim, sizes, names: -1, + torch.unique: lambda input, sorted=True, return_inverse=False, return_counts=False, dim=None: -1, + torch.unique_consecutive: lambda input, return_inverse=False, return_counts=False, dim=None: -1, + torch.unravel_index: lambda indices, shape: -1, + torch.unsafe_chunk: lambda input, chunks, dim=0: -1, + torch.unsafe_split: lambda tensor, split_size_or_sections, dim=0: -1, + torch.unsafe_split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1, + torch.unsqueeze: lambda input, dim, out=None: -1, + torch.linalg.vander: lambda x, N=None: -1, + torch.var: lambda input, dim=None: -1, + torch.var_mean: lambda input, dim=None: -1, + torch.vsplit: lambda input, indices_or_sections: -1, + torch.vstack: lambda tensors, out=None: -1, + torch.where: lambda condition, x=None, y=None: -1, + torch.zeros_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1, + torch._fw_primal_copy: lambda self, level: -1, + torch._make_dual_copy: lambda primal, tangent, level: -1, + torch.view_as_real_copy: lambda self: -1, + torch.view_as_complex_copy: lambda self: -1, + torch._conj_copy: lambda self: -1, + torch._neg_view_copy: lambda self: -1, + torch.as_strided_copy: lambda self, size, stride, storage_offset=None: -1, + torch._sparse_broadcast_to_copy: lambda self, size: -1, + torch.diagonal_copy: lambda self, offset=0, dim1=0, dim2=1: -1, + torch.expand_copy: lambda self, size, *, implicit=False: -1, + torch.narrow_copy: lambda self, dim, start, length: -1, + torch.permute_copy: lambda self, dims: -1, + 
torch._reshape_alias_copy: lambda self, size, stride: -1, + torch.select_copy: lambda self, dim, index: -1, + torch.detach_copy: lambda self: -1, + torch.slice_copy: lambda self, dim=0, start=None, end=None, step=1: -1, + torch.split_copy: lambda self, split_size, dim=0: -1, + torch.split_with_sizes_copy: lambda self, split_sizes, dim=0: -1, + torch.squeeze_copy: lambda self, dim: -1, + torch.t_copy: lambda self: -1, + torch.transpose_copy: lambda self, dim0, dim1: -1, + torch.unsqueeze_copy: lambda self, dim: -1, + torch._indices_copy: lambda self: -1, + torch._values_copy: lambda self: -1, + torch.indices_copy: lambda self: -1, + torch.values_copy: lambda self: -1, + torch.crow_indices_copy: lambda self: -1, + torch.col_indices_copy: lambda self: -1, + torch.ccol_indices_copy: lambda self: -1, + torch.row_indices_copy: lambda self: -1, + torch.unbind_copy: lambda self, dim=0: -1, + torch.view_copy: lambda self, dtype: -1, + torch.unfold_copy: lambda self, dimension, size, step: -1, + torch.alias_copy: lambda self: -1, + Tensor.__floordiv__: lambda self, other: -1, + Tensor.__rfloordiv__: lambda self, other: -1, + Tensor.__ifloordiv__: lambda self, other: -1, + Tensor.__truediv__: lambda self, other: -1, + Tensor.__rtruediv__: lambda self, other: -1, + Tensor.__itruediv__: lambda self, other: -1, + Tensor.__lshift__: lambda self, other: -1, + Tensor.__rlshift__: lambda self, other: -1, + Tensor.__ilshift__: lambda self, other: -1, + Tensor.__rshift__: lambda self, other: -1, + Tensor.__rrshift__: lambda self, other: -1, + Tensor.__irshift__: lambda self, other: -1, + Tensor.__and__: lambda self, other: -1, + Tensor.__or__: lambda self, other: -1, + Tensor.__xor__: lambda self, other: -1, + Tensor.__float__: lambda self: -1, + Tensor.__complex__: lambda self: -1, + Tensor.__array__: lambda self, dtype: -1, + Tensor.__bool__: lambda self: -1, + Tensor.__contains__: lambda self, other: -1, + Tensor.__neg__: lambda self: -1, + Tensor.__invert__: lambda self: -1, + 
Tensor.__mod__: lambda self, other: -1, + Tensor.__rmod__: lambda self, other: -1, + Tensor.__imod__: lambda self, other: -1, + Tensor.__array_wrap__: lambda self, array: -1, + Tensor.__getitem__: lambda self, idx: -1, + Tensor.__deepcopy__: lambda self, memo: -1, + Tensor.__int__: lambda self: -1, + Tensor.__long__: lambda self: -1, + Tensor.__index__: lambda self: -1, + Tensor.__len__: lambda self: -1, + Tensor.__format__: lambda self, format_spec: -1, + Tensor.__reduce_ex__: lambda self, proto: -1, + Tensor.__reversed__: lambda self: -1, + Tensor.__repr__: lambda self, *, tensor_contents=None: -1, + Tensor.__setitem__: lambda self, k, v: -1, + Tensor.__setstate__: lambda self, d: -1, + Tensor.T.__get__: lambda self: -1, + Tensor.H.__get__: lambda self: -1, + Tensor.mT.__get__: lambda self: -1, + Tensor.mH.__get__: lambda self: -1, + Tensor._backward_hooks.__get__: lambda self: -1, + Tensor._post_accumulate_grad_hooks.__get__: lambda self: -1, + Tensor._base.__get__: lambda self: -1, + Tensor._cdata.__get__: lambda self: -1, + Tensor.grad.__get__: lambda self: -1, + Tensor._grad.__get__: lambda self: -1, + Tensor._grad_fn.__get__: lambda self: -1, + Tensor.grad_fn.__get__: lambda self: -1, + Tensor._version.__get__: lambda self: -1, + Tensor._autocast_to_reduced_precision: lambda self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype: -1, + Tensor._autocast_to_full_precision: lambda self, cuda_enabled, cpu_enabled: -1, + Tensor.data.__get__: lambda self: -1, + Tensor.device.__get__: lambda self: -1, + Tensor.dtype.__get__: lambda self: -1, + Tensor.is_cuda.__get__: lambda self: -1, + Tensor.is_cpu.__get__: lambda self: -1, + Tensor.is_xla.__get__: lambda self: -1, + Tensor.is_xpu.__get__: lambda self: -1, + Tensor.is_ipu.__get__: lambda self: -1, + Tensor.is_leaf.__get__: lambda self: -1, + Tensor.retains_grad.__get__: lambda self: -1, + Tensor.is_meta.__get__: lambda self: -1, + Tensor.is_mps.__get__: lambda self: -1, + Tensor.is_mtia.__get__: lambda self: -1, + 
Tensor.is_nested.__get__: lambda self: -1, + Tensor.is_ort.__get__: lambda self: -1, + Tensor.is_mkldnn.__get__: lambda self: -1, + Tensor.is_quantized.__get__: lambda self: -1, + Tensor.is_sparse.__get__: lambda self: -1, + Tensor.is_sparse_csr.__get__: lambda self: -1, + Tensor.is_vulkan.__get__: lambda self: -1, + Tensor.itemsize.__get__: lambda self: -1, + Tensor.layout.__get__: lambda self: -1, + Tensor.name.__get__: lambda self: -1, + Tensor.names.__get__: lambda self: -1, + Tensor.nbytes.__get__: lambda self: -1, + Tensor.ndim.__get__: lambda self: -1, + Tensor.output_nr.__get__: lambda self: -1, + Tensor.requires_grad.__get__: lambda self: -1, + Tensor.shape.__get__: lambda self: -1, + Tensor.volatile.__get__: lambda self: -1, + Tensor.real.__get__: lambda self: -1, + Tensor.imag.__get__: lambda self: -1, + Tensor.__cuda_array_interface__.__get__: lambda self: -1, + Tensor.type: lambda self, dtype=None, non_blocking=False, **kwargs: -1, + Tensor._dimI: lambda self: -1, + Tensor._dimV: lambda self: -1, + Tensor._indices: lambda self: -1, + Tensor._is_view: lambda self: -1, + Tensor._nnz: lambda self: -1, + Tensor.crow_indices: lambda self: -1, + Tensor.col_indices: lambda self: -1, + Tensor.ccol_indices: lambda self: -1, + Tensor.row_indices: lambda self: -1, + Tensor._update_names: lambda self, names, inplace: -1, + Tensor._values: lambda self: -1, + Tensor.adjoint: lambda self: -1, + Tensor.align_as: lambda self, other: -1, + Tensor.align_to: lambda self, order, ellipsis_idx: -1, + Tensor.apply_: lambda self, callable: -1, + Tensor.as_strided: lambda self, size, stride: -1, + Tensor.as_strided_: lambda self, size, stride: -1, + Tensor.backward: lambda self, gradient=None, retain_graph=None, create_graph=False, inputs=None: -1, + Tensor.bfloat16: lambda self, memory_format=torch.preserve_format: -1, + Tensor.bool: lambda self, memory_format=torch.preserve_format: -1, + Tensor.byte: lambda self, memory_format=torch.preserve_format: -1, + Tensor.char: lambda 
self, memory_format=torch.preserve_format: -1, + Tensor.cauchy_: lambda self, median=0, sigma=1, *, generator=None: -1, + Tensor.coalesce: lambda self: -1, + Tensor._coalesced_: lambda self, coalesced: -1, + Tensor.contiguous: lambda self, memory_format=torch.contiguous_format: -1, + Tensor.copy_: lambda self, src, non_blocking=False: -1, + Tensor.cpu: lambda self, memory_format=torch.preserve_format: -1, + Tensor.cuda: lambda self, memory_format=torch.preserve_format: -1, + Tensor.xpu: lambda self, memory_format=torch.preserve_format: -1, + Tensor.ipu: lambda self, memory_format=torch.preserve_format: -1, + Tensor.data_ptr: lambda self: -1, + Tensor.dense_dim: lambda self: -1, + Tensor.diagonal_scatter: lambda self, src, offset=0, dim1=0, dim2=1: -1, + Tensor.dim: lambda self: -1, + Tensor.dim_order: lambda self: -1, + Tensor.double: lambda self, memory_format=torch.preserve_format: -1, + Tensor.cdouble: lambda self, memory_format=torch.preserve_format: -1, + Tensor.element_size: lambda self: -1, + Tensor.expand: lambda self, size: -1, + Tensor.expand_as: lambda self, other: -1, + Tensor.exponential_: lambda self, lambd=1, *, generator=None: -1, + Tensor.fill_: lambda self, value: -1, + Tensor.fill_diagonal_: lambda self, value: -1, + Tensor.float: lambda self, memory_format=torch.preserve_format: -1, + Tensor.cfloat: lambda self, memory_format=torch.preserve_format: -1, + Tensor.geometric_: lambda self, p, *, generator=None: -1, + Tensor.get_device: lambda self: -1, + Tensor.half: lambda self, memory_format=torch.preserve_format: -1, + Tensor.chalf: lambda self, memory_format=torch.preserve_format: -1, + Tensor.has_names: lambda self: -1, + Tensor.indices: lambda self: -1, + Tensor.int: lambda self, memory_format=torch.preserve_format: -1, + Tensor.is_coalesced: lambda self: -1, + Tensor.is_contiguous: lambda self: -1, + Tensor.is_inference: lambda self: -1, + Tensor.is_pinned: lambda self: -1, + Tensor.is_set_to: lambda self, tensor: -1, + Tensor.is_shared: 
lambda self: -1, + Tensor.item: lambda self: -1, + Tensor.log_normal_: lambda self, mean=1, std=2, *, generator=None: -1, + Tensor.log_softmax: lambda self, dim: -1, + Tensor.long: lambda self, memory_format=torch.preserve_format: -1, + Tensor.map_: lambda self, tensor, callable: -1, + Tensor.map2_: lambda self, x, y, callable: -1, + Tensor.mm: lambda self, mat2: -1, + Tensor.narrow_copy: lambda self, dimension, start, length: -1, + Tensor.ndimension: lambda self: -1, + Tensor.nelement: lambda self: -1, + Tensor._nested_tensor_size: lambda self: -1, + Tensor._nested_tensor_storage_offsets: lambda self: -1, + Tensor._nested_tensor_strides: lambda self: -1, + Tensor.normal_: lambda self: -1, + Tensor.numpy: lambda self: -1, + Tensor.permute: lambda self, dim: -1, + Tensor.pin_memory: lambda self: -1, + Tensor.put_: lambda self, indices, tensor, accumulate=False: -1, + Tensor.qscheme: lambda self: -1, + Tensor.random_: lambda self, from_=0, to=None, *, generator=None: -1, + Tensor.record_stream: lambda self, stream: -1, + Tensor.refine_names: lambda self, names: -1, + Tensor.register_hook: lambda self, hook: -1, + Tensor.register_post_accumulate_grad_hook: lambda self, hook: -1, + Tensor.rename: lambda self, name: -1, + Tensor.repeat: lambda self, *size: -1, + Tensor.requires_grad_: lambda self, requires_grad=True: -1, + Tensor.reshape_as: lambda self, other: -1, + Tensor.resize: lambda self, *size: -1, + Tensor.resize_: lambda self, size: -1, + Tensor.resize_as: lambda self, other: -1, + Tensor.resize_as_sparse_: lambda self, other: -1, + Tensor.retain_grad: lambda self: -1, + Tensor.set_: lambda self, source=None, storage_offset=0, size=None, stride=None: -1, + Tensor.select_scatter: lambda self, src, dim, index: -1, + Tensor.share_memory_: lambda self: -1, + Tensor.short: lambda self, memory_format=torch.preserve_format: -1, + Tensor.size: lambda self: -1, + Tensor.slice_scatter: lambda self, src, dim=0, start=None, end=None, step=1: -1, + Tensor.sparse_dim: lambda 
self: -1, + Tensor.sparse_mask: lambda self, mask: -1, + Tensor._sparse_mask_projection: lambda self, mask, accumulate_matches=False: -1, + Tensor.sparse_resize_: lambda self, size1, size2, dense_dim: -1, + Tensor.sparse_resize_and_clear_: lambda self, size1, size2, dense_dim: -1, + Tensor.sspaddmm: lambda self, mat1, mat2, beta=1, alpha=1, out=None: -1, + Tensor.storage: lambda self: -1, + Tensor.untyped_storage: lambda self: -1, + Tensor.storage_offset: lambda self: -1, + Tensor.storage_type: lambda self: -1, + Tensor.sum_to_size: lambda self, size: -1, + Tensor.tile: lambda self, *reps: -1, + Tensor.to: lambda self, dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format: -1, + Tensor.to_dense: lambda self, dtype=None, *, masked_grad=None: -1, + Tensor._to_dense: lambda self, dtype=None, masked_grad=None: -1, + Tensor.to_sparse: lambda self: -1, + Tensor.tolist: lambda self: -1, + Tensor.to_mkldnn: lambda self: -1, + Tensor.type_as: lambda self, other: -1, + Tensor.unfold: lambda self, dimension, size, step: -1, + Tensor.uniform_: lambda self, from_=0, to=1: -1, + Tensor.values: lambda self: -1, + Tensor.view: lambda self, shape: -1, + Tensor.view_as: lambda self, other: -1, + Tensor.zero_: lambda self: -1, + Tensor.__dlpack__: lambda self, stream=None: -1, + Tensor.__dlpack_device__: lambda self: -1, + torch.linalg.lstsq: lambda self, b, cond=None, driver=None: -1, + } + + ret2 = {} + ignored = get_ignored_functions() + + for k, v in ret.items(): + # Generate methods like __add__ and add_ by default from add + names = [ + k.__name__, # Default method + k.__name__ + "_", # Inplace variant + "__" + k.__name__ + "__", # Dunder method + "__i" + k.__name__ + "__", # Inplace dunder method + "__r" + k.__name__ + "__", # Reverse dunder method + ] + + if k.__name__.startswith("bitwise_"): + # bitwise_ have dunder methods of the form ____ + # And so on. 
+ subname = k.__name__[len("bitwise_"):] + names.extend([ + "__" + subname + "__", + "__i" + subname + "__", + "__r" + subname + "__" + ]) + + for name in names: + func = getattr(Tensor, name, None) + if callable(func) and func not in ret and func not in ignored: + ret2[func] = v + + ret.update(ret2) + return ret + +def wrap_torch_function(dispatcher: Callable): + """Wraps a given function with ``__torch_function__`` -related functionality. + + Parameters + ---------- + dispatcher: Callable + A callable that returns an iterable of Tensor-likes passed into the function. + + Note + ---- + This decorator may reduce the performance of your code. Generally, it's enough to express + your code as a series of functions that, themselves, support __torch_function__. If you + find yourself in the rare situation where this is not the case, e.g. if you're wrapping a + low-level library and you also need it to work for Tensor-likes, then this function is available. + + Examples + -------- + >>> def dispatcher(a): # Must have the same signature as func + ... return (a,) + >>> @torch.overrides.wrap_torch_function(dispatcher) + >>> def func(a): # This will make func dispatchable by __torch_function__ + ... return a + 0 + """ + def inner(func): + @functools.wraps(func) + def wrapped(*args, **kwargs): + relevant_args = dispatcher(*args, **kwargs) + if has_torch_function(relevant_args): + return handle_torch_function(wrapped, relevant_args, *args, **kwargs) + + return func(*args, **kwargs) + + return wrapped + + return inner + +def _get_overloaded_args(relevant_args: Iterable[Any], get_type_fn: Callable[[Any], Type] = None) -> List[Any]: + """Returns a list of arguments on which to call __torch_function__. + + Checks arguments in relevant_args for __torch_function__ implementations, + storing references to the arguments and their types in overloaded_args and + overloaded_types in order of calling precedence. Only distinct types are + considered. 
If a type is a subclass of another type it will have higher + precedence, otherwise the precedence order is the same as the order of + arguments in relevant_args, that is, from left-to-right in the argument list. + + The precedence-determining algorithm implemented in this function is + described in `NEP-0018`_. + + See torch::append_overloaded_arg for the equivalent function in the C++ + implementation. + + Parameters + ---------- + relevant_args : iterable of array-like + Iterable of array-like arguments to check for __torch_function__ + methods. + + get_type_fn : callable, optional + Function to call on each argument in relevant_args to get its type. + + Returns + ------- + overloaded_args : list + Arguments from relevant_args on which to call __torch_function__ + methods, in the order in which they should be called. + + .. _NEP-0018: + https://numpy.org/neps/nep-0018-array-function-protocol.html + """ + if get_type_fn is None: + get_type_fn = type + + # If torch function is not enabled, there are no overloaded types + if not torch._C._is_torch_function_enabled(): + return [] + # Runtime is O(num_arguments * num_unique_types) + overloaded_types: Set[Type] = set() + overloaded_args: List[Any] = [] + for arg in relevant_args: + arg_type = get_type_fn(arg) + # We only collect arguments if they have a unique type, which ensures + # reasonable performance even with a long list of possibly overloaded + # arguments. + # + # NB: Important to exclude _disabled_torch_function_impl, otherwise + # https://github.com/pytorch/pytorch/issues/64687 + if (arg_type not in overloaded_types and hasattr(arg_type, '__torch_function__') and + arg_type.__torch_function__ != torch._C._disabled_torch_function_impl): + # Create lists explicitly for the first type (usually the only one + # done) to avoid setting up the iterator for overloaded_args. 
+ if overloaded_types: + overloaded_types.add(arg_type) + # By default, insert argument at the end, but if it is + # subclass of another argument, insert it before that argument. + # This ensures "subclasses before superclasses". + index = len(overloaded_args) + for i, old_arg in enumerate(overloaded_args): + if issubclass(arg_type, get_type_fn(old_arg)): + index = i + break + overloaded_args.insert(index, arg) + else: + overloaded_types = {arg_type} + overloaded_args = [arg] + return overloaded_args + + +def handle_torch_function( + public_api: Callable, relevant_args: Iterable[Any], *args, **kwargs) -> Any: + """Implement a function with checks for ``__torch_function__`` overrides. + + See torch::autograd::handle_torch_function for the equivalent of this + function in the C++ implementation. + + Arguments + --------- + public_api : function + Function exposed by the public torch API originally called like + ``public_api(*args, **kwargs)`` on which arguments are now being + checked. + relevant_args : iterable + Iterable of arguments to check for __torch_function__ methods. + args : tuple + Arbitrary positional arguments originally passed into ``public_api``. + kwargs : tuple + Arbitrary keyword arguments originally passed into ``public_api``. + + Returns + ------- + object + Result from calling ``implementation`` or an ``__torch_function__`` + method, as appropriate. + + Raises + ------ + TypeError : if no implementation is found. + + Example + ------- + >>> def func(a): + ... if has_torch_function_unary(a): + ... return handle_torch_function(func, (a,), a) + ... return a + 0 + """ + # Check for __torch_function__ methods. + overloaded_args = _get_overloaded_args(relevant_args) + # overloaded_args already have unique types. + types = tuple(map(type, overloaded_args)) + + # Check for __torch_function__ mode. 
+ if _is_torch_function_mode_enabled(): + # if we're here, the mode must be set to a TorchFunctionStackMode + # this unsets it and calls directly into TorchFunctionStackMode's torch function + with _pop_mode_temporarily() as mode: + result = mode.__torch_function__(public_api, types, args, kwargs) + if result is not NotImplemented: + return result + + # Call overrides + for overloaded_arg in overloaded_args: + # This call needs to become a classmethod call in the future. + # See https://github.com/pytorch/pytorch/issues/63767 + torch_func_method = overloaded_arg.__torch_function__ + if hasattr(torch_func_method, "__self__") and torch_func_method.__self__ is overloaded_arg and \ + torch_func_method is not torch._C._disabled_torch_function_impl: + warnings.warn("Defining your `__torch_function__ as a plain method is deprecated and " + "will be an error in future, please define it as a classmethod.", + DeprecationWarning) + + # Use `public_api` instead of `implementation` so __torch_function__ + # implementations can do equality/identity comparisons. + result = torch_func_method(public_api, types, args, kwargs) + + if result is not NotImplemented: + return result + + func_name = f'{public_api.__module__}.{public_api.__name__}' + msg = ( + f"no implementation found for '{func_name}' on types that implement " + f'__torch_function__: {[type(arg) for arg in overloaded_args]}' + ) + if _is_torch_function_mode_enabled(): + msg += f" nor in mode {_get_current_function_mode()}" + raise TypeError(msg) + +has_torch_function = _add_docstr( + _has_torch_function, + r"""Check for __torch_function__ implementations in the elements of an iterable + or if a __torch_function__ mode is enabled. Considers exact ``Tensor`` s + and ``Parameter`` s non-dispatchable. Use this to guard a call to + :func:`handle_torch_function`; don't use it to test if something + is Tensor-like, use :func:`is_tensor_like` instead. 
+ Arguments + --------- + relevant_args : iterable + Iterable or arguments to check for __torch_function__ methods. + Returns + ------- + bool + True if any of the elements of relevant_args have __torch_function__ + implementations, False otherwise. + See Also + ________ + torch.is_tensor_like + Checks if something is a Tensor-like, including an exact ``Tensor``. + """ +) + +has_torch_function_unary = _add_docstr( + _has_torch_function_unary, + r"""Special case of `has_torch_function` for single inputs. + Instead of: + `has_torch_function((t,))` + call: + `has_torch_function_unary(t)` + which skips unnecessary packing and unpacking work. + """ +) + +has_torch_function_variadic = _add_docstr( + _has_torch_function_variadic, + r"""Special case of `has_torch_function` that skips tuple creation. + + This uses the METH_FASTCALL protocol introduced in Python 3.7 + + Instead of: + `has_torch_function((a, b))` + call: + `has_torch_function_variadic(a, b)` + which skips unnecessary packing and unpacking work. 
+ """ +) + +@functools.lru_cache(None) +def _get_overridable_functions() -> Tuple[Dict[Any, List[Callable]], Dict[Callable, str]]: + overridable_funcs = collections.defaultdict(list) + index = {} + tested_namespaces = [ + ("torch", torch, torch.__all__), + ("torch.functional", torch.functional, torch.functional.__all__), + ("torch.nn.functional", torch.nn.functional, dir(torch.nn.functional)), + ("torch.nn.init", torch.nn.init, dir(torch.nn.init)), + ("torch.Tensor", torch.Tensor, dir(torch.Tensor)), + ("torch.linalg", torch.linalg, dir(torch.linalg)), + ("torch.fft", torch.fft, dir(torch.fft)), + ("torch.special", torch.special, dir(torch.special)), + ] + for namespace_str, namespace, ns_funcs in tested_namespaces: + for func_name in ns_funcs: + ignore = False + # ignore private functions or functions that are deleted in torch.__init__ + if namespace is not torch.Tensor: + if func_name.startswith('__'): + continue + elif func_name.startswith('_'): + ignore = True + elif func_name.endswith('_'): + ignore = True + elif not func_name[0].islower(): + ignore = True + elif func_name == 'unique_dim': + continue + else: + func = getattr(namespace, func_name) + if getattr(object, func_name, None) == func: + continue + if func_name == '__weakref__': + continue + func = getattr(namespace, func_name) + if namespace is torch.Tensor and getattr(object, func_name, None) == func: + continue + # ignore re-exported modules + if isinstance(func, types.ModuleType): + continue + # ignore __future__ imports + if isinstance(func, __future__._Feature): + continue + + if not callable(func) and hasattr(func, "__get__"): + index[func.__get__] = f"{namespace_str}.{func_name}.__get__" + index[func.__set__] = f"{namespace_str}.{func_name}.__set__" + if ignore: + continue + if func.__get__ in get_ignored_functions(): + msg = ("{}.{} is in the tuple returned by torch._overrides.get_ignored_functions " + "but still has an explicit override") + assert func.__get__ not in get_testing_overrides(), 
msg.format(namespace, func.__name__) + continue + else: + overridable_funcs[func].append(func.__get__) + continue + + if not callable(func): + continue + + index[func] = f"{namespace_str}.{func_name}" + + if ignore: + continue + + # cannot be overriden by __torch_function__ + if func in get_ignored_functions(): + msg = ("{}.{} is in the tuple returned by torch._overrides.get_ignored_functions " + "but still has an explicit override") + assert func not in get_testing_overrides(), msg.format(namespace, func.__name__) + continue + overridable_funcs[namespace].append(func) + return overridable_funcs, index + +@_disable_user_warnings +def get_overridable_functions() -> Dict[Any, List[Callable]]: + """List functions that are overridable via __torch_function__ + + Returns + ------- + Dict[Any, List[Callable]] + A dictionary that maps namespaces that contain overridable functions + to functions in that namespace that can be overridden. + """ + return _get_overridable_functions()[0] + +@_disable_user_warnings +def resolve_name(f): + """Get a human readable string name for a function passed to + __torch_function__ + + Arguments + --------- + f : Callable + Function to resolve the name of. + + Returns + ------- + str + Name of the function; if eval'ed it should give back the input + function. + """ + if isinstance(f, (torch._ops.OpOverload, torch._ops.OpOverloadPacket)): + return str(f) + return _get_overridable_functions()[1].get(f) + +@functools.lru_cache(None) +def _get_tensor_methods() -> Set[Callable]: + """ Returns a set of the overridable methods on ``torch.Tensor`` """ + overridable_funcs = get_overridable_functions() + methods = set(overridable_funcs[torch.Tensor]) + return methods + +@_disable_user_warnings +def is_tensor_method_or_property(func: Callable) -> bool: + """ + Returns True if the function passed in is a handler for a + method or property belonging to ``torch.Tensor``, as passed + into ``__torch_function__``. + + .. 
note:: + For properties, their ``__get__`` method must be passed in. + + This may be needed, in particular, for the following reasons: + + 1. Methods/properties sometimes don't contain a `__module__` slot. + 2. They require that the first passed-in argument is an instance + of ``torch.Tensor``. + + Examples + -------- + >>> is_tensor_method_or_property(torch.Tensor.add) + True + >>> is_tensor_method_or_property(torch.add) + False + """ + return func in _get_tensor_methods() or func.__name__ == "__get__" + +def is_tensor_like(inp): + """ + Returns ``True`` if the passed-in input is a Tensor-like. + + Currently, this occurs whenever there's a ``__torch_function__`` + attribute on the type of the input. + + Examples + -------- + A subclass of tensor is generally a Tensor-like. + + >>> class SubTensor(torch.Tensor): ... + >>> is_tensor_like(SubTensor([0])) + True + + Built-in or user types aren't usually Tensor-like. + + >>> is_tensor_like(6) + False + >>> is_tensor_like(None) + False + >>> class NotATensor: ... + >>> is_tensor_like(NotATensor()) + False + + But, they can be made Tensor-like by implementing __torch_function__. + + >>> class TensorLike: + ... @classmethod + ... def __torch_function__(cls, func, types, args, kwargs): + ... return -1 + >>> is_tensor_like(TensorLike()) + True + """ + return type(inp) is torch.Tensor or hasattr(inp, "__torch_function__") + +class TorchFunctionMode: + """ + A ``TorchFunctionMode`` allows you to override the meaning of all + ``__torch_function__`` overrideable functions within a dynamic scope, + without having to actually create a tensor subclass or manually + monkey-patch functions in the PyTorch API. Some common situations + where you should use a mode: + + * You want to override the meaning of factory functions, or other + functions that do not otherwise take a tensor as an argument + (these cannot be overridden with tensor subclasses). 
+ + * You want to override the behavior of all functions without needing + to wrap your inputs in tensor subclasses; e.g., if you are just + interested in logging intermediate computations. + + * You want to control the order of execution of various tensor + subclasses explicitly, rather than implicitly via the return of + ``NotImplemented``. + + Independent subclasses of :class:`TorchFunctionMode` are compositional: + modes can be pushed onto a stack using ``with MyMode():``. + When you call functions in the PyTorch API inside your + ``__torch_function__`` implementation, by default, they will forward on to + the next mode on the mode stack. If you want recursively call back into + your current ``__torch_function__`` implementation, either explicitly + invoke ``self.__torch_function__(...)``, or use the context manager + ``enable_torch_function_mode(self, replace=self.inner)`` to make PyTorch + API self-referential (beware of infinite loops, in this case!) + """ + inner: "TorchFunctionMode" + + # Force metaclass to generate constructor at the base of the hierarchy + def __init__(self): + pass + + def __torch_function__(self, func, types, args=(), kwargs=None): + raise NotImplementedError() + + def __enter__(self): + _push_mode(self) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + _pop_mode() + + @classmethod + def push(cls, *args, **kwargs): + warnings.warn("`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`") + instance = cls(*args, **kwargs) + return instance + + +def _get_current_function_mode(): + stack_len = _len_torch_function_stack() + return _get_function_stack_at(stack_len - 1) if stack_len > 0 else None + + +def _get_current_function_mode_stack(): + stack_len = _len_torch_function_stack() + return [_get_function_stack_at(i) for i in range(stack_len)] + +def _push_mode(mode): + _push_on_torch_function_stack(mode) + + +def _pop_mode(): + old = _pop_torch_function_stack() + return old + + 
+@contextlib.contextmanager +def _pop_mode_temporarily(): + old = _pop_mode() + try: + yield old + finally: + _push_mode(old) + +class BaseTorchFunctionMode(TorchFunctionMode): + def __torch_function__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + return func(*args, **kwargs) + + +@contextlib.contextmanager +def enable_reentrant_dispatch(): + # NB: this can't simply be + # `enable_reentrant_dispatch = torch._C._RestorePythonTLSSnapshot` + # because: + # 1. torch._C._RestorePythonTLSSnapshot is unavailable when this file + # initially gets imported. Probably an import order thing. + # 2. enable_reentrant_dispatch is technically public API; assigning + # it the object would change the __module__ to look private. + with torch._C._RestorePythonTLSSnapshot(): + try: + yield + finally: + pass diff --git a/evalkit_internvl/lib/python3.10/site-packages/torch/random.py b/evalkit_internvl/lib/python3.10/site-packages/torch/random.py new file mode 100644 index 0000000000000000000000000000000000000000..668443a2b2dd0b35db2f01882d1c7f991c70f22e --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/torch/random.py @@ -0,0 +1,175 @@ +import contextlib +from typing import Generator +import warnings + +from torch._C import default_generator +import torch + + +def set_rng_state(new_state: torch.Tensor) -> None: + r"""Sets the random number generator state. + + .. note: This function only works for CPU. For CUDA, please use + torch.manual_seed(seed), which works for both CPU and CUDA. + + Args: + new_state (torch.ByteTensor): The desired state + """ + default_generator.set_state(new_state) + + +def get_rng_state() -> torch.Tensor: + r"""Returns the random number generator state as a `torch.ByteTensor`.""" + return default_generator.get_state() + + +def manual_seed(seed) -> torch._C.Generator: + r"""Sets the seed for generating random numbers. Returns a + `torch.Generator` object. + + Args: + seed (int): The desired seed. 
Value must be within the inclusive range + `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError + is raised. Negative inputs are remapped to positive values with the formula + `0xffff_ffff_ffff_ffff + seed`. + """ + seed = int(seed) + import torch.cuda + + if not torch.cuda._is_in_bad_fork(): + torch.cuda.manual_seed_all(seed) + + import torch.mps + if not torch.mps._is_in_bad_fork(): + torch.mps.manual_seed(seed) + + if hasattr(torch, 'xpu') and not torch.xpu._is_in_bad_fork(): + torch.xpu.manual_seed_all(seed) + + _seed_custom_device(seed) + + return default_generator.manual_seed(seed) + + +def seed() -> int: + r"""Sets the seed for generating random numbers to a non-deterministic + random number. Returns a 64 bit number used to seed the RNG. + """ + seed = default_generator.seed() + import torch.cuda + + if not torch.cuda._is_in_bad_fork(): + torch.cuda.manual_seed_all(seed) + + import torch.mps + if not torch.mps._is_in_bad_fork(): + torch.mps.manual_seed(seed) + + if hasattr(torch, 'xpu') and not torch.xpu._is_in_bad_fork(): + torch.xpu.manual_seed_all(seed) + + _seed_custom_device(seed) + + return seed + + +def _seed_custom_device(seed) -> None: + r"""Sets the seed to generate random numbers for custom device. + + Args: + seed (int): The desired seed. 
+ + See [Note: support the custom device with privateuse1] + """ + seed = int(seed) + custom_backend_name = torch._C._get_privateuse1_backend_name() + if hasattr(torch, custom_backend_name): + custom_device_mod = getattr(torch, custom_backend_name) + _bad_fork_name = "_is_in_bad_fork" + _seed_all_name = "manual_seed_all" + if hasattr(custom_device_mod, _bad_fork_name) and hasattr(custom_device_mod, _seed_all_name): + if not getattr(custom_device_mod, _bad_fork_name)(): + getattr(custom_device_mod, _seed_all_name)(seed) + else: + message = f"Set seed for `{custom_backend_name}` device does not take effect, please add API's " + message += f"`{_bad_fork_name}` and `{_seed_all_name}` to `{custom_backend_name}` device module." + warnings.warn(message, UserWarning, stacklevel=3) + + +def initial_seed() -> int: + r"""Returns the initial seed for generating random numbers as a + Python `long`. + """ + return default_generator.initial_seed() + + +_fork_rng_warned_already = False + + +@contextlib.contextmanager +def fork_rng(devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices", device_type="cuda") -> Generator: + """ + Forks the RNG, so that when you return, the RNG is reset + to the state that it was previously in. + + Args: + devices (iterable of Device IDs): devices for which to fork + the RNG. CPU RNG state is always forked. By default, :meth:`fork_rng` operates + on all devices, but will emit a warning if your machine has a lot + of devices, since this function will run very slowly in that case. + If you explicitly specify devices, this warning will be suppressed + enabled (bool): if ``False``, the RNG is not forked. This is a convenience + argument for easily disabling the context manager without having + to delete it and unindent your Python code under it. + deivce_type (str): device type str, default is `cuda`. 
As for custom device, + see details in [Note: support the custom device with privateuse1] + """ + + device_type = torch.device(device_type).type + device_mod = getattr(torch, device_type, None) + if device_mod is None: + raise RuntimeError(f"torch has no module of `{device_type}`, you should register " + + "a module by `torch._register_device_module`.") + global _fork_rng_warned_already + + # Internal arguments: + # _caller: the function which called fork_rng, which the user used + # _devices_kw: the devices keyword of _caller + + if not enabled: + yield + return + + if devices is None: + num_devices = device_mod.device_count() + if num_devices > 1 and not _fork_rng_warned_already: + message = (f"{device_type.upper()} reports that you have {num_devices} available devices, and " + f"you have used {_caller} without explicitly specifying which devices are being used. " + f"For safety, we initialize *every* {device_type.upper()} device by default, which can " + f"be quite slow if you have a lot of {device_type.upper()}s. If you know that you are only" + f" making use of a few {device_type.upper()} devices, set the environment variable " + f"{device_type.upper()}_VISIBLE_DEVICES or the '{_devices_kw}' keyword argument of {_caller} " + "with the set of devices you are actually using. For example, if you are using CPU only, " + "set device.upper()_VISIBLE_DEVICES= or devices=[]; if you are using device 0 only, " + f"set {device_type.upper()}_VISIBLE_DEVICES=0 or devices=[0]. 
To initialize all devices " + f"and suppress this warning, set the '{_devices_kw}' keyword argument to " + f"`range(torch.{device_type}.device_count())`.") + warnings.warn(message) + _fork_rng_warned_already = True + devices = list(range(num_devices)) + else: + # Protect against user passing us a generator; we need to traverse this + # multiple times but a generator will be exhausted upon first traversal + devices = list(devices) + + cpu_rng_state = torch.get_rng_state() + device_rng_states = [] + for device in devices: + device_rng_states.append(device_mod.get_rng_state(device)) + + try: + yield + finally: + torch.set_rng_state(cpu_rng_state) + for device, device_rng_state in zip(devices, device_rng_states): + device_mod.set_rng_state(device_rng_state, device) diff --git a/evalkit_internvl/lib/python3.10/site-packages/torch/return_types.pyi b/evalkit_internvl/lib/python3.10/site-packages/torch/return_types.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f617e000fff88ed114b46e36fa89aa5379d0b6ea --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/torch/return_types.pyi @@ -0,0 +1,172 @@ +# @generated from torch/_C/return_types.pyi + +from typing import ( + Any, + Callable, + ContextManager, + Iterator, + List, + Literal, + NamedTuple, + Optional, + overload, + Sequence, + Tuple, + TypeVar, + Union, +) + +from torch import contiguous_format, Generator, inf, memory_format, strided, Tensor, SymInt +from torch.types import ( + _bool, + _device, + _dtype, + _float, + _int, + _layout, + _qscheme, + _size, + Number, +) + +class _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(NamedTuple): + output: Tensor + mask: Tensor + +class _fused_moving_avg_obs_fq_helper(NamedTuple): + output: Tensor + mask: Tensor + +class _linalg_det(NamedTuple): + result: Tensor + LU: Tensor + pivots: Tensor + +class _linalg_eigh(NamedTuple): + eigenvalues: Tensor + eigenvectors: Tensor + +class _linalg_slogdet(NamedTuple): + sign: Tensor + 
logabsdet: Tensor + LU: Tensor + pivots: Tensor + +class _linalg_solve_ex(NamedTuple): + result: Tensor + LU: Tensor + pivots: Tensor + info: Tensor + +class _linalg_svd(NamedTuple): + U: Tensor + S: Tensor + Vh: Tensor + +class _lu_with_info(NamedTuple): + LU: Tensor + pivots: Tensor + info: Tensor + +class _scaled_dot_product_efficient_attention(NamedTuple): + output: Tensor + log_sumexp: Tensor + philox_seed: Tensor + philox_offset: Tensor + +class _scaled_dot_product_flash_attention(NamedTuple): + output: Tensor + logsumexp: Tensor + cum_seq_q: Tensor + cum_seq_k: Tensor + max_q: Union[_int, SymInt] + max_k: Union[_int, SymInt] + philox_seed: Tensor + philox_offset: Tensor + debug_attn_mask: Tensor + +class _unpack_dual(NamedTuple): + primal: Tensor + tangent: Tensor + +class aminmax(NamedTuple): + min: Tensor + max: Tensor + +class cummax(NamedTuple): + values: Tensor + indices: Tensor + +class cummin(NamedTuple): + values: Tensor + indices: Tensor + +class frexp(NamedTuple): + mantissa: Tensor + exponent: Tensor + +class geqrf(NamedTuple): + a: Tensor + tau: Tensor + +class histogram(NamedTuple): + hist: Tensor + bin_edges: Tensor + +class histogramdd(NamedTuple): + hist: Tensor + bin_edges: List[Tensor] + +class kthvalue(NamedTuple): + values: Tensor + indices: Tensor + +class lu_unpack(NamedTuple): + P: Tensor + L: Tensor + U: Tensor + +class max(NamedTuple): + values: Tensor + indices: Tensor + +class median(NamedTuple): + values: Tensor + indices: Tensor + +class min(NamedTuple): + values: Tensor + indices: Tensor + +class mode(NamedTuple): + values: Tensor + indices: Tensor + +class nanmedian(NamedTuple): + values: Tensor + indices: Tensor + +class qr(NamedTuple): + Q: Tensor + R: Tensor + +class slogdet(NamedTuple): + sign: Tensor + logabsdet: Tensor + +class sort(NamedTuple): + values: Tensor + indices: Tensor + +class svd(NamedTuple): + U: Tensor + S: Tensor + V: Tensor + +class topk(NamedTuple): + values: Tensor + indices: Tensor + +class 
triangular_solve(NamedTuple): + solution: Tensor + cloned_coefficient: Tensor diff --git a/evalkit_internvl/lib/python3.10/site-packages/torch/serialization.py b/evalkit_internvl/lib/python3.10/site-packages/torch/serialization.py new file mode 100644 index 0000000000000000000000000000000000000000..9d02efd53f27b39088c2ba39c7e9162888f3f79a --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/torch/serialization.py @@ -0,0 +1,1448 @@ +import difflib +import os +import io +import shutil +import struct +import sys +import torch +import tarfile +import tempfile +import warnings +from contextlib import closing, contextmanager +from enum import Enum +from ._utils import _import_dotted_name +from torch._sources import get_source_lines_and_file +from torch.types import Storage +from torch.storage import _get_dtype_from_pickle_storage_type +from typing import Any, BinaryIO, Callable, cast, Dict, Optional, Type, Tuple, Union, IO +from typing_extensions import TypeAlias # Python 3.10+ +import copyreg +import pickle +import pathlib +import torch._weights_only_unpickler as _weights_only_unpickler + +DEFAULT_PROTOCOL = 2 + +LONG_SIZE = struct.Struct('=l').size +INT_SIZE = struct.Struct('=i').size +SHORT_SIZE = struct.Struct('=h').size + +MAGIC_NUMBER = 0x1950a86a20f9469cfc6c +PROTOCOL_VERSION = 1001 +STORAGE_KEY_SEPARATOR = ',' + +FILE_LIKE: TypeAlias = Union[str, os.PathLike, BinaryIO, IO[bytes]] +MAP_LOCATION: TypeAlias = Optional[Union[Callable[[torch.Tensor, str], torch.Tensor], torch.device, str, Dict[str, str]]] +STORAGE: TypeAlias = Union[Storage, torch.storage.TypedStorage, torch.UntypedStorage] + +__all__ = [ + 'SourceChangeWarning', + 'mkdtemp', + 'register_package', + 'check_module_version_greater_or_equal', + 'validate_cuda_device', + 'validate_hpu_device', + 'location_tag', + 'default_restore_location', + 'normalize_storage_type', + 'storage_to_tensor_type', + 'save', + 'load', + 'StorageType', + 'LoadEndianness', + 'get_default_load_endianness', + 
'set_default_load_endianness', +] + + +class SourceChangeWarning(Warning): + pass + + +@contextmanager +def mkdtemp(): + path = tempfile.mkdtemp() + try: + yield path + finally: + shutil.rmtree(path) + + +_package_registry = [] + +class LoadEndianness(Enum): + NATIVE = 1 + LITTLE = 2 + BIG = 3 + +_default_load_endian: Optional[LoadEndianness] = None + +def get_default_load_endianness() -> Optional[LoadEndianness]: + ''' + Get fallback byte order for loading files + + If byteorder mark is not present in saved checkpoint, + this byte order is used as fallback. + By default, it's "native" byte order. + + Returns: + default_load_endian: Optional[LoadEndianness] + ''' + return _default_load_endian + +def set_default_load_endianness(endianness): + ''' + Set fallback byte order for loading files + + If byteorder mark is not present in saved checkpoint, + this byte order is used as fallback. + By default, it's "native" byte order. + + Args: + endianness: the new fallback byte order + ''' + global _default_load_endian + if not isinstance(endianness, LoadEndianness) and endianness is not None: + raise TypeError("Invalid argument type in function set_default_load_endianness") + _default_load_endian = endianness + +def _is_zipfile(f) -> bool: + # This is a stricter implementation than zipfile.is_zipfile(). + # zipfile.is_zipfile() is True if the magic number appears anywhere in the + # binary. Since we expect the files here to be generated by torch.save or + # torch.jit.save, it's safe to only check the start bytes and avoid + # collisions and assume the zip has only 1 file. + # See bugs.python.org/issue28494. 
+ + start = f.tell() + # Read the first few bytes and match against the ZIP file signature + local_header_magic_number = b'PK\x03\x04' + read_bytes = f.read(len(local_header_magic_number)) + f.seek(start) + return read_bytes == local_header_magic_number + + +def register_package( + priority: int, + tagger: Callable[[STORAGE], Optional[str]], + deserializer: Callable[[STORAGE, str], Optional[STORAGE]] +): + ''' + Registers callables for tagging and deserializing storage objects with an associated priority. + Tagging associates a device with a storage object at save time while deserializing moves a + storage object to an appropriate device at load time. :attr:`tagger` and :attr:`deserializer` + are run in the order given by their :attr:`priority` until a tagger/deserializer returns a + value that is not `None`. + + To override the deserialization behavior for a device in the global registry, one can register a + tagger with a higher priority than the existing tagger. + + This function can also be used to register a tagger and deserializer for new devices. + + Args: + priority: Indicates the priority associated with the tagger and deserializer, where a lower + value indicates higher priority. + tagger: Callable that takes in a storage object and returns its tagged device as a string + or None. + deserializer: Callable that takes in storage object and a device string and returns a storage + object on the appropriate device or None. 
+ + Returns: + `None` + + Example: + >>> def ipu_tag(obj): + >>> if obj.device.type == 'ipu': + >>> return 'ipu' + >>> def ipu_deserialize(obj, location): + >>> if location.startswith('ipu'): + >>> ipu = getattr(torch, "ipu", None) + >>> assert ipu is not None, "IPU device module is not loaded" + >>> assert torch.ipu.is_available(), "ipu is not available" + >>> return obj.ipu(location) + >>> torch.serialization.register_package(11, ipu_tag, ipu_deserialize) + ''' + queue_elem = (priority, tagger, deserializer) + _package_registry.append(queue_elem) + _package_registry.sort() + + +def check_module_version_greater_or_equal(module, req_version_tuple, error_if_malformed=True): + ''' + Check if a module's version satisfies requirements + + Usually, a module's version string will be like 'x.y.z', which would be represented + as a tuple (x, y, z), but sometimes it could be an unexpected format. If the version + string does not match the given tuple's format up to the length of the tuple, then + error and exit or emit a warning. 
+ + Args: + module: the module to check the version of + req_version_tuple: tuple (usually of ints) representing the required version + error_if_malformed: whether we should exit if module version string is malformed + + Returns: + requirement_is_met: bool + ''' + try: + version_strs = module.__version__.split('.') + # Cast module version fields to match the types of the required version + module_version = tuple( + type(req_field)(version_strs[idx]) for idx, req_field in enumerate(req_version_tuple) + ) + requirement_is_met = module_version >= req_version_tuple + + except Exception as e: + message = ( + f"'{module.__name__}' module version string is malformed '{module.__version__}' and cannot be compared" + f" with tuple {str(req_version_tuple)}" + ) + if error_if_malformed: + raise RuntimeError(message) from e + else: + warnings.warn(message + ', but continuing assuming that requirement is met') + requirement_is_met = True + + return requirement_is_met + + +def _cpu_tag(obj): + if obj.device.type == 'cpu': + return 'cpu' + + +def _cuda_tag(obj): + if obj.device.type == 'cuda': + return 'cuda:' + str(obj.device.index) + +def _hpu_tag(obj): + if obj.device.type == 'hpu': + return 'hpu:' + str(obj.device.index) + +def _mps_tag(obj): + if obj.device.type == 'mps': + return 'mps' + + +def _meta_tag(obj): + if obj.device.type == 'meta': + return 'meta' + + +def _privateuse1_tag(obj): + backend_name = torch._C._get_privateuse1_backend_name() + if obj.device.type == backend_name: + if obj.device.index is None: + return backend_name + else: + return backend_name + ':' + str(obj.device.index) + + +def _cpu_deserialize(obj, location): + if location == 'cpu': + return obj + + +def validate_cuda_device(location): + device = torch.cuda._utils._get_device_index(location, True) + + if not torch.cuda.is_available(): + raise RuntimeError('Attempting to deserialize object on a CUDA ' + 'device but torch.cuda.is_available() is False. 
' + 'If you are running on a CPU-only machine, ' + 'please use torch.load with map_location=torch.device(\'cpu\') ' + 'to map your storages to the CPU.') + device_count = torch.cuda.device_count() + if device >= device_count: + raise RuntimeError('Attempting to deserialize object on CUDA device ' + f'{device} but torch.cuda.device_count() is {device_count}. Please use ' + 'torch.load with map_location to map your storages ' + 'to an existing device.') + return device + + +def _cuda_deserialize(obj, location): + if location.startswith('cuda'): + device = validate_cuda_device(location) + if getattr(obj, "_torch_load_uninitialized", False): + with torch.cuda.device(device): + return torch.UntypedStorage(obj.nbytes(), device=torch.device(location)) + else: + return obj.cuda(device) + + +def validate_hpu_device(location): + hpu = getattr(torch, "hpu", None) + assert hpu is not None, "HPU device module is not loaded" + device = hpu._utils._get_device_index(location, optional=True) + + if not hpu.is_available(): + raise RuntimeError('Attempting to deserialize object on a HPU ' + 'device but torch.hpu.is_available() is False. ' + 'If you are running on a CPU-only machine, ' + 'please use torch.load with map_location=torch.device(\'cpu\') ' + 'to map your storages to the CPU.') + device_count = hpu.device_count() + if device >= device_count: + raise RuntimeError('Attempting to deserialize object on HPU device ' + f'{device} but torch.hpu.device_count() is {device_count}. 
Please use ' + 'torch.load with map_location to map your storages ' + 'to an existing device.') + return device + + +def _hpu_deserialize(obj, location): + if location.startswith('hpu'): + hpu = getattr(torch, "hpu", None) + assert hpu is not None, "HPU device module is not loaded" + device = validate_hpu_device(location) + if getattr(obj, "_torch_load_uninitialized", False): + with hpu.device(device): + return torch.UntypedStorage(obj.nbytes(), device=torch.device(location)) + else: + return obj.hpu(device) + + +def _mps_deserialize(obj, location): + if location.startswith('mps'): + return obj.mps() + + +def _meta_deserialize(obj, location): + if location == 'meta': + return torch.UntypedStorage(obj.nbytes(), device='meta') + + +def _validate_privateuse1_device(location, backend_name): + ''' + Check whether the device index of privateuse1 is valid + + Register a device_module of privateuse1 by torch._register_device_module. + Implement the following methods in device_module like cuda: + device_module._utils._get_device_index(location, True), + device_module.device_count(). + + Args: + location: string of device + backend_name: the name of privateuse1, which can be renamed + + Returns: + device_index: int + ''' + if not hasattr(torch, backend_name): + raise RuntimeError(f'The {backend_name.upper()} device module is not registered. 
' + 'If you are running on a CPU-only machine, ' + 'please use torch.load with map_location=torch.device(\'cpu\') ' + 'to map your storages to the CPU.') + device_module = getattr(torch, backend_name) + if hasattr(device_module, '_utils') and hasattr(device_module._utils, '_get_device_index'): + device_index = device_module._utils._get_device_index(location, True) + else: + device = torch.device(location) + device_index = device.index if device.index else 0 + if hasattr(device_module, 'is_available') and not device_module.is_available(): + raise RuntimeError(f'Attempting to deserialize object on a {backend_name.upper()} ' + f'device but torch.{backend_name}.is_available() is False. ' + 'If you are running on a CPU-only machine, ' + 'please use torch.load with map_location=torch.device(\'cpu\') ' + 'to map your storages to the CPU.') + if hasattr(device_module, 'device_count'): + device_count = device_module.device_count() + if device_index >= device_count: + raise RuntimeError(f'Attempting to deserialize object on {backend_name.upper()} device ' + f'{device_index} but torch.{backend_name}.device_count() is {device_count}. ' + 'Please use torch.load with map_location to map your storages ' + 'to an existing device.') + return device_index + + +def _privateuse1_deserialize(obj, location): + backend_name = torch._C._get_privateuse1_backend_name() + if location.startswith(backend_name): + if not hasattr(obj, backend_name): + raise RuntimeError(f'Attempting to load the storages to the {backend_name.upper()} device ' + f'but torch.storage._StorageBase.{backend_name}() or ' + f'torch.storage.TypedStorage.{backend_name}() is not generated. 
' + 'Please use torch.utils.generate_methods_for_privateuse1_backend ' + f'to generate storage.{backend_name}() method first.') + device_index = _validate_privateuse1_device(location, backend_name) + return getattr(obj, backend_name)(device_index) + + +register_package(10, _cpu_tag, _cpu_deserialize) +register_package(20, _cuda_tag, _cuda_deserialize) +register_package(21, _mps_tag, _mps_deserialize) +register_package(22, _meta_tag, _meta_deserialize) +register_package(23, _privateuse1_tag, _privateuse1_deserialize) +register_package(24, _hpu_tag, _hpu_deserialize) + + +def location_tag(storage: Union[Storage, torch.storage.TypedStorage, torch.UntypedStorage]): + for _, tagger, _ in _package_registry: + location = tagger(storage) + if location: + return location + raise RuntimeError("don't know how to determine data location of " + + torch.typename(storage)) + + +def default_restore_location(storage, location): + for _, _, fn in _package_registry: + result = fn(storage, location) + if result is not None: + return result + raise RuntimeError("don't know how to restore data location of " + + torch.typename(storage) + " (tagged with " + + location + ")") + + +def normalize_storage_type(storage_type): + return getattr(torch, storage_type.__name__) + + +def storage_to_tensor_type(storage): + storage_type = type(storage) + module = _import_dotted_name(storage_type.__module__) + return getattr(module, storage_type.__name__.replace('Storage', 'Tensor')) + + +def _is_path(name_or_buffer): + return isinstance(name_or_buffer, (str, pathlib.Path)) + + +class _opener: + def __init__(self, file_like): + self.file_like = file_like + + def __enter__(self): + return self.file_like + + def __exit__(self, *args): + pass + + +class _open_file(_opener): + def __init__(self, name, mode): + super().__init__(open(name, mode)) + + def __exit__(self, *args): + self.file_like.close() + + +class _open_buffer_reader(_opener): + def __init__(self, buffer): + super().__init__(buffer) + 
_check_seekable(buffer) + + +class _open_buffer_writer(_opener): + def __exit__(self, *args): + self.file_like.flush() + + +def _open_file_like(name_or_buffer, mode): + if _is_path(name_or_buffer): + return _open_file(name_or_buffer, mode) + else: + if 'w' in mode: + return _open_buffer_writer(name_or_buffer) + elif 'r' in mode: + return _open_buffer_reader(name_or_buffer) + else: + raise RuntimeError(f"Expected 'r' or 'w' in mode but got {mode}") + + +class _open_zipfile_reader(_opener): + def __init__(self, name_or_buffer) -> None: + super().__init__(torch._C.PyTorchFileReader(name_or_buffer)) + + +class _open_zipfile_writer_file(_opener): + def __init__(self, name) -> None: + self.file_stream = None + self.name = str(name) + try: + self.name.encode('ascii') + except UnicodeEncodeError: + # PyTorchFileWriter only supports ascii filename. + # For filenames with non-ascii characters, we rely on Python + # for writing out the file. + self.file_stream = io.FileIO(self.name, mode='w') + super().__init__(torch._C.PyTorchFileWriter(self.file_stream)) + else: + super().__init__(torch._C.PyTorchFileWriter(self.name)) + + def __exit__(self, *args) -> None: + self.file_like.write_end_of_file() + if self.file_stream is not None: + self.file_stream.close() + + +class _open_zipfile_writer_buffer(_opener): + def __init__(self, buffer) -> None: + if not callable(getattr(buffer, "write", None)): + msg = f"Buffer of {str(type(buffer)).strip('<>')} has no callable attribute 'write'" + if not hasattr(buffer, "write"): + raise AttributeError(msg) + raise TypeError(msg) + self.buffer = buffer + super().__init__(torch._C.PyTorchFileWriter(buffer)) + + def __exit__(self, *args) -> None: + self.file_like.write_end_of_file() + self.buffer.flush() + + +def _open_zipfile_writer(name_or_buffer): + container: Type[_opener] + if _is_path(name_or_buffer): + container = _open_zipfile_writer_file + else: + container = _open_zipfile_writer_buffer + return container(name_or_buffer) + + +def 
_is_compressed_file(f) -> bool: + compress_modules = ['gzip'] + try: + return f.__module__ in compress_modules + except AttributeError: + return False + + +def _should_read_directly(f): + """ + Checks if f is a file that should be read directly. It should be read + directly if it is backed by a real file (has a fileno) and is not a + a compressed file (e.g. gzip) + """ + if _is_compressed_file(f): + return False + try: + return f.fileno() >= 0 + except io.UnsupportedOperation: + return False + except AttributeError: + return False + + +def _check_seekable(f) -> bool: + + def raise_err_msg(patterns, e): + for p in patterns: + if p in str(e): + msg = (str(e) + ". You can only torch.load from a file that is seekable." + + " Please pre-load the data into a buffer like io.BytesIO and" + + " try to load from it instead.") + raise type(e)(msg) + raise e + + try: + f.seek(f.tell()) + return True + except (io.UnsupportedOperation, AttributeError) as e: + raise_err_msg(["seek", "tell"], e) + return False + + +def _check_dill_version(pickle_module) -> None: + '''Checks if using dill as the pickle module, and if so, checks if it is the correct version. + If dill version is lower than 0.3.1, a ValueError is raised. + + Args: + pickle_module: module used for pickling metadata and objects + + ''' + if pickle_module is not None and pickle_module.__name__ == 'dill': + required_dill_version = (0, 3, 1) + if not check_module_version_greater_or_equal(pickle_module, required_dill_version, False): + raise ValueError(( + "'torch' supports dill >= {}, but you have dill {}." 
+ " Please upgrade dill or switch to 'pickle'" + ).format( + '.'.join([str(num) for num in required_dill_version]), + pickle_module.__version__ + )) + + +def _check_save_filelike(f): + if not isinstance(f, (str, os.PathLike)) and not hasattr(f, 'write'): + raise AttributeError( + "expected 'f' to be string, path, or a file-like object with " + "a 'write' attribute") + + +def save( + obj: object, + f: FILE_LIKE, + pickle_module: Any = pickle, + pickle_protocol: int = DEFAULT_PROTOCOL, + _use_new_zipfile_serialization: bool = True, + _disable_byteorder_record: bool = False +) -> None: + # Reference: https://github.com/pytorch/pytorch/issues/54354 + # The first line of this docstring overrides the one Sphinx generates for the + # documentation. We need it so that Sphinx doesn't leak `pickle`s path from + # the build environment (e.g. `>> # xdoctest: +SKIP("makes cwd dirty") + >>> # Save to file + >>> x = torch.tensor([0, 1, 2, 3, 4]) + >>> torch.save(x, 'tensor.pt') + >>> # Save to io.BytesIO buffer + >>> buffer = io.BytesIO() + >>> torch.save(x, buffer) + """ + torch._C._log_api_usage_once("torch.save") + _check_dill_version(pickle_module) + _check_save_filelike(f) + + if _use_new_zipfile_serialization: + with _open_zipfile_writer(f) as opened_zipfile: + _save(obj, opened_zipfile, pickle_module, pickle_protocol, _disable_byteorder_record) + return + else: + with _open_file_like(f, 'wb') as opened_file: + _legacy_save(obj, opened_file, pickle_module, pickle_protocol) + + +def _legacy_save(obj, f, pickle_module, pickle_protocol) -> None: + import torch.nn as nn + serialized_container_types = {} + serialized_storages = {} + + # Since loading storages that view the same data with different dtypes is + # not supported, we need to keep track of the dtype associated with each + # storage data_ptr and throw an error if the dtype is ever different. 
+ # TODO: This feature could be added in the future + storage_dtypes: Dict[int, torch.dtype] = {} + + def persistent_id(obj: Any) -> Optional[Tuple]: + # FIXME: the docs say that persistent_id should only return a string + # but torch store returns tuples. This works only in the binary protocol + # see + # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects + # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537 + if isinstance(obj, type) and issubclass(obj, nn.Module): + if obj in serialized_container_types: + return None + serialized_container_types[obj] = True + source_file = source = None + try: + source_lines, _, source_file = get_source_lines_and_file(obj) + source = ''.join(source_lines) + except Exception: # saving the source is optional, so we can ignore any errors + warnings.warn("Couldn't retrieve source code for container of " + "type " + obj.__name__ + ". It won't be checked " + "for correctness upon loading.") + return ('module', obj, source_file, source) + + if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj): + storage: torch.UntypedStorage + + if isinstance(obj, torch.storage.TypedStorage): + # TODO: Once we decide to break serialization FC, this case + # can be deleted + storage = obj._untyped_storage + storage_dtype = obj.dtype + storage_type_str = obj._pickle_storage_type() + storage_type = getattr(torch, storage_type_str) + dtype = obj.dtype + storage_numel = obj._size() + + elif isinstance(obj, torch.UntypedStorage): + storage = obj + storage_dtype = torch.uint8 + storage_type = normalize_storage_type(type(obj)) + dtype = torch.uint8 + storage_numel = storage.nbytes() + else: + raise TypeError(f'type not recognized: {type(obj)}') + + # If storage is allocated, ensure that any other saved storages + # pointing to the same data all have the same dtype. 
If storage is + # not allocated, don't perform this check + if storage.data_ptr() != 0: + if storage.data_ptr() in storage_dtypes: + if storage_dtype != storage_dtypes[storage.data_ptr()]: + raise RuntimeError( + 'Cannot save multiple tensors or storages that ' + 'view the same data as different types') + else: + storage_dtypes[storage.data_ptr()] = storage_dtype + + view_metadata: Optional[Tuple[str, int, int]] + + # Offset is always 0, but we keep it for backwards compatibility + # with the old serialization format (which supported storage views) + offset = 0 + storage_key = str(storage._cdata) + location = location_tag(storage) + + # TODO: There's an issue here with FC. It might be impossible to + # solve, but it's worth noting. Imagine we save a list `[storage, + # tensor]`, where `tensor.storage()` is the same as `storage`, and + # `tensor.element_size() > 1`. Let's say that `tensor.dtype == + # torch.float`. The storage will be serialized with element size + # of 1, since we're choosing to serialize the first occurance of + # a duplicate storage. Since this legacy serialization format saves + # the numel of the storage, rather than nbytes directly, we'll be + # effectively saving nbytes in this case. We'll be able to load it + # and the tensor back up with no problems in _this_ and future + # versions of pytorch, but in older versions, here's the problem: + # the storage will be loaded up as a UntypedStorage, and then the + # FloatTensor will loaded and the UntypedStorage will be assigned to + # it. Since the storage dtype does not match the tensor dtype, this + # will cause an error. If we reverse the list, like `[tensor, + # storage]`, then we will save the `tensor.storage()` as a faked + # `FloatStorage`, and the saved size will be the correct + # dtype-specific numel count that old versions expect. `tensor` + # will be able to load up properly in old versions, pointing to + # a FloatStorage. 
However, `storage` is still being translated to + # a UntypedStorage, and it will try to resolve to the same + # FloatStorage that `tensor` contains. This will also cause an + # error. It doesn't seem like there's any way around this. + # Probably, we just cannot maintain FC for the legacy format if the + # saved list contains both a tensor and a storage that point to the + # same data. We should still be able to maintain FC for lists of + # just tensors, as long as all views share the same dtype as the + # tensor they are viewing. + + if storage_key not in serialized_storages: + serialized_storages[storage_key] = (storage, dtype) + is_view = storage._cdata != storage._cdata + if is_view: + view_metadata = (str(storage._cdata), offset, storage.nbytes()) + else: + view_metadata = None + + res = ('storage', + storage_type, + storage_key, + location, + storage_numel, + view_metadata) + return res + return None + + sys_info = dict( + protocol_version=PROTOCOL_VERSION, + little_endian=sys.byteorder == 'little', + type_sizes=dict( + short=SHORT_SIZE, + int=INT_SIZE, + long=LONG_SIZE, + ), + ) + + pickle_module.dump(MAGIC_NUMBER, f, protocol=pickle_protocol) + pickle_module.dump(PROTOCOL_VERSION, f, protocol=pickle_protocol) + pickle_module.dump(sys_info, f, protocol=pickle_protocol) + pickler = pickle_module.Pickler(f, protocol=pickle_protocol) + pickler.persistent_id = persistent_id + pickler.dump(obj) + + serialized_storage_keys = sorted(serialized_storages.keys()) + pickle_module.dump(serialized_storage_keys, f, protocol=pickle_protocol) + f.flush() + for key in serialized_storage_keys: + storage, dtype = serialized_storages[key] + storage._write_file(f, _should_read_directly(f), True, torch._utils._element_size(dtype)) + + +def _save(obj, zip_file, pickle_module, pickle_protocol, _disable_byteorder_record): + serialized_storages = {} + id_map: Dict[int, str] = {} + + # Since loading storages that view the same data with different dtypes is + # not supported, we need 
to keep track of the dtype associated with each + # storage data_ptr and throw an error if the dtype is ever different. + # TODO: This feature could be added in the future + storage_dtypes: Dict[int, torch.dtype] = {} + + def persistent_id(obj): + # FIXME: the docs say that persistent_id should only return a string + # but torch store returns tuples. This works only in the binary protocol + # see + # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects + # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537 + if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj): + + if isinstance(obj, torch.storage.TypedStorage): + # TODO: Once we decide to break serialization FC, this case + # can be deleted + storage = obj._untyped_storage + storage_dtype = obj.dtype + storage_type_str = obj._pickle_storage_type() + storage_type = getattr(torch, storage_type_str) + storage_numel = obj._size() + + else: + storage = obj + storage_dtype = torch.uint8 + storage_type = normalize_storage_type(type(obj)) + storage_numel = storage.nbytes() + + # If storage is allocated, ensure that any other saved storages + # pointing to the same data all have the same dtype. 
If storage is + # not allocated, don't perform this check + if storage.data_ptr() != 0: + if storage.data_ptr() in storage_dtypes: + if storage_dtype != storage_dtypes[storage.data_ptr()]: + raise RuntimeError( + 'Cannot save multiple tensors or storages that ' + 'view the same data as different types') + else: + storage_dtypes[storage.data_ptr()] = storage_dtype + + storage_key = id_map.setdefault(storage._cdata, str(len(id_map))) + location = location_tag(storage) + serialized_storages[storage_key] = storage + + return ('storage', + storage_type, + storage_key, + location, + storage_numel) + + return None + + # Write the pickle data for `obj` + data_buf = io.BytesIO() + pickler = pickle_module.Pickler(data_buf, protocol=pickle_protocol) + pickler.persistent_id = persistent_id + pickler.dump(obj) + data_value = data_buf.getvalue() + zip_file.write_record('data.pkl', data_value, len(data_value)) + + # Write byte order marker + if not _disable_byteorder_record: + if sys.byteorder not in ['little', 'big']: + raise ValueError('Unknown endianness type: ' + sys.byteorder) + + zip_file.write_record('byteorder', sys.byteorder, len(sys.byteorder)) + + # Write each tensor to a file named tensor/the_tensor_key in the zip archive + for key in sorted(serialized_storages.keys()): + name = f'data/{key}' + storage = serialized_storages[key] + # given that we copy things around anyway, we might use storage.cpu() + # this means to that to get tensors serialized, you need to implement + # .cpu() on the underlying Storage + if storage.device.type != 'cpu': + storage = storage.cpu() + # Now that it is on the CPU we can directly copy it into the zip file + num_bytes = storage.nbytes() + zip_file.write_record(name, storage.data_ptr(), num_bytes) + + +def load( + f: FILE_LIKE, + map_location: MAP_LOCATION = None, + pickle_module: Any = None, + *, + weights_only: bool = False, + mmap: Optional[bool] = None, + **pickle_load_args: Any +) -> Any: + # Reference: 
https://github.com/pytorch/pytorch/issues/54354 + # The first line of this docstring overrides the one Sphinx generates for the + # documentation. We need it so that Sphinx doesn't leak `pickle`s path from + # the build environment (e.g. `>> # xdoctest: +SKIP("undefined filepaths") + >>> torch.load('tensors.pt', weights_only=True) + # Load all tensors onto the CPU + >>> torch.load('tensors.pt', map_location=torch.device('cpu'), weights_only=True) + # Load all tensors onto the CPU, using a function + >>> torch.load('tensors.pt', map_location=lambda storage, loc: storage, weights_only=True) + # Load all tensors onto GPU 1 + >>> torch.load('tensors.pt', map_location=lambda storage, loc: storage.cuda(1), weights_only=True) + # Map tensors from GPU 1 to GPU 0 + >>> torch.load('tensors.pt', map_location={'cuda:1': 'cuda:0'}, weights_only=True) + # Load tensor from io.BytesIO object + # Loading from a buffer setting weights_only=False, warning this can be unsafe + >>> with open('tensor.pt', 'rb') as f: + ... buffer = io.BytesIO(f.read()) + >>> torch.load(buffer, weights_only=False) + # Load a module with 'ascii' encoding for unpickling + # Loading from a module setting weights_only=False, warning this can be unsafe + >>> torch.load('module.pt', encoding='ascii', weights_only=False) + """ + torch._C._log_api_usage_once("torch.load") + UNSAFE_MESSAGE = ( + "Weights only load failed. Re-running `torch.load` with `weights_only` set to `False`" + " will likely succeed, but it can result in arbitrary code execution." + "Do it only if you get the file from a trusted source. 
WeightsUnpickler error: " + ) + # Add ability to force safe only weight loads via environment variable + if os.getenv("TORCH_FORCE_WEIGHTS_ONLY_LOAD", "0").lower() in ['1', 'y', 'yes', 'true']: + weights_only = True + + if weights_only: + if pickle_module is not None: + raise RuntimeError("Can not safely load weights when explicit pickle_module is specified") + else: + if pickle_module is None: + pickle_module = pickle + + # make flipping default BC-compatible + if mmap is None: + mmap = False + + _check_dill_version(pickle_module) + + if 'encoding' not in pickle_load_args.keys(): + pickle_load_args['encoding'] = 'utf-8' + + with _open_file_like(f, 'rb') as opened_file: + if _is_zipfile(opened_file): + # The zipfile reader is going to advance the current file position. + # If we want to actually tail call to torch.jit.load, we need to + # reset back to the original position. + orig_position = opened_file.tell() + overall_storage = None + with _open_zipfile_reader(opened_file) as opened_zipfile: + if _is_torchscript_zip(opened_zipfile): + warnings.warn("'torch.load' received a zip file that looks like a TorchScript archive" + " dispatching to 'torch.jit.load' (call 'torch.jit.load' directly to" + " silence this warning)", UserWarning) + opened_file.seek(orig_position) + return torch.jit.load(opened_file, map_location=map_location) + if mmap: + if not isinstance(f, str): + raise ValueError("f must be a string filename in order to use mmap argument") + size = os.path.getsize(f) + overall_storage = torch.UntypedStorage.from_file(f, False, size) + if weights_only: + try: + return _load(opened_zipfile, + map_location, + _weights_only_unpickler, + overall_storage=overall_storage, + **pickle_load_args) + except RuntimeError as e: + raise pickle.UnpicklingError(UNSAFE_MESSAGE + str(e)) from None + return _load(opened_zipfile, + map_location, + pickle_module, + overall_storage=overall_storage, + **pickle_load_args) + if mmap: + raise RuntimeError("mmap can only be used with 
files saved with " + "`torch.save(_use_new_zipfile_serialization=True), " + "please torch.save your checkpoint with this option in order to use mmap.") + if weights_only: + try: + return _legacy_load(opened_file, map_location, _weights_only_unpickler, **pickle_load_args) + except RuntimeError as e: + raise pickle.UnpicklingError(UNSAFE_MESSAGE + str(e)) from None + return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args) + + +# Register pickling support for layout instances such as +# torch.sparse_coo, etc +def _get_layout(name): + """Get layout extension object from its string representation. + """ + cache = _get_layout.cache # type: ignore[attr-defined] + if not cache: + for v in torch.__dict__.values(): + if isinstance(v, torch.layout): + cache[str(v)] = v + return cache[name] + +# There are yet not good way to type annotate function attributes https://github.com/python/mypy/issues/2087 +_get_layout.cache = {} # type: ignore[attr-defined] +copyreg.pickle(torch.layout, lambda obj: (_get_layout, (str(obj),))) + + +def _legacy_load(f, map_location, pickle_module, **pickle_load_args): + deserialized_objects: Dict[int, Any] = {} + + restore_location = _get_restore_location(map_location) + + class UnpicklerWrapper(pickle_module.Unpickler): # type: ignore[name-defined] + + def find_class(self, mod_name, name): + if type(name) is str and 'Storage' in name: + try: + return StorageType(name) + except KeyError: + pass + return super().find_class(mod_name, name) + + def _check_container_source(container_type, source_file, original_source): + try: + current_source = ''.join(get_source_lines_and_file(container_type)[0]) + except Exception: # saving the source is optional, so we can ignore any errors + warnings.warn("Couldn't retrieve source code for container of " + "type " + container_type.__name__ + ". 
It won't be checked " + "for correctness upon loading.") + return + if original_source != current_source: + if container_type.dump_patches: + file_name = container_type.__name__ + '.patch' + diff = difflib.unified_diff(current_source.split('\n'), + original_source.split('\n'), + source_file, + source_file, lineterm="") + lines = '\n'.join(diff) + try: + with open(file_name, 'a+') as f: + file_size = f.seek(0, 2) + f.seek(0) + if file_size == 0: + f.write(lines) + elif file_size != len(lines) or f.read() != lines: + raise OSError + msg = ("Saved a reverse patch to " + file_name + ". " + "Run `patch -p0 < " + file_name + "` to revert your " + "changes.") + except OSError: + msg = ("Tried to save a patch, but couldn't create a " + "writable file " + file_name + ". Make sure it " + "doesn't exist and your working directory is " + "writable.") + else: + msg = ("you can retrieve the original source code by " + "accessing the object's source attribute or set " + "`torch.nn.Module.dump_patches = True` and use the " + "patch tool to revert the changes.") + msg = f"source code of class '{torch.typename(container_type)}' has changed. 
{msg}" + warnings.warn(msg, SourceChangeWarning) + + def legacy_load(f): + deserialized_objects: Dict[int, Any] = {} + + def persistent_load(saved_id): + if isinstance(saved_id, tuple): + # Ignore containers that don't have any sources saved + if all(saved_id[1:]): + _check_container_source(*saved_id) + return saved_id[0] + return deserialized_objects[int(saved_id)] + + with closing(tarfile.open(fileobj=f, mode='r:', format=tarfile.PAX_FORMAT)) as tar, \ + mkdtemp() as tmpdir: + + tar.extract('storages', path=tmpdir) + with open(os.path.join(tmpdir, 'storages'), 'rb', 0) as f: + num_storages = pickle_module.load(f, **pickle_load_args) + for i in range(num_storages): + args = pickle_module.load(f, **pickle_load_args) + key, location, storage_type = args + dtype = storage_type._dtype + obj = cast(Storage, torch.UntypedStorage)._new_with_file(f, torch._utils._element_size(dtype)) + obj = restore_location(obj, location) + # TODO: Once we decide to break serialization FC, we can + # stop wrapping with TypedStorage + deserialized_objects[key] = torch.storage.TypedStorage( + wrap_storage=obj, + dtype=dtype, + _internal=True) + + storage_views = pickle_module.load(f, **pickle_load_args) + for target_cdata, root_cdata, offset, numel in storage_views: + root = deserialized_objects[root_cdata] + element_size = torch._utils._element_size(root.dtype) + offset_bytes = offset * element_size + # TODO: Once we decide to break serialization FC, we can + # stop wrapping with TypedStorage + deserialized_objects[target_cdata] = torch.storage.TypedStorage( + wrap_storage=root._untyped_storage[offset_bytes:offset_bytes + numel * element_size], + dtype=root.dtype, + _internal=True) + + tar.extract('tensors', path=tmpdir) + with open(os.path.join(tmpdir, 'tensors'), 'rb', 0) as f: + num_tensors = pickle_module.load(f, **pickle_load_args) + for _ in range(num_tensors): + args = pickle_module.load(f, **pickle_load_args) + key, storage_id, original_tensor_type = args + storage = 
deserialized_objects[storage_id] + ndim, = struct.unpack(' str: + # When using encoding='bytes' in Py3, some **internal** keys stored as + # strings in Py2 are loaded as bytes. This function decodes them with + # ascii encoding, one that Py3 uses by default. + # + # NOTE: This should only be used on internal keys (e.g., `typename` and + # `location` in `persistent_load` below! + if isinstance(bytes_str, bytes): + return bytes_str.decode('ascii') + return bytes_str + + +def _get_restore_location(map_location): + if map_location is None: + restore_location = default_restore_location + elif isinstance(map_location, dict): + def restore_location(storage, location): + location = map_location.get(location, location) + return default_restore_location(storage, location) + elif isinstance(map_location, (str, bytes)): + def restore_location(storage, location): + return default_restore_location(storage, map_location) + elif isinstance(map_location, torch.device): + def restore_location(storage, location): + return default_restore_location(storage, str(map_location)) + else: + def restore_location(storage, location): + result = map_location(storage, location) + if result is None: + result = default_restore_location(storage, location) + return result + return restore_location + + +class StorageType: + def __init__(self, name): + self._dtype = _get_dtype_from_pickle_storage_type(name) + + @property + def dtype(self): + return self._dtype + + def __str__(self): + return f'StorageType(dtype={self.dtype})' + + +def _load(zip_file, map_location, pickle_module, pickle_file='data.pkl', overall_storage=None, **pickle_load_args): + restore_location = _get_restore_location(map_location) + + loaded_storages = {} + + # check if byteswapping is needed + byteordername = 'byteorder' + byteorderdata = None + if zip_file.has_record(byteordername): + byteorderdata = zip_file.get_record(byteordername) + if byteorderdata not in [b'little', b'big']: + raise ValueError('Unknown endianness type: ' + 
byteorderdata.decode()) + elif get_default_load_endianness() == LoadEndianness.LITTLE or \ + get_default_load_endianness() is None: + byteorderdata = b'little' + elif get_default_load_endianness() == LoadEndianness.BIG: + byteorderdata = b'big' + elif get_default_load_endianness() == LoadEndianness.NATIVE: + pass + else: + raise ValueError('Invalid load endianness type') + + if not zip_file.has_record(byteordername) and \ + get_default_load_endianness() is None and \ + sys.byteorder == 'big': + # Default behaviour was changed + # See https://github.com/pytorch/pytorch/issues/101688 + warnings.warn("The default load endianness for checkpoints without a byteorder mark " + "on big endian machines was changed from 'native' to 'little' endian, " + "to avoid this behavior please use " + "torch.serialization.set_default_load_endianness to set " + "the desired default load endianness", + UserWarning) + + def load_tensor(dtype, numel, key, location): + name = f'data/{key}' + if overall_storage is not None: + storage_offset = zip_file.get_record_offset(name) + storage = overall_storage[storage_offset:storage_offset + numel] + else: + storage = zip_file.get_storage_from_record(name, numel, torch.UntypedStorage)._typed_storage()._untyped_storage + # swap here if byteswapping is needed + if byteorderdata is not None: + if byteorderdata.decode() != sys.byteorder: + storage.byteswap(dtype) + + # TODO: Once we decide to break serialization FC, we can + # stop wrapping with TypedStorage + typed_storage = torch.storage.TypedStorage( + wrap_storage=restore_location(storage, location), + dtype=dtype, + _internal=True) + + if typed_storage._data_ptr() != 0: + loaded_storages[key] = typed_storage + + return typed_storage + + def persistent_load(saved_id): + assert isinstance(saved_id, tuple) + typename = _maybe_decode_ascii(saved_id[0]) + data = saved_id[1:] + + assert typename == 'storage', \ + f"Unknown typename for persistent_load, expected 'storage' but got '{typename}'" + 
storage_type, key, location, numel = data + if storage_type is torch.UntypedStorage: + dtype = torch.uint8 + else: + dtype = storage_type.dtype + + if key in loaded_storages: + typed_storage = loaded_storages[key] + else: + nbytes = numel * torch._utils._element_size(dtype) + typed_storage = load_tensor(dtype, nbytes, key, _maybe_decode_ascii(location)) + + return typed_storage + + load_module_mapping: Dict[str, str] = { + # See https://github.com/pytorch/pytorch/pull/51633 + 'torch.tensor': 'torch._tensor' + } + + # Need to subclass Unpickler instead of directly monkey-patching the find_class method + # because it's marked readonly in pickle. + # The type: ignore is because mypy can't statically determine the type of this class. + class UnpicklerWrapper(pickle_module.Unpickler): # type: ignore[name-defined] + # from https://stackoverflow.com/questions/13398462/unpickling-python-objects-with-a-changed-module-path/13405732 + # Lets us override the imports that pickle uses when unpickling an object. + # This is useful for maintaining BC if we change a module path that tensor instantiation relies on. 
+ def find_class(self, mod_name, name): + if type(name) is str and 'Storage' in name: + try: + return StorageType(name) + except KeyError: + pass + mod_name = load_module_mapping.get(mod_name, mod_name) + return super().find_class(mod_name, name) + + # Load the data (which may in turn use `persistent_load` to load tensors) + data_file = io.BytesIO(zip_file.get_record(pickle_file)) + + unpickler = UnpicklerWrapper(data_file, **pickle_load_args) + unpickler.persistent_load = persistent_load + result = unpickler.load() + + torch._utils._validate_loaded_sparse_tensors() + torch._C._log_api_usage_metadata( + "torch.load.metadata", {"serialization_id": zip_file.serialization_id()} + ) + return result + + +def _is_torchscript_zip(zip_file): + return 'constants.pkl' in zip_file.get_all_records() diff --git a/evalkit_internvl/lib/python3.10/site-packages/torch/types.py b/evalkit_internvl/lib/python3.10/site-packages/torch/types.py new file mode 100644 index 0000000000000000000000000000000000000000..22c01e3bb9795ec2ca23d6149ebbbfc0ab19bb7e --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/torch/types.py @@ -0,0 +1,79 @@ +import torch +from typing import Any, List, Optional, Sequence, Tuple, Union + +import builtins + +# Convenience aliases for common composite types that we need +# to talk about in PyTorch + +_TensorOrTensors = Union[torch.Tensor, Sequence[torch.Tensor]] +_TensorOrTensorsOrGradEdge = Union[ + torch.Tensor, Sequence[torch.Tensor], + "torch.autograd.graph.GradientEdge", + Sequence["torch.autograd.graph.GradientEdge"]] + +# In some cases, these basic types are shadowed by corresponding +# top-level values. The underscore variants let us refer to these +# types. 
See https://github.com/python/mypy/issues/4146 for why these +# workarounds is necessary +_int = builtins.int +_float = builtins.float +_bool = builtins.bool +_complex = builtins.complex + +_dtype = torch.dtype +_device = torch.device +_qscheme = torch.qscheme +_size = Union[torch.Size, List[_int], Tuple[_int, ...]] +_layout = torch.layout +_dispatchkey = Union[str, torch._C.DispatchKey] + +# Meta-type for "numeric" things; matches our docs +Number = Union[builtins.int, builtins.float, builtins.bool] + +# Meta-type for "device-like" things. Not to be confused with 'device' (a +# literal device object). This nomenclature is consistent with PythonArgParser. +# None means use the default device (typically CPU) +Device = Optional[Union[_device, str, _int]] +del Optional + +# Storage protocol implemented by ${Type}StorageBase classes + +class Storage: + _cdata: int + device: torch.device + dtype: torch.dtype + _torch_load_uninitialized: bool + + def __deepcopy__(self, memo) -> 'Storage': # type: ignore[empty-body] + ... + + def _new_shared(self, int) -> 'Storage': # type: ignore[empty-body] + ... + + def _write_file(self, f: Any, is_real_file: _bool, save_size: _bool, element_size: int) -> None: + ... + + def element_size(self) -> int: # type: ignore[empty-body] + ... + + def is_shared(self) -> bool: # type: ignore[empty-body] + ... + + def share_memory_(self) -> 'Storage': # type: ignore[empty-body] + ... + + def nbytes(self) -> int: # type: ignore[empty-body] + ... + + def cpu(self) -> 'Storage': # type: ignore[empty-body] + ... + + def data_ptr(self) -> int: # type: ignore[empty-body] + ... + + def from_file(self, filename: str, shared: bool = False, nbytes: int = 0) -> 'Storage': # type: ignore[empty-body] + ... + + def _new_with_file(self, f: Any, element_size: int) -> 'Storage': # type: ignore[empty-body] + ... 
diff --git a/evalkit_internvl/lib/python3.10/site-packages/torch/version.py b/evalkit_internvl/lib/python3.10/site-packages/torch/version.py new file mode 100644 index 0000000000000000000000000000000000000000..6f7ddc823cd7188120ad6c45ff87b5cc4e927e21 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/torch/version.py @@ -0,0 +1,8 @@ +from typing import Optional + +__all__ = ['__version__', 'debug', 'cuda', 'git_version', 'hip'] +__version__ = '2.2.0+cu118' +debug = False +cuda: Optional[str] = '11.8' +git_version = '8ac9b20d4b090c213799e81acf48a55ea8d437d6' +hip: Optional[str] = None diff --git a/evalkit_tf437/lib/python3.10/site-packages/__pycache__/markdown2.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/markdown2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..daa1bbbb6b05c983e3a9607ab2874eaed0ae3901 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/markdown2.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ca59ebaba93465505b3720fdc964ee8b75d1a67353000e4aee3d98c489745ea +size 110002 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f7aea3d84d853628ef3004ff1dd3ac16d98198d1 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31608925ac5554e7279f30b6d0ba652bd63e641e4eb1b1ae2429ecb3ffc427e6 +size 101480 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc 
b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08e5889fafc1e705dcbd1453ec58e0271c3742fd Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4cae97fb916b34828f21e4746b81c1a98af0946 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..826ded9ab4e352eaad085c657765d8f17e0d7e47 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_base.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..bb71cbe9ed55059fa78bd21022d6b1f907553d59 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_base.py @@ -0,0 +1,850 @@ +""" +Generalized Linear Models. 
+""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import numbers +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral + +import numpy as np +import scipy.sparse as sp +from scipy import linalg, optimize, sparse +from scipy.sparse.linalg import lsqr +from scipy.special import expit + +from ..base import ( + BaseEstimator, + ClassifierMixin, + MultiOutputMixin, + RegressorMixin, + _fit_context, +) +from ..utils import check_array, check_random_state +from ..utils._array_api import ( + _asarray_with_order, + _average, + get_namespace, + get_namespace_and_device, + indexing_dtype, + supported_float_dtypes, +) +from ..utils._seq_dataset import ( + ArrayDataset32, + ArrayDataset64, + CSRDataset32, + CSRDataset64, +) +from ..utils.extmath import safe_sparse_dot +from ..utils.parallel import Parallel, delayed +from ..utils.sparsefuncs import mean_variance_axis +from ..utils.validation import _check_sample_weight, check_is_fitted, validate_data + +# TODO: bayesian_ridge_regression and bayesian_regression_ard +# should be squashed into its respective objects. + +SPARSE_INTERCEPT_DECAY = 0.01 +# For sparse data intercept updates are scaled by this decay factor to avoid +# intercept oscillation. + + +def make_dataset(X, y, sample_weight, random_state=None): + """Create ``Dataset`` abstraction for sparse and dense inputs. + + This also returns the ``intercept_decay`` which is different + for sparse datasets. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Training data + + y : array-like, shape (n_samples, ) + Target values. + + sample_weight : numpy array of shape (n_samples,) + The weight of each sample + + random_state : int, RandomState instance or None (default) + Determines random number generation for dataset random sampling. It is not + used for dataset shuffling. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. 
+ + Returns + ------- + dataset + The ``Dataset`` abstraction + intercept_decay + The intercept decay + """ + + rng = check_random_state(random_state) + # seed should never be 0 in SequentialDataset64 + seed = rng.randint(1, np.iinfo(np.int32).max) + + if X.dtype == np.float32: + CSRData = CSRDataset32 + ArrayData = ArrayDataset32 + else: + CSRData = CSRDataset64 + ArrayData = ArrayDataset64 + + if sp.issparse(X): + dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight, seed=seed) + intercept_decay = SPARSE_INTERCEPT_DECAY + else: + X = np.ascontiguousarray(X) + dataset = ArrayData(X, y, sample_weight, seed=seed) + intercept_decay = 1.0 + + return dataset, intercept_decay + + +def _preprocess_data( + X, + y, + *, + fit_intercept, + copy=True, + copy_y=True, + sample_weight=None, + check_input=True, +): + """Common data preprocessing for fitting linear models. + + This helper is in charge of the following steps: + + - Ensure that `sample_weight` is an array or `None`. + - If `check_input=True`, perform standard input validation of `X`, `y`. + - Perform copies if requested to avoid side-effects in case of inplace + modifications of the input. + + Then, if `fit_intercept=True` this preprocessing centers both `X` and `y` as + follows: + - if `X` is dense, center the data and + store the mean vector in `X_offset`. + - if `X` is sparse, store the mean in `X_offset` + without centering `X`. The centering is expected to be handled by the + linear solver where appropriate. + - in either case, always center `y` and store the mean in `y_offset`. + - both `X_offset` and `y_offset` are always weighted by `sample_weight` + if not set to `None`. + + If `fit_intercept=False`, no centering is performed and `X_offset`, `y_offset` + are set to zero. + + Returns + ------- + X_out : {ndarray, sparse matrix} of shape (n_samples, n_features) + If copy=True a copy of the input X is triggered, otherwise operations are + inplace. + If input X is dense, then X_out is centered. 
+ y_out : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets) + Centered version of y. Possibly performed inplace on input y depending + on the copy_y parameter. + X_offset : ndarray of shape (n_features,) + The mean per column of input X. + y_offset : float or ndarray of shape (n_features,) + X_scale : ndarray of shape (n_features,) + Always an array of ones. TODO: refactor the code base to make it + possible to remove this unused variable. + """ + xp, _, device_ = get_namespace_and_device(X, y, sample_weight) + n_samples, n_features = X.shape + X_is_sparse = sp.issparse(X) + + if isinstance(sample_weight, numbers.Number): + sample_weight = None + if sample_weight is not None: + sample_weight = xp.asarray(sample_weight) + + if check_input: + X = check_array( + X, copy=copy, accept_sparse=["csr", "csc"], dtype=supported_float_dtypes(xp) + ) + y = check_array(y, dtype=X.dtype, copy=copy_y, ensure_2d=False) + else: + y = xp.astype(y, X.dtype, copy=copy_y) + if copy: + if X_is_sparse: + X = X.copy() + else: + X = _asarray_with_order(X, order="K", copy=True, xp=xp) + + dtype_ = X.dtype + + if fit_intercept: + if X_is_sparse: + X_offset, X_var = mean_variance_axis(X, axis=0, weights=sample_weight) + else: + X_offset = _average(X, axis=0, weights=sample_weight, xp=xp) + + X_offset = xp.astype(X_offset, X.dtype, copy=False) + X -= X_offset + + y_offset = _average(y, axis=0, weights=sample_weight, xp=xp) + y -= y_offset + else: + X_offset = xp.zeros(n_features, dtype=X.dtype, device=device_) + if y.ndim == 1: + y_offset = xp.asarray(0.0, dtype=dtype_, device=device_) + else: + y_offset = xp.zeros(y.shape[1], dtype=dtype_, device=device_) + + # XXX: X_scale is no longer needed. It is an historic artifact from the + # time where linear model exposed the normalize parameter. + X_scale = xp.ones(n_features, dtype=X.dtype, device=device_) + return X, y, X_offset, y_offset, X_scale + + +# TODO: _rescale_data should be factored into _preprocess_data. 
+# Currently, the fact that sag implements its own way to deal with +# sample_weight makes the refactoring tricky. + + +def _rescale_data(X, y, sample_weight, inplace=False): + """Rescale data sample-wise by square root of sample_weight. + + For many linear models, this enables easy support for sample_weight because + + (y - X w)' S (y - X w) + + with S = diag(sample_weight) becomes + + ||y_rescaled - X_rescaled w||_2^2 + + when setting + + y_rescaled = sqrt(S) y + X_rescaled = sqrt(S) X + + Returns + ------- + X_rescaled : {array-like, sparse matrix} + + y_rescaled : {array-like, sparse matrix} + """ + # Assume that _validate_data and _check_sample_weight have been called by + # the caller. + xp, _ = get_namespace(X, y, sample_weight) + n_samples = X.shape[0] + sample_weight_sqrt = xp.sqrt(sample_weight) + + if sp.issparse(X) or sp.issparse(y): + sw_matrix = sparse.dia_matrix( + (sample_weight_sqrt, 0), shape=(n_samples, n_samples) + ) + + if sp.issparse(X): + X = safe_sparse_dot(sw_matrix, X) + else: + if inplace: + X *= sample_weight_sqrt[:, None] + else: + X = X * sample_weight_sqrt[:, None] + + if sp.issparse(y): + y = safe_sparse_dot(sw_matrix, y) + else: + if inplace: + if y.ndim == 1: + y *= sample_weight_sqrt + else: + y *= sample_weight_sqrt[:, None] + else: + if y.ndim == 1: + y = y * sample_weight_sqrt + else: + y = y * sample_weight_sqrt[:, None] + return X, y, sample_weight_sqrt + + +class LinearModel(BaseEstimator, metaclass=ABCMeta): + """Base class for Linear Models""" + + @abstractmethod + def fit(self, X, y): + """Fit model.""" + + def _decision_function(self, X): + check_is_fitted(self) + + X = validate_data(self, X, accept_sparse=["csr", "csc", "coo"], reset=False) + coef_ = self.coef_ + if coef_.ndim == 1: + return X @ coef_ + self.intercept_ + else: + return X @ coef_.T + self.intercept_ + + def predict(self, X): + """ + Predict using the linear model. 
+ + Parameters + ---------- + X : array-like or sparse matrix, shape (n_samples, n_features) + Samples. + + Returns + ------- + C : array, shape (n_samples,) + Returns predicted values. + """ + return self._decision_function(X) + + def _set_intercept(self, X_offset, y_offset, X_scale): + """Set the intercept_""" + + xp, _ = get_namespace(X_offset, y_offset, X_scale) + + if self.fit_intercept: + # We always want coef_.dtype=X.dtype. For instance, X.dtype can differ from + # coef_.dtype if warm_start=True. + coef_ = xp.astype(self.coef_, X_scale.dtype, copy=False) + coef_ = self.coef_ = xp.divide(coef_, X_scale) + + if coef_.ndim == 1: + intercept_ = y_offset - X_offset @ coef_ + else: + intercept_ = y_offset - X_offset @ coef_.T + + self.intercept_ = intercept_ + + else: + self.intercept_ = 0.0 + + +# XXX Should this derive from LinearModel? It should be a mixin, not an ABC. +# Maybe the n_features checking can be moved to LinearModel. +class LinearClassifierMixin(ClassifierMixin): + """Mixin for linear classifiers. + + Handles prediction for sparse and dense X. + """ + + def decision_function(self, X): + """ + Predict confidence scores for samples. + + The confidence score for a sample is proportional to the signed + distance of that sample to the hyperplane. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data matrix for which we want to get the confidence scores. + + Returns + ------- + scores : ndarray of shape (n_samples,) or (n_samples, n_classes) + Confidence scores per `(n_samples, n_classes)` combination. In the + binary case, confidence score for `self.classes_[1]` where >0 means + this class would be predicted. 
+ """ + check_is_fitted(self) + xp, _ = get_namespace(X) + + X = validate_data(self, X, accept_sparse="csr", reset=False) + scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ + return ( + xp.reshape(scores, (-1,)) + if (scores.ndim > 1 and scores.shape[1] == 1) + else scores + ) + + def predict(self, X): + """ + Predict class labels for samples in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data matrix for which we want to get the predictions. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + Vector containing the class labels for each sample. + """ + xp, _ = get_namespace(X) + scores = self.decision_function(X) + if len(scores.shape) == 1: + indices = xp.astype(scores > 0, indexing_dtype(xp)) + else: + indices = xp.argmax(scores, axis=1) + + return xp.take(self.classes_, indices, axis=0) + + def _predict_proba_lr(self, X): + """Probability estimation for OvR logistic regression. + + Positive class probabilities are computed as + 1. / (1. + np.exp(-self.decision_function(X))); + multiclass is handled by normalizing that over all classes. + """ + prob = self.decision_function(X) + expit(prob, out=prob) + if prob.ndim == 1: + return np.vstack([1 - prob, prob]).T + else: + # OvR normalization, like LibLinear's predict_probability + prob /= prob.sum(axis=1).reshape((prob.shape[0], -1)) + return prob + + +class SparseCoefMixin: + """Mixin for converting coef_ to and from CSR format. + + L1-regularizing estimators should inherit this. + """ + + def densify(self): + """ + Convert coefficient matrix to dense array format. + + Converts the ``coef_`` member (back) to a numpy.ndarray. This is the + default format of ``coef_`` and is required for fitting, so calling + this method is only required on models that have previously been + sparsified; otherwise, it is a no-op. + + Returns + ------- + self + Fitted estimator. 
+ """ + msg = "Estimator, %(name)s, must be fitted before densifying." + check_is_fitted(self, msg=msg) + if sp.issparse(self.coef_): + self.coef_ = self.coef_.toarray() + return self + + def sparsify(self): + """ + Convert coefficient matrix to sparse format. + + Converts the ``coef_`` member to a scipy.sparse matrix, which for + L1-regularized models can be much more memory- and storage-efficient + than the usual numpy.ndarray representation. + + The ``intercept_`` member is not converted. + + Returns + ------- + self + Fitted estimator. + + Notes + ----- + For non-sparse models, i.e. when there are not many zeros in ``coef_``, + this may actually *increase* memory usage, so use this method with + care. A rule of thumb is that the number of zero elements, which can + be computed with ``(coef_ == 0).sum()``, must be more than 50% for this + to provide significant benefits. + + After calling this method, further fitting with the partial_fit + method (if any) will not work until you call densify. + """ + msg = "Estimator, %(name)s, must be fitted before sparsifying." + check_is_fitted(self, msg=msg) + self.coef_ = sp.csr_matrix(self.coef_) + return self + + +class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel): + """ + Ordinary least squares Linear Regression. + + LinearRegression fits a linear model with coefficients w = (w1, ..., wp) + to minimize the residual sum of squares between the observed targets in + the dataset, and the targets predicted by the linear approximation. + + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to False, no intercept will be used in calculations + (i.e. data is expected to be centered). + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + n_jobs : int, default=None + The number of jobs to use for the computation. 
This will only provide + speedup in case of sufficiently large problems, that is if firstly + `n_targets > 1` and secondly `X` is sparse or if `positive` is set + to `True`. ``None`` means 1 unless in a + :obj:`joblib.parallel_backend` context. ``-1`` means using all + processors. See :term:`Glossary ` for more details. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. This + option is only supported for dense arrays. + + .. versionadded:: 0.24 + + Attributes + ---------- + coef_ : array of shape (n_features, ) or (n_targets, n_features) + Estimated coefficients for the linear regression problem. + If multiple targets are passed during the fit (y 2D), this + is a 2D array of shape (n_targets, n_features), while if only + one target is passed, this is a 1D array of length n_features. + + rank_ : int + Rank of matrix `X`. Only available when `X` is dense. + + singular_ : array of shape (min(X, y),) + Singular values of `X`. Only available when `X` is dense. + + intercept_ : float or array of shape (n_targets,) + Independent term in the linear model. Set to 0.0 if + `fit_intercept = False`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Ridge : Ridge regression addresses some of the + problems of Ordinary Least Squares by imposing a penalty on the + size of the coefficients with l2 regularization. + Lasso : The Lasso is a linear model that estimates + sparse coefficients with l1 regularization. + ElasticNet : Elastic-Net is a linear regression + model trained with both l1 and l2 -norm regularization of the + coefficients. 
+ + Notes + ----- + From the implementation point of view, this is just plain Ordinary + Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares + (scipy.optimize.nnls) wrapped as a predictor object. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import LinearRegression + >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]]) + >>> # y = 1 * x_0 + 2 * x_1 + 3 + >>> y = np.dot(X, np.array([1, 2])) + 3 + >>> reg = LinearRegression().fit(X, y) + >>> reg.score(X, y) + 1.0 + >>> reg.coef_ + array([1., 2.]) + >>> reg.intercept_ + np.float64(3.0...) + >>> reg.predict(np.array([[3, 5]])) + array([16.]) + """ + + _parameter_constraints: dict = { + "fit_intercept": ["boolean"], + "copy_X": ["boolean"], + "n_jobs": [None, Integral], + "positive": ["boolean"], + } + + def __init__( + self, + *, + fit_intercept=True, + copy_X=True, + n_jobs=None, + positive=False, + ): + self.fit_intercept = fit_intercept + self.copy_X = copy_X + self.n_jobs = n_jobs + self.positive = positive + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """ + Fit linear model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : array-like of shape (n_samples,), default=None + Individual weights for each sample. + + .. versionadded:: 0.17 + parameter *sample_weight* support to LinearRegression. + + Returns + ------- + self : object + Fitted Estimator. 
+ """ + n_jobs_ = self.n_jobs + + accept_sparse = False if self.positive else ["csr", "csc", "coo"] + + X, y = validate_data( + self, + X, + y, + accept_sparse=accept_sparse, + y_numeric=True, + multi_output=True, + force_writeable=True, + ) + + has_sw = sample_weight is not None + if has_sw: + sample_weight = _check_sample_weight( + sample_weight, X, dtype=X.dtype, ensure_non_negative=True + ) + + # Note that neither _rescale_data nor the rest of the fit method of + # LinearRegression can benefit from in-place operations when X is a + # sparse matrix. Therefore, let's not copy X when it is sparse. + copy_X_in_preprocess_data = self.copy_X and not sp.issparse(X) + + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, + y, + fit_intercept=self.fit_intercept, + copy=copy_X_in_preprocess_data, + sample_weight=sample_weight, + ) + + if has_sw: + # Sample weight can be implemented via a simple rescaling. Note + # that we safely do inplace rescaling when _preprocess_data has + # already made a copy if requested. 
+ X, y, sample_weight_sqrt = _rescale_data( + X, y, sample_weight, inplace=copy_X_in_preprocess_data + ) + + if self.positive: + if y.ndim < 2: + self.coef_ = optimize.nnls(X, y)[0] + else: + # scipy.optimize.nnls cannot handle y with shape (M, K) + outs = Parallel(n_jobs=n_jobs_)( + delayed(optimize.nnls)(X, y[:, j]) for j in range(y.shape[1]) + ) + self.coef_ = np.vstack([out[0] for out in outs]) + elif sp.issparse(X): + X_offset_scale = X_offset / X_scale + + if has_sw: + + def matvec(b): + return X.dot(b) - sample_weight_sqrt * b.dot(X_offset_scale) + + def rmatvec(b): + return X.T.dot(b) - X_offset_scale * b.dot(sample_weight_sqrt) + + else: + + def matvec(b): + return X.dot(b) - b.dot(X_offset_scale) + + def rmatvec(b): + return X.T.dot(b) - X_offset_scale * b.sum() + + X_centered = sparse.linalg.LinearOperator( + shape=X.shape, matvec=matvec, rmatvec=rmatvec + ) + + if y.ndim < 2: + self.coef_ = lsqr(X_centered, y)[0] + else: + # sparse_lstsq cannot handle y with shape (M, K) + outs = Parallel(n_jobs=n_jobs_)( + delayed(lsqr)(X_centered, y[:, j].ravel()) + for j in range(y.shape[1]) + ) + self.coef_ = np.vstack([out[0] for out in outs]) + else: + # cut-off ratio for small singular values + cond = max(X.shape) * np.finfo(X.dtype).eps + self.coef_, _, self.rank_, self.singular_ = linalg.lstsq(X, y, cond=cond) + self.coef_ = self.coef_.T + + if y.ndim == 1: + self.coef_ = np.ravel(self.coef_) + self._set_intercept(X_offset, y_offset, X_scale) + return self + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = not self.positive + return tags + + +def _check_precomputed_gram_matrix( + X, precompute, X_offset, X_scale, rtol=None, atol=1e-5 +): + """Computes a single element of the gram matrix and compares it to + the corresponding element of the user supplied gram matrix. + + If the values do not match a ValueError will be thrown. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data array. 
+
+    precompute : array-like of shape (n_features, n_features)
+        User-supplied gram matrix.
+
+    X_offset : ndarray of shape (n_features,)
+        Array of feature means used to center design matrix.
+
+    X_scale : ndarray of shape (n_features,)
+        Array of feature scale factors used to normalize design matrix.
+
+    rtol : float, default=None
+        Relative tolerance; see :func:`numpy.allclose`.
+        If None, it is set to 1e-4 for arrays of dtype numpy.float32 and 1e-7
+        otherwise.
+
+    atol : float, default=1e-5
+        absolute tolerance; see :func:`numpy.allclose`. Note that the default
+        here is more tolerant than the default for
+        :func:`numpy.testing.assert_allclose`, where `atol=0`.
+
+    Raises
+    ------
+    ValueError
+        Raised when the provided Gram matrix is not consistent.
+    """
+
+    n_features = X.shape[1]
+    f1 = n_features // 2
+    f2 = min(f1 + 1, n_features - 1)
+
+    v1 = (X[:, f1] - X_offset[f1]) * X_scale[f1]
+    v2 = (X[:, f2] - X_offset[f2]) * X_scale[f2]
+
+    expected = np.dot(v1, v2)
+    actual = precompute[f1, f2]
+
+    dtypes = [precompute.dtype, expected.dtype]
+    if rtol is None:
+        rtols = [1e-4 if dtype == np.float32 else 1e-7 for dtype in dtypes]
+        rtol = max(rtols)
+
+    if not np.isclose(expected, actual, rtol=rtol, atol=atol):
+        raise ValueError(
+            "Gram matrix passed in via 'precompute' parameter "
+            "did not pass validation when a single element was "
+            "checked - please check that it was computed "
+            f"properly. For element ({f1},{f2}) we computed "
+            f"{expected} but the user-supplied value was "
+            f"{actual}."
+        )
+
+
+def _pre_fit(
+    X,
+    y,
+    Xy,
+    precompute,
+    fit_intercept,
+    copy,
+    check_input=True,
+    sample_weight=None,
+):
+    """Function used at beginning of fit in linear models with L1 or L0 penalty.
+
+    This function applies _preprocess_data and additionally computes the gram matrix
+    `precompute` as needed as well as `Xy`.
+ """ + n_samples, n_features = X.shape + + if sparse.issparse(X): + # copy is not needed here as X is not modified inplace when X is sparse + precompute = False + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, + y, + fit_intercept=fit_intercept, + copy=False, + check_input=check_input, + sample_weight=sample_weight, + ) + else: + # copy was done in fit if necessary + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, + y, + fit_intercept=fit_intercept, + copy=copy, + check_input=check_input, + sample_weight=sample_weight, + ) + # Rescale only in dense case. Sparse cd solver directly deals with + # sample_weight. + if sample_weight is not None: + # This triggers copies anyway. + X, y, _ = _rescale_data(X, y, sample_weight=sample_weight) + + if hasattr(precompute, "__array__"): + if fit_intercept and not np.allclose(X_offset, np.zeros(n_features)): + warnings.warn( + ( + "Gram matrix was provided but X was centered to fit " + "intercept: recomputing Gram matrix." + ), + UserWarning, + ) + # TODO: instead of warning and recomputing, we could just center + # the user provided Gram matrix a-posteriori (after making a copy + # when `copy=True`). + # recompute Gram + precompute = "auto" + Xy = None + elif check_input: + # If we're going to use the user's precomputed gram matrix, we + # do a quick check to make sure its not totally bogus. + _check_precomputed_gram_matrix(X, precompute, X_offset, X_scale) + + # precompute if n_samples > n_features + if isinstance(precompute, str) and precompute == "auto": + precompute = n_samples > n_features + + if precompute is True: + # make sure that the 'precompute' array is contiguous. 
+ precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order="C") + np.dot(X.T, X, out=precompute) + + if not hasattr(precompute, "__array__"): + Xy = None # cannot use Xy if precompute is not Gram + + if hasattr(precompute, "__array__") and Xy is None: + common_dtype = np.result_type(X.dtype, y.dtype) + if y.ndim == 1: + # Xy is 1d, make sure it is contiguous. + Xy = np.empty(shape=n_features, dtype=common_dtype, order="C") + np.dot(X.T, y, out=Xy) + else: + # Make sure that Xy is always F contiguous even if X or y are not + # contiguous: the goal is to make it fast to extract the data for a + # specific target. + n_targets = y.shape[1] + Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order="F") + np.dot(y.T, X, out=Xy.T) + + return X, y, X_offset, y_offset, X_scale, precompute, Xy diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.pyx b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.pyx new file mode 100644 index 0000000000000000000000000000000000000000..c4c530d907e2636e2a01fcaf9d301420b6b2937d --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.pyx @@ -0,0 +1,956 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from libc.math cimport fabs +import numpy as np + +from cython cimport floating +import warnings +from ..exceptions import ConvergenceWarning + +from ..utils._cython_blas cimport ( + _axpy, _dot, _asum, _gemv, _nrm2, _copy, _scal +) +from ..utils._cython_blas cimport ColMajor, Trans, NoTrans +from ..utils._typedefs cimport uint32_t +from ..utils._random cimport our_rand_r + + +# The following two functions are shamelessly copied from the tree code. + +cdef enum: + # Max value for our rand_r replacement (near the bottom). + # We don't use RAND_MAX because it's different across platforms and + # particularly tiny on Windows/MSVC. 
+ # It corresponds to the maximum representable value for + # 32-bit signed integers (i.e. 2^31 - 1). + RAND_R_MAX = 2147483647 + + +cdef inline uint32_t rand_int(uint32_t end, uint32_t* random_state) noexcept nogil: + """Generate a random integer in [0; end).""" + return our_rand_r(random_state) % end + + +cdef inline floating fmax(floating x, floating y) noexcept nogil: + if x > y: + return x + return y + + +cdef inline floating fsign(floating f) noexcept nogil: + if f == 0: + return 0 + elif f > 0: + return 1.0 + else: + return -1.0 + + +cdef floating abs_max(int n, const floating* a) noexcept nogil: + """np.max(np.abs(a))""" + cdef int i + cdef floating m = fabs(a[0]) + cdef floating d + for i in range(1, n): + d = fabs(a[i]) + if d > m: + m = d + return m + + +cdef floating max(int n, floating* a) noexcept nogil: + """np.max(a)""" + cdef int i + cdef floating m = a[0] + cdef floating d + for i in range(1, n): + d = a[i] + if d > m: + m = d + return m + + +cdef floating diff_abs_max(int n, const floating* a, floating* b) noexcept nogil: + """np.max(np.abs(a - b))""" + cdef int i + cdef floating m = fabs(a[0] - b[0]) + cdef floating d + for i in range(1, n): + d = fabs(a[i] - b[i]) + if d > m: + m = d + return m + + +def enet_coordinate_descent( + floating[::1] w, + floating alpha, + floating beta, + const floating[::1, :] X, + const floating[::1] y, + unsigned int max_iter, + floating tol, + object rng, + bint random=0, + bint positive=0 +): + """Cython version of the coordinate descent algorithm + for Elastic-Net regression + + We minimize + + (1/2) * norm(y - X w, 2)^2 + alpha norm(w, 1) + (beta/2) norm(w, 2)^2 + + Returns + ------- + w : ndarray of shape (n_features,) + ElasticNet coefficients. + gap : float + Achieved dual gap. + tol : float + Equals input `tol` times `np.dot(y, y)`. The tolerance used for the dual gap. + n_iter : int + Number of coordinate descent iterations. 
+ """ + + if floating is float: + dtype = np.float32 + else: + dtype = np.float64 + + # get the data information into easy vars + cdef unsigned int n_samples = X.shape[0] + cdef unsigned int n_features = X.shape[1] + + # compute norms of the columns of X + cdef floating[::1] norm_cols_X = np.square(X).sum(axis=0) + + # initial value of the residuals + cdef floating[::1] R = np.empty(n_samples, dtype=dtype) + cdef floating[::1] XtA = np.empty(n_features, dtype=dtype) + + cdef floating tmp + cdef floating w_ii + cdef floating d_w_max + cdef floating w_max + cdef floating d_w_ii + cdef floating gap = tol + 1.0 + cdef floating d_w_tol = tol + cdef floating dual_norm_XtA + cdef floating R_norm2 + cdef floating w_norm2 + cdef floating l1_norm + cdef floating const + cdef floating A_norm2 + cdef unsigned int ii + cdef unsigned int n_iter = 0 + cdef unsigned int f_iter + cdef uint32_t rand_r_state_seed = rng.randint(0, RAND_R_MAX) + cdef uint32_t* rand_r_state = &rand_r_state_seed + + if alpha == 0 and beta == 0: + warnings.warn("Coordinate descent with no regularization may lead to " + "unexpected results and is discouraged.") + + with nogil: + # R = y - np.dot(X, w) + _copy(n_samples, &y[0], 1, &R[0], 1) + _gemv(ColMajor, NoTrans, n_samples, n_features, -1.0, &X[0, 0], + n_samples, &w[0], 1, 1.0, &R[0], 1) + + # tol *= np.dot(y, y) + tol *= _dot(n_samples, &y[0], 1, &y[0], 1) + + for n_iter in range(max_iter): + w_max = 0.0 + d_w_max = 0.0 + for f_iter in range(n_features): # Loop over coordinates + if random: + ii = rand_int(n_features, rand_r_state) + else: + ii = f_iter + + if norm_cols_X[ii] == 0.0: + continue + + w_ii = w[ii] # Store previous value + + if w_ii != 0.0: + # R += w_ii * X[:,ii] + _axpy(n_samples, w_ii, &X[0, ii], 1, &R[0], 1) + + # tmp = (X[:,ii]*R).sum() + tmp = _dot(n_samples, &X[0, ii], 1, &R[0], 1) + + if positive and tmp < 0: + w[ii] = 0.0 + else: + w[ii] = (fsign(tmp) * fmax(fabs(tmp) - alpha, 0) + / (norm_cols_X[ii] + beta)) + + if w[ii] != 0.0: 
+ # R -= w[ii] * X[:,ii] # Update residual + _axpy(n_samples, -w[ii], &X[0, ii], 1, &R[0], 1) + + # update the maximum absolute coefficient update + d_w_ii = fabs(w[ii] - w_ii) + d_w_max = fmax(d_w_max, d_w_ii) + + w_max = fmax(w_max, fabs(w[ii])) + + if ( + w_max == 0.0 + or d_w_max / w_max < d_w_tol + or n_iter == max_iter - 1 + ): + # the biggest coordinate update of this iteration was smaller + # than the tolerance: check the duality gap as ultimate + # stopping criterion + + # XtA = np.dot(X.T, R) - beta * w + _copy(n_features, &w[0], 1, &XtA[0], 1) + _gemv(ColMajor, Trans, + n_samples, n_features, 1.0, &X[0, 0], n_samples, + &R[0], 1, + -beta, &XtA[0], 1) + + if positive: + dual_norm_XtA = max(n_features, &XtA[0]) + else: + dual_norm_XtA = abs_max(n_features, &XtA[0]) + + # R_norm2 = np.dot(R, R) + R_norm2 = _dot(n_samples, &R[0], 1, &R[0], 1) + + # w_norm2 = np.dot(w, w) + w_norm2 = _dot(n_features, &w[0], 1, &w[0], 1) + + if (dual_norm_XtA > alpha): + const = alpha / dual_norm_XtA + A_norm2 = R_norm2 * (const ** 2) + gap = 0.5 * (R_norm2 + A_norm2) + else: + const = 1.0 + gap = R_norm2 + + l1_norm = _asum(n_features, &w[0], 1) + + # np.dot(R.T, y) + gap += (alpha * l1_norm + - const * _dot(n_samples, &R[0], 1, &y[0], 1) + + 0.5 * beta * (1 + const ** 2) * (w_norm2)) + + if gap < tol: + # return if we reached desired tolerance + break + + else: + # for/else, runs if for doesn't end with a `break` + with gil: + message = ( + "Objective did not converge. You might want to increase " + "the number of iterations, check the scale of the " + "features or consider increasing regularisation. " + f"Duality gap: {gap:.3e}, tolerance: {tol:.3e}" + ) + if alpha < np.finfo(np.float64).eps: + message += ( + " Linear regression models with null weight for the " + "l1 regularization term are more efficiently fitted " + "using one of the solvers implemented in " + "sklearn.linear_model.Ridge/RidgeCV instead." 
+ ) + warnings.warn(message, ConvergenceWarning) + + return np.asarray(w), gap, tol, n_iter + 1 + + +def sparse_enet_coordinate_descent( + floating[::1] w, + floating alpha, + floating beta, + const floating[::1] X_data, + const int[::1] X_indices, + const int[::1] X_indptr, + const floating[::1] y, + const floating[::1] sample_weight, + const floating[::1] X_mean, + unsigned int max_iter, + floating tol, + object rng, + bint random=0, + bint positive=0, +): + """Cython version of the coordinate descent algorithm for Elastic-Net + + We minimize: + + 1/2 * norm(y - Z w, 2)^2 + alpha * norm(w, 1) + (beta/2) * norm(w, 2)^2 + + where Z = X - X_mean. + With sample weights sw, this becomes + + 1/2 * sum(sw * (y - Z w)^2, axis=0) + alpha * norm(w, 1) + + (beta/2) * norm(w, 2)^2 + + and X_mean is the weighted average of X (per column). + + Returns + ------- + w : ndarray of shape (n_features,) + ElasticNet coefficients. + gap : float + Achieved dual gap. + tol : float + Equals input `tol` times `np.dot(y, y)`. The tolerance used for the dual gap. + n_iter : int + Number of coordinate descent iterations. + """ + # Notes for sample_weight: + # For dense X, one centers X and y and then rescales them by sqrt(sample_weight). + # Here, for sparse X, we get the sample_weight averaged center X_mean. We take care + # that every calculation results as if we had rescaled y and X (and therefore also + # X_mean) by sqrt(sample_weight) without actually calculating the square root. 
+ # We work with: + # yw = sample_weight + # R = sample_weight * residual + # norm_cols_X = np.sum(sample_weight * (X - X_mean)**2, axis=0) + + # get the data information into easy vars + cdef unsigned int n_samples = y.shape[0] + cdef unsigned int n_features = w.shape[0] + + # compute norms of the columns of X + cdef unsigned int ii + cdef floating[:] norm_cols_X + + cdef unsigned int startptr = X_indptr[0] + cdef unsigned int endptr + + # initial value of the residuals + # R = y - Zw, weighted version R = sample_weight * (y - Zw) + cdef floating[::1] R + cdef floating[::1] XtA + cdef const floating[::1] yw + + if floating is float: + dtype = np.float32 + else: + dtype = np.float64 + + norm_cols_X = np.zeros(n_features, dtype=dtype) + XtA = np.zeros(n_features, dtype=dtype) + + cdef floating tmp + cdef floating w_ii + cdef floating d_w_max + cdef floating w_max + cdef floating d_w_ii + cdef floating X_mean_ii + cdef floating R_sum = 0.0 + cdef floating R_norm2 + cdef floating w_norm2 + cdef floating A_norm2 + cdef floating l1_norm + cdef floating normalize_sum + cdef floating gap = tol + 1.0 + cdef floating d_w_tol = tol + cdef floating dual_norm_XtA + cdef unsigned int jj + cdef unsigned int n_iter = 0 + cdef unsigned int f_iter + cdef uint32_t rand_r_state_seed = rng.randint(0, RAND_R_MAX) + cdef uint32_t* rand_r_state = &rand_r_state_seed + cdef bint center = False + cdef bint no_sample_weights = sample_weight is None + cdef int kk + + if no_sample_weights: + yw = y + R = y.copy() + else: + yw = np.multiply(sample_weight, y) + R = yw.copy() + + with nogil: + # center = (X_mean != 0).any() + for ii in range(n_features): + if X_mean[ii]: + center = True + break + + for ii in range(n_features): + X_mean_ii = X_mean[ii] + endptr = X_indptr[ii + 1] + normalize_sum = 0.0 + w_ii = w[ii] + + if no_sample_weights: + for jj in range(startptr, endptr): + normalize_sum += (X_data[jj] - X_mean_ii) ** 2 + R[X_indices[jj]] -= X_data[jj] * w_ii + norm_cols_X[ii] = 
normalize_sum + \ + (n_samples - endptr + startptr) * X_mean_ii ** 2 + if center: + for jj in range(n_samples): + R[jj] += X_mean_ii * w_ii + else: + for jj in range(startptr, endptr): + tmp = sample_weight[X_indices[jj]] + # second term will be subtracted by loop over range(n_samples) + normalize_sum += (tmp * (X_data[jj] - X_mean_ii) ** 2 + - tmp * X_mean_ii ** 2) + R[X_indices[jj]] -= tmp * X_data[jj] * w_ii + if center: + for jj in range(n_samples): + normalize_sum += sample_weight[jj] * X_mean_ii ** 2 + R[jj] += sample_weight[jj] * X_mean_ii * w_ii + norm_cols_X[ii] = normalize_sum + startptr = endptr + + # tol *= np.dot(y, y) + # with sample weights: tol *= y @ (sw * y) + tol *= _dot(n_samples, &y[0], 1, &yw[0], 1) + + for n_iter in range(max_iter): + + w_max = 0.0 + d_w_max = 0.0 + + for f_iter in range(n_features): # Loop over coordinates + if random: + ii = rand_int(n_features, rand_r_state) + else: + ii = f_iter + + if norm_cols_X[ii] == 0.0: + continue + + startptr = X_indptr[ii] + endptr = X_indptr[ii + 1] + w_ii = w[ii] # Store previous value + X_mean_ii = X_mean[ii] + + if w_ii != 0.0: + # R += w_ii * X[:,ii] + if no_sample_weights: + for jj in range(startptr, endptr): + R[X_indices[jj]] += X_data[jj] * w_ii + if center: + for jj in range(n_samples): + R[jj] -= X_mean_ii * w_ii + else: + for jj in range(startptr, endptr): + tmp = sample_weight[X_indices[jj]] + R[X_indices[jj]] += tmp * X_data[jj] * w_ii + if center: + for jj in range(n_samples): + R[jj] -= sample_weight[jj] * X_mean_ii * w_ii + + # tmp = (X[:,ii] * R).sum() + tmp = 0.0 + for jj in range(startptr, endptr): + tmp += R[X_indices[jj]] * X_data[jj] + + if center: + R_sum = 0.0 + for jj in range(n_samples): + R_sum += R[jj] + tmp -= R_sum * X_mean_ii + + if positive and tmp < 0.0: + w[ii] = 0.0 + else: + w[ii] = fsign(tmp) * fmax(fabs(tmp) - alpha, 0) \ + / (norm_cols_X[ii] + beta) + + if w[ii] != 0.0: + # R -= w[ii] * X[:,ii] # Update residual + if no_sample_weights: + for jj in 
range(startptr, endptr): + R[X_indices[jj]] -= X_data[jj] * w[ii] + if center: + for jj in range(n_samples): + R[jj] += X_mean_ii * w[ii] + else: + for jj in range(startptr, endptr): + tmp = sample_weight[X_indices[jj]] + R[X_indices[jj]] -= tmp * X_data[jj] * w[ii] + if center: + for jj in range(n_samples): + R[jj] += sample_weight[jj] * X_mean_ii * w[ii] + + # update the maximum absolute coefficient update + d_w_ii = fabs(w[ii] - w_ii) + d_w_max = fmax(d_w_max, d_w_ii) + + w_max = fmax(w_max, fabs(w[ii])) + + if w_max == 0.0 or d_w_max / w_max < d_w_tol or n_iter == max_iter - 1: + # the biggest coordinate update of this iteration was smaller than + # the tolerance: check the duality gap as ultimate stopping + # criterion + + # sparse X.T / dense R dot product + if center: + R_sum = 0.0 + for jj in range(n_samples): + R_sum += R[jj] + + # XtA = X.T @ R - beta * w + for ii in range(n_features): + XtA[ii] = 0.0 + for kk in range(X_indptr[ii], X_indptr[ii + 1]): + XtA[ii] += X_data[kk] * R[X_indices[kk]] + + if center: + XtA[ii] -= X_mean[ii] * R_sum + XtA[ii] -= beta * w[ii] + + if positive: + dual_norm_XtA = max(n_features, &XtA[0]) + else: + dual_norm_XtA = abs_max(n_features, &XtA[0]) + + # R_norm2 = np.dot(R, R) + if no_sample_weights: + R_norm2 = _dot(n_samples, &R[0], 1, &R[0], 1) + else: + R_norm2 = 0.0 + for jj in range(n_samples): + # R is already multiplied by sample_weight + if sample_weight[jj] != 0: + R_norm2 += (R[jj] ** 2) / sample_weight[jj] + + # w_norm2 = np.dot(w, w) + w_norm2 = _dot(n_features, &w[0], 1, &w[0], 1) + if (dual_norm_XtA > alpha): + const = alpha / dual_norm_XtA + A_norm2 = R_norm2 * const**2 + gap = 0.5 * (R_norm2 + A_norm2) + else: + const = 1.0 + gap = R_norm2 + + l1_norm = _asum(n_features, &w[0], 1) + + gap += (alpha * l1_norm - const * _dot( + n_samples, + &R[0], 1, + &y[0], 1 + ) + + 0.5 * beta * (1 + const ** 2) * w_norm2) + + if gap < tol: + # return if we reached desired tolerance + break + + else: + # for/else, runs if 
for doesn't end with a `break` + with gil: + warnings.warn("Objective did not converge. You might want to " + "increase the number of iterations. Duality " + "gap: {}, tolerance: {}".format(gap, tol), + ConvergenceWarning) + + return np.asarray(w), gap, tol, n_iter + 1 + + +def enet_coordinate_descent_gram( + floating[::1] w, + floating alpha, + floating beta, + const floating[:, ::1] Q, + const floating[::1] q, + const floating[:] y, + unsigned int max_iter, + floating tol, + object rng, + bint random=0, + bint positive=0 +): + """Cython version of the coordinate descent algorithm + for Elastic-Net regression + + We minimize + + (1/2) * w^T Q w - q^T w + alpha norm(w, 1) + (beta/2) * norm(w, 2)^2 + + which amount to the Elastic-Net problem when: + Q = X^T X (Gram matrix) + q = X^T y + + Returns + ------- + w : ndarray of shape (n_features,) + ElasticNet coefficients. + gap : float + Achieved dual gap. + tol : float + Equals input `tol` times `np.dot(y, y)`. The tolerance used for the dual gap. + n_iter : int + Number of coordinate descent iterations. 
+ """ + + if floating is float: + dtype = np.float32 + else: + dtype = np.float64 + + # get the data information into easy vars + cdef unsigned int n_features = Q.shape[0] + + # initial value "Q w" which will be kept of up to date in the iterations + cdef floating[:] H = np.dot(Q, w) + + cdef floating[:] XtA = np.zeros(n_features, dtype=dtype) + cdef floating tmp + cdef floating w_ii + cdef floating d_w_max + cdef floating w_max + cdef floating d_w_ii + cdef floating q_dot_w + cdef floating w_norm2 + cdef floating gap = tol + 1.0 + cdef floating d_w_tol = tol + cdef floating dual_norm_XtA + cdef unsigned int ii + cdef unsigned int n_iter = 0 + cdef unsigned int f_iter + cdef uint32_t rand_r_state_seed = rng.randint(0, RAND_R_MAX) + cdef uint32_t* rand_r_state = &rand_r_state_seed + + cdef floating y_norm2 = np.dot(y, y) + cdef floating* w_ptr = &w[0] + cdef const floating* Q_ptr = &Q[0, 0] + cdef const floating* q_ptr = &q[0] + cdef floating* H_ptr = &H[0] + cdef floating* XtA_ptr = &XtA[0] + tol = tol * y_norm2 + + if alpha == 0: + warnings.warn( + "Coordinate descent without L1 regularization may " + "lead to unexpected results and is discouraged. " + "Set l1_ratio > 0 to add L1 regularization." 
+ ) + + with nogil: + for n_iter in range(max_iter): + w_max = 0.0 + d_w_max = 0.0 + for f_iter in range(n_features): # Loop over coordinates + if random: + ii = rand_int(n_features, rand_r_state) + else: + ii = f_iter + + if Q[ii, ii] == 0.0: + continue + + w_ii = w[ii] # Store previous value + + if w_ii != 0.0: + # H -= w_ii * Q[ii] + _axpy(n_features, -w_ii, Q_ptr + ii * n_features, 1, + H_ptr, 1) + + tmp = q[ii] - H[ii] + + if positive and tmp < 0: + w[ii] = 0.0 + else: + w[ii] = fsign(tmp) * fmax(fabs(tmp) - alpha, 0) \ + / (Q[ii, ii] + beta) + + if w[ii] != 0.0: + # H += w[ii] * Q[ii] # Update H = X.T X w + _axpy(n_features, w[ii], Q_ptr + ii * n_features, 1, + H_ptr, 1) + + # update the maximum absolute coefficient update + d_w_ii = fabs(w[ii] - w_ii) + if d_w_ii > d_w_max: + d_w_max = d_w_ii + + if fabs(w[ii]) > w_max: + w_max = fabs(w[ii]) + + if w_max == 0.0 or d_w_max / w_max < d_w_tol or n_iter == max_iter - 1: + # the biggest coordinate update of this iteration was smaller than + # the tolerance: check the duality gap as ultimate stopping + # criterion + + # q_dot_w = np.dot(w, q) + q_dot_w = _dot(n_features, w_ptr, 1, q_ptr, 1) + + for ii in range(n_features): + XtA[ii] = q[ii] - H[ii] - beta * w[ii] + if positive: + dual_norm_XtA = max(n_features, XtA_ptr) + else: + dual_norm_XtA = abs_max(n_features, XtA_ptr) + + # temp = np.sum(w * H) + tmp = 0.0 + for ii in range(n_features): + tmp += w[ii] * H[ii] + R_norm2 = y_norm2 + tmp - 2.0 * q_dot_w + + # w_norm2 = np.dot(w, w) + w_norm2 = _dot(n_features, &w[0], 1, &w[0], 1) + + if (dual_norm_XtA > alpha): + const = alpha / dual_norm_XtA + A_norm2 = R_norm2 * (const ** 2) + gap = 0.5 * (R_norm2 + A_norm2) + else: + const = 1.0 + gap = R_norm2 + + # The call to asum is equivalent to the L1 norm of w + gap += ( + alpha * _asum(n_features, &w[0], 1) + - const * y_norm2 + + const * q_dot_w + + 0.5 * beta * (1 + const ** 2) * w_norm2 + ) + + if gap < tol: + # return if we reached desired tolerance + break + + 
else: + # for/else, runs if for doesn't end with a `break` + with gil: + warnings.warn("Objective did not converge. You might want to " + "increase the number of iterations. Duality " + "gap: {}, tolerance: {}".format(gap, tol), + ConvergenceWarning) + + return np.asarray(w), gap, tol, n_iter + 1 + + +def enet_coordinate_descent_multi_task( + const floating[::1, :] W, + floating l1_reg, + floating l2_reg, + const floating[::1, :] X, + const floating[::1, :] Y, + unsigned int max_iter, + floating tol, + object rng, + bint random=0 +): + """Cython version of the coordinate descent algorithm + for Elastic-Net multi-task regression + + We minimize + + 0.5 * norm(Y - X W.T, 2)^2 + l1_reg ||W.T||_21 + 0.5 * l2_reg norm(W.T, 2)^2 + + Returns + ------- + W : ndarray of shape (n_tasks, n_features) + ElasticNet coefficients. + gap : float + Achieved dual gap. + tol : float + Equals input `tol` times `np.dot(y, y)`. The tolerance used for the dual gap. + n_iter : int + Number of coordinate descent iterations. 
+ """ + + if floating is float: + dtype = np.float32 + else: + dtype = np.float64 + + # get the data information into easy vars + cdef unsigned int n_samples = X.shape[0] + cdef unsigned int n_features = X.shape[1] + cdef unsigned int n_tasks = Y.shape[1] + + # to store XtA + cdef floating[:, ::1] XtA = np.zeros((n_features, n_tasks), dtype=dtype) + cdef floating XtA_axis1norm + cdef floating dual_norm_XtA + + # initial value of the residuals + cdef floating[::1, :] R = np.zeros((n_samples, n_tasks), dtype=dtype, order='F') + + cdef floating[::1] norm_cols_X = np.zeros(n_features, dtype=dtype) + cdef floating[::1] tmp = np.zeros(n_tasks, dtype=dtype) + cdef floating[::1] w_ii = np.zeros(n_tasks, dtype=dtype) + cdef floating d_w_max + cdef floating w_max + cdef floating d_w_ii + cdef floating nn + cdef floating W_ii_abs_max + cdef floating gap = tol + 1.0 + cdef floating d_w_tol = tol + cdef floating R_norm + cdef floating w_norm + cdef floating ry_sum + cdef floating l21_norm + cdef unsigned int ii + cdef unsigned int jj + cdef unsigned int n_iter = 0 + cdef unsigned int f_iter + cdef uint32_t rand_r_state_seed = rng.randint(0, RAND_R_MAX) + cdef uint32_t* rand_r_state = &rand_r_state_seed + + cdef const floating* X_ptr = &X[0, 0] + cdef const floating* Y_ptr = &Y[0, 0] + + if l1_reg == 0: + warnings.warn( + "Coordinate descent with l1_reg=0 may lead to unexpected" + " results and is discouraged." 
+ ) + + with nogil: + # norm_cols_X = (np.asarray(X) ** 2).sum(axis=0) + for ii in range(n_features): + norm_cols_X[ii] = _nrm2(n_samples, X_ptr + ii * n_samples, 1) ** 2 + + # R = Y - np.dot(X, W.T) + _copy(n_samples * n_tasks, Y_ptr, 1, &R[0, 0], 1) + for ii in range(n_features): + for jj in range(n_tasks): + if W[jj, ii] != 0: + _axpy(n_samples, -W[jj, ii], X_ptr + ii * n_samples, 1, + &R[0, jj], 1) + + # tol = tol * linalg.norm(Y, ord='fro') ** 2 + tol = tol * _nrm2(n_samples * n_tasks, Y_ptr, 1) ** 2 + + for n_iter in range(max_iter): + w_max = 0.0 + d_w_max = 0.0 + for f_iter in range(n_features): # Loop over coordinates + if random: + ii = rand_int(n_features, rand_r_state) + else: + ii = f_iter + + if norm_cols_X[ii] == 0.0: + continue + + # w_ii = W[:, ii] # Store previous value + _copy(n_tasks, &W[0, ii], 1, &w_ii[0], 1) + + # Using Numpy: + # R += np.dot(X[:, ii][:, None], w_ii[None, :]) # rank 1 update + # Using Blas Level2: + # _ger(RowMajor, n_samples, n_tasks, 1.0, + # &X[0, ii], 1, + # &w_ii[0], 1, &R[0, 0], n_tasks) + # Using Blas Level1 and for loop to avoid slower threads + # for such small vectors + for jj in range(n_tasks): + if w_ii[jj] != 0: + _axpy(n_samples, w_ii[jj], X_ptr + ii * n_samples, 1, + &R[0, jj], 1) + + # Using numpy: + # tmp = np.dot(X[:, ii][None, :], R).ravel() + # Using BLAS Level 2: + # _gemv(RowMajor, Trans, n_samples, n_tasks, 1.0, &R[0, 0], + # n_tasks, &X[0, ii], 1, 0.0, &tmp[0], 1) + # Using BLAS Level 1 (faster for small vectors like here): + for jj in range(n_tasks): + tmp[jj] = _dot(n_samples, X_ptr + ii * n_samples, 1, + &R[0, jj], 1) + + # nn = sqrt(np.sum(tmp ** 2)) + nn = _nrm2(n_tasks, &tmp[0], 1) + + # W[:, ii] = tmp * fmax(1. - l1_reg / nn, 0) / (norm_cols_X[ii] + l2_reg) + _copy(n_tasks, &tmp[0], 1, &W[0, ii], 1) + _scal(n_tasks, fmax(1. 
- l1_reg / nn, 0) / (norm_cols_X[ii] + l2_reg), + &W[0, ii], 1) + + # Using numpy: + # R -= np.dot(X[:, ii][:, None], W[:, ii][None, :]) + # Using BLAS Level 2: + # Update residual : rank 1 update + # _ger(RowMajor, n_samples, n_tasks, -1.0, + # &X[0, ii], 1, &W[0, ii], 1, + # &R[0, 0], n_tasks) + # Using BLAS Level 1 (faster for small vectors like here): + for jj in range(n_tasks): + if W[jj, ii] != 0: + _axpy(n_samples, -W[jj, ii], X_ptr + ii * n_samples, 1, + &R[0, jj], 1) + + # update the maximum absolute coefficient update + d_w_ii = diff_abs_max(n_tasks, &W[0, ii], &w_ii[0]) + + if d_w_ii > d_w_max: + d_w_max = d_w_ii + + W_ii_abs_max = abs_max(n_tasks, &W[0, ii]) + if W_ii_abs_max > w_max: + w_max = W_ii_abs_max + + if w_max == 0.0 or d_w_max / w_max < d_w_tol or n_iter == max_iter - 1: + # the biggest coordinate update of this iteration was smaller than + # the tolerance: check the duality gap as ultimate stopping + # criterion + + # XtA = np.dot(X.T, R) - l2_reg * W.T + for ii in range(n_features): + for jj in range(n_tasks): + XtA[ii, jj] = _dot( + n_samples, X_ptr + ii * n_samples, 1, &R[0, jj], 1 + ) - l2_reg * W[jj, ii] + + # dual_norm_XtA = np.max(np.sqrt(np.sum(XtA ** 2, axis=1))) + dual_norm_XtA = 0.0 + for ii in range(n_features): + # np.sqrt(np.sum(XtA ** 2, axis=1)) + XtA_axis1norm = _nrm2(n_tasks, &XtA[ii, 0], 1) + if XtA_axis1norm > dual_norm_XtA: + dual_norm_XtA = XtA_axis1norm + + # TODO: use squared L2 norm directly + # R_norm = linalg.norm(R, ord='fro') + # w_norm = linalg.norm(W, ord='fro') + R_norm = _nrm2(n_samples * n_tasks, &R[0, 0], 1) + w_norm = _nrm2(n_features * n_tasks, &W[0, 0], 1) + if (dual_norm_XtA > l1_reg): + const = l1_reg / dual_norm_XtA + A_norm = R_norm * const + gap = 0.5 * (R_norm ** 2 + A_norm ** 2) + else: + const = 1.0 + gap = R_norm ** 2 + + # ry_sum = np.sum(R * y) + ry_sum = _dot(n_samples * n_tasks, &R[0, 0], 1, &Y[0, 0], 1) + + # l21_norm = np.sqrt(np.sum(W ** 2, axis=0)).sum() + l21_norm = 0.0 + for ii in 
range(n_features): + l21_norm += _nrm2(n_tasks, &W[0, ii], 1) + + gap += ( + l1_reg * l21_norm + - const * ry_sum + + 0.5 * l2_reg * (1 + const ** 2) * (w_norm ** 2) + ) + + if gap <= tol: + # return if we reached desired tolerance + break + else: + # for/else, runs if for doesn't end with a `break` + with gil: + warnings.warn("Objective did not converge. You might want to " + "increase the number of iterations. Duality " + "gap: {}, tolerance: {}".format(gap, tol), + ConvergenceWarning) + + return np.asarray(W), gap, tol, n_iter + 1 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py new file mode 100644 index 0000000000000000000000000000000000000000..b98cf08925910c0120121ab5239fff18f834704a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py @@ -0,0 +1,3302 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import numbers +import sys +import warnings +from abc import ABC, abstractmethod +from functools import partial +from numbers import Integral, Real + +import numpy as np +from joblib import effective_n_jobs +from scipy import sparse + +from sklearn.utils import metadata_routing + +from ..base import MultiOutputMixin, RegressorMixin, _fit_context +from ..model_selection import check_cv +from ..utils import Bunch, check_array, check_scalar +from ..utils._metadata_requests import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + get_routing_for_object, +) +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import safe_sparse_dot +from ..utils.metadata_routing import ( + _routing_enabled, + process_routing, +) +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _check_sample_weight, + check_consistent_length, + check_is_fitted, + check_random_state, + 
column_or_1d, + has_fit_parameter, + validate_data, +) + +# mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast' +from . import _cd_fast as cd_fast # type: ignore +from ._base import LinearModel, _pre_fit, _preprocess_data + + +def _set_order(X, y, order="C"): + """Change the order of X and y if necessary. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + order : {None, 'C', 'F'} + If 'C', dense arrays are returned as C-ordered, sparse matrices in csr + format. If 'F', dense arrays are return as F-ordered, sparse matrices + in csc format. + + Returns + ------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data with guaranteed order. + + y : ndarray of shape (n_samples,) + Target values with guaranteed order. + """ + if order not in [None, "C", "F"]: + raise ValueError( + "Unknown value for order. Got {} instead of None, 'C' or 'F'.".format(order) + ) + sparse_X = sparse.issparse(X) + sparse_y = sparse.issparse(y) + if order is not None: + sparse_format = "csc" if order == "F" else "csr" + if sparse_X: + X = X.asformat(sparse_format, copy=False) + else: + X = np.asarray(X, order=order) + if sparse_y: + y = y.asformat(sparse_format) + else: + y = np.asarray(y, order=order) + return X, y + + +############################################################################### +# Paths functions + + +def _alpha_grid( + X, + y, + Xy=None, + l1_ratio=1.0, + fit_intercept=True, + eps=1e-3, + n_alphas=100, + copy_X=True, + sample_weight=None, +): + """Compute the grid of alpha values for elastic net parameter search + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. 
Pass directly as Fortran-contiguous data to avoid + unnecessary memory duplication + + y : ndarray of shape (n_samples,) or (n_samples, n_outputs) + Target values + + Xy : array-like of shape (n_features,) or (n_features, n_outputs),\ + default=None + Xy = np.dot(X.T, y) that can be precomputed. + + l1_ratio : float, default=1.0 + The elastic net mixing parameter, with ``0 < l1_ratio <= 1``. + For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not + supported) ``For l1_ratio = 1`` it is an L1 penalty. For + ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3`` + + n_alphas : int, default=100 + Number of alphas along the regularization path + + fit_intercept : bool, default=True + Whether to fit an intercept or not + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + sample_weight : ndarray of shape (n_samples,), default=None + """ + if l1_ratio == 0: + raise ValueError( + "Automatic alpha grid generation is not supported for" + " l1_ratio=0. Please supply a grid by providing " + "your estimator with the appropriate `alphas=` " + "argument." 
+ ) + if Xy is not None: + Xyw = Xy + else: + X, y, X_offset, _, _ = _preprocess_data( + X, + y, + fit_intercept=fit_intercept, + copy=copy_X, + sample_weight=sample_weight, + check_input=False, + ) + if sample_weight is not None: + if y.ndim > 1: + yw = y * sample_weight.reshape(-1, 1) + else: + yw = y * sample_weight + else: + yw = y + if sparse.issparse(X): + Xyw = safe_sparse_dot(X.T, yw, dense_output=True) - np.sum(yw) * X_offset + else: + Xyw = np.dot(X.T, yw) + + if Xyw.ndim == 1: + Xyw = Xyw[:, np.newaxis] + if sample_weight is not None: + n_samples = sample_weight.sum() + else: + n_samples = X.shape[0] + alpha_max = np.sqrt(np.sum(Xyw**2, axis=1)).max() / (n_samples * l1_ratio) + + if alpha_max <= np.finfo(np.float64).resolution: + return np.full(n_alphas, np.finfo(np.float64).resolution) + + return np.geomspace(alpha_max, alpha_max * eps, num=n_alphas) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like", "sparse matrix"], + "eps": [Interval(Real, 0, None, closed="neither")], + "n_alphas": [Interval(Integral, 1, None, closed="left")], + "alphas": ["array-like", None], + "precompute": [StrOptions({"auto"}), "boolean", "array-like"], + "Xy": ["array-like", None], + "copy_X": ["boolean"], + "coef_init": ["array-like", None], + "verbose": ["verbose"], + "return_n_iter": ["boolean"], + "positive": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def lasso_path( + X, + y, + *, + eps=1e-3, + n_alphas=100, + alphas=None, + precompute="auto", + Xy=None, + copy_X=True, + coef_init=None, + verbose=False, + return_n_iter=False, + positive=False, + **params, +): + """Compute Lasso path with coordinate descent. + + The Lasso optimization function varies for mono and multi-outputs. 
+ + For mono-output tasks it is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + For multi-output tasks it is:: + + (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data to avoid + unnecessary memory duplication. If ``y`` is mono-output then ``X`` + can be sparse. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_targets) + Target values. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If ``None`` alphas are set automatically. + + precompute : 'auto', bool or array-like of shape \ + (n_features, n_features), default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + Xy : array-like of shape (n_features,) or (n_features, n_targets),\ + default=None + Xy = np.dot(X.T, y) that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + coef_init : array-like of shape (n_features, ), default=None + The initial values of the coefficients. + + verbose : bool or int, default=False + Amount of verbosity. + + return_n_iter : bool, default=False + Whether to return the number of iterations or not. + + positive : bool, default=False + If set to True, forces coefficients to be positive. + (Only allowed when ``y.ndim == 1``). 
+ + **params : kwargs + Keyword arguments passed to the coordinate descent solver. + + Returns + ------- + alphas : ndarray of shape (n_alphas,) + The alphas along the path where models are computed. + + coefs : ndarray of shape (n_features, n_alphas) or \ + (n_targets, n_features, n_alphas) + Coefficients along the path. + + dual_gaps : ndarray of shape (n_alphas,) + The dual gaps at the end of the optimization for each alpha. + + n_iters : list of int + The number of iterations taken by the coordinate descent optimizer to + reach the specified tolerance for each alpha. + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso path using LARS + algorithm. + Lasso : The Lasso is a linear model that estimates sparse coefficients. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoCV : Lasso linear model with iterative fitting along a regularization + path. + LassoLarsCV : Cross-validated Lasso using the LARS algorithm. + sklearn.decomposition.sparse_encode : Estimator that can be used to + transform signals into sparse linear combination of atoms from a fixed. + + Notes + ----- + For an example, see + :ref:`examples/linear_model/plot_lasso_lasso_lars_elasticnet_path.py + `. + + To avoid unnecessary memory duplication the X argument of the fit method + should be directly passed as a Fortran-contiguous numpy array. + + Note that in certain cases, the Lars solver may be significantly + faster to implement this functionality. 
In particular, linear + interpolation can be used to retrieve model coefficients between the + values output by lars_path + + Examples + -------- + + Comparing lasso_path and lars_path with interpolation: + + >>> import numpy as np + >>> from sklearn.linear_model import lasso_path + >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T + >>> y = np.array([1, 2, 3.1]) + >>> # Use lasso_path to compute a coefficient path + >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5]) + >>> print(coef_path) + [[0. 0. 0.46874778] + [0.2159048 0.4425765 0.23689075]] + + >>> # Now use lars_path and 1D linear interpolation to compute the + >>> # same path + >>> from sklearn.linear_model import lars_path + >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso') + >>> from scipy import interpolate + >>> coef_path_continuous = interpolate.interp1d(alphas[::-1], + ... coef_path_lars[:, ::-1]) + >>> print(coef_path_continuous([5., 1., .5])) + [[0. 0. 0.46915237] + [0.2159048 0.4425765 0.23668876]] + """ + return enet_path( + X, + y, + l1_ratio=1.0, + eps=eps, + n_alphas=n_alphas, + alphas=alphas, + precompute=precompute, + Xy=Xy, + copy_X=copy_X, + coef_init=coef_init, + verbose=verbose, + positive=positive, + return_n_iter=return_n_iter, + **params, + ) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like", "sparse matrix"], + "l1_ratio": [Interval(Real, 0.0, 1.0, closed="both")], + "eps": [Interval(Real, 0.0, None, closed="neither")], + "n_alphas": [Interval(Integral, 1, None, closed="left")], + "alphas": ["array-like", None], + "precompute": [StrOptions({"auto"}), "boolean", "array-like"], + "Xy": ["array-like", None], + "copy_X": ["boolean"], + "coef_init": ["array-like", None], + "verbose": ["verbose"], + "return_n_iter": ["boolean"], + "positive": ["boolean"], + "check_input": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def enet_path( + X, + y, + *, + l1_ratio=0.5, + eps=1e-3, + n_alphas=100, + alphas=None, + 
precompute="auto", + Xy=None, + copy_X=True, + coef_init=None, + verbose=False, + return_n_iter=False, + positive=False, + check_input=True, + **params, +): + """Compute elastic net path with coordinate descent. + + The elastic net optimization function varies for mono and multi-outputs. + + For mono-output tasks it is:: + + 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 + + For multi-output tasks it is:: + + (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + + alpha * l1_ratio * ||W||_21 + + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data to avoid + unnecessary memory duplication. If ``y`` is mono-output then ``X`` + can be sparse. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_targets) + Target values. + + l1_ratio : float, default=0.5 + Number between 0 and 1 passed to elastic net (scaling between + l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If None alphas are set automatically. + + precompute : 'auto', bool or array-like of shape \ + (n_features, n_features), default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + Xy : array-like of shape (n_features,) or (n_features, n_targets),\ + default=None + Xy = np.dot(X.T, y) that can be precomputed. 
It is useful + only when the Gram matrix is precomputed. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + coef_init : array-like of shape (n_features, ), default=None + The initial values of the coefficients. + + verbose : bool or int, default=False + Amount of verbosity. + + return_n_iter : bool, default=False + Whether to return the number of iterations or not. + + positive : bool, default=False + If set to True, forces coefficients to be positive. + (Only allowed when ``y.ndim == 1``). + + check_input : bool, default=True + If set to False, the input validation checks are skipped (including the + Gram matrix when provided). It is assumed that they are handled + by the caller. + + **params : kwargs + Keyword arguments passed to the coordinate descent solver. + + Returns + ------- + alphas : ndarray of shape (n_alphas,) + The alphas along the path where models are computed. + + coefs : ndarray of shape (n_features, n_alphas) or \ + (n_targets, n_features, n_alphas) + Coefficients along the path. + + dual_gaps : ndarray of shape (n_alphas,) + The dual gaps at the end of the optimization for each alpha. + + n_iters : list of int + The number of iterations taken by the coordinate descent optimizer to + reach the specified tolerance for each alpha. + (Is returned when ``return_n_iter`` is set to True). + + See Also + -------- + MultiTaskElasticNet : Multi-task ElasticNet model trained with L1/L2 mixed-norm \ + as regularizer. + MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in cross-validation. + ElasticNet : Linear regression with combined L1 and L2 priors as regularizer. + ElasticNetCV : Elastic Net model with iterative fitting along a regularization path. + + Notes + ----- + For an example, see + :ref:`examples/linear_model/plot_lasso_lasso_lars_elasticnet_path.py + `. 
+ + Examples + -------- + >>> from sklearn.linear_model import enet_path + >>> from sklearn.datasets import make_regression + >>> X, y, true_coef = make_regression( + ... n_samples=100, n_features=5, n_informative=2, coef=True, random_state=0 + ... ) + >>> true_coef + array([ 0. , 0. , 0. , 97.9..., 45.7...]) + >>> alphas, estimated_coef, _ = enet_path(X, y, n_alphas=3) + >>> alphas.shape + (3,) + >>> estimated_coef + array([[ 0. , 0.78..., 0.56...], + [ 0. , 1.12..., 0.61...], + [-0. , -2.12..., -1.12...], + [ 0. , 23.04..., 88.93...], + [ 0. , 10.63..., 41.56...]]) + """ + X_offset_param = params.pop("X_offset", None) + X_scale_param = params.pop("X_scale", None) + sample_weight = params.pop("sample_weight", None) + tol = params.pop("tol", 1e-4) + max_iter = params.pop("max_iter", 1000) + random_state = params.pop("random_state", None) + selection = params.pop("selection", "cyclic") + + if len(params) > 0: + raise ValueError("Unexpected parameters in params", params.keys()) + + # We expect X and y to be already Fortran ordered when bypassing + # checks + if check_input: + X = check_array( + X, + accept_sparse="csc", + dtype=[np.float64, np.float32], + order="F", + copy=copy_X, + ) + y = check_array( + y, + accept_sparse="csc", + dtype=X.dtype.type, + order="F", + copy=False, + ensure_2d=False, + ) + if Xy is not None: + # Xy should be a 1d contiguous array or a 2D C ordered array + Xy = check_array( + Xy, dtype=X.dtype.type, order="C", copy=False, ensure_2d=False + ) + + n_samples, n_features = X.shape + + multi_output = False + if y.ndim != 1: + multi_output = True + n_targets = y.shape[1] + + if multi_output and positive: + raise ValueError("positive=True is not allowed for multi-output (y.ndim != 1)") + + # MultiTaskElasticNet does not support sparse matrices + if not multi_output and sparse.issparse(X): + if X_offset_param is not None: + # As sparse matrices are not actually centered we need this to be passed to + # the CD solver. 
+ X_sparse_scaling = X_offset_param / X_scale_param + X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype) + else: + X_sparse_scaling = np.zeros(n_features, dtype=X.dtype) + + # X should have been passed through _pre_fit already if function is called + # from ElasticNet.fit + if check_input: + X, y, _, _, _, precompute, Xy = _pre_fit( + X, + y, + Xy, + precompute, + fit_intercept=False, + copy=False, + check_input=check_input, + ) + if alphas is None: + # No need to normalize of fit_intercept: it has been done + # above + alphas = _alpha_grid( + X, + y, + Xy=Xy, + l1_ratio=l1_ratio, + fit_intercept=False, + eps=eps, + n_alphas=n_alphas, + copy_X=False, + ) + elif len(alphas) > 1: + alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered + + n_alphas = len(alphas) + dual_gaps = np.empty(n_alphas) + n_iters = [] + + rng = check_random_state(random_state) + if selection not in ["random", "cyclic"]: + raise ValueError("selection should be either random or cyclic.") + random = selection == "random" + + if not multi_output: + coefs = np.empty((n_features, n_alphas), dtype=X.dtype) + else: + coefs = np.empty((n_targets, n_features, n_alphas), dtype=X.dtype) + + if coef_init is None: + coef_ = np.zeros(coefs.shape[:-1], dtype=X.dtype, order="F") + else: + coef_ = np.asfortranarray(coef_init, dtype=X.dtype) + + for i, alpha in enumerate(alphas): + # account for n_samples scaling in objectives between here and cd_fast + l1_reg = alpha * l1_ratio * n_samples + l2_reg = alpha * (1.0 - l1_ratio) * n_samples + if not multi_output and sparse.issparse(X): + model = cd_fast.sparse_enet_coordinate_descent( + w=coef_, + alpha=l1_reg, + beta=l2_reg, + X_data=X.data, + X_indices=X.indices, + X_indptr=X.indptr, + y=y, + sample_weight=sample_weight, + X_mean=X_sparse_scaling, + max_iter=max_iter, + tol=tol, + rng=rng, + random=random, + positive=positive, + ) + elif multi_output: + model = cd_fast.enet_coordinate_descent_multi_task( + coef_, l1_reg, l2_reg, X, y, 
max_iter, tol, rng, random + ) + elif isinstance(precompute, np.ndarray): + # We expect precompute to be already Fortran ordered when bypassing + # checks + if check_input: + precompute = check_array(precompute, dtype=X.dtype.type, order="C") + model = cd_fast.enet_coordinate_descent_gram( + coef_, + l1_reg, + l2_reg, + precompute, + Xy, + y, + max_iter, + tol, + rng, + random, + positive, + ) + elif precompute is False: + model = cd_fast.enet_coordinate_descent( + coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random, positive + ) + else: + raise ValueError( + "Precompute should be one of True, False, 'auto' or array-like. Got %r" + % precompute + ) + coef_, dual_gap_, eps_, n_iter_ = model + coefs[..., i] = coef_ + # we correct the scale of the returned dual gap, as the objective + # in cd_fast is n_samples * the objective in this docstring. + dual_gaps[i] = dual_gap_ / n_samples + n_iters.append(n_iter_) + + if verbose: + if verbose > 2: + print(model) + elif verbose > 1: + print("Path: %03i out of %03i" % (i, n_alphas)) + else: + sys.stderr.write(".") + + if return_n_iter: + return alphas, coefs, dual_gaps, n_iters + return alphas, coefs, dual_gaps + + +############################################################################### +# ElasticNet model + + +class ElasticNet(MultiOutputMixin, RegressorMixin, LinearModel): + """Linear regression with combined L1 and L2 priors as regularizer. + + Minimizes the objective function:: + + 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 + + If you are interested in controlling the L1 and L2 penalty + separately, keep in mind that this is equivalent to:: + + a * ||w||_1 + 0.5 * b * ||w||_2^2 + + where:: + + alpha = a + b and l1_ratio = a / (a + b) + + The parameter l1_ratio corresponds to alpha in the glmnet R package while + alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio + = 1 is the lasso penalty. 
Currently, l1_ratio <= 0.01 is not reliable, + unless you supply your own sequence of alpha. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the penalty terms. Defaults to 1.0. + See the notes for the exact mathematical meaning of this + parameter. ``alpha = 0`` is equivalent to an ordinary least square, + solved by the :class:`LinearRegression` object. For numerical + reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised. + Given this, you should use the :class:`LinearRegression` object. + + l1_ratio : float, default=0.5 + The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For + ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it + is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a + combination of L1 and L2. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If ``False``, the + data is assumed to be already centered. + + precompute : bool or array-like of shape (n_features, n_features),\ + default=False + Whether to use a precomputed Gram matrix to speed up + calculations. The Gram matrix can also be passed as argument. + For sparse input this option is always ``False`` to preserve sparsity. + Check :ref:`an example on how to use a precomputed Gram Matrix in ElasticNet + ` + for details. + + max_iter : int, default=1000 + The maximum number of iterations. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``, see Notes below. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. 
+ + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the cost function formula). + + sparse_coef_ : sparse matrix of shape (n_features,) or \ + (n_targets, n_features) + Sparse representation of the `coef_`. + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. + + n_iter_ : list of int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance. + + dual_gap_ : float or ndarray of shape (n_targets,) + Given param alpha, the dual gaps at the end of the optimization, + same shape as each observation of y. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + ElasticNetCV : Elastic net model with best model selection by + cross-validation. + SGDRegressor : Implements elastic net regression with incremental training. + SGDClassifier : Implements logistic regression with elastic net penalty + (``SGDClassifier(loss="log_loss", penalty="elasticnet")``). 
+ + Notes + ----- + To avoid unnecessary memory duplication the X argument of the fit method + should be directly passed as a Fortran-contiguous numpy array. + + The precise stopping criteria based on `tol` are the following: First, check that + that maximum coordinate update, i.e. :math:`\\max_j |w_j^{new} - w_j^{old}|` + is smaller than `tol` times the maximum absolute coefficient, :math:`\\max_j |w_j|`. + If so, then additionally check whether the dual gap is smaller than `tol` times + :math:`||y||_2^2 / n_{\text{samples}}`. + + Examples + -------- + >>> from sklearn.linear_model import ElasticNet + >>> from sklearn.datasets import make_regression + + >>> X, y = make_regression(n_features=2, random_state=0) + >>> regr = ElasticNet(random_state=0) + >>> regr.fit(X, y) + ElasticNet(random_state=0) + >>> print(regr.coef_) + [18.83816048 64.55968825] + >>> print(regr.intercept_) + 1.451... + >>> print(regr.predict([[0, 0]])) + [1.451...] + """ + + # "check_input" is used for optimisation and isn't something to be passed + # around in a pipeline. 
+ __metadata_request__fit = {"check_input": metadata_routing.UNUSED} + + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0, None, closed="left")], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "fit_intercept": ["boolean"], + "precompute": ["boolean", "array-like"], + "max_iter": [Interval(Integral, 1, None, closed="left"), None], + "copy_X": ["boolean"], + "tol": [Interval(Real, 0, None, closed="left")], + "warm_start": ["boolean"], + "positive": ["boolean"], + "random_state": ["random_state"], + "selection": [StrOptions({"cyclic", "random"})], + } + + path = staticmethod(enet_path) + + def __init__( + self, + alpha=1.0, + *, + l1_ratio=0.5, + fit_intercept=True, + precompute=False, + max_iter=1000, + copy_X=True, + tol=1e-4, + warm_start=False, + positive=False, + random_state=None, + selection="cyclic", + ): + self.alpha = alpha + self.l1_ratio = l1_ratio + self.fit_intercept = fit_intercept + self.precompute = precompute + self.max_iter = max_iter + self.copy_X = copy_X + self.tol = tol + self.warm_start = warm_start + self.positive = positive + self.random_state = random_state + self.selection = selection + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, check_input=True): + """Fit model with coordinate descent. + + Parameters + ---------- + X : {ndarray, sparse matrix, sparse array} of (n_samples, n_features) + Data. + + Note that large sparse matrices and arrays requiring `int64` + indices are not accepted. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target. Will be cast to X's dtype if necessary. + + sample_weight : float or array-like of shape (n_samples,), default=None + Sample weights. Internally, the `sample_weight` vector will be + rescaled to sum to `n_samples`. + + .. versionadded:: 0.23 + + check_input : bool, default=True + Allow to bypass several input checking. + Don't use this parameter unless you know what you do. 
+ + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + Coordinate descent is an algorithm that considers each column of + data at a time hence it will automatically convert the X input + as a Fortran-contiguous numpy array if necessary. + + To avoid memory re-allocation it is advised to allocate the + initial data in memory directly using that format. + """ + if self.alpha == 0: + warnings.warn( + ( + "With alpha=0, this algorithm does not converge " + "well. You are advised to use the LinearRegression " + "estimator" + ), + stacklevel=2, + ) + + # Remember if X is copied + X_copied = False + # We expect X and y to be float64 or float32 Fortran ordered arrays + # when bypassing checks + if check_input: + X_copied = self.copy_X and self.fit_intercept + X, y = validate_data( + self, + X, + y, + accept_sparse="csc", + order="F", + dtype=[np.float64, np.float32], + force_writeable=True, + accept_large_sparse=False, + copy=X_copied, + multi_output=True, + y_numeric=True, + ) + y = check_array( + y, order="F", copy=False, dtype=X.dtype.type, ensure_2d=False + ) + + n_samples, n_features = X.shape + alpha = self.alpha + + if isinstance(sample_weight, numbers.Number): + sample_weight = None + if sample_weight is not None: + if check_input: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + # TLDR: Rescale sw to sum up to n_samples. + # Long: The objective function of Enet + # + # 1/2 * np.average(squared error, weights=sw) + # + alpha * penalty (1) + # + # is invariant under rescaling of sw. + # But enet_path coordinate descent minimizes + # + # 1/2 * sum(squared error) + alpha' * penalty (2) + # + # and therefore sets + # + # alpha' = n_samples * alpha (3) + # + # inside its function body, which results in objective (2) being + # equivalent to (1) in case of no sw. + # With sw, however, enet_path should set + # + # alpha' = sum(sw) * alpha (4) + # + # Therefore, we use the freedom of Eq. 
(1) to rescale sw before + # calling enet_path, i.e. + # + # sw *= n_samples / sum(sw) + # + # such that sum(sw) = n_samples. This way, (3) and (4) are the same. + sample_weight = sample_weight * (n_samples / np.sum(sample_weight)) + # Note: Alternatively, we could also have rescaled alpha instead + # of sample_weight: + # + # alpha *= np.sum(sample_weight) / n_samples + + # Ensure copying happens only once, don't do it again if done above. + # X and y will be rescaled if sample_weight is not None, order='F' + # ensures that the returned X and y are still F-contiguous. + should_copy = self.copy_X and not X_copied + X, y, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit( + X, + y, + None, + self.precompute, + fit_intercept=self.fit_intercept, + copy=should_copy, + check_input=check_input, + sample_weight=sample_weight, + ) + # coordinate descent needs F-ordered arrays and _pre_fit might have + # called _rescale_data + if check_input or sample_weight is not None: + X, y = _set_order(X, y, order="F") + if y.ndim == 1: + y = y[:, np.newaxis] + if Xy is not None and Xy.ndim == 1: + Xy = Xy[:, np.newaxis] + + n_targets = y.shape[1] + + if not self.warm_start or not hasattr(self, "coef_"): + coef_ = np.zeros((n_targets, n_features), dtype=X.dtype, order="F") + else: + coef_ = self.coef_ + if coef_.ndim == 1: + coef_ = coef_[np.newaxis, :] + + dual_gaps_ = np.zeros(n_targets, dtype=X.dtype) + self.n_iter_ = [] + + for k in range(n_targets): + if Xy is not None: + this_Xy = Xy[:, k] + else: + this_Xy = None + _, this_coef, this_dual_gap, this_iter = self.path( + X, + y[:, k], + l1_ratio=self.l1_ratio, + eps=None, + n_alphas=None, + alphas=[alpha], + precompute=precompute, + Xy=this_Xy, + copy_X=True, + coef_init=coef_[k], + verbose=False, + return_n_iter=True, + positive=self.positive, + check_input=False, + # from here on **params + tol=self.tol, + X_offset=X_offset, + X_scale=X_scale, + max_iter=self.max_iter, + random_state=self.random_state, + 
selection=self.selection, + sample_weight=sample_weight, + ) + coef_[k] = this_coef[:, 0] + dual_gaps_[k] = this_dual_gap[0] + self.n_iter_.append(this_iter[0]) + + if n_targets == 1: + self.n_iter_ = self.n_iter_[0] + self.coef_ = coef_[0] + self.dual_gap_ = dual_gaps_[0] + else: + self.coef_ = coef_ + self.dual_gap_ = dual_gaps_ + + self._set_intercept(X_offset, y_offset, X_scale) + + # check for finiteness of coefficients + if not all(np.isfinite(w).all() for w in [self.coef_, self.intercept_]): + raise ValueError( + "Coordinate descent iterations resulted in non-finite parameter" + " values. The input data may contain large values and need to" + " be preprocessed." + ) + + # return self for chaining fit and predict calls + return self + + @property + def sparse_coef_(self): + """Sparse representation of the fitted `coef_`.""" + return sparse.csr_matrix(self.coef_) + + def _decision_function(self, X): + """Decision function of the linear model. + + Parameters + ---------- + X : numpy array or scipy.sparse matrix of shape (n_samples, n_features) + + Returns + ------- + T : ndarray of shape (n_samples,) + The predicted decision function. + """ + check_is_fitted(self) + if sparse.issparse(X): + return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ + else: + return super()._decision_function(X) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + return tags + + +############################################################################### +# Lasso model + + +class Lasso(ElasticNet): + """Linear Model trained with L1 prior as regularizer (aka the Lasso). + + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + Technically the Lasso model is optimizing the same objective function as + the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty). + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the L1 term, controlling regularization + strength. `alpha` must be a non-negative float i.e. in `[0, inf)`. + + When `alpha = 0`, the objective is equivalent to ordinary least + squares, solved by the :class:`LinearRegression` object. For numerical + reasons, using `alpha = 0` with the `Lasso` object is not advised. + Instead, you should use the :class:`LinearRegression` object. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to False, no intercept will be used in calculations + (i.e. data is expected to be centered). + + precompute : bool or array-like of shape (n_features, n_features),\ + default=False + Whether to use a precomputed Gram matrix to speed up + calculations. The Gram matrix can also be passed as argument. + For sparse input this option is always ``False`` to preserve sparsity. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``, see Notes below. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. 
+ + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the cost function formula). + + dual_gap_ : float or ndarray of shape (n_targets,) + Given param alpha, the dual gaps at the end of the optimization, + same shape as each observation of y. + + sparse_coef_ : sparse matrix of shape (n_features, 1) or \ + (n_targets, n_features) + Readonly property derived from ``coef_``. + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. + + n_iter_ : int or list of int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Regularization path using LARS. + lasso_path : Regularization path using Lasso. + LassoLars : Lasso Path along the regularization parameter using LARS algorithm. + LassoCV : Lasso alpha parameter by cross-validation. + LassoLarsCV : Lasso least angle parameter algorithm by cross-validation. + sklearn.decomposition.sparse_encode : Sparse coding array estimator. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + To avoid unnecessary memory duplication the X argument of the fit method + should be directly passed as a Fortran-contiguous numpy array. + + Regularization improves the conditioning of the problem and + reduces the variance of the estimates. 
Larger values specify stronger + regularization. Alpha corresponds to `1 / (2C)` in other linear + models such as :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are + assumed to be specific to the targets. Hence they must correspond in + number. + + The precise stopping criteria based on `tol` are the following: First, check that + that maximum coordinate update, i.e. :math:`\\max_j |w_j^{new} - w_j^{old}|` + is smaller than `tol` times the maximum absolute coefficient, :math:`\\max_j |w_j|`. + If so, then additionally check whether the dual gap is smaller than `tol` times + :math:`||y||_2^2 / n_{\\text{samples}}`. + + The target can be a 2-dimensional array, resulting in the optimization of the + following objective:: + + (1 / (2 * n_samples)) * ||Y - XW||^2_F + alpha * ||W||_11 + + where :math:`||W||_{1,1}` is the sum of the magnitude of the matrix coefficients. + It should not be confused with :class:`~sklearn.linear_model.MultiTaskLasso` which + instead penalizes the :math:`L_{2,1}` norm of the coefficients, yielding row-wise + sparsity in the coefficients. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.Lasso(alpha=0.1) + >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) + Lasso(alpha=0.1) + >>> print(clf.coef_) + [0.85 0. ] + >>> print(clf.intercept_) + 0.15... 
+ """ + + _parameter_constraints: dict = { + **ElasticNet._parameter_constraints, + } + _parameter_constraints.pop("l1_ratio") + + path = staticmethod(enet_path) + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + precompute=False, + copy_X=True, + max_iter=1000, + tol=1e-4, + warm_start=False, + positive=False, + random_state=None, + selection="cyclic", + ): + super().__init__( + alpha=alpha, + l1_ratio=1.0, + fit_intercept=fit_intercept, + precompute=precompute, + copy_X=copy_X, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + positive=positive, + random_state=random_state, + selection=selection, + ) + + +############################################################################### +# Functions for CV with paths functions + + +def _path_residuals( + X, + y, + sample_weight, + train, + test, + fit_intercept, + path, + path_params, + alphas=None, + l1_ratio=1, + X_order=None, + dtype=None, +): + """Returns the MSE for the models computed by 'path'. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : None or array-like of shape (n_samples,) + Sample weights. + + train : list of indices + The indices of the train set. + + test : list of indices + The indices of the test set. + + path : callable + Function returning a list of models on the path. See + enet_path for an example of signature. + + path_params : dictionary + Parameters passed to the path function. + + alphas : array-like, default=None + Array of float that is used for cross-validation. If not + provided, computed using 'path'. + + l1_ratio : float, default=1 + float between 0 and 1 passed to ElasticNet (scaling between + l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an + L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0 + < l1_ratio < 1``, the penalty is a combination of L1 and L2. 
+ + X_order : {'F', 'C'}, default=None + The order of the arrays expected by the path function to + avoid memory copies. + + dtype : a numpy dtype, default=None + The dtype of the arrays expected by the path function to + avoid memory copies. + """ + X_train = X[train] + y_train = y[train] + X_test = X[test] + y_test = y[test] + if sample_weight is None: + sw_train, sw_test = None, None + else: + sw_train = sample_weight[train] + sw_test = sample_weight[test] + n_samples = X_train.shape[0] + # TLDR: Rescale sw_train to sum up to n_samples on the training set. + # See TLDR and long comment inside ElasticNet.fit. + sw_train *= n_samples / np.sum(sw_train) + # Note: Alternatively, we could also have rescaled alpha instead + # of sample_weight: + # + # alpha *= np.sum(sample_weight) / n_samples + + if not sparse.issparse(X): + for array, array_input in ( + (X_train, X), + (y_train, y), + (X_test, X), + (y_test, y), + ): + if array.base is not array_input and not array.flags["WRITEABLE"]: + # fancy indexing should create a writable copy but it doesn't + # for read-only memmaps (cf. numpy#14132). + array.setflags(write=True) + + if y.ndim == 1: + precompute = path_params["precompute"] + else: + # No Gram variant of multi-task exists right now. 
+ # Fall back to default enet_multitask + precompute = False + + X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit( + X_train, + y_train, + None, + precompute, + fit_intercept=fit_intercept, + copy=False, + sample_weight=sw_train, + ) + + path_params = path_params.copy() + path_params["Xy"] = Xy + path_params["X_offset"] = X_offset + path_params["X_scale"] = X_scale + path_params["precompute"] = precompute + path_params["copy_X"] = False + path_params["alphas"] = alphas + # needed for sparse cd solver + path_params["sample_weight"] = sw_train + + if "l1_ratio" in path_params: + path_params["l1_ratio"] = l1_ratio + + # Do the ordering and type casting here, as if it is done in the path, + # X is copied and a reference is kept here + X_train = check_array(X_train, accept_sparse="csc", dtype=dtype, order=X_order) + alphas, coefs, _ = path(X_train, y_train, **path_params) + del X_train, y_train + + if y.ndim == 1: + # Doing this so that it becomes coherent with multioutput. 
+ coefs = coefs[np.newaxis, :, :] + y_offset = np.atleast_1d(y_offset) + y_test = y_test[:, np.newaxis] + + intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs) + X_test_coefs = safe_sparse_dot(X_test, coefs) + residues = X_test_coefs - y_test[:, :, np.newaxis] + residues += intercepts + if sample_weight is None: + this_mse = (residues**2).mean(axis=0) + else: + this_mse = np.average(residues**2, weights=sw_test, axis=0) + + return this_mse.mean(axis=0) + + +class LinearModelCV(MultiOutputMixin, LinearModel, ABC): + """Base class for iterative model fitting along a regularization path.""" + + _parameter_constraints: dict = { + "eps": [Interval(Real, 0, None, closed="neither")], + "n_alphas": [Interval(Integral, 1, None, closed="left")], + "alphas": ["array-like", None], + "fit_intercept": ["boolean"], + "precompute": [StrOptions({"auto"}), "array-like", "boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "copy_X": ["boolean"], + "cv": ["cv_object"], + "verbose": ["verbose"], + "n_jobs": [Integral, None], + "positive": ["boolean"], + "random_state": ["random_state"], + "selection": [StrOptions({"cyclic", "random"})], + } + + @abstractmethod + def __init__( + self, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + precompute="auto", + max_iter=1000, + tol=1e-4, + copy_X=True, + cv=None, + verbose=False, + n_jobs=None, + positive=False, + random_state=None, + selection="cyclic", + ): + self.eps = eps + self.n_alphas = n_alphas + self.alphas = alphas + self.fit_intercept = fit_intercept + self.precompute = precompute + self.max_iter = max_iter + self.tol = tol + self.copy_X = copy_X + self.cv = cv + self.verbose = verbose + self.n_jobs = n_jobs + self.positive = positive + self.random_state = random_state + self.selection = selection + + @abstractmethod + def _get_estimator(self): + """Model to be fitted after the best alpha has been determined.""" + + @abstractmethod + 
def _is_multitask(self): + """Bool indicating if class is meant for multidimensional target.""" + + @staticmethod + @abstractmethod + def path(X, y, **kwargs): + """Compute path with coordinate descent.""" + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, **params): + """Fit linear model with coordinate descent. + + Fit is on grid of alphas and best alpha estimated by cross-validation. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data + to avoid unnecessary memory duplication. If y is mono-output, + X can be sparse. Note that large sparse matrices and arrays + requiring `int64` indices are not accepted. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : float or array-like of shape (n_samples,), \ + default=None + Sample weights used for fitting and evaluation of the weighted + mean squared error of each cv-fold. Note that the cross validated + MSE that is finally used to find the best model is the unweighted + mean over the (weighted) MSEs of each test fold. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of fitted model. + """ + _raise_for_params(params, self, "fit") + + # This makes sure that there is no duplication in memory. 
+ # Dealing right with copy_X is important in the following: + # Multiple functions touch X and subsamples of X and can induce a + # lot of duplication of memory + copy_X = self.copy_X and self.fit_intercept + + check_y_params = dict( + copy=False, dtype=[np.float64, np.float32], ensure_2d=False + ) + if isinstance(X, np.ndarray) or sparse.issparse(X): + # Keep a reference to X + reference_to_old_X = X + # Let us not impose fortran ordering so far: it is + # not useful for the cross-validation loop and will be done + # by the model fitting itself + + # Need to validate separately here. + # We can't pass multi_output=True because that would allow y to be + # csr. We also want to allow y to be 64 or 32 but check_X_y only + # allows to convert for 64. + check_X_params = dict( + accept_sparse="csc", + dtype=[np.float64, np.float32], + force_writeable=True, + copy=False, + accept_large_sparse=False, + ) + X, y = validate_data( + self, X, y, validate_separately=(check_X_params, check_y_params) + ) + if sparse.issparse(X): + if hasattr(reference_to_old_X, "data") and not np.may_share_memory( + reference_to_old_X.data, X.data + ): + # X is a sparse matrix and has been copied + copy_X = False + elif not np.may_share_memory(reference_to_old_X, X): + # X has been copied + copy_X = False + del reference_to_old_X + else: + # Need to validate separately here. + # We can't pass multi_output=True because that would allow y to be + # csr. We also want to allow y to be 64 or 32 but check_X_y only + # allows to convert for 64. 
+ check_X_params = dict( + accept_sparse="csc", + dtype=[np.float64, np.float32], + order="F", + force_writeable=True, + copy=copy_X, + ) + X, y = validate_data( + self, X, y, validate_separately=(check_X_params, check_y_params) + ) + copy_X = False + + check_consistent_length(X, y) + + if not self._is_multitask(): + if y.ndim > 1 and y.shape[1] > 1: + raise ValueError( + "For multi-task outputs, use MultiTask%s" % self.__class__.__name__ + ) + y = column_or_1d(y, warn=True) + else: + if sparse.issparse(X): + raise TypeError("X should be dense but a sparse matrix waspassed") + elif y.ndim == 1: + raise ValueError( + "For mono-task outputs, use %sCV" % self.__class__.__name__[9:] + ) + + if isinstance(sample_weight, numbers.Number): + sample_weight = None + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + model = self._get_estimator() + + # All LinearModelCV parameters except 'cv' are acceptable + path_params = self.get_params() + + # Pop `intercept` that is not parameter of the path function + path_params.pop("fit_intercept", None) + + if "l1_ratio" in path_params: + l1_ratios = np.atleast_1d(path_params["l1_ratio"]) + # For the first path, we need to set l1_ratio + path_params["l1_ratio"] = l1_ratios[0] + else: + l1_ratios = [ + 1, + ] + path_params.pop("cv", None) + path_params.pop("n_jobs", None) + + alphas = self.alphas + n_l1_ratio = len(l1_ratios) + + check_scalar_alpha = partial( + check_scalar, + target_type=Real, + min_val=0.0, + include_boundaries="left", + ) + + if alphas is None: + alphas = [ + _alpha_grid( + X, + y, + l1_ratio=l1_ratio, + fit_intercept=self.fit_intercept, + eps=self.eps, + n_alphas=self.n_alphas, + copy_X=self.copy_X, + sample_weight=sample_weight, + ) + for l1_ratio in l1_ratios + ] + else: + # Making sure alphas entries are scalars. + for index, alpha in enumerate(alphas): + check_scalar_alpha(alpha, f"alphas[{index}]") + # Making sure alphas is properly ordered. 
+ alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1)) + + # We want n_alphas to be the number of alphas used for each l1_ratio. + n_alphas = len(alphas[0]) + path_params.update({"n_alphas": n_alphas}) + + path_params["copy_X"] = copy_X + # We are not computing in parallel, we can modify X + # inplace in the folds + if effective_n_jobs(self.n_jobs) > 1: + path_params["copy_X"] = False + + # init cross-validation generator + cv = check_cv(self.cv) + + if _routing_enabled(): + splitter_supports_sample_weight = get_routing_for_object(cv).consumes( + method="split", params=["sample_weight"] + ) + if ( + sample_weight is not None + and not splitter_supports_sample_weight + and not has_fit_parameter(self, "sample_weight") + ): + raise ValueError( + "The CV splitter and underlying estimator do not support" + " sample weights." + ) + + if splitter_supports_sample_weight: + params["sample_weight"] = sample_weight + + routed_params = process_routing(self, "fit", **params) + + if sample_weight is not None and not has_fit_parameter( + self, "sample_weight" + ): + # MultiTaskElasticNetCV does not (yet) support sample_weight + sample_weight = None + else: + routed_params = Bunch() + routed_params.splitter = Bunch(split=Bunch()) + + # Compute path for all folds and compute MSE to get the best alpha + folds = list(cv.split(X, y, **routed_params.splitter.split)) + best_mse = np.inf + + # We do a double for loop folded in one, in order to be able to + # iterate in parallel on l1_ratio and folds + jobs = ( + delayed(_path_residuals)( + X, + y, + sample_weight, + train, + test, + self.fit_intercept, + self.path, + path_params, + alphas=this_alphas, + l1_ratio=this_l1_ratio, + X_order="F", + dtype=X.dtype.type, + ) + for this_l1_ratio, this_alphas in zip(l1_ratios, alphas) + for train, test in folds + ) + mse_paths = Parallel( + n_jobs=self.n_jobs, + verbose=self.verbose, + prefer="threads", + )(jobs) + mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1)) + # The mean is 
computed over folds. + mean_mse = np.mean(mse_paths, axis=1) + self.mse_path_ = np.squeeze(np.moveaxis(mse_paths, 2, 1)) + for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas, mean_mse): + i_best_alpha = np.argmin(mse_alphas) + this_best_mse = mse_alphas[i_best_alpha] + if this_best_mse < best_mse: + best_alpha = l1_alphas[i_best_alpha] + best_l1_ratio = l1_ratio + best_mse = this_best_mse + + self.l1_ratio_ = best_l1_ratio + self.alpha_ = best_alpha + if self.alphas is None: + self.alphas_ = np.asarray(alphas) + if n_l1_ratio == 1: + self.alphas_ = self.alphas_[0] + # Remove duplicate alphas in case alphas is provided. + else: + self.alphas_ = np.asarray(alphas[0]) + + # Refit the model with the parameters selected + common_params = { + name: value + for name, value in self.get_params().items() + if name in model.get_params() + } + model.set_params(**common_params) + model.alpha = best_alpha + model.l1_ratio = best_l1_ratio + model.copy_X = copy_X + precompute = getattr(self, "precompute", None) + if isinstance(precompute, str) and precompute == "auto": + model.precompute = False + + if sample_weight is None: + # MultiTaskElasticNetCV does not (yet) support sample_weight, even + # not sample_weight=None. + model.fit(X, y) + else: + model.fit(X, y, sample_weight=sample_weight) + if not hasattr(self, "l1_ratio"): + del self.l1_ratio_ + self.coef_ = model.coef_ + self.intercept_ = model.intercept_ + self.dual_gap_ = model.dual_gap_ + self.n_iter_ = model.n_iter_ + return self + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. 
+ """ + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add( + splitter=check_cv(self.cv), + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + ) + return router + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + multitask = self._is_multitask() + tags.input_tags.sparse = not multitask + tags.target_tags.multi_output = multitask + return tags + + +class LassoCV(RegressorMixin, LinearModelCV): + """Lasso linear model with iterative fitting along a regularization path. + + See glossary entry for :term:`cross-validation estimator`. + + The best model is selected by cross-validation. + + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If ``None`` alphas are set automatically. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + precompute : 'auto', bool or array-like of shape \ + (n_features, n_features), default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. 
+ + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - int, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + verbose : bool or int, default=False + Amount of verbosity. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + positive : bool, default=False + If positive, restrict regression coefficients to be positive. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + alpha_ : float + The amount of penalization chosen by cross validation. + + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the cost function formula). + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. 
+ + mse_path_ : ndarray of shape (n_alphas, n_folds) + Mean square error for the test set on each fold, varying alpha. + + alphas_ : ndarray of shape (n_alphas,) + The grid of alphas used for fitting. + + dual_gap_ : float or ndarray of shape (n_targets,) + The dual gap at the end of the optimization for the optimal alpha + (``alpha_``). + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso path using LARS + algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : The Lasso is a linear model that estimates sparse coefficients. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoCV : Lasso linear model with iterative fitting along a regularization + path. + LassoLarsCV : Cross-validated Lasso using the LARS algorithm. + + Notes + ----- + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + To avoid unnecessary memory duplication the `X` argument of the `fit` + method should be directly passed as a Fortran-contiguous numpy array. + + For an example, see :ref:`examples/linear_model/plot_lasso_model_selection.py + `. + + :class:`LassoCV` leads to different results than a hyperparameter + search using :class:`~sklearn.model_selection.GridSearchCV` with a + :class:`Lasso` model. In :class:`LassoCV`, a model for a given + penalty `alpha` is warm started using the coefficients of the + closest model (trained at the previous iteration) on the + regularization path. 
It tends to speed up the hyperparameter + search. + + Examples + -------- + >>> from sklearn.linear_model import LassoCV + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(noise=4, random_state=0) + >>> reg = LassoCV(cv=5, random_state=0).fit(X, y) + >>> reg.score(X, y) + 0.9993... + >>> reg.predict(X[:1,]) + array([-78.4951...]) + """ + + path = staticmethod(lasso_path) + + def __init__( + self, + *, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + precompute="auto", + max_iter=1000, + tol=1e-4, + copy_X=True, + cv=None, + verbose=False, + n_jobs=None, + positive=False, + random_state=None, + selection="cyclic", + ): + super().__init__( + eps=eps, + n_alphas=n_alphas, + alphas=alphas, + fit_intercept=fit_intercept, + precompute=precompute, + max_iter=max_iter, + tol=tol, + copy_X=copy_X, + cv=cv, + verbose=verbose, + n_jobs=n_jobs, + positive=positive, + random_state=random_state, + selection=selection, + ) + + def _get_estimator(self): + return Lasso() + + def _is_multitask(self): + return False + + def fit(self, X, y, sample_weight=None, **params): + """Fit Lasso model with coordinate descent. + + Fit is on grid of alphas and best alpha estimated by cross-validation. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data + to avoid unnecessary memory duplication. If y is mono-output, + X can be sparse. Note that large sparse matrices and arrays + requiring `int64` indices are not accepted. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : float or array-like of shape (n_samples,), \ + default=None + Sample weights used for fitting and evaluation of the weighted + mean squared error of each cv-fold. Note that the cross validated + MSE that is finally used to find the best model is the unweighted + mean over the (weighted) MSEs of each test fold. 
+ + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of fitted model. + """ + return super().fit(X, y, sample_weight=sample_weight, **params) + + +class ElasticNetCV(RegressorMixin, LinearModelCV): + """Elastic Net model with iterative fitting along a regularization path. + + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + l1_ratio : float or list of float, default=0.5 + Float between 0 and 1 passed to ElasticNet (scaling between + l1 and l2 penalties). For ``l1_ratio = 0`` + the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. + For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2 + This parameter can be a list, in which case the different + values are tested by cross-validation and the one giving the best + prediction score is used. Note that a good choice of list of + values for l1_ratio is often to put more values close to 1 + (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7, + .9, .95, .99, 1]``. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path, used for each l1_ratio. + + alphas : array-like, default=None + List of alphas where to compute the models. + If None alphas are set automatically. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). 
+ + precompute : 'auto', bool or array-like of shape \ + (n_features, n_features), default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - int, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + verbose : bool or int, default=0 + Amount of verbosity. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. 
+ + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + alpha_ : float + The amount of penalization chosen by cross validation. + + l1_ratio_ : float + The compromise between l1 and l2 penalization chosen by + cross validation. + + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the cost function formula). + + intercept_ : float or ndarray of shape (n_targets, n_features) + Independent term in the decision function. + + mse_path_ : ndarray of shape (n_l1_ratio, n_alpha, n_folds) + Mean square error for the test set on each fold, varying l1_ratio and + alpha. + + alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas) + The grid of alphas used for fitting, for each l1_ratio. + + dual_gap_ : float + The dual gaps at the end of the optimization for the optimal alpha. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + enet_path : Compute elastic net path with coordinate descent. + ElasticNet : Linear regression with combined L1 and L2 priors as regularizer. + + Notes + ----- + In `fit`, once the best parameters `l1_ratio` and `alpha` are found through + cross-validation, the model is fit again using the entire training set. 
+ + To avoid unnecessary memory duplication the `X` argument of the `fit` + method should be directly passed as a Fortran-contiguous numpy array. + + The parameter `l1_ratio` corresponds to alpha in the glmnet R package + while alpha corresponds to the lambda parameter in glmnet. + More specifically, the optimization objective is:: + + 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 + + If you are interested in controlling the L1 and L2 penalty + separately, keep in mind that this is equivalent to:: + + a * L1 + b * L2 + + for:: + + alpha = a + b and l1_ratio = a / (a + b). + + For an example, see + :ref:`examples/linear_model/plot_lasso_model_selection.py + `. + + Examples + -------- + >>> from sklearn.linear_model import ElasticNetCV + >>> from sklearn.datasets import make_regression + + >>> X, y = make_regression(n_features=2, random_state=0) + >>> regr = ElasticNetCV(cv=5, random_state=0) + >>> regr.fit(X, y) + ElasticNetCV(cv=5, random_state=0) + >>> print(regr.alpha_) + 0.199... + >>> print(regr.intercept_) + 0.398... + >>> print(regr.predict([[0, 0]])) + [0.398...] 
+ """ + + _parameter_constraints: dict = { + **LinearModelCV._parameter_constraints, + "l1_ratio": [Interval(Real, 0, 1, closed="both"), "array-like"], + } + + path = staticmethod(enet_path) + + def __init__( + self, + *, + l1_ratio=0.5, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + precompute="auto", + max_iter=1000, + tol=1e-4, + cv=None, + copy_X=True, + verbose=0, + n_jobs=None, + positive=False, + random_state=None, + selection="cyclic", + ): + self.l1_ratio = l1_ratio + self.eps = eps + self.n_alphas = n_alphas + self.alphas = alphas + self.fit_intercept = fit_intercept + self.precompute = precompute + self.max_iter = max_iter + self.tol = tol + self.cv = cv + self.copy_X = copy_X + self.verbose = verbose + self.n_jobs = n_jobs + self.positive = positive + self.random_state = random_state + self.selection = selection + + def _get_estimator(self): + return ElasticNet() + + def _is_multitask(self): + return False + + def fit(self, X, y, sample_weight=None, **params): + """Fit ElasticNet model with coordinate descent. + + Fit is on grid of alphas and best alpha estimated by cross-validation. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. Pass directly as Fortran-contiguous data + to avoid unnecessary memory duplication. If y is mono-output, + X can be sparse. Note that large sparse matrices and arrays + requiring `int64` indices are not accepted. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : float or array-like of shape (n_samples,), \ + default=None + Sample weights used for fitting and evaluation of the weighted + mean squared error of each cv-fold. Note that the cross validated + MSE that is finally used to find the best model is the unweighted + mean over the (weighted) MSEs of each test fold. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. 
versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of fitted model. + """ + return super().fit(X, y, sample_weight=sample_weight, **params) + + +############################################################################### +# Multi Task ElasticNet and Lasso models (with joint feature selection) + + +class MultiTaskElasticNet(Lasso): + """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer. + + The optimization objective for MultiTaskElasticNet is:: + + (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + + alpha * l1_ratio * ||W||_21 + + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 + + Where:: + + ||W||_21 = sum_i sqrt(sum_j W_ij ^ 2) + + i.e. the sum of norms of each row. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the L1/L2 term. Defaults to 1.0. + + l1_ratio : float, default=0.5 + The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. + For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it + is an L2 penalty. + For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. 
+ + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + intercept_ : ndarray of shape (n_targets,) + Independent term in decision function. + + coef_ : ndarray of shape (n_targets, n_features) + Parameter vector (W in the cost function formula). If a 1D y is + passed in at fit (non multi-task usage), ``coef_`` is then a 1D array. + Note that ``coef_`` stores the transpose of ``W``, ``W.T``. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance. + + dual_gap_ : float + The dual gaps at the end of the optimization. + + eps_ : float + The tolerance scaled scaled by the variance of the target `y`. + + sparse_coef_ : sparse matrix of shape (n_features,) or \ + (n_targets, n_features) + Sparse representation of the `coef_`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in + cross-validation. 
+ ElasticNet : Linear regression with combined L1 and L2 priors as regularizer. + MultiTaskLasso : Multi-task Lasso model trained with L1/L2 + mixed-norm as regularizer. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + To avoid unnecessary memory duplication the X and y arguments of the fit + method should be directly passed as Fortran-contiguous numpy arrays. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1) + >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]]) + MultiTaskElasticNet(alpha=0.1) + >>> print(clf.coef_) + [[0.45663524 0.45612256] + [0.45663524 0.45612256]] + >>> print(clf.intercept_) + [0.0872422 0.0872422] + """ + + _parameter_constraints: dict = { + **ElasticNet._parameter_constraints, + } + for param in ("precompute", "positive"): + _parameter_constraints.pop(param) + + def __init__( + self, + alpha=1.0, + *, + l1_ratio=0.5, + fit_intercept=True, + copy_X=True, + max_iter=1000, + tol=1e-4, + warm_start=False, + random_state=None, + selection="cyclic", + ): + self.l1_ratio = l1_ratio + self.alpha = alpha + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.copy_X = copy_X + self.tol = tol + self.warm_start = warm_start + self.random_state = random_state + self.selection = selection + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit MultiTaskElasticNet model with coordinate descent. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data. + y : ndarray of shape (n_samples, n_targets) + Target. Will be cast to X's dtype if necessary. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + Coordinate descent is an algorithm that considers each column of + data at a time hence it will automatically convert the X input + as a Fortran-contiguous numpy array if necessary. 
+ + To avoid memory re-allocation it is advised to allocate the + initial data in memory directly using that format. + """ + # Need to validate separately here. + # We can't pass multi_output=True because that would allow y to be csr. + check_X_params = dict( + dtype=[np.float64, np.float32], + order="F", + force_writeable=True, + copy=self.copy_X and self.fit_intercept, + ) + check_y_params = dict(ensure_2d=False, order="F") + X, y = validate_data( + self, X, y, validate_separately=(check_X_params, check_y_params) + ) + check_consistent_length(X, y) + y = y.astype(X.dtype) + + if hasattr(self, "l1_ratio"): + model_str = "ElasticNet" + else: + model_str = "Lasso" + if y.ndim == 1: + raise ValueError("For mono-task outputs, use %s" % model_str) + + n_samples, n_features = X.shape + n_targets = y.shape[1] + + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, y, fit_intercept=self.fit_intercept, copy=False + ) + + if not self.warm_start or not hasattr(self, "coef_"): + self.coef_ = np.zeros( + (n_targets, n_features), dtype=X.dtype.type, order="F" + ) + + l1_reg = self.alpha * self.l1_ratio * n_samples + l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples + + self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory + + random = self.selection == "random" + + ( + self.coef_, + self.dual_gap_, + self.eps_, + self.n_iter_, + ) = cd_fast.enet_coordinate_descent_multi_task( + self.coef_, + l1_reg, + l2_reg, + X, + y, + self.max_iter, + self.tol, + check_random_state(self.random_state), + random, + ) + + # account for different objective scaling here and in cd_fast + self.dual_gap_ /= n_samples + + self._set_intercept(X_offset, y_offset, X_scale) + + # return self for chaining fit and predict calls + return self + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = False + tags.target_tags.multi_output = True + tags.target_tags.single_output = False + return tags + + +class 
MultiTaskLasso(MultiTaskElasticNet): + """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer. + + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the L1/L2 term. Defaults to 1.0. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + coef_ : ndarray of shape (n_targets, n_features) + Parameter vector (W in the cost function formula). 
+ Note that ``coef_`` stores the transpose of ``W``, ``W.T``. + + intercept_ : ndarray of shape (n_targets,) + Independent term in decision function. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance. + + dual_gap_ : ndarray of shape (n_alphas,) + The dual gaps at the end of the optimization for each alpha. + + eps_ : float + The tolerance scaled scaled by the variance of the target `y`. + + sparse_coef_ : sparse matrix of shape (n_features,) or \ + (n_targets, n_features) + Sparse representation of the `coef_`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Lasso: Linear Model trained with L1 prior as regularizer (aka the Lasso). + MultiTaskLassoCV: Multi-task L1 regularized linear model with built-in + cross-validation. + MultiTaskElasticNetCV: Multi-task L1/L2 ElasticNet with built-in cross-validation. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + To avoid unnecessary memory duplication the X and y arguments of the fit + method should be directly passed as Fortran-contiguous numpy arrays. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.MultiTaskLasso(alpha=0.1) + >>> clf.fit([[0, 1], [1, 2], [2, 4]], [[0, 0], [1, 1], [2, 3]]) + MultiTaskLasso(alpha=0.1) + >>> print(clf.coef_) + [[0. 0.60809415] + [0. 
0.94592424]] + >>> print(clf.intercept_) + [-0.41888636 -0.87382323] + """ + + _parameter_constraints: dict = { + **MultiTaskElasticNet._parameter_constraints, + } + _parameter_constraints.pop("l1_ratio") + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + copy_X=True, + max_iter=1000, + tol=1e-4, + warm_start=False, + random_state=None, + selection="cyclic", + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.copy_X = copy_X + self.tol = tol + self.warm_start = warm_start + self.l1_ratio = 1.0 + self.random_state = random_state + self.selection = selection + + +class MultiTaskElasticNetCV(RegressorMixin, LinearModelCV): + """Multi-task L1/L2 ElasticNet with built-in cross-validation. + + See glossary entry for :term:`cross-validation estimator`. + + The optimization objective for MultiTaskElasticNet is:: + + (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + + alpha * l1_ratio * ||W||_21 + + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.15 + + Parameters + ---------- + l1_ratio : float or list of float, default=0.5 + The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. + For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it + is an L2 penalty. + For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. + This parameter can be a list, in which case the different + values are tested by cross-validation and the one giving the best + prediction score is used. Note that a good choice of list of + values for l1_ratio is often to put more values close to 1 + (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7, + .9, .95, .99, 1]``. + + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. 
+ + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If not provided, set automatically. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + max_iter : int, default=1000 + The maximum number of iterations. + + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - int, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + verbose : bool or int, default=0 + Amount of verbosity. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. Note that this is + used only if multiple values for l1_ratio are given. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. 
+ Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + intercept_ : ndarray of shape (n_targets,) + Independent term in decision function. + + coef_ : ndarray of shape (n_targets, n_features) + Parameter vector (W in the cost function formula). + Note that ``coef_`` stores the transpose of ``W``, ``W.T``. + + alpha_ : float + The amount of penalization chosen by cross validation. + + mse_path_ : ndarray of shape (n_alphas, n_folds) or \ + (n_l1_ratio, n_alphas, n_folds) + Mean square error for the test set on each fold, varying alpha. + + alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas) + The grid of alphas used for fitting, for each l1_ratio. + + l1_ratio_ : float + Best l1_ratio obtained by cross-validation. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance for the optimal alpha. + + dual_gap_ : float + The dual gap at the end of the optimization for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MultiTaskElasticNet : Multi-task L1/L2 ElasticNet with built-in cross-validation. + ElasticNetCV : Elastic net model with best model selection by + cross-validation. + MultiTaskLassoCV : Multi-task Lasso model trained with L1 norm + as regularizer and built-in cross-validation. 
+ + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + In `fit`, once the best parameters `l1_ratio` and `alpha` are found through + cross-validation, the model is fit again using the entire training set. + + To avoid unnecessary memory duplication the `X` and `y` arguments of the + `fit` method should be directly passed as Fortran-contiguous numpy arrays. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.MultiTaskElasticNetCV(cv=3) + >>> clf.fit([[0,0], [1, 1], [2, 2]], + ... [[0, 0], [1, 1], [2, 2]]) + MultiTaskElasticNetCV(cv=3) + >>> print(clf.coef_) + [[0.52875032 0.46958558] + [0.52875032 0.46958558]] + >>> print(clf.intercept_) + [0.00166409 0.00166409] + """ + + _parameter_constraints: dict = { + **LinearModelCV._parameter_constraints, + "l1_ratio": [Interval(Real, 0, 1, closed="both"), "array-like"], + } + _parameter_constraints.pop("precompute") + _parameter_constraints.pop("positive") + + path = staticmethod(enet_path) + + def __init__( + self, + *, + l1_ratio=0.5, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + max_iter=1000, + tol=1e-4, + cv=None, + copy_X=True, + verbose=0, + n_jobs=None, + random_state=None, + selection="cyclic", + ): + self.l1_ratio = l1_ratio + self.eps = eps + self.n_alphas = n_alphas + self.alphas = alphas + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.tol = tol + self.cv = cv + self.copy_X = copy_X + self.verbose = verbose + self.n_jobs = n_jobs + self.random_state = random_state + self.selection = selection + + def _get_estimator(self): + return MultiTaskElasticNet() + + def _is_multitask(self): + return True + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.target_tags.single_output = False + return tags + + # This is necessary as LinearModelCV now supports sample_weight while + # MultiTaskElasticNetCV does not (yet). 
+ def fit(self, X, y, **params): + """Fit MultiTaskElasticNet model with coordinate descent. + + Fit is on grid of alphas and best alpha estimated by cross-validation. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. + y : ndarray of shape (n_samples, n_targets) + Training target variable. Will be cast to X's dtype if necessary. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns MultiTaskElasticNet instance. + """ + return super().fit(X, y, **params) + + +class MultiTaskLassoCV(RegressorMixin, LinearModelCV): + """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer. + + See glossary entry for :term:`cross-validation estimator`. + + The optimization objective for MultiTaskLasso is:: + + (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21 + + Where:: + + ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2} + + i.e. the sum of norm of each row. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.15 + + Parameters + ---------- + eps : float, default=1e-3 + Length of the path. ``eps=1e-3`` means that + ``alpha_min / alpha_max = 1e-3``. + + n_alphas : int, default=100 + Number of alphas along the regularization path. + + alphas : array-like, default=None + List of alphas where to compute the models. + If not provided, set automatically. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + max_iter : int, default=1000 + The maximum number of iterations. 
+ + tol : float, default=1e-4 + The tolerance for the optimization: if the updates are + smaller than ``tol``, the optimization code checks the + dual gap for optimality and continues until it is smaller + than ``tol``. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - int, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + verbose : bool or int, default=False + Amount of verbosity. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. Note that this is + used only if multiple values for l1_ratio are given. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator that selects a random + feature to update. Used when ``selection`` == 'random'. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + selection : {'cyclic', 'random'}, default='cyclic' + If set to 'random', a random coefficient is updated every iteration + rather than looping over features sequentially by default. This + (setting to 'random') often leads to significantly faster convergence + especially when tol is higher than 1e-4. + + Attributes + ---------- + intercept_ : ndarray of shape (n_targets,) + Independent term in decision function. 
+ + coef_ : ndarray of shape (n_targets, n_features) + Parameter vector (W in the cost function formula). + Note that ``coef_`` stores the transpose of ``W``, ``W.T``. + + alpha_ : float + The amount of penalization chosen by cross validation. + + mse_path_ : ndarray of shape (n_alphas, n_folds) + Mean square error for the test set on each fold, varying alpha. + + alphas_ : ndarray of shape (n_alphas,) + The grid of alphas used for fitting. + + n_iter_ : int + Number of iterations run by the coordinate descent solver to reach + the specified tolerance for the optimal alpha. + + dual_gap_ : float + The dual gap at the end of the optimization for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + MultiTaskElasticNet : Multi-task ElasticNet model trained with L1/L2 + mixed-norm as regularizer. + ElasticNetCV : Elastic net model with best model selection by + cross-validation. + MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in + cross-validation. + + Notes + ----- + The algorithm used to fit the model is coordinate descent. + + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + To avoid unnecessary memory duplication the `X` and `y` arguments of the + `fit` method should be directly passed as Fortran-contiguous numpy arrays. + + Examples + -------- + >>> from sklearn.linear_model import MultiTaskLassoCV + >>> from sklearn.datasets import make_regression + >>> from sklearn.metrics import r2_score + >>> X, y = make_regression(n_targets=2, noise=4, random_state=0) + >>> reg = MultiTaskLassoCV(cv=5, random_state=0).fit(X, y) + >>> r2_score(y, reg.predict(X)) + 0.9994... 
+ >>> reg.alpha_ + np.float64(0.5713...) + >>> reg.predict(X[:1,]) + array([[153.7971..., 94.9015...]]) + """ + + _parameter_constraints: dict = { + **LinearModelCV._parameter_constraints, + } + _parameter_constraints.pop("precompute") + _parameter_constraints.pop("positive") + + path = staticmethod(lasso_path) + + def __init__( + self, + *, + eps=1e-3, + n_alphas=100, + alphas=None, + fit_intercept=True, + max_iter=1000, + tol=1e-4, + copy_X=True, + cv=None, + verbose=False, + n_jobs=None, + random_state=None, + selection="cyclic", + ): + super().__init__( + eps=eps, + n_alphas=n_alphas, + alphas=alphas, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + copy_X=copy_X, + cv=cv, + verbose=verbose, + n_jobs=n_jobs, + random_state=random_state, + selection=selection, + ) + + def _get_estimator(self): + return MultiTaskLasso() + + def _is_multitask(self): + return True + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.target_tags.single_output = False + return tags + + # This is necessary as LinearModelCV now supports sample_weight while + # MultiTaskLassoCV does not (yet). + def fit(self, X, y, **params): + """Fit MultiTaskLasso model with coordinate descent. + + Fit is on grid of alphas and best alpha estimated by cross-validation. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data. + y : ndarray of shape (n_samples, n_targets) + Target. Will be cast to X's dtype if necessary. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of fitted model. 
+ """ + return super().fit(X, y, **params) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d0a51e65d32111554af0651a7c7f27c28d301c94 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py @@ -0,0 +1,16 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from .glm import ( + GammaRegressor, + PoissonRegressor, + TweedieRegressor, + _GeneralizedLinearRegressor, +) + +__all__ = [ + "_GeneralizedLinearRegressor", + "PoissonRegressor", + "GammaRegressor", + "TweedieRegressor", +] diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fb840fc3fad78b4588b66f43e3a2347ddf2284d Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6aad258a02825ab77259193f30739bfd594e6af9 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..645e44b18b8706609dea42efe4a0c9537f0fa6ca Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py new file mode 100644 index 0000000000000000000000000000000000000000..2967b91225fdb5ac04d9bd55ea304fafde616d89 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py @@ -0,0 +1,616 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +""" +Newton solver for Generalized Linear Models +""" + +import warnings +from abc import ABC, abstractmethod + +import numpy as np +import scipy.linalg +import scipy.optimize + +from ..._loss.loss import HalfSquaredError +from ...exceptions import ConvergenceWarning +from ...utils.optimize import _check_optimize_result +from .._linear_loss import LinearModelLoss + + +class NewtonSolver(ABC): + """Newton solver for GLMs. + + This class implements Newton/2nd-order optimization routines for GLMs. Each Newton + iteration aims at finding the Newton step which is done by the inner solver. With + Hessian H, gradient g and coefficients coef, one step solves: + + H @ coef_newton = -g + + For our GLM / LinearModelLoss, we have gradient g and Hessian H: + + g = X.T @ loss.gradient + l2_reg_strength * coef + H = X.T @ diag(loss.hessian) @ X + l2_reg_strength * identity + + Backtracking line search updates coef = coef_old + t * coef_newton for some t in + (0, 1]. + + This is a base class, actual implementations (child classes) may deviate from the + above pattern and use structure specific tricks. + + Usage pattern: + - initialize solver: sol = NewtonSolver(...) 
+ - solve the problem: sol.solve(X, y, sample_weight) + + References + ---------- + - Jorge Nocedal, Stephen J. Wright. (2006) "Numerical Optimization" + 2nd edition + https://doi.org/10.1007/978-0-387-40065-5 + + - Stephen P. Boyd, Lieven Vandenberghe. (2004) "Convex Optimization." + Cambridge University Press, 2004. + https://web.stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Initial coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + + linear_loss : LinearModelLoss + The loss to be minimized. + + l2_reg_strength : float, default=0.0 + L2 regularization strength. + + tol : float, default=1e-4 + The optimization problem is solved when each of the following condition is + fulfilled: + 1. maximum |gradient| <= tol + 2. Newton decrement d: 1/2 * d^2 <= tol + + max_iter : int, default=100 + Maximum number of Newton steps allowed. + + n_threads : int, default=1 + Number of OpenMP threads to use for the computation of the Hessian and gradient + of the loss function. + + Attributes + ---------- + coef_old : ndarray of shape coef.shape + Coefficient of previous iteration. + + coef_newton : ndarray of shape coef.shape + Newton step. + + gradient : ndarray of shape coef.shape + Gradient of the loss w.r.t. the coefficients. + + gradient_old : ndarray of shape coef.shape + Gradient of previous iteration. + + loss_value : float + Value of objective function = loss + penalty. + + loss_value_old : float + Value of objective function of previous itertion. + + raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes) + + converged : bool + Indicator for convergence of the solver. + + iteration : int + Number of Newton steps, i.e. 
calls to inner_solve + + use_fallback_lbfgs_solve : bool + If set to True, the solver will resort to call LBFGS to finish the optimisation + procedure in case of convergence issues. + + gradient_times_newton : float + gradient @ coef_newton, set in inner_solve and used by line_search. If the + Newton step is a descent direction, this is negative. + """ + + def __init__( + self, + *, + coef, + linear_loss=LinearModelLoss(base_loss=HalfSquaredError(), fit_intercept=True), + l2_reg_strength=0.0, + tol=1e-4, + max_iter=100, + n_threads=1, + verbose=0, + ): + self.coef = coef + self.linear_loss = linear_loss + self.l2_reg_strength = l2_reg_strength + self.tol = tol + self.max_iter = max_iter + self.n_threads = n_threads + self.verbose = verbose + + def setup(self, X, y, sample_weight): + """Precomputations + + If None, initializes: + - self.coef + Sets: + - self.raw_prediction + - self.loss_value + """ + _, _, self.raw_prediction = self.linear_loss.weight_intercept_raw(self.coef, X) + self.loss_value = self.linear_loss.loss( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + raw_prediction=self.raw_prediction, + ) + + @abstractmethod + def update_gradient_hessian(self, X, y, sample_weight): + """Update gradient and Hessian.""" + + @abstractmethod + def inner_solve(self, X, y, sample_weight): + """Compute Newton step. + + Sets: + - self.coef_newton + - self.gradient_times_newton + """ + + def fallback_lbfgs_solve(self, X, y, sample_weight): + """Fallback solver in case of emergency. + + If a solver detects convergence problems, it may fall back to this methods in + the hope to exit with success instead of raising an error. 
+ + Sets: + - self.coef + - self.converged + """ + opt_res = scipy.optimize.minimize( + self.linear_loss.loss_gradient, + self.coef, + method="L-BFGS-B", + jac=True, + options={ + "maxiter": self.max_iter - self.iteration, + "maxls": 50, # default is 20 + "iprint": self.verbose - 1, + "gtol": self.tol, + "ftol": 64 * np.finfo(np.float64).eps, + }, + args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads), + ) + self.iteration += _check_optimize_result("lbfgs", opt_res) + self.coef = opt_res.x + self.converged = opt_res.status == 0 + + def line_search(self, X, y, sample_weight): + """Backtracking line search. + + Sets: + - self.coef_old + - self.coef + - self.loss_value_old + - self.loss_value + - self.gradient_old + - self.gradient + - self.raw_prediction + """ + # line search parameters + beta, sigma = 0.5, 0.00048828125 # 1/2, 1/2**11 + eps = 16 * np.finfo(self.loss_value.dtype).eps + t = 1 # step size + + # gradient_times_newton = self.gradient @ self.coef_newton + # was computed in inner_solve. + armijo_term = sigma * self.gradient_times_newton + _, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw( + self.coef_newton, X + ) + + self.coef_old = self.coef + self.loss_value_old = self.loss_value + self.gradient_old = self.gradient + + # np.sum(np.abs(self.gradient_old)) + sum_abs_grad_old = -1 + + is_verbose = self.verbose >= 2 + if is_verbose: + print(" Backtracking Line Search") + print(f" eps=16 * finfo.eps={eps}") + + for i in range(21): # until and including t = beta**20 ~ 1e-6 + self.coef = self.coef_old + t * self.coef_newton + raw = self.raw_prediction + t * raw_prediction_newton + self.loss_value, self.gradient = self.linear_loss.loss_gradient( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + raw_prediction=raw, + ) + # Note: If coef_newton is too large, loss_gradient may produce inf values, + # potentially accompanied by a RuntimeWarning. 
+ # This case will be captured by the Armijo condition. + + # 1. Check Armijo / sufficient decrease condition. + # The smaller (more negative) the better. + loss_improvement = self.loss_value - self.loss_value_old + check = loss_improvement <= t * armijo_term + if is_verbose: + print( + f" line search iteration={i+1}, step size={t}\n" + f" check loss improvement <= armijo term: {loss_improvement} " + f"<= {t * armijo_term} {check}" + ) + if check: + break + # 2. Deal with relative loss differences around machine precision. + tiny_loss = np.abs(self.loss_value_old * eps) + check = np.abs(loss_improvement) <= tiny_loss + if is_verbose: + print( + " check loss |improvement| <= eps * |loss_old|:" + f" {np.abs(loss_improvement)} <= {tiny_loss} {check}" + ) + if check: + if sum_abs_grad_old < 0: + sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1) + # 2.1 Check sum of absolute gradients as alternative condition. + sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1) + check = sum_abs_grad < sum_abs_grad_old + if is_verbose: + print( + " check sum(|gradient|) < sum(|gradient_old|): " + f"{sum_abs_grad} < {sum_abs_grad_old} {check}" + ) + if check: + break + + t *= beta + else: + warnings.warn( + ( + f"Line search of Newton solver {self.__class__.__name__} at" + f" iteration #{self.iteration} did no converge after 21 line search" + " refinement iterations. It will now resort to lbfgs instead." + ), + ConvergenceWarning, + ) + if self.verbose: + print(" Line search did not converge and resorts to lbfgs instead.") + self.use_fallback_lbfgs_solve = True + return + + self.raw_prediction = raw + if is_verbose: + print( + f" line search successful after {i+1} iterations with " + f"loss={self.loss_value}." + ) + + def check_convergence(self, X, y, sample_weight): + """Check for convergence. + + Sets self.converged. 
+ """ + if self.verbose: + print(" Check Convergence") + # Note: Checking maximum relative change of coefficient <= tol is a bad + # convergence criterion because even a large step could have brought us close + # to the true minimum. + # coef_step = self.coef - self.coef_old + # change = np.max(np.abs(coef_step) / np.maximum(1, np.abs(self.coef_old))) + # check = change <= tol + + # 1. Criterion: maximum |gradient| <= tol + # The gradient was already updated in line_search() + g_max_abs = np.max(np.abs(self.gradient)) + check = g_max_abs <= self.tol + if self.verbose: + print(f" 1. max |gradient| {g_max_abs} <= {self.tol} {check}") + if not check: + return + + # 2. Criterion: For Newton decrement d, check 1/2 * d^2 <= tol + # d = sqrt(grad @ hessian^-1 @ grad) + # = sqrt(coef_newton @ hessian @ coef_newton) + # See Boyd, Vanderberghe (2009) "Convex Optimization" Chapter 9.5.1. + d2 = self.coef_newton @ self.hessian @ self.coef_newton + check = 0.5 * d2 <= self.tol + if self.verbose: + print(f" 2. Newton decrement {0.5 * d2} <= {self.tol} {check}") + if not check: + return + + if self.verbose: + loss_value = self.linear_loss.loss( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + ) + print(f" Solver did converge at loss = {loss_value}.") + self.converged = True + + def finalize(self, X, y, sample_weight): + """Finalize the solvers results. + + Some solvers may need this, others not. + """ + pass + + def solve(self, X, y, sample_weight): + """Solve the optimization problem. + + This is the main routine. + + Order of calls: + self.setup() + while iteration: + self.update_gradient_hessian() + self.inner_solve() + self.line_search() + self.check_convergence() + self.finalize() + + Returns + ------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Solution of the optimization problem. 
+ """ + # setup usually: + # - initializes self.coef if needed + # - initializes and calculates self.raw_predictions, self.loss_value + self.setup(X=X, y=y, sample_weight=sample_weight) + + self.iteration = 1 + self.converged = False + self.use_fallback_lbfgs_solve = False + + while self.iteration <= self.max_iter and not self.converged: + if self.verbose: + print(f"Newton iter={self.iteration}") + + self.use_fallback_lbfgs_solve = False # Fallback solver. + + # 1. Update Hessian and gradient + self.update_gradient_hessian(X=X, y=y, sample_weight=sample_weight) + + # TODO: + # if iteration == 1: + # We might stop early, e.g. we already are close to the optimum, + # usually detected by zero gradients at this stage. + + # 2. Inner solver + # Calculate Newton step/direction + # This usually sets self.coef_newton and self.gradient_times_newton. + self.inner_solve(X=X, y=y, sample_weight=sample_weight) + if self.use_fallback_lbfgs_solve: + break + + # 3. Backtracking line search + # This usually sets self.coef_old, self.coef, self.loss_value_old + # self.loss_value, self.gradient_old, self.gradient, + # self.raw_prediction. + self.line_search(X=X, y=y, sample_weight=sample_weight) + if self.use_fallback_lbfgs_solve: + break + + # 4. Check convergence + # Sets self.converged. + self.check_convergence(X=X, y=y, sample_weight=sample_weight) + + # 5. Next iteration + self.iteration += 1 + + if not self.converged: + if self.use_fallback_lbfgs_solve: + # Note: The fallback solver circumvents check_convergence and relies on + # the convergence checks of lbfgs instead. Enough warnings have been + # raised on the way. + self.fallback_lbfgs_solve(X=X, y=y, sample_weight=sample_weight) + else: + warnings.warn( + ( + f"Newton solver did not converge after {self.iteration - 1} " + "iterations." 
+ ), + ConvergenceWarning, + ) + + self.iteration -= 1 + self.finalize(X=X, y=y, sample_weight=sample_weight) + return self.coef + + +class NewtonCholeskySolver(NewtonSolver): + """Cholesky based Newton solver. + + Inner solver for finding the Newton step H w_newton = -g uses Cholesky based linear + solver. + """ + + def setup(self, X, y, sample_weight): + super().setup(X=X, y=y, sample_weight=sample_weight) + if self.linear_loss.base_loss.is_multiclass: + # Easier with ravelled arrays, e.g., for scipy.linalg.solve. + # As with LinearModelLoss, we always are contiguous in n_classes. + self.coef = self.coef.ravel(order="F") + # Note that the computation of gradient in LinearModelLoss follows the shape of + # coef. + self.gradient = np.empty_like(self.coef) + # But the hessian is always 2d. + n = self.coef.size + self.hessian = np.empty_like(self.coef, shape=(n, n)) + # To help case distinctions. + self.is_multinomial_with_intercept = ( + self.linear_loss.base_loss.is_multiclass and self.linear_loss.fit_intercept + ) + self.is_multinomial_no_penalty = ( + self.linear_loss.base_loss.is_multiclass and self.l2_reg_strength == 0 + ) + + def update_gradient_hessian(self, X, y, sample_weight): + _, _, self.hessian_warning = self.linear_loss.gradient_hessian( + coef=self.coef, + X=X, + y=y, + sample_weight=sample_weight, + l2_reg_strength=self.l2_reg_strength, + n_threads=self.n_threads, + gradient_out=self.gradient, + hessian_out=self.hessian, + raw_prediction=self.raw_prediction, # this was updated in line_search + ) + + def inner_solve(self, X, y, sample_weight): + if self.hessian_warning: + warnings.warn( + ( + f"The inner solver of {self.__class__.__name__} detected a " + "pointwise hessian with many negative values at iteration " + f"#{self.iteration}. It will now resort to lbfgs instead." + ), + ConvergenceWarning, + ) + if self.verbose: + print( + " The inner solver detected a pointwise Hessian with many " + "negative values and resorts to lbfgs instead." 
+ ) + self.use_fallback_lbfgs_solve = True + return + + # Note: The following case distinction could also be shifted to the + # implementation of HalfMultinomialLoss instead of here within the solver. + if self.is_multinomial_no_penalty: + # The multinomial loss is overparametrized for each unpenalized feature, so + # at least the intercepts. This can be seen by noting that predicted + # probabilities are invariant under shifting all coefficients of a single + # feature j for all classes by the same amount c: + # coef[k, :] -> coef[k, :] + c => proba stays the same + # where we have assumned coef.shape = (n_classes, n_features). + # Therefore, also the loss (-log-likelihood), gradient and hessian stay the + # same, see + # Noah Simon and Jerome Friedman and Trevor Hastie. (2013) "A Blockwise + # Descent Algorithm for Group-penalized Multiresponse and Multinomial + # Regression". https://doi.org/10.48550/arXiv.1311.6529 + # + # We choose the standard approach and set all the coefficients of the last + # class to zero, for all features including the intercept. + n_classes = self.linear_loss.base_loss.n_classes + n_dof = self.coef.size // n_classes # degree of freedom per class + n = self.coef.size - n_dof # effective size + self.coef[n_classes - 1 :: n_classes] = 0 + self.gradient[n_classes - 1 :: n_classes] = 0 + self.hessian[n_classes - 1 :: n_classes, :] = 0 + self.hessian[:, n_classes - 1 :: n_classes] = 0 + # We also need the reduced variants of gradient and hessian where the + # entries set to zero are removed. For 2 features and 3 classes with + # arbitrary values, "x" means removed: + # gradient = [0, 1, x, 3, 4, x] + # + # hessian = [0, 1, x, 3, 4, x] + # [1, 7, x, 9, 10, x] + # [x, x, x, x, x, x] + # [3, 9, x, 21, 22, x] + # [4, 10, x, 22, 28, x] + # [x, x, x, x, x, x] + # The following slicing triggers copies of gradient and hessian. 
+ gradient = self.gradient.reshape(-1, n_classes)[:, :-1].flatten() + hessian = self.hessian.reshape(n_dof, n_classes, n_dof, n_classes)[ + :, :-1, :, :-1 + ].reshape(n, n) + elif self.is_multinomial_with_intercept: + # Here, only intercepts are unpenalized. We again choose the last class and + # set its intercept to zero. + self.coef[-1] = 0 + self.gradient[-1] = 0 + self.hessian[-1, :] = 0 + self.hessian[:, -1] = 0 + gradient, hessian = self.gradient[:-1], self.hessian[:-1, :-1] + else: + gradient, hessian = self.gradient, self.hessian + + try: + with warnings.catch_warnings(): + warnings.simplefilter("error", scipy.linalg.LinAlgWarning) + self.coef_newton = scipy.linalg.solve( + hessian, -gradient, check_finite=False, assume_a="sym" + ) + if self.is_multinomial_no_penalty: + self.coef_newton = np.c_[ + self.coef_newton.reshape(n_dof, n_classes - 1), np.zeros(n_dof) + ].reshape(-1) + assert self.coef_newton.flags.f_contiguous + elif self.is_multinomial_with_intercept: + self.coef_newton = np.r_[self.coef_newton, 0] + self.gradient_times_newton = self.gradient @ self.coef_newton + if self.gradient_times_newton > 0: + if self.verbose: + print( + " The inner solver found a Newton step that is not a " + "descent direction and resorts to LBFGS steps instead." + ) + self.use_fallback_lbfgs_solve = True + return + except (np.linalg.LinAlgError, scipy.linalg.LinAlgWarning) as e: + warnings.warn( + f"The inner solver of {self.__class__.__name__} stumbled upon a " + "singular or very ill-conditioned Hessian matrix at iteration " + f"{self.iteration}. It will now resort to lbfgs instead.\n" + "Further options are to use another solver or to avoid such situation " + "in the first place. Possible remedies are removing collinear features" + " of X or increasing the penalization strengths.\n" + "The original Linear Algebra message was:\n" + str(e), + scipy.linalg.LinAlgWarning, + ) + # Possible causes: + # 1. hess_pointwise is negative. 
But this is already taken care in + # LinearModelLoss.gradient_hessian. + # 2. X is singular or ill-conditioned + # This might be the most probable cause. + # + # There are many possible ways to deal with this situation. Most of them + # add, explicitly or implicitly, a matrix to the hessian to make it + # positive definite, confer to Chapter 3.4 of Nocedal & Wright 2nd ed. + # Instead, we resort to lbfgs. + if self.verbose: + print( + " The inner solver stumbled upon an singular or ill-conditioned " + "Hessian matrix and resorts to LBFGS instead." + ) + self.use_fallback_lbfgs_solve = True + return + + def finalize(self, X, y, sample_weight): + if self.is_multinomial_no_penalty: + # Our convention is usually the symmetric parametrization where + # sum(coef[classes, features], axis=0) = 0. + # We convert now to this convention. Note that it does not change + # the predicted probabilities. + n_classes = self.linear_loss.base_loss.n_classes + self.coef = self.coef.reshape(n_classes, -1, order="F") + self.coef -= np.mean(self.coef, axis=0) + elif self.is_multinomial_with_intercept: + # Only the intercept needs an update to the symmetric parametrization. 
+ n_classes = self.linear_loss.base_loss.n_classes + self.coef[-n_classes:] -= np.mean(self.coef[-n_classes:]) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py new file mode 100644 index 0000000000000000000000000000000000000000..fc31f9825d2e55bab4641b81a73cb7faaa607db3 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py @@ -0,0 +1,908 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +""" +Generalized Linear Models with Exponential Dispersion Family +""" + +from numbers import Integral, Real + +import numpy as np +import scipy.optimize + +from ..._loss.loss import ( + HalfGammaLoss, + HalfPoissonLoss, + HalfSquaredError, + HalfTweedieLoss, + HalfTweedieLossIdentity, +) +from ...base import BaseEstimator, RegressorMixin, _fit_context +from ...utils import check_array +from ...utils._openmp_helpers import _openmp_effective_n_threads +from ...utils._param_validation import Hidden, Interval, StrOptions +from ...utils.optimize import _check_optimize_result +from ...utils.validation import _check_sample_weight, check_is_fitted, validate_data +from .._linear_loss import LinearModelLoss +from ._newton_solver import NewtonCholeskySolver, NewtonSolver + + +class _GeneralizedLinearRegressor(RegressorMixin, BaseEstimator): + """Regression via a penalized Generalized Linear Model (GLM). + + GLMs based on a reproductive Exponential Dispersion Model (EDM) aim at fitting and + predicting the mean of the target y as y_pred=h(X*w) with coefficients w. + Therefore, the fit minimizes the following objective function with L2 priors as + regularizer:: + + 1/(2*sum(s_i)) * sum(s_i * deviance(y_i, h(x_i*w)) + 1/2 * alpha * ||w||_2^2 + + with inverse link function h, s=sample_weight and per observation (unit) deviance + deviance(y_i, h(x_i*w)). 
Note that for an EDM, 1/2 * deviance is the negative + log-likelihood up to a constant (in w) term. + The parameter ``alpha`` corresponds to the lambda parameter in glmnet. + + Instead of implementing the EDM family and a link function separately, we directly + use the loss functions `from sklearn._loss` which have the link functions included + in them for performance reasons. We pick the loss functions that implement + (1/2 times) EDM deviances. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + alpha : float, default=1 + Constant that multiplies the penalty term and thus determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor (X @ coef + intercept). + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. 
For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for ``coef_`` and ``intercept_``. + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_iter_ : int + Actual number of iterations used in the solver. + + _base_loss : BaseLoss, default=HalfSquaredError() + This is set during fit via `self._get_loss()`. + A `_base_loss` contains a specific loss function as well as the link + function. The loss to be minimized specifies the distributional assumption of + the GLM, i.e. the distribution from the EDM. Here are some examples: + + ======================= ======== ========================== + _base_loss Link Target Domain + ======================= ======== ========================== + HalfSquaredError identity y any real number + HalfPoissonLoss log 0 <= y + HalfGammaLoss log 0 < y + HalfTweedieLoss log dependent on tweedie power + HalfTweedieLossIdentity identity dependent on tweedie power + ======================= ======== ========================== + + The link function of the GLM, i.e. mapping from linear predictor + `X @ coeff + intercept` to prediction `y_pred`. For instance, with a log link, + we have `y_pred = exp(X @ coeff + intercept)`. + """ + + # We allow for NewtonSolver classes for the "solver" parameter but do not + # make them public in the docstrings. This facilitates testing and + # benchmarking. 
+ _parameter_constraints: dict = { + "alpha": [Interval(Real, 0.0, None, closed="left")], + "fit_intercept": ["boolean"], + "solver": [ + StrOptions({"lbfgs", "newton-cholesky"}), + Hidden(type), + ], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="neither")], + "warm_start": ["boolean"], + "verbose": ["verbose"], + } + + def __init__( + self, + *, + alpha=1.0, + fit_intercept=True, + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.solver = solver + self.max_iter = max_iter + self.tol = tol + self.warm_start = warm_start + self.verbose = verbose + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit a Generalized Linear Model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + self : object + Fitted model. + """ + X, y = validate_data( + self, + X, + y, + accept_sparse=["csc", "csr"], + dtype=[np.float64, np.float32], + y_numeric=True, + multi_output=False, + ) + + # required by losses + if self.solver == "lbfgs": + # lbfgs will force coef and therefore raw_prediction to be float64. The + # base_loss needs y, X @ coef and sample_weight all of same dtype + # (and contiguous). + loss_dtype = np.float64 + else: + loss_dtype = min(max(y.dtype, X.dtype), np.float64) + y = check_array(y, dtype=loss_dtype, order="C", ensure_2d=False) + + if sample_weight is not None: + # Note that _check_sample_weight calls check_array(order="C") required by + # losses. 
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype) + + n_samples, n_features = X.shape + self._base_loss = self._get_loss() + + linear_loss = LinearModelLoss( + base_loss=self._base_loss, + fit_intercept=self.fit_intercept, + ) + + if not linear_loss.base_loss.in_y_true_range(y): + raise ValueError( + "Some value(s) of y are out of the valid range of the loss" + f" {self._base_loss.__class__.__name__!r}." + ) + + # TODO: if alpha=0 check that X is not rank deficient + + # NOTE: Rescaling of sample_weight: + # We want to minimize + # obj = 1/(2 * sum(sample_weight)) * sum(sample_weight * deviance) + # + 1/2 * alpha * L2, + # with + # deviance = 2 * loss. + # The objective is invariant to multiplying sample_weight by a constant. We + # could choose this constant such that sum(sample_weight) = 1 in order to end + # up with + # obj = sum(sample_weight * loss) + 1/2 * alpha * L2. + # But LinearModelLoss.loss() already computes + # average(loss, weights=sample_weight) + # Thus, without rescaling, we have + # obj = LinearModelLoss.loss(...) + + if self.warm_start and hasattr(self, "coef_"): + if self.fit_intercept: + # LinearModelLoss needs intercept at the end of coefficient array. + coef = np.concatenate((self.coef_, np.array([self.intercept_]))) + else: + coef = self.coef_ + coef = coef.astype(loss_dtype, copy=False) + else: + coef = linear_loss.init_zero_coef(X, dtype=loss_dtype) + if self.fit_intercept: + coef[-1] = linear_loss.base_loss.link.link( + np.average(y, weights=sample_weight) + ) + + l2_reg_strength = self.alpha + n_threads = _openmp_effective_n_threads() + + # Algorithms for optimization: + # Note again that our losses implement 1/2 * deviance. 
+ if self.solver == "lbfgs": + func = linear_loss.loss_gradient + + opt_res = scipy.optimize.minimize( + func, + coef, + method="L-BFGS-B", + jac=True, + options={ + "maxiter": self.max_iter, + "maxls": 50, # default is 20 + "iprint": self.verbose - 1, + "gtol": self.tol, + # The constant 64 was found empirically to pass the test suite. + # The point is that ftol is very small, but a bit larger than + # machine precision for float64, which is the dtype used by lbfgs. + "ftol": 64 * np.finfo(float).eps, + }, + args=(X, y, sample_weight, l2_reg_strength, n_threads), + ) + self.n_iter_ = _check_optimize_result("lbfgs", opt_res) + coef = opt_res.x + elif self.solver == "newton-cholesky": + sol = NewtonCholeskySolver( + coef=coef, + linear_loss=linear_loss, + l2_reg_strength=l2_reg_strength, + tol=self.tol, + max_iter=self.max_iter, + n_threads=n_threads, + verbose=self.verbose, + ) + coef = sol.solve(X, y, sample_weight) + self.n_iter_ = sol.iteration + elif issubclass(self.solver, NewtonSolver): + sol = self.solver( + coef=coef, + linear_loss=linear_loss, + l2_reg_strength=l2_reg_strength, + tol=self.tol, + max_iter=self.max_iter, + n_threads=n_threads, + ) + coef = sol.solve(X, y, sample_weight) + self.n_iter_ = sol.iteration + else: + raise ValueError(f"Invalid solver={self.solver}.") + + if self.fit_intercept: + self.intercept_ = coef[-1] + self.coef_ = coef[:-1] + else: + # set intercept to zero as the other linear models do + self.intercept_ = 0.0 + self.coef_ = coef + + return self + + def _linear_predictor(self, X): + """Compute the linear_predictor = `X @ coef_ + intercept_`. + + Note that we often use the term raw_prediction instead of linear predictor. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + Returns + ------- + y_pred : array of shape (n_samples,) + Returns predicted values of linear predictor. 
+ """ + check_is_fitted(self) + X = validate_data( + self, + X, + accept_sparse=["csr", "csc", "coo"], + dtype=[np.float64, np.float32], + ensure_2d=True, + allow_nd=False, + reset=False, + ) + return X @ self.coef_ + self.intercept_ + + def predict(self, X): + """Predict using GLM with feature matrix X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + Returns + ------- + y_pred : array of shape (n_samples,) + Returns predicted values. + """ + # check_array is done in _linear_predictor + raw_prediction = self._linear_predictor(X) + y_pred = self._base_loss.link.inverse(raw_prediction) + return y_pred + + def score(self, X, y, sample_weight=None): + """Compute D^2, the percentage of deviance explained. + + D^2 is a generalization of the coefficient of determination R^2. + R^2 uses squared error and D^2 uses the deviance of this GLM, see the + :ref:`User Guide `. + + D^2 is defined as + :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`, + :math:`D_{null}` is the null deviance, i.e. the deviance of a model + with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`. + The mean :math:`\\bar{y}` is averaged by sample_weight. + Best possible score is 1.0 and it can be negative (because the model + can be arbitrarily worse). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples,) + True values of target. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + D^2 of self.predict(X) w.r.t. y. + """ + # TODO: Adapt link to User Guide in the docstring, once + # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged. + # + # Note, default score defined in RegressorMixin is R^2 score. 
+ # TODO: make D^2 a score function in module metrics (and thereby get + # input validation and so on) + raw_prediction = self._linear_predictor(X) # validates X + # required by losses + y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False) + + if sample_weight is not None: + # Note that _check_sample_weight calls check_array(order="C") required by + # losses. + sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype) + + base_loss = self._base_loss + + if not base_loss.in_y_true_range(y): + raise ValueError( + "Some value(s) of y are out of the valid range of the loss" + f" {base_loss.__name__}." + ) + + constant = np.average( + base_loss.constant_to_optimal_zero(y_true=y, sample_weight=None), + weights=sample_weight, + ) + + # Missing factor of 2 in deviance cancels out. + deviance = base_loss( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=1, + ) + y_mean = base_loss.link.link(np.average(y, weights=sample_weight)) + deviance_null = base_loss( + y_true=y, + raw_prediction=np.tile(y_mean, y.shape[0]), + sample_weight=sample_weight, + n_threads=1, + ) + return 1 - (deviance + constant) / (deviance_null + constant) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + try: + # Create instance of BaseLoss if fit wasn't called yet. This is necessary as + # TweedieRegressor might set the used loss during fit different from + # self._base_loss. + base_loss = self._get_loss() + tags.target_tags.positive_only = not base_loss.in_y_true_range(-1.0) + except (ValueError, AttributeError, TypeError): + # This happens when the link or power parameter of TweedieRegressor is + # invalid. We fallback on the default tags in that case. + pass # pragma: no cover + return tags + + def _get_loss(self): + """This is only necessary because of the link and power arguments of the + TweedieRegressor. 
+ + Note that we do not need to pass sample_weight to the loss class as this is + only needed to set loss.constant_hessian on which GLMs do not rely. + """ + return HalfSquaredError() + + +class PoissonRegressor(_GeneralizedLinearRegressor): + """Generalized Linear Model with a Poisson distribution. + + This regressor uses the 'log' link function. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + alpha : float, default=1 + Constant that multiplies the L2 penalty term and determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values of `alpha` must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor (`X @ coef + intercept`). + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. 
+ + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for ``coef_`` and ``intercept_`` . + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Actual number of iterations used in the solver. + + See Also + -------- + TweedieRegressor : Generalized Linear Model with a Tweedie distribution. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.PoissonRegressor() + >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] + >>> y = [12, 17, 22, 21] + >>> clf.fit(X, y) + PoissonRegressor() + >>> clf.score(X, y) + np.float64(0.990...) + >>> clf.coef_ + array([0.121..., 0.158...]) + >>> clf.intercept_ + np.float64(2.088...) 
+ >>> clf.predict([[1, 1], [3, 4]]) + array([10.676..., 21.875...]) + """ + + _parameter_constraints: dict = { + **_GeneralizedLinearRegressor._parameter_constraints + } + + def __init__( + self, + *, + alpha=1.0, + fit_intercept=True, + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + verbose=verbose, + ) + + def _get_loss(self): + return HalfPoissonLoss() + + +class GammaRegressor(_GeneralizedLinearRegressor): + """Generalized Linear Model with a Gamma distribution. + + This regressor uses the 'log' link function. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + alpha : float, default=1 + Constant that multiplies the L2 penalty term and determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values of `alpha` must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor `X @ coef_ + intercept_`. + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. 
+ Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for `coef_` and `intercept_`. + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. bias) added to linear predictor. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + n_iter_ : int + Actual number of iterations used in the solver. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PoissonRegressor : Generalized Linear Model with a Poisson distribution. + TweedieRegressor : Generalized Linear Model with a Tweedie distribution. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.GammaRegressor() + >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] + >>> y = [19, 26, 33, 30] + >>> clf.fit(X, y) + GammaRegressor() + >>> clf.score(X, y) + np.float64(0.773...) + >>> clf.coef_ + array([0.072..., 0.066...]) + >>> clf.intercept_ + np.float64(2.896...) 
+ >>> clf.predict([[1, 0], [2, 8]]) + array([19.483..., 35.795...]) + """ + + _parameter_constraints: dict = { + **_GeneralizedLinearRegressor._parameter_constraints + } + + def __init__( + self, + *, + alpha=1.0, + fit_intercept=True, + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + verbose=verbose, + ) + + def _get_loss(self): + return HalfGammaLoss() + + +class TweedieRegressor(_GeneralizedLinearRegressor): + """Generalized Linear Model with a Tweedie distribution. + + This estimator can be used to model different GLMs depending on the + ``power`` parameter, which determines the underlying distribution. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + Parameters + ---------- + power : float, default=0 + The power determines the underlying target distribution according + to the following table: + + +-------+------------------------+ + | Power | Distribution | + +=======+========================+ + | 0 | Normal | + +-------+------------------------+ + | 1 | Poisson | + +-------+------------------------+ + | (1,2) | Compound Poisson Gamma | + +-------+------------------------+ + | 2 | Gamma | + +-------+------------------------+ + | 3 | Inverse Gaussian | + +-------+------------------------+ + + For ``0 < power < 1``, no distribution exists. + + alpha : float, default=1 + Constant that multiplies the L2 penalty term and determines the + regularization strength. ``alpha = 0`` is equivalent to unpenalized + GLMs. In this case, the design matrix `X` must have full column rank + (no collinearities). + Values of `alpha` must be in the range `[0.0, inf)`. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the linear predictor (`X @ coef + intercept`). 
+ + link : {'auto', 'identity', 'log'}, default='auto' + The link function of the GLM, i.e. mapping from linear predictor + `X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets + the link depending on the chosen `power` parameter as follows: + + - 'identity' for ``power <= 0``, e.g. for the Normal distribution + - 'log' for ``power > 0``, e.g. for Poisson, Gamma and Inverse Gaussian + distributions + + solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs' + Algorithm to use in the optimization problem: + + 'lbfgs' + Calls scipy's L-BFGS-B optimizer. + + 'newton-cholesky' + Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to + iterated reweighted least squares) with an inner Cholesky based solver. + This solver is a good choice for `n_samples` >> `n_features`, especially + with one-hot encoded categorical features with rare categories. Be aware + that the memory usage of this solver has a quadratic dependency on + `n_features` because it explicitly computes the Hessian matrix. + + .. versionadded:: 1.2 + + max_iter : int, default=100 + The maximal number of iterations for the solver. + Values must be in the range `[1, inf)`. + + tol : float, default=1e-4 + Stopping criterion. For the lbfgs solver, + the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol`` + where ``g_j`` is the j-th component of the gradient (derivative) of + the objective function. + Values must be in the range `(0.0, inf)`. + + warm_start : bool, default=False + If set to ``True``, reuse the solution of the previous call to ``fit`` + as initialization for ``coef_`` and ``intercept_`` . + + verbose : int, default=0 + For the lbfgs solver set verbose to any positive number for verbosity. + Values must be in the range `[0, inf)`. + + Attributes + ---------- + coef_ : array of shape (n_features,) + Estimated coefficients for the linear predictor (`X @ coef_ + + intercept_`) in the GLM. + + intercept_ : float + Intercept (a.k.a. 
bias) added to linear predictor. + + n_iter_ : int + Actual number of iterations used in the solver. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PoissonRegressor : Generalized Linear Model with a Poisson distribution. + GammaRegressor : Generalized Linear Model with a Gamma distribution. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.TweedieRegressor() + >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] + >>> y = [2, 3.5, 5, 5.5] + >>> clf.fit(X, y) + TweedieRegressor() + >>> clf.score(X, y) + np.float64(0.839...) + >>> clf.coef_ + array([0.599..., 0.299...]) + >>> clf.intercept_ + np.float64(1.600...) + >>> clf.predict([[1, 1], [3, 4]]) + array([2.500..., 4.599...]) + """ + + _parameter_constraints: dict = { + **_GeneralizedLinearRegressor._parameter_constraints, + "power": [Interval(Real, None, None, closed="neither")], + "link": [StrOptions({"auto", "identity", "log"})], + } + + def __init__( + self, + *, + power=0.0, + alpha=1.0, + fit_intercept=True, + link="auto", + solver="lbfgs", + max_iter=100, + tol=1e-4, + warm_start=False, + verbose=0, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + tol=tol, + warm_start=warm_start, + verbose=verbose, + ) + self.link = link + self.power = power + + def _get_loss(self): + if self.link == "auto": + if self.power <= 0: + # identity link + return HalfTweedieLossIdentity(power=self.power) + else: + # log link + return HalfTweedieLoss(power=self.power) + + if self.link == "log": + return HalfTweedieLoss(power=self.power) + + if self.link == "identity": + return HalfTweedieLossIdentity(power=self.power) diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..67dd18fb94b593f0a3125c1f5833f3b9597614ba --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py @@ -0,0 +1,2 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00b8d0c4602a6b7de8e682272eb7e954dfe96349 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8565830d1b4906a354f429bc7a6d217ccc92f93d Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py new file mode 100644 index 0000000000000000000000000000000000000000..cb052860dd7568907354bfeb464e1cead6001d07 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py @@ -0,0 +1,1110 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + 
+import itertools +import warnings +from functools import partial + +import numpy as np +import pytest +import scipy +from numpy.testing import assert_allclose +from scipy import linalg +from scipy.optimize import minimize, root + +from sklearn._loss import HalfBinomialLoss, HalfPoissonLoss, HalfTweedieLoss +from sklearn._loss.link import IdentityLink, LogLink +from sklearn.base import clone +from sklearn.datasets import make_low_rank_matrix, make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + GammaRegressor, + PoissonRegressor, + Ridge, + TweedieRegressor, +) +from sklearn.linear_model._glm import _GeneralizedLinearRegressor +from sklearn.linear_model._glm._newton_solver import NewtonCholeskySolver +from sklearn.linear_model._linear_loss import LinearModelLoss +from sklearn.metrics import d2_tweedie_score, mean_poisson_deviance +from sklearn.model_selection import train_test_split + +SOLVERS = ["lbfgs", "newton-cholesky"] + + +class BinomialRegressor(_GeneralizedLinearRegressor): + def _get_loss(self): + return HalfBinomialLoss() + + +def _special_minimize(fun, grad, x, tol_NM, tol): + # Find good starting point by Nelder-Mead + res_NM = minimize( + fun, x, method="Nelder-Mead", options={"xatol": tol_NM, "fatol": tol_NM} + ) + # Now refine via root finding on the gradient of the function, which is + # more precise than minimizing the function itself. 
+ res = root( + grad, + res_NM.x, + method="lm", + options={"ftol": tol, "xtol": tol, "gtol": tol}, + ) + return res.x + + +@pytest.fixture(scope="module") +def regression_data(): + X, y = make_regression( + n_samples=107, n_features=10, n_informative=80, noise=0.5, random_state=2 + ) + return X, y + + +@pytest.fixture( + params=itertools.product( + ["long", "wide"], + [ + BinomialRegressor(), + PoissonRegressor(), + GammaRegressor(), + # TweedieRegressor(power=3.0), # too difficult + # TweedieRegressor(power=0, link="log"), # too difficult + TweedieRegressor(power=1.5), + ], + ), + ids=lambda param: f"{param[0]}-{param[1]}", +) +def glm_dataset(global_random_seed, request): + """Dataset with GLM solutions, well conditioned X. + + This is inspired by ols_ridge_dataset in test_ridge.py. + + The construction is based on the SVD decomposition of X = U S V'. + + Parameters + ---------- + type : {"long", "wide"} + If "long", then n_samples > n_features. + If "wide", then n_features > n_samples. + model : a GLM model + + For "wide", we return the minimum norm solution: + + min ||w||_2 subject to w = argmin deviance(X, y, w) + + Note that the deviance is always minimized if y = inverse_link(X w) is possible to + achieve, which it is in the wide data case. Therefore, we can construct the + solution with minimum norm like (wide) OLS: + + min ||w||_2 subject to link(y) = raw_prediction = X w + + Returns + ------- + model : GLM model + X : ndarray + Last column of 1, i.e. intercept. + y : ndarray + coef_unpenalized : ndarray + Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in + case of ambiguity) + Last coefficient is intercept. + coef_penalized : ndarray + GLM solution with alpha=l2_reg_strength=1, i.e. + min 1/n * sum(loss) + ||w[:-1]||_2^2. + Last coefficient is intercept. + l2_reg_strength : float + Always equal 1. + """ + data_type, model = request.param + # Make larger dim more than double as big as the smaller one. 
+ # This helps when constructing singular matrices like (X, X). + if data_type == "long": + n_samples, n_features = 12, 4 + else: + n_samples, n_features = 4, 12 + k = min(n_samples, n_features) + rng = np.random.RandomState(global_random_seed) + X = make_low_rank_matrix( + n_samples=n_samples, + n_features=n_features, + effective_rank=k, + tail_strength=0.1, + random_state=rng, + ) + X[:, -1] = 1 # last columns acts as intercept + U, s, Vt = linalg.svd(X, full_matrices=False) + assert np.all(s > 1e-3) # to be sure + assert np.max(s) / np.min(s) < 100 # condition number of X + + if data_type == "long": + coef_unpenalized = rng.uniform(low=1, high=3, size=n_features) + coef_unpenalized *= rng.choice([-1, 1], size=n_features) + raw_prediction = X @ coef_unpenalized + else: + raw_prediction = rng.uniform(low=-3, high=3, size=n_samples) + # minimum norm solution min ||w||_2 such that raw_prediction = X w: + # w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction + coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction + + linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True) + sw = np.full(shape=n_samples, fill_value=1 / n_samples) + y = linear_loss.base_loss.link.inverse(raw_prediction) + + # Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with + # optimizer. Note that the problem is well conditioned such that we get accurate + # results. 
+ l2_reg_strength = 1 + fun = partial( + linear_loss.loss, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + grad = partial( + linear_loss.gradient, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + coef_penalized_with_intercept = _special_minimize( + fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14 + ) + + linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False) + fun = partial( + linear_loss.loss, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + grad = partial( + linear_loss.gradient, + X=X[:, :-1], + y=y, + sample_weight=sw, + l2_reg_strength=l2_reg_strength, + ) + coef_penalized_without_intercept = _special_minimize( + fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14 + ) + + # To be sure + assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm( + coef_unpenalized + ) + + return ( + model, + X, + y, + coef_unpenalized, + coef_penalized_with_intercept, + coef_penalized_without_intercept, + l2_reg_strength, + ) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_glm_regression(solver, fit_intercept, glm_dataset): + """Test that GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. 
+ """ + model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + X = X[:, :-1] # remove intercept + if fit_intercept: + coef = coef_with_intercept + intercept = coef[-1] + coef = coef[:-1] + else: + coef = coef_without_intercept + intercept = 0 + + model.fit(X, y) + + rtol = 5e-5 if solver == "lbfgs" else 1e-9 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + # Same with sample_weight. + model = ( + clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0])) + ) + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_hstacked_X(solver, fit_intercept, glm_dataset): + """Test that GLM converges for all solvers to correct solution on hstacked data. + + We work with a simple constructed data set with known solution. + Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2. + For long X, [X, X] is still a long but singular matrix. 
+ """ + model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset + n_samples, n_features = X.shape + params = dict( + alpha=alpha / 2, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + X = X[:, :-1] # remove intercept + X = 0.5 * np.concatenate((X, X), axis=1) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1) + if fit_intercept: + coef = coef_with_intercept + intercept = coef[-1] + coef = coef[:-1] + else: + coef = coef_without_intercept + intercept = 0 + + with warnings.catch_warnings(): + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.simplefilter("ignore", ConvergenceWarning) + model.fit(X, y) + + rtol = 2e-4 if solver == "lbfgs" else 5e-9 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, np.r_[coef, coef], rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset): + """Test that GLM converges for all solvers to correct solution on vstacked data. + + We work with a simple constructed data set with known solution. + Fit on [X] with alpha is the same as fit on [X], [y] + [X], [y] with 1 * alpha. + It is the same alpha as the average loss stays the same. + For wide X, [X', X'] is a singular matrix. 
+ """ + model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset + n_samples, n_features = X.shape + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + X = X[:, :-1] # remove intercept + X = np.concatenate((X, X), axis=0) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + y = np.r_[y, y] + if fit_intercept: + coef = coef_with_intercept + intercept = coef[-1] + coef = coef[:-1] + else: + coef = coef_without_intercept + intercept = 0 + model.fit(X, y) + + rtol = 3e-5 if solver == "lbfgs" else 5e-9 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset): + """Test that unpenalized GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + Note: This checks the minimum norm solution for wide X, i.e. + n_samples < n_features: + min ||w||_2 subject to w = argmin deviance(X, y, w) + """ + model, X, y, coef, _, _, _ = glm_dataset + n_samples, n_features = X.shape + alpha = 0 # unpenalized + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + + with warnings.catch_warnings(): + if solver.startswith("newton") and n_samples < n_features: + # The newton solvers should warn and automatically fallback to LBFGS + # in this case. The model should still converge. 
+ warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning) + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.filterwarnings("ignore", category=ConvergenceWarning) + model.fit(X, y) + + # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails + # for the wide/fat case with n_features > n_samples. Most current GLM solvers do + # NOT return the minimum norm solution with fit_intercept=True. + if n_samples > n_features: + rtol = 5e-5 if solver == "lbfgs" else 1e-7 + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=rtol) + else: + # As it is an underdetermined problem, prediction = y. The following shows that + # we get a solution, i.e. a (non-unique) minimum of the objective function ... + rtol = 5e-5 + if solver == "newton-cholesky": + rtol = 5e-4 + assert_allclose(model.predict(X), y, rtol=rtol) + + norm_solution = np.linalg.norm(np.r_[intercept, coef]) + norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) + if solver == "newton-cholesky": + # XXX: This solver shows random behaviour. Sometimes it finds solutions + # with norm_model <= norm_solution! So we check conditionally. + if norm_model < (1 + 1e-12) * norm_solution: + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=rtol) + elif solver == "lbfgs" and fit_intercept: + # But it is not the minimum norm solution. Otherwise the norms would be + # equal. + assert norm_model > (1 + 1e-12) * norm_solution + + # See https://github.com/scikit-learn/scikit-learn/issues/23670. + # Note: Even adding a tiny penalty does not give the minimal norm solution. + # XXX: We could have naively expected LBFGS to find the minimal norm + # solution by adding a very small penalty. 
Even that fails for a reason we + # do not properly understand at this point. + else: + # When `fit_intercept=False`, LBFGS naturally converges to the minimum norm + # solution on this problem. + # XXX: Do we have any theoretical guarantees why this should be the case? + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset): + """Test that unpenalized GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + GLM fit on [X] is the same as fit on [X, X]/2. + For long X, [X, X] is a singular matrix and we check against the minimum norm + solution: + min ||w||_2 subject to w = argmin deviance(X, y, w) + """ + model, X, y, coef, _, _, _ = glm_dataset + n_samples, n_features = X.shape + alpha = 0 # unpenalized + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + if fit_intercept: + intercept = coef[-1] + coef = coef[:-1] + if n_samples > n_features: + X = X[:, :-1] # remove intercept + X = 0.5 * np.concatenate((X, X), axis=1) + else: + # To know the minimum norm solution, we keep one intercept column and do + # not divide by 2. Later on, we must take special care. + X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]] + else: + intercept = 0 + X = 0.5 * np.concatenate((X, X), axis=1) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + + with warnings.catch_warnings(): + if solver.startswith("newton"): + # The newton solvers should warn and automatically fallback to LBFGS + # in this case. The model should still converge. 
+ warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning) + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.filterwarnings("ignore", category=ConvergenceWarning) + model.fit(X, y) + + if fit_intercept and n_samples < n_features: + # Here we take special care. + model_intercept = 2 * model.intercept_ + model_coef = 2 * model.coef_[:-1] # exclude the other intercept term. + # For minimum norm solution, we would have + # assert model.intercept_ == pytest.approx(model.coef_[-1]) + else: + model_intercept = model.intercept_ + model_coef = model.coef_ + + if n_samples > n_features: + assert model_intercept == pytest.approx(intercept) + rtol = 1e-4 + assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol) + else: + # As it is an underdetermined problem, prediction = y. The following shows that + # we get a solution, i.e. a (non-unique) minimum of the objective function ... + rtol = 1e-6 if solver == "lbfgs" else 5e-6 + assert_allclose(model.predict(X), y, rtol=rtol) + if (solver == "lbfgs" and fit_intercept) or solver == "newton-cholesky": + # Same as in test_glm_regression_unpenalized. + # But it is not the minimum norm solution. Otherwise the norms would be + # equal. 
+ norm_solution = np.linalg.norm( + 0.5 * np.r_[intercept, intercept, coef, coef] + ) + norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) + assert norm_model > (1 + 1e-12) * norm_solution + # For minimum norm solution, we would have + # assert model.intercept_ == pytest.approx(model.coef_[-1]) + else: + assert model_intercept == pytest.approx(intercept, rel=5e-6) + assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_glm_regression_unpenalized_vstacked_X(solver, fit_intercept, glm_dataset): + """Test that unpenalized GLM converges for all solvers to correct solution. + + We work with a simple constructed data set with known solution. + GLM fit on [X] is the same as fit on [X], [y] + [X], [y]. + For wide X, [X', X'] is a singular matrix and we check against the minimum norm + solution: + min ||w||_2 subject to w = argmin deviance(X, y, w) + """ + model, X, y, coef, _, _, _ = glm_dataset + n_samples, n_features = X.shape + alpha = 0 # unpenalized + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-12, + max_iter=1000, + ) + + model = clone(model).set_params(**params) + if fit_intercept: + X = X[:, :-1] # remove intercept + intercept = coef[-1] + coef = coef[:-1] + else: + intercept = 0 + X = np.concatenate((X, X), axis=0) + assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) + y = np.r_[y, y] + + with warnings.catch_warnings(): + if solver.startswith("newton") and n_samples < n_features: + # The newton solvers should warn and automatically fallback to LBFGS + # in this case. The model should still converge. + warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning) + # XXX: Investigate if the ConvergenceWarning that can appear in some + # cases should be considered a bug or not. 
In the mean time we don't + # fail when the assertions below pass irrespective of the presence of + # the warning. + warnings.filterwarnings("ignore", category=ConvergenceWarning) + model.fit(X, y) + + if n_samples > n_features: + rtol = 5e-5 if solver == "lbfgs" else 1e-6 + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=rtol) + else: + # As it is an underdetermined problem, prediction = y. The following shows that + # we get a solution, i.e. a (non-unique) minimum of the objective function ... + rtol = 1e-6 if solver == "lbfgs" else 5e-6 + assert_allclose(model.predict(X), y, rtol=rtol) + + norm_solution = np.linalg.norm(np.r_[intercept, coef]) + norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) + if solver == "newton-cholesky": + # XXX: This solver shows random behaviour. Sometimes it finds solutions + # with norm_model <= norm_solution! So we check conditionally. + if not (norm_model > (1 + 1e-12) * norm_solution): + assert model.intercept_ == pytest.approx(intercept) + assert_allclose(model.coef_, coef, rtol=1e-4) + elif solver == "lbfgs" and fit_intercept: + # Same as in test_glm_regression_unpenalized. + # But it is not the minimum norm solution. Otherwise the norms would be + # equal. 
+ assert norm_model > (1 + 1e-12) * norm_solution + else: + rtol = 1e-5 if solver == "newton-cholesky" else 1e-4 + assert model.intercept_ == pytest.approx(intercept, rel=rtol) + assert_allclose(model.coef_, coef, rtol=rtol) + + +def test_sample_weights_validation(): + """Test the raised errors in the validation of sample_weight.""" + # scalar value but not positive + X = [[1]] + y = [1] + weights = 0 + glm = _GeneralizedLinearRegressor() + + # Positive weights are accepted + glm.fit(X, y, sample_weight=1) + + # 2d array + weights = [[0]] + with pytest.raises(ValueError, match="must be 1D array or scalar"): + glm.fit(X, y, weights) + + # 1d but wrong length + weights = [1, 0] + msg = r"sample_weight.shape == \(2,\), expected \(1,\)!" + with pytest.raises(ValueError, match=msg): + glm.fit(X, y, weights) + + +@pytest.mark.parametrize( + "glm", + [ + TweedieRegressor(power=3), + PoissonRegressor(), + GammaRegressor(), + TweedieRegressor(power=1.5), + ], +) +def test_glm_wrong_y_range(glm): + y = np.array([-1, 2]) + X = np.array([[1], [1]]) + msg = r"Some value\(s\) of y are out of the valid range of the loss" + with pytest.raises(ValueError, match=msg): + glm.fit(X, y) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_glm_identity_regression(fit_intercept): + """Test GLM regression with identity link on a simple dataset.""" + coef = [1.0, 2.0] + X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T + y = np.dot(X, coef) + glm = _GeneralizedLinearRegressor( + alpha=0, + fit_intercept=fit_intercept, + tol=1e-12, + ) + if fit_intercept: + glm.fit(X[:, 1:], y) + assert_allclose(glm.coef_, coef[1:], rtol=1e-10) + assert_allclose(glm.intercept_, coef[0], rtol=1e-10) + else: + glm.fit(X, y) + assert_allclose(glm.coef_, coef, rtol=1e-12) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("alpha", [0.0, 1.0]) +@pytest.mark.parametrize( + "GLMEstimator", [_GeneralizedLinearRegressor, PoissonRegressor, GammaRegressor] +) 
+def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator): + """Test that the impact of sample_weight is consistent""" + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + glm_params = dict(alpha=alpha, fit_intercept=fit_intercept) + + glm = GLMEstimator(**glm_params).fit(X, y) + coef = glm.coef_.copy() + + # sample_weight=np.ones(..) should be equivalent to sample_weight=None + sample_weight = np.ones(y.shape) + glm.fit(X, y, sample_weight=sample_weight) + assert_allclose(glm.coef_, coef, rtol=1e-12) + + # sample_weight are normalized to 1 so, scaling them has no effect + sample_weight = 2 * np.ones(y.shape) + glm.fit(X, y, sample_weight=sample_weight) + assert_allclose(glm.coef_, coef, rtol=1e-12) + + # setting one element of sample_weight to 0 is equivalent to removing + # the corresponding sample + sample_weight = np.ones(y.shape) + sample_weight[-1] = 0 + glm.fit(X, y, sample_weight=sample_weight) + coef1 = glm.coef_.copy() + glm.fit(X[:-1], y[:-1]) + assert_allclose(glm.coef_, coef1, rtol=1e-12) + + # check that multiplying sample_weight by 2 is equivalent + # to repeating corresponding samples twice + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = np.ones(len(y)) + sample_weight_1[: n_samples // 2] = 2 + + glm1 = GLMEstimator(**glm_params).fit(X, y, sample_weight=sample_weight_1) + + glm2 = GLMEstimator(**glm_params).fit(X2, y2, sample_weight=None) + assert_allclose(glm1.coef_, glm2.coef_) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize( + "estimator", + [ + PoissonRegressor(), + GammaRegressor(), + TweedieRegressor(power=3.0), + TweedieRegressor(power=0, link="log"), + TweedieRegressor(power=1.5), + TweedieRegressor(power=4.5), + ], +) +def test_glm_log_regression(solver, fit_intercept, estimator): + 
"""Test GLM regression with log link on a simple dataset.""" + coef = [0.2, -0.1] + X = np.array([[0, 1, 2, 3, 4], [1, 1, 1, 1, 1]]).T + y = np.exp(np.dot(X, coef)) + glm = clone(estimator).set_params( + alpha=0, + fit_intercept=fit_intercept, + solver=solver, + tol=1e-8, + ) + if fit_intercept: + res = glm.fit(X[:, :-1], y) + assert_allclose(res.coef_, coef[:-1], rtol=1e-6) + assert_allclose(res.intercept_, coef[-1], rtol=1e-6) + else: + res = glm.fit(X, y) + assert_allclose(res.coef_, coef, rtol=2e-6) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("fit_intercept", [True, False]) +def test_warm_start(solver, fit_intercept, global_random_seed): + n_samples, n_features = 100, 10 + X, y = make_regression( + n_samples=n_samples, + n_features=n_features, + n_informative=n_features - 2, + bias=fit_intercept * 1.0, + noise=1.0, + random_state=global_random_seed, + ) + y = np.abs(y) # Poisson requires non-negative targets. + alpha = 1 + params = { + "solver": solver, + "fit_intercept": fit_intercept, + "tol": 1e-10, + } + + glm1 = PoissonRegressor(warm_start=False, max_iter=1000, alpha=alpha, **params) + glm1.fit(X, y) + + glm2 = PoissonRegressor(warm_start=True, max_iter=1, alpha=alpha, **params) + # As we intentionally set max_iter=1 such that the solver should raise a + # ConvergenceWarning. 
+ with pytest.warns(ConvergenceWarning): + glm2.fit(X, y) + + linear_loss = LinearModelLoss( + base_loss=glm1._get_loss(), + fit_intercept=fit_intercept, + ) + sw = np.full_like(y, fill_value=1 / n_samples) + + objective_glm1 = linear_loss.loss( + coef=np.r_[glm1.coef_, glm1.intercept_] if fit_intercept else glm1.coef_, + X=X, + y=y, + sample_weight=sw, + l2_reg_strength=alpha, + ) + objective_glm2 = linear_loss.loss( + coef=np.r_[glm2.coef_, glm2.intercept_] if fit_intercept else glm2.coef_, + X=X, + y=y, + sample_weight=sw, + l2_reg_strength=alpha, + ) + assert objective_glm1 < objective_glm2 + + glm2.set_params(max_iter=1000) + glm2.fit(X, y) + # The two models are not exactly identical since the lbfgs solver + # computes the approximate hessian from previous iterations, which + # will not be strictly identical in the case of a warm start. + rtol = 2e-4 if solver == "lbfgs" else 1e-9 + assert_allclose(glm1.coef_, glm2.coef_, rtol=rtol) + assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-5) + + +@pytest.mark.parametrize("n_samples, n_features", [(100, 10), (10, 100)]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("sample_weight", [None, True]) +def test_normal_ridge_comparison( + n_samples, n_features, fit_intercept, sample_weight, request +): + """Compare with Ridge regression for Normal distributions.""" + test_size = 10 + X, y = make_regression( + n_samples=n_samples + test_size, + n_features=n_features, + n_informative=n_features - 2, + noise=0.5, + random_state=42, + ) + + if n_samples > n_features: + ridge_params = {"solver": "svd"} + else: + ridge_params = {"solver": "saga", "max_iter": 1000000, "tol": 1e-7} + + ( + X_train, + X_test, + y_train, + y_test, + ) = train_test_split(X, y, test_size=test_size, random_state=0) + + alpha = 1.0 + if sample_weight is None: + sw_train = None + alpha_ridge = alpha * n_samples + else: + sw_train = np.random.RandomState(0).rand(len(y_train)) + alpha_ridge = alpha * 
sw_train.sum() + + # GLM has 1/(2*n) * Loss + 1/2*L2, Ridge has Loss + L2 + ridge = Ridge( + alpha=alpha_ridge, + random_state=42, + fit_intercept=fit_intercept, + **ridge_params, + ) + ridge.fit(X_train, y_train, sample_weight=sw_train) + + glm = _GeneralizedLinearRegressor( + alpha=alpha, + fit_intercept=fit_intercept, + max_iter=300, + tol=1e-5, + ) + glm.fit(X_train, y_train, sample_weight=sw_train) + assert glm.coef_.shape == (X.shape[1],) + assert_allclose(glm.coef_, ridge.coef_, atol=5e-5) + assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5) + assert_allclose(glm.predict(X_train), ridge.predict(X_train), rtol=2e-4) + assert_allclose(glm.predict(X_test), ridge.predict(X_test), rtol=2e-4) + + +@pytest.mark.parametrize("solver", ["lbfgs", "newton-cholesky"]) +def test_poisson_glmnet(solver): + """Compare Poisson regression with L2 regularization and LogLink to glmnet""" + # library("glmnet") + # options(digits=10) + # df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2)) + # x <- data.matrix(df[,c("a", "b")]) + # y <- df$y + # fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson", + # standardize=F, thresh=1e-10, nlambda=10000) + # coef(fit, s=1) + # (Intercept) -0.12889386979 + # a 0.29019207995 + # b 0.03741173122 + X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T + y = np.array([0, 1, 1, 2]) + glm = PoissonRegressor( + alpha=1, + fit_intercept=True, + tol=1e-7, + max_iter=300, + solver=solver, + ) + glm.fit(X, y) + assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5) + assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5) + + +def test_convergence_warning(regression_data): + X, y = regression_data + + est = _GeneralizedLinearRegressor(max_iter=1, tol=1e-20) + with pytest.warns(ConvergenceWarning): + est.fit(X, y) + + +@pytest.mark.parametrize( + "name, link_class", [("identity", IdentityLink), ("log", LogLink)] +) +def test_tweedie_link_argument(name, link_class): + """Test GLM link argument set as string.""" + y 
= np.array([0.1, 0.5]) # in range of all distributions + X = np.array([[1], [2]]) + glm = TweedieRegressor(power=1, link=name).fit(X, y) + assert isinstance(glm._base_loss.link, link_class) + + +@pytest.mark.parametrize( + "power, expected_link_class", + [ + (0, IdentityLink), # normal + (1, LogLink), # poisson + (2, LogLink), # gamma + (3, LogLink), # inverse-gaussian + ], +) +def test_tweedie_link_auto(power, expected_link_class): + """Test that link='auto' delivers the expected link function""" + y = np.array([0.1, 0.5]) # in range of all distributions + X = np.array([[1], [2]]) + glm = TweedieRegressor(link="auto", power=power).fit(X, y) + assert isinstance(glm._base_loss.link, expected_link_class) + + +@pytest.mark.parametrize("power", [0, 1, 1.5, 2, 3]) +@pytest.mark.parametrize("link", ["log", "identity"]) +def test_tweedie_score(regression_data, power, link): + """Test that GLM score equals d2_tweedie_score for Tweedie losses.""" + X, y = regression_data + # make y positive + y = np.abs(y) + 1.0 + glm = TweedieRegressor(power=power, link=link).fit(X, y) + assert glm.score(X, y) == pytest.approx( + d2_tweedie_score(y, glm.predict(X), power=power) + ) + + +@pytest.mark.parametrize( + "estimator, value", + [ + (PoissonRegressor(), True), + (GammaRegressor(), True), + (TweedieRegressor(power=1.5), True), + (TweedieRegressor(power=0), False), + ], +) +def test_tags(estimator, value): + assert estimator.__sklearn_tags__().target_tags.positive_only is value + + +def test_linalg_warning_with_newton_solver(global_random_seed): + newton_solver = "newton-cholesky" + rng = np.random.RandomState(global_random_seed) + # Use at least 20 samples to reduce the likelihood of getting a degenerate + # dataset for any global_random_seed. + X_orig = rng.normal(size=(20, 3)) + y = rng.poisson( + np.exp(X_orig @ np.ones(X_orig.shape[1])), size=X_orig.shape[0] + ).astype(np.float64) + + # Collinear variation of the same input features. 
+ X_collinear = np.hstack([X_orig] * 10) + + # Let's consider the deviance of a constant baseline on this problem. + baseline_pred = np.full_like(y, y.mean()) + constant_model_deviance = mean_poisson_deviance(y, baseline_pred) + assert constant_model_deviance > 1.0 + + # No warning raised on well-conditioned design, even without regularization. + tol = 1e-10 + with warnings.catch_warnings(): + warnings.simplefilter("error") + reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(X_orig, y) + original_newton_deviance = mean_poisson_deviance(y, reg.predict(X_orig)) + + # On this dataset, we should have enough data points to not make it + # possible to get a near zero deviance (for the any of the admissible + # random seeds). This will make it easier to interpret meaning of rtol in + # the subsequent assertions: + assert original_newton_deviance > 0.2 + + # We check that the model could successfully fit information in X_orig to + # improve upon the constant baseline by a large margin (when evaluated on + # the traing set). + assert constant_model_deviance - original_newton_deviance > 0.1 + + # LBFGS is robust to a collinear design because its approximation of the + # Hessian is Symmeric Positive Definite by construction. Let's record its + # solution + with warnings.catch_warnings(): + warnings.simplefilter("error") + reg = PoissonRegressor(solver="lbfgs", alpha=0.0, tol=tol).fit(X_collinear, y) + collinear_lbfgs_deviance = mean_poisson_deviance(y, reg.predict(X_collinear)) + + # The LBFGS solution on the collinear is expected to reach a comparable + # solution to the Newton solution on the original data. + rtol = 1e-6 + assert collinear_lbfgs_deviance == pytest.approx(original_newton_deviance, rel=rtol) + + # Fitting a Newton solver on the collinear version of the training data + # without regularization should raise an informative warning and fallback + # to the LBFGS solver. 
+ msg = ( + "The inner solver of .*Newton.*Solver stumbled upon a singular or very " + "ill-conditioned Hessian matrix" + ) + with pytest.warns(scipy.linalg.LinAlgWarning, match=msg): + reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit( + X_collinear, y + ) + # As a result we should still automatically converge to a good solution. + collinear_newton_deviance = mean_poisson_deviance(y, reg.predict(X_collinear)) + assert collinear_newton_deviance == pytest.approx( + original_newton_deviance, rel=rtol + ) + + # Increasing the regularization slightly should make the problem go away: + with warnings.catch_warnings(): + warnings.simplefilter("error", scipy.linalg.LinAlgWarning) + reg = PoissonRegressor(solver=newton_solver, alpha=1e-10).fit(X_collinear, y) + + # The slightly penalized model on the collinear data should be close enough + # to the unpenalized model on the original data. + penalized_collinear_newton_deviance = mean_poisson_deviance( + y, reg.predict(X_collinear) + ) + assert penalized_collinear_newton_deviance == pytest.approx( + original_newton_deviance, rel=rtol + ) + + +@pytest.mark.parametrize("verbose", [0, 1, 2]) +def test_newton_solver_verbosity(capsys, verbose): + """Test the std output of verbose newton solvers.""" + y = np.array([1, 2], dtype=float) + X = np.array([[1.0, 0], [0, 1]], dtype=float) + linear_loss = LinearModelLoss(base_loss=HalfPoissonLoss(), fit_intercept=False) + sol = NewtonCholeskySolver( + coef=linear_loss.init_zero_coef(X), + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + sol.solve(X, y, None) # returns array([0., 0.69314758]) + captured = capsys.readouterr() + + if verbose == 0: + assert captured.out == "" + else: + msg = [ + "Newton iter=1", + "Check Convergence", + "1. max |gradient|", + "2. 
Newton decrement", + "Solver did converge at loss = ", + ] + for m in msg: + assert m in captured.out + + if verbose >= 2: + msg = ["Backtracking Line Search", "line search iteration="] + for m in msg: + assert m in captured.out + + # Set the Newton solver to a state with a completely wrong Newton step. + sol = NewtonCholeskySolver( + coef=linear_loss.init_zero_coef(X), + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + sol.setup(X=X, y=y, sample_weight=None) + sol.iteration = 1 + sol.update_gradient_hessian(X=X, y=y, sample_weight=None) + sol.coef_newton = np.array([1.0, 0]) + sol.gradient_times_newton = sol.gradient @ sol.coef_newton + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + sol.line_search(X=X, y=y, sample_weight=None) + captured = capsys.readouterr() + if verbose >= 1: + assert ( + "Line search did not converge and resorts to lbfgs instead." in captured.out + ) + + # Set the Newton solver to a state with bad Newton step such that the loss + # improvement in line search is tiny. + sol = NewtonCholeskySolver( + coef=np.array([1e-12, 0.69314758]), + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + sol.setup(X=X, y=y, sample_weight=None) + sol.iteration = 1 + sol.update_gradient_hessian(X=X, y=y, sample_weight=None) + sol.coef_newton = np.array([1e-6, 0]) + sol.gradient_times_newton = sol.gradient @ sol.coef_newton + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + sol.line_search(X=X, y=y, sample_weight=None) + captured = capsys.readouterr() + if verbose >= 2: + msg = [ + "line search iteration=", + "check loss improvement <= armijo term:", + "check loss |improvement| <= eps * |loss_old|:", + "check sum(|gradient|) < sum(|gradient_old|):", + ] + for m in msg: + assert m in captured.out + + # Test for a case with negative hessian. We badly initialize coef for a Tweedie + # loss with non-canonical link, e.g. 
Inverse Gaussian deviance with a log link. + linear_loss = LinearModelLoss( + base_loss=HalfTweedieLoss(power=3), fit_intercept=False + ) + sol = NewtonCholeskySolver( + coef=linear_loss.init_zero_coef(X) + 1, + linear_loss=linear_loss, + l2_reg_strength=0, + verbose=verbose, + ) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + sol.solve(X, y, None) + captured = capsys.readouterr() + if verbose >= 1: + assert ( + "The inner solver detected a pointwise Hessian with many negative values" + " and resorts to lbfgs instead." in captured.out + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_huber.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_huber.py new file mode 100644 index 0000000000000000000000000000000000000000..598d208df535ce8f95ab7c8e79140cb28851e076 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_huber.py @@ -0,0 +1,358 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from numbers import Integral, Real + +import numpy as np +from scipy import optimize + +from ..base import BaseEstimator, RegressorMixin, _fit_context +from ..utils._mask import axis0_safe_slice +from ..utils._param_validation import Interval +from ..utils.extmath import safe_sparse_dot +from ..utils.optimize import _check_optimize_result +from ..utils.validation import _check_sample_weight, validate_data +from ._base import LinearModel + + +def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None): + """Returns the Huber loss and the gradient. + + Parameters + ---------- + w : ndarray, shape (n_features + 1,) or (n_features + 2,) + Feature vector. + w[:n_features] gives the coefficients + w[-1] gives the scale factor and if the intercept is fit w[-2] + gives the intercept factor. + + X : ndarray of shape (n_samples, n_features) + Input data. + + y : ndarray of shape (n_samples,) + Target vector. 
+ + epsilon : float + Robustness of the Huber estimator. + + alpha : float + Regularization parameter. + + sample_weight : ndarray of shape (n_samples,), default=None + Weight assigned to each sample. + + Returns + ------- + loss : float + Huber loss. + + gradient : ndarray, shape (len(w)) + Returns the derivative of the Huber loss with respect to each + coefficient, intercept and the scale as a vector. + """ + _, n_features = X.shape + fit_intercept = n_features + 2 == w.shape[0] + if fit_intercept: + intercept = w[-2] + sigma = w[-1] + w = w[:n_features] + n_samples = np.sum(sample_weight) + + # Calculate the values where |y - X'w -c / sigma| > epsilon + # The values above this threshold are outliers. + linear_loss = y - safe_sparse_dot(X, w) + if fit_intercept: + linear_loss -= intercept + abs_linear_loss = np.abs(linear_loss) + outliers_mask = abs_linear_loss > epsilon * sigma + + # Calculate the linear loss due to the outliers. + # This is equal to (2 * M * |y - X'w -c / sigma| - M**2) * sigma + outliers = abs_linear_loss[outliers_mask] + num_outliers = np.count_nonzero(outliers_mask) + n_non_outliers = X.shape[0] - num_outliers + + # n_sq_outliers includes the weight give to the outliers while + # num_outliers is just the number of outliers. + outliers_sw = sample_weight[outliers_mask] + n_sw_outliers = np.sum(outliers_sw) + outlier_loss = ( + 2.0 * epsilon * np.sum(outliers_sw * outliers) + - sigma * n_sw_outliers * epsilon**2 + ) + + # Calculate the quadratic loss due to the non-outliers.- + # This is equal to |(y - X'w - c)**2 / sigma**2| * sigma + non_outliers = linear_loss[~outliers_mask] + weighted_non_outliers = sample_weight[~outliers_mask] * non_outliers + weighted_loss = np.dot(weighted_non_outliers.T, non_outliers) + squared_loss = weighted_loss / sigma + + if fit_intercept: + grad = np.zeros(n_features + 2) + else: + grad = np.zeros(n_features + 1) + + # Gradient due to the squared loss. 
+ X_non_outliers = -axis0_safe_slice(X, ~outliers_mask, n_non_outliers) + grad[:n_features] = ( + 2.0 / sigma * safe_sparse_dot(weighted_non_outliers, X_non_outliers) + ) + + # Gradient due to the linear loss. + signed_outliers = np.ones_like(outliers) + signed_outliers_mask = linear_loss[outliers_mask] < 0 + signed_outliers[signed_outliers_mask] = -1.0 + X_outliers = axis0_safe_slice(X, outliers_mask, num_outliers) + sw_outliers = sample_weight[outliers_mask] * signed_outliers + grad[:n_features] -= 2.0 * epsilon * (safe_sparse_dot(sw_outliers, X_outliers)) + + # Gradient due to the penalty. + grad[:n_features] += alpha * 2.0 * w + + # Gradient due to sigma. + grad[-1] = n_samples + grad[-1] -= n_sw_outliers * epsilon**2 + grad[-1] -= squared_loss / sigma + + # Gradient due to the intercept. + if fit_intercept: + grad[-2] = -2.0 * np.sum(weighted_non_outliers) / sigma + grad[-2] -= 2.0 * epsilon * np.sum(sw_outliers) + + loss = n_samples * sigma + squared_loss + outlier_loss + loss += alpha * np.dot(w, w) + return loss, grad + + +class HuberRegressor(LinearModel, RegressorMixin, BaseEstimator): + """L2-regularized linear regression model that is robust to outliers. + + The Huber Regressor optimizes the squared loss for the samples where + ``|(y - Xw - c) / sigma| < epsilon`` and the absolute loss for the samples + where ``|(y - Xw - c) / sigma| > epsilon``, where the model coefficients + ``w``, the intercept ``c`` and the scale ``sigma`` are parameters + to be optimized. The parameter `sigma` makes sure that if `y` is scaled up + or down by a certain factor, one does not need to rescale `epsilon` to + achieve the same robustness. Note that this does not take into account + the fact that the different features of `X` may be of different scales. + + The Huber loss function has the advantage of not being heavily influenced + by the outliers while not completely ignoring their effect. + + Read more in the :ref:`User Guide ` + + .. 
versionadded:: 0.18 + + Parameters + ---------- + epsilon : float, default=1.35 + The parameter epsilon controls the number of samples that should be + classified as outliers. The smaller the epsilon, the more robust it is + to outliers. Epsilon must be in the range `[1, inf)`. + + max_iter : int, default=100 + Maximum number of iterations that + ``scipy.optimize.minimize(method="L-BFGS-B")`` should run for. + + alpha : float, default=0.0001 + Strength of the squared L2 regularization. Note that the penalty is + equal to ``alpha * ||w||^2``. + Must be in the range `[0, inf)`. + + warm_start : bool, default=False + This is useful if the stored attributes of a previously used model + has to be reused. If set to False, then the coefficients will + be rewritten for every call to fit. + See :term:`the Glossary `. + + fit_intercept : bool, default=True + Whether or not to fit the intercept. This can be set to False + if the data is already centered around the origin. + + tol : float, default=1e-05 + The iteration will stop when + ``max{|proj g_i | i = 1, ..., n}`` <= ``tol`` + where pg_i is the i-th component of the projected gradient. + + Attributes + ---------- + coef_ : array, shape (n_features,) + Features got by optimizing the L2-regularized Huber loss. + + intercept_ : float + Bias. + + scale_ : float + The value by which ``|y - Xw - c|`` is scaled down. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations that + ``scipy.optimize.minimize(method="L-BFGS-B")`` has run for. + + .. versionchanged:: 0.20 + + In SciPy <= 1.0.0 the number of lbfgs iterations may exceed + ``max_iter``. ``n_iter_`` will now report at most ``max_iter``. 
+ + outliers_ : array, shape (n_samples,) + A boolean mask which is set to True where the samples are identified + as outliers. + + See Also + -------- + RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm. + TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model. + SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD. + + References + ---------- + .. [1] Peter J. Huber, Elvezio M. Ronchetti, Robust Statistics + Concomitant scale estimates, p. 172 + .. [2] Art B. Owen (2006), `A robust hybrid of lasso and ridge regression. + `_ + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import HuberRegressor, LinearRegression + >>> from sklearn.datasets import make_regression + >>> rng = np.random.RandomState(0) + >>> X, y, coef = make_regression( + ... n_samples=200, n_features=2, noise=4.0, coef=True, random_state=0) + >>> X[:4] = rng.uniform(10, 20, (4, 2)) + >>> y[:4] = rng.uniform(10, 20, 4) + >>> huber = HuberRegressor().fit(X, y) + >>> huber.score(X, y) + -7.284... + >>> huber.predict(X[:1,]) + array([806.7200...]) + >>> linear = LinearRegression().fit(X, y) + >>> print("True coefficients:", coef) + True coefficients: [20.4923... 34.1698...] + >>> print("Huber coefficients:", huber.coef_) + Huber coefficients: [17.7906... 31.0106...] + >>> print("Linear Regression coefficients:", linear.coef_) + Linear Regression coefficients: [-1.9221... 7.0226...] 
+ """ + + _parameter_constraints: dict = { + "epsilon": [Interval(Real, 1.0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "alpha": [Interval(Real, 0, None, closed="left")], + "warm_start": ["boolean"], + "fit_intercept": ["boolean"], + "tol": [Interval(Real, 0.0, None, closed="left")], + } + + def __init__( + self, + *, + epsilon=1.35, + max_iter=100, + alpha=0.0001, + warm_start=False, + fit_intercept=True, + tol=1e-05, + ): + self.epsilon = epsilon + self.max_iter = max_iter + self.alpha = alpha + self.warm_start = warm_start + self.fit_intercept = fit_intercept + self.tol = tol + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the model according to the given training data. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like, shape (n_samples,) + Target vector relative to X. + + sample_weight : array-like, shape (n_samples,) + Weight given to each sample. + + Returns + ------- + self : object + Fitted `HuberRegressor` estimator. + """ + X, y = validate_data( + self, + X, + y, + copy=False, + accept_sparse=["csr"], + y_numeric=True, + dtype=[np.float64, np.float32], + ) + + sample_weight = _check_sample_weight(sample_weight, X) + + if self.warm_start and hasattr(self, "coef_"): + parameters = np.concatenate((self.coef_, [self.intercept_, self.scale_])) + else: + if self.fit_intercept: + parameters = np.zeros(X.shape[1] + 2) + else: + parameters = np.zeros(X.shape[1] + 1) + # Make sure to initialize the scale parameter to a strictly + # positive value: + parameters[-1] = 1 + + # Sigma or the scale factor should be non-negative. + # Setting it to be zero might cause undefined bounds hence we set it + # to a value close to zero. 
+ bounds = np.tile([-np.inf, np.inf], (parameters.shape[0], 1)) + bounds[-1][0] = np.finfo(np.float64).eps * 10 + + opt_res = optimize.minimize( + _huber_loss_and_gradient, + parameters, + method="L-BFGS-B", + jac=True, + args=(X, y, self.epsilon, self.alpha, sample_weight), + options={"maxiter": self.max_iter, "gtol": self.tol, "iprint": -1}, + bounds=bounds, + ) + + parameters = opt_res.x + + if opt_res.status == 2: + raise ValueError( + "HuberRegressor convergence failed: l-BFGS-b solver terminated with %s" + % opt_res.message + ) + self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter) + self.scale_ = parameters[-1] + if self.fit_intercept: + self.intercept_ = parameters[-2] + else: + self.intercept_ = 0.0 + self.coef_ = parameters[: X.shape[1]] + + residual = np.abs(y - safe_sparse_dot(X, self.coef_) - self.intercept_) + self.outliers_ = residual > self.scale_ * self.epsilon + return self + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + return tags diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_omp.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_omp.py new file mode 100644 index 0000000000000000000000000000000000000000..aad9d1184fb8f446859dde6465b4ee08bbaab788 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_omp.py @@ -0,0 +1,1121 @@ +"""Orthogonal matching pursuit algorithms""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from math import sqrt +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.linalg.lapack import get_lapack_funcs + +from ..base import MultiOutputMixin, RegressorMixin, _fit_context +from ..model_selection import check_cv +from ..utils import Bunch, as_float_array, check_array +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.metadata_routing 
import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils.parallel import Parallel, delayed +from ..utils.validation import validate_data +from ._base import LinearModel, _pre_fit + +premature = ( + "Orthogonal matching pursuit ended prematurely due to linear" + " dependence in the dictionary. The requested precision might" + " not have been met." +) + + +def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False): + """Orthogonal Matching Pursuit step using the Cholesky decomposition. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Input dictionary. Columns are assumed to have unit norm. + + y : ndarray of shape (n_samples,) + Input targets. + + n_nonzero_coefs : int + Targeted number of non-zero elements. + + tol : float, default=None + Targeted squared error, if not None overrides n_nonzero_coefs. + + copy_X : bool, default=True + Whether the design matrix X must be copied by the algorithm. A false + value is only helpful if X is already Fortran-ordered, otherwise a + copy is made anyway. + + return_path : bool, default=False + Whether to return every value of the nonzero coefficients along the + forward path. Useful for cross-validation. + + Returns + ------- + gamma : ndarray of shape (n_nonzero_coefs,) + Non-zero elements of the solution. + + idx : ndarray of shape (n_nonzero_coefs,) + Indices of the positions of the elements in gamma within the solution + vector. + + coef : ndarray of shape (n_features, n_nonzero_coefs) + The first k values of column k correspond to the coefficient value + for the active features at that step. The lower left triangle contains + garbage. Only returned if ``return_path=True``. + + n_active : int + Number of active features at convergence. 
+ """ + if copy_X: + X = X.copy("F") + else: # even if we are allowed to overwrite, still copy it if bad order + X = np.asfortranarray(X) + + min_float = np.finfo(X.dtype).eps + nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (X,)) + (potrs,) = get_lapack_funcs(("potrs",), (X,)) + + alpha = np.dot(X.T, y) + residual = y + gamma = np.empty(0) + n_active = 0 + indices = np.arange(X.shape[1]) # keeping track of swapping + + max_features = X.shape[1] if tol is not None else n_nonzero_coefs + + L = np.empty((max_features, max_features), dtype=X.dtype) + + if return_path: + coefs = np.empty_like(L) + + while True: + lam = np.argmax(np.abs(np.dot(X.T, residual))) + if lam < n_active or alpha[lam] ** 2 < min_float: + # atom already selected or inner product too small + warnings.warn(premature, RuntimeWarning, stacklevel=2) + break + + if n_active > 0: + # Updates the Cholesky decomposition of X' X + L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam]) + linalg.solve_triangular( + L[:n_active, :n_active], + L[n_active, :n_active], + trans=0, + lower=1, + overwrite_b=True, + check_finite=False, + ) + v = nrm2(L[n_active, :n_active]) ** 2 + Lkk = linalg.norm(X[:, lam]) ** 2 - v + if Lkk <= min_float: # selected atoms are dependent + warnings.warn(premature, RuntimeWarning, stacklevel=2) + break + L[n_active, n_active] = sqrt(Lkk) + else: + L[0, 0] = linalg.norm(X[:, lam]) + + X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam]) + alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active] + indices[n_active], indices[lam] = indices[lam], indices[n_active] + n_active += 1 + + # solves LL'x = X'y as a composition of two triangular systems + gamma, _ = potrs( + L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False + ) + + if return_path: + coefs[:n_active, n_active - 1] = gamma + residual = y - np.dot(X[:, :n_active], gamma) + if tol is not None and nrm2(residual) ** 2 <= tol: + break + elif n_active == max_features: + break + + if return_path: 
+ return gamma, indices[:n_active], coefs[:, :n_active], n_active + else: + return gamma, indices[:n_active], n_active + + +def _gram_omp( + Gram, + Xy, + n_nonzero_coefs, + tol_0=None, + tol=None, + copy_Gram=True, + copy_Xy=True, + return_path=False, +): + """Orthogonal Matching Pursuit step on a precomputed Gram matrix. + + This function uses the Cholesky decomposition method. + + Parameters + ---------- + Gram : ndarray of shape (n_features, n_features) + Gram matrix of the input data matrix. + + Xy : ndarray of shape (n_features,) + Input targets. + + n_nonzero_coefs : int + Targeted number of non-zero elements. + + tol_0 : float, default=None + Squared norm of y, required if tol is not None. + + tol : float, default=None + Targeted squared error, if not None overrides n_nonzero_coefs. + + copy_Gram : bool, default=True + Whether the gram matrix must be copied by the algorithm. A false + value is only helpful if it is already Fortran-ordered, otherwise a + copy is made anyway. + + copy_Xy : bool, default=True + Whether the covariance vector Xy must be copied by the algorithm. + If False, it may be overwritten. + + return_path : bool, default=False + Whether to return every value of the nonzero coefficients along the + forward path. Useful for cross-validation. + + Returns + ------- + gamma : ndarray of shape (n_nonzero_coefs,) + Non-zero elements of the solution. + + idx : ndarray of shape (n_nonzero_coefs,) + Indices of the positions of the elements in gamma within the solution + vector. + + coefs : ndarray of shape (n_features, n_nonzero_coefs) + The first k values of column k correspond to the coefficient value + for the active features at that step. The lower left triangle contains + garbage. Only returned if ``return_path=True``. + + n_active : int + Number of active features at convergence. 
+ """ + Gram = Gram.copy("F") if copy_Gram else np.asfortranarray(Gram) + + if copy_Xy or not Xy.flags.writeable: + Xy = Xy.copy() + + min_float = np.finfo(Gram.dtype).eps + nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (Gram,)) + (potrs,) = get_lapack_funcs(("potrs",), (Gram,)) + + indices = np.arange(len(Gram)) # keeping track of swapping + alpha = Xy + tol_curr = tol_0 + delta = 0 + gamma = np.empty(0) + n_active = 0 + + max_features = len(Gram) if tol is not None else n_nonzero_coefs + + L = np.empty((max_features, max_features), dtype=Gram.dtype) + + L[0, 0] = 1.0 + if return_path: + coefs = np.empty_like(L) + + while True: + lam = np.argmax(np.abs(alpha)) + if lam < n_active or alpha[lam] ** 2 < min_float: + # selected same atom twice, or inner product too small + warnings.warn(premature, RuntimeWarning, stacklevel=3) + break + if n_active > 0: + L[n_active, :n_active] = Gram[lam, :n_active] + linalg.solve_triangular( + L[:n_active, :n_active], + L[n_active, :n_active], + trans=0, + lower=1, + overwrite_b=True, + check_finite=False, + ) + v = nrm2(L[n_active, :n_active]) ** 2 + Lkk = Gram[lam, lam] - v + if Lkk <= min_float: # selected atoms are dependent + warnings.warn(premature, RuntimeWarning, stacklevel=3) + break + L[n_active, n_active] = sqrt(Lkk) + else: + L[0, 0] = sqrt(Gram[lam, lam]) + + Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam]) + Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam]) + indices[n_active], indices[lam] = indices[lam], indices[n_active] + Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active] + n_active += 1 + # solves LL'x = X'y as a composition of two triangular systems + gamma, _ = potrs( + L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False + ) + if return_path: + coefs[:n_active, n_active - 1] = gamma + beta = np.dot(Gram[:, :n_active], gamma) + alpha = Xy - beta + if tol is not None: + tol_curr += delta + delta = np.inner(gamma, beta[:n_active]) + tol_curr -= delta + if 
abs(tol_curr) <= tol: + break + elif n_active == max_features: + break + + if return_path: + return gamma, indices[:n_active], coefs[:, :n_active], n_active + else: + return gamma, indices[:n_active], n_active + + +@validate_params( + { + "X": ["array-like"], + "y": [np.ndarray], + "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "tol": [Interval(Real, 0, None, closed="left"), None], + "precompute": ["boolean", StrOptions({"auto"})], + "copy_X": ["boolean"], + "return_path": ["boolean"], + "return_n_iter": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def orthogonal_mp( + X, + y, + *, + n_nonzero_coefs=None, + tol=None, + precompute=False, + copy_X=True, + return_path=False, + return_n_iter=False, +): + r"""Orthogonal Matching Pursuit (OMP). + + Solves n_targets Orthogonal Matching Pursuit problems. + An instance of the problem has the form: + + When parametrized by the number of non-zero coefficients using + `n_nonzero_coefs`: + argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs} + + When parametrized by error using the parameter `tol`: + argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. Columns are assumed to have unit norm. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Input targets. + + n_nonzero_coefs : int, default=None + Desired number of non-zero entries in the solution. If None (by + default) this value is set to 10% of n_features. + + tol : float, default=None + Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs. + + precompute : 'auto' or bool, default=False + Whether to perform precomputations. Improves performance when n_targets + or n_samples is very large. + + copy_X : bool, default=True + Whether the design matrix X must be copied by the algorithm. 
A false + value is only helpful if X is already Fortran-ordered, otherwise a + copy is made anyway. + + return_path : bool, default=False + Whether to return every value of the nonzero coefficients along the + forward path. Useful for cross-validation. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + Returns + ------- + coef : ndarray of shape (n_features,) or (n_features, n_targets) + Coefficients of the OMP solution. If `return_path=True`, this contains + the whole coefficient path. In this case its shape is + (n_features, n_features) or (n_features, n_targets, n_features) and + iterating over the last axis generates coefficients in increasing order + of active features. + + n_iters : array-like or int + Number of active features across every target. Returned only if + `return_n_iter` is set to True. + + See Also + -------- + OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model. + orthogonal_mp_gram : Solve OMP problems using Gram matrix and the product X.T * y. + lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. + sklearn.decomposition.sparse_encode : Sparse coding. + + Notes + ----- + Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang, + Matching pursuits with time-frequency dictionaries, IEEE Transactions on + Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. + (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) + + This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, + M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal + Matching Pursuit Technical Report - CS Technion, April 2008. 
+ https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> from sklearn.linear_model import orthogonal_mp + >>> X, y = make_regression(noise=4, random_state=0) + >>> coef = orthogonal_mp(X, y) + >>> coef.shape + (100,) + >>> X[:1,] @ coef + array([-78.68...]) + """ + X = check_array(X, order="F", copy=copy_X) + copy_X = False + if y.ndim == 1: + y = y.reshape(-1, 1) + y = check_array(y) + if y.shape[1] > 1: # subsequent targets will be affected + copy_X = True + if n_nonzero_coefs is None and tol is None: + # default for n_nonzero_coefs is 0.1 * n_features + # but at least one. + n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1) + if tol is None and n_nonzero_coefs > X.shape[1]: + raise ValueError( + "The number of atoms cannot be more than the number of features" + ) + if precompute == "auto": + precompute = X.shape[0] > X.shape[1] + if precompute: + G = np.dot(X.T, X) + G = np.asfortranarray(G) + Xy = np.dot(X.T, y) + if tol is not None: + norms_squared = np.sum((y**2), axis=0) + else: + norms_squared = None + return orthogonal_mp_gram( + G, + Xy, + n_nonzero_coefs=n_nonzero_coefs, + tol=tol, + norms_squared=norms_squared, + copy_Gram=copy_X, + copy_Xy=False, + return_path=return_path, + ) + + if return_path: + coef = np.zeros((X.shape[1], y.shape[1], X.shape[1])) + else: + coef = np.zeros((X.shape[1], y.shape[1])) + n_iters = [] + + for k in range(y.shape[1]): + out = _cholesky_omp( + X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path + ) + if return_path: + _, idx, coefs, n_iter = out + coef = coef[:, :, : len(idx)] + for n_active, x in enumerate(coefs.T): + coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1] + else: + x, idx, n_iter = out + coef[idx, k] = x + n_iters.append(n_iter) + + if y.shape[1] == 1: + n_iters = n_iters[0] + + if return_n_iter: + return np.squeeze(coef), n_iters + else: + return np.squeeze(coef) + + +@validate_params( + 
{ + "Gram": ["array-like"], + "Xy": ["array-like"], + "n_nonzero_coefs": [Interval(Integral, 0, None, closed="neither"), None], + "tol": [Interval(Real, 0, None, closed="left"), None], + "norms_squared": ["array-like", None], + "copy_Gram": ["boolean"], + "copy_Xy": ["boolean"], + "return_path": ["boolean"], + "return_n_iter": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def orthogonal_mp_gram( + Gram, + Xy, + *, + n_nonzero_coefs=None, + tol=None, + norms_squared=None, + copy_Gram=True, + copy_Xy=True, + return_path=False, + return_n_iter=False, +): + """Gram Orthogonal Matching Pursuit (OMP). + + Solves n_targets Orthogonal Matching Pursuit problems using only + the Gram matrix X.T * X and the product X.T * y. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + Gram : array-like of shape (n_features, n_features) + Gram matrix of the input data: `X.T * X`. + + Xy : array-like of shape (n_features,) or (n_features, n_targets) + Input targets multiplied by `X`: `X.T * y`. + + n_nonzero_coefs : int, default=None + Desired number of non-zero entries in the solution. If `None` (by + default) this value is set to 10% of n_features. + + tol : float, default=None + Maximum squared norm of the residual. If not `None`, + overrides `n_nonzero_coefs`. + + norms_squared : array-like of shape (n_targets,), default=None + Squared L2 norms of the lines of `y`. Required if `tol` is not None. + + copy_Gram : bool, default=True + Whether the gram matrix must be copied by the algorithm. A `False` + value is only helpful if it is already Fortran-ordered, otherwise a + copy is made anyway. + + copy_Xy : bool, default=True + Whether the covariance vector `Xy` must be copied by the algorithm. + If `False`, it may be overwritten. + + return_path : bool, default=False + Whether to return every value of the nonzero coefficients along the + forward path. Useful for cross-validation. 
+ + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + Returns + ------- + coef : ndarray of shape (n_features,) or (n_features, n_targets) + Coefficients of the OMP solution. If `return_path=True`, this contains + the whole coefficient path. In this case its shape is + `(n_features, n_features)` or `(n_features, n_targets, n_features)` and + iterating over the last axis yields coefficients in increasing order + of active features. + + n_iters : list or int + Number of active features across every target. Returned only if + `return_n_iter` is set to True. + + See Also + -------- + OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP). + orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems. + lars_path : Compute Least Angle Regression or Lasso path using + LARS algorithm. + sklearn.decomposition.sparse_encode : Generic sparse coding. + Each column of the result is the solution to a Lasso problem. + + Notes + ----- + Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, + Matching pursuits with time-frequency dictionaries, IEEE Transactions on + Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. + (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) + + This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, + M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal + Matching Pursuit Technical Report - CS Technion, April 2008. 
+ https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> from sklearn.linear_model import orthogonal_mp_gram + >>> X, y = make_regression(noise=4, random_state=0) + >>> coef = orthogonal_mp_gram(X.T @ X, X.T @ y) + >>> coef.shape + (100,) + >>> X[:1,] @ coef + array([-78.68...]) + """ + Gram = check_array(Gram, order="F", copy=copy_Gram) + Xy = np.asarray(Xy) + if Xy.ndim > 1 and Xy.shape[1] > 1: + # or subsequent target will be affected + copy_Gram = True + if Xy.ndim == 1: + Xy = Xy[:, np.newaxis] + if tol is not None: + norms_squared = [norms_squared] + if copy_Xy or not Xy.flags.writeable: + # Make the copy once instead of many times in _gram_omp itself. + Xy = Xy.copy() + + if n_nonzero_coefs is None and tol is None: + n_nonzero_coefs = int(0.1 * len(Gram)) + if tol is not None and norms_squared is None: + raise ValueError( + "Gram OMP needs the precomputed norms in order " + "to evaluate the error sum of squares." 
+ ) + if tol is not None and tol < 0: + raise ValueError("Epsilon cannot be negative") + if tol is None and n_nonzero_coefs <= 0: + raise ValueError("The number of atoms must be positive") + if tol is None and n_nonzero_coefs > len(Gram): + raise ValueError( + "The number of atoms cannot be more than the number of features" + ) + + if return_path: + coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype) + else: + coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype) + + n_iters = [] + for k in range(Xy.shape[1]): + out = _gram_omp( + Gram, + Xy[:, k], + n_nonzero_coefs, + norms_squared[k] if tol is not None else None, + tol, + copy_Gram=copy_Gram, + copy_Xy=False, + return_path=return_path, + ) + if return_path: + _, idx, coefs, n_iter = out + coef = coef[:, :, : len(idx)] + for n_active, x in enumerate(coefs.T): + coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1] + else: + x, idx, n_iter = out + coef[idx, k] = x + n_iters.append(n_iter) + + if Xy.shape[1] == 1: + n_iters = n_iters[0] + + if return_n_iter: + return np.squeeze(coef), n_iters + else: + return np.squeeze(coef) + + +class OrthogonalMatchingPursuit(MultiOutputMixin, RegressorMixin, LinearModel): + """Orthogonal Matching Pursuit model (OMP). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_nonzero_coefs : int, default=None + Desired number of non-zero entries in the solution. Ignored if `tol` is set. + When `None` and `tol` is also `None`, this value is either set to 10% of + `n_features` or 1, whichever is greater. + + tol : float, default=None + Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + precompute : 'auto' or bool, default='auto' + Whether to use a precomputed Gram and Xy matrix to speed up + calculations. 
Improves performance when :term:`n_targets` or + :term:`n_samples` is very large. Note that if you already have such + matrices, you can pass them directly to the fit method. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the formula). + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. + + n_iter_ : int or array-like + Number of active features across every target. + + n_nonzero_coefs_ : int or None + The number of non-zero coefficients in the solution or `None` when `tol` is + set. If `n_nonzero_coefs` is None and `tol` is None this value is either set + to 10% of `n_features` or 1, whichever is greater. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems. + orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit + problems using only the Gram matrix X.T * X and the product X.T * y. + lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. + Lars : Least Angle Regression model a.k.a. LAR. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + sklearn.decomposition.sparse_encode : Generic sparse coding. + Each column of the result is the solution to a Lasso problem. + OrthogonalMatchingPursuitCV : Cross-validated + Orthogonal Matching Pursuit model (OMP). + + Notes + ----- + Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, + Matching pursuits with time-frequency dictionaries, IEEE Transactions on + Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. 
+ (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) + + This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, + M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal + Matching Pursuit Technical Report - CS Technion, April 2008. + https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf + + Examples + -------- + >>> from sklearn.linear_model import OrthogonalMatchingPursuit + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(noise=4, random_state=0) + >>> reg = OrthogonalMatchingPursuit().fit(X, y) + >>> reg.score(X, y) + 0.9991... + >>> reg.predict(X[:1,]) + array([-78.3854...]) + """ + + _parameter_constraints: dict = { + "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], + "tol": [Interval(Real, 0, None, closed="left"), None], + "fit_intercept": ["boolean"], + "precompute": [StrOptions({"auto"}), "boolean"], + } + + def __init__( + self, + *, + n_nonzero_coefs=None, + tol=None, + fit_intercept=True, + precompute="auto", + ): + self.n_nonzero_coefs = n_nonzero_coefs + self.tol = tol + self.fit_intercept = fit_intercept + self.precompute = precompute + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to X's dtype if necessary. + + Returns + ------- + self : object + Returns an instance of self. + """ + X, y = validate_data(self, X, y, multi_output=True, y_numeric=True) + n_features = X.shape[1] + + X, y, X_offset, y_offset, X_scale, Gram, Xy = _pre_fit( + X, y, None, self.precompute, self.fit_intercept, copy=True + ) + + if y.ndim == 1: + y = y[:, np.newaxis] + + if self.n_nonzero_coefs is None and self.tol is None: + # default for n_nonzero_coefs is 0.1 * n_features + # but at least one. 
+ self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1) + elif self.tol is not None: + self.n_nonzero_coefs_ = None + else: + self.n_nonzero_coefs_ = self.n_nonzero_coefs + + if Gram is False: + coef_, self.n_iter_ = orthogonal_mp( + X, + y, + n_nonzero_coefs=self.n_nonzero_coefs_, + tol=self.tol, + precompute=False, + copy_X=True, + return_n_iter=True, + ) + else: + norms_sq = np.sum(y**2, axis=0) if self.tol is not None else None + + coef_, self.n_iter_ = orthogonal_mp_gram( + Gram, + Xy=Xy, + n_nonzero_coefs=self.n_nonzero_coefs_, + tol=self.tol, + norms_squared=norms_sq, + copy_Gram=True, + copy_Xy=True, + return_n_iter=True, + ) + self.coef_ = coef_.T + self._set_intercept(X_offset, y_offset, X_scale) + return self + + +def _omp_path_residues( + X_train, + y_train, + X_test, + y_test, + copy=True, + fit_intercept=True, + max_iter=100, +): + """Compute the residues on left-out data for a full LARS path. + + Parameters + ---------- + X_train : ndarray of shape (n_samples, n_features) + The data to fit the LARS on. + + y_train : ndarray of shape (n_samples) + The target variable to fit LARS on. + + X_test : ndarray of shape (n_samples, n_features) + The data to compute the residues on. + + y_test : ndarray of shape (n_samples) + The target variable to compute the residues on. + + copy : bool, default=True + Whether X_train, X_test, y_train and y_test should be copied. If + False, they may be overwritten. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + max_iter : int, default=100 + Maximum numbers of iterations to perform, therefore maximum features + to include. 100 by default. + + Returns + ------- + residues : ndarray of shape (n_samples, max_features) + Residues of the prediction on the test data. 
+ """ + + if copy: + X_train = X_train.copy() + y_train = y_train.copy() + X_test = X_test.copy() + y_test = y_test.copy() + + if fit_intercept: + X_mean = X_train.mean(axis=0) + X_train -= X_mean + X_test -= X_mean + y_mean = y_train.mean(axis=0) + y_train = as_float_array(y_train, copy=False) + y_train -= y_mean + y_test = as_float_array(y_test, copy=False) + y_test -= y_mean + + coefs = orthogonal_mp( + X_train, + y_train, + n_nonzero_coefs=max_iter, + tol=None, + precompute=False, + copy_X=False, + return_path=True, + ) + if coefs.ndim == 1: + coefs = coefs[:, np.newaxis] + + return np.dot(coefs.T, X_test.T) - y_test + + +class OrthogonalMatchingPursuitCV(RegressorMixin, LinearModel): + """Cross-validated Orthogonal Matching Pursuit model (OMP). + + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + copy : bool, default=True + Whether the design matrix X must be copied by the algorithm. A false + value is only helpful if X is already Fortran-ordered, otherwise a + copy is made anyway. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + max_iter : int, default=None + Maximum numbers of iterations to perform, therefore maximum features + to include. 10% of ``n_features`` but at least 5 if available. + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. 
versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : bool or int, default=False + Sets the verbosity amount. + + Attributes + ---------- + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. + + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the problem formulation). + + n_nonzero_coefs_ : int + Estimated number of non-zero coefficients giving the best mean squared + error over the cross-validation folds. + + n_iter_ : int or array-like + Number of active features across every target for the model refit with + the best hyperparameters got by cross-validating across all folds. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems. + orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit + problems using only the Gram matrix X.T * X and the product X.T * y. + lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. + Lars : Least Angle Regression model a.k.a. LAR. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP). + LarsCV : Cross-validated Least Angle Regression model. + LassoLarsCV : Cross-validated Lasso model fit with Least Angle Regression. + sklearn.decomposition.sparse_encode : Generic sparse coding. 
+ Each column of the result is the solution to a Lasso problem. + + Notes + ----- + In `fit`, once the optimal number of non-zero coefficients is found through + cross-validation, the model is fit again using the entire training set. + + Examples + -------- + >>> from sklearn.linear_model import OrthogonalMatchingPursuitCV + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_features=100, n_informative=10, + ... noise=4, random_state=0) + >>> reg = OrthogonalMatchingPursuitCV(cv=5).fit(X, y) + >>> reg.score(X, y) + 0.9991... + >>> reg.n_nonzero_coefs_ + np.int64(10) + >>> reg.predict(X[:1,]) + array([-78.3854...]) + """ + + _parameter_constraints: dict = { + "copy": ["boolean"], + "fit_intercept": ["boolean"], + "max_iter": [Interval(Integral, 0, None, closed="left"), None], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + } + + def __init__( + self, + *, + copy=True, + fit_intercept=True, + max_iter=None, + cv=None, + n_jobs=None, + verbose=False, + ): + self.copy = copy + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.cv = cv + self.n_jobs = n_jobs + self.verbose = verbose + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, **fit_params): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. Will be cast to X's dtype if necessary. + + **fit_params : dict + Parameters to pass to the underlying splitter. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of self. 
+ """ + _raise_for_params(fit_params, self, "fit") + + X, y = validate_data(self, X, y, y_numeric=True, ensure_min_features=2) + X = as_float_array(X, copy=False, ensure_all_finite=False) + cv = check_cv(self.cv, classifier=False) + if _routing_enabled(): + routed_params = process_routing(self, "fit", **fit_params) + else: + # TODO(SLEP6): remove when metadata routing cannot be disabled. + routed_params = Bunch() + routed_params.splitter = Bunch(split={}) + max_iter = ( + min(max(int(0.1 * X.shape[1]), 5), X.shape[1]) + if not self.max_iter + else self.max_iter + ) + cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( + delayed(_omp_path_residues)( + X[train], + y[train], + X[test], + y[test], + self.copy, + self.fit_intercept, + max_iter, + ) + for train, test in cv.split(X, **routed_params.splitter.split) + ) + + min_early_stop = min(fold.shape[0] for fold in cv_paths) + mse_folds = np.array( + [(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths] + ) + best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1 + self.n_nonzero_coefs_ = best_n_nonzero_coefs + omp = OrthogonalMatchingPursuit( + n_nonzero_coefs=best_n_nonzero_coefs, + fit_intercept=self.fit_intercept, + ).fit(X, y) + + self.coef_ = omp.coef_ + self.intercept_ = omp.intercept_ + self.n_iter_ = omp.n_iter_ + return self + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. 
+ """ + + router = MetadataRouter(owner=self.__class__.__name__).add( + splitter=self.cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + return router diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_passive_aggressive.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_passive_aggressive.py new file mode 100644 index 0000000000000000000000000000000000000000..61eb06edae85f9c6d04a94c070cd71c1bbbcaa3b --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_passive_aggressive.py @@ -0,0 +1,573 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from numbers import Real + +from ..base import _fit_context +from ..utils._param_validation import Interval, StrOptions +from ._stochastic_gradient import DEFAULT_EPSILON, BaseSGDClassifier, BaseSGDRegressor + + +class PassiveAggressiveClassifier(BaseSGDClassifier): + """Passive Aggressive Classifier. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + C : float, default=1.0 + Maximum step size (regularization). Defaults to 1.0. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`~sklearn.linear_model.PassiveAggressiveClassifier.partial_fit` method. + + .. versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, the iterations will stop + when (loss > previous_loss - tol). + + .. versionadded:: 0.19 + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. 
If set to True, it will automatically set aside + a stratified fraction of training data as validation and terminate + training when validation score is not improving by at least `tol` for + `n_iter_no_change` consecutive epochs. + + .. versionadded:: 0.20 + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if early_stopping is True. + + .. versionadded:: 0.20 + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before early stopping. + + .. versionadded:: 0.20 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. + + loss : str, default="hinge" + The loss function to be used: + hinge: equivalent to PA-I in the reference paper. + squared_hinge: equivalent to PA-II in the reference paper. + + n_jobs : int or None, default=None + The number of CPUs to use to do the OVA (One Versus All, for + multi-class problems) computation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + Used to shuffle the training data, when ``shuffle`` is set to + ``True``. Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + + class_weight : dict, {class_label: weight} or "balanced" or None, \ + default=None + Preset for the class_weight fit parameter. 
+ + Weights associated with classes. If not given, all classes + are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + .. versionadded:: 0.17 + parameter *class_weight* to automatically weight samples. + + average : bool or int, default=False + When set to True, computes the averaged SGD weights and stores the + result in the ``coef_`` attribute. If set to an int greater than 1, + averaging will begin once the total number of samples seen reaches + average. So average=10 will begin averaging after seeing 10 samples. + + .. versionadded:: 0.19 + parameter *average* to use weights averaging in SGD. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \ + (n_classes, n_features) + Weights assigned to the features. + + intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,) + Constants in decision function. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + For multiclass fits, it is the maximum over every binary fit. + + classes_ : ndarray of shape (n_classes,) + The unique classes labels. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + See Also + -------- + SGDClassifier : Incrementally trained logistic regression. + Perceptron : Linear perceptron classifier. + + References + ---------- + Online Passive-Aggressive Algorithms + + K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. 
Singer - JMLR (2006) + + Examples + -------- + >>> from sklearn.linear_model import PassiveAggressiveClassifier + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(n_features=4, random_state=0) + >>> clf = PassiveAggressiveClassifier(max_iter=1000, random_state=0, + ... tol=1e-3) + >>> clf.fit(X, y) + PassiveAggressiveClassifier(random_state=0) + >>> print(clf.coef_) + [[0.26642044 0.45070924 0.67251877 0.64185414]] + >>> print(clf.intercept_) + [1.84127814] + >>> print(clf.predict([[0, 0, 0, 0]])) + [1] + """ + + _parameter_constraints: dict = { + **BaseSGDClassifier._parameter_constraints, + "loss": [StrOptions({"hinge", "squared_hinge"})], + "C": [Interval(Real, 0, None, closed="right")], + } + + def __init__( + self, + *, + C=1.0, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + shuffle=True, + verbose=0, + loss="hinge", + n_jobs=None, + random_state=None, + warm_start=False, + class_weight=None, + average=False, + ): + super().__init__( + penalty=None, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + shuffle=shuffle, + verbose=verbose, + random_state=random_state, + eta0=1.0, + warm_start=warm_start, + class_weight=class_weight, + average=average, + n_jobs=n_jobs, + ) + + self.C = C + self.loss = loss + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y, classes=None): + """Fit linear model with Passive Aggressive algorithm. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Subset of the training data. + + y : array-like of shape (n_samples,) + Subset of the target values. + + classes : ndarray of shape (n_classes,) + Classes across all calls to partial_fit. 
+ Can be obtained by via `np.unique(y_all)`, where y_all is the + target vector of the entire dataset. + This argument is required for the first call to partial_fit + and can be omitted in the subsequent calls. + Note that y doesn't need to contain all labels in `classes`. + + Returns + ------- + self : object + Fitted estimator. + """ + if not hasattr(self, "classes_"): + self._more_validate_params(for_partial_fit=True) + + if self.class_weight == "balanced": + raise ValueError( + "class_weight 'balanced' is not supported for " + "partial_fit. For 'balanced' weights, use " + "`sklearn.utils.compute_class_weight` with " + "`class_weight='balanced'`. In place of y you " + "can use a large enough subset of the full " + "training set target to properly estimate the " + "class frequency distributions. Pass the " + "resulting weights as the class_weight " + "parameter." + ) + + lr = "pa1" if self.loss == "hinge" else "pa2" + return self._partial_fit( + X, + y, + alpha=1.0, + C=self.C, + loss="hinge", + learning_rate=lr, + max_iter=1, + classes=classes, + sample_weight=None, + coef_init=None, + intercept_init=None, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, coef_init=None, intercept_init=None): + """Fit linear model with Passive Aggressive algorithm. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + coef_init : ndarray of shape (n_classes, n_features) + The initial coefficients to warm-start the optimization. + + intercept_init : ndarray of shape (n_classes,) + The initial intercept to warm-start the optimization. + + Returns + ------- + self : object + Fitted estimator. 
+ """ + self._more_validate_params() + + lr = "pa1" if self.loss == "hinge" else "pa2" + return self._fit( + X, + y, + alpha=1.0, + C=self.C, + loss="hinge", + learning_rate=lr, + coef_init=coef_init, + intercept_init=intercept_init, + ) + + +class PassiveAggressiveRegressor(BaseSGDRegressor): + """Passive Aggressive Regressor. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + + C : float, default=1.0 + Maximum step size (regularization). Defaults to 1.0. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. Defaults to True. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`~sklearn.linear_model.PassiveAggressiveRegressor.partial_fit` method. + + .. versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, the iterations will stop + when (loss > previous_loss - tol). + + .. versionadded:: 0.19 + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation. + score is not improving. If set to True, it will automatically set aside + a fraction of training data as validation and terminate + training when validation score is not improving by at least tol for + n_iter_no_change consecutive epochs. + + .. versionadded:: 0.20 + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if early_stopping is True. + + .. versionadded:: 0.20 + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before early stopping. + + .. versionadded:: 0.20 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. 
+ + loss : str, default="epsilon_insensitive" + The loss function to be used: + epsilon_insensitive: equivalent to PA-I in the reference paper. + squared_epsilon_insensitive: equivalent to PA-II in the reference + paper. + + epsilon : float, default=0.1 + If the difference between the current prediction and the correct label + is below this threshold, the model is not updated. + + random_state : int, RandomState instance, default=None + Used to shuffle the training data, when ``shuffle`` is set to + ``True``. Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + + average : bool or int, default=False + When set to True, computes the averaged SGD weights and stores the + result in the ``coef_`` attribute. If set to an int greater than 1, + averaging will begin once the total number of samples seen reaches + average. So average=10 will begin averaging after seeing 10 samples. + + .. versionadded:: 0.19 + parameter *average* to use weights averaging in SGD. + + Attributes + ---------- + coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\ + n_features] + Weights assigned to the features. + + intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] + Constants in decision function. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + See Also + -------- + SGDRegressor : Linear model fitted by minimizing a regularized + empirical loss with SGD. + + References + ---------- + Online Passive-Aggressive Algorithms + + K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006). + + Examples + -------- + >>> from sklearn.linear_model import PassiveAggressiveRegressor + >>> from sklearn.datasets import make_regression + + >>> X, y = make_regression(n_features=4, random_state=0) + >>> regr = PassiveAggressiveRegressor(max_iter=100, random_state=0, + ... tol=1e-3) + >>> regr.fit(X, y) + PassiveAggressiveRegressor(max_iter=100, random_state=0) + >>> print(regr.coef_) + [20.48736655 34.18818427 67.59122734 87.94731329] + >>> print(regr.intercept_) + [-0.02306214] + >>> print(regr.predict([[0, 0, 0, 0]])) + [-0.02306214] + """ + + _parameter_constraints: dict = { + **BaseSGDRegressor._parameter_constraints, + "loss": [StrOptions({"epsilon_insensitive", "squared_epsilon_insensitive"})], + "C": [Interval(Real, 0, None, closed="right")], + "epsilon": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + *, + C=1.0, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + shuffle=True, + verbose=0, + loss="epsilon_insensitive", + epsilon=DEFAULT_EPSILON, + random_state=None, + warm_start=False, + average=False, + ): + super().__init__( + penalty=None, + l1_ratio=0, + epsilon=epsilon, + eta0=1.0, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + shuffle=shuffle, + verbose=verbose, + random_state=random_state, + warm_start=warm_start, + average=average, + ) + self.C = C + 
self.loss = loss + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y): + """Fit linear model with Passive Aggressive algorithm. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Subset of training data. + + y : numpy array of shape [n_samples] + Subset of target values. + + Returns + ------- + self : object + Fitted estimator. + """ + if not hasattr(self, "coef_"): + self._more_validate_params(for_partial_fit=True) + + lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2" + return self._partial_fit( + X, + y, + alpha=1.0, + C=self.C, + loss="epsilon_insensitive", + learning_rate=lr, + max_iter=1, + sample_weight=None, + coef_init=None, + intercept_init=None, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, coef_init=None, intercept_init=None): + """Fit linear model with Passive Aggressive algorithm. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : numpy array of shape [n_samples] + Target values. + + coef_init : array, shape = [n_features] + The initial coefficients to warm-start the optimization. + + intercept_init : array, shape = [1] + The initial intercept to warm-start the optimization. + + Returns + ------- + self : object + Fitted estimator. 
+ """ + self._more_validate_params() + + lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2" + return self._fit( + X, + y, + alpha=1.0, + C=self.C, + loss="epsilon_insensitive", + learning_rate=lr, + coef_init=coef_init, + intercept_init=intercept_init, + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py new file mode 100644 index 0000000000000000000000000000000000000000..e93200ba385faf037be75654061932ee6e886b7b --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py @@ -0,0 +1,226 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from numbers import Real + +from ..utils._param_validation import Interval, StrOptions +from ._stochastic_gradient import BaseSGDClassifier + + +class Perceptron(BaseSGDClassifier): + """Linear perceptron classifier. + + The implementation is a wrapper around :class:`~sklearn.linear_model.SGDClassifier` + by fixing the `loss` and `learning_rate` parameters as:: + + SGDClassifier(loss="perceptron", learning_rate="constant") + + Other available parameters are described below and are forwarded to + :class:`~sklearn.linear_model.SGDClassifier`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + + penalty : {'l2','l1','elasticnet'}, default=None + The penalty (aka regularization term) to be used. + + alpha : float, default=0.0001 + Constant that multiplies the regularization term if regularization is + used. + + l1_ratio : float, default=0.15 + The Elastic Net mixing parameter, with `0 <= l1_ratio <= 1`. + `l1_ratio=0` corresponds to L2 penalty, `l1_ratio=1` to L1. + Only used if `penalty='elasticnet'`. + + .. versionadded:: 0.24 + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. 
+ + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`partial_fit` method. + + .. versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, the iterations will stop + when (loss > previous_loss - tol). + + .. versionadded:: 0.19 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. + + eta0 : float, default=1 + Constant by which the updates are multiplied. + + n_jobs : int, default=None + The number of CPUs to use to do the OVA (One Versus All, for + multi-class problems) computation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance or None, default=0 + Used to shuffle the training data, when ``shuffle`` is set to + ``True``. Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. If set to True, it will automatically set aside + a stratified fraction of training data as validation and terminate + training when validation score is not improving by at least `tol` for + `n_iter_no_change` consecutive epochs. + + .. versionadded:: 0.20 + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if early_stopping is True. + + .. versionadded:: 0.20 + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before early stopping. + + .. 
versionadded:: 0.20 + + class_weight : dict, {class_label: weight} or "balanced", default=None + Preset for the class_weight fit parameter. + + Weights associated with classes. If not given, all classes + are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. See + :term:`the Glossary `. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + The unique classes labels. + + coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \ + (n_classes, n_features) + Weights assigned to the features. + + intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,) + Constants in decision function. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + For multiclass fits, it is the maximum over every binary fit. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + See Also + -------- + sklearn.linear_model.SGDClassifier : Linear classifiers + (SVM, logistic regression, etc.) with SGD training. + + Notes + ----- + ``Perceptron`` is a classification algorithm which shares the same + underlying implementation with ``SGDClassifier``. In fact, + ``Perceptron()`` is equivalent to `SGDClassifier(loss="perceptron", + eta0=1, learning_rate="constant", penalty=None)`. 
+ + References + ---------- + https://en.wikipedia.org/wiki/Perceptron and references therein. + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.linear_model import Perceptron + >>> X, y = load_digits(return_X_y=True) + >>> clf = Perceptron(tol=1e-3, random_state=0) + >>> clf.fit(X, y) + Perceptron() + >>> clf.score(X, y) + 0.939... + """ + + _parameter_constraints: dict = {**BaseSGDClassifier._parameter_constraints} + _parameter_constraints.pop("loss") + _parameter_constraints.pop("average") + _parameter_constraints.update( + { + "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "eta0": [Interval(Real, 0, None, closed="left")], + } + ) + + def __init__( + self, + *, + penalty=None, + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + eta0=1.0, + n_jobs=None, + random_state=0, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + class_weight=None, + warm_start=False, + ): + super().__init__( + loss="perceptron", + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + random_state=random_state, + learning_rate="constant", + eta0=eta0, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + power_t=0.5, + warm_start=warm_start, + class_weight=class_weight, + n_jobs=n_jobs, + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py new file mode 100644 index 0000000000000000000000000000000000000000..446d232958e8dbe3fec247ab37c05b39469160e8 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py @@ -0,0 +1,301 @@ +# 
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause

import warnings
from numbers import Real

import numpy as np
from scipy import sparse
from scipy.optimize import linprog

from ..base import BaseEstimator, RegressorMixin, _fit_context
from ..exceptions import ConvergenceWarning
from ..utils import _safe_indexing
from ..utils._param_validation import Interval, StrOptions
from ..utils.fixes import parse_version, sp_version
from ..utils.validation import _check_sample_weight, validate_data
from ._base import LinearModel


class QuantileRegressor(LinearModel, RegressorMixin, BaseEstimator):
    """Linear regression model that predicts conditional quantiles.

    The linear :class:`QuantileRegressor` optimizes the pinball loss for a
    desired `quantile` and is robust to outliers.

    This model uses an L1 regularization like
    :class:`~sklearn.linear_model.Lasso`.

    Read more in the :ref:`User Guide <quantile_regression>`.

    .. versionadded:: 1.0

    Parameters
    ----------
    quantile : float, default=0.5
        The quantile that the model tries to predict. It must be strictly
        between 0 and 1. If 0.5 (default), the model predicts the 50%
        quantile, i.e. the median.

    alpha : float, default=1.0
        Regularization constant that multiplies the L1 penalty term.

    fit_intercept : bool, default=True
        Whether or not to fit the intercept.

    solver : {'highs-ds', 'highs-ipm', 'highs', 'interior-point', \
            'revised simplex'}, default='highs'
        Method used by :func:`scipy.optimize.linprog` to solve the linear
        programming formulation.

        It is recommended to use the highs methods because
        they are the fastest ones. Solvers "highs-ds", "highs-ipm" and "highs"
        support sparse input data and, in fact, always convert to sparse csc.

        From `scipy>=1.11.0`, "interior-point" is not available anymore.

        .. versionchanged:: 1.4
            The default of `solver` changed to `"highs"` in version 1.4.

    solver_options : dict, default=None
        Additional parameters passed to :func:`scipy.optimize.linprog` as
        options. If `None` and if `solver='interior-point'`, then
        `{"lstsq": True}` is passed to :func:`scipy.optimize.linprog` for the
        sake of stability.

    Attributes
    ----------
    coef_ : array of shape (n_features,)
        Estimated coefficients for the features.

    intercept_ : float
        The intercept of the model, aka bias term.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_iter_ : int
        The actual number of iterations performed by the solver.

    See Also
    --------
    Lasso : The Lasso is a linear model that estimates sparse coefficients
        with l1 regularization.
    HuberRegressor : Linear regression model that is robust to outliers.

    Examples
    --------
    >>> from sklearn.linear_model import QuantileRegressor
    >>> import numpy as np
    >>> n_samples, n_features = 10, 2
    >>> rng = np.random.RandomState(0)
    >>> y = rng.randn(n_samples)
    >>> X = rng.randn(n_samples, n_features)
    >>> # the two following lines are optional in practice
    >>> from sklearn.utils.fixes import sp_version, parse_version
    >>> reg = QuantileRegressor(quantile=0.8).fit(X, y)
    >>> np.mean(y <= reg.predict(X))
    np.float64(0.8)
    """

    _parameter_constraints: dict = {
        "quantile": [Interval(Real, 0, 1, closed="neither")],
        "alpha": [Interval(Real, 0, None, closed="left")],
        "fit_intercept": ["boolean"],
        "solver": [
            StrOptions(
                {
                    "highs-ds",
                    "highs-ipm",
                    "highs",
                    "interior-point",
                    "revised simplex",
                }
            ),
        ],
        "solver_options": [dict, None],
    }

    def __init__(
        self,
        *,
        quantile=0.5,
        alpha=1.0,
        fit_intercept=True,
        solver="highs",
        solver_options=None,
    ):
        self.quantile = quantile
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.solver = solver
        self.solver_options = solver_options

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=["csc", "csr", "coo"],
            y_numeric=True,
            multi_output=False,
        )
        sample_weight = _check_sample_weight(sample_weight, X)

        n_features = X.shape[1]
        n_params = n_features

        if self.fit_intercept:
            n_params += 1
            # Note that centering y and X with _preprocess_data does not work
            # for quantile regression.

        # The objective is defined as 1/n * sum(pinball loss) + alpha * L1.
        # So we rescale the penalty term, which is equivalent.
        alpha = np.sum(sample_weight) * self.alpha

        if self.solver == "interior-point" and sp_version >= parse_version("1.11.0"):
            raise ValueError(
                f"Solver {self.solver} is not anymore available in SciPy >= 1.11.0."
            )

        if sparse.issparse(X) and self.solver not in ["highs", "highs-ds", "highs-ipm"]:
            raise ValueError(
                f"Solver {self.solver} does not support sparse X. "
                "Use solver 'highs' for example."
            )
        # make default solver more stable
        if self.solver_options is None and self.solver == "interior-point":
            solver_options = {"lstsq": True}
        else:
            solver_options = self.solver_options

        # After rescaling alpha, the minimization problem is
        #     min sum(pinball loss) + alpha * L1
        # Use linear programming formulation of quantile regression
        #     min_x c x
        #           A_eq x = b_eq
        #                0 <= x
        # x = (s0, s, t0, t, u, v) = slack variables >= 0
        # intercept = s0 - t0
        # coef = s - t
        # c = (0, alpha * 1_p, 0, alpha * 1_p, quantile * 1_n, (1-quantile) * 1_n)
        # residual = y - X@coef - intercept = u - v
        # A_eq = (1_n, X, -1_n, -X, diag(1_n), -diag(1_n))
        # b_eq = y
        # p = n_features
        # n = n_samples
        # 1_n = vector of length n with entries equal one
        # see https://stats.stackexchange.com/questions/384909/
        #
        # Filtering out zero sample weights from the beginning makes life
        # easier for the linprog solver.
        indices = np.nonzero(sample_weight)[0]
        n_indices = len(indices)  # number of rows that actually enter the LP
        if n_indices < len(sample_weight):
            sample_weight = sample_weight[indices]
            X = _safe_indexing(X, indices)
            y = _safe_indexing(y, indices)
        c = np.concatenate(
            [
                np.full(2 * n_params, fill_value=alpha),
                sample_weight * self.quantile,
                sample_weight * (1 - self.quantile),
            ]
        )
        if self.fit_intercept:
            # do not penalize the intercept
            c[0] = 0
            c[n_params] = 0

        if self.solver in ["highs", "highs-ds", "highs-ipm"]:
            # Note that highs methods always use a sparse CSC memory layout internally,
            # even for optimization problems parametrized using dense numpy arrays.
            # Therefore, we work with CSC matrices as early as possible to limit
            # unnecessary repeated memory copies.
            eye = sparse.eye(n_indices, dtype=X.dtype, format="csc")
            if self.fit_intercept:
                ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype))
                A_eq = sparse.hstack([ones, X, -ones, -X, eye, -eye], format="csc")
            else:
                A_eq = sparse.hstack([X, -X, eye, -eye], format="csc")
        else:
            eye = np.eye(n_indices)
            if self.fit_intercept:
                ones = np.ones((n_indices, 1))
                A_eq = np.concatenate([ones, X, -ones, -X, eye, -eye], axis=1)
            else:
                A_eq = np.concatenate([X, -X, eye, -eye], axis=1)

        b_eq = y

        result = linprog(
            c=c,
            A_eq=A_eq,
            b_eq=b_eq,
            method=self.solver,
            options=solver_options,
        )
        solution = result.x
        if not result.success:
            failure = {
                1: "Iteration limit reached.",
                2: "Problem appears to be infeasible.",
                3: "Problem appears to be unbounded.",
                4: "Numerical difficulties encountered.",
            }
            # Read-only lookup: `dict.get` instead of `dict.setdefault`, which
            # needlessly mutated the `failure` table on unknown status codes.
            warnings.warn(
                "Linear programming for QuantileRegressor did not succeed.\n"
                f"Status is {result.status}: "
                + failure.get(result.status, "unknown reason")
                + "\n"
                + "Result message of linprog:\n"
                + result.message,
                ConvergenceWarning,
            )

        # positive slack - negative slack
        # solution is an array with (params_pos, params_neg, u, v)
        params = solution[:n_params] - solution[n_params : 2 * n_params]

        self.n_iter_ = result.nit

        if self.fit_intercept:
            self.coef_ = params[1:]
            self.intercept_ = params[0]
        else:
            self.coef_ = params
            self.intercept_ = 0.0
        return self

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        return tags
+ + Parameters + ---------- + n_inliers : int + Number of inliers in the data. + + n_samples : int + Total number of samples in the data. + + min_samples : int + Minimum number of samples chosen randomly from original data. + + probability : float + Probability (confidence) that one outlier-free sample is generated. + + Returns + ------- + trials : int + Number of trials. + + """ + inlier_ratio = n_inliers / float(n_samples) + nom = max(_EPSILON, 1 - probability) + denom = max(_EPSILON, 1 - inlier_ratio**min_samples) + if nom == 1: + return 0 + if denom == 1: + return float("inf") + return abs(float(np.ceil(np.log(nom) / np.log(denom)))) + + +class RANSACRegressor( + MetaEstimatorMixin, + RegressorMixin, + MultiOutputMixin, + BaseEstimator, +): + """RANSAC (RANdom SAmple Consensus) algorithm. + + RANSAC is an iterative algorithm for the robust estimation of parameters + from a subset of inliers from the complete data set. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object, default=None + Base estimator object which implements the following methods: + + * `fit(X, y)`: Fit model to given training data and target values. + * `score(X, y)`: Returns the mean accuracy on the given test data, + which is used for the stop criterion defined by `stop_score`. + Additionally, the score is used to decide which of two equally + large consensus sets is chosen as the better one. + * `predict(X)`: Returns predicted values using the linear model, + which is used to compute residual error using loss function. + + If `estimator` is None, then + :class:`~sklearn.linear_model.LinearRegression` is used for + target values of dtype float. + + Note that the current implementation only supports regression + estimators. + + min_samples : int (>= 1) or float ([0, 1]), default=None + Minimum number of samples chosen randomly from original data. 
Treated + as an absolute number of samples for `min_samples >= 1`, treated as a + relative number `ceil(min_samples * X.shape[0])` for + `min_samples < 1`. This is typically chosen as the minimal number of + samples necessary to estimate the given `estimator`. By default a + :class:`~sklearn.linear_model.LinearRegression` estimator is assumed and + `min_samples` is chosen as ``X.shape[1] + 1``. This parameter is highly + dependent upon the model, so if a `estimator` other than + :class:`~sklearn.linear_model.LinearRegression` is used, the user must + provide a value. + + residual_threshold : float, default=None + Maximum residual for a data sample to be classified as an inlier. + By default the threshold is chosen as the MAD (median absolute + deviation) of the target values `y`. Points whose residuals are + strictly equal to the threshold are considered as inliers. + + is_data_valid : callable, default=None + This function is called with the randomly selected data before the + model is fitted to it: `is_data_valid(X, y)`. If its return value is + False the current randomly chosen sub-sample is skipped. + + is_model_valid : callable, default=None + This function is called with the estimated model and the randomly + selected data: `is_model_valid(model, X, y)`. If its return value is + False the current randomly chosen sub-sample is skipped. + Rejecting samples with this function is computationally costlier than + with `is_data_valid`. `is_model_valid` should therefore only be used if + the estimated model is needed for making the rejection decision. + + max_trials : int, default=100 + Maximum number of iterations for random sample selection. + + max_skips : int, default=np.inf + Maximum number of iterations that can be skipped due to finding zero + inliers or invalid data defined by ``is_data_valid`` or invalid models + defined by ``is_model_valid``. + + .. 
versionadded:: 0.19 + + stop_n_inliers : int, default=np.inf + Stop iteration if at least this number of inliers are found. + + stop_score : float, default=np.inf + Stop iteration if score is greater equal than this threshold. + + stop_probability : float in range [0, 1], default=0.99 + RANSAC iteration stops if at least one outlier-free set of the training + data is sampled in RANSAC. This requires to generate at least N + samples (iterations):: + + N >= log(1 - probability) / log(1 - e**m) + + where the probability (confidence) is typically set to high value such + as 0.99 (the default) and e is the current fraction of inliers w.r.t. + the total number of samples. + + loss : str, callable, default='absolute_error' + String inputs, 'absolute_error' and 'squared_error' are supported which + find the absolute error and squared error per sample respectively. + + If ``loss`` is a callable, then it should be a function that takes + two arrays as inputs, the true and predicted value and returns a 1-D + array with the i-th value of the array corresponding to the loss + on ``X[i]``. + + If the loss on a sample is greater than the ``residual_threshold``, + then this sample is classified as an outlier. + + .. versionadded:: 0.18 + + random_state : int, RandomState instance, default=None + The generator used to initialize the centers. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + estimator_ : object + Final model fitted on the inliers predicted by the "best" model found + during RANSAC sampling (copy of the `estimator` object). + + n_trials_ : int + Number of random selection trials until one of the stop criteria is + met. It is always ``<= max_trials``. + + inlier_mask_ : bool array of shape [n_samples] + Boolean mask of inliers classified as ``True``. + + n_skips_no_inliers_ : int + Number of iterations skipped due to finding zero inliers. + + .. 
versionadded:: 0.19 + + n_skips_invalid_data_ : int + Number of iterations skipped due to invalid data defined by + ``is_data_valid``. + + .. versionadded:: 0.19 + + n_skips_invalid_model_ : int + Number of iterations skipped due to an invalid model defined by + ``is_model_valid``. + + .. versionadded:: 0.19 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + HuberRegressor : Linear regression model that is robust to outliers. + TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model. + SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/RANSAC + .. [2] https://www.sri.com/wp-content/uploads/2021/12/ransac-publication.pdf + .. [3] https://bmva-archive.org.uk/bmvc/2009/Papers/Paper355/Paper355.pdf + + Examples + -------- + >>> from sklearn.linear_model import RANSACRegressor + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression( + ... n_samples=200, n_features=2, noise=4.0, random_state=0) + >>> reg = RANSACRegressor(random_state=0).fit(X, y) + >>> reg.score(X, y) + 0.9885... 
+ >>> reg.predict(X[:1,]) + array([-31.9417...]) + + For a more detailed example, see + :ref:`sphx_glr_auto_examples_linear_model_plot_ransac.py` + """ # noqa: E501 + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit", "score", "predict"]), None], + "min_samples": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="both"), + None, + ], + "residual_threshold": [Interval(Real, 0, None, closed="left"), None], + "is_data_valid": [callable, None], + "is_model_valid": [callable, None], + "max_trials": [ + Interval(Integral, 0, None, closed="left"), + Options(Real, {np.inf}), + ], + "max_skips": [ + Interval(Integral, 0, None, closed="left"), + Options(Real, {np.inf}), + ], + "stop_n_inliers": [ + Interval(Integral, 0, None, closed="left"), + Options(Real, {np.inf}), + ], + "stop_score": [Interval(Real, None, None, closed="both")], + "stop_probability": [Interval(Real, 0, 1, closed="both")], + "loss": [StrOptions({"absolute_error", "squared_error"}), callable], + "random_state": ["random_state"], + } + + def __init__( + self, + estimator=None, + *, + min_samples=None, + residual_threshold=None, + is_data_valid=None, + is_model_valid=None, + max_trials=100, + max_skips=np.inf, + stop_n_inliers=np.inf, + stop_score=np.inf, + stop_probability=0.99, + loss="absolute_error", + random_state=None, + ): + self.estimator = estimator + self.min_samples = min_samples + self.residual_threshold = residual_threshold + self.is_data_valid = is_data_valid + self.is_model_valid = is_model_valid + self.max_trials = max_trials + self.max_skips = max_skips + self.stop_n_inliers = stop_n_inliers + self.stop_score = stop_score + self.stop_probability = stop_probability + self.random_state = random_state + self.loss = loss + + @_fit_context( + # RansacRegressor.estimator is not validated yet + prefer_skip_nested_validation=False + ) + # TODO(1.7): remove `sample_weight` from the signature after deprecation + # cycle; for backwards compatibility: 
    # pop it from `fit_params` before the
    # `_raise_for_params` check and reinsert it after the check
    @_deprecate_positional_args(version="1.7")
    def fit(self, X, y, *, sample_weight=None, **fit_params):
        """Fit estimator using RANSAC algorithm.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Individual weights for each sample.
            Raises an error if `sample_weight` is passed and the estimator's
            fit method does not support it.

            .. versionadded:: 0.18

        **fit_params : dict
            Parameters routed to the `fit` method of the sub-estimator via the
            metadata routing API.

            .. versionadded:: 1.5

                Only available if
                `sklearn.set_config(enable_metadata_routing=True)` is set. See
                :ref:`Metadata Routing User Guide <metadata_routing>` for more
                details.

        Returns
        -------
        self : object
            Fitted `RANSACRegressor` estimator.

        Raises
        ------
        ValueError
            If no valid consensus set could be found. This occurs if
            `is_data_valid` and `is_model_valid` return False for all
            `max_trials` randomly chosen sub-samples.
        """
        # Need to validate separately here. We can't pass multi_output=True
        # because that would allow y to be csr. Delay expensive finiteness
        # check to the estimator's own input validation.
        _raise_for_params(fit_params, self, "fit")
        check_X_params = dict(accept_sparse="csr", ensure_all_finite=False)
        check_y_params = dict(ensure_2d=False)
        X, y = validate_data(
            self, X, y, validate_separately=(check_X_params, check_y_params)
        )
        check_consistent_length(X, y)

        # Default base estimator is a plain least-squares fit.
        if self.estimator is not None:
            estimator = clone(self.estimator)
        else:
            estimator = LinearRegression()

        # Resolve `min_samples` to an absolute subset size.
        if self.min_samples is None:
            if not isinstance(estimator, LinearRegression):
                raise ValueError(
                    "`min_samples` needs to be explicitly set when estimator "
                    "is not a LinearRegression."
                )
            min_samples = X.shape[1] + 1
        elif 0 < self.min_samples < 1:
            min_samples = np.ceil(self.min_samples * X.shape[0])
        elif self.min_samples >= 1:
            min_samples = self.min_samples
        if min_samples > X.shape[0]:
            raise ValueError(
                "`min_samples` may not be larger than number "
                "of samples: n_samples = %d." % (X.shape[0])
            )

        if self.residual_threshold is None:
            # MAD (median absolute deviation)
            residual_threshold = np.median(np.abs(y - np.median(y)))
        else:
            residual_threshold = self.residual_threshold

        # Per-sample loss used to classify inliers vs. outliers; multi-target
        # losses are summed over the targets axis.
        if self.loss == "absolute_error":
            if y.ndim == 1:
                loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
            else:
                loss_function = lambda y_true, y_pred: np.sum(
                    np.abs(y_true - y_pred), axis=1
                )
        elif self.loss == "squared_error":
            if y.ndim == 1:
                loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
            else:
                loss_function = lambda y_true, y_pred: np.sum(
                    (y_true - y_pred) ** 2, axis=1
                )

        elif callable(self.loss):
            loss_function = self.loss

        random_state = check_random_state(self.random_state)

        try:  # Not all estimator accept a random_state
            estimator.set_params(random_state=random_state)
        except ValueError:
            pass

        estimator_fit_has_sample_weight = has_fit_parameter(estimator, "sample_weight")
        estimator_name = type(estimator).__name__
        if sample_weight is not None and not estimator_fit_has_sample_weight:
            raise ValueError(
                "%s does not support sample_weight. Sample"
                " weights are only used for the calibration"
                " itself." % estimator_name
            )

        if sample_weight is not None:
            fit_params["sample_weight"] = sample_weight

        if _routing_enabled():
            routed_params = process_routing(self, "fit", **fit_params)
        else:
            routed_params = Bunch()
            routed_params.estimator = Bunch(fit={}, predict={}, score={})
            if sample_weight is not None:
                sample_weight = _check_sample_weight(sample_weight, X)
                routed_params.estimator.fit = {"sample_weight": sample_weight}

        # Running best consensus set; `n_inliers_best = 1` means any non-empty
        # consensus set beats the initial state.
        n_inliers_best = 1
        score_best = -np.inf
        inlier_mask_best = None
        X_inlier_best = None
        y_inlier_best = None
        inlier_best_idxs_subset = None
        self.n_skips_no_inliers_ = 0
        self.n_skips_invalid_data_ = 0
        self.n_skips_invalid_model_ = 0

        # number of data samples
        n_samples = X.shape[0]
        sample_idxs = np.arange(n_samples)

        self.n_trials_ = 0
        max_trials = self.max_trials
        while self.n_trials_ < max_trials:
            self.n_trials_ += 1

            if (
                self.n_skips_no_inliers_
                + self.n_skips_invalid_data_
                + self.n_skips_invalid_model_
            ) > self.max_skips:
                break

            # choose random sample set
            subset_idxs = sample_without_replacement(
                n_samples, min_samples, random_state=random_state
            )
            X_subset = X[subset_idxs]
            y_subset = y[subset_idxs]

            # check if random sample set is valid
            if self.is_data_valid is not None and not self.is_data_valid(
                X_subset, y_subset
            ):
                self.n_skips_invalid_data_ += 1
                continue

            # cut `fit_params` down to `subset_idxs`
            fit_params_subset = _check_method_params(
                X, params=routed_params.estimator.fit, indices=subset_idxs
            )

            # fit model for current random sample set
            estimator.fit(X_subset, y_subset, **fit_params_subset)

            # check if estimated model is valid
            if self.is_model_valid is not None and not self.is_model_valid(
                estimator, X_subset, y_subset
            ):
                self.n_skips_invalid_model_ += 1
                continue

            # residuals of all data for current random sample model
            y_pred = estimator.predict(X)
            residuals_subset = loss_function(y, y_pred)

            # classify data into inliers and outliers
            inlier_mask_subset = residuals_subset <= residual_threshold
            n_inliers_subset = np.sum(inlier_mask_subset)

            # less inliers -> skip current random sample
            if n_inliers_subset < n_inliers_best:
                self.n_skips_no_inliers_ += 1
                continue

            # extract inlier data set
            inlier_idxs_subset = sample_idxs[inlier_mask_subset]
            X_inlier_subset = X[inlier_idxs_subset]
            y_inlier_subset = y[inlier_idxs_subset]

            # cut `fit_params` down to `inlier_idxs_subset`
            score_params_inlier_subset = _check_method_params(
                X, params=routed_params.estimator.score, indices=inlier_idxs_subset
            )

            # score of inlier data set
            score_subset = estimator.score(
                X_inlier_subset,
                y_inlier_subset,
                **score_params_inlier_subset,
            )

            # same number of inliers but worse score -> skip current random
            # sample
            if n_inliers_subset == n_inliers_best and score_subset < score_best:
                continue

            # save current random sample as best sample
            n_inliers_best = n_inliers_subset
            score_best = score_subset
            inlier_mask_best = inlier_mask_subset
            X_inlier_best = X_inlier_subset
            y_inlier_best = y_inlier_subset
            inlier_best_idxs_subset = inlier_idxs_subset

            # Shrink the trial budget as the inlier ratio estimate improves.
            max_trials = min(
                max_trials,
                _dynamic_max_trials(
                    n_inliers_best, n_samples, min_samples, self.stop_probability
                ),
            )

            # break if sufficient number of inliers or score is reached
            if n_inliers_best >= self.stop_n_inliers or score_best >= self.stop_score:
                break

        # if none of the iterations met the required criteria
        if inlier_mask_best is None:
            if (
                self.n_skips_no_inliers_
                + self.n_skips_invalid_data_
                + self.n_skips_invalid_model_
            ) > self.max_skips:
                raise ValueError(
                    "RANSAC skipped more iterations than `max_skips` without"
                    " finding a valid consensus set. Iterations were skipped"
                    " because each randomly chosen sub-sample failed the"
                    " passing criteria. See estimator attributes for"
                    " diagnostics (n_skips*)."
                )
            else:
                raise ValueError(
                    "RANSAC could not find a valid consensus set. All"
                    " `max_trials` iterations were skipped because each"
                    " randomly chosen sub-sample failed the passing criteria."
                    " See estimator attributes for diagnostics (n_skips*)."
                )
        else:
            if (
                self.n_skips_no_inliers_
                + self.n_skips_invalid_data_
                + self.n_skips_invalid_model_
            ) > self.max_skips:
                warnings.warn(
                    (
                        "RANSAC found a valid consensus set but exited"
                        " early due to skipping more iterations than"
                        " `max_skips`. See estimator attributes for"
                        " diagnostics (n_skips*)."
                    ),
                    ConvergenceWarning,
                )

        # estimate final model using all inliers
        fit_params_best_idxs_subset = _check_method_params(
            X, params=routed_params.estimator.fit, indices=inlier_best_idxs_subset
        )

        estimator.fit(X_inlier_best, y_inlier_best, **fit_params_best_idxs_subset)

        self.estimator_ = estimator
        self.inlier_mask_ = inlier_mask_best
        return self

    def predict(self, X, **params):
        """Predict using the estimated model.

        This is a wrapper for `estimator_.predict(X)`.

        Parameters
        ----------
        X : {array-like or sparse matrix} of shape (n_samples, n_features)
            Input data.

        **params : dict
            Parameters routed to the `predict` method of the sub-estimator via
            the metadata routing API.

            .. versionadded:: 1.5

                Only available if
                `sklearn.set_config(enable_metadata_routing=True)` is set. See
                :ref:`Metadata Routing User Guide <metadata_routing>` for more
                details.

        Returns
        -------
        y : array, shape = [n_samples] or [n_samples, n_targets]
            Returns predicted values.
+ """ + check_is_fitted(self) + X = validate_data( + self, + X, + ensure_all_finite=False, + accept_sparse=True, + reset=False, + ) + + _raise_for_params(params, self, "predict") + + if _routing_enabled(): + predict_params = process_routing(self, "predict", **params).estimator[ + "predict" + ] + else: + predict_params = {} + + return self.estimator_.predict(X, **predict_params) + + def score(self, X, y, **params): + """Return the score of the prediction. + + This is a wrapper for `estimator_.score(X, y)`. + + Parameters + ---------- + X : (array-like or sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + **params : dict + Parameters routed to the `score` method of the sub-estimator via + the metadata routing API. + + .. versionadded:: 1.5 + + Only available if + `sklearn.set_config(enable_metadata_routing=True)` is set. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + z : float + Score of the prediction. + """ + check_is_fitted(self) + X = validate_data( + self, + X, + ensure_all_finite=False, + accept_sparse=True, + reset=False, + ) + + _raise_for_params(params, self, "score") + if _routing_enabled(): + score_params = process_routing(self, "score", **params).estimator["score"] + else: + score_params = {} + + return self.estimator_.score(X, y, **score_params) + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.5 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. 
+ """ + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.estimator, + method_mapping=MethodMapping() + .add(caller="fit", callee="fit") + .add(caller="fit", callee="score") + .add(caller="score", callee="score") + .add(caller="predict", callee="predict"), + ) + return router + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + if self.estimator is None: + tags.input_tags.sparse = True # default estimator is LinearRegression + else: + tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse + return tags diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py new file mode 100644 index 0000000000000000000000000000000000000000..9a94ba1caec1cc84814f94d6b84546d538db001b --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py @@ -0,0 +1,2947 @@ +""" +Ridge regression +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + + +import numbers +import warnings +from abc import ABCMeta, abstractmethod +from functools import partial +from numbers import Integral, Real + +import numpy as np +from scipy import linalg, optimize, sparse +from scipy.sparse import linalg as sp_linalg + +from sklearn.base import BaseEstimator + +from ..base import MultiOutputMixin, RegressorMixin, _fit_context, is_classifier +from ..exceptions import ConvergenceWarning +from ..metrics import check_scoring, get_scorer_names +from ..model_selection import GridSearchCV +from ..preprocessing import LabelBinarizer +from ..utils import ( + Bunch, + check_array, + check_consistent_length, + check_scalar, + column_or_1d, + compute_sample_weight, + deprecated, +) +from ..utils._array_api import ( + _is_numpy_namespace, + _ravel, + device, + get_namespace, + get_namespace_and_device, +) +from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params +from 
..utils.extmath import row_norms, safe_sparse_dot +from ..utils.fixes import _sparse_linalg_cg +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils.sparsefuncs import mean_variance_axis +from ..utils.validation import _check_sample_weight, check_is_fitted, validate_data +from ._base import LinearClassifierMixin, LinearModel, _preprocess_data, _rescale_data +from ._sag import sag_solver + + +def _get_rescaled_operator(X, X_offset, sample_weight_sqrt): + """Create LinearOperator for matrix products with implicit centering. + + Matrix product `LinearOperator @ coef` returns `(X - X_offset) @ coef`. + """ + + def matvec(b): + return X.dot(b) - sample_weight_sqrt * b.dot(X_offset) + + def rmatvec(b): + return X.T.dot(b) - X_offset * b.dot(sample_weight_sqrt) + + X1 = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec) + return X1 + + +def _solve_sparse_cg( + X, + y, + alpha, + max_iter=None, + tol=1e-4, + verbose=0, + X_offset=None, + X_scale=None, + sample_weight_sqrt=None, +): + if sample_weight_sqrt is None: + sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype) + + n_samples, n_features = X.shape + + if X_offset is None or X_scale is None: + X1 = sp_linalg.aslinearoperator(X) + else: + X_offset_scale = X_offset / X_scale + X1 = _get_rescaled_operator(X, X_offset_scale, sample_weight_sqrt) + + coefs = np.empty((y.shape[1], n_features), dtype=X.dtype) + + if n_features > n_samples: + + def create_mv(curr_alpha): + def _mv(x): + return X1.matvec(X1.rmatvec(x)) + curr_alpha * x + + return _mv + + else: + + def create_mv(curr_alpha): + def _mv(x): + return X1.rmatvec(X1.matvec(x)) + curr_alpha * x + + return _mv + + for i in range(y.shape[1]): + y_column = y[:, i] + + mv = create_mv(alpha[i]) + if n_features > n_samples: + # kernel ridge + # w = X.T * inv(X X^t + alpha*Id) y + C = sp_linalg.LinearOperator( + (n_samples, n_samples), matvec=mv, 
dtype=X.dtype + ) + coef, info = _sparse_linalg_cg(C, y_column, rtol=tol) + coefs[i] = X1.rmatvec(coef) + else: + # linear ridge + # w = inv(X^t X + alpha*Id) * X.T y + y_column = X1.rmatvec(y_column) + C = sp_linalg.LinearOperator( + (n_features, n_features), matvec=mv, dtype=X.dtype + ) + coefs[i], info = _sparse_linalg_cg(C, y_column, maxiter=max_iter, rtol=tol) + + if info < 0: + raise ValueError("Failed with error code %d" % info) + + if max_iter is None and info > 0 and verbose: + warnings.warn( + "sparse_cg did not converge after %d iterations." % info, + ConvergenceWarning, + ) + + return coefs + + +def _solve_lsqr( + X, + y, + *, + alpha, + fit_intercept=True, + max_iter=None, + tol=1e-4, + X_offset=None, + X_scale=None, + sample_weight_sqrt=None, +): + """Solve Ridge regression via LSQR. + + We expect that y is always mean centered. + If X is dense, we expect it to be mean centered such that we can solve + ||y - Xw||_2^2 + alpha * ||w||_2^2 + + If X is sparse, we expect X_offset to be given such that we can solve + ||y - (X - X_offset)w||_2^2 + alpha * ||w||_2^2 + + With sample weights S=diag(sample_weight), this becomes + ||sqrt(S) (y - (X - X_offset) w)||_2^2 + alpha * ||w||_2^2 + and we expect y and X to already be rescaled, i.e. sqrt(S) @ y, sqrt(S) @ X. In + this case, X_offset is the sample_weight weighted mean of X before scaling by + sqrt(S). The objective then reads + ||y - (X - sqrt(S) X_offset) w)||_2^2 + alpha * ||w||_2^2 + """ + if sample_weight_sqrt is None: + sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype) + + if sparse.issparse(X) and fit_intercept: + X_offset_scale = X_offset / X_scale + X1 = _get_rescaled_operator(X, X_offset_scale, sample_weight_sqrt) + else: + # No need to touch anything + X1 = X + + n_samples, n_features = X.shape + coefs = np.empty((y.shape[1], n_features), dtype=X.dtype) + n_iter = np.empty(y.shape[1], dtype=np.int32) + + # According to the lsqr documentation, alpha = damp^2. 
+ sqrt_alpha = np.sqrt(alpha) + + for i in range(y.shape[1]): + y_column = y[:, i] + info = sp_linalg.lsqr( + X1, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter + ) + coefs[i] = info[0] + n_iter[i] = info[2] + + return coefs, n_iter + + +def _solve_cholesky(X, y, alpha): + # w = inv(X^t X + alpha*Id) * X.T y + n_features = X.shape[1] + n_targets = y.shape[1] + + A = safe_sparse_dot(X.T, X, dense_output=True) + Xy = safe_sparse_dot(X.T, y, dense_output=True) + + one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]]) + + if one_alpha: + A.flat[:: n_features + 1] += alpha[0] + return linalg.solve(A, Xy, assume_a="pos", overwrite_a=True).T + else: + coefs = np.empty([n_targets, n_features], dtype=X.dtype) + for coef, target, current_alpha in zip(coefs, Xy.T, alpha): + A.flat[:: n_features + 1] += current_alpha + coef[:] = linalg.solve(A, target, assume_a="pos", overwrite_a=False).ravel() + A.flat[:: n_features + 1] -= current_alpha + return coefs + + +def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False): + # dual_coef = inv(X X^t + alpha*Id) y + n_samples = K.shape[0] + n_targets = y.shape[1] + + if copy: + K = K.copy() + + alpha = np.atleast_1d(alpha) + one_alpha = (alpha == alpha[0]).all() + has_sw = isinstance(sample_weight, np.ndarray) or sample_weight not in [1.0, None] + + if has_sw: + # Unlike other solvers, we need to support sample_weight directly + # because K might be a pre-computed kernel. + sw = np.sqrt(np.atleast_1d(sample_weight)) + y = y * sw[:, np.newaxis] + K *= np.outer(sw, sw) + + if one_alpha: + # Only one penalty, we can solve multi-target problems in one time. + K.flat[:: n_samples + 1] += alpha[0] + + try: + # Note: we must use overwrite_a=False in order to be able to + # use the fall-back solution below in case a LinAlgError + # is raised + dual_coef = linalg.solve(K, y, assume_a="pos", overwrite_a=False) + except np.linalg.LinAlgError: + warnings.warn( + "Singular matrix in solving dual problem. 
Using " + "least-squares solution instead." + ) + dual_coef = linalg.lstsq(K, y)[0] + + # K is expensive to compute and store in memory so change it back in + # case it was user-given. + K.flat[:: n_samples + 1] -= alpha[0] + + if has_sw: + dual_coef *= sw[:, np.newaxis] + + return dual_coef + else: + # One penalty per target. We need to solve each target separately. + dual_coefs = np.empty([n_targets, n_samples], K.dtype) + + for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha): + K.flat[:: n_samples + 1] += current_alpha + + dual_coef[:] = linalg.solve( + K, target, assume_a="pos", overwrite_a=False + ).ravel() + + K.flat[:: n_samples + 1] -= current_alpha + + if has_sw: + dual_coefs *= sw[np.newaxis, :] + + return dual_coefs.T + + +def _solve_svd(X, y, alpha, xp=None): + xp, _ = get_namespace(X, xp=xp) + U, s, Vt = xp.linalg.svd(X, full_matrices=False) + idx = s > 1e-15 # same default value as scipy.linalg.pinv + s_nnz = s[idx][:, None] + UTy = U.T @ y + d = xp.zeros((s.shape[0], alpha.shape[0]), dtype=X.dtype, device=device(X)) + d[idx] = s_nnz / (s_nnz**2 + alpha) + d_UT_y = d * UTy + return (Vt.T @ d_UT_y).T + + +def _solve_lbfgs( + X, + y, + alpha, + positive=True, + max_iter=None, + tol=1e-4, + X_offset=None, + X_scale=None, + sample_weight_sqrt=None, +): + """Solve ridge regression with LBFGS. + + The main purpose is fitting with forcing coefficients to be positive. + For unconstrained ridge regression, there are faster dedicated solver methods. + Note that with positive bounds on the coefficients, LBFGS seems faster + than scipy.optimize.lsq_linear. 
+ """ + n_samples, n_features = X.shape + + options = {} + if max_iter is not None: + options["maxiter"] = max_iter + config = { + "method": "L-BFGS-B", + "tol": tol, + "jac": True, + "options": options, + } + if positive: + config["bounds"] = [(0, np.inf)] * n_features + + if X_offset is not None and X_scale is not None: + X_offset_scale = X_offset / X_scale + else: + X_offset_scale = None + + if sample_weight_sqrt is None: + sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype) + + coefs = np.empty((y.shape[1], n_features), dtype=X.dtype) + + for i in range(y.shape[1]): + x0 = np.zeros((n_features,)) + y_column = y[:, i] + + def func(w): + residual = X.dot(w) - y_column + if X_offset_scale is not None: + residual -= sample_weight_sqrt * w.dot(X_offset_scale) + f = 0.5 * residual.dot(residual) + 0.5 * alpha[i] * w.dot(w) + grad = X.T @ residual + alpha[i] * w + if X_offset_scale is not None: + grad -= X_offset_scale * residual.dot(sample_weight_sqrt) + + return f, grad + + result = optimize.minimize(func, x0, **config) + if not result["success"]: + warnings.warn( + ( + "The lbfgs solver did not converge. Try increasing max_iter " + f"or tol. 
Currently: max_iter={max_iter} and tol={tol}" + ), + ConvergenceWarning, + ) + coefs[i] = result["x"] + + return coefs + + +def _get_valid_accept_sparse(is_X_sparse, solver): + if is_X_sparse and solver in ["auto", "sag", "saga"]: + return "csr" + else: + return ["csr", "csc", "coo"] + + +@validate_params( + { + "X": ["array-like", "sparse matrix", sp_linalg.LinearOperator], + "y": ["array-like"], + "alpha": [Interval(Real, 0, None, closed="left"), "array-like"], + "sample_weight": [ + Interval(Real, None, None, closed="neither"), + "array-like", + None, + ], + "solver": [ + StrOptions( + {"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"} + ) + ], + "max_iter": [Interval(Integral, 0, None, closed="left"), None], + "tol": [Interval(Real, 0, None, closed="left")], + "verbose": ["verbose"], + "positive": ["boolean"], + "random_state": ["random_state"], + "return_n_iter": ["boolean"], + "return_intercept": ["boolean"], + "check_input": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def ridge_regression( + X, + y, + alpha, + *, + sample_weight=None, + solver="auto", + max_iter=None, + tol=1e-4, + verbose=0, + positive=False, + random_state=None, + return_n_iter=False, + return_intercept=False, + check_input=True, +): + """Solve the ridge equation by the method of normal equations. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix, LinearOperator} of shape \ + (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + alpha : float or array-like of shape (n_targets,) + Constant that multiplies the L2 term, controlling regularization + strength. `alpha` must be a non-negative float i.e. in `[0, inf)`. + + When `alpha = 0`, the objective is equivalent to ordinary least + squares, solved by the :class:`LinearRegression` object. For numerical + reasons, using `alpha = 0` with the `Ridge` object is not advised. 
+ Instead, you should use the :class:`LinearRegression` object. + + If an array is passed, penalties are assumed to be specific to the + targets. Hence they must correspond in number. + + sample_weight : float or array-like of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. If sample_weight is not None and + solver='auto', the solver will be set to 'cholesky'. + + .. versionadded:: 0.17 + + solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \ + 'sag', 'saga', 'lbfgs'}, default='auto' + Solver to use in the computational routines: + + - 'auto' chooses the solver automatically based on the type of data. + + - 'svd' uses a Singular Value Decomposition of X to compute the Ridge + coefficients. It is the most stable solver, in particular more stable + for singular matrices than 'cholesky' at the cost of being slower. + + - 'cholesky' uses the standard scipy.linalg.solve function to + obtain a closed-form solution via a Cholesky decomposition of + dot(X.T, X) + + - 'sparse_cg' uses the conjugate gradient solver as found in + scipy.sparse.linalg.cg. As an iterative algorithm, this solver is + more appropriate than 'cholesky' for large-scale data + (possibility to set `tol` and `max_iter`). + + - 'lsqr' uses the dedicated regularized least-squares routine + scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative + procedure. + + - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses + its improved, unbiased version named SAGA. Both methods also use an + iterative procedure, and are often faster than other solvers when + both n_samples and n_features are large. Note that 'sag' and + 'saga' fast convergence is only guaranteed on features with + approximately the same scale. You can preprocess the data with a + scaler from sklearn.preprocessing. + + - 'lbfgs' uses L-BFGS-B algorithm implemented in + `scipy.optimize.minimize`. 
It can be used only when `positive` + is True. + + All solvers except 'svd' support both dense and sparse data. However, only + 'lsqr', 'sag', 'sparse_cg', and 'lbfgs' support sparse input when + `fit_intercept` is True. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + + max_iter : int, default=None + Maximum number of iterations for conjugate gradient solver. + For the 'sparse_cg' and 'lsqr' solvers, the default value is determined + by scipy.sparse.linalg. For 'sag' and saga solver, the default value is + 1000. For 'lbfgs' solver, the default value is 15000. + + tol : float, default=1e-4 + Precision of the solution. Note that `tol` has no effect for solvers 'svd' and + 'cholesky'. + + .. versionchanged:: 1.2 + Default value changed from 1e-3 to 1e-4 for consistency with other linear + models. + + verbose : int, default=0 + Verbosity level. Setting verbose > 0 will display additional + information depending on the solver used. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + Only 'lbfgs' solver is supported in this case. + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag' or 'saga' to shuffle the data. + See :term:`Glossary ` for details. + + return_n_iter : bool, default=False + If True, the method also returns `n_iter`, the actual number of + iteration performed by the solver. + + .. versionadded:: 0.17 + + return_intercept : bool, default=False + If True and if X is sparse, the method also returns the intercept, + and the solver is automatically changed to 'sag'. This is only a + temporary fix for fitting the intercept with sparse data. For dense + data, use sklearn.linear_model._preprocess_data before your regression. + + .. versionadded:: 0.17 + + check_input : bool, default=True + If False, the input arrays X and y will not be checked. + + .. 
versionadded:: 0.21 + + Returns + ------- + coef : ndarray of shape (n_features,) or (n_targets, n_features) + Weight vector(s). + + n_iter : int, optional + The actual number of iteration performed by the solver. + Only returned if `return_n_iter` is True. + + intercept : float or ndarray of shape (n_targets,) + The intercept of the model. Only returned if `return_intercept` + is True and if X is a scipy sparse array. + + Notes + ----- + This function won't compute the intercept. + + Regularization improves the conditioning of the problem and + reduces the variance of the estimates. Larger values specify stronger + regularization. Alpha corresponds to ``1 / (2C)`` in other linear + models such as :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are + assumed to be specific to the targets. Hence they must correspond in + number. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_regression + >>> from sklearn.linear_model import ridge_regression + >>> rng = np.random.RandomState(0) + >>> X = rng.randn(100, 4) + >>> y = 2.0 * X[:, 0] - 1.0 * X[:, 1] + 0.1 * rng.standard_normal(100) + >>> coef, intercept = ridge_regression(X, y, alpha=1.0, return_intercept=True) + >>> list(coef) + [np.float64(1.9...), np.float64(-1.0...), np.float64(-0.0...), np.float64(-0.0...)] + >>> intercept + np.float64(-0.0...) 
+ """ + return _ridge_regression( + X, + y, + alpha, + sample_weight=sample_weight, + solver=solver, + max_iter=max_iter, + tol=tol, + verbose=verbose, + positive=positive, + random_state=random_state, + return_n_iter=return_n_iter, + return_intercept=return_intercept, + X_scale=None, + X_offset=None, + check_input=check_input, + ) + + +def _ridge_regression( + X, + y, + alpha, + sample_weight=None, + solver="auto", + max_iter=None, + tol=1e-4, + verbose=0, + positive=False, + random_state=None, + return_n_iter=False, + return_intercept=False, + return_solver=False, + X_scale=None, + X_offset=None, + check_input=True, + fit_intercept=False, +): + xp, is_array_api_compliant, device_ = get_namespace_and_device( + X, y, sample_weight, X_scale, X_offset + ) + is_numpy_namespace = _is_numpy_namespace(xp) + X_is_sparse = sparse.issparse(X) + + has_sw = sample_weight is not None + + solver = resolve_solver(solver, positive, return_intercept, X_is_sparse, xp) + + if is_numpy_namespace and not X_is_sparse: + X = np.asarray(X) + + if not is_numpy_namespace and solver != "svd": + raise ValueError( + f"Array API dispatch to namespace {xp.__name__} only supports " + f"solver 'svd'. Got '{solver}'." + ) + + if positive and solver != "lbfgs": + raise ValueError( + "When positive=True, only 'lbfgs' solver can be used. " + f"Please change solver {solver} to 'lbfgs' " + "or set positive=False." + ) + + if solver == "lbfgs" and not positive: + raise ValueError( + "'lbfgs' solver can be used only when positive=True. " + "Please use another solver." + ) + + if return_intercept and solver != "sag": + raise ValueError( + "In Ridge, only 'sag' solver can directly fit the " + "intercept. Please change solver to 'sag' or set " + "return_intercept=False." 
+ ) + + if check_input: + _dtype = [xp.float64, xp.float32] + _accept_sparse = _get_valid_accept_sparse(X_is_sparse, solver) + X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order="C") + y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None) + check_consistent_length(X, y) + + n_samples, n_features = X.shape + + if y.ndim > 2: + raise ValueError("Target y has the wrong shape %s" % str(y.shape)) + + if y.ndim == 1: + y = xp.reshape(y, (-1, 1)) + + n_samples_, n_targets = y.shape + + if n_samples != n_samples_: + raise ValueError( + "Number of samples in X and y does not correspond: %d != %d" + % (n_samples, n_samples_) + ) + + if has_sw: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + if solver not in ["sag", "saga"]: + # SAG supports sample_weight directly. For other solvers, + # we implement sample_weight via a simple rescaling. + X, y, sample_weight_sqrt = _rescale_data(X, y, sample_weight) + + # Some callers of this method might pass alpha as single + # element array which already has been validated. 
+ if alpha is not None and not isinstance(alpha, type(xp.asarray([0.0]))): + alpha = check_scalar( + alpha, + "alpha", + target_type=numbers.Real, + min_val=0.0, + include_boundaries="left", + ) + + # There should be either 1 or n_targets penalties + alpha = _ravel(xp.asarray(alpha, device=device_, dtype=X.dtype), xp=xp) + if alpha.shape[0] not in [1, n_targets]: + raise ValueError( + "Number of targets and number of penalties do not correspond: %d != %d" + % (alpha.shape[0], n_targets) + ) + + if alpha.shape[0] == 1 and n_targets > 1: + alpha = xp.full( + shape=(n_targets,), fill_value=alpha[0], dtype=alpha.dtype, device=device_ + ) + + n_iter = None + if solver == "sparse_cg": + coef = _solve_sparse_cg( + X, + y, + alpha, + max_iter=max_iter, + tol=tol, + verbose=verbose, + X_offset=X_offset, + X_scale=X_scale, + sample_weight_sqrt=sample_weight_sqrt if has_sw else None, + ) + + elif solver == "lsqr": + coef, n_iter = _solve_lsqr( + X, + y, + alpha=alpha, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + X_offset=X_offset, + X_scale=X_scale, + sample_weight_sqrt=sample_weight_sqrt if has_sw else None, + ) + + elif solver == "cholesky": + if n_features > n_samples: + K = safe_sparse_dot(X, X.T, dense_output=True) + try: + dual_coef = _solve_cholesky_kernel(K, y, alpha) + + coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T + except linalg.LinAlgError: + # use SVD solver if matrix is singular + solver = "svd" + else: + try: + coef = _solve_cholesky(X, y, alpha) + except linalg.LinAlgError: + # use SVD solver if matrix is singular + solver = "svd" + + elif solver in ["sag", "saga"]: + # precompute max_squared_sum for all targets + max_squared_sum = row_norms(X, squared=True).max() + + coef = np.empty((y.shape[1], n_features), dtype=X.dtype) + n_iter = np.empty(y.shape[1], dtype=np.int32) + intercept = np.zeros((y.shape[1],), dtype=X.dtype) + for i, (alpha_i, target) in enumerate(zip(alpha, y.T)): + init = { + "coef": np.zeros((n_features + 
int(return_intercept), 1), dtype=X.dtype) + } + coef_, n_iter_, _ = sag_solver( + X, + target.ravel(), + sample_weight, + "squared", + alpha_i, + 0, + max_iter, + tol, + verbose, + random_state, + False, + max_squared_sum, + init, + is_saga=solver == "saga", + ) + if return_intercept: + coef[i] = coef_[:-1] + intercept[i] = coef_[-1] + else: + coef[i] = coef_ + n_iter[i] = n_iter_ + + if intercept.shape[0] == 1: + intercept = intercept[0] + + elif solver == "lbfgs": + coef = _solve_lbfgs( + X, + y, + alpha, + positive=positive, + tol=tol, + max_iter=max_iter, + X_offset=X_offset, + X_scale=X_scale, + sample_weight_sqrt=sample_weight_sqrt if has_sw else None, + ) + + if solver == "svd": + if X_is_sparse: + raise TypeError("SVD solver does not support sparse inputs currently") + coef = _solve_svd(X, y, alpha, xp) + + if n_targets == 1: + coef = _ravel(coef) + + coef = xp.asarray(coef) + + if return_n_iter and return_intercept: + res = coef, n_iter, intercept + elif return_intercept: + res = coef, intercept + elif return_n_iter: + res = coef, n_iter + else: + res = coef + + return (*res, solver) if return_solver else res + + +def resolve_solver(solver, positive, return_intercept, is_sparse, xp): + if solver != "auto": + return solver + + is_numpy_namespace = _is_numpy_namespace(xp) + + auto_solver_np = resolve_solver_for_numpy(positive, return_intercept, is_sparse) + if is_numpy_namespace: + return auto_solver_np + + if positive: + raise ValueError( + "The solvers that support positive fitting do not support " + f"Array API dispatch to namespace {xp.__name__}. Please " + "either disable Array API dispatch, or use a numpy-like " + "namespace, or set `positive=False`." + ) + + # At the moment, Array API dispatch only supports the "svd" solver. + solver = "svd" + if solver != auto_solver_np: + warnings.warn( + f"Using Array API dispatch to namespace {xp.__name__} with " + f"`solver='auto'` will result in using the solver '{solver}'. 
" + "The results may differ from those when using a Numpy array, " + f"because in that case the preferred solver would be {auto_solver_np}. " + f"Set `solver='{solver}'` to suppress this warning." + ) + + return solver + + +def resolve_solver_for_numpy(positive, return_intercept, is_sparse): + if positive: + return "lbfgs" + + if return_intercept: + # sag supports fitting intercept directly + return "sag" + + if not is_sparse: + return "cholesky" + + return "sparse_cg" + + +class _BaseRidge(LinearModel, metaclass=ABCMeta): + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0, None, closed="left"), np.ndarray], + "fit_intercept": ["boolean"], + "copy_X": ["boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left"), None], + "tol": [Interval(Real, 0, None, closed="left")], + "solver": [ + StrOptions( + {"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"} + ) + ], + "positive": ["boolean"], + "random_state": ["random_state"], + } + + @abstractmethod + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + copy_X=True, + max_iter=None, + tol=1e-4, + solver="auto", + positive=False, + random_state=None, + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.copy_X = copy_X + self.max_iter = max_iter + self.tol = tol + self.solver = solver + self.positive = positive + self.random_state = random_state + + def fit(self, X, y, sample_weight=None): + xp, is_array_api_compliant = get_namespace(X, y, sample_weight) + + if self.solver == "lbfgs" and not self.positive: + raise ValueError( + "'lbfgs' solver can be used only when positive=True. " + "Please use another solver." + ) + + if self.positive: + if self.solver not in ["auto", "lbfgs"]: + raise ValueError( + f"solver='{self.solver}' does not support positive fitting. 
Please" + " set the solver to 'auto' or 'lbfgs', or set `positive=False`" + ) + else: + solver = self.solver + elif sparse.issparse(X) and self.fit_intercept: + if self.solver not in ["auto", "lbfgs", "lsqr", "sag", "sparse_cg"]: + raise ValueError( + "solver='{}' does not support fitting the intercept " + "on sparse data. Please set the solver to 'auto' or " + "'lsqr', 'sparse_cg', 'sag', 'lbfgs' " + "or set `fit_intercept=False`".format(self.solver) + ) + if self.solver in ["lsqr", "lbfgs"]: + solver = self.solver + elif self.solver == "sag" and self.max_iter is None and self.tol > 1e-4: + warnings.warn( + '"sag" solver requires many iterations to fit ' + "an intercept with sparse inputs. Either set the " + 'solver to "auto" or "sparse_cg", or set a low ' + '"tol" and a high "max_iter" (especially if inputs are ' + "not standardized)." + ) + solver = "sag" + else: + solver = "sparse_cg" + else: + solver = self.solver + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + # when X is sparse we only remove offset from y + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, + y, + fit_intercept=self.fit_intercept, + copy=self.copy_X, + sample_weight=sample_weight, + ) + + if solver == "sag" and sparse.issparse(X) and self.fit_intercept: + self.coef_, self.n_iter_, self.intercept_, self.solver_ = _ridge_regression( + X, + y, + alpha=self.alpha, + sample_weight=sample_weight, + max_iter=self.max_iter, + tol=self.tol, + solver="sag", + positive=self.positive, + random_state=self.random_state, + return_n_iter=True, + return_intercept=True, + return_solver=True, + check_input=False, + ) + # add the offset which was subtracted by _preprocess_data + self.intercept_ += y_offset + + else: + if sparse.issparse(X) and self.fit_intercept: + # required to fit intercept with sparse_cg and lbfgs solver + params = {"X_offset": X_offset, "X_scale": X_scale} + else: + # for dense matrices or when intercept is set to 0 + 
params = {} + + self.coef_, self.n_iter_, self.solver_ = _ridge_regression( + X, + y, + alpha=self.alpha, + sample_weight=sample_weight, + max_iter=self.max_iter, + tol=self.tol, + solver=solver, + positive=self.positive, + random_state=self.random_state, + return_n_iter=True, + return_intercept=False, + return_solver=True, + check_input=False, + fit_intercept=self.fit_intercept, + **params, + ) + self._set_intercept(X_offset, y_offset, X_scale) + + return self + + +class Ridge(MultiOutputMixin, RegressorMixin, _BaseRidge): + """Linear least squares with l2 regularization. + + Minimizes the objective function:: + + ||y - Xw||^2_2 + alpha * ||w||^2_2 + + This model solves a regression model where the loss function is + the linear least squares function and regularization is given by + the l2-norm. Also known as Ridge Regression or Tikhonov regularization. + This estimator has built-in support for multi-variate regression + (i.e., when y is a 2d-array of shape (n_samples, n_targets)). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : {float, ndarray of shape (n_targets,)}, default=1.0 + Constant that multiplies the L2 term, controlling regularization + strength. `alpha` must be a non-negative float i.e. in `[0, inf)`. + + When `alpha = 0`, the objective is equivalent to ordinary least + squares, solved by the :class:`LinearRegression` object. For numerical + reasons, using `alpha = 0` with the `Ridge` object is not advised. + Instead, you should use the :class:`LinearRegression` object. + + If an array is passed, penalties are assumed to be specific to the + targets. Hence they must correspond in number. + + fit_intercept : bool, default=True + Whether to fit the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. ``X`` and ``y`` are expected to be centered). + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. 
+ + max_iter : int, default=None + Maximum number of iterations for conjugate gradient solver. + For 'sparse_cg' and 'lsqr' solvers, the default value is determined + by scipy.sparse.linalg. For 'sag' solver, the default value is 1000. + For 'lbfgs' solver, the default value is 15000. + + tol : float, default=1e-4 + The precision of the solution (`coef_`) is determined by `tol` which + specifies a different convergence criterion for each solver: + + - 'svd': `tol` has no impact. + + - 'cholesky': `tol` has no impact. + + - 'sparse_cg': norm of residuals smaller than `tol`. + + - 'lsqr': `tol` is set as atol and btol of scipy.sparse.linalg.lsqr, + which control the norm of the residual vector in terms of the norms of + matrix and coefficients. + + - 'sag' and 'saga': relative change of coef smaller than `tol`. + + - 'lbfgs': maximum of the absolute (projected) gradient=max|residuals| + smaller than `tol`. + + .. versionchanged:: 1.2 + Default value changed from 1e-3 to 1e-4 for consistency with other linear + models. + + solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \ + 'sag', 'saga', 'lbfgs'}, default='auto' + Solver to use in the computational routines: + + - 'auto' chooses the solver automatically based on the type of data. + + - 'svd' uses a Singular Value Decomposition of X to compute the Ridge + coefficients. It is the most stable solver, in particular more stable + for singular matrices than 'cholesky' at the cost of being slower. + + - 'cholesky' uses the standard scipy.linalg.solve function to + obtain a closed-form solution. + + - 'sparse_cg' uses the conjugate gradient solver as found in + scipy.sparse.linalg.cg. As an iterative algorithm, this solver is + more appropriate than 'cholesky' for large-scale data + (possibility to set `tol` and `max_iter`). + + - 'lsqr' uses the dedicated regularized least-squares routine + scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative + procedure. 
+ + - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses + its improved, unbiased version named SAGA. Both methods also use an + iterative procedure, and are often faster than other solvers when + both n_samples and n_features are large. Note that 'sag' and + 'saga' fast convergence is only guaranteed on features with + approximately the same scale. You can preprocess the data with a + scaler from sklearn.preprocessing. + + - 'lbfgs' uses L-BFGS-B algorithm implemented in + `scipy.optimize.minimize`. It can be used only when `positive` + is True. + + All solvers except 'svd' support both dense and sparse data. However, only + 'lsqr', 'sag', 'sparse_cg', and 'lbfgs' support sparse input when + `fit_intercept` is True. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. + Only 'lbfgs' solver is supported in this case. + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag' or 'saga' to shuffle the data. + See :term:`Glossary ` for details. + + .. versionadded:: 0.17 + `random_state` to support Stochastic Average Gradient. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_targets, n_features) + Weight vector(s). + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + n_iter_ : None or ndarray of shape (n_targets,) + Actual number of iterations for each target. Available only for + sag and lsqr solvers. Other solvers will return None. + + .. versionadded:: 0.17 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + solver_ : str + The solver that was used at fit time by the computational + routines. + + .. versionadded:: 1.5 + + See Also + -------- + RidgeClassifier : Ridge classifier. + RidgeCV : Ridge regression with built-in cross validation. + :class:`~sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression + combines ridge regression with the kernel trick. + + Notes + ----- + Regularization improves the conditioning of the problem and + reduces the variance of the estimates. Larger values specify stronger + regularization. Alpha corresponds to ``1 / (2C)`` in other linear + models such as :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. + + Examples + -------- + >>> from sklearn.linear_model import Ridge + >>> import numpy as np + >>> n_samples, n_features = 10, 5 + >>> rng = np.random.RandomState(0) + >>> y = rng.randn(n_samples) + >>> X = rng.randn(n_samples, n_features) + >>> clf = Ridge(alpha=1.0) + >>> clf.fit(X, y) + Ridge() + """ + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + copy_X=True, + max_iter=None, + tol=1e-4, + solver="auto", + positive=False, + random_state=None, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + copy_X=copy_X, + max_iter=max_iter, + tol=tol, + solver=solver, + positive=positive, + random_state=random_state, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Ridge regression model. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + Returns + ------- + self : object + Fitted estimator. 
+ """ + _accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), self.solver) + xp, _ = get_namespace(X, y, sample_weight) + X, y = validate_data( + self, + X, + y, + accept_sparse=_accept_sparse, + dtype=[xp.float64, xp.float32], + force_writeable=True, + multi_output=True, + y_numeric=True, + ) + return super().fit(X, y, sample_weight=sample_weight) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.array_api_support = True + tags.input_tags.sparse = (self.solver != "svd") and ( + self.solver != "cholesky" or not self.fit_intercept + ) + return tags + + +class _RidgeClassifierMixin(LinearClassifierMixin): + def _prepare_data(self, X, y, sample_weight, solver): + """Validate `X` and `y` and binarize `y`. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + solver : str + The solver used in `Ridge` to know which sparse format to support. + + Returns + ------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Validated training data. + + y : ndarray of shape (n_samples,) + Validated target values. + + sample_weight : ndarray of shape (n_samples,) + Validated sample weights. + + Y : ndarray of shape (n_samples, n_classes) + The binarized version of `y`. 
+ """ + accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver) + X, y = validate_data( + self, + X, + y, + accept_sparse=accept_sparse, + multi_output=True, + y_numeric=False, + force_writeable=True, + ) + + self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1) + Y = self._label_binarizer.fit_transform(y) + if not self._label_binarizer.y_type_.startswith("multilabel"): + y = column_or_1d(y, warn=True) + + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + if self.class_weight: + sample_weight = sample_weight * compute_sample_weight(self.class_weight, y) + return X, y, sample_weight, Y + + def predict(self, X): + """Predict class labels for samples in `X`. + + Parameters + ---------- + X : {array-like, spare matrix} of shape (n_samples, n_features) + The data matrix for which we want to predict the targets. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs) + Vector or matrix containing the predictions. In binary and + multiclass problems, this is a vector containing `n_samples`. In + a multilabel problem, it returns a matrix of shape + `(n_samples, n_outputs)`. + """ + check_is_fitted(self, attributes=["_label_binarizer"]) + if self._label_binarizer.y_type_.startswith("multilabel"): + # Threshold such that the negative label is -1 and positive label + # is 1 to use the inverse transform of the label binarizer fitted + # during fit. + scores = 2 * (self.decision_function(X) > 0) - 1 + return self._label_binarizer.inverse_transform(scores) + return super().predict(X) + + @property + def classes_(self): + """Classes labels.""" + return self._label_binarizer.classes_ + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.classifier_tags.multi_label = True + return tags + + +class RidgeClassifier(_RidgeClassifierMixin, _BaseRidge): + """Classifier using Ridge regression. 
+ + This classifier first converts the target values into ``{-1, 1}`` and + then treats the problem as a regression task (multi-output regression in + the multiclass case). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Regularization strength; must be a positive float. Regularization + improves the conditioning of the problem and reduces the variance of + the estimates. Larger values specify stronger regularization. + Alpha corresponds to ``1 / (2C)`` in other linear models such as + :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set to false, no + intercept will be used in calculations (e.g. data is expected to be + already centered). + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + max_iter : int, default=None + Maximum number of iterations for conjugate gradient solver. + The default value is determined by scipy.sparse.linalg. + + tol : float, default=1e-4 + The precision of the solution (`coef_`) is determined by `tol` which + specifies a different convergence criterion for each solver: + + - 'svd': `tol` has no impact. + + - 'cholesky': `tol` has no impact. + + - 'sparse_cg': norm of residuals smaller than `tol`. + + - 'lsqr': `tol` is set as atol and btol of scipy.sparse.linalg.lsqr, + which control the norm of the residual vector in terms of the norms of + matrix and coefficients. + + - 'sag' and 'saga': relative change of coef smaller than `tol`. + + - 'lbfgs': maximum of the absolute (projected) gradient=max|residuals| + smaller than `tol`. + + .. versionchanged:: 1.2 + Default value changed from 1e-3 to 1e-4 for consistency with other linear + models. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. 
+ If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \ + 'sag', 'saga', 'lbfgs'}, default='auto' + Solver to use in the computational routines: + + - 'auto' chooses the solver automatically based on the type of data. + + - 'svd' uses a Singular Value Decomposition of X to compute the Ridge + coefficients. It is the most stable solver, in particular more stable + for singular matrices than 'cholesky' at the cost of being slower. + + - 'cholesky' uses the standard scipy.linalg.solve function to + obtain a closed-form solution. + + - 'sparse_cg' uses the conjugate gradient solver as found in + scipy.sparse.linalg.cg. As an iterative algorithm, this solver is + more appropriate than 'cholesky' for large-scale data + (possibility to set `tol` and `max_iter`). + + - 'lsqr' uses the dedicated regularized least-squares routine + scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative + procedure. + + - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses + its unbiased and more flexible version named SAGA. Both methods + use an iterative procedure, and are often faster than other solvers + when both n_samples and n_features are large. Note that 'sag' and + 'saga' fast convergence is only guaranteed on features with + approximately the same scale. You can preprocess the data with a + scaler from sklearn.preprocessing. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + + - 'lbfgs' uses L-BFGS-B algorithm implemented in + `scipy.optimize.minimize`. It can be used only when `positive` + is True. + + positive : bool, default=False + When set to ``True``, forces the coefficients to be positive. 
+ Only 'lbfgs' solver is supported in this case. + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag' or 'saga' to shuffle the data. + See :term:`Glossary ` for details. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) or (n_classes, n_features) + Coefficient of the features in the decision function. + + ``coef_`` is of shape (1, n_features) when the given problem is binary. + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + n_iter_ : None or ndarray of shape (n_targets,) + Actual number of iterations for each target. Available only for + sag and lsqr solvers. Other solvers will return None. + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + solver_ : str + The solver that was used at fit time by the computational + routines. + + .. versionadded:: 1.5 + + See Also + -------- + Ridge : Ridge regression. + RidgeClassifierCV : Ridge classifier with built-in cross validation. + + Notes + ----- + For multi-class classification, n_class classifiers are trained in + a one-versus-all approach. Concretely, this is implemented by taking + advantage of the multi-variate response support in Ridge. + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.linear_model import RidgeClassifier + >>> X, y = load_breast_cancer(return_X_y=True) + >>> clf = RidgeClassifier().fit(X, y) + >>> clf.score(X, y) + 0.9595... 
+ """ + + _parameter_constraints: dict = { + **_BaseRidge._parameter_constraints, + "class_weight": [dict, StrOptions({"balanced"}), None], + } + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + copy_X=True, + max_iter=None, + tol=1e-4, + class_weight=None, + solver="auto", + positive=False, + random_state=None, + ): + super().__init__( + alpha=alpha, + fit_intercept=fit_intercept, + copy_X=copy_X, + max_iter=max_iter, + tol=tol, + solver=solver, + positive=positive, + random_state=random_state, + ) + self.class_weight = class_weight + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Ridge classifier model. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + .. versionadded:: 0.17 + *sample_weight* support to RidgeClassifier. + + Returns + ------- + self : object + Instance of the estimator. + """ + X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, self.solver) + + super().fit(X, Y, sample_weight=sample_weight) + return self + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = (self.solver != "svd") and ( + self.solver != "cholesky" or not self.fit_intercept + ) + return tags + + +def _check_gcv_mode(X, gcv_mode): + if gcv_mode in ["eigen", "svd"]: + return gcv_mode + # if X has more rows than columns, use decomposition of X^T.X, + # otherwise X.X^T + if X.shape[0] > X.shape[1]: + return "svd" + return "eigen" + + +def _find_smallest_angle(query, vectors): + """Find the column of vectors that is most aligned with the query. + + Both query and the columns of vectors must have their l2 norm equal to 1. 
+ + Parameters + ---------- + query : ndarray of shape (n_samples,) + Normalized query vector. + + vectors : ndarray of shape (n_samples, n_features) + Vectors to which we compare query, as columns. Must be normalized. + """ + abs_cosine = np.abs(query.dot(vectors)) + index = np.argmax(abs_cosine) + return index + + +class _X_CenterStackOp(sparse.linalg.LinearOperator): + """Behaves as centered and scaled X with an added intercept column. + + This operator behaves as + np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]) + """ + + def __init__(self, X, X_mean, sqrt_sw): + n_samples, n_features = X.shape + super().__init__(X.dtype, (n_samples, n_features + 1)) + self.X = X + self.X_mean = X_mean + self.sqrt_sw = sqrt_sw + + def _matvec(self, v): + v = v.ravel() + return ( + safe_sparse_dot(self.X, v[:-1], dense_output=True) + - self.sqrt_sw * self.X_mean.dot(v[:-1]) + + v[-1] * self.sqrt_sw + ) + + def _matmat(self, v): + return ( + safe_sparse_dot(self.X, v[:-1], dense_output=True) + - self.sqrt_sw[:, None] * self.X_mean.dot(v[:-1]) + + v[-1] * self.sqrt_sw[:, None] + ) + + def _transpose(self): + return _XT_CenterStackOp(self.X, self.X_mean, self.sqrt_sw) + + +class _XT_CenterStackOp(sparse.linalg.LinearOperator): + """Behaves as transposed centered and scaled X with an intercept column. 
+ + This operator behaves as + np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).T + """ + + def __init__(self, X, X_mean, sqrt_sw): + n_samples, n_features = X.shape + super().__init__(X.dtype, (n_features + 1, n_samples)) + self.X = X + self.X_mean = X_mean + self.sqrt_sw = sqrt_sw + + def _matvec(self, v): + v = v.ravel() + n_features = self.shape[0] + res = np.empty(n_features, dtype=self.X.dtype) + res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - ( + self.X_mean * self.sqrt_sw.dot(v) + ) + res[-1] = np.dot(v, self.sqrt_sw) + return res + + def _matmat(self, v): + n_features = self.shape[0] + res = np.empty((n_features, v.shape[1]), dtype=self.X.dtype) + res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - self.X_mean[ + :, None + ] * self.sqrt_sw.dot(v) + res[-1] = np.dot(self.sqrt_sw, v) + return res + + +class _IdentityRegressor(RegressorMixin, BaseEstimator): + """Fake regressor which will directly output the prediction.""" + + def decision_function(self, y_predict): + return y_predict + + def predict(self, y_predict): + return y_predict + + +class _IdentityClassifier(LinearClassifierMixin, BaseEstimator): + """Fake classifier which will directly output the prediction. + + We inherit from LinearClassifierMixin to get the proper shape for the + output `y`. + """ + + def __init__(self, classes): + self.classes_ = classes + + def decision_function(self, y_predict): + return y_predict + + +class _RidgeGCV(LinearModel): + """Ridge regression with built-in Leave-one-out Cross-Validation. + + This class is not intended to be used directly. Use RidgeCV instead. + + `_RidgeGCV` uses a Generalized Cross-Validation for model selection. It's an + efficient approximation of leave-one-out cross-validation (LOO-CV), where instead of + computing multiple models by excluding one data point at a time, it uses an + algebraic shortcut to approximate the LOO-CV error, making it faster and + computationally more efficient. 
+ + Using a naive grid-search approach with a leave-one-out cross-validation in contrast + requires to fit `n_samples` models to compute the prediction error for each sample + and then to repeat this process for each alpha in the grid. + + Here, the prediction error for each sample is computed by solving a **single** + linear system (in other words a single model) via a matrix factorization (i.e. + eigendecomposition or SVD) solving the problem stated in the Notes section. Finally, + we need to repeat this process for each alpha in the grid. The detailed complexity + is further discussed in Sect. 4 in [1]. + + This algebraic approach is only applicable for regularized least squares + problems. It could potentially be extended to kernel ridge regression. + + See the Notes section and references for more details regarding the formulation + and the linear system that is solved. + + Notes + ----- + + We want to solve (K + alpha*Id)c = y, + where K = X X^T is the kernel matrix. + + Let G = (K + alpha*Id). + + Dual solution: c = G^-1y + Primal solution: w = X^T c + + Compute eigendecomposition K = Q V Q^T. + Then G^-1 = Q (V + alpha*Id)^-1 Q^T, + where (V + alpha*Id) is diagonal. + It is thus inexpensive to inverse for many alphas. + + Let loov be the vector of prediction values for each example + when the model was fitted with all examples but this example. + + loov = (KG^-1Y - diag(KG^-1)Y) / diag(I-KG^-1) + + Let looe be the vector of prediction errors for each example + when the model was fitted with all examples but this example. + + looe = y - loov = c / diag(G^-1) + + The best score (negative mean squared error or user-provided scoring) is + stored in the `best_score_` attribute, and the selected hyperparameter in + `alpha_`. 
+ + References + ---------- + [1] http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf + [2] https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf + """ + + def __init__( + self, + alphas=(0.1, 1.0, 10.0), + *, + fit_intercept=True, + scoring=None, + copy_X=True, + gcv_mode=None, + store_cv_results=False, + is_clf=False, + alpha_per_target=False, + ): + self.alphas = alphas + self.fit_intercept = fit_intercept + self.scoring = scoring + self.copy_X = copy_X + self.gcv_mode = gcv_mode + self.store_cv_results = store_cv_results + self.is_clf = is_clf + self.alpha_per_target = alpha_per_target + + @staticmethod + def _decomp_diag(v_prime, Q): + # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T)) + return (v_prime * Q**2).sum(axis=-1) + + @staticmethod + def _diag_dot(D, B): + # compute dot(diag(D), B) + if len(B.shape) > 1: + # handle case where B is > 1-d + D = D[(slice(None),) + (np.newaxis,) * (len(B.shape) - 1)] + return D * B + + def _compute_gram(self, X, sqrt_sw): + """Computes the Gram matrix XX^T with possible centering. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + The preprocessed design matrix. + + sqrt_sw : ndarray of shape (n_samples,) + square roots of sample weights + + Returns + ------- + gram : ndarray of shape (n_samples, n_samples) + The Gram matrix. + X_mean : ndarray of shape (n_feature,) + The weighted mean of ``X`` for each feature. + + Notes + ----- + When X is dense the centering has been done in preprocessing + so the mean is 0 and we just compute XX^T. + + When X is sparse it has not been centered in preprocessing, but it has + been scaled by sqrt(sample weights). + + When self.fit_intercept is False no centering is done. + + The centered X is never actually computed because centering would break + the sparsity of X. 
+ """ + center = self.fit_intercept and sparse.issparse(X) + if not center: + # in this case centering has been done in preprocessing + # or we are not fitting an intercept. + X_mean = np.zeros(X.shape[1], dtype=X.dtype) + return safe_sparse_dot(X, X.T, dense_output=True), X_mean + # X is sparse + n_samples = X.shape[0] + sample_weight_matrix = sparse.dia_matrix( + (sqrt_sw, 0), shape=(n_samples, n_samples) + ) + X_weighted = sample_weight_matrix.dot(X) + X_mean, _ = mean_variance_axis(X_weighted, axis=0) + X_mean *= n_samples / sqrt_sw.dot(sqrt_sw) + X_mX = sqrt_sw[:, None] * safe_sparse_dot(X_mean, X.T, dense_output=True) + X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean) + return ( + safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T, + X_mean, + ) + + def _compute_covariance(self, X, sqrt_sw): + """Computes covariance matrix X^TX with possible centering. + + Parameters + ---------- + X : sparse matrix of shape (n_samples, n_features) + The preprocessed design matrix. + + sqrt_sw : ndarray of shape (n_samples,) + square roots of sample weights + + Returns + ------- + covariance : ndarray of shape (n_features, n_features) + The covariance matrix. + X_mean : ndarray of shape (n_feature,) + The weighted mean of ``X`` for each feature. + + Notes + ----- + Since X is sparse it has not been centered in preprocessing, but it has + been scaled by sqrt(sample weights). + + When self.fit_intercept is False no centering is done. + + The centered X is never actually computed because centering would break + the sparsity of X. + """ + if not self.fit_intercept: + # in this case centering has been done in preprocessing + # or we are not fitting an intercept. 
+ X_mean = np.zeros(X.shape[1], dtype=X.dtype) + return safe_sparse_dot(X.T, X, dense_output=True), X_mean + # this function only gets called for sparse X + n_samples = X.shape[0] + sample_weight_matrix = sparse.dia_matrix( + (sqrt_sw, 0), shape=(n_samples, n_samples) + ) + X_weighted = sample_weight_matrix.dot(X) + X_mean, _ = mean_variance_axis(X_weighted, axis=0) + X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw) + weight_sum = sqrt_sw.dot(sqrt_sw) + return ( + safe_sparse_dot(X.T, X, dense_output=True) + - weight_sum * np.outer(X_mean, X_mean), + X_mean, + ) + + def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw): + """Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T) + without explicitly centering X nor computing X.dot(A) + when X is sparse. + + Parameters + ---------- + X : sparse matrix of shape (n_samples, n_features) + + A : ndarray of shape (n_features, n_features) + + X_mean : ndarray of shape (n_features,) + + sqrt_sw : ndarray of shape (n_features,) + square roots of sample weights + + Returns + ------- + diag : np.ndarray, shape (n_samples,) + The computed diagonal. 
+ """ + intercept_col = scale = sqrt_sw + batch_size = X.shape[1] + diag = np.empty(X.shape[0], dtype=X.dtype) + for start in range(0, X.shape[0], batch_size): + batch = slice(start, min(X.shape[0], start + batch_size), 1) + X_batch = np.empty( + (X[batch].shape[0], X.shape[1] + self.fit_intercept), dtype=X.dtype + ) + if self.fit_intercept: + X_batch[:, :-1] = X[batch].toarray() - X_mean * scale[batch][:, None] + X_batch[:, -1] = intercept_col[batch] + else: + X_batch = X[batch].toarray() + diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1) + return diag + + def _eigen_decompose_gram(self, X, y, sqrt_sw): + """Eigendecomposition of X.X^T, used when n_samples <= n_features.""" + # if X is dense it has already been centered in preprocessing + K, X_mean = self._compute_gram(X, sqrt_sw) + if self.fit_intercept: + # to emulate centering X with sample weights, + # ie removing the weighted average, we add a column + # containing the square roots of the sample weights. + # by centering, it is orthogonal to the other columns + K += np.outer(sqrt_sw, sqrt_sw) + eigvals, Q = linalg.eigh(K) + QT_y = np.dot(Q.T, y) + return X_mean, eigvals, Q, QT_y + + def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, eigvals, Q, QT_y): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have a decomposition of X.X^T (n_samples <= n_features). + """ + w = 1.0 / (eigvals + alpha) + if self.fit_intercept: + # the vector containing the square roots of the sample weights (1 + # when no sample weights) is the eigenvector of XX^T which + # corresponds to the intercept; we cancel the regularization on + # this dimension. the corresponding eigenvalue is + # sum(sample_weight). 
+ normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw) + intercept_dim = _find_smallest_angle(normalized_sw, Q) + w[intercept_dim] = 0 # cancel regularization for the intercept + + c = np.dot(Q, self._diag_dot(w, QT_y)) + G_inverse_diag = self._decomp_diag(w, Q) + # handle case where y is 2-d + if len(y.shape) != 1: + G_inverse_diag = G_inverse_diag[:, np.newaxis] + return G_inverse_diag, c + + def _eigen_decompose_covariance(self, X, y, sqrt_sw): + """Eigendecomposition of X^T.X, used when n_samples > n_features + and X is sparse. + """ + n_samples, n_features = X.shape + cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype) + cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw) + if not self.fit_intercept: + cov = cov[:-1, :-1] + # to emulate centering X with sample weights, + # ie removing the weighted average, we add a column + # containing the square roots of the sample weights. + # by centering, it is orthogonal to the other columns + # when all samples have the same weight we add a column of 1 + else: + cov[-1] = 0 + cov[:, -1] = 0 + cov[-1, -1] = sqrt_sw.dot(sqrt_sw) + nullspace_dim = max(0, n_features - n_samples) + eigvals, V = linalg.eigh(cov) + # remove eigenvalues and vectors in the null space of X^T.X + eigvals = eigvals[nullspace_dim:] + V = V[:, nullspace_dim:] + return X_mean, eigvals, V, X + + def _solve_eigen_covariance_no_intercept( + self, alpha, y, sqrt_sw, X_mean, eigvals, V, X + ): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have a decomposition of X^T.X + (n_samples > n_features and X is sparse), and not fitting an intercept. 
+ """ + w = 1 / (eigvals + alpha) + A = (V * w).dot(V.T) + AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True)) + y_hat = safe_sparse_dot(X, AXy, dense_output=True) + hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw) + if len(y.shape) != 1: + # handle case where y is 2-d + hat_diag = hat_diag[:, np.newaxis] + return (1 - hat_diag) / alpha, (y - y_hat) / alpha + + def _solve_eigen_covariance_intercept( + self, alpha, y, sqrt_sw, X_mean, eigvals, V, X + ): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have a decomposition of X^T.X + (n_samples > n_features and X is sparse), + and we are fitting an intercept. + """ + # the vector [0, 0, ..., 0, 1] + # is the eigenvector of X^TX which + # corresponds to the intercept; we cancel the regularization on + # this dimension. the corresponding eigenvalue is + # sum(sample_weight), e.g. n when uniform sample weights. + intercept_sv = np.zeros(V.shape[0]) + intercept_sv[-1] = 1 + intercept_dim = _find_smallest_angle(intercept_sv, V) + w = 1 / (eigvals + alpha) + w[intercept_dim] = 1 / eigvals[intercept_dim] + A = (V * w).dot(V.T) + # add a column to X containing the square roots of sample weights + X_op = _X_CenterStackOp(X, X_mean, sqrt_sw) + AXy = A.dot(X_op.T.dot(y)) + y_hat = X_op.dot(AXy) + hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw) + # return (1 - hat_diag), (y - y_hat) + if len(y.shape) != 1: + # handle case where y is 2-d + hat_diag = hat_diag[:, np.newaxis] + return (1 - hat_diag) / alpha, (y - y_hat) / alpha + + def _solve_eigen_covariance(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have a decomposition of X^T.X + (n_samples > n_features and X is sparse). 
+ """ + if self.fit_intercept: + return self._solve_eigen_covariance_intercept( + alpha, y, sqrt_sw, X_mean, eigvals, V, X + ) + return self._solve_eigen_covariance_no_intercept( + alpha, y, sqrt_sw, X_mean, eigvals, V, X + ) + + def _svd_decompose_design_matrix(self, X, y, sqrt_sw): + # X already centered + X_mean = np.zeros(X.shape[1], dtype=X.dtype) + if self.fit_intercept: + # to emulate fit_intercept=True situation, add a column + # containing the square roots of the sample weights + # by centering, the other columns are orthogonal to that one + intercept_column = sqrt_sw[:, None] + X = np.hstack((X, intercept_column)) + U, singvals, _ = linalg.svd(X, full_matrices=0) + singvals_sq = singvals**2 + UT_y = np.dot(U.T, y) + return X_mean, singvals_sq, U, UT_y + + def _solve_svd_design_matrix(self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y): + """Compute dual coefficients and diagonal of G^-1. + + Used when we have an SVD decomposition of X + (n_samples > n_features and X is dense). + """ + w = ((singvals_sq + alpha) ** -1) - (alpha**-1) + if self.fit_intercept: + # detect intercept column + normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw) + intercept_dim = _find_smallest_angle(normalized_sw, U) + # cancel the regularization for the intercept + w[intercept_dim] = -(alpha**-1) + c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha**-1) * y + G_inverse_diag = self._decomp_diag(w, U) + (alpha**-1) + if len(y.shape) != 1: + # handle case where y is 2-d + G_inverse_diag = G_inverse_diag[:, np.newaxis] + return G_inverse_diag, c + + def fit(self, X, y, sample_weight=None, score_params=None): + """Fit Ridge regression model with gcv. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data. Will be cast to float64 if necessary. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to float64 if necessary. 
+ + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. Note that the scale of `sample_weight` + has an impact on the loss; i.e. multiplying all weights by `k` + is equivalent to setting `alpha / k`. + + score_params : dict, default=None + Parameters to be passed to the underlying scorer. + + .. versionadded:: 1.5 + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + """ + X, y = validate_data( + self, + X, + y, + accept_sparse=["csr", "csc", "coo"], + dtype=[np.float64], + multi_output=True, + y_numeric=True, + ) + + # alpha_per_target cannot be used in classifier mode. All subclasses + # of _RidgeGCV that are classifiers keep alpha_per_target at its + # default value: False, so the condition below should never happen. + assert not (self.is_clf and self.alpha_per_target) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + self.alphas = np.asarray(self.alphas) + + unscaled_y = y + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, + y, + fit_intercept=self.fit_intercept, + copy=self.copy_X, + sample_weight=sample_weight, + ) + + gcv_mode = _check_gcv_mode(X, self.gcv_mode) + + if gcv_mode == "eigen": + decompose = self._eigen_decompose_gram + solve = self._solve_eigen_gram + elif gcv_mode == "svd": + if sparse.issparse(X): + decompose = self._eigen_decompose_covariance + solve = self._solve_eigen_covariance + else: + decompose = self._svd_decompose_design_matrix + solve = self._solve_svd_design_matrix + + n_samples = X.shape[0] + + if sample_weight is not None: + X, y, sqrt_sw = _rescale_data(X, y, sample_weight) + else: + sqrt_sw = np.ones(n_samples, dtype=X.dtype) + + X_mean, *decomposition = decompose(X, y, sqrt_sw) + + n_y = 1 if len(y.shape) == 1 else y.shape[1] + n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas) + + if 
self.store_cv_results: + self.cv_results_ = np.empty((n_samples * n_y, n_alphas), dtype=X.dtype) + + best_coef, best_score, best_alpha = None, None, None + + for i, alpha in enumerate(np.atleast_1d(self.alphas)): + G_inverse_diag, c = solve(float(alpha), y, sqrt_sw, X_mean, *decomposition) + if self.scoring is None: + squared_errors = (c / G_inverse_diag) ** 2 + alpha_score = self._score_without_scorer(squared_errors=squared_errors) + if self.store_cv_results: + self.cv_results_[:, i] = squared_errors.ravel() + else: + predictions = y - (c / G_inverse_diag) + # Rescale predictions back to original scale + if sample_weight is not None: # avoid the unecessary division by ones + if predictions.ndim > 1: + predictions /= sqrt_sw[:, None] + else: + predictions /= sqrt_sw + predictions += y_offset + + if self.store_cv_results: + self.cv_results_[:, i] = predictions.ravel() + + score_params = score_params or {} + alpha_score = self._score( + predictions=predictions, + y=unscaled_y, + n_y=n_y, + scorer=self.scoring, + score_params=score_params, + ) + + # Keep track of the best model + if best_score is None: + # initialize + if self.alpha_per_target and n_y > 1: + best_coef = c + best_score = np.atleast_1d(alpha_score) + best_alpha = np.full(n_y, alpha) + else: + best_coef = c + best_score = alpha_score + best_alpha = alpha + else: + # update + if self.alpha_per_target and n_y > 1: + to_update = alpha_score > best_score + best_coef[:, to_update] = c[:, to_update] + best_score[to_update] = alpha_score[to_update] + best_alpha[to_update] = alpha + elif alpha_score > best_score: + best_coef, best_score, best_alpha = c, alpha_score, alpha + + self.alpha_ = best_alpha + self.best_score_ = best_score + self.dual_coef_ = best_coef + self.coef_ = safe_sparse_dot(self.dual_coef_.T, X) + if y.ndim == 1 or y.shape[1] == 1: + self.coef_ = self.coef_.ravel() + + if sparse.issparse(X): + X_offset = X_mean * X_scale + else: + X_offset += X_mean * X_scale + self._set_intercept(X_offset, 
y_offset, X_scale) + + if self.store_cv_results: + if len(y.shape) == 1: + cv_results_shape = n_samples, n_alphas + else: + cv_results_shape = n_samples, n_y, n_alphas + self.cv_results_ = self.cv_results_.reshape(cv_results_shape) + + return self + + def _score_without_scorer(self, squared_errors): + """Performs scoring using squared errors when the scorer is None.""" + if self.alpha_per_target: + _score = -squared_errors.mean(axis=0) + else: + _score = -squared_errors.mean() + + return _score + + def _score(self, *, predictions, y, n_y, scorer, score_params): + """Performs scoring with the specified scorer using the + predictions and the true y values. + """ + if self.is_clf: + identity_estimator = _IdentityClassifier(classes=np.arange(n_y)) + _score = scorer( + identity_estimator, + predictions, + y.argmax(axis=1), + **score_params, + ) + else: + identity_estimator = _IdentityRegressor() + if self.alpha_per_target: + _score = np.array( + [ + scorer( + identity_estimator, + predictions[:, j], + y[:, j], + **score_params, + ) + for j in range(n_y) + ] + ) + else: + _score = scorer(identity_estimator, predictions, y, **score_params) + + return _score + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + # Required since this is neither a RegressorMixin nor a ClassifierMixin + tags.target_tags.required = True + return tags + + +class _BaseRidgeCV(LinearModel): + _parameter_constraints: dict = { + "alphas": ["array-like", Interval(Real, 0, None, closed="neither")], + "fit_intercept": ["boolean"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "cv": ["cv_object"], + "gcv_mode": [StrOptions({"auto", "svd", "eigen"}), None], + "store_cv_results": ["boolean", Hidden(None)], + "alpha_per_target": ["boolean"], + "store_cv_values": ["boolean", Hidden(StrOptions({"deprecated"}))], + } + + def __init__( + self, + alphas=(0.1, 1.0, 10.0), + *, + fit_intercept=True, + scoring=None, + cv=None, + gcv_mode=None, + store_cv_results=None, + 
alpha_per_target=False, + store_cv_values="deprecated", + ): + self.alphas = alphas + self.fit_intercept = fit_intercept + self.scoring = scoring + self.cv = cv + self.gcv_mode = gcv_mode + self.store_cv_results = store_cv_results + self.alpha_per_target = alpha_per_target + self.store_cv_values = store_cv_values + + def fit(self, X, y, sample_weight=None, **params): + """Fit Ridge regression model with cv. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. If using GCV, will be cast to float64 + if necessary. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + **params : dict, default=None + Extra parameters for the underlying scorer. + + .. versionadded:: 1.5 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + When sample_weight is provided, the selected hyperparameter may depend + on whether we use leave-one-out cross-validation (cv=None or cv='auto') + or another form of cross-validation, because only leave-one-out + cross-validation takes the sample weights into account when computing + the validation score. + """ + _raise_for_params(params, self, "fit") + cv = self.cv + scorer = self._get_scorer() + + # TODO(1.7): Remove in 1.7 + # Also change `store_cv_results` default back to False + if self.store_cv_values != "deprecated": + if self.store_cv_results is not None: + raise ValueError( + "Both 'store_cv_values' and 'store_cv_results' were set. " + "'store_cv_values' is deprecated in version 1.5 and will be " + "removed in 1.7. 
To avoid this error, only set 'store_cv_results'." + ) + warnings.warn( + ( + "'store_cv_values' is deprecated in version 1.5 and will be " + "removed in 1.7. Use 'store_cv_results' instead." + ), + FutureWarning, + ) + self._store_cv_results = self.store_cv_values + elif self.store_cv_results is None: + self._store_cv_results = False + else: + self._store_cv_results = self.store_cv_results + + # `_RidgeGCV` does not work for alpha = 0 + if cv is None: + check_scalar_alpha = partial( + check_scalar, + target_type=numbers.Real, + min_val=0.0, + include_boundaries="neither", + ) + else: + check_scalar_alpha = partial( + check_scalar, + target_type=numbers.Real, + min_val=0.0, + include_boundaries="left", + ) + + if isinstance(self.alphas, (np.ndarray, list, tuple)): + n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas) + if n_alphas != 1: + for index, alpha in enumerate(self.alphas): + alpha = check_scalar_alpha(alpha, f"alphas[{index}]") + else: + self.alphas[0] = check_scalar_alpha(self.alphas[0], "alphas") + alphas = np.asarray(self.alphas) + + if sample_weight is not None: + params["sample_weight"] = sample_weight + + if cv is None: + if _routing_enabled(): + routed_params = process_routing( + self, + "fit", + **params, + ) + else: + routed_params = Bunch(scorer=Bunch(score={})) + if sample_weight is not None: + routed_params.scorer.score["sample_weight"] = sample_weight + + # reset `scorer` variable to original user-intend if no scoring is passed + if self.scoring is None: + scorer = None + + estimator = _RidgeGCV( + alphas, + fit_intercept=self.fit_intercept, + scoring=scorer, + gcv_mode=self.gcv_mode, + store_cv_results=self._store_cv_results, + is_clf=is_classifier(self), + alpha_per_target=self.alpha_per_target, + ) + estimator.fit( + X, + y, + sample_weight=sample_weight, + score_params=routed_params.scorer.score, + ) + self.alpha_ = estimator.alpha_ + self.best_score_ = estimator.best_score_ + if self._store_cv_results: + self.cv_results_ = 
estimator.cv_results_ + else: + if self._store_cv_results: + raise ValueError("cv!=None and store_cv_results=True are incompatible") + if self.alpha_per_target: + raise ValueError("cv!=None and alpha_per_target=True are incompatible") + + parameters = {"alpha": alphas} + solver = "sparse_cg" if sparse.issparse(X) else "auto" + model = RidgeClassifier if is_classifier(self) else Ridge + estimator = model( + fit_intercept=self.fit_intercept, + solver=solver, + ) + if _routing_enabled(): + estimator.set_fit_request(sample_weight=True) + + grid_search = GridSearchCV( + estimator, + parameters, + cv=cv, + scoring=scorer, + ) + + grid_search.fit(X, y, **params) + estimator = grid_search.best_estimator_ + self.alpha_ = grid_search.best_estimator_.alpha + self.best_score_ = grid_search.best_score_ + + self.coef_ = estimator.coef_ + self.intercept_ = estimator.intercept_ + self.n_features_in_ = estimator.n_features_in_ + if hasattr(estimator, "feature_names_in_"): + self.feature_names_in_ = estimator.feature_names_in_ + + return self + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.5 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add( + scorer=self.scoring, + method_mapping=MethodMapping().add(caller="fit", callee="score"), + ) + .add( + splitter=self.cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + ) + return router + + def _get_scorer(self): + scorer = check_scoring(estimator=self, scoring=self.scoring, allow_none=True) + if _routing_enabled() and self.scoring is None: + # This estimator passes an array of 1s as sample_weight even if + # sample_weight is not provided by the user. Therefore we need to + # always request it. 
    # TODO(1.7): Remove
    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "Attribute `cv_values_` is deprecated in version 1.5 and will be removed "
        "in 1.7. Use `cv_results_` instead."
    )
    @property
    def cv_values_(self):
        """Deprecated alias for ``cv_results_``.

        Accessing it triggers the deprecation machinery of the
        ``deprecated`` decorator above before returning ``cv_results_``.
        """
        return self.cv_results_
+ + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the efficient Leave-One-Out cross-validation + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if ``y`` is binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used, else, + :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + gcv_mode : {'auto', 'svd', 'eigen'}, default='auto' + Flag indicating which strategy to use when performing + Leave-One-Out Cross-Validation. Options are:: + + 'auto' : use 'svd' if n_samples > n_features, otherwise use 'eigen' + 'svd' : force use of singular value decomposition of X when X is + dense, eigenvalue decomposition of X^T.X when X is sparse. + 'eigen' : force computation via eigendecomposition of X.X^T + + The 'auto' mode is the default and is intended to pick the cheaper + option of the two depending on the shape of the training data. + + store_cv_results : bool, default=False + Flag indicating if the cross-validation values corresponding to + each alpha should be stored in the ``cv_results_`` attribute (see + below). This flag is only compatible with ``cv=None`` (i.e. using + Leave-One-Out Cross-Validation). + + .. versionchanged:: 1.5 + Parameter name changed from `store_cv_values` to `store_cv_results`. + + alpha_per_target : bool, default=False + Flag indicating whether to optimize the alpha value (picked from the + `alphas` parameter list) for each target separately (for multi-output + settings: multiple prediction targets). When set to `True`, after + fitting, the `alpha_` attribute will contain a value for each target. + When set to `False`, a single alpha is used for all targets. + + .. 
versionadded:: 0.24 + + store_cv_values : bool + Flag indicating if the cross-validation values corresponding to + each alpha should be stored in the ``cv_values_`` attribute (see + below). This flag is only compatible with ``cv=None`` (i.e. using + Leave-One-Out Cross-Validation). + + .. deprecated:: 1.5 + `store_cv_values` is deprecated in version 1.5 in favor of + `store_cv_results` and will be removed in version 1.7. + + Attributes + ---------- + cv_results_ : ndarray of shape (n_samples, n_alphas) or \ + shape (n_samples, n_targets, n_alphas), optional + Cross-validation values for each alpha (only available if + ``store_cv_results=True`` and ``cv=None``). After ``fit()`` has been + called, this attribute will contain the mean squared errors if + `scoring is None` otherwise it will contain standardized per point + prediction values. + + .. versionchanged:: 1.5 + `cv_values_` changed to `cv_results_`. + + coef_ : ndarray of shape (n_features) or (n_targets, n_features) + Weight vector(s). + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + alpha_ : float or ndarray of shape (n_targets,) + Estimated regularization parameter, or, if ``alpha_per_target=True``, + the estimated regularization parameter for each target. + + best_score_ : float or ndarray of shape (n_targets,) + Score of base estimator with best alpha, or, if + ``alpha_per_target=True``, a score for each target. + + .. versionadded:: 0.23 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Ridge : Ridge regression. + RidgeClassifier : Classifier based on ridge regression on {-1, 1} labels. 
+ RidgeClassifierCV : Ridge classifier with built-in cross validation. + + Examples + -------- + >>> from sklearn.datasets import load_diabetes + >>> from sklearn.linear_model import RidgeCV + >>> X, y = load_diabetes(return_X_y=True) + >>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y) + >>> clf.score(X, y) + 0.5166... + """ + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, **params): + """Fit Ridge regression model with cv. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. If using GCV, will be cast to float64 + if necessary. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. If given a float, every sample + will have the same weight. + + **params : dict, default=None + Parameters to be passed to the underlying scorer. + + .. versionadded:: 1.5 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Fitted estimator. + + Notes + ----- + When sample_weight is provided, the selected hyperparameter may depend + on whether we use leave-one-out cross-validation (cv=None or cv='auto') + or another form of cross-validation, because only leave-one-out + cross-validation takes the sample weights into account when computing + the validation score. + """ + super().fit(X, y, sample_weight=sample_weight, **params) + return self + + +class RidgeClassifierCV(_RidgeClassifierMixin, _BaseRidgeCV): + """Ridge classifier with built-in cross-validation. + + See glossary entry for :term:`cross-validation estimator`. + + By default, it performs Leave-One-Out Cross-Validation. 
Currently, + only the n_features > n_samples case is handled efficiently. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alphas : array-like of shape (n_alphas,), default=(0.1, 1.0, 10.0) + Array of alpha values to try. + Regularization strength; must be a positive float. Regularization + improves the conditioning of the problem and reduces the variance of + the estimates. Larger values specify stronger regularization. + Alpha corresponds to ``1 / (2C)`` in other linear models such as + :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. + If using Leave-One-Out cross-validation, alphas must be strictly positive. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + scoring : str, callable, default=None + A string (see :ref:`scoring_parameter`) or a scorer callable object / + function with signature ``scorer(estimator, X, y)``. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the efficient Leave-One-Out cross-validation + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. 
+ + store_cv_results : bool, default=False + Flag indicating if the cross-validation results corresponding to + each alpha should be stored in the ``cv_results_`` attribute (see + below). This flag is only compatible with ``cv=None`` (i.e. using + Leave-One-Out Cross-Validation). + + .. versionchanged:: 1.5 + Parameter name changed from `store_cv_values` to `store_cv_results`. + + store_cv_values : bool + Flag indicating if the cross-validation values corresponding to + each alpha should be stored in the ``cv_values_`` attribute (see + below). This flag is only compatible with ``cv=None`` (i.e. using + Leave-One-Out Cross-Validation). + + .. deprecated:: 1.5 + `store_cv_values` is deprecated in version 1.5 in favor of + `store_cv_results` and will be removed in version 1.7. + + Attributes + ---------- + cv_results_ : ndarray of shape (n_samples, n_targets, n_alphas), optional + Cross-validation results for each alpha (only if ``store_cv_results=True`` and + ``cv=None``). After ``fit()`` has been called, this attribute will + contain the mean squared errors if `scoring is None` otherwise it + will contain standardized per point prediction values. + + .. versionchanged:: 1.5 + `cv_values_` changed to `cv_results_`. + + coef_ : ndarray of shape (1, n_features) or (n_targets, n_features) + Coefficient of the features in the decision function. + + ``coef_`` is of shape (1, n_features) when the given problem is binary. + + intercept_ : float or ndarray of shape (n_targets,) + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + alpha_ : float + Estimated regularization parameter. + + best_score_ : float + Score of base estimator with best alpha. + + .. versionadded:: 0.23 + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. 
Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + Ridge : Ridge regression. + RidgeClassifier : Ridge classifier. + RidgeCV : Ridge regression with built-in cross validation. + + Notes + ----- + For multi-class classification, n_class classifiers are trained in + a one-versus-all approach. Concretely, this is implemented by taking + advantage of the multi-variate response support in Ridge. + + Examples + -------- + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.linear_model import RidgeClassifierCV + >>> X, y = load_breast_cancer(return_X_y=True) + >>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y) + >>> clf.score(X, y) + 0.9630... + """ + + _parameter_constraints: dict = { + **_BaseRidgeCV._parameter_constraints, + "class_weight": [dict, StrOptions({"balanced"}), None], + } + for param in ("gcv_mode", "alpha_per_target"): + _parameter_constraints.pop(param) + + def __init__( + self, + alphas=(0.1, 1.0, 10.0), + *, + fit_intercept=True, + scoring=None, + cv=None, + class_weight=None, + store_cv_results=None, + store_cv_values="deprecated", + ): + super().__init__( + alphas=alphas, + fit_intercept=fit_intercept, + scoring=scoring, + cv=cv, + store_cv_results=store_cv_results, + store_cv_values=store_cv_values, + ) + self.class_weight = class_weight + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, **params): + """Fit Ridge classifier with cv. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples + and `n_features` is the number of features. When using GCV, + will be cast to float64 if necessary. + + y : ndarray of shape (n_samples,) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : float or ndarray of shape (n_samples,), default=None + Individual weights for each sample. 
If given a float, every sample + will have the same weight. + + **params : dict, default=None + Parameters to be passed to the underlying scorer. + + .. versionadded:: 1.5 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Fitted estimator. + """ + # `RidgeClassifier` does not accept "sag" or "saga" solver and thus support + # csr, csc, and coo sparse matrices. By using solver="eigen" we force to accept + # all sparse format. + X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, solver="eigen") + + # If cv is None, gcv mode will be used and we used the binarized Y + # since y will not be binarized in _RidgeGCV estimator. + # If cv is not None, a GridSearchCV with some RidgeClassifier + # estimators are used where y will be binarized. Thus, we pass y + # instead of the binarized Y. + target = Y if self.cv is None else y + super().fit(X, target, sample_weight=sample_weight, **params) + return self diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sag.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sag.py new file mode 100644 index 0000000000000000000000000000000000000000..12e5d049b0b1f88b17405f5633d6d7371a3cca83 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sag.py @@ -0,0 +1,370 @@ +"""Solvers for Ridge and LogisticRegression using SAG algorithm""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings + +import numpy as np + +from ..exceptions import ConvergenceWarning +from ..utils import check_array +from ..utils.extmath import row_norms +from ..utils.validation import _check_sample_weight +from ._base import make_dataset +from ._sag_fast import sag32, sag64 + + +def get_auto_step_size( + max_squared_sum, alpha_scaled, loss, 
    fit_intercept, n_samples=None, is_saga=False
):
    """Compute automatic step size for SAG solver.

    The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max sum of squares for over all samples.

    Parameters
    ----------
    max_squared_sum : float
        Maximum squared sum of X over samples.

    alpha_scaled : float
        Constant that multiplies the regularization term, scaled by
        1. / n_samples, the number of samples.

    loss : {'log', 'squared', 'multinomial'}
        The loss function used in SAG solver.

    fit_intercept : bool
        Specifies if a constant (a.k.a. bias or intercept) will be
        added to the decision function.

    n_samples : int, default=None
        Number of rows in X. Useful if is_saga=True.

    is_saga : bool, default=False
        Whether to return step size for the SAGA algorithm or the SAG
        algorithm.

    Returns
    -------
    step_size : float
        Step size used in SAG solver.

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document

    :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    "SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives" <1407.0202>`
    """
    if loss in ("log", "multinomial"):
        # L bounds the Lipschitz constant of the gradient: 1/4 factor for
        # logistic-type losses, plus the L2 term alpha_scaled.
        L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
    elif loss == "squared":
        # inverse Lipschitz constant for squared loss
        L = max_squared_sum + int(fit_intercept) + alpha_scaled
    else:
        # NOTE(review): the message omits 'multinomial', which the branch
        # above accepts — consider listing all three valid losses here.
        raise ValueError(
            "Unknown loss function for SAG solver, got %s instead of 'log' or 'squared'"
            % loss
        )
    if is_saga:
        # SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
        # See Defazio et al. 2014
        mun = min(2 * n_samples * alpha_scaled, L)
        step = 1.0 / (2 * L + mun)
    else:
        # SAG theoretical step size is 1/16L but it is recommended to use 1 / L
        # see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
        # slide 65
        step = 1.0 / L
    return step


def sag_solver(
    X,
    y,
    sample_weight=None,
    loss="log",
    alpha=1.0,
    beta=0.0,
    max_iter=1000,
    tol=0.001,
    verbose=0,
    random_state=None,
    check_input=True,
    max_squared_sum=None,
    warm_start_mem=None,
    is_saga=False,
):
    """SAG solver for Ridge and LogisticRegression.

    SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a constant learning rate.

    IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
    same scale. You can normalize the data by using
    sklearn.preprocessing.StandardScaler on your data before passing it to the
    fit method.

    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values for the features. It will
    fit the data according to squared loss or log loss.

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using the squared euclidean norm L2.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.

    y : ndarray of shape (n_samples,)
        Target values. With loss='multinomial', y must be label encoded
        (see preprocessing.LabelEncoder). For loss='log' it must be in [0, 1].

    sample_weight : array-like of shape (n_samples,), default=None
        Weights applied to individual samples (1. for unweighted).

    loss : {'log', 'squared', 'multinomial'}, default='log'
        Loss function that will be optimized:
        -'log' is the binary logistic loss, as used in LogisticRegression.
        -'squared' is the squared loss, as used in Ridge.
        -'multinomial' is the multinomial logistic loss, as used in
         LogisticRegression.

        .. versionadded:: 0.18
           *loss='multinomial'*

    alpha : float, default=1.
        L2 regularization term in the objective function
        ``(0.5 * alpha * || W ||_F^2)``.

    beta : float, default=0.
        L1 regularization term in the objective function
        ``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.

    max_iter : int, default=1000
        The max number of passes over the training data if the stopping
        criteria is not reached.

    tol : float, default=0.001
        The stopping criteria for the weights. The iterations will stop when
        max(change in weights) / max(weights) < tol.

    verbose : int, default=0
        The verbosity level.

    random_state : int, RandomState instance or None, default=None
        Used when shuffling the data. Pass an int for reproducible output
        across multiple function calls.
        See :term:`Glossary <random_state>`.

    check_input : bool, default=True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default=None
        Maximum squared sum of X over samples. If None, it will be computed,
        going through all the samples. The value should be precomputed
        to speed up cross validation.

    warm_start_mem : dict, default=None
        The initialization parameters used for warm starting. Warm starting is
        currently used in LogisticRegression but not in Ridge.
        It contains:
            - 'coef': the weight vector, with the intercept in last line
                if the intercept is fitted.
            - 'gradient_memory': the scalar gradient for all seen samples.
            - 'sum_gradient': the sum of gradient over all seen samples,
                for each feature.
            - 'intercept_sum_gradient': the sum of gradient over all seen
                samples, for the intercept.
            - 'seen': array of boolean describing the seen samples.
            - 'num_seen': the number of seen samples.

    is_saga : bool, default=False
        Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allow for l1 regularisation.

    Returns
    -------
    coef_ : ndarray of shape (n_features,)
        Weight vector.

    n_iter_ : int
        The number of full pass on all samples.

    warm_start_mem : dict
        Contains a 'coef' key with the fitted result, and possibly the
        fitted intercept at the end of the array. Contains also other keys
        used for warm starting.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(n_samples, n_features)
    >>> y = rng.randn(n_samples)
    >>> clf = linear_model.Ridge(solver='sag')
    >>> clf.fit(X, y)
    Ridge(solver='sag')

    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.LogisticRegression(solver='sag')
    >>> clf.fit(X, y)
    LogisticRegression(solver='sag')

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document

    :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    "SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives" <1407.0202>`

    See Also
    --------
    Ridge, SGDRegressor, ElasticNet, Lasso, SVR,
    LogisticRegression, SGDClassifier, LinearSVC, Perceptron
    """
    if warm_start_mem is None:
        warm_start_mem = {}
    # Ridge default max_iter is None
    if max_iter is None:
        max_iter = 1000

    if check_input:
        _dtype = [np.float64, np.float32]
        X = check_array(X, dtype=_dtype, accept_sparse="csr", order="C")
        y = check_array(y, dtype=_dtype, ensure_2d=False, order="C")

    n_samples, n_features = X.shape[0], X.shape[1]
    # As in SGD, the alpha is scaled by n_samples.
    alpha_scaled = float(alpha) / n_samples
    beta_scaled = float(beta) / n_samples

    # if loss == 'multinomial', y should be label encoded.
    n_classes = int(y.max()) + 1 if loss == "multinomial" else 1

    # initialization
    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

    if "coef" in warm_start_mem.keys():
        coef_init = warm_start_mem["coef"]
    else:
        # assume fit_intercept is False
        coef_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")

    # coef_init contains possibly the intercept_init at the end.
    # Note that Ridge centers the data before fitting, so fit_intercept=False.
    fit_intercept = coef_init.shape[0] == (n_features + 1)
    if fit_intercept:
        intercept_init = coef_init[-1, :]
        coef_init = coef_init[:-1, :]
    else:
        intercept_init = np.zeros(n_classes, dtype=X.dtype)

    if "intercept_sum_gradient" in warm_start_mem.keys():
        intercept_sum_gradient = warm_start_mem["intercept_sum_gradient"]
    else:
        intercept_sum_gradient = np.zeros(n_classes, dtype=X.dtype)

    if "gradient_memory" in warm_start_mem.keys():
        gradient_memory_init = warm_start_mem["gradient_memory"]
    else:
        gradient_memory_init = np.zeros(
            (n_samples, n_classes), dtype=X.dtype, order="C"
        )
    if "sum_gradient" in warm_start_mem.keys():
        sum_gradient_init = warm_start_mem["sum_gradient"]
    else:
        sum_gradient_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")

    if "seen" in warm_start_mem.keys():
        seen_init = warm_start_mem["seen"]
    else:
        seen_init = np.zeros(n_samples, dtype=np.int32, order="C")

    if "num_seen" in warm_start_mem.keys():
        num_seen_init = warm_start_mem["num_seen"]
    else:
        num_seen_init = 0

    dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)

    if max_squared_sum is None:
        max_squared_sum = row_norms(X, squared=True).max()
    step_size = get_auto_step_size(
        max_squared_sum,
        alpha_scaled,
        loss,
        fit_intercept,
        n_samples=n_samples,
        is_saga=is_saga,
    )
    # step_size * alpha_scaled == 1 would make the JIT update's weight scale
    # (1 - step_size * alpha) collapse to 0, hence the explicit guard.
    if step_size * alpha_scaled == 1:
        raise ZeroDivisionError(
            "Current sag implementation does not handle "
            "the case step_size * alpha_scaled == 1"
        )

    # Dispatch to the dtype-specialized Cython routine (_sag_fast.pyx.tp).
    sag = sag64 if X.dtype == np.float64 else sag32
    num_seen, n_iter_ = sag(
        dataset,
        coef_init,
        intercept_init,
        n_samples,
        n_features,
        n_classes,
        tol,
        max_iter,
        loss,
        step_size,
        alpha_scaled,
        beta_scaled,
        sum_gradient_init,
        gradient_memory_init,
        seen_init,
        num_seen_init,
        fit_intercept,
        intercept_sum_gradient,
        intercept_decay,
        is_saga,
        verbose,
    )

    if n_iter_ == max_iter:
        warnings.warn(
            "The max_iter was reached which means the coef_ did not converge",
            ConvergenceWarning,
        )

    if fit_intercept:
        coef_init = np.vstack((coef_init, intercept_init))

    warm_start_mem = {
        "coef": coef_init,
        "sum_gradient": sum_gradient_init,
        "intercept_sum_gradient": intercept_sum_gradient,
        "gradient_memory": gradient_memory_init,
        "seen": seen_init,
        "num_seen": num_seen,
    }

    if loss == "multinomial":
        coef_ = coef_init.T
    else:
        coef_ = coef_init[:, 0]

    return coef_, n_iter_, warm_start_mem
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sag_fast.pyx.tp b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sag_fast.pyx.tp
new file mode 100644
index 0000000000000000000000000000000000000000..4502436ffe3129e7e687641f369bc292e94597d1
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sag_fast.pyx.tp
@@ -0,0 +1,647 @@
{{py:

"""

Template file for easily generate fused types consistent code using Tempita
(https://github.com/cython/cython/blob/master/Cython/Tempita/_tempita.py).

Generated file: sag_fast.pyx

Each class is duplicated for all dtypes (float and double). The keywords
between double braces are substituted during the build.

Authors: Danny Sullivan
         Tom Dupre la Tour
         Arthur Mensch
         Joan Massich

License: BSD 3 clause
"""

# name_suffix, c_type, np_type
dtypes = [('64', 'double', 'np.float64'),
          ('32', 'float', 'np.float32')]

}}
"""SAG and SAGA implementation"""

import numpy as np
from libc.math cimport exp, fabs, isfinite, log
from libc.time cimport time, time_t
from libc.stdio cimport printf

from .._loss._loss cimport (
    CyLossFunction,
    CyHalfBinomialLoss,
    CyHalfMultinomialLoss,
    CyHalfSquaredError,
)
from ..utils._seq_dataset cimport SequentialDataset32, SequentialDataset64


{{for name_suffix, c_type, np_type in dtypes}}

# Return the larger of x and y (per-dtype helper usable under nogil).
cdef inline {{c_type}} fmax{{name_suffix}}({{c_type}} x, {{c_type}} y) noexcept nogil:
    if x > y:
        return x
    return y

{{endfor}}

{{for name_suffix, c_type, np_type in dtypes}}

# Soft-thresholding: proximal operator of the L1 penalty, used by SAGA.
cdef inline {{c_type}} _soft_thresholding{{name_suffix}}({{c_type}} x, {{c_type}} shrinkage) noexcept nogil:
    return fmax{{name_suffix}}(x - shrinkage, 0) - fmax{{name_suffix}}(- x - shrinkage, 0)

{{endfor}}


{{for name_suffix, c_type, np_type in dtypes}}

def sag{{name_suffix}}(
    SequentialDataset{{name_suffix}} dataset,
    {{c_type}}[:, ::1] weights_array,
    {{c_type}}[::1] intercept_array,
    int n_samples,
    int n_features,
    int n_classes,
    double tol,
    int max_iter,
    str loss_function,
    double step_size,
    double alpha,
    double beta,
    {{c_type}}[:, ::1] sum_gradient_init,
    {{c_type}}[:, ::1] gradient_memory_init,
    bint[::1] seen_init,
    int num_seen,
    bint fit_intercept,
    {{c_type}}[::1] intercept_sum_gradient_init,
    double intercept_decay,
    bint saga,
    bint verbose
):
    """Stochastic Average Gradient (SAG) and SAGA solvers.

    Used in Ridge and LogisticRegression.

    Some implementation details:

    - Just-in-time (JIT) update: In SAG(A), the average-gradient update is
      collinear with the drawn sample X_i. Therefore, if the data is sparse, the
      random sample X_i will change the average gradient only on features j where
      X_ij != 0. In some cases, the average gradient on feature j might change
      only after k random samples with no change. In these cases, instead of
      applying k times the same gradient step on feature j, we apply the gradient
      step only once, scaled by k. This is called the "just-in-time update", and
      it is performed in `lagged_update{{name_suffix}}`. This function also
      applies the proximal operator after the gradient step (if L1 regularization
      is used in SAGA).

    - Weight scale: In SAG(A), the weights are scaled down at each iteration
      due to the L2 regularization. To avoid updating all the weights at each
      iteration, the weight scale is factored out in a separate variable `wscale`
      which is only used in the JIT update. When this variable is too small, it
      is reset for numerical stability using the function
      `scale_weights{{name_suffix}}`. This reset requires applying all remaining
      JIT updates. This reset is also performed every `n_samples` iterations
      before each convergence check, so when the algorithm stops, we are sure
      that there is no remaining JIT updates.

    Reference
    ---------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document
    (section 4.3)

    :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    "SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives" <1407.0202>`
    """
    # the data pointer for x, the current sample
    cdef {{c_type}} *x_data_ptr = NULL
    # the index pointer for the column of the data
    cdef int *x_ind_ptr = NULL
    # the number of non-zero features for current sample
    cdef int xnnz = -1
    # the label value for current sample
    cdef {{c_type}} y
    # the sample weight
    cdef {{c_type}} sample_weight

    # helper variable for indexes
    cdef int f_idx, s_idx, feature_ind, class_ind, j
    # the number of pass through all samples
    cdef int n_iter = 0
    # helper to track iterations through samples
    cdef int sample_itr
    # the index (row number) of the current sample
    cdef int sample_ind

    # the maximum change in weights, used to compute stopping criteria
    cdef {{c_type}} max_change
    # a holder variable for the max weight, used to compute stopping criteria
    cdef {{c_type}} max_weight

    # the start time of the fit
    cdef time_t start_time
    # the end time of the fit
    cdef time_t end_time

    # precomputation since the step size does not change in this implementation
    cdef {{c_type}} wscale_update = 1.0 - step_size * alpha

    # helper for cumulative sum
    cdef {{c_type}} cum_sum

    # the pointer to the coef_ or weights
    cdef {{c_type}}* weights = &weights_array[0, 0]

    # the sum of gradients for each feature
    cdef {{c_type}}* sum_gradient = &sum_gradient_init[0, 0]

    # the previously seen gradient for each sample
    cdef {{c_type}}* gradient_memory = &gradient_memory_init[0, 0]

    # the cumulative sums needed for JIT params
    cdef {{c_type}}[::1] cumulative_sums = np.empty(n_samples, dtype={{np_type}}, order="c")

    # the index for the last time this feature was updated
    cdef int[::1] feature_hist = np.zeros(n_features, dtype=np.int32, order="c")

    # the previous weights to use to compute stopping criteria
    cdef {{c_type}}[:, ::1] previous_weights_array = np.zeros((n_features, n_classes), dtype={{np_type}}, order="c")
    cdef {{c_type}}* previous_weights = &previous_weights_array[0, 0]

    cdef {{c_type}}[::1] prediction = np.zeros(n_classes, dtype={{np_type}}, order="c")

    cdef {{c_type}}[::1] gradient = np.zeros(n_classes, dtype={{np_type}}, order="c")

    # Intermediate variable that need declaration since cython cannot infer when templating
    cdef {{c_type}} val

    # Bias correction term in saga
    cdef {{c_type}} gradient_correction

    # the scalar used for multiplying z
    cdef {{c_type}} wscale = 1.0

    # return value (-1 if an error occurred, 0 otherwise)
    cdef int status = 0

    # the cumulative sums for each iteration for the sparse implementation
    cumulative_sums[0] = 0.0

    # the multiplicative scale needed for JIT params
    cdef {{c_type}}[::1] cumulative_sums_prox
    cdef {{c_type}}* cumulative_sums_prox_ptr

    # The proximal (L1) path is only taken for SAGA with beta > 0.
    cdef bint prox = beta > 0 and saga

    # Loss function to optimize
    cdef CyLossFunction loss
    # Whether the loss function is multinomial
    cdef bint multinomial = False
    # Multinomial loss function
    cdef CyHalfMultinomialLoss multiloss

    if loss_function == "multinomial":
        multinomial = True
        multiloss = CyHalfMultinomialLoss()
    elif loss_function == "log":
        loss = CyHalfBinomialLoss()
    elif loss_function == "squared":
        loss = CyHalfSquaredError()
    else:
        raise ValueError("Invalid loss parameter: got %s instead of "
                         "one of ('log', 'squared', 'multinomial')"
                         % loss_function)

    if prox:
        cumulative_sums_prox = np.empty(n_samples, dtype={{np_type}}, order="c")
        cumulative_sums_prox_ptr = &cumulative_sums_prox[0]
    else:
        cumulative_sums_prox = None
        cumulative_sums_prox_ptr = NULL

    with nogil:
        start_time = time(NULL)
        for n_iter in range(max_iter):
            for sample_itr in range(n_samples):
                # extract a random sample
                sample_ind = dataset.random(&x_data_ptr, &x_ind_ptr, &xnnz, &y, &sample_weight)

                # cached index for gradient_memory
                s_idx = sample_ind * n_classes

                # update the number of samples seen and the seen array
                if seen_init[sample_ind] == 0:
                    num_seen += 1
                    seen_init[sample_ind] = 1

                # make the weight updates (just-in-time gradient step, and prox operator)
                if sample_itr > 0:
                    status = lagged_update{{name_suffix}}(
                        weights=weights,
                        wscale=wscale,
                        xnnz=xnnz,
                        n_samples=n_samples,
                        n_classes=n_classes,
                        sample_itr=sample_itr,
                        cumulative_sums=&cumulative_sums[0],
                        cumulative_sums_prox=cumulative_sums_prox_ptr,
                        feature_hist=&feature_hist[0],
                        prox=prox,
                        sum_gradient=sum_gradient,
                        x_ind_ptr=x_ind_ptr,
                        reset=False,
                        n_iter=n_iter
                    )
                    if status == -1:
                        break

                # find the current prediction
                predict_sample{{name_suffix}}(
                    x_data_ptr=x_data_ptr,
                    x_ind_ptr=x_ind_ptr,
                    xnnz=xnnz,
                    w_data_ptr=weights,
                    wscale=wscale,
                    intercept=&intercept_array[0],
                    prediction=&prediction[0],
                    n_classes=n_classes
                )

                # compute the gradient for this sample, given the prediction
                if multinomial:
                    multiloss.cy_gradient(
                        y_true=y,
                        raw_prediction=prediction,
                        sample_weight=sample_weight,
                        gradient_out=gradient,
                    )
                else:
                    gradient[0] = loss.cy_gradient(y, prediction[0]) * sample_weight

                # L2 regularization by simply rescaling the weights
                wscale *= wscale_update

                # make the updates to the sum of gradients
                for j in range(xnnz):
                    feature_ind = x_ind_ptr[j]
                    val = x_data_ptr[j]
                    f_idx = feature_ind * n_classes
                    for class_ind in range(n_classes):
                        gradient_correction = \
                            val * (gradient[class_ind] -
                                   gradient_memory[s_idx + class_ind])
                        if saga:
                            # Note that this is not the main gradient step,
                            # which is performed just-in-time in lagged_update.
                            # This part is done outside the JIT update
                            # as it does not depend on the average gradient.
                            # The prox operator is applied after the JIT update
                            weights[f_idx + class_ind] -= \
                                (gradient_correction * step_size
                                 * (1 - 1. / num_seen) / wscale)
                        sum_gradient[f_idx + class_ind] += gradient_correction

                # fit the intercept
                if fit_intercept:
                    for class_ind in range(n_classes):
                        gradient_correction = (gradient[class_ind] -
                                               gradient_memory[s_idx + class_ind])
                        intercept_sum_gradient_init[class_ind] += gradient_correction
                        gradient_correction *= step_size * (1. - 1. / num_seen)
                        if saga:
                            intercept_array[class_ind] -= \
                                (step_size * intercept_sum_gradient_init[class_ind] /
                                 num_seen * intercept_decay) + gradient_correction
                        else:
                            intercept_array[class_ind] -= \
                                (step_size * intercept_sum_gradient_init[class_ind] /
                                 num_seen * intercept_decay)

                        # check to see that the intercept is not inf or NaN
                        if not isfinite(intercept_array[class_ind]):
                            status = -1
                            break
                    # Break from the n_samples outer loop if an error happened
                    # in the fit_intercept n_classes inner loop
                    if status == -1:
                        break

                # update the gradient memory for this sample
                for class_ind in range(n_classes):
                    gradient_memory[s_idx + class_ind] = gradient[class_ind]

                if sample_itr == 0:
                    cumulative_sums[0] = step_size / (wscale * num_seen)
                    if prox:
                        cumulative_sums_prox[0] = step_size * beta / wscale
                else:
                    cumulative_sums[sample_itr] = \
                        (cumulative_sums[sample_itr - 1] +
                         step_size / (wscale * num_seen))
                    if prox:
                        cumulative_sums_prox[sample_itr] = \
                            (cumulative_sums_prox[sample_itr - 1] +
                             step_size * beta / wscale)
                # If wscale gets too small, we need to reset the scale.
                # This also resets the just-in-time update system.
                if wscale < 1e-9:
                    if verbose:
                        with gil:
                            print("rescaling...")
                    status = scale_weights{{name_suffix}}(
                        weights=weights,
                        wscale=&wscale,
                        n_features=n_features,
                        n_samples=n_samples,
                        n_classes=n_classes,
                        sample_itr=sample_itr,
                        cumulative_sums=&cumulative_sums[0],
                        cumulative_sums_prox=cumulative_sums_prox_ptr,
                        feature_hist=&feature_hist[0],
                        prox=prox,
                        sum_gradient=sum_gradient,
                        n_iter=n_iter
                    )
                    if status == -1:
                        break

            # Break from the n_iter outer loop if an error happened in the
            # n_samples inner loop
            if status == -1:
                break

            # We scale the weights every n_samples iterations and reset the
            # just-in-time update system for numerical stability.
            # Because this reset is done before every convergence check, we are
            # sure there is no remaining lagged update when the algorithm stops.
            status = scale_weights{{name_suffix}}(
                weights=weights,
                wscale=&wscale,
                n_features=n_features,
                n_samples=n_samples,
                n_classes=n_classes,
                sample_itr=n_samples - 1,
                cumulative_sums=&cumulative_sums[0],
                cumulative_sums_prox=cumulative_sums_prox_ptr,
                feature_hist=&feature_hist[0],
                prox=prox,
                sum_gradient=sum_gradient,
                n_iter=n_iter
            )
            if status == -1:
                break

            # check if the stopping criteria is reached
            max_change = 0.0
            max_weight = 0.0
            for idx in range(n_features * n_classes):
                max_weight = fmax{{name_suffix}}(max_weight, fabs(weights[idx]))
                max_change = fmax{{name_suffix}}(max_change, fabs(weights[idx] - previous_weights[idx]))
                previous_weights[idx] = weights[idx]
            if ((max_weight != 0 and max_change / max_weight <= tol)
                    or max_weight == 0 and max_change == 0):
                if verbose:
                    end_time = time(NULL)
                    with gil:
                        print("convergence after %d epochs took %d seconds" %
                              (n_iter + 1, end_time - start_time))
                break
            elif verbose:
                printf('Epoch %d, change: %.8g\n', n_iter + 1,
                       max_change / max_weight)
        n_iter += 1
    # We do the error treatment here based on error code in status to avoid
    # re-acquiring the GIL within the cython code, which slows the computation
    # when the sag/saga solver is used concurrently in multiple Python threads.
    if status == -1:
        raise ValueError(("Floating-point under-/overflow occurred at epoch"
                          " #%d. Scaling input data with StandardScaler or"
                          " MinMaxScaler might help.") % n_iter)

    if verbose and n_iter >= max_iter:
        end_time = time(NULL)
        print(("max_iter reached after %d seconds") %
              (end_time - start_time))

    return num_seen, n_iter

{{endfor}}


{{for name_suffix, c_type, np_type in dtypes}}

cdef int scale_weights{{name_suffix}}(
    {{c_type}}* weights,
    {{c_type}}* wscale,
    int n_features,
    int n_samples,
    int n_classes,
    int sample_itr,
    {{c_type}}* cumulative_sums,
    {{c_type}}* cumulative_sums_prox,
    int* feature_hist,
    bint prox,
    {{c_type}}* sum_gradient,
    int n_iter
) noexcept nogil:
    """Scale the weights and reset wscale to 1.0 for numerical stability, and
    reset the just-in-time (JIT) update system.

    See `sag{{name_suffix}}`'s docstring about the JIT update system.

    wscale = (1 - step_size * alpha) ** (n_iter * n_samples + sample_itr)
    can become very small, so we reset it every n_samples iterations to 1.0 for
    numerical stability. To be able to scale, we first need to update every
    coefficients and reset the just-in-time update system.
    This also limits the size of `cumulative_sums`.
    """

    cdef int status
    # reset=True applies all pending lagged updates over *all* features
    # (x_ind_ptr=NULL), then folds wscale into the weights.
    status = lagged_update{{name_suffix}}(
        weights,
        wscale[0],
        n_features,
        n_samples,
        n_classes,
        sample_itr + 1,
        cumulative_sums,
        cumulative_sums_prox,
        feature_hist,
        prox,
        sum_gradient,
        NULL,
        True,
        n_iter
    )
    # if lagged update succeeded, reset wscale to 1.0
    if status == 0:
        wscale[0] = 1.0
    return status

{{endfor}}


{{for name_suffix, c_type, np_type in dtypes}}

cdef int lagged_update{{name_suffix}}(
    {{c_type}}* weights,
    {{c_type}} wscale,
    int xnnz,
    int n_samples,
    int n_classes,
    int sample_itr,
    {{c_type}}* cumulative_sums,
    {{c_type}}* cumulative_sums_prox,
    int* feature_hist,
    bint prox,
    {{c_type}}* sum_gradient,
    int* x_ind_ptr,
    bint reset,
    int n_iter
) noexcept nogil:
    """Hard perform the JIT updates for non-zero features of present sample.

    See `sag{{name_suffix}}`'s docstring about the JIT update system.

    The updates that awaits are kept in memory using cumulative_sums,
    cumulative_sums_prox, wscale and feature_hist. See original SAGA paper
    (Defazio et al. 2014) for details. If reset=True, we also reset wscale to
    1 (this is done at the end of each epoch).
    """
    cdef int feature_ind, class_ind, idx, f_idx, lagged_ind, last_update_ind
    cdef {{c_type}} cum_sum, grad_step, prox_step, cum_sum_prox
    # When reset=True, xnnz is n_features and feature_ind iterates densely;
    # otherwise it is remapped through x_ind_ptr to the sample's nonzeros.
    for feature_ind in range(xnnz):
        if not reset:
            feature_ind = x_ind_ptr[feature_ind]
        f_idx = feature_ind * n_classes

        cum_sum = cumulative_sums[sample_itr - 1]
        if prox:
            cum_sum_prox = cumulative_sums_prox[sample_itr - 1]
        if feature_hist[feature_ind] != 0:
            cum_sum -= cumulative_sums[feature_hist[feature_ind] - 1]
            if prox:
                cum_sum_prox -= cumulative_sums_prox[feature_hist[feature_ind] - 1]
        if not prox:
            for class_ind in range(n_classes):
                idx = f_idx + class_ind
                weights[idx] -= cum_sum * sum_gradient[idx]
                if reset:
                    weights[idx] *= wscale
                    if not isfinite(weights[idx]):
                        # returning here does not require the gil as the return
                        # type is a C integer
                        return -1
        else:
            for class_ind in range(n_classes):
                idx = f_idx + class_ind
                if fabs(sum_gradient[idx] * cum_sum) < cum_sum_prox:
                    # In this case, we can perform all the gradient steps and
                    # all the proximal steps in this order, which is more
                    # efficient than unrolling all the lagged updates.
                    # Idea taken from scikit-learn-contrib/lightning.
                    weights[idx] -= cum_sum * sum_gradient[idx]
                    weights[idx] = _soft_thresholding{{name_suffix}}(weights[idx],
                                                                     cum_sum_prox)
                else:
                    last_update_ind = feature_hist[feature_ind]
                    if last_update_ind == -1:
                        last_update_ind = sample_itr - 1
                    for lagged_ind in range(sample_itr - 1,
                                            last_update_ind - 1, -1):
                        if lagged_ind > 0:
                            grad_step = (cumulative_sums[lagged_ind]
                                         - cumulative_sums[lagged_ind - 1])
                            prox_step = (cumulative_sums_prox[lagged_ind]
                                         - cumulative_sums_prox[lagged_ind - 1])
                        else:
                            grad_step = cumulative_sums[lagged_ind]
                            prox_step = cumulative_sums_prox[lagged_ind]
                        weights[idx] -= sum_gradient[idx] * grad_step
                        weights[idx] = _soft_thresholding{{name_suffix}}(weights[idx],
                                                                         prox_step)

                if reset:
                    weights[idx] *= wscale
                    # check to see that the weight is not inf or NaN
                    if not isfinite(weights[idx]):
                        return -1
        if reset:
            feature_hist[feature_ind] = sample_itr % n_samples
        else:
            feature_hist[feature_ind] = sample_itr

    if reset:
        cumulative_sums[sample_itr - 1] = 0.0
        if prox:
            cumulative_sums_prox[sample_itr - 1] = 0.0

    return 0

{{endfor}}


{{for name_suffix, c_type, np_type in dtypes}}

cdef void predict_sample{{name_suffix}}(
    {{c_type}}* x_data_ptr,
    int* x_ind_ptr,
    int xnnz,
    {{c_type}}* w_data_ptr,
    {{c_type}} wscale,
    {{c_type}}* intercept,
    {{c_type}}* prediction,
    int n_classes
) noexcept nogil:
    """Compute the prediction given sparse sample x and dense weight w.

    Parameters
    ----------
    x_data_ptr : pointer
        Pointer to the data of the sample x

    x_ind_ptr : pointer
        Pointer to the indices of the sample x

    xnnz : int
        Number of non-zero element in the sample x

    w_data_ptr : pointer
        Pointer to the data of the weights w

    wscale : {{c_type}}
        Scale of the weights w

    intercept : pointer
        Pointer to the intercept

    prediction : pointer
        Pointer to store the resulting prediction

    n_classes : int
        Number of classes in multinomial case. Equals 1 in binary case.

    """
    cdef int feature_ind, class_ind, j
    cdef {{c_type}} innerprod

    for class_ind in range(n_classes):
        innerprod = 0.0
        # Compute the dot product only on non-zero elements of x
        for j in range(xnnz):
            feature_ind = x_ind_ptr[j]
            innerprod += (w_data_ptr[feature_ind * n_classes + class_ind] *
                          x_data_ptr[j])

        prediction[class_ind] = wscale * innerprod + intercept[class_ind]


{{endfor}}
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.pyx.tp b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.pyx.tp
new file mode 100644
index 0000000000000000000000000000000000000000..7944f02a1ab9514e6a7f31be6cd33b7d5ab60553
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sgd_fast.pyx.tp
@@ -0,0 +1,665 @@
{{py:

"""
Template file to easily generate fused types consistent code using Tempita
(https://github.com/cython/cython/blob/master/Cython/Tempita/_tempita.py).

Generated file: _sgd_fast.pyx

Each relevant function is duplicated for the dtypes float and double.
The keywords between double braces are substituted during the build.

Authors: Peter Prettenhofer
         Mathieu Blondel (partial_fit support)
         Rob Zinkov (passive-aggressive)
         Lars Buitinck

License: BSD 3 clause
"""

# The dtypes are defined as follows (name_suffix, c_type, np_type)
dtypes = [
    ("64", "double", "np.float64"),
    ("32", "float", "np.float32"),
]

}}
"""SGD implementation"""

import numpy as np
from time import time

from cython cimport floating
from libc.math cimport exp, fabs, isfinite, log, pow, INFINITY

from .._loss._loss cimport CyLossFunction
from ..utils._typedefs cimport uint32_t, uint8_t
from ..utils._weight_vector cimport WeightVector32, WeightVector64
from ..utils._seq_dataset cimport SequentialDataset32, SequentialDataset64


# Expose C-level integer constants (penalty kinds and learning-rate
# schedules) to the Cython code via a verbatim C snippet.
cdef extern from *:
    """
    /* Penalty constants */
    #define NO_PENALTY 0
    #define L1 1
    #define L2 2
    #define ELASTICNET 3

    /* Learning rate constants */
    #define CONSTANT 1
    #define OPTIMAL 2
    #define INVSCALING 3
    #define ADAPTIVE 4
    #define PA1 5
    #define PA2 6
    """
    int NO_PENALTY = 0
    int L1 = 1
    int L2 = 2
    int ELASTICNET = 3

    int CONSTANT = 1
    int OPTIMAL = 2
    int INVSCALING = 3
    int ADAPTIVE = 4
    int PA1 = 5
    int PA2 = 6


# ----------------------------------------
# Extension Types for Loss Functions
# ----------------------------------------

cdef class Regression(CyLossFunction):
    """Base class for loss functions for regression"""

    def py_loss(self, double p, double y):
        """Python version of `loss` for testing only.

        Pytest needs a python function and can't use cdef functions.

        Parameters
        ----------
        p : double
            The prediction, `p = w^T x + intercept`.
        y : double
            The true value (aka target).

        Returns
        -------
        double
            The loss evaluated at `p` and `y`.
        """
        return self.cy_loss(y, p)

    def py_dloss(self, double p, double y):
        """Python version of `dloss` for testing only.

        Pytest needs a python function and can't use cdef functions.

        Parameters
        ----------
        p : double
            The prediction, `p = w^T x`.
        y : double
            The true value (aka target).

        Returns
        -------
        double
            The derivative of the loss function with regards to `p`.
        """
        return self.cy_gradient(y, p)


cdef class Classification(CyLossFunction):
    """Base class for loss functions for classification"""

    def py_loss(self, double p, double y):
        """Python version of `loss` for testing only."""
        return self.cy_loss(y, p)

    def py_dloss(self, double p, double y):
        """Python version of `dloss` for testing only."""
        return self.cy_gradient(y, p)


cdef class ModifiedHuber(Classification):
    """Modified Huber loss for binary classification with y in {-1, 1}

    This is equivalent to quadratically smoothed SVM with gamma = 2.

    See T. Zhang 'Solving Large Scale Linear Prediction Problems Using
    Stochastic Gradient Descent', ICML'04.
    """
    cdef double cy_loss(self, double y, double p) noexcept nogil:
        cdef double z = p * y
        if z >= 1.0:
            return 0.0
        elif z >= -1.0:
            # quadratic region of the smoothed hinge
            return (1.0 - z) * (1.0 - z)
        else:
            # linear region, keeps the gradient bounded
            return -4.0 * z

    cdef double cy_gradient(self, double y, double p) noexcept nogil:
        cdef double z = p * y
        if z >= 1.0:
            return 0.0
        elif z >= -1.0:
            return 2.0 * (1.0 - z) * -y
        else:
            return -4.0 * y

    def __reduce__(self):
        # support pickling of the extension type
        return ModifiedHuber, ()


cdef class Hinge(Classification):
    """Hinge loss for binary classification tasks with y in {-1,1}

    Parameters
    ----------

    threshold : float > 0.0
        Margin threshold. When threshold=1.0, one gets the loss used by SVM.
        When threshold=0.0, one gets the loss used by the Perceptron.
    """

    # margin threshold (cdef attribute, restored via __reduce__)
    cdef double threshold

    def __init__(self, double threshold=1.0):
        self.threshold = threshold

    cdef double cy_loss(self, double y, double p) noexcept nogil:
        cdef double z = p * y
        if z <= self.threshold:
            return self.threshold - z
        return 0.0

    cdef double cy_gradient(self, double y, double p) noexcept nogil:
        cdef double z = p * y
        if z <= self.threshold:
            return -y
        return 0.0

    def __reduce__(self):
        return Hinge, (self.threshold,)


cdef class SquaredHinge(Classification):
    """Squared Hinge loss for binary classification tasks with y in {-1,1}

    Parameters
    ----------

    threshold : float > 0.0
        Margin threshold. When threshold=1.0, one gets the loss used by
        (quadratically penalized) SVM.
    """

    # margin threshold (cdef attribute, restored via __reduce__)
    cdef double threshold

    def __init__(self, double threshold=1.0):
        self.threshold = threshold

    cdef double cy_loss(self, double y, double p) noexcept nogil:
        cdef double z = self.threshold - p * y
        if z > 0:
            return z * z
        return 0.0

    cdef double cy_gradient(self, double y, double p) noexcept nogil:
        cdef double z = self.threshold - p * y
        if z > 0:
            return -2 * y * z
        return 0.0

    def __reduce__(self):
        return SquaredHinge, (self.threshold,)


cdef class EpsilonInsensitive(Regression):
    """Epsilon-Insensitive loss (used by SVR).

    loss = max(0, |y - p| - epsilon)
    """

    # width of the zero-loss tube around the target
    cdef double epsilon

    def __init__(self, double epsilon):
        self.epsilon = epsilon

    cdef double cy_loss(self, double y, double p) noexcept nogil:
        cdef double ret = fabs(y - p) - self.epsilon
        return ret if ret > 0 else 0

    cdef double cy_gradient(self, double y, double p) noexcept nogil:
        if y - p > self.epsilon:
            return -1
        elif p - y > self.epsilon:
            return 1
        else:
            return 0

    def __reduce__(self):
        return EpsilonInsensitive, (self.epsilon,)


cdef class SquaredEpsilonInsensitive(Regression):
    """Epsilon-Insensitive loss.

    loss = max(0, |y - p| - epsilon)^2
    """

    # width of the zero-loss tube around the target
    cdef double epsilon

    def __init__(self, double epsilon):
        self.epsilon = epsilon

    cdef double cy_loss(self, double y, double p) noexcept nogil:
        cdef double ret = fabs(y - p) - self.epsilon
        return ret * ret if ret > 0 else 0

    cdef double cy_gradient(self, double y, double p) noexcept nogil:
        cdef double z
        z = y - p
        if z > self.epsilon:
            return -2 * (z - self.epsilon)
        elif z < -self.epsilon:
            return 2 * (-z - self.epsilon)
        else:
            return 0

    def __reduce__(self):
        return SquaredEpsilonInsensitive, (self.epsilon,)

{{for name_suffix, c_type, np_type in dtypes}}

def _plain_sgd{{name_suffix}}(
    const {{c_type}}[::1] weights,
    double intercept,
    const {{c_type}}[::1] average_weights,
    double average_intercept,
    CyLossFunction loss,
    int penalty_type,
    double alpha,
    double C,
    double l1_ratio,
    SequentialDataset{{name_suffix}} dataset,
    const uint8_t[::1] validation_mask,
    bint early_stopping,
    validation_score_cb,
    int n_iter_no_change,
    unsigned int max_iter,
    double tol,
    int fit_intercept,
    int verbose,
    bint shuffle,
    uint32_t seed,
    double weight_pos,
    double weight_neg,
    int learning_rate,
    double eta0,
    double power_t,
    bint one_class,
    double t=1.0,
    double intercept_decay=1.0,
    int average=0,
):
    """SGD for generic loss functions and penalties with optional averaging

    Parameters
    ----------
    weights : ndarray[{{c_type}}, ndim=1]
        The allocated vector of weights.
    intercept : double
        The initial intercept.
    average_weights : ndarray[{{c_type}}, ndim=1]
        The average weights as computed for ASGD. Should be None if average
        is 0.
    average_intercept : double
        The average intercept for ASGD. Should be 0 if average is 0.
    loss : CyLossFunction
        A concrete ``CyLossFunction`` object.
    penalty_type : int
        The penalty 2 for L2, 1 for L1, and 3 for Elastic-Net.
    alpha : float
        The regularization parameter.
+ C : float + Maximum step size for passive aggressive. + l1_ratio : float + The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. + l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. + dataset : SequentialDataset + A concrete ``SequentialDataset`` object. + validation_mask : ndarray[uint8_t, ndim=1] + Equal to True on the validation set. + early_stopping : boolean + Whether to use a stopping criterion based on the validation set. + validation_score_cb : callable + A callable to compute a validation score given the current + coefficients and intercept values. + Used only if early_stopping is True. + n_iter_no_change : int + Number of iteration with no improvement to wait before stopping. + max_iter : int + The maximum number of iterations (epochs). + tol: double + The tolerance for the stopping criterion. + fit_intercept : int + Whether or not to fit the intercept (1 or 0). + verbose : int + Print verbose output; 0 for quite. + shuffle : boolean + Whether to shuffle the training data before each epoch. + weight_pos : float + The weight of the positive class. + weight_neg : float + The weight of the negative class. + seed : uint32_t + Seed of the pseudorandom number generator used to shuffle the data. + learning_rate : int + The learning rate: + (1) constant, eta = eta0 + (2) optimal, eta = 1.0/(alpha * t). + (3) inverse scaling, eta = eta0 / pow(t, power_t) + (4) adaptive decrease + (5) Passive Aggressive-I, eta = min(alpha, loss/norm(x)) + (6) Passive Aggressive-II, eta = 1.0 / (norm(x) + 0.5*alpha) + eta0 : double + The initial learning rate. + power_t : double + The exponent for inverse scaling learning rate. + one_class : boolean + Whether to solve the One-Class SVM optimization problem. + t : double + Initial state of the learning rate. This value is equal to the + iteration count except when the learning rate is set to `optimal`. + Default: 1.0. + average : int + The number of iterations before averaging starts. 
average=1 is + equivalent to averaging for all iterations. + + + Returns + ------- + weights : array, shape=[n_features] + The fitted weight vector. + intercept : float + The fitted intercept term. + average_weights : array shape=[n_features] + The averaged weights across iterations. Values are valid only if + average > 0. + average_intercept : float + The averaged intercept across iterations. + Values are valid only if average > 0. + n_iter_ : int + The actual number of iter (epochs). + """ + + # get the data information into easy vars + cdef Py_ssize_t n_samples = dataset.n_samples + cdef Py_ssize_t n_features = weights.shape[0] + + cdef WeightVector{{name_suffix}} w = WeightVector{{name_suffix}}(weights, average_weights) + cdef {{c_type}} *x_data_ptr = NULL + cdef int *x_ind_ptr = NULL + + # helper variables + cdef int no_improvement_count = 0 + cdef bint infinity = False + cdef int xnnz + cdef double eta = 0.0 + cdef double p = 0.0 + cdef double update = 0.0 + cdef double intercept_update = 0.0 + cdef double sumloss = 0.0 + cdef double score = 0.0 + cdef double best_loss = INFINITY + cdef double best_score = -INFINITY + cdef {{c_type}} y = 0.0 + cdef {{c_type}} sample_weight + cdef {{c_type}} class_weight = 1.0 + cdef unsigned int count = 0 + cdef unsigned int train_count = n_samples - np.sum(validation_mask) + cdef unsigned int epoch = 0 + cdef unsigned int i = 0 + cdef int is_hinge = isinstance(loss, Hinge) + cdef double optimal_init = 0.0 + cdef double dloss = 0.0 + cdef double MAX_DLOSS = 1e12 + + cdef long long sample_index + + # q vector is only used for L1 regularization + cdef {{c_type}}[::1] q = None + cdef {{c_type}} * q_data_ptr = NULL + if penalty_type == L1 or penalty_type == ELASTICNET: + q = np.zeros((n_features,), dtype={{np_type}}, order="c") + q_data_ptr = &q[0] + cdef double u = 0.0 + + if penalty_type == L2: + l1_ratio = 0.0 + elif penalty_type == L1: + l1_ratio = 1.0 + + eta = eta0 + + if learning_rate == OPTIMAL: + typw = np.sqrt(1.0 / 
np.sqrt(alpha)) + # computing eta0, the initial learning rate + initial_eta0 = typw / max(1.0, loss.cy_gradient(1.0, -typw)) + # initialize t such that eta at first sample equals eta0 + optimal_init = 1.0 / (initial_eta0 * alpha) + + t_start = time() + with nogil: + for epoch in range(max_iter): + sumloss = 0 + if verbose > 0: + with gil: + print("-- Epoch %d" % (epoch + 1)) + if shuffle: + dataset.shuffle(seed) + for i in range(n_samples): + dataset.next(&x_data_ptr, &x_ind_ptr, &xnnz, + &y, &sample_weight) + + sample_index = dataset.index_data_ptr[dataset.current_index] + if validation_mask[sample_index]: + # do not learn on the validation set + continue + + p = w.dot(x_data_ptr, x_ind_ptr, xnnz) + intercept + if learning_rate == OPTIMAL: + eta = 1.0 / (alpha * (optimal_init + t - 1)) + elif learning_rate == INVSCALING: + eta = eta0 / pow(t, power_t) + + if verbose or not early_stopping: + sumloss += loss.cy_loss(y, p) + + if y > 0.0: + class_weight = weight_pos + else: + class_weight = weight_neg + + if learning_rate == PA1: + update = sqnorm(x_data_ptr, x_ind_ptr, xnnz) + if update == 0: + continue + update = min(C, loss.cy_loss(y, p) / update) + elif learning_rate == PA2: + update = sqnorm(x_data_ptr, x_ind_ptr, xnnz) + update = loss.cy_loss(y, p) / (update + 0.5 / C) + else: + dloss = loss.cy_gradient(y, p) + # clip dloss with large values to avoid numerical + # instabilities + if dloss < -MAX_DLOSS: + dloss = -MAX_DLOSS + elif dloss > MAX_DLOSS: + dloss = MAX_DLOSS + update = -eta * dloss + + if learning_rate >= PA1: + if is_hinge: + # classification + update *= y + elif y - p < 0: + # regression + update *= -1 + + update *= class_weight * sample_weight + + if penalty_type >= L2: + # do not scale to negative values when eta or alpha are too + # big: instead set the weights to zero + w.scale(max(0, 1.0 - ((1.0 - l1_ratio) * eta * alpha))) + + if update != 0.0: + w.add(x_data_ptr, x_ind_ptr, xnnz, update) + if fit_intercept == 1: + intercept_update = update + 
if one_class: # specific for One-Class SVM + intercept_update -= 2. * eta * alpha + if intercept_update != 0: + intercept += intercept_update * intercept_decay + + if 0 < average <= t: + # compute the average for the intercept and update the + # average weights, this is done regardless as to whether + # the update is 0 + + w.add_average(x_data_ptr, x_ind_ptr, xnnz, + update, (t - average + 1)) + average_intercept += ((intercept - average_intercept) / + (t - average + 1)) + + if penalty_type == L1 or penalty_type == ELASTICNET: + u += (l1_ratio * eta * alpha) + l1penalty{{name_suffix}}(w, q_data_ptr, x_ind_ptr, xnnz, u) + + t += 1 + count += 1 + + # report epoch information + if verbose > 0: + with gil: + print("Norm: %.2f, NNZs: %d, Bias: %.6f, T: %d, " + "Avg. loss: %f" + % (w.norm(), np.nonzero(weights)[0].shape[0], + intercept, count, sumloss / train_count)) + print("Total training time: %.2f seconds." + % (time() - t_start)) + + # floating-point under-/overflow check. + if (not isfinite(intercept) or any_nonfinite(weights)): + infinity = True + break + + # evaluate the score on the validation set + if early_stopping: + with gil: + score = validation_score_cb(weights.base, intercept) + if tol > -INFINITY and score < best_score + tol: + no_improvement_count += 1 + else: + no_improvement_count = 0 + if score > best_score: + best_score = score + # or evaluate the loss on the training set + else: + if tol > -INFINITY and sumloss > best_loss - tol * train_count: + no_improvement_count += 1 + else: + no_improvement_count = 0 + if sumloss < best_loss: + best_loss = sumloss + + # if there is no improvement several times in a row + if no_improvement_count >= n_iter_no_change: + if learning_rate == ADAPTIVE and eta > 1e-6: + eta = eta / 5 + no_improvement_count = 0 + else: + if verbose: + with gil: + print("Convergence after %d epochs took %.2f " + "seconds" % (epoch + 1, time() - t_start)) + break + + if infinity: + raise ValueError(("Floating-point under-/overflow 
occurred at epoch" + " #%d. Scaling input data with StandardScaler or" + " MinMaxScaler might help.") % (epoch + 1)) + + w.reset_wscale() + + return ( + weights.base, + intercept, + None if average_weights is None else average_weights.base, + average_intercept, + epoch + 1 + ) + +{{endfor}} + + +cdef inline bint any_nonfinite(const floating[::1] w) noexcept nogil: + for i in range(w.shape[0]): + if not isfinite(w[i]): + return True + return 0 + + +cdef inline double sqnorm( + floating * x_data_ptr, + int * x_ind_ptr, + int xnnz, +) noexcept nogil: + cdef double x_norm = 0.0 + cdef int j + cdef double z + for j in range(xnnz): + z = x_data_ptr[j] + x_norm += z * z + return x_norm + + +{{for name_suffix, c_type, np_type in dtypes}} + +cdef void l1penalty{{name_suffix}}( + WeightVector{{name_suffix}} w, + {{c_type}} * q_data_ptr, + int *x_ind_ptr, + int xnnz, + double u, +) noexcept nogil: + """Apply the L1 penalty to each updated feature + + This implements the truncated gradient approach by + [Tsuruoka, Y., Tsujii, J., and Ananiadou, S., 2009]. 
+ """ + cdef double z = 0.0 + cdef int j = 0 + cdef int idx = 0 + cdef double wscale = w.wscale + cdef {{c_type}} *w_data_ptr = w.w_data_ptr + for j in range(xnnz): + idx = x_ind_ptr[j] + z = w_data_ptr[idx] + if wscale * z > 0.0: + w_data_ptr[idx] = max( + 0.0, w_data_ptr[idx] - ((u + q_data_ptr[idx]) / wscale)) + + elif wscale * z < 0.0: + w_data_ptr[idx] = min( + 0.0, w_data_ptr[idx] + ((u - q_data_ptr[idx]) / wscale)) + + q_data_ptr[idx] += wscale * (w_data_ptr[idx] - z) + +{{endfor}} diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_theil_sen.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_theil_sen.py new file mode 100644 index 0000000000000000000000000000000000000000..e6a4fba57401d7d9d8586f3f1820341db332c32b --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_theil_sen.py @@ -0,0 +1,468 @@ +""" +A Theil-Sen Estimator for Multiple Linear Regression Model +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + + +import warnings +from itertools import combinations +from numbers import Integral, Real + +import numpy as np +from joblib import effective_n_jobs +from scipy import linalg +from scipy.linalg.lapack import get_lapack_funcs +from scipy.special import binom + +from ..base import RegressorMixin, _fit_context +from ..exceptions import ConvergenceWarning +from ..utils import check_random_state +from ..utils._param_validation import Hidden, Interval, StrOptions +from ..utils.parallel import Parallel, delayed +from ..utils.validation import validate_data +from ._base import LinearModel + +_EPSILON = np.finfo(np.double).eps + + +def _modified_weiszfeld_step(X, x_old): + """Modified Weiszfeld step. + + This function defines one iteration step in order to approximate the + spatial median (L1 median). It is a form of an iteratively re-weighted + least squares method. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + x_old : ndarray of shape = (n_features,) + Current start vector. + + Returns + ------- + x_new : ndarray of shape (n_features,) + New iteration step. + + References + ---------- + - On Computation of Spatial Median for Robust Data Mining, 2005 + T. Kärkkäinen and S. Äyrämö + http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf + """ + diff = X - x_old + diff_norm = np.sqrt(np.sum(diff**2, axis=1)) + mask = diff_norm >= _EPSILON + # x_old equals one of our samples + is_x_old_in_X = int(mask.sum() < X.shape[0]) + + diff = diff[mask] + diff_norm = diff_norm[mask][:, np.newaxis] + quotient_norm = linalg.norm(np.sum(diff / diff_norm, axis=0)) + + if quotient_norm > _EPSILON: # to avoid division by zero + new_direction = np.sum(X[mask, :] / diff_norm, axis=0) / np.sum( + 1 / diff_norm, axis=0 + ) + else: + new_direction = 1.0 + quotient_norm = 1.0 + + return ( + max(0.0, 1.0 - is_x_old_in_X / quotient_norm) * new_direction + + min(1.0, is_x_old_in_X / quotient_norm) * x_old + ) + + +def _spatial_median(X, max_iter=300, tol=1.0e-3): + """Spatial median (L1 median). + + The spatial median is member of a class of so-called M-estimators which + are defined by an optimization problem. Given a number of p points in an + n-dimensional space, the point x minimizing the sum of all distances to the + p other points is called spatial median. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + max_iter : int, default=300 + Maximum number of iterations. + + tol : float, default=1.e-3 + Stop the algorithm if spatial_median has converged. + + Returns + ------- + spatial_median : ndarray of shape = (n_features,) + Spatial median. 
+ + n_iter : int + Number of iterations needed. + + References + ---------- + - On Computation of Spatial Median for Robust Data Mining, 2005 + T. Kärkkäinen and S. Äyrämö + http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf + """ + if X.shape[1] == 1: + return 1, np.median(X.ravel(), keepdims=True) + + tol **= 2 # We are computing the tol on the squared norm + spatial_median_old = np.mean(X, axis=0) + + for n_iter in range(max_iter): + spatial_median = _modified_weiszfeld_step(X, spatial_median_old) + if np.sum((spatial_median_old - spatial_median) ** 2) < tol: + break + else: + spatial_median_old = spatial_median + else: + warnings.warn( + "Maximum number of iterations {max_iter} reached in " + "spatial median for TheilSen regressor." + "".format(max_iter=max_iter), + ConvergenceWarning, + ) + return n_iter, spatial_median + + +def _breakdown_point(n_samples, n_subsamples): + """Approximation of the breakdown point. + + Parameters + ---------- + n_samples : int + Number of samples. + + n_subsamples : int + Number of subsamples to consider. + + Returns + ------- + breakdown_point : float + Approximation of breakdown point. + """ + return ( + 1 + - ( + 0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1) + + n_subsamples + - 1 + ) + / n_samples + ) + + +def _lstsq(X, y, indices, fit_intercept): + """Least Squares Estimator for TheilSenRegressor class. + + This function calculates the least squares method on a subset of rows of X + and y defined by the indices array. Optionally, an intercept column is + added if intercept is set to true. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Design matrix, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : ndarray of shape (n_samples,) + Target vector, where `n_samples` is the number of samples. + + indices : ndarray of shape (n_subpopulation, n_subsamples) + Indices of all subsamples with respect to the chosen subpopulation. 
+ + fit_intercept : bool + Fit intercept or not. + + Returns + ------- + weights : ndarray of shape (n_subpopulation, n_features + intercept) + Solution matrix of n_subpopulation solved least square problems. + """ + fit_intercept = int(fit_intercept) + n_features = X.shape[1] + fit_intercept + n_subsamples = indices.shape[1] + weights = np.empty((indices.shape[0], n_features)) + X_subpopulation = np.ones((n_subsamples, n_features)) + # gelss need to pad y_subpopulation to be of the max dim of X_subpopulation + y_subpopulation = np.zeros((max(n_subsamples, n_features))) + (lstsq,) = get_lapack_funcs(("gelss",), (X_subpopulation, y_subpopulation)) + + for index, subset in enumerate(indices): + X_subpopulation[:, fit_intercept:] = X[subset, :] + y_subpopulation[:n_subsamples] = y[subset] + weights[index] = lstsq(X_subpopulation, y_subpopulation)[1][:n_features] + + return weights + + +class TheilSenRegressor(RegressorMixin, LinearModel): + """Theil-Sen Estimator: robust multivariate regression model. + + The algorithm calculates least square solutions on subsets with size + n_subsamples of the samples in X. Any value of n_subsamples between the + number of features and samples leads to an estimator with a compromise + between robustness and efficiency. Since the number of least square + solutions is "n_samples choose n_subsamples", it can be extremely large + and can therefore be limited with max_subpopulation. If this limit is + reached, the subsets are chosen randomly. In a final step, the spatial + median (or L1 median) is calculated of all least square solutions. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations. + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + .. deprecated:: 1.6 + `copy_X` was deprecated in 1.6 and will be removed in 1.8. 
+ It has no effect as a copy is always made. + + max_subpopulation : int, default=1e4 + Instead of computing with a set of cardinality 'n choose k', where n is + the number of samples and k is the number of subsamples (at least + number of features), consider only a stochastic subpopulation of a + given maximal size if 'n choose k' is larger than max_subpopulation. + For other than small problem sizes this parameter will determine + memory usage and runtime if n_subsamples is not changed. Note that the + data type should be int but floats such as 1e4 can be accepted too. + + n_subsamples : int, default=None + Number of samples to calculate the parameters. This is at least the + number of features (plus 1 if fit_intercept=True) and the number of + samples as a maximum. A lower number leads to a higher breakdown + point and a low efficiency while a high number leads to a low + breakdown point and a high efficiency. If None, take the + minimum number of subsamples leading to maximal robustness. + If n_subsamples is set to n_samples, Theil-Sen is identical to least + squares. + + max_iter : int, default=300 + Maximum number of iterations for the calculation of spatial median. + + tol : float, default=1e-3 + Tolerance when calculating spatial median. + + random_state : int, RandomState instance or None, default=None + A random number generator instance to define the state of the random + permutations generator. Pass an int for reproducible output across + multiple function calls. + See :term:`Glossary `. + + n_jobs : int, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : bool, default=False + Verbose mode when fitting the model. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) + Coefficients of the regression model (median of distribution). 
+ + intercept_ : float + Estimated intercept of regression model. + + breakdown_ : float + Approximated breakdown point. + + n_iter_ : int + Number of iterations needed for the spatial median. + + n_subpopulation_ : int + Number of combinations taken into account from 'n choose k', where n is + the number of samples and k is the number of subsamples. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + HuberRegressor : Linear regression model that is robust to outliers. + RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm. + SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD. + + References + ---------- + - Theil-Sen Estimators in a Multiple Linear Regression Model, 2009 + Xin Dang, Hanxiang Peng, Xueqin Wang and Heping Zhang + http://home.olemiss.edu/~xdang/papers/MTSE.pdf + + Examples + -------- + >>> from sklearn.linear_model import TheilSenRegressor + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression( + ... n_samples=200, n_features=2, noise=4.0, random_state=0) + >>> reg = TheilSenRegressor(random_state=0).fit(X, y) + >>> reg.score(X, y) + 0.9884... 
+ >>> reg.predict(X[:1,]) + array([-31.5871...]) + """ + + _parameter_constraints: dict = { + "fit_intercept": ["boolean"], + "copy_X": ["boolean", Hidden(StrOptions({"deprecated"}))], + # target_type should be Integral but can accept Real for backward compatibility + "max_subpopulation": [Interval(Real, 1, None, closed="left")], + "n_subsamples": [None, Integral], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="left")], + "random_state": ["random_state"], + "n_jobs": [None, Integral], + "verbose": ["verbose"], + } + + def __init__( + self, + *, + fit_intercept=True, + copy_X="deprecated", + max_subpopulation=1e4, + n_subsamples=None, + max_iter=300, + tol=1.0e-3, + random_state=None, + n_jobs=None, + verbose=False, + ): + self.fit_intercept = fit_intercept + self.copy_X = copy_X + self.max_subpopulation = max_subpopulation + self.n_subsamples = n_subsamples + self.max_iter = max_iter + self.tol = tol + self.random_state = random_state + self.n_jobs = n_jobs + self.verbose = verbose + + def _check_subparams(self, n_samples, n_features): + n_subsamples = self.n_subsamples + + if self.fit_intercept: + n_dim = n_features + 1 + else: + n_dim = n_features + + if n_subsamples is not None: + if n_subsamples > n_samples: + raise ValueError( + "Invalid parameter since n_subsamples > " + "n_samples ({0} > {1}).".format(n_subsamples, n_samples) + ) + if n_samples >= n_features: + if n_dim > n_subsamples: + plus_1 = "+1" if self.fit_intercept else "" + raise ValueError( + "Invalid parameter since n_features{0} " + "> n_subsamples ({1} > {2})." 
+ "".format(plus_1, n_dim, n_subsamples) + ) + else: # if n_samples < n_features + if n_subsamples != n_samples: + raise ValueError( + "Invalid parameter since n_subsamples != " + "n_samples ({0} != {1}) while n_samples " + "< n_features.".format(n_subsamples, n_samples) + ) + else: + n_subsamples = min(n_dim, n_samples) + + all_combinations = max(1, np.rint(binom(n_samples, n_subsamples))) + n_subpopulation = int(min(self.max_subpopulation, all_combinations)) + + return n_subsamples, n_subpopulation + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit linear model. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. + y : ndarray of shape (n_samples,) + Target values. + + Returns + ------- + self : returns an instance of self. + Fitted `TheilSenRegressor` estimator. + """ + if self.copy_X != "deprecated": + warnings.warn( + "`copy_X` was deprecated in 1.6 and will be removed in 1.8 since it " + "has no effect internally. 
Simply leave this parameter to its default " + "value to avoid this warning.", + FutureWarning, + ) + + random_state = check_random_state(self.random_state) + X, y = validate_data(self, X, y, y_numeric=True) + n_samples, n_features = X.shape + n_subsamples, self.n_subpopulation_ = self._check_subparams( + n_samples, n_features + ) + self.breakdown_ = _breakdown_point(n_samples, n_subsamples) + + if self.verbose: + print("Breakdown point: {0}".format(self.breakdown_)) + print("Number of samples: {0}".format(n_samples)) + tol_outliers = int(self.breakdown_ * n_samples) + print("Tolerable outliers: {0}".format(tol_outliers)) + print("Number of subpopulations: {0}".format(self.n_subpopulation_)) + + # Determine indices of subpopulation + if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation: + indices = list(combinations(range(n_samples), n_subsamples)) + else: + indices = [ + random_state.choice(n_samples, size=n_subsamples, replace=False) + for _ in range(self.n_subpopulation_) + ] + + n_jobs = effective_n_jobs(self.n_jobs) + index_list = np.array_split(indices, n_jobs) + weights = Parallel(n_jobs=n_jobs, verbose=self.verbose)( + delayed(_lstsq)(X, y, index_list[job], self.fit_intercept) + for job in range(n_jobs) + ) + weights = np.vstack(weights) + self.n_iter_, coefs = _spatial_median( + weights, max_iter=self.max_iter, tol=self.tol + ) + + if self.fit_intercept: + self.intercept_ = coefs[0] + self.coef_ = coefs[1:] + else: + self.intercept_ = 0.0 + self.coef_ = coefs + + return self diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/meson.build b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..00ab496fb60aae75b603a5dfabab74c0ef14f73a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/meson.build @@ -0,0 +1,34 @@ +# .pyx is generated, so this is needed to make Cython compilation work 
+linear_model_cython_tree = [ + fs.copyfile('__init__.py'), +] + +py.extension_module( + '_cd_fast', + ['_cd_fast.pyx', utils_cython_tree], + cython_args: cython_args, + subdir: 'sklearn/linear_model', + install: true +) + +name_list = ['_sgd_fast', '_sag_fast'] + +foreach name: name_list + pyx = custom_target( + name + '_pyx', + output: name + '.pyx', + input: name + '.pyx.tp', + command: [py, tempita, '@INPUT@', '-o', '@OUTDIR@'], + # TODO in principle this should go in py.exension_module below. This is + # temporary work-around for dependency issue with .pyx.tp files. For more + # details, see https://github.com/mesonbuild/meson/issues/13212 + depends: [linear_model_cython_tree, utils_cython_tree], + ) + py.extension_module( + name, + pyx, + cython_args: cython_args, + subdir: 'sklearn/linear_model', + install: true +) +endforeach diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d466fbb80c49c3091ac4083140c8614511eb432e Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ecdb84fa4c152cd4e9907051b82bf4e8e9a5136 Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a5ce272dcafe9e8f108d6ac71d72215b70ac479 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_least_angle.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_least_angle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3bcf968161f37e1fc07bbb919b651f06e9e201f Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_least_angle.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..215605d46aca67b738864bccceaf482f2ce8b705 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9705011b37c8066ed4cc101700465abf010a21e9 Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ransac.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ransac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd6cda5c869749d1074264350fa9fe10cedfcfa2 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/__pycache__/test_ransac.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_coordinate_descent.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_coordinate_descent.py new file mode 100644 index 0000000000000000000000000000000000000000..2eefe45e068d3e5fb9fa422ea48860f8d002add3 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_coordinate_descent.py @@ -0,0 +1,1682 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from copy import deepcopy + +import joblib +import numpy as np +import pytest +from scipy import interpolate, sparse + +from sklearn.base import clone, config_context, is_classifier +from sklearn.datasets import load_diabetes, make_regression +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import ( + ElasticNet, + ElasticNetCV, + Lasso, + LassoCV, + LassoLars, + LassoLarsCV, + LinearRegression, + MultiTaskElasticNet, + MultiTaskElasticNetCV, + MultiTaskLasso, + MultiTaskLassoCV, + Ridge, + RidgeClassifier, + RidgeClassifierCV, + RidgeCV, + enet_path, + lars_path, + lasso_path, +) +from sklearn.linear_model._coordinate_descent import _set_order +from sklearn.model_selection import ( + BaseCrossValidator, + GridSearchCV, + LeaveOneGroupOut, +) +from sklearn.model_selection._split import 
GroupsConsumerMixin +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils import check_array +from sklearn.utils._testing import ( + TempMemmap, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, + ignore_warnings, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + + +@pytest.mark.parametrize("order", ["C", "F"]) +@pytest.mark.parametrize("input_order", ["C", "F"]) +def test_set_order_dense(order, input_order): + """Check that _set_order returns arrays with promised order.""" + X = np.array([[0], [0], [0]], order=input_order) + y = np.array([0, 0, 0], order=input_order) + X2, y2 = _set_order(X, y, order=order) + if order == "C": + assert X2.flags["C_CONTIGUOUS"] + assert y2.flags["C_CONTIGUOUS"] + elif order == "F": + assert X2.flags["F_CONTIGUOUS"] + assert y2.flags["F_CONTIGUOUS"] + + if order == input_order: + assert X is X2 + assert y is y2 + + +@pytest.mark.parametrize("order", ["C", "F"]) +@pytest.mark.parametrize("input_order", ["C", "F"]) +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_set_order_sparse(order, input_order, coo_container): + """Check that _set_order returns sparse matrices in promised format.""" + X = coo_container(np.array([[0], [0], [0]])) + y = coo_container(np.array([0, 0, 0])) + sparse_format = "csc" if input_order == "F" else "csr" + X = X.asformat(sparse_format) + y = X.asformat(sparse_format) + X2, y2 = _set_order(X, y, order=order) + + format = "csc" if order == "F" else "csr" + assert sparse.issparse(X2) and X2.format == format + assert sparse.issparse(y2) and y2.format == format + + +def test_lasso_zero(): + # Check that the lasso can handle zero data without crashing + X = [[0], [0], [0]] + y = [0, 0, 0] + # _cd_fast.pyx tests for gap < tol, but here we get 0.0 < 0.0 + # should probably be changed to gap <= tol ? 
+ with ignore_warnings(category=ConvergenceWarning): + clf = Lasso(alpha=0.1).fit(X, y) + pred = clf.predict([[1], [2], [3]]) + assert_array_almost_equal(clf.coef_, [0]) + assert_array_almost_equal(pred, [0, 0, 0]) + assert_almost_equal(clf.dual_gap_, 0) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +def test_enet_nonfinite_params(): + # Check ElasticNet throws ValueError when dealing with non-finite parameter + # values + rng = np.random.RandomState(0) + n_samples = 10 + fmax = np.finfo(np.float64).max + X = fmax * rng.uniform(size=(n_samples, 2)) + y = rng.randint(0, 2, size=n_samples) + + clf = ElasticNet(alpha=0.1) + msg = "Coordinate descent iterations resulted in non-finite parameter values" + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + +def test_lasso_toy(): + # Test Lasso on a toy example for various values of alpha. + # When validating this against glmnet notice that glmnet divides it + # against nobs. + + X = [[-1], [0], [1]] + Y = [-1, 0, 1] # just a straight line + T = [[2], [3], [4]] # test sample + + clf = Lasso(alpha=1e-8) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [1]) + assert_array_almost_equal(pred, [2, 3, 4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = Lasso(alpha=0.1) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.85]) + assert_array_almost_equal(pred, [1.7, 2.55, 3.4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = Lasso(alpha=0.5) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.25]) + assert_array_almost_equal(pred, [0.5, 0.75, 1.0]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = Lasso(alpha=1) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.0]) + assert_array_almost_equal(pred, [0, 0, 0]) + assert_almost_equal(clf.dual_gap_, 0) + + +def test_enet_toy(): + # Test ElasticNet for various parameters of alpha and l1_ratio. 
+ # Actually, the parameters alpha = 0 should not be allowed. However, + # we test it as a border case. + # ElasticNet is tested with and without precomputed Gram matrix + + X = np.array([[-1.0], [0.0], [1.0]]) + Y = [-1, 0, 1] # just a straight line + T = [[2.0], [3.0], [4.0]] # test sample + + # this should be the same as lasso + clf = ElasticNet(alpha=1e-8, l1_ratio=1.0) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [1]) + assert_array_almost_equal(pred, [2, 3, 4]) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100, precompute=False) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf.set_params(max_iter=100, precompute=True) + clf.fit(X, Y) # with Gram + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf.set_params(max_iter=100, precompute=np.dot(X.T, X)) + clf.fit(X, Y) # with Gram + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) + assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) + assert_almost_equal(clf.dual_gap_, 0) + + clf = ElasticNet(alpha=0.5, l1_ratio=0.5) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.45454], 3) + assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) + assert_almost_equal(clf.dual_gap_, 0) + + +def test_lasso_dual_gap(): + """ + Check that Lasso.dual_gap_ matches its objective formulation, with the + datafit normalized by n_samples + """ + X, y, _, _ = build_dataset(n_samples=10, n_features=30) + n_samples = len(y) + alpha = 0.01 * np.max(np.abs(X.T @ y)) / n_samples + clf = Lasso(alpha=alpha, fit_intercept=False).fit(X, y) + w = clf.coef_ + R = 
y - X @ w + primal = 0.5 * np.mean(R**2) + clf.alpha * np.sum(np.abs(w)) + # dual pt: R / n_samples, dual constraint: norm(X.T @ theta, inf) <= alpha + R /= np.max(np.abs(X.T @ R) / (n_samples * alpha)) + dual = 0.5 * (np.mean(y**2) - np.mean((y - R) ** 2)) + assert_allclose(clf.dual_gap_, primal - dual) + + +def build_dataset(n_samples=50, n_features=200, n_informative_features=10, n_targets=1): + """ + build an ill-posed linear regression problem with many noisy features and + comparatively few samples + """ + random_state = np.random.RandomState(0) + if n_targets > 1: + w = random_state.randn(n_features, n_targets) + else: + w = random_state.randn(n_features) + w[n_informative_features:] = 0.0 + X = random_state.randn(n_samples, n_features) + y = np.dot(X, w) + X_test = random_state.randn(n_samples, n_features) + y_test = np.dot(X_test, w) + return X, y, X_test, y_test + + +def test_lasso_cv(): + X, y, X_test, y_test = build_dataset() + max_iter = 150 + clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, cv=3).fit(X, y) + assert_almost_equal(clf.alpha_, 0.056, 2) + + clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True, cv=3) + clf.fit(X, y) + assert_almost_equal(clf.alpha_, 0.056, 2) + + # Check that the lars and the coordinate descent implementation + # select a similar alpha + lars = LassoLarsCV(max_iter=30, cv=3).fit(X, y) + # for this we check that they don't fall in the grid of + # clf.alphas further than 1 + assert ( + np.abs( + np.searchsorted(clf.alphas_[::-1], lars.alpha_) + - np.searchsorted(clf.alphas_[::-1], clf.alpha_) + ) + <= 1 + ) + # check that they also give a similar MSE + mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.mse_path_.T) + assert_allclose(mse_lars(clf.alphas_[5]).mean(), clf.mse_path_[5].mean(), rtol=1e-2) + + # test set + assert clf.score(X_test, y_test) > 0.99 + + +def test_lasso_cv_with_some_model_selection(): + from sklearn import datasets + from sklearn.model_selection import ShuffleSplit + + 
diabetes = datasets.load_diabetes() + X = diabetes.data + y = diabetes.target + + pipe = make_pipeline(StandardScaler(), LassoCV(cv=ShuffleSplit(random_state=0))) + pipe.fit(X, y) + + +def test_lasso_cv_positive_constraint(): + X, y, X_test, y_test = build_dataset() + max_iter = 500 + + # Ensure the unconstrained fit has a negative coefficient + clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1) + clf_unconstrained.fit(X, y) + assert min(clf_unconstrained.coef_) < 0 + + # On same data, constrained fit has non-negative coefficients + clf_constrained = LassoCV( + n_alphas=3, eps=1e-1, max_iter=max_iter, positive=True, cv=2, n_jobs=1 + ) + clf_constrained.fit(X, y) + assert min(clf_constrained.coef_) >= 0 + + +@pytest.mark.parametrize( + "alphas, err_type, err_msg", + [ + ((1, -1, -100), ValueError, r"alphas\[1\] == -1, must be >= 0.0."), + ( + (-0.1, -1.0, -10.0), + ValueError, + r"alphas\[0\] == -0.1, must be >= 0.0.", + ), + ( + (1, 1.0, "1"), + TypeError, + r"alphas\[2\] must be an instance of float, not str", + ), + ], +) +def test_lassocv_alphas_validation(alphas, err_type, err_msg): + """Check the `alphas` validation in LassoCV.""" + + n_samples, n_features = 5, 5 + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_features) + y = rng.randint(0, 2, n_samples) + lassocv = LassoCV(alphas=alphas) + with pytest.raises(err_type, match=err_msg): + lassocv.fit(X, y) + + +def _scale_alpha_inplace(estimator, n_samples): + """Rescale the parameter alpha from when the estimator is evoked with + normalize set to True as if it were evoked in a Pipeline with normalize set + to False and with a StandardScaler. + """ + if ("alpha" not in estimator.get_params()) and ( + "alphas" not in estimator.get_params() + ): + return + + if isinstance(estimator, (RidgeCV, RidgeClassifierCV)): + # alphas is not validated at this point and can be a list. + # We convert it to a np.ndarray to make sure broadcasting + # is used. 
+ alphas = np.asarray(estimator.alphas) * n_samples + return estimator.set_params(alphas=alphas) + if isinstance(estimator, (Lasso, LassoLars, MultiTaskLasso)): + alpha = estimator.alpha * np.sqrt(n_samples) + if isinstance(estimator, (Ridge, RidgeClassifier)): + alpha = estimator.alpha * n_samples + if isinstance(estimator, (ElasticNet, MultiTaskElasticNet)): + if estimator.l1_ratio == 1: + alpha = estimator.alpha * np.sqrt(n_samples) + elif estimator.l1_ratio == 0: + alpha = estimator.alpha * n_samples + else: + # To avoid silent errors in case of refactoring + raise NotImplementedError + + estimator.set_params(alpha=alpha) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize( + "LinearModel, params", + [ + (Lasso, {"tol": 1e-16, "alpha": 0.1}), + (LassoCV, {"tol": 1e-16}), + (ElasticNetCV, {}), + (RidgeClassifier, {"solver": "sparse_cg", "alpha": 0.1}), + (ElasticNet, {"tol": 1e-16, "l1_ratio": 1, "alpha": 0.01}), + (ElasticNet, {"tol": 1e-16, "l1_ratio": 0, "alpha": 0.01}), + (Ridge, {"solver": "sparse_cg", "tol": 1e-12, "alpha": 0.1}), + (LinearRegression, {}), + (RidgeCV, {}), + (RidgeClassifierCV, {}), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_model_pipeline_same_dense_and_sparse(LinearModel, params, csr_container): + # Test that linear model preceded by StandardScaler in the pipeline and + # with normalize set to False gives the same y_pred and the same .coef_ + # given X sparse or dense + + model_dense = make_pipeline(StandardScaler(with_mean=False), LinearModel(**params)) + + model_sparse = make_pipeline(StandardScaler(with_mean=False), LinearModel(**params)) + + # prepare the data + rng = np.random.RandomState(0) + n_samples = 200 + n_features = 2 + X = rng.randn(n_samples, n_features) + X[X < 0.1] = 0.0 + + X_sparse = csr_container(X) + y = rng.rand(n_samples) + + if is_classifier(model_dense): + y = np.sign(y) + + model_dense.fit(X, y) + model_sparse.fit(X_sparse, 
y) + + assert_allclose(model_sparse[1].coef_, model_dense[1].coef_) + y_pred_dense = model_dense.predict(X) + y_pred_sparse = model_sparse.predict(X_sparse) + assert_allclose(y_pred_dense, y_pred_sparse) + + assert_allclose(model_dense[1].intercept_, model_sparse[1].intercept_) + + +def test_lasso_path_return_models_vs_new_return_gives_same_coefficients(): + # Test that lasso_path with lars_path style output gives the + # same result + + # Some toy data + X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T + y = np.array([1, 2, 3.1]) + alphas = [5.0, 1.0, 0.5] + + # Use lars_path and lasso_path(new output) with 1D linear interpolation + # to compute the same path + alphas_lars, _, coef_path_lars = lars_path(X, y, method="lasso") + coef_path_cont_lars = interpolate.interp1d( + alphas_lars[::-1], coef_path_lars[:, ::-1] + ) + alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas) + coef_path_cont_lasso = interpolate.interp1d( + alphas_lasso2[::-1], coef_path_lasso2[:, ::-1] + ) + + assert_array_almost_equal( + coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas), decimal=1 + ) + + +def test_enet_path(): + # We use a large number of samples and of informative features so that + # the l1_ratio selected is more toward ridge than lasso + X, y, X_test, y_test = build_dataset( + n_samples=200, n_features=100, n_informative_features=100 + ) + max_iter = 150 + + # Here we have a small number of iterations, and thus the + # ElasticNet might not converge. 
This is to speed up tests + clf = ElasticNetCV( + alphas=[0.01, 0.05, 0.1], eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter + ) + ignore_warnings(clf.fit)(X, y) + # Well-conditioned settings, we should have selected our + # smallest penalty + assert_almost_equal(clf.alpha_, min(clf.alphas_)) + # Non-sparse ground truth: we should have selected an elastic-net + # that is closer to ridge than to lasso + assert clf.l1_ratio_ == min(clf.l1_ratio) + + clf = ElasticNetCV( + alphas=[0.01, 0.05, 0.1], + eps=2e-3, + l1_ratio=[0.5, 0.7], + cv=3, + max_iter=max_iter, + precompute=True, + ) + ignore_warnings(clf.fit)(X, y) + + # Well-conditioned settings, we should have selected our + # smallest penalty + assert_almost_equal(clf.alpha_, min(clf.alphas_)) + # Non-sparse ground truth: we should have selected an elastic-net + # that is closer to ridge than to lasso + assert clf.l1_ratio_ == min(clf.l1_ratio) + + # We are in well-conditioned settings with low noise: we should + # have a good test-set performance + assert clf.score(X_test, y_test) > 0.99 + + # Multi-output/target case + X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3) + clf = MultiTaskElasticNetCV( + n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter + ) + ignore_warnings(clf.fit)(X, y) + # We are in well-conditioned settings with low noise: we should + # have a good test-set performance + assert clf.score(X_test, y_test) > 0.99 + assert clf.coef_.shape == (3, 10) + + # Mono-output should have same cross-validated alpha_ and l1_ratio_ + # in both cases. 
+ X, y, _, _ = build_dataset(n_features=10) + clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) + clf1.fit(X, y) + clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) + clf2.fit(X, y[:, np.newaxis]) + assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_) + assert_almost_equal(clf1.alpha_, clf2.alpha_) + + +def test_path_parameters(): + X, y, _, _ = build_dataset() + max_iter = 100 + + clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter, l1_ratio=0.5, tol=1e-3) + clf.fit(X, y) # new params + assert_almost_equal(0.5, clf.l1_ratio) + assert 50 == clf.n_alphas + assert 50 == len(clf.alphas_) + + +def test_warm_start(): + X, y, _, _ = build_dataset() + clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True) + ignore_warnings(clf.fit)(X, y) + ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations + + clf2 = ElasticNet(alpha=0.1, max_iter=10) + ignore_warnings(clf2.fit)(X, y) + assert_array_almost_equal(clf2.coef_, clf.coef_) + + +def test_lasso_alpha_warning(): + X = [[-1], [0], [1]] + Y = [-1, 0, 1] # just a straight line + + clf = Lasso(alpha=0) + warning_message = ( + "With alpha=0, this algorithm does not " + "converge well. 
You are advised to use the " + "LinearRegression estimator" + ) + with pytest.warns(UserWarning, match=warning_message): + clf.fit(X, Y) + + +def test_lasso_positive_constraint(): + X = [[-1], [0], [1]] + y = [1, 0, -1] # just a straight line with negative slope + + lasso = Lasso(alpha=0.1, positive=True) + lasso.fit(X, y) + assert min(lasso.coef_) >= 0 + + lasso = Lasso(alpha=0.1, precompute=True, positive=True) + lasso.fit(X, y) + assert min(lasso.coef_) >= 0 + + +def test_enet_positive_constraint(): + X = [[-1], [0], [1]] + y = [1, 0, -1] # just a straight line with negative slope + + enet = ElasticNet(alpha=0.1, positive=True) + enet.fit(X, y) + assert min(enet.coef_) >= 0 + + +def test_enet_cv_positive_constraint(): + X, y, X_test, y_test = build_dataset() + max_iter = 500 + + # Ensure the unconstrained fit has a negative coefficient + enetcv_unconstrained = ElasticNetCV( + n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1 + ) + enetcv_unconstrained.fit(X, y) + assert min(enetcv_unconstrained.coef_) < 0 + + # On same data, constrained fit has non-negative coefficients + enetcv_constrained = ElasticNetCV( + n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, positive=True, n_jobs=1 + ) + enetcv_constrained.fit(X, y) + assert min(enetcv_constrained.coef_) >= 0 + + +def test_uniform_targets(): + enet = ElasticNetCV(n_alphas=3) + m_enet = MultiTaskElasticNetCV(n_alphas=3) + lasso = LassoCV(n_alphas=3) + m_lasso = MultiTaskLassoCV(n_alphas=3) + + models_single_task = (enet, lasso) + models_multi_task = (m_enet, m_lasso) + + rng = np.random.RandomState(0) + + X_train = rng.random_sample(size=(10, 3)) + X_test = rng.random_sample(size=(10, 3)) + + y1 = np.empty(10) + y2 = np.empty((10, 2)) + + for model in models_single_task: + for y_values in (0, 5): + y1.fill(y_values) + with ignore_warnings(category=ConvergenceWarning): + assert_array_equal(model.fit(X_train, y1).predict(X_test), y1) + assert_array_equal(model.alphas_, [np.finfo(float).resolution] * 3) + + for 
model in models_multi_task: + for y_values in (0, 5): + y2[:, 0].fill(y_values) + y2[:, 1].fill(2 * y_values) + with ignore_warnings(category=ConvergenceWarning): + assert_array_equal(model.fit(X_train, y2).predict(X_test), y2) + assert_array_equal(model.alphas_, [np.finfo(float).resolution] * 3) + + +def test_multi_task_lasso_and_enet(): + X, y, X_test, y_test = build_dataset() + Y = np.c_[y, y] + # Y_test = np.c_[y_test, y_test] + clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y) + assert 0 < clf.dual_gap_ < 1e-5 + assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) + + clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y) + assert 0 < clf.dual_gap_ < 1e-5 + assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) + + clf = MultiTaskElasticNet(alpha=1.0, tol=1e-8, max_iter=1) + warning_message = ( + "Objective did not converge. You might want to " + "increase the number of iterations." + ) + with pytest.warns(ConvergenceWarning, match=warning_message): + clf.fit(X, Y) + + +def test_lasso_readonly_data(): + X = np.array([[-1], [0], [1]]) + Y = np.array([-1, 0, 1]) # just a straight line + T = np.array([[2], [3], [4]]) # test sample + with TempMemmap((X, Y)) as (X, Y): + clf = Lasso(alpha=0.5) + clf.fit(X, Y) + pred = clf.predict(T) + assert_array_almost_equal(clf.coef_, [0.25]) + assert_array_almost_equal(pred, [0.5, 0.75, 1.0]) + assert_almost_equal(clf.dual_gap_, 0) + + +def test_multi_task_lasso_readonly_data(): + X, y, X_test, y_test = build_dataset() + Y = np.c_[y, y] + with TempMemmap((X, Y)) as (X, Y): + Y = np.c_[y, y] + clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y) + assert 0 < clf.dual_gap_ < 1e-5 + assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) + + +def test_enet_multitarget(): + n_targets = 3 + X, y, _, _ = build_dataset( + n_samples=10, n_features=8, n_informative_features=10, n_targets=n_targets + ) + estimator = ElasticNet(alpha=0.01) + estimator.fit(X, y) + coef, intercept, dual_gap = ( + estimator.coef_, + estimator.intercept_, + 
estimator.dual_gap_, + ) + + for k in range(n_targets): + estimator.fit(X, y[:, k]) + assert_array_almost_equal(coef[k, :], estimator.coef_) + assert_array_almost_equal(intercept[k], estimator.intercept_) + assert_array_almost_equal(dual_gap[k], estimator.dual_gap_) + + +def test_multioutput_enetcv_error(): + rng = np.random.RandomState(0) + X = rng.randn(10, 2) + y = rng.randn(10, 2) + clf = ElasticNetCV() + with pytest.raises(ValueError): + clf.fit(X, y) + + +def test_multitask_enet_and_lasso_cv(): + X, y, _, _ = build_dataset(n_features=50, n_targets=3) + clf = MultiTaskElasticNetCV(cv=3).fit(X, y) + assert_almost_equal(clf.alpha_, 0.00556, 3) + clf = MultiTaskLassoCV(cv=3).fit(X, y) + assert_almost_equal(clf.alpha_, 0.00278, 3) + + X, y, _, _ = build_dataset(n_targets=3) + clf = MultiTaskElasticNetCV( + n_alphas=10, eps=1e-3, max_iter=200, l1_ratio=[0.3, 0.5], tol=1e-3, cv=3 + ) + clf.fit(X, y) + assert 0.5 == clf.l1_ratio_ + assert (3, X.shape[1]) == clf.coef_.shape + assert (3,) == clf.intercept_.shape + assert (2, 10, 3) == clf.mse_path_.shape + assert (2, 10) == clf.alphas_.shape + + X, y, _, _ = build_dataset(n_targets=3) + clf = MultiTaskLassoCV(n_alphas=10, eps=1e-3, max_iter=500, tol=1e-3, cv=3) + clf.fit(X, y) + assert (3, X.shape[1]) == clf.coef_.shape + assert (3,) == clf.intercept_.shape + assert (10, 3) == clf.mse_path_.shape + assert 10 == len(clf.alphas_) + + +def test_1d_multioutput_enet_and_multitask_enet_cv(): + X, y, _, _ = build_dataset(n_features=10) + y = y[:, np.newaxis] + clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) + clf.fit(X, y[:, 0]) + clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) + clf1.fit(X, y) + assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_) + assert_almost_equal(clf.alpha_, clf1.alpha_) + assert_almost_equal(clf.coef_, clf1.coef_[0]) + assert_almost_equal(clf.intercept_, clf1.intercept_[0]) + + +def test_1d_multioutput_lasso_and_multitask_lasso_cv(): + X, y, _, _ = 
build_dataset(n_features=10) + y = y[:, np.newaxis] + clf = LassoCV(n_alphas=5, eps=2e-3) + clf.fit(X, y[:, 0]) + clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3) + clf1.fit(X, y) + assert_almost_equal(clf.alpha_, clf1.alpha_) + assert_almost_equal(clf.coef_, clf1.coef_[0]) + assert_almost_equal(clf.intercept_, clf1.intercept_[0]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input_dtype_enet_and_lassocv(csr_container): + X, y, _, _ = build_dataset(n_features=10) + clf = ElasticNetCV(n_alphas=5) + clf.fit(csr_container(X), y) + clf1 = ElasticNetCV(n_alphas=5) + clf1.fit(csr_container(X, dtype=np.float32), y) + assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6) + assert_almost_equal(clf.coef_, clf1.coef_, decimal=6) + + clf = LassoCV(n_alphas=5) + clf.fit(csr_container(X), y) + clf1 = LassoCV(n_alphas=5) + clf1.fit(csr_container(X, dtype=np.float32), y) + assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6) + assert_almost_equal(clf.coef_, clf1.coef_, decimal=6) + + +def test_elasticnet_precompute_incorrect_gram(): + # check that passing an invalid precomputed Gram matrix will raise an + # error. + X, y, _, _ = build_dataset() + + rng = np.random.RandomState(0) + + X_centered = X - np.average(X, axis=0) + garbage = rng.standard_normal(X.shape) + precompute = np.dot(garbage.T, garbage) + + clf = ElasticNet(alpha=0.01, precompute=precompute) + msg = "Gram matrix.*did not pass validation.*" + with pytest.raises(ValueError, match=msg): + clf.fit(X_centered, y) + + +def test_elasticnet_precompute_gram_weighted_samples(): + # check the equivalence between passing a precomputed Gram matrix and + # internal computation using sample weights. 
+ X, y, _, _ = build_dataset() + + rng = np.random.RandomState(0) + sample_weight = rng.lognormal(size=y.shape) + + w_norm = sample_weight * (y.shape / np.sum(sample_weight)) + X_c = X - np.average(X, axis=0, weights=w_norm) + X_r = X_c * np.sqrt(w_norm)[:, np.newaxis] + gram = np.dot(X_r.T, X_r) + + clf1 = ElasticNet(alpha=0.01, precompute=gram) + clf1.fit(X_c, y, sample_weight=sample_weight) + + clf2 = ElasticNet(alpha=0.01, precompute=False) + clf2.fit(X, y, sample_weight=sample_weight) + + assert_allclose(clf1.coef_, clf2.coef_) + + +def test_elasticnet_precompute_gram(): + # Check the dtype-aware check for a precomputed Gram matrix + # (see https://github.com/scikit-learn/scikit-learn/pull/22059 + # and https://github.com/scikit-learn/scikit-learn/issues/21997). + # Here: (X_c.T, X_c)[2, 3] is not equal to np.dot(X_c[:, 2], X_c[:, 3]) + # but within tolerance for np.float32 + + rng = np.random.RandomState(58) + X = rng.binomial(1, 0.25, (1000, 4)).astype(np.float32) + y = rng.rand(1000).astype(np.float32) + + X_c = X - np.average(X, axis=0) + gram = np.dot(X_c.T, X_c) + + clf1 = ElasticNet(alpha=0.01, precompute=gram) + clf1.fit(X_c, y) + + clf2 = ElasticNet(alpha=0.01, precompute=False) + clf2.fit(X, y) + + assert_allclose(clf1.coef_, clf2.coef_) + + +def test_warm_start_convergence(): + X, y, _, _ = build_dataset() + model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y) + n_iter_reference = model.n_iter_ + + # This dataset is not trivial enough for the model to converge in one pass. + assert n_iter_reference > 2 + + # Check that n_iter_ is invariant to multiple calls to fit + # when warm_start=False, all else being equal. 
+ model.fit(X, y) + n_iter_cold_start = model.n_iter_ + assert n_iter_cold_start == n_iter_reference + + # Fit the same model again, using a warm start: the optimizer just performs + # a single pass before checking that it has already converged + model.set_params(warm_start=True) + model.fit(X, y) + n_iter_warm_start = model.n_iter_ + assert n_iter_warm_start == 1 + + +def test_warm_start_convergence_with_regularizer_decrement(): + X, y = load_diabetes(return_X_y=True) + + # Train a model to converge on a lightly regularized problem + final_alpha = 1e-5 + low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y) + + # Fitting a new model on a more regularized version of the same problem. + # Fitting with high regularization is easier it should converge faster + # in general. + high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y) + assert low_reg_model.n_iter_ > high_reg_model.n_iter_ + + # Fit the solution to the original, less regularized version of the + # problem but from the solution of the highly regularized variant of + # the problem as a better starting point. This should also converge + # faster than the original model that starts from zero. + warm_low_reg_model = deepcopy(high_reg_model) + warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha) + warm_low_reg_model.fit(X, y) + assert low_reg_model.n_iter_ > warm_low_reg_model.n_iter_ + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_random_descent(csr_container): + # Test that both random and cyclic selection give the same results. + # Ensure that the test models fully converge and check a wide + # range of conditions. + + # This uses the coordinate descent algo using the gram trick. 
+ X, y, _, _ = build_dataset(n_samples=50, n_features=20) + clf_cyclic = ElasticNet(selection="cyclic", tol=1e-8) + clf_cyclic.fit(X, y) + clf_random = ElasticNet(selection="random", tol=1e-8, random_state=42) + clf_random.fit(X, y) + assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) + assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) + + # This uses the descent algo without the gram trick + clf_cyclic = ElasticNet(selection="cyclic", tol=1e-8) + clf_cyclic.fit(X.T, y[:20]) + clf_random = ElasticNet(selection="random", tol=1e-8, random_state=42) + clf_random.fit(X.T, y[:20]) + assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) + assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) + + # Sparse Case + clf_cyclic = ElasticNet(selection="cyclic", tol=1e-8) + clf_cyclic.fit(csr_container(X), y) + clf_random = ElasticNet(selection="random", tol=1e-8, random_state=42) + clf_random.fit(csr_container(X), y) + assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) + assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) + + # Multioutput case. 
+ new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis])) + clf_cyclic = MultiTaskElasticNet(selection="cyclic", tol=1e-8) + clf_cyclic.fit(X, new_y) + clf_random = MultiTaskElasticNet(selection="random", tol=1e-8, random_state=42) + clf_random.fit(X, new_y) + assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) + assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) + + +def test_enet_path_positive(): + # Test positive parameter + + X, Y, _, _ = build_dataset(n_samples=50, n_features=50, n_targets=2) + + # For mono output + # Test that the coefs returned by positive=True in enet_path are positive + for path in [enet_path, lasso_path]: + pos_path_coef = path(X, Y[:, 0], positive=True)[1] + assert np.all(pos_path_coef >= 0) + + # For multi output, positive parameter is not allowed + # Test that an error is raised + for path in [enet_path, lasso_path]: + with pytest.raises(ValueError): + path(X, Y, positive=True) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_dense_descent_paths(csr_container): + # Test that dense and sparse input give the same input for descent paths. 
+ X, y, _, _ = build_dataset(n_samples=50, n_features=20) + csr = csr_container(X) + for path in [enet_path, lasso_path]: + _, coefs, _ = path(X, y) + _, sparse_coefs, _ = path(csr, y) + assert_array_almost_equal(coefs, sparse_coefs) + + +@pytest.mark.parametrize("path_func", [enet_path, lasso_path]) +def test_path_unknown_parameter(path_func): + """Check that passing parameter not used by the coordinate descent solver + will raise an error.""" + X, y, _, _ = build_dataset(n_samples=50, n_features=20) + err_msg = "Unexpected parameters in params" + with pytest.raises(ValueError, match=err_msg): + path_func(X, y, normalize=True, fit_intercept=True) + + +def test_check_input_false(): + X, y, _, _ = build_dataset(n_samples=20, n_features=10) + X = check_array(X, order="F", dtype="float64") + y = check_array(X, order="F", dtype="float64") + clf = ElasticNet(selection="cyclic", tol=1e-8) + # Check that no error is raised if data is provided in the right format + clf.fit(X, y, check_input=False) + # With check_input=False, an exhaustive check is not made on y but its + # dtype is still cast in _preprocess_data to X's dtype. 
So the test should + # pass anyway + X = check_array(X, order="F", dtype="float32") + with ignore_warnings(category=ConvergenceWarning): + clf.fit(X, y, check_input=False) + # With no input checking, providing X in C order should result in false + # computation + X = check_array(X, order="C", dtype="float64") + with pytest.raises(ValueError): + clf.fit(X, y, check_input=False) + + +@pytest.mark.parametrize("check_input", [True, False]) +def test_enet_copy_X_True(check_input): + X, y, _, _ = build_dataset() + X = X.copy(order="F") + + original_X = X.copy() + enet = ElasticNet(copy_X=True) + enet.fit(X, y, check_input=check_input) + + assert_array_equal(original_X, X) + + +def test_enet_copy_X_False_check_input_False(): + X, y, _, _ = build_dataset() + X = X.copy(order="F") + + original_X = X.copy() + enet = ElasticNet(copy_X=False) + enet.fit(X, y, check_input=False) + + # No copying, X is overwritten + assert np.any(np.not_equal(original_X, X)) + + +def test_overrided_gram_matrix(): + X, y, _, _ = build_dataset(n_samples=20, n_features=10) + Gram = X.T.dot(X) + clf = ElasticNet(selection="cyclic", tol=1e-8, precompute=Gram) + warning_message = ( + "Gram matrix was provided but X was centered" + " to fit intercept: recomputing Gram matrix." + ) + with pytest.warns(UserWarning, match=warning_message): + clf.fit(X, y) + + +@pytest.mark.parametrize("model", [ElasticNet, Lasso]) +def test_lasso_non_float_y(model): + X = [[0, 0], [1, 1], [-1, -1]] + y = [0, 1, 2] + y_float = [0.0, 1.0, 2.0] + + clf = model(fit_intercept=False) + clf.fit(X, y) + clf_float = model(fit_intercept=False) + clf_float.fit(X, y_float) + assert_array_equal(clf.coef_, clf_float.coef_) + + +def test_enet_float_precision(): + # Generate dataset + X, y, X_test, y_test = build_dataset(n_samples=20, n_features=10) + # Here we have a small number of iterations, and thus the + # ElasticNet might not converge. 
This is to speed up tests + + for fit_intercept in [True, False]: + coef = {} + intercept = {} + for dtype in [np.float64, np.float32]: + clf = ElasticNet( + alpha=0.5, + max_iter=100, + precompute=False, + fit_intercept=fit_intercept, + ) + + X = dtype(X) + y = dtype(y) + ignore_warnings(clf.fit)(X, y) + + coef[("simple", dtype)] = clf.coef_ + intercept[("simple", dtype)] = clf.intercept_ + + assert clf.coef_.dtype == dtype + + # test precompute Gram array + Gram = X.T.dot(X) + clf_precompute = ElasticNet( + alpha=0.5, + max_iter=100, + precompute=Gram, + fit_intercept=fit_intercept, + ) + ignore_warnings(clf_precompute.fit)(X, y) + assert_array_almost_equal(clf.coef_, clf_precompute.coef_) + assert_array_almost_equal(clf.intercept_, clf_precompute.intercept_) + + # test multi task enet + multi_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis])) + clf_multioutput = MultiTaskElasticNet( + alpha=0.5, + max_iter=100, + fit_intercept=fit_intercept, + ) + clf_multioutput.fit(X, multi_y) + coef[("multi", dtype)] = clf_multioutput.coef_ + intercept[("multi", dtype)] = clf_multioutput.intercept_ + assert clf.coef_.dtype == dtype + + for v in ["simple", "multi"]: + assert_array_almost_equal( + coef[(v, np.float32)], coef[(v, np.float64)], decimal=4 + ) + assert_array_almost_equal( + intercept[(v, np.float32)], intercept[(v, np.float64)], decimal=4 + ) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +def test_enet_l1_ratio(): + # Test that an error message is raised if an estimator that + # uses _alpha_grid is called with l1_ratio=0 + msg = ( + "Automatic alpha grid generation is not supported for l1_ratio=0. " + "Please supply a grid by providing your estimator with the " + "appropriate `alphas=` argument." 
+ ) + X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]]).T + y = np.array([12, 10, 11, 21, 5]) + + with pytest.raises(ValueError, match=msg): + ElasticNetCV(l1_ratio=0, random_state=42).fit(X, y) + + with pytest.raises(ValueError, match=msg): + MultiTaskElasticNetCV(l1_ratio=0, random_state=42).fit(X, y[:, None]) + + # Test that l1_ratio=0 with alpha>0 produces user warning + warning_message = ( + "Coordinate descent without L1 regularization may " + "lead to unexpected results and is discouraged. " + "Set l1_ratio > 0 to add L1 regularization." + ) + est = ElasticNetCV(l1_ratio=[0], alphas=[1]) + with pytest.warns(UserWarning, match=warning_message): + est.fit(X, y) + + # Test that l1_ratio=0 is allowed if we supply a grid manually + alphas = [0.1, 10] + estkwds = {"alphas": alphas, "random_state": 42} + est_desired = ElasticNetCV(l1_ratio=0.00001, **estkwds) + est = ElasticNetCV(l1_ratio=0, **estkwds) + with ignore_warnings(): + est_desired.fit(X, y) + est.fit(X, y) + assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5) + + est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, **estkwds) + est = MultiTaskElasticNetCV(l1_ratio=0, **estkwds) + with ignore_warnings(): + est.fit(X, y[:, None]) + est_desired.fit(X, y[:, None]) + assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5) + + +def test_coef_shape_not_zero(): + est_no_intercept = Lasso(fit_intercept=False) + est_no_intercept.fit(np.c_[np.ones(3)], np.ones(3)) + assert est_no_intercept.coef_.shape == (1,) + + +def test_warm_start_multitask_lasso(): + X, y, X_test, y_test = build_dataset() + Y = np.c_[y, y] + clf = MultiTaskLasso(alpha=0.1, max_iter=5, warm_start=True) + ignore_warnings(clf.fit)(X, Y) + ignore_warnings(clf.fit)(X, Y) # do a second round with 5 iterations + + clf2 = MultiTaskLasso(alpha=0.1, max_iter=10) + ignore_warnings(clf2.fit)(X, Y) + assert_array_almost_equal(clf2.coef_, clf.coef_) + + +@pytest.mark.parametrize( + "klass, n_classes, kwargs", + [ + (Lasso, 1, 
dict(precompute=True)), + (Lasso, 1, dict(precompute=False)), + ], +) +def test_enet_coordinate_descent(klass, n_classes, kwargs): + """Test that a warning is issued if model does not converge""" + clf = klass(max_iter=2, **kwargs) + n_samples = 5 + n_features = 2 + X = np.ones((n_samples, n_features)) * 1e50 + y = np.ones((n_samples, n_classes)) + if klass == Lasso: + y = y.ravel() + warning_message = ( + "Objective did not converge. You might want to" + " increase the number of iterations." + ) + with pytest.warns(ConvergenceWarning, match=warning_message): + clf.fit(X, y) + + +def test_convergence_warnings(): + random_state = np.random.RandomState(0) + X = random_state.standard_normal((1000, 500)) + y = random_state.standard_normal((1000, 3)) + + # check that the model converges w/o convergence warnings + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + MultiTaskElasticNet().fit(X, y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input_convergence_warning(csr_container): + X, y, _, _ = build_dataset(n_samples=1000, n_features=500) + + with pytest.warns(ConvergenceWarning): + ElasticNet(max_iter=1, tol=0).fit(csr_container(X, dtype=np.float32), y) + + # check that the model converges w/o convergence warnings + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + Lasso().fit(csr_container(X, dtype=np.float32), y) + + +@pytest.mark.parametrize( + "precompute, inner_precompute", + [ + (True, True), + ("auto", False), + (False, False), + ], +) +def test_lassoCV_does_not_set_precompute(monkeypatch, precompute, inner_precompute): + X, y, _, _ = build_dataset() + calls = 0 + + class LassoMock(Lasso): + def fit(self, X, y): + super().fit(X, y) + nonlocal calls + calls += 1 + assert self.precompute == inner_precompute + + monkeypatch.setattr("sklearn.linear_model._coordinate_descent.Lasso", LassoMock) + clf = LassoCV(precompute=precompute) + clf.fit(X, y) + assert 
calls > 0 + + +def test_multi_task_lasso_cv_dtype(): + n_samples, n_features = 10, 3 + rng = np.random.RandomState(42) + X = rng.binomial(1, 0.5, size=(n_samples, n_features)) + X = X.astype(int) # make it explicit that X is int + y = X[:, [0, 0]].copy() + est = MultiTaskLassoCV(n_alphas=5, fit_intercept=True).fit(X, y) + assert_array_almost_equal(est.coef_, [[1, 0, 0]] * 2, decimal=3) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("alpha", [0.01]) +@pytest.mark.parametrize("precompute", [False, True]) +@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) +def test_enet_sample_weight_consistency( + fit_intercept, alpha, precompute, sparse_container, global_random_seed +): + """Test that the impact of sample_weight is consistent. + + Note that this test is stricter than the common test + check_sample_weight_equivalence alone and also tests sparse X. + """ + rng = np.random.RandomState(global_random_seed) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + if sparse_container is not None: + X = sparse_container(X) + params = dict( + alpha=alpha, + fit_intercept=fit_intercept, + precompute=precompute, + tol=1e-6, + l1_ratio=0.5, + ) + + reg = ElasticNet(**params).fit(X, y) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + # 1) sample_weight=np.ones(..) should be equivalent to sample_weight=None + sample_weight = np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 2) sample_weight=None should be equivalent to sample_weight = number + sample_weight = 123.0 + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 3) scaling of sample_weight should have no effect, cf. 
np.average() + sample_weight = rng.uniform(low=0.01, high=2, size=X.shape[0]) + reg = reg.fit(X, y, sample_weight=sample_weight) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + reg.fit(X, y, sample_weight=np.pi * sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # 4) setting elements of sample_weight to 0 is equivalent to removing these samples + sample_weight_0 = sample_weight.copy() + sample_weight_0[-5:] = 0 + y[-5:] *= 1000 # to make excluding those samples important + reg.fit(X, y, sample_weight=sample_weight_0) + coef_0 = reg.coef_.copy() + if fit_intercept: + intercept_0 = reg.intercept_ + reg.fit(X[:-5], y[:-5], sample_weight=sample_weight[:-5]) + assert_allclose(reg.coef_, coef_0, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept_0) + + # 5) check that multiplying sample_weight by 2 is equivalent to repeating + # corresponding samples twice + if sparse_container is not None: + X2 = sparse.vstack([X, X[: n_samples // 2]], format="csc") + else: + X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) + y2 = np.concatenate([y, y[: n_samples // 2]]) + sample_weight_1 = sample_weight.copy() + sample_weight_1[: n_samples // 2] *= 2 + sample_weight_2 = np.concatenate( + [sample_weight, sample_weight[: n_samples // 2]], axis=0 + ) + + reg1 = ElasticNet(**params).fit(X, y, sample_weight=sample_weight_1) + reg2 = ElasticNet(**params).fit(X2, y2, sample_weight=sample_weight_2) + assert_allclose(reg1.coef_, reg2.coef_, rtol=1e-6) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS) +def test_enet_cv_sample_weight_correctness( + fit_intercept, sparse_container, global_random_seed +): + """Test that ElasticNetCV with sample weights gives correct results. 
+ + We fit the same model twice, once with weighted training data, once with repeated + data points in the training data and check that both models converge to the + same solution. + + Since this model uses an internal cross-validation scheme to tune the alpha + regularization parameter, we make sure that the repetitions only occur within + a specific CV group. Data points belonging to other CV groups stay + unit-weighted / "unrepeated". + """ + rng = np.random.RandomState(global_random_seed) + n_splits, n_samples_per_cv, n_features = 3, 10, 5 + X_with_weights = rng.rand(n_splits * n_samples_per_cv, n_features) + beta = rng.rand(n_features) + beta[0:2] = 0 + y_with_weights = X_with_weights @ beta + rng.rand(n_splits * n_samples_per_cv) + + if sparse_container is not None: + X_with_weights = sparse_container(X_with_weights) + params = dict(tol=1e-6) + + # Assign random integer weights only to the first cross-validation group. + # The samples in the other cross-validation groups are left with unit + # weights. 
+ + sw = np.ones_like(y_with_weights) + sw[:n_samples_per_cv] = rng.randint(0, 5, size=n_samples_per_cv) + groups_with_weights = np.concatenate( + [ + np.full(n_samples_per_cv, 0), + np.full(n_samples_per_cv, 1), + np.full(n_samples_per_cv, 2), + ] + ) + splits_with_weights = list( + LeaveOneGroupOut().split(X_with_weights, groups=groups_with_weights) + ) + reg_with_weights = ElasticNetCV( + cv=splits_with_weights, fit_intercept=fit_intercept, **params + ) + + reg_with_weights.fit(X_with_weights, y_with_weights, sample_weight=sw) + + if sparse_container is not None: + X_with_weights = X_with_weights.toarray() + X_with_repetitions = np.repeat(X_with_weights, sw.astype(int), axis=0) + if sparse_container is not None: + X_with_repetitions = sparse_container(X_with_repetitions) + + y_with_repetitions = np.repeat(y_with_weights, sw.astype(int), axis=0) + groups_with_repetitions = np.repeat(groups_with_weights, sw.astype(int), axis=0) + + splits_with_repetitions = list( + LeaveOneGroupOut().split(X_with_repetitions, groups=groups_with_repetitions) + ) + reg_with_repetitions = ElasticNetCV( + cv=splits_with_repetitions, fit_intercept=fit_intercept, **params + ) + reg_with_repetitions.fit(X_with_repetitions, y_with_repetitions) + + # Check that the alpha selection process is the same: + assert_allclose(reg_with_weights.mse_path_, reg_with_repetitions.mse_path_) + assert_allclose(reg_with_weights.alphas_, reg_with_repetitions.alphas_) + assert reg_with_weights.alpha_ == pytest.approx(reg_with_repetitions.alpha_) + + # Check that the final model coefficients are the same: + assert_allclose(reg_with_weights.coef_, reg_with_repetitions.coef_, atol=1e-10) + assert reg_with_weights.intercept_ == pytest.approx(reg_with_repetitions.intercept_) + + +@pytest.mark.parametrize("sample_weight", [False, True]) +def test_enet_cv_grid_search(sample_weight): + """Test that ElasticNetCV gives same result as GridSearchCV.""" + n_samples, n_features = 200, 10 + cv = 5 + X, y = 
make_regression( + n_samples=n_samples, + n_features=n_features, + effective_rank=10, + n_informative=n_features - 4, + noise=10, + random_state=0, + ) + if sample_weight: + sample_weight = np.linspace(1, 5, num=n_samples) + else: + sample_weight = None + + alphas = np.logspace(np.log10(1e-5), np.log10(1), num=10) + l1_ratios = [0.1, 0.5, 0.9] + reg = ElasticNetCV(cv=cv, alphas=alphas, l1_ratio=l1_ratios) + reg.fit(X, y, sample_weight=sample_weight) + + param = {"alpha": alphas, "l1_ratio": l1_ratios} + gs = GridSearchCV( + estimator=ElasticNet(), + param_grid=param, + cv=cv, + scoring="neg_mean_squared_error", + ).fit(X, y, sample_weight=sample_weight) + + assert reg.l1_ratio_ == pytest.approx(gs.best_params_["l1_ratio"]) + assert reg.alpha_ == pytest.approx(gs.best_params_["alpha"]) + + +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("l1_ratio", [0, 0.5, 1]) +@pytest.mark.parametrize("precompute", [False, True]) +@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS) +def test_enet_cv_sample_weight_consistency( + fit_intercept, l1_ratio, precompute, sparse_container +): + """Test that the impact of sample_weight is consistent.""" + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = X.sum(axis=1) + rng.rand(n_samples) + params = dict( + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + precompute=precompute, + tol=1e-6, + cv=3, + ) + if sparse_container is not None: + X = sparse_container(X) + + if l1_ratio == 0: + params.pop("l1_ratio", None) + reg = LassoCV(**params).fit(X, y) + else: + reg = ElasticNetCV(**params).fit(X, y) + coef = reg.coef_.copy() + if fit_intercept: + intercept = reg.intercept_ + + # sample_weight=np.ones(..) 
should be equivalent to sample_weight=None + sample_weight = np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # sample_weight=None should be equivalent to sample_weight = number + sample_weight = 123.0 + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + # scaling of sample_weight should have no effect, cf. np.average() + sample_weight = 2 * np.ones_like(y) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, coef, rtol=1e-6) + if fit_intercept: + assert_allclose(reg.intercept_, intercept) + + +@pytest.mark.parametrize("X_is_sparse", [False, True]) +@pytest.mark.parametrize("fit_intercept", [False, True]) +@pytest.mark.parametrize("sample_weight", [np.array([10, 1, 10, 1]), None]) +def test_enet_alpha_max_sample_weight(X_is_sparse, fit_intercept, sample_weight): + X = np.array([[3.0, 1.0], [2.0, 5.0], [5.0, 3.0], [1.0, 4.0]]) + beta = np.array([1, 1]) + y = X @ beta + if X_is_sparse: + X = sparse.csc_matrix(X) + # Test alpha_max makes coefs zero. + reg = ElasticNetCV(n_alphas=1, cv=2, eps=1, fit_intercept=fit_intercept) + reg.fit(X, y, sample_weight=sample_weight) + assert_allclose(reg.coef_, 0, atol=1e-5) + alpha_max = reg.alpha_ + # Test smaller alpha makes coefs nonzero. + reg = ElasticNet(alpha=0.99 * alpha_max, fit_intercept=fit_intercept) + reg.fit(X, y, sample_weight=sample_weight) + assert_array_less(1e-3, np.max(np.abs(reg.coef_))) + + +@pytest.mark.parametrize("estimator", [ElasticNetCV, LassoCV]) +def test_linear_models_cv_fit_with_loky(estimator): + # LinearModelsCV.fit performs operations on fancy-indexed memmapped + # data when using the loky backend, causing an error due to unexpected + # behavior of fancy indexing of read-only memmaps (cf. numpy#14132). 
+ + # Create a problem sufficiently large to cause memmapping (1MB). + # Unfortunately the scikit-learn and joblib APIs do not make it possible to + # change the max_nbyte of the inner Parallel call. + X, y = make_regression(int(1e6) // 8 + 1, 1) + assert X.nbytes > 1e6 # 1 MB + with joblib.parallel_backend("loky"): + estimator(n_jobs=2, cv=3).fit(X, y) + + +@pytest.mark.parametrize("check_input", [True, False]) +def test_enet_sample_weight_does_not_overwrite_sample_weight(check_input): + """Check that ElasticNet does not overwrite sample_weights.""" + + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples) + + sample_weight_1_25 = 1.25 * np.ones_like(y) + sample_weight = sample_weight_1_25.copy() + + reg = ElasticNet() + reg.fit(X, y, sample_weight=sample_weight, check_input=check_input) + + assert_array_equal(sample_weight, sample_weight_1_25) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize("ridge_alpha", [1e-1, 1.0, 1e6]) +def test_enet_ridge_consistency(ridge_alpha): + # Check that ElasticNet(l1_ratio=0) converges to the same solution as Ridge + # provided that the value of alpha is adapted. + # + # XXX: this test does not pass for weaker regularization (lower values of + # ridge_alpha): it could be either a problem of ElasticNet or Ridge (less + # likely) and depends on the dataset statistics: lower values for + # effective_rank are more problematic in particular. 
+ + rng = np.random.RandomState(42) + n_samples = 300 + X, y = make_regression( + n_samples=n_samples, + n_features=100, + effective_rank=10, + n_informative=50, + random_state=rng, + ) + sw = rng.uniform(low=0.01, high=10, size=X.shape[0]) + alpha = 1.0 + common_params = dict( + tol=1e-12, + ) + ridge = Ridge(alpha=alpha, **common_params).fit(X, y, sample_weight=sw) + + alpha_enet = alpha / sw.sum() + enet = ElasticNet(alpha=alpha_enet, l1_ratio=0, **common_params).fit( + X, y, sample_weight=sw + ) + assert_allclose(ridge.coef_, enet.coef_) + assert_allclose(ridge.intercept_, enet.intercept_) + + +@pytest.mark.parametrize( + "estimator", + [ + Lasso(alpha=1.0), + ElasticNet(alpha=1.0, l1_ratio=0.1), + ], +) +def test_sample_weight_invariance(estimator): + rng = np.random.RandomState(42) + X, y = make_regression( + n_samples=100, + n_features=300, + effective_rank=10, + n_informative=50, + random_state=rng, + ) + sw = rng.uniform(low=0.01, high=2, size=X.shape[0]) + params = dict(tol=1e-12) + + # Check that setting some weights to 0 is equivalent to trimming the + # samples: + cutoff = X.shape[0] // 3 + sw_with_null = sw.copy() + sw_with_null[:cutoff] = 0.0 + X_trimmed, y_trimmed = X[cutoff:, :], y[cutoff:] + sw_trimmed = sw[cutoff:] + + reg_trimmed = ( + clone(estimator) + .set_params(**params) + .fit(X_trimmed, y_trimmed, sample_weight=sw_trimmed) + ) + reg_null_weighted = ( + clone(estimator).set_params(**params).fit(X, y, sample_weight=sw_with_null) + ) + assert_allclose(reg_null_weighted.coef_, reg_trimmed.coef_) + assert_allclose(reg_null_weighted.intercept_, reg_trimmed.intercept_) + + # Check that duplicating the training dataset is equivalent to multiplying + # the weights by 2: + X_dup = np.concatenate([X, X], axis=0) + y_dup = np.concatenate([y, y], axis=0) + sw_dup = np.concatenate([sw, sw], axis=0) + + reg_2sw = clone(estimator).set_params(**params).fit(X, y, sample_weight=2 * sw) + reg_dup = ( + clone(estimator).set_params(**params).fit(X_dup, y_dup, 
sample_weight=sw_dup) + ) + + assert_allclose(reg_2sw.coef_, reg_dup.coef_) + assert_allclose(reg_2sw.intercept_, reg_dup.intercept_) + + +def test_read_only_buffer(): + """Test that sparse coordinate descent works for read-only buffers""" + + rng = np.random.RandomState(0) + clf = ElasticNet(alpha=0.1, copy_X=True, random_state=rng) + X = np.asfortranarray(rng.uniform(size=(100, 10))) + X.setflags(write=False) + + y = rng.rand(100) + clf.fit(X, y) + + +@pytest.mark.parametrize( + "EstimatorCV", + [ElasticNetCV, LassoCV, MultiTaskElasticNetCV, MultiTaskLassoCV], +) +def test_cv_estimators_reject_params_with_no_routing_enabled(EstimatorCV): + """Check that the models inheriting from class:`LinearModelCV` raise an + error when any `params` are passed when routing is not enabled. + """ + X, y = make_regression(random_state=42) + groups = np.array([0, 1] * (len(y) // 2)) + estimator = EstimatorCV() + msg = "is only supported if enable_metadata_routing=True" + with pytest.raises(ValueError, match=msg): + estimator.fit(X, y, groups=groups) + + +@pytest.mark.parametrize( + "MultiTaskEstimatorCV", + [MultiTaskElasticNetCV, MultiTaskLassoCV], +) +@config_context(enable_metadata_routing=True) +def test_multitask_cv_estimators_with_sample_weight(MultiTaskEstimatorCV): + """Check that for :class:`MultiTaskElasticNetCV` and + class:`MultiTaskLassoCV` if `sample_weight` is passed and the + CV splitter does not support `sample_weight` an error is raised. + On the other hand if the splitter does support `sample_weight` + while `sample_weight` is passed there is no error and process + completes smoothly as before. 
+ """ + + class CVSplitter(GroupsConsumerMixin, BaseCrossValidator): + def get_n_splits(self, X=None, y=None, groups=None, metadata=None): + pass # pragma: nocover + + class CVSplitterSampleWeight(CVSplitter): + def split(self, X, y=None, groups=None, sample_weight=None): + split_index = len(X) // 2 + train_indices = list(range(0, split_index)) + test_indices = list(range(split_index, len(X))) + yield test_indices, train_indices + yield train_indices, test_indices + + X, y = make_regression(random_state=42, n_targets=2) + sample_weight = np.ones(X.shape[0]) + + # If CV splitter does not support sample_weight an error is raised + splitter = CVSplitter().set_split_request(groups=True) + estimator = MultiTaskEstimatorCV(cv=splitter) + msg = "do not support sample weights" + with pytest.raises(ValueError, match=msg): + estimator.fit(X, y, sample_weight=sample_weight) + + # If CV splitter does support sample_weight no error is raised + splitter = CVSplitterSampleWeight().set_split_request( + groups=True, sample_weight=True + ) + estimator = MultiTaskEstimatorCV(cv=splitter) + estimator.fit(X, y, sample_weight=sample_weight) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_huber.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_huber.py new file mode 100644 index 0000000000000000000000000000000000000000..9c0c7d213ee27a9d3d2ee8030f638dfe7a1325c7 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_huber.py @@ -0,0 +1,216 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import numpy as np +import pytest +from scipy import optimize + +from sklearn.datasets import make_regression +from sklearn.linear_model import HuberRegressor, LinearRegression, Ridge, SGDRegressor +from sklearn.linear_model._huber import _huber_loss_and_gradient +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + 
assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def make_regression_with_outliers(n_samples=50, n_features=20): + rng = np.random.RandomState(0) + # Generate data with outliers by replacing 10% of the samples with noise. + X, y = make_regression( + n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05 + ) + + # Replace 10% of the sample with noise. + num_noise = int(0.1 * n_samples) + random_samples = rng.randint(0, n_samples, num_noise) + X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1])) + return X, y + + +def test_huber_equals_lr_for_high_epsilon(): + # Test that Ridge matches LinearRegression for large epsilon + X, y = make_regression_with_outliers() + lr = LinearRegression() + lr.fit(X, y) + huber = HuberRegressor(epsilon=1e3, alpha=0.0) + huber.fit(X, y) + assert_almost_equal(huber.coef_, lr.coef_, 3) + assert_almost_equal(huber.intercept_, lr.intercept_, 2) + + +def test_huber_max_iter(): + X, y = make_regression_with_outliers() + huber = HuberRegressor(max_iter=1) + huber.fit(X, y) + assert huber.n_iter_ == huber.max_iter + + +def test_huber_gradient(): + # Test that the gradient calculated by _huber_loss_and_gradient is correct + rng = np.random.RandomState(1) + X, y = make_regression_with_outliers() + sample_weight = rng.randint(1, 3, (y.shape[0])) + + def loss_func(x, *args): + return _huber_loss_and_gradient(x, *args)[0] + + def grad_func(x, *args): + return _huber_loss_and_gradient(x, *args)[1] + + # Check using optimize.check_grad that the gradients are equal. + for _ in range(5): + # Check for both fit_intercept and otherwise. 
+ for n_features in [X.shape[1] + 1, X.shape[1] + 2]: + w = rng.randn(n_features) + w[-1] = np.abs(w[-1]) + grad_same = optimize.check_grad( + loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight + ) + assert_almost_equal(grad_same, 1e-6, 4) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_huber_sample_weights(csr_container): + # Test sample_weights implementation in HuberRegressor""" + + X, y = make_regression_with_outliers() + huber = HuberRegressor() + huber.fit(X, y) + huber_coef = huber.coef_ + huber_intercept = huber.intercept_ + + # Rescale coefs before comparing with assert_array_almost_equal to make + # sure that the number of decimal places used is somewhat insensitive to + # the amplitude of the coefficients and therefore to the scale of the + # data and the regularization parameter + scale = max(np.mean(np.abs(huber.coef_)), np.mean(np.abs(huber.intercept_))) + + huber.fit(X, y, sample_weight=np.ones(y.shape[0])) + assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale) + assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale) + + X, y = make_regression_with_outliers(n_samples=5, n_features=20) + X_new = np.vstack((X, np.vstack((X[1], X[1], X[3])))) + y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]])) + huber.fit(X_new, y_new) + huber_coef = huber.coef_ + huber_intercept = huber.intercept_ + sample_weight = np.ones(X.shape[0]) + sample_weight[1] = 3 + sample_weight[3] = 2 + huber.fit(X, y, sample_weight=sample_weight) + + assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale) + assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale) + + # Test sparse implementation with sample weights. 
+ X_csr = csr_container(X) + huber_sparse = HuberRegressor() + huber_sparse.fit(X_csr, y, sample_weight=sample_weight) + assert_array_almost_equal(huber_sparse.coef_ / scale, huber_coef / scale) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_huber_sparse(csr_container): + X, y = make_regression_with_outliers() + huber = HuberRegressor(alpha=0.1) + huber.fit(X, y) + + X_csr = csr_container(X) + huber_sparse = HuberRegressor(alpha=0.1) + huber_sparse.fit(X_csr, y) + assert_array_almost_equal(huber_sparse.coef_, huber.coef_) + assert_array_equal(huber.outliers_, huber_sparse.outliers_) + + +def test_huber_scaling_invariant(): + # Test that outliers filtering is scaling independent. + X, y = make_regression_with_outliers() + huber = HuberRegressor(fit_intercept=False, alpha=0.0) + huber.fit(X, y) + n_outliers_mask_1 = huber.outliers_ + assert not np.all(n_outliers_mask_1) + + huber.fit(X, 2.0 * y) + n_outliers_mask_2 = huber.outliers_ + assert_array_equal(n_outliers_mask_2, n_outliers_mask_1) + + huber.fit(2.0 * X, 2.0 * y) + n_outliers_mask_3 = huber.outliers_ + assert_array_equal(n_outliers_mask_3, n_outliers_mask_1) + + +def test_huber_and_sgd_same_results(): + # Test they should converge to same coefficients for same parameters + + X, y = make_regression_with_outliers(n_samples=10, n_features=2) + + # Fit once to find out the scale parameter. 
Scale down X and y by scale + # so that the scale parameter is optimized to 1.0 + huber = HuberRegressor(fit_intercept=False, alpha=0.0, epsilon=1.35) + huber.fit(X, y) + X_scale = X / huber.scale_ + y_scale = y / huber.scale_ + huber.fit(X_scale, y_scale) + assert_almost_equal(huber.scale_, 1.0, 3) + + sgdreg = SGDRegressor( + alpha=0.0, + loss="huber", + shuffle=True, + random_state=0, + max_iter=10000, + fit_intercept=False, + epsilon=1.35, + tol=None, + ) + sgdreg.fit(X_scale, y_scale) + assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1) + + +def test_huber_warm_start(): + X, y = make_regression_with_outliers() + huber_warm = HuberRegressor(alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1) + + huber_warm.fit(X, y) + huber_warm_coef = huber_warm.coef_.copy() + huber_warm.fit(X, y) + + # SciPy performs the tol check after doing the coef updates, so + # these would be almost same but not equal. + assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1) + + assert huber_warm.n_iter_ == 0 + + +def test_huber_better_r2_score(): + # Test that huber returns a better r2 score than non-outliers""" + X, y = make_regression_with_outliers() + huber = HuberRegressor(alpha=0.01) + huber.fit(X, y) + linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y + mask = np.abs(linear_loss) < huber.epsilon * huber.scale_ + huber_score = huber.score(X[mask], y[mask]) + huber_outlier_score = huber.score(X[~mask], y[~mask]) + + # The Ridge regressor should be influenced by the outliers and hence + # give a worse score on the non-outliers as compared to the huber + # regressor. + ridge = Ridge(alpha=0.01) + ridge.fit(X, y) + ridge_score = ridge.score(X[mask], y[mask]) + ridge_outlier_score = ridge.score(X[~mask], y[~mask]) + assert huber_score > ridge_score + + # The huber model should also fit poorly on the outliers. 
+ assert ridge_outlier_score > huber_outlier_score + + +def test_huber_bool(): + # Test that it does not crash with bool data + X, y = make_regression(n_samples=200, n_features=2, noise=4.0, random_state=0) + X_bool = X > 0 + HuberRegressor().fit(X_bool, y) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_logistic.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_logistic.py new file mode 100644 index 0000000000000000000000000000000000000000..38325e4fe4cfd43f54bb29f6ab5a22fc5286185b --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_logistic.py @@ -0,0 +1,2418 @@ +import itertools +import os +import warnings +from functools import partial + +import numpy as np +import pytest +from numpy.testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from scipy import sparse +from scipy.linalg import LinAlgWarning, svd + +from sklearn import config_context +from sklearn._loss import HalfMultinomialLoss +from sklearn.base import clone +from sklearn.datasets import load_iris, make_classification, make_low_rank_matrix +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import SGDClassifier +from sklearn.linear_model._logistic import ( + LogisticRegression as LogisticRegressionDefault, +) +from sklearn.linear_model._logistic import ( + LogisticRegressionCV as LogisticRegressionCVDefault, +) +from sklearn.linear_model._logistic import ( + _log_reg_scoring_path, + _logistic_regression_path, +) +from sklearn.metrics import get_scorer, log_loss +from sklearn.model_selection import ( + GridSearchCV, + LeaveOneGroupOut, + StratifiedKFold, + cross_val_score, + train_test_split, +) +from sklearn.multiclass import OneVsRestClassifier +from sklearn.preprocessing import LabelEncoder, StandardScaler, scale +from sklearn.svm import l1_min_c +from sklearn.utils import compute_class_weight, shuffle 
+from sklearn.utils._testing import ignore_warnings, skip_if_no_parallel +from sklearn.utils.fixes import _IS_32BIT, COO_CONTAINERS, CSR_CONTAINERS + +pytestmark = pytest.mark.filterwarnings( + "error::sklearn.exceptions.ConvergenceWarning:sklearn.*" +) +# Fixing random_state helps prevent ConvergenceWarnings +LogisticRegression = partial(LogisticRegressionDefault, random_state=0) +LogisticRegressionCV = partial(LogisticRegressionCVDefault, random_state=0) + + +SOLVERS = ("lbfgs", "liblinear", "newton-cg", "newton-cholesky", "sag", "saga") +X = [[-1, 0], [0, 1], [1, 1]] +Y1 = [0, 1, 1] +Y2 = [2, 1, 0] +iris = load_iris() + + +def check_predictions(clf, X, y): + """Check that the model is able to fit the classification data""" + n_samples = len(y) + classes = np.unique(y) + n_classes = classes.shape[0] + + predicted = clf.fit(X, y).predict(X) + assert_array_equal(clf.classes_, classes) + + assert predicted.shape == (n_samples,) + assert_array_equal(predicted, y) + + probabilities = clf.predict_proba(X) + assert probabilities.shape == (n_samples, n_classes) + assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples)) + assert_array_equal(probabilities.argmax(axis=1), y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_predict_2_classes(csr_container): + # Simple sanity check on a 2 classes dataset + # Make sure it predicts the correct result on simple datasets. 
+ check_predictions(LogisticRegression(random_state=0), X, Y1) + check_predictions(LogisticRegression(random_state=0), csr_container(X), Y1) + + check_predictions(LogisticRegression(C=100, random_state=0), X, Y1) + check_predictions(LogisticRegression(C=100, random_state=0), csr_container(X), Y1) + + check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X, Y1) + check_predictions( + LogisticRegression(fit_intercept=False, random_state=0), csr_container(X), Y1 + ) + + +def test_logistic_cv_mock_scorer(): + class MockScorer: + def __init__(self): + self.calls = 0 + self.scores = [0.1, 0.4, 0.8, 0.5] + + def __call__(self, model, X, y, sample_weight=None): + score = self.scores[self.calls % len(self.scores)] + self.calls += 1 + return score + + mock_scorer = MockScorer() + Cs = [1, 2, 3, 4] + cv = 2 + + lr = LogisticRegressionCV(Cs=Cs, scoring=mock_scorer, cv=cv) + X, y = make_classification(random_state=0) + lr.fit(X, y) + + # Cs[2] has the highest score (0.8) from MockScorer + assert lr.C_[0] == Cs[2] + + # scorer called 8 times (cv*len(Cs)) + assert mock_scorer.calls == cv * len(Cs) + + # reset mock_scorer + mock_scorer.calls = 0 + custom_score = lr.score(X, lr.predict(X)) + + assert custom_score == mock_scorer.scores[0] + assert mock_scorer.calls == 1 + + +@skip_if_no_parallel +def test_lr_liblinear_warning(): + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + + lr = LogisticRegression(solver="liblinear", n_jobs=2) + warning_message = ( + "'n_jobs' > 1 does not have any effect when" + " 'solver' is set to 'liblinear'. Got 'n_jobs'" + " = 2." 
+ ) + with pytest.warns(UserWarning, match=warning_message): + lr.fit(iris.data, target) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_predict_3_classes(csr_container): + check_predictions(LogisticRegression(C=10), X, Y2) + check_predictions(LogisticRegression(C=10), csr_container(X), Y2) + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +@pytest.mark.parametrize( + "clf", + [ + LogisticRegression(C=len(iris.data), solver="liblinear", multi_class="ovr"), + LogisticRegression(C=len(iris.data), solver="lbfgs"), + LogisticRegression(C=len(iris.data), solver="newton-cg"), + LogisticRegression( + C=len(iris.data), solver="sag", tol=1e-2, multi_class="ovr", random_state=42 + ), + LogisticRegression( + C=len(iris.data), + solver="saga", + tol=1e-2, + multi_class="ovr", + random_state=42, + ), + LogisticRegression(C=len(iris.data), solver="newton-cholesky"), + ], +) +def test_predict_iris(clf): + """Test logistic regression with the iris dataset. + + Test that both multinomial and OvR solvers handle multiclass data correctly and + give good accuracy score (>0.95) for the training data. 
+ """ + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + + if clf.solver == "lbfgs": + # lbfgs has convergence issues on the iris data with its default max_iter=100 + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + clf.fit(iris.data, target) + else: + clf.fit(iris.data, target) + assert_array_equal(np.unique(target), clf.classes_) + + pred = clf.predict(iris.data) + assert np.mean(pred == target) > 0.95 + + probabilities = clf.predict_proba(iris.data) + assert_allclose(probabilities.sum(axis=1), np.ones(n_samples)) + + pred = iris.target_names[probabilities.argmax(axis=1)] + assert np.mean(pred == target) > 0.95 + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV]) +def test_check_solver_option(LR): + X, y = iris.data, iris.target + + # only 'liblinear' solver + for solver in ["liblinear"]: + msg = f"Solver {solver} does not support a multinomial backend." + lr = LR(solver=solver, multi_class="multinomial") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + + # all solvers except 'liblinear' and 'saga' + for solver in ["lbfgs", "newton-cg", "newton-cholesky", "sag"]: + msg = "Solver %s supports only 'l2' or None penalties," % solver + lr = LR(solver=solver, penalty="l1", multi_class="ovr") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + for solver in ["lbfgs", "newton-cg", "newton-cholesky", "sag", "saga"]: + msg = "Solver %s supports only dual=False, got dual=True" % solver + lr = LR(solver=solver, dual=True, multi_class="ovr") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + + # only saga supports elasticnet. 
We only test for liblinear because the + # error is raised before for the other solvers (solver %s supports only l2 + # penalties) + for solver in ["liblinear"]: + msg = f"Only 'saga' solver supports elasticnet penalty, got solver={solver}." + lr = LR(solver=solver, penalty="elasticnet") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + + # liblinear does not support penalty='none' + # (LogisticRegressionCV does not supports penalty='none' at all) + if LR is LogisticRegression: + msg = "penalty=None is not supported for the liblinear solver" + lr = LR(penalty=None, solver="liblinear") + with pytest.raises(ValueError, match=msg): + lr.fit(X, y) + + +@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV]) +def test_elasticnet_l1_ratio_err_helpful(LR): + # Check that an informative error message is raised when penalty="elasticnet" + # but l1_ratio is not specified. + model = LR(penalty="elasticnet", solver="saga") + with pytest.raises(ValueError, match=r".*l1_ratio.*"): + model.fit(np.array([[1, 2], [3, 4]]), np.array([0, 1])) + + +# TODO(1.7): remove whole test with deprecation of multi_class +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +@pytest.mark.parametrize("solver", ["lbfgs", "newton-cg", "sag", "saga"]) +def test_multinomial_binary(solver): + # Test multinomial LR on a binary problem. 
+ target = (iris.target > 0).astype(np.intp) + target = np.array(["setosa", "not-setosa"])[target] + + clf = LogisticRegression( + solver=solver, multi_class="multinomial", random_state=42, max_iter=2000 + ) + clf.fit(iris.data, target) + + assert clf.coef_.shape == (1, iris.data.shape[1]) + assert clf.intercept_.shape == (1,) + assert_array_equal(clf.predict(iris.data), target) + + mlr = LogisticRegression( + solver=solver, multi_class="multinomial", random_state=42, fit_intercept=False + ) + mlr.fit(iris.data, target) + pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data), axis=1)] + assert np.mean(pred == target) > 0.9 + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +# Maybe even remove this whole test as correctness of multinomial loss is tested +# elsewhere. +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +def test_multinomial_binary_probabilities(global_random_seed): + # Test multinomial LR gives expected probabilities based on the + # decision function, for a binary problem. + X, y = make_classification(random_state=global_random_seed) + clf = LogisticRegression( + multi_class="multinomial", + solver="saga", + tol=1e-3, + random_state=global_random_seed, + ) + clf.fit(X, y) + + decision = clf.decision_function(X) + proba = clf.predict_proba(X) + + expected_proba_class_1 = np.exp(decision) / (np.exp(decision) + np.exp(-decision)) + expected_proba = np.c_[1 - expected_proba_class_1, expected_proba_class_1] + + assert_almost_equal(proba, expected_proba) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_sparsify(coo_container): + # Test sparsify and densify members. 
+ n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + X = scale(iris.data) + clf = LogisticRegression(random_state=0).fit(X, target) + + pred_d_d = clf.decision_function(X) + + clf.sparsify() + assert sparse.issparse(clf.coef_) + pred_s_d = clf.decision_function(X) + + sp_data = coo_container(X) + pred_s_s = clf.decision_function(sp_data) + + clf.densify() + pred_d_s = clf.decision_function(sp_data) + + assert_array_almost_equal(pred_d_d, pred_s_d) + assert_array_almost_equal(pred_d_d, pred_s_s) + assert_array_almost_equal(pred_d_d, pred_d_s) + + +def test_inconsistent_input(): + # Test that an exception is raised on inconsistent input + rng = np.random.RandomState(0) + X_ = rng.random_sample((5, 10)) + y_ = np.ones(X_.shape[0]) + y_[0] = 0 + + clf = LogisticRegression(random_state=0) + + # Wrong dimensions for training data + y_wrong = y_[:-1] + + with pytest.raises(ValueError): + clf.fit(X, y_wrong) + + # Wrong dimensions for test data + with pytest.raises(ValueError): + clf.fit(X_, y_).predict(rng.random_sample((3, 12))) + + +def test_write_parameters(): + # Test that we can write to coef_ and intercept_ + clf = LogisticRegression(random_state=0) + clf.fit(X, Y1) + clf.coef_[:] = 0 + clf.intercept_[:] = 0 + assert_array_almost_equal(clf.decision_function(X), 0) + + +def test_nan(): + # Test proper NaN handling. + # Regression test for Issue #252: fit used to go into an infinite loop. 
+ Xnan = np.array(X, dtype=np.float64) + Xnan[0, 1] = np.nan + logistic = LogisticRegression(random_state=0) + + with pytest.raises(ValueError): + logistic.fit(Xnan, Y1) + + +def test_consistency_path(): + # Test that the path algorithm is consistent + rng = np.random.RandomState(0) + X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) + y = [1] * 100 + [-1] * 100 + Cs = np.logspace(0, 4, 10) + + f = ignore_warnings + # can't test with fit_intercept=True since LIBLINEAR + # penalizes the intercept + for solver in ["sag", "saga"]: + coefs, Cs, _ = f(_logistic_regression_path)( + X, + y, + Cs=Cs, + fit_intercept=False, + tol=1e-5, + solver=solver, + max_iter=1000, + random_state=0, + ) + for i, C in enumerate(Cs): + lr = LogisticRegression( + C=C, + fit_intercept=False, + tol=1e-5, + solver=solver, + random_state=0, + max_iter=1000, + ) + lr.fit(X, y) + lr_coef = lr.coef_.ravel() + assert_array_almost_equal( + lr_coef, coefs[i], decimal=4, err_msg="with solver = %s" % solver + ) + + # test for fit_intercept=True + for solver in ("lbfgs", "newton-cg", "newton-cholesky", "liblinear", "sag", "saga"): + Cs = [1e3] + coefs, Cs, _ = f(_logistic_regression_path)( + X, + y, + Cs=Cs, + tol=1e-6, + solver=solver, + intercept_scaling=10000.0, + random_state=0, + ) + lr = LogisticRegression( + C=Cs[0], + tol=1e-6, + intercept_scaling=10000.0, + random_state=0, + solver=solver, + ) + lr.fit(X, y) + lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_]) + assert_array_almost_equal( + lr_coef, coefs[0], decimal=4, err_msg="with solver = %s" % solver + ) + + +def test_logistic_regression_path_convergence_fail(): + rng = np.random.RandomState(0) + X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) + y = [1] * 100 + [-1] * 100 + Cs = [1e3] + + # Check that the convergence message points to both a model agnostic + # advice (scaling the data) and to the logistic regression specific + # documentation that includes hints on the solver configuration. 
+ with pytest.warns(ConvergenceWarning) as record: + _logistic_regression_path( + X, y, Cs=Cs, tol=0.0, max_iter=1, random_state=0, verbose=0 + ) + + assert len(record) == 1 + warn_msg = record[0].message.args[0] + assert "lbfgs failed to converge" in warn_msg + assert "Increase the number of iterations" in warn_msg + assert "scale the data" in warn_msg + assert "linear_model.html#logistic-regression" in warn_msg + + +def test_liblinear_dual_random_state(): + # random_state is relevant for liblinear solver only if dual=True + X, y = make_classification(n_samples=20, random_state=0) + lr1 = LogisticRegression( + random_state=0, + dual=True, + tol=1e-3, + solver="liblinear", + ) + lr1.fit(X, y) + lr2 = LogisticRegression( + random_state=0, + dual=True, + tol=1e-3, + solver="liblinear", + ) + lr2.fit(X, y) + lr3 = LogisticRegression( + random_state=8, + dual=True, + tol=1e-3, + solver="liblinear", + ) + lr3.fit(X, y) + + # same result for same random state + assert_array_almost_equal(lr1.coef_, lr2.coef_) + # different results for different random states + msg = "Arrays are not almost equal to 6 decimals" + with pytest.raises(AssertionError, match=msg): + assert_array_almost_equal(lr1.coef_, lr3.coef_) + + +def test_logistic_cv(): + # test for LogisticRegressionCV object + n_samples, n_features = 50, 5 + rng = np.random.RandomState(0) + X_ref = rng.randn(n_samples, n_features) + y = np.sign(X_ref.dot(5 * rng.randn(n_features))) + X_ref -= X_ref.mean() + X_ref /= X_ref.std() + lr_cv = LogisticRegressionCV( + Cs=[1.0], fit_intercept=False, solver="liblinear", cv=3 + ) + lr_cv.fit(X_ref, y) + lr = LogisticRegression(C=1.0, fit_intercept=False, solver="liblinear") + lr.fit(X_ref, y) + assert_array_almost_equal(lr.coef_, lr_cv.coef_) + + assert_array_equal(lr_cv.coef_.shape, (1, n_features)) + assert_array_equal(lr_cv.classes_, [-1, 1]) + assert len(lr_cv.classes_) == 2 + + coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values())) + assert_array_equal(coefs_paths.shape, 
(1, 3, 1, n_features)) + assert_array_equal(lr_cv.Cs_.shape, (1,)) + scores = np.asarray(list(lr_cv.scores_.values())) + assert_array_equal(scores.shape, (1, 3, 1)) + + +@pytest.mark.parametrize( + "scoring, multiclass_agg_list", + [ + ("accuracy", [""]), + ("precision", ["_macro", "_weighted"]), + # no need to test for micro averaging because it + # is the same as accuracy for f1, precision, + # and recall (see https://github.com/ + # scikit-learn/scikit-learn/pull/ + # 11578#discussion_r203250062) + ("f1", ["_macro", "_weighted"]), + ("neg_log_loss", [""]), + ("recall", ["_macro", "_weighted"]), + ], +) +def test_logistic_cv_multinomial_score(scoring, multiclass_agg_list): + # test that LogisticRegressionCV uses the right score to compute its + # cross-validation scores when using a multinomial scoring + # see https://github.com/scikit-learn/scikit-learn/issues/8720 + X, y = make_classification( + n_samples=100, random_state=0, n_classes=3, n_informative=6 + ) + train, test = np.arange(80), np.arange(80, 100) + lr = LogisticRegression(C=1.0) + # we use lbfgs to support multinomial + params = lr.get_params() + # we store the params to set them further in _log_reg_scoring_path + for key in ["C", "n_jobs", "warm_start"]: + del params[key] + lr.fit(X[train], y[train]) + for averaging in multiclass_agg_list: + scorer = get_scorer(scoring + averaging) + assert_array_almost_equal( + _log_reg_scoring_path( + X, + y, + train, + test, + Cs=[1.0], + scoring=scorer, + pos_class=None, + max_squared_sum=None, + sample_weight=None, + score_params=None, + **(params | {"multi_class": "multinomial"}), + )[2][0], + scorer(lr, X[test], y[test]), + ) + + +def test_multinomial_logistic_regression_string_inputs(): + # Test with string labels for LogisticRegression(CV) + n_samples, n_features, n_classes = 50, 5, 3 + X_ref, y = make_classification( + n_samples=n_samples, + n_features=n_features, + n_classes=n_classes, + n_informative=3, + random_state=0, + ) + y_str = 
LabelEncoder().fit(["bar", "baz", "foo"]).inverse_transform(y) + # For numerical labels, let y values be taken from set (-1, 0, 1) + y = np.array(y) - 1 + # Test for string labels + lr = LogisticRegression() + lr_cv = LogisticRegressionCV(Cs=3) + lr_str = LogisticRegression() + lr_cv_str = LogisticRegressionCV(Cs=3) + + lr.fit(X_ref, y) + lr_cv.fit(X_ref, y) + lr_str.fit(X_ref, y_str) + lr_cv_str.fit(X_ref, y_str) + + assert_array_almost_equal(lr.coef_, lr_str.coef_) + assert sorted(lr_str.classes_) == ["bar", "baz", "foo"] + assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_) + assert sorted(lr_str.classes_) == ["bar", "baz", "foo"] + assert sorted(lr_cv_str.classes_) == ["bar", "baz", "foo"] + + # The predictions should be in original labels + assert sorted(np.unique(lr_str.predict(X_ref))) == ["bar", "baz", "foo"] + assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz", "foo"] + + # Make sure class weights can be given with string labels + lr_cv_str = LogisticRegression(class_weight={"bar": 1, "baz": 2, "foo": 0}).fit( + X_ref, y_str + ) + assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz"] + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_logistic_cv_sparse(csr_container): + X, y = make_classification(n_samples=50, n_features=5, random_state=0) + X[X < 1.0] = 0.0 + csr = csr_container(X) + + clf = LogisticRegressionCV() + clf.fit(X, y) + clfs = LogisticRegressionCV() + clfs.fit(csr, y) + assert_array_almost_equal(clfs.coef_, clf.coef_) + assert_array_almost_equal(clfs.intercept_, clf.intercept_) + assert clfs.C_ == clf.C_ + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +# Best remove this whole test. +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +def test_ovr_multinomial_iris(): + # Test that OvR and multinomial are correct using the iris dataset. 
+ train, target = iris.data, iris.target + n_samples, n_features = train.shape + + # The cv indices from stratified kfold (where stratification is done based + # on the fine-grained iris classes, i.e, before the classes 0 and 1 are + # conflated) is used for both clf and clf1 + n_cv = 2 + cv = StratifiedKFold(n_cv) + precomputed_folds = list(cv.split(train, target)) + + # Train clf on the original dataset where classes 0 and 1 are separated + clf = LogisticRegressionCV(cv=precomputed_folds, multi_class="ovr") + clf.fit(train, target) + + # Conflate classes 0 and 1 and train clf1 on this modified dataset + clf1 = LogisticRegressionCV(cv=precomputed_folds, multi_class="ovr") + target_copy = target.copy() + target_copy[target_copy == 0] = 1 + clf1.fit(train, target_copy) + + # Ensure that what OvR learns for class2 is same regardless of whether + # classes 0 and 1 are separated or not + assert_allclose(clf.scores_[2], clf1.scores_[2]) + assert_allclose(clf.intercept_[2:], clf1.intercept_) + assert_allclose(clf.coef_[2][np.newaxis, :], clf1.coef_) + + # Test the shape of various attributes. 
+ assert clf.coef_.shape == (3, n_features) + assert_array_equal(clf.classes_, [0, 1, 2]) + coefs_paths = np.asarray(list(clf.coefs_paths_.values())) + assert coefs_paths.shape == (3, n_cv, 10, n_features + 1) + assert clf.Cs_.shape == (10,) + scores = np.asarray(list(clf.scores_.values())) + assert scores.shape == (3, n_cv, 10) + + # Test that for the iris data multinomial gives a better accuracy than OvR + for solver in ["lbfgs", "newton-cg", "sag", "saga"]: + max_iter = 500 if solver in ["sag", "saga"] else 30 + clf_multi = LogisticRegressionCV( + solver=solver, + max_iter=max_iter, + random_state=42, + tol=1e-3 if solver in ["sag", "saga"] else 1e-2, + cv=2, + ) + if solver == "lbfgs": + # lbfgs requires scaling to avoid convergence warnings + train = scale(train) + + clf_multi.fit(train, target) + multi_score = clf_multi.score(train, target) + ovr_score = clf.score(train, target) + assert multi_score > ovr_score + + # Test attributes of LogisticRegressionCV + assert clf.coef_.shape == clf_multi.coef_.shape + assert_array_equal(clf_multi.classes_, [0, 1, 2]) + coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values())) + assert coefs_paths.shape == (3, n_cv, 10, n_features + 1) + assert clf_multi.Cs_.shape == (10,) + scores = np.asarray(list(clf_multi.scores_.values())) + assert scores.shape == (3, n_cv, 10) + + +def test_logistic_regression_solvers(): + """Test solvers converge to the same result.""" + X, y = make_classification(n_features=10, n_informative=5, random_state=0) + + params = dict(fit_intercept=False, random_state=42) + + regressors = { + solver: LogisticRegression(solver=solver, **params).fit(X, y) + for solver in SOLVERS + } + + for solver_1, solver_2 in itertools.combinations(regressors, r=2): + assert_array_almost_equal( + regressors[solver_1].coef_, regressors[solver_2].coef_, decimal=3 + ) + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +@pytest.mark.filterwarnings("ignore:.*'multi_class' was 
deprecated.*:FutureWarning") +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_logistic_regression_solvers_multiclass(fit_intercept): + """Test solvers converge to the same result for multiclass problems.""" + X, y = make_classification( + n_samples=20, n_features=20, n_informative=10, n_classes=3, random_state=0 + ) + tol = 1e-8 + params = dict(fit_intercept=fit_intercept, tol=tol, random_state=42) + + # Override max iteration count for specific solvers to allow for + # proper convergence. + solver_max_iter = {"lbfgs": 200, "sag": 10_000, "saga": 10_000} + + regressors = { + solver: LogisticRegression( + solver=solver, max_iter=solver_max_iter.get(solver, 100), **params + ).fit(X, y) + for solver in set(SOLVERS) - set(["liblinear"]) + } + + for solver_1, solver_2 in itertools.combinations(regressors, r=2): + assert_allclose( + regressors[solver_1].coef_, + regressors[solver_2].coef_, + rtol=5e-3 if (solver_1 == "saga" or solver_2 == "saga") else 1e-3, + err_msg=f"{solver_1} vs {solver_2}", + ) + if fit_intercept: + assert_allclose( + regressors[solver_1].intercept_, + regressors[solver_2].intercept_, + rtol=5e-3 if (solver_1 == "saga" or solver_2 == "saga") else 1e-3, + err_msg=f"{solver_1} vs {solver_2}", + ) + + +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_logistic_regression_solvers_multiclass_unpenalized( + fit_intercept, global_random_seed +): + """Test and compare solver results for unpenalized multinomial multiclass.""" + # Our use of numpy.random.multinomial requires numpy >= 1.22 + pytest.importorskip("numpy", minversion="1.22.0") + # We want to avoid perfect separation. 
+ n_samples, n_features, n_classes = 100, 4, 3 + rng = np.random.RandomState(global_random_seed) + X = make_low_rank_matrix( + n_samples=n_samples, + n_features=n_features + fit_intercept, + effective_rank=n_features + fit_intercept, + tail_strength=0.1, + random_state=rng, + ) + if fit_intercept: + X[:, -1] = 1 + U, s, Vt = svd(X) + assert np.all(s > 1e-3) # to be sure that X is not singular + assert np.max(s) / np.min(s) < 100 # condition number of X + if fit_intercept: + X = X[:, :-1] + coef = rng.uniform(low=1, high=3, size=n_features * n_classes) + coef = coef.reshape(n_classes, n_features) + intercept = rng.uniform(low=-1, high=1, size=n_classes) * fit_intercept + raw_prediction = X @ coef.T + intercept + + loss = HalfMultinomialLoss(n_classes=n_classes) + proba = loss.link.inverse(raw_prediction) + # Only newer numpy version (1.22) support more dimensions on pvals. + y = np.zeros(n_samples) + for i in range(n_samples): + y[i] = np.argwhere(rng.multinomial(n=1, pvals=proba[i, :]))[0, 0] + + tol = 1e-9 + params = dict(fit_intercept=fit_intercept, random_state=42) + solver_max_iter = {"lbfgs": 200, "sag": 10_000, "saga": 10_000} + solver_tol = {"sag": 1e-8, "saga": 1e-8} + regressors = { + solver: LogisticRegression( + C=np.inf, + solver=solver, + tol=solver_tol.get(solver, tol), + max_iter=solver_max_iter.get(solver, 100), + **params, + ).fit(X, y) + for solver in set(SOLVERS) - set(["liblinear"]) + } + for solver in regressors.keys(): + # See the docstring of test_multinomial_identifiability_on_iris for reference. 
+ assert_allclose( + regressors[solver].coef_.sum(axis=0), 0, atol=1e-10, err_msg=solver + ) + + for solver_1, solver_2 in itertools.combinations(regressors, r=2): + assert_allclose( + regressors[solver_1].coef_, + regressors[solver_2].coef_, + rtol=5e-3 if (solver_1 == "saga" or solver_2 == "saga") else 2e-3, + err_msg=f"{solver_1} vs {solver_2}", + ) + if fit_intercept: + assert_allclose( + regressors[solver_1].intercept_, + regressors[solver_2].intercept_, + rtol=5e-3 if (solver_1 == "saga" or solver_2 == "saga") else 1e-3, + err_msg=f"{solver_1} vs {solver_2}", + ) + + +@pytest.mark.parametrize("weight", [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]) +@pytest.mark.parametrize("class_weight", ["weight", "balanced"]) +def test_logistic_regressioncv_class_weights(weight, class_weight, global_random_seed): + """Test class_weight for LogisticRegressionCV.""" + n_classes = len(weight) + if class_weight == "weight": + class_weight = weight + + X, y = make_classification( + n_samples=30, + n_features=3, + n_repeated=0, + n_informative=3, + n_redundant=0, + n_classes=n_classes, + random_state=global_random_seed, + ) + params = dict( + Cs=1, + fit_intercept=False, + class_weight=class_weight, + tol=1e-8, + ) + clf_lbfgs = LogisticRegressionCV(solver="lbfgs", **params) + + # XXX: lbfgs' line search can fail and cause a ConvergenceWarning for some + # 10% of the random seeds, but only on specific platforms (in particular + # when using Atlas BLAS/LAPACK implementation). Doubling the maxls internal + # parameter of the solver does not help. However this lack of proper + # convergence does not seem to prevent the assertion to pass, so we ignore + # the warning for now. 
+ # See: https://github.com/scikit-learn/scikit-learn/pull/27649 + with ignore_warnings(category=ConvergenceWarning): + clf_lbfgs.fit(X, y) + + for solver in set(SOLVERS) - set(["lbfgs", "liblinear", "newton-cholesky"]): + clf = LogisticRegressionCV(solver=solver, **params) + if solver in ("sag", "saga"): + clf.set_params( + tol=1e-18, max_iter=10000, random_state=global_random_seed + 1 + ) + clf.fit(X, y) + + assert_allclose( + clf.coef_, clf_lbfgs.coef_, rtol=1e-3, err_msg=f"{solver} vs lbfgs" + ) + + +@pytest.mark.parametrize("problem", ("single", "cv")) +@pytest.mark.parametrize( + "solver", ("lbfgs", "liblinear", "newton-cg", "newton-cholesky", "sag", "saga") +) +def test_logistic_regression_sample_weights(problem, solver, global_random_seed): + n_samples_per_cv_group = 200 + n_cv_groups = 3 + + X, y = make_classification( + n_samples=n_samples_per_cv_group * n_cv_groups, + n_features=5, + n_informative=3, + n_classes=2, + n_redundant=0, + random_state=global_random_seed, + ) + rng = np.random.RandomState(global_random_seed) + sw = np.ones(y.shape[0]) + + kw_weighted = { + "random_state": global_random_seed, + "fit_intercept": False, + "max_iter": 100_000 if solver.startswith("sag") else 1_000, + "tol": 1e-8, + } + kw_repeated = kw_weighted.copy() + sw[:n_samples_per_cv_group] = rng.randint(0, 5, size=n_samples_per_cv_group) + X_repeated = np.repeat(X, sw.astype(int), axis=0) + y_repeated = np.repeat(y, sw.astype(int), axis=0) + + if problem == "single": + LR = LogisticRegression + elif problem == "cv": + LR = LogisticRegressionCV + # We weight the first fold 2 times more. 
+ groups_weighted = np.concatenate( + [ + np.full(n_samples_per_cv_group, 0), + np.full(n_samples_per_cv_group, 1), + np.full(n_samples_per_cv_group, 2), + ] + ) + splits_weighted = list(LeaveOneGroupOut().split(X, groups=groups_weighted)) + kw_weighted.update({"Cs": 100, "cv": splits_weighted}) + + groups_repeated = np.repeat(groups_weighted, sw.astype(int), axis=0) + splits_repeated = list( + LeaveOneGroupOut().split(X_repeated, groups=groups_repeated) + ) + kw_repeated.update({"Cs": 100, "cv": splits_repeated}) + + clf_sw_weighted = LR(solver=solver, **kw_weighted) + clf_sw_repeated = LR(solver=solver, **kw_repeated) + + if solver == "lbfgs": + # lbfgs has convergence issues on the data but this should not impact + # the quality of the results. + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + clf_sw_weighted.fit(X, y, sample_weight=sw) + clf_sw_repeated.fit(X_repeated, y_repeated) + + else: + clf_sw_weighted.fit(X, y, sample_weight=sw) + clf_sw_repeated.fit(X_repeated, y_repeated) + + if problem == "cv": + assert_allclose(clf_sw_weighted.scores_[1], clf_sw_repeated.scores_[1]) + assert_allclose(clf_sw_weighted.coef_, clf_sw_repeated.coef_, atol=1e-5) + + +@pytest.mark.parametrize( + "solver", ("lbfgs", "newton-cg", "newton-cholesky", "sag", "saga") +) +def test_logistic_regression_solver_class_weights(solver, global_random_seed): + # Test that passing class_weight as [1, 2] is the same as + # passing class weight = [1,1] but adjusting sample weights + # to be 2 for all instances of class 1. 
+ + X, y = make_classification( + n_samples=300, + n_features=5, + n_informative=3, + n_classes=2, + random_state=global_random_seed, + ) + + sample_weight = y + 1 + + kw_weighted = { + "random_state": global_random_seed, + "fit_intercept": False, + "max_iter": 100_000, + "tol": 1e-8, + } + clf_cw_12 = LogisticRegression( + solver=solver, class_weight={0: 1, 1: 2}, **kw_weighted + ) + clf_cw_12.fit(X, y) + clf_sw_12 = LogisticRegression(solver=solver, **kw_weighted) + clf_sw_12.fit(X, y, sample_weight=sample_weight) + assert_allclose(clf_cw_12.coef_, clf_sw_12.coef_, atol=1e-6) + + +def test_sample_and_class_weight_equivalence_liblinear(global_random_seed): + # Test the above for l1 penalty and l2 penalty with dual=True. + # since the patched liblinear code is different. + + X, y = make_classification( + n_samples=300, + n_features=5, + n_informative=3, + n_classes=2, + random_state=global_random_seed, + ) + + sample_weight = y + 1 + + clf_cw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + class_weight={0: 1, 1: 2}, + penalty="l1", + max_iter=10_000, + tol=1e-12, + random_state=global_random_seed, + ) + clf_cw.fit(X, y) + clf_sw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + penalty="l1", + max_iter=10_000, + tol=1e-12, + random_state=global_random_seed, + ) + clf_sw.fit(X, y, sample_weight) + assert_allclose(clf_cw.coef_, clf_sw.coef_, atol=1e-10) + + clf_cw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + class_weight={0: 1, 1: 2}, + penalty="l2", + max_iter=10_000, + tol=1e-12, + dual=True, + random_state=global_random_seed, + ) + clf_cw.fit(X, y) + clf_sw = LogisticRegression( + solver="liblinear", + fit_intercept=False, + penalty="l2", + max_iter=10_000, + tol=1e-12, + dual=True, + random_state=global_random_seed, + ) + clf_sw.fit(X, y, sample_weight) + assert_allclose(clf_cw.coef_, clf_sw.coef_, atol=1e-10) + + +def _compute_class_weight_dictionary(y): + # helper for returning a dictionary instead 
of an array + classes = np.unique(y) + class_weight = compute_class_weight("balanced", classes=classes, y=y) + class_weight_dict = dict(zip(classes, class_weight)) + return class_weight_dict + + +@pytest.mark.parametrize("csr_container", [lambda x: x] + CSR_CONTAINERS) +def test_logistic_regression_class_weights(csr_container): + # Scale data to avoid convergence warnings with the lbfgs solver + X_iris = scale(iris.data) + # Multinomial case: remove 90% of class 0 + X = X_iris[45:, :] + X = csr_container(X) + y = iris.target[45:] + class_weight_dict = _compute_class_weight_dictionary(y) + + for solver in set(SOLVERS) - set(["liblinear", "newton-cholesky"]): + params = dict(solver=solver, max_iter=1000) + clf1 = LogisticRegression(class_weight="balanced", **params) + clf2 = LogisticRegression(class_weight=class_weight_dict, **params) + clf1.fit(X, y) + clf2.fit(X, y) + assert len(clf1.classes_) == 3 + assert_allclose(clf1.coef_, clf2.coef_, rtol=1e-4) + # Same as appropriate sample_weight. 
+ sw = np.ones(X.shape[0]) + for c in clf1.classes_: + sw[y == c] *= class_weight_dict[c] + clf3 = LogisticRegression(**params).fit(X, y, sample_weight=sw) + assert_allclose(clf3.coef_, clf2.coef_, rtol=1e-4) + + # Binary case: remove 90% of class 0 and 100% of class 2 + X = X_iris[45:100, :] + y = iris.target[45:100] + class_weight_dict = _compute_class_weight_dictionary(y) + + for solver in SOLVERS: + params = dict(solver=solver, max_iter=1000) + clf1 = LogisticRegression(class_weight="balanced", **params) + clf2 = LogisticRegression(class_weight=class_weight_dict, **params) + clf1.fit(X, y) + clf2.fit(X, y) + assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6) + + +def test_logistic_regression_multinomial(): + # Tests for the multinomial option in logistic regression + + # Some basic attributes of Logistic Regression + n_samples, n_features, n_classes = 50, 20, 3 + X, y = make_classification( + n_samples=n_samples, + n_features=n_features, + n_informative=10, + n_classes=n_classes, + random_state=0, + ) + + X = StandardScaler(with_mean=False).fit_transform(X) + + # 'lbfgs' is used as a referenced + solver = "lbfgs" + ref_i = LogisticRegression(solver=solver, tol=1e-6) + ref_w = LogisticRegression(solver=solver, fit_intercept=False, tol=1e-6) + ref_i.fit(X, y) + ref_w.fit(X, y) + assert ref_i.coef_.shape == (n_classes, n_features) + assert ref_w.coef_.shape == (n_classes, n_features) + for solver in ["sag", "saga", "newton-cg"]: + clf_i = LogisticRegression( + solver=solver, + random_state=42, + max_iter=2000, + tol=1e-7, + ) + clf_w = LogisticRegression( + solver=solver, + random_state=42, + max_iter=2000, + tol=1e-7, + fit_intercept=False, + ) + clf_i.fit(X, y) + clf_w.fit(X, y) + assert clf_i.coef_.shape == (n_classes, n_features) + assert clf_w.coef_.shape == (n_classes, n_features) + + # Compare solutions between lbfgs and the other solvers + assert_allclose(ref_i.coef_, clf_i.coef_, rtol=1e-3) + assert_allclose(ref_w.coef_, clf_w.coef_, rtol=1e-2) 
+ assert_allclose(ref_i.intercept_, clf_i.intercept_, rtol=1e-3) + + # Test that the path give almost the same results. However since in this + # case we take the average of the coefs after fitting across all the + # folds, it need not be exactly the same. + for solver in ["lbfgs", "newton-cg", "sag", "saga"]: + clf_path = LogisticRegressionCV( + solver=solver, max_iter=2000, tol=1e-6, Cs=[1.0] + ) + clf_path.fit(X, y) + assert_allclose(clf_path.coef_, ref_i.coef_, rtol=1e-2) + assert_allclose(clf_path.intercept_, ref_i.intercept_, rtol=1e-2) + + +def test_liblinear_decision_function_zero(): + # Test negative prediction when decision_function values are zero. + # Liblinear predicts the positive class when decision_function values + # are zero. This is a test to verify that we do not do the same. + # See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600 + # and the PR https://github.com/scikit-learn/scikit-learn/pull/3623 + X, y = make_classification(n_samples=5, n_features=5, random_state=0) + clf = LogisticRegression(fit_intercept=False, solver="liblinear") + clf.fit(X, y) + + # Dummy data such that the decision function becomes zero. 
+ X = np.zeros((5, 5)) + assert_array_equal(clf.predict(X), np.zeros(5)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_liblinear_logregcv_sparse(csr_container): + # Test LogRegCV with solver='liblinear' works for sparse matrices + + X, y = make_classification(n_samples=10, n_features=5, random_state=0) + clf = LogisticRegressionCV(solver="liblinear") + clf.fit(csr_container(X), y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_saga_sparse(csr_container): + # Test LogRegCV with solver='liblinear' works for sparse matrices + + X, y = make_classification(n_samples=10, n_features=5, random_state=0) + clf = LogisticRegressionCV(solver="saga", tol=1e-2) + clf.fit(csr_container(X), y) + + +def test_logreg_intercept_scaling_zero(): + # Test that intercept_scaling is ignored when fit_intercept is False + + clf = LogisticRegression(fit_intercept=False) + clf.fit(X, Y1) + assert clf.intercept_ == 0.0 + + +def test_logreg_l1(): + # Because liblinear penalizes the intercept and saga does not, we do not + # fit the intercept to make it possible to compare the coefficients of + # the two models at convergence. 
+ rng = np.random.RandomState(42) + n_samples = 50 + X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0) + X_noise = rng.normal(size=(n_samples, 3)) + X_constant = np.ones(shape=(n_samples, 2)) + X = np.concatenate((X, X_noise, X_constant), axis=1) + lr_liblinear = LogisticRegression( + penalty="l1", + C=1.0, + solver="liblinear", + fit_intercept=False, + tol=1e-10, + ) + lr_liblinear.fit(X, y) + + lr_saga = LogisticRegression( + penalty="l1", + C=1.0, + solver="saga", + fit_intercept=False, + max_iter=1000, + tol=1e-10, + ) + lr_saga.fit(X, y) + assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_) + + # Noise and constant features should be regularized to zero by the l1 + # penalty + assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5)) + assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_logreg_l1_sparse_data(csr_container): + # Because liblinear penalizes the intercept and saga does not, we do not + # fit the intercept to make it possible to compare the coefficients of + # the two models at convergence. 
+ rng = np.random.RandomState(42) + n_samples = 50 + X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0) + X_noise = rng.normal(scale=0.1, size=(n_samples, 3)) + X_constant = np.zeros(shape=(n_samples, 2)) + X = np.concatenate((X, X_noise, X_constant), axis=1) + X[X < 1] = 0 + X = csr_container(X) + + lr_liblinear = LogisticRegression( + penalty="l1", + C=1.0, + solver="liblinear", + fit_intercept=False, + tol=1e-10, + ) + lr_liblinear.fit(X, y) + + lr_saga = LogisticRegression( + penalty="l1", + C=1.0, + solver="saga", + fit_intercept=False, + max_iter=1000, + tol=1e-10, + ) + lr_saga.fit(X, y) + assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_) + # Noise and constant features should be regularized to zero by the l1 + # penalty + assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5)) + assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5)) + + # Check that solving on the sparse and dense data yield the same results + lr_saga_dense = LogisticRegression( + penalty="l1", + C=1.0, + solver="saga", + fit_intercept=False, + max_iter=1000, + tol=1e-10, + ) + lr_saga_dense.fit(X.toarray(), y) + assert_array_almost_equal(lr_saga.coef_, lr_saga_dense.coef_) + + +@pytest.mark.parametrize("random_seed", [42]) +@pytest.mark.parametrize("penalty", ["l1", "l2"]) +def test_logistic_regression_cv_refit(random_seed, penalty): + # Test that when refit=True, logistic regression cv with the saga solver + # converges to the same solution as logistic regression with a fixed + # regularization parameter. + # Internally the LogisticRegressionCV model uses a warm start to refit on + # the full data model with the optimal C found by CV. As the penalized + # logistic regression loss is convex, we should still recover exactly + # the same solution as long as the stopping criterion is strict enough (and + # that there are no exactly duplicated features when penalty='l1'). 
+ X, y = make_classification(n_samples=100, n_features=20, random_state=random_seed) + common_params = dict( + solver="saga", + penalty=penalty, + random_state=random_seed, + max_iter=1000, + tol=1e-12, + ) + lr_cv = LogisticRegressionCV(Cs=[1.0], refit=True, **common_params) + lr_cv.fit(X, y) + lr = LogisticRegression(C=1.0, **common_params) + lr.fit(X, y) + assert_array_almost_equal(lr_cv.coef_, lr.coef_) + + +def test_logreg_predict_proba_multinomial(): + X, y = make_classification( + n_samples=10, n_features=20, random_state=0, n_classes=3, n_informative=10 + ) + + # Predicted probabilities using the true-entropy loss should give a + # smaller loss than those using the ovr method. + clf_multi = LogisticRegression(solver="lbfgs") + clf_multi.fit(X, y) + clf_multi_loss = log_loss(y, clf_multi.predict_proba(X)) + clf_ovr = OneVsRestClassifier(LogisticRegression(solver="lbfgs")) + clf_ovr.fit(X, y) + clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X)) + assert clf_ovr_loss > clf_multi_loss + + # Predicted probabilities using the soft-max function should give a + # smaller loss than those using the logistic function. 
+ clf_multi_loss = log_loss(y, clf_multi.predict_proba(X)) + clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X)) + assert clf_wrong_loss > clf_multi_loss + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +@pytest.mark.parametrize("max_iter", np.arange(1, 5)) +@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"]) +@pytest.mark.parametrize( + "solver, message", + [ + ( + "newton-cg", + "newton-cg failed to converge.* Increase the number of iterations.", + ), + ( + "liblinear", + "Liblinear failed to converge, increase the number of iterations.", + ), + ("sag", "The max_iter was reached which means the coef_ did not converge"), + ("saga", "The max_iter was reached which means the coef_ did not converge"), + ("lbfgs", "lbfgs failed to converge"), + ("newton-cholesky", "Newton solver did not converge after [0-9]* iterations"), + ], +) +def test_max_iter(max_iter, multi_class, solver, message): + # Test that the maximum number of iteration is reached + X, y_bin = iris.data, iris.target.copy() + y_bin[y_bin == 2] = 0 + + if solver in ("liblinear",) and multi_class == "multinomial": + pytest.skip("'multinomial' is not supported by liblinear") + if solver == "newton-cholesky" and max_iter > 1: + pytest.skip("solver newton-cholesky might converge very fast") + + lr = LogisticRegression( + max_iter=max_iter, + tol=1e-15, + multi_class=multi_class, + random_state=0, + solver=solver, + ) + with pytest.warns(ConvergenceWarning, match=message): + lr.fit(X, y_bin) + + assert lr.n_iter_[0] == max_iter + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +@pytest.mark.parametrize("solver", SOLVERS) +def test_n_iter(solver): + # Test that self.n_iter_ has the correct format. 
+ X, y = iris.data, iris.target + if solver == "lbfgs": + # lbfgs requires scaling to avoid convergence warnings + X = scale(X) + + n_classes = np.unique(y).shape[0] + assert n_classes == 3 + + # Also generate a binary classification sub-problem. + y_bin = y.copy() + y_bin[y_bin == 2] = 0 + + n_Cs = 4 + n_cv_fold = 2 + + # Binary classification case + clf = LogisticRegression(tol=1e-2, C=1.0, solver=solver, random_state=42) + clf.fit(X, y_bin) + assert clf.n_iter_.shape == (1,) + + clf_cv = LogisticRegressionCV( + tol=1e-2, solver=solver, Cs=n_Cs, cv=n_cv_fold, random_state=42 + ) + clf_cv.fit(X, y_bin) + assert clf_cv.n_iter_.shape == (1, n_cv_fold, n_Cs) + + # OvR case + clf.set_params(multi_class="ovr").fit(X, y) + assert clf.n_iter_.shape == (n_classes,) + + clf_cv.set_params(multi_class="ovr").fit(X, y) + assert clf_cv.n_iter_.shape == (n_classes, n_cv_fold, n_Cs) + + # multinomial case + if solver in ("liblinear",): + # This solver only supports one-vs-rest multiclass classification. + return + + # When using the multinomial objective function, there is a single + # optimization problem to solve for all classes at once: + clf.set_params(multi_class="multinomial").fit(X, y) + assert clf.n_iter_.shape == (1,) + + clf_cv.set_params(multi_class="multinomial").fit(X, y) + assert clf_cv.n_iter_.shape == (1, n_cv_fold, n_Cs) + + +@pytest.mark.parametrize( + "solver", sorted(set(SOLVERS) - set(["liblinear", "newton-cholesky"])) +) +@pytest.mark.parametrize("warm_start", (True, False)) +@pytest.mark.parametrize("fit_intercept", (True, False)) +def test_warm_start(solver, warm_start, fit_intercept): + # A 1-iteration second fit on same data should give almost same result + # with warm starting, and quite different result without warm starting. + # Warm starting does not work with liblinear solver. 
+ X, y = iris.data, iris.target + + clf = LogisticRegression( + tol=1e-4, + warm_start=warm_start, + solver=solver, + random_state=42, + fit_intercept=fit_intercept, + ) + with ignore_warnings(category=ConvergenceWarning): + clf.fit(X, y) + coef_1 = clf.coef_ + + clf.max_iter = 1 + clf.fit(X, y) + cum_diff = np.sum(np.abs(coef_1 - clf.coef_)) + msg = ( + f"Warm starting issue with solver {solver}" + f"with {fit_intercept=} and {warm_start=}" + ) + if warm_start: + assert 2.0 > cum_diff, msg + else: + assert cum_diff > 2.0, msg + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_saga_vs_liblinear(csr_container): + iris = load_iris() + X, y = iris.data, iris.target + X = np.concatenate([X] * 3) + y = np.concatenate([y] * 3) + + X_bin = X[y <= 1] + y_bin = y[y <= 1] * 2 - 1 + + X_sparse, y_sparse = make_classification( + n_samples=50, n_features=20, random_state=0 + ) + X_sparse = csr_container(X_sparse) + + for X, y in ((X_bin, y_bin), (X_sparse, y_sparse)): + for penalty in ["l1", "l2"]: + n_samples = X.shape[0] + # alpha=1e-3 is time consuming + for alpha in np.logspace(-1, 1, 3): + saga = LogisticRegression( + C=1.0 / (n_samples * alpha), + solver="saga", + max_iter=200, + fit_intercept=False, + penalty=penalty, + random_state=0, + tol=1e-6, + ) + + liblinear = LogisticRegression( + C=1.0 / (n_samples * alpha), + solver="liblinear", + max_iter=200, + fit_intercept=False, + penalty=penalty, + random_state=0, + tol=1e-6, + ) + + saga.fit(X, y) + liblinear.fit(X, y) + # Convergence for alpha=1e-3 is very slow + assert_array_almost_equal(saga.coef_, liblinear.coef_, 3) + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"]) +@pytest.mark.parametrize( + "solver", ["liblinear", "newton-cg", "newton-cholesky", "saga"] +) +@pytest.mark.parametrize("fit_intercept", [False, True]) 
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dtype_match(solver, multi_class, fit_intercept, csr_container): + # Test that np.float32 input data is not cast to np.float64 when possible + # and that the output is approximately the same no matter the input format. + + if solver == "liblinear" and multi_class == "multinomial": + pytest.skip(f"Solver={solver} does not support multinomial logistic.") + + out32_type = np.float64 if solver == "liblinear" else np.float32 + + X_32 = np.array(X).astype(np.float32) + y_32 = np.array(Y1).astype(np.float32) + X_64 = np.array(X).astype(np.float64) + y_64 = np.array(Y1).astype(np.float64) + X_sparse_32 = csr_container(X, dtype=np.float32) + X_sparse_64 = csr_container(X, dtype=np.float64) + solver_tol = 5e-4 + + lr_templ = LogisticRegression( + solver=solver, + multi_class=multi_class, + random_state=42, + tol=solver_tol, + fit_intercept=fit_intercept, + ) + + # Check 32-bit type consistency + lr_32 = clone(lr_templ) + lr_32.fit(X_32, y_32) + assert lr_32.coef_.dtype == out32_type + + # Check 32-bit type consistency with sparsity + lr_32_sparse = clone(lr_templ) + lr_32_sparse.fit(X_sparse_32, y_32) + assert lr_32_sparse.coef_.dtype == out32_type + + # Check 64-bit type consistency + lr_64 = clone(lr_templ) + lr_64.fit(X_64, y_64) + assert lr_64.coef_.dtype == np.float64 + + # Check 64-bit type consistency with sparsity + lr_64_sparse = clone(lr_templ) + lr_64_sparse.fit(X_sparse_64, y_64) + assert lr_64_sparse.coef_.dtype == np.float64 + + # solver_tol bounds the norm of the loss gradient + # dw ~= inv(H)*grad ==> |dw| ~= |inv(H)| * solver_tol, where H - hessian + # + # See https://github.com/scikit-learn/scikit-learn/pull/13645 + # + # with Z = np.hstack((np.ones((3,1)), np.array(X))) + # In [8]: np.linalg.norm(np.diag([0,2,2]) + np.linalg.inv((Z.T @ Z)/4)) + # Out[8]: 1.7193336918135917 + + # factor of 2 to get the ball diameter + atol = 2 * 1.72 * solver_tol + if os.name == "nt" and _IS_32BIT: + # 
FIXME + atol = 1e-2 + + # Check accuracy consistency + assert_allclose(lr_32.coef_, lr_64.coef_.astype(np.float32), atol=atol) + + if solver == "saga" and fit_intercept: + # FIXME: SAGA on sparse data fits the intercept inaccurately with the + # default tol and max_iter parameters. + atol = 1e-1 + + assert_allclose(lr_32.coef_, lr_32_sparse.coef_, atol=atol) + assert_allclose(lr_64.coef_, lr_64_sparse.coef_, atol=atol) + + +def test_warm_start_converge_LR(): + # Test to see that the logistic regression converges on warm start, + # with multi_class='multinomial'. Non-regressive test for #10836 + + rng = np.random.RandomState(0) + X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) + y = np.array([1] * 100 + [-1] * 100) + lr_no_ws = LogisticRegression(solver="sag", warm_start=False, random_state=0) + lr_ws = LogisticRegression(solver="sag", warm_start=True, random_state=0) + + lr_no_ws_loss = log_loss(y, lr_no_ws.fit(X, y).predict_proba(X)) + for i in range(5): + lr_ws.fit(X, y) + lr_ws_loss = log_loss(y, lr_ws.predict_proba(X)) + assert_allclose(lr_no_ws_loss, lr_ws_loss, rtol=1e-5) + + +def test_elastic_net_coeffs(): + # make sure elasticnet penalty gives different coefficients from l1 and l2 + # with saga solver (l1_ratio different from 0 or 1) + X, y = make_classification(random_state=0) + + C = 2.0 + l1_ratio = 0.5 + coeffs = list() + for penalty, ratio in (("elasticnet", l1_ratio), ("l1", None), ("l2", None)): + lr = LogisticRegression( + penalty=penalty, + C=C, + solver="saga", + random_state=0, + l1_ratio=ratio, + tol=1e-3, + max_iter=200, + ) + lr.fit(X, y) + coeffs.append(lr.coef_) + + elastic_net_coeffs, l1_coeffs, l2_coeffs = coeffs + # make sure coeffs differ by at least .1 + assert not np.allclose(elastic_net_coeffs, l1_coeffs, rtol=0, atol=0.1) + assert not np.allclose(elastic_net_coeffs, l2_coeffs, rtol=0, atol=0.1) + assert not np.allclose(l2_coeffs, l1_coeffs, rtol=0, atol=0.1) + + +@pytest.mark.parametrize("C", [0.001, 0.1, 1, 10, 
100, 1000, 1e6]) +@pytest.mark.parametrize("penalty, l1_ratio", [("l1", 1), ("l2", 0)]) +def test_elastic_net_l1_l2_equivalence(C, penalty, l1_ratio): + # Make sure elasticnet is equivalent to l1 when l1_ratio=1 and to l2 when + # l1_ratio=0. + X, y = make_classification(random_state=0) + + lr_enet = LogisticRegression( + penalty="elasticnet", + C=C, + l1_ratio=l1_ratio, + solver="saga", + random_state=0, + tol=1e-2, + ) + lr_expected = LogisticRegression( + penalty=penalty, C=C, solver="saga", random_state=0, tol=1e-2 + ) + lr_enet.fit(X, y) + lr_expected.fit(X, y) + + assert_array_almost_equal(lr_enet.coef_, lr_expected.coef_) + + +@pytest.mark.parametrize("C", [0.001, 1, 100, 1e6]) +def test_elastic_net_vs_l1_l2(C): + # Make sure that elasticnet with grid search on l1_ratio gives same or + # better results than just l1 or just l2. + + X, y = make_classification(500, random_state=0) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + param_grid = {"l1_ratio": np.linspace(0, 1, 5)} + + enet_clf = LogisticRegression( + penalty="elasticnet", C=C, solver="saga", random_state=0, tol=1e-2 + ) + gs = GridSearchCV(enet_clf, param_grid, refit=True) + + l1_clf = LogisticRegression( + penalty="l1", C=C, solver="saga", random_state=0, tol=1e-2 + ) + l2_clf = LogisticRegression( + penalty="l2", C=C, solver="saga", random_state=0, tol=1e-2 + ) + + for clf in (gs, l1_clf, l2_clf): + clf.fit(X_train, y_train) + + assert gs.score(X_test, y_test) >= l1_clf.score(X_test, y_test) + assert gs.score(X_test, y_test) >= l2_clf.score(X_test, y_test) + + +@pytest.mark.parametrize("C", np.logspace(-3, 2, 4)) +@pytest.mark.parametrize("l1_ratio", [0.1, 0.5, 0.9]) +def test_LogisticRegression_elastic_net_objective(C, l1_ratio): + # Check that training with a penalty matching the objective leads + # to a lower objective. + # Here we train a logistic regression with l2 (a) and elasticnet (b) + # penalties, and compute the elasticnet objective. 
That of a should be + # greater than that of b (both objectives are convex). + X, y = make_classification( + n_samples=1000, + n_classes=2, + n_features=20, + n_informative=10, + n_redundant=0, + n_repeated=0, + random_state=0, + ) + X = scale(X) + + lr_enet = LogisticRegression( + penalty="elasticnet", + solver="saga", + random_state=0, + C=C, + l1_ratio=l1_ratio, + fit_intercept=False, + ) + lr_l2 = LogisticRegression( + penalty="l2", solver="saga", random_state=0, C=C, fit_intercept=False + ) + lr_enet.fit(X, y) + lr_l2.fit(X, y) + + def enet_objective(lr): + coef = lr.coef_.ravel() + obj = C * log_loss(y, lr.predict_proba(X)) + obj += l1_ratio * np.sum(np.abs(coef)) + obj += (1.0 - l1_ratio) * 0.5 * np.dot(coef, coef) + return obj + + assert enet_objective(lr_enet) < enet_objective(lr_l2) + + +@pytest.mark.parametrize("n_classes", (2, 3)) +def test_LogisticRegressionCV_GridSearchCV_elastic_net(n_classes): + # make sure LogisticRegressionCV gives same best params (l1 and C) as + # GridSearchCV when penalty is elasticnet + + X, y = make_classification( + n_samples=100, n_classes=n_classes, n_informative=3, random_state=0 + ) + + cv = StratifiedKFold(5) + + l1_ratios = np.linspace(0, 1, 3) + Cs = np.logspace(-4, 4, 3) + + lrcv = LogisticRegressionCV( + penalty="elasticnet", + Cs=Cs, + solver="saga", + cv=cv, + l1_ratios=l1_ratios, + random_state=0, + tol=1e-2, + ) + lrcv.fit(X, y) + + param_grid = {"C": Cs, "l1_ratio": l1_ratios} + lr = LogisticRegression( + penalty="elasticnet", + solver="saga", + random_state=0, + tol=1e-2, + ) + gs = GridSearchCV(lr, param_grid, cv=cv) + gs.fit(X, y) + + assert gs.best_params_["l1_ratio"] == lrcv.l1_ratio_[0] + assert gs.best_params_["C"] == lrcv.C_[0] + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +# Maybe remove whole test after removal of the deprecated multi_class. 
+@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +def test_LogisticRegressionCV_GridSearchCV_elastic_net_ovr(): + # make sure LogisticRegressionCV gives same best params (l1 and C) as + # GridSearchCV when penalty is elasticnet and multiclass is ovr. We can't + # compare best_params like in the previous test because + # LogisticRegressionCV with multi_class='ovr' will have one C and one + # l1_param for each class, while LogisticRegression will share the + # parameters over the *n_classes* classifiers. + + X, y = make_classification( + n_samples=100, n_classes=3, n_informative=3, random_state=0 + ) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + cv = StratifiedKFold(5) + + l1_ratios = np.linspace(0, 1, 3) + Cs = np.logspace(-4, 4, 3) + + lrcv = LogisticRegressionCV( + penalty="elasticnet", + Cs=Cs, + solver="saga", + cv=cv, + l1_ratios=l1_ratios, + random_state=0, + multi_class="ovr", + tol=1e-2, + ) + lrcv.fit(X_train, y_train) + + param_grid = {"C": Cs, "l1_ratio": l1_ratios} + lr = LogisticRegression( + penalty="elasticnet", + solver="saga", + random_state=0, + multi_class="ovr", + tol=1e-2, + ) + gs = GridSearchCV(lr, param_grid, cv=cv) + gs.fit(X_train, y_train) + + # Check that predictions are 80% the same + assert (lrcv.predict(X_train) == gs.predict(X_train)).mean() >= 0.8 + assert (lrcv.predict(X_test) == gs.predict(X_test)).mean() >= 0.8 + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +@pytest.mark.parametrize("penalty", ("l2", "elasticnet")) +@pytest.mark.parametrize("multi_class", ("ovr", "multinomial", "auto")) +def test_LogisticRegressionCV_no_refit(penalty, multi_class): + # Test LogisticRegressionCV attribute shapes when refit is False + + n_classes = 3 + n_features = 20 + X, y = make_classification( + n_samples=200, + n_classes=n_classes, + n_informative=n_classes, + 
n_features=n_features, + random_state=0, + ) + + Cs = np.logspace(-4, 4, 3) + if penalty == "elasticnet": + l1_ratios = np.linspace(0, 1, 2) + else: + l1_ratios = None + + lrcv = LogisticRegressionCV( + penalty=penalty, + Cs=Cs, + solver="saga", + l1_ratios=l1_ratios, + random_state=0, + multi_class=multi_class, + tol=1e-2, + refit=False, + ) + lrcv.fit(X, y) + assert lrcv.C_.shape == (n_classes,) + assert lrcv.l1_ratio_.shape == (n_classes,) + assert lrcv.coef_.shape == (n_classes, n_features) + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +# Remove multi_class an change first element of the expected n_iter_.shape from +# n_classes to 1 (according to the docstring). +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +def test_LogisticRegressionCV_elasticnet_attribute_shapes(): + # Make sure the shapes of scores_ and coefs_paths_ attributes are correct + # when using elasticnet (added one dimension for l1_ratios) + + n_classes = 3 + n_features = 20 + X, y = make_classification( + n_samples=200, + n_classes=n_classes, + n_informative=n_classes, + n_features=n_features, + random_state=0, + ) + + Cs = np.logspace(-4, 4, 3) + l1_ratios = np.linspace(0, 1, 2) + + n_folds = 2 + lrcv = LogisticRegressionCV( + penalty="elasticnet", + Cs=Cs, + solver="saga", + cv=n_folds, + l1_ratios=l1_ratios, + multi_class="ovr", + random_state=0, + tol=1e-2, + ) + lrcv.fit(X, y) + coefs_paths = np.asarray(list(lrcv.coefs_paths_.values())) + assert coefs_paths.shape == ( + n_classes, + n_folds, + Cs.size, + l1_ratios.size, + n_features + 1, + ) + scores = np.asarray(list(lrcv.scores_.values())) + assert scores.shape == (n_classes, n_folds, Cs.size, l1_ratios.size) + + assert lrcv.n_iter_.shape == (n_classes, n_folds, Cs.size, l1_ratios.size) + + +def test_l1_ratio_non_elasticnet(): + msg = ( + r"l1_ratio parameter is only used when penalty is" + r" 'elasticnet'\. 
Got \(penalty=l1\)" + ) + with pytest.warns(UserWarning, match=msg): + LogisticRegression(penalty="l1", solver="saga", l1_ratio=0.5).fit(X, Y1) + + +@pytest.mark.parametrize("C", np.logspace(-3, 2, 4)) +@pytest.mark.parametrize("l1_ratio", [0.1, 0.5, 0.9]) +def test_elastic_net_versus_sgd(C, l1_ratio): + # Compare elasticnet penalty in LogisticRegression() and SGD(loss='log') + n_samples = 500 + X, y = make_classification( + n_samples=n_samples, + n_classes=2, + n_features=5, + n_informative=5, + n_redundant=0, + n_repeated=0, + random_state=1, + ) + X = scale(X) + + sgd = SGDClassifier( + penalty="elasticnet", + random_state=1, + fit_intercept=False, + tol=None, + max_iter=2000, + l1_ratio=l1_ratio, + alpha=1.0 / C / n_samples, + loss="log_loss", + ) + log = LogisticRegression( + penalty="elasticnet", + random_state=1, + fit_intercept=False, + tol=1e-5, + max_iter=1000, + l1_ratio=l1_ratio, + C=C, + solver="saga", + ) + + sgd.fit(X, y) + log.fit(X, y) + assert_array_almost_equal(sgd.coef_, log.coef_, decimal=1) + + +def test_logistic_regression_path_coefs_multinomial(): + # Make sure that the returned coefs by logistic_regression_path when + # multi_class='multinomial' don't override each other (used to be a + # bug). 
+ X, y = make_classification( + n_samples=200, + n_classes=3, + n_informative=2, + n_redundant=0, + n_clusters_per_class=1, + random_state=0, + n_features=2, + ) + Cs = [0.00001, 1, 10000] + coefs, _, _ = _logistic_regression_path( + X, + y, + penalty="l1", + Cs=Cs, + solver="saga", + random_state=0, + multi_class="multinomial", + ) + + with pytest.raises(AssertionError): + assert_array_almost_equal(coefs[0], coefs[1], decimal=1) + with pytest.raises(AssertionError): + assert_array_almost_equal(coefs[0], coefs[2], decimal=1) + with pytest.raises(AssertionError): + assert_array_almost_equal(coefs[1], coefs[2], decimal=1) + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +@pytest.mark.parametrize( + "est", + [ + LogisticRegression(random_state=0, max_iter=500), + LogisticRegressionCV(random_state=0, cv=3, Cs=3, tol=1e-3, max_iter=500), + ], + ids=lambda x: x.__class__.__name__, +) +@pytest.mark.parametrize("solver", SOLVERS) +def test_logistic_regression_multi_class_auto(est, solver): + # check multi_class='auto' => multi_class='ovr' + # iff binary y or liblinear + + def fit(X, y, **kw): + return clone(est).set_params(**kw).fit(X, y) + + scaled_data = scale(iris.data) + X = scaled_data[::10] + X2 = scaled_data[1::10] + y_multi = iris.target[::10] + y_bin = y_multi == 0 + est_auto_bin = fit(X, y_bin, multi_class="auto", solver=solver) + est_ovr_bin = fit(X, y_bin, multi_class="ovr", solver=solver) + assert_allclose(est_auto_bin.coef_, est_ovr_bin.coef_) + assert_allclose(est_auto_bin.predict_proba(X2), est_ovr_bin.predict_proba(X2)) + + est_auto_multi = fit(X, y_multi, multi_class="auto", solver=solver) + if solver == "liblinear": + est_ovr_multi = fit(X, y_multi, multi_class="ovr", solver=solver) + assert_allclose(est_auto_multi.coef_, est_ovr_multi.coef_) + assert_allclose( + est_auto_multi.predict_proba(X2), est_ovr_multi.predict_proba(X2) + ) + else: + 
est_multi_multi = fit(X, y_multi, multi_class="multinomial", solver=solver) + assert_allclose(est_auto_multi.coef_, est_multi_multi.coef_) + assert_allclose( + est_auto_multi.predict_proba(X2), est_multi_multi.predict_proba(X2) + ) + + # Make sure multi_class='ovr' is distinct from ='multinomial' + assert not np.allclose( + est_auto_bin.coef_, + fit(X, y_bin, multi_class="multinomial", solver=solver).coef_, + ) + assert not np.allclose( + est_auto_bin.coef_, + fit(X, y_multi, multi_class="multinomial", solver=solver).coef_, + ) + + +@pytest.mark.parametrize("solver", sorted(set(SOLVERS) - set(["liblinear"]))) +def test_penalty_none(solver): + # - Make sure warning is raised if penalty=None and C is set to a + # non-default value. + # - Make sure setting penalty=None is equivalent to setting C=np.inf with + # l2 penalty. + X, y = make_classification(n_samples=1000, n_redundant=0, random_state=0) + + msg = "Setting penalty=None will ignore the C" + lr = LogisticRegression(penalty=None, solver=solver, C=4) + with pytest.warns(UserWarning, match=msg): + lr.fit(X, y) + + lr_none = LogisticRegression(penalty=None, solver=solver, random_state=0) + lr_l2_C_inf = LogisticRegression( + penalty="l2", C=np.inf, solver=solver, random_state=0 + ) + pred_none = lr_none.fit(X, y).predict(X) + pred_l2_C_inf = lr_l2_C_inf.fit(X, y).predict(X) + assert_array_equal(pred_none, pred_l2_C_inf) + + +@pytest.mark.parametrize( + "params", + [ + {"penalty": "l1", "dual": False, "tol": 1e-6, "max_iter": 1000}, + {"penalty": "l2", "dual": True, "tol": 1e-12, "max_iter": 1000}, + {"penalty": "l2", "dual": False, "tol": 1e-12, "max_iter": 1000}, + ], +) +def test_logisticregression_liblinear_sample_weight(params): + # check that we support sample_weight with liblinear in all possible cases: + # l1-primal, l2-primal, l2-dual + X = np.array( + [ + [1, 3], + [1, 3], + [1, 3], + [1, 3], + [2, 1], + [2, 1], + [2, 1], + [2, 1], + [3, 3], + [3, 3], + [3, 3], + [3, 3], + [4, 1], + [4, 1], + [4, 1], + 
[4, 1], + ], + dtype=np.dtype("float"), + ) + y = np.array( + [1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=np.dtype("int") + ) + + X2 = np.vstack([X, X]) + y2 = np.hstack([y, 3 - y]) + sample_weight = np.ones(shape=len(y) * 2) + sample_weight[len(y) :] = 0 + X2, y2, sample_weight = shuffle(X2, y2, sample_weight, random_state=0) + + base_clf = LogisticRegression(solver="liblinear", random_state=42) + base_clf.set_params(**params) + clf_no_weight = clone(base_clf).fit(X, y) + clf_with_weight = clone(base_clf).fit(X2, y2, sample_weight=sample_weight) + + for method in ("predict", "predict_proba", "decision_function"): + X_clf_no_weight = getattr(clf_no_weight, method)(X) + X_clf_with_weight = getattr(clf_with_weight, method)(X) + assert_allclose(X_clf_no_weight, X_clf_with_weight) + + +def test_scores_attribute_layout_elasticnet(): + # Non regression test for issue #14955. + # when penalty is elastic net the scores_ attribute has shape + # (n_classes, n_Cs, n_l1_ratios) + # We here make sure that the second dimension indeed corresponds to Cs and + # the third dimension corresponds to l1_ratios. 
+ + X, y = make_classification(n_samples=1000, random_state=0) + cv = StratifiedKFold(n_splits=5) + + l1_ratios = [0.1, 0.9] + Cs = [0.1, 1, 10] + + lrcv = LogisticRegressionCV( + penalty="elasticnet", + solver="saga", + l1_ratios=l1_ratios, + Cs=Cs, + cv=cv, + random_state=0, + max_iter=250, + tol=1e-3, + ) + lrcv.fit(X, y) + + avg_scores_lrcv = lrcv.scores_[1].mean(axis=0) # average over folds + + for i, C in enumerate(Cs): + for j, l1_ratio in enumerate(l1_ratios): + lr = LogisticRegression( + penalty="elasticnet", + solver="saga", + C=C, + l1_ratio=l1_ratio, + random_state=0, + max_iter=250, + tol=1e-3, + ) + + avg_score_lr = cross_val_score(lr, X, y, cv=cv).mean() + assert avg_scores_lrcv[i, j] == pytest.approx(avg_score_lr) + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +@pytest.mark.parametrize("solver", ["lbfgs", "newton-cg", "newton-cholesky"]) +@pytest.mark.parametrize("fit_intercept", [False, True]) +def test_multinomial_identifiability_on_iris(solver, fit_intercept): + """Test that the multinomial classification is identifiable. + + A multinomial with c classes can be modeled with + probability_k = exp(X@coef_k) / sum(exp(X@coef_l), l=1..c) for k=1..c. + This is not identifiable, unless one chooses a further constraint. + According to [1], the maximum of the L2 penalized likelihood automatically + satisfies the symmetric constraint: + sum(coef_k, k=1..c) = 0 + + Further details can be found in [2]. + + Reference + --------- + .. [1] :doi:`Zhu, Ji and Trevor J. Hastie. "Classification of gene microarrays by + penalized logistic regression". Biostatistics 5 3 (2004): 427-43. + <10.1093/biostatistics/kxg046>` + + .. [2] :arxiv:`Noah Simon and Jerome Friedman and Trevor Hastie. (2013) + "A Blockwise Descent Algorithm for Group-penalized Multiresponse and + Multinomial Regression". 
<1311.6529>` + """ + # Test logistic regression with the iris dataset + n_samples, n_features = iris.data.shape + target = iris.target_names[iris.target] + + clf = LogisticRegression( + C=len(iris.data), + solver="lbfgs", + fit_intercept=fit_intercept, + ) + # Scaling X to ease convergence. + X_scaled = scale(iris.data) + clf.fit(X_scaled, target) + + # axis=0 is sum over classes + assert_allclose(clf.coef_.sum(axis=0), 0, atol=1e-10) + if fit_intercept: + assert clf.intercept_.sum(axis=0) == pytest.approx(0, abs=1e-11) + + +# TODO(1.7): remove filterwarnings after the deprecation of multi_class +@pytest.mark.filterwarnings("ignore:.*'multi_class' was deprecated.*:FutureWarning") +@pytest.mark.parametrize("multi_class", ["ovr", "multinomial", "auto"]) +@pytest.mark.parametrize("class_weight", [{0: 1.0, 1: 10.0, 2: 1.0}, "balanced"]) +def test_sample_weight_not_modified(multi_class, class_weight): + X, y = load_iris(return_X_y=True) + n_features = len(X) + W = np.ones(n_features) + W[: n_features // 2] = 2 + + expected = W.copy() + + clf = LogisticRegression( + random_state=0, class_weight=class_weight, max_iter=200, multi_class=multi_class + ) + clf.fit(X, y, sample_weight=W) + assert_allclose(expected, W) + + +@pytest.mark.parametrize("solver", SOLVERS) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_large_sparse_matrix(solver, global_random_seed, csr_container): + # Solvers either accept large sparse matrices, or raise helpful error. + # Non-regression test for pull-request #21093. 
+ + # generate sparse matrix with int64 indices + X = csr_container(sparse.rand(20, 10, random_state=global_random_seed)) + for attr in ["indices", "indptr"]: + setattr(X, attr, getattr(X, attr).astype("int64")) + rng = np.random.RandomState(global_random_seed) + y = rng.randint(2, size=X.shape[0]) + + if solver in ["liblinear", "sag", "saga"]: + msg = "Only sparse matrices with 32-bit integer indices" + with pytest.raises(ValueError, match=msg): + LogisticRegression(solver=solver).fit(X, y) + else: + LogisticRegression(solver=solver).fit(X, y) + + +def test_single_feature_newton_cg(): + # Test that Newton-CG works with a single feature and intercept. + # Non-regression test for issue #23605. + + X = np.array([[0.5, 0.65, 1.1, 1.25, 0.8, 0.54, 0.95, 0.7]]).T + y = np.array([1, 1, 0, 0, 1, 1, 0, 1]) + assert X.shape[1] == 1 + LogisticRegression(solver="newton-cg", fit_intercept=True).fit(X, y) + + +def test_liblinear_not_stuck(): + # Non-regression https://github.com/scikit-learn/scikit-learn/issues/18264 + X = iris.data.copy() + y = iris.target.copy() + X = X[y != 2] + y = y[y != 2] + X_prep = StandardScaler().fit_transform(X) + + C = l1_min_c(X, y, loss="log") * 10 ** (10 / 29) + clf = LogisticRegression( + penalty="l1", + solver="liblinear", + tol=1e-6, + max_iter=100, + intercept_scaling=10000.0, + random_state=0, + C=C, + ) + + # test that the fit does not raise a ConvergenceWarning + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + clf.fit(X_prep, y) + + +@config_context(enable_metadata_routing=True) +def test_lr_cv_scores_differ_when_sample_weight_is_requested(): + """Test that `sample_weight` is correctly passed to the scorer in + `LogisticRegressionCV.fit` and `LogisticRegressionCV.score` by + checking the difference in scores with the case when `sample_weight` + is not requested. 
+ """ + rng = np.random.RandomState(10) + X, y = make_classification(n_samples=10, random_state=rng) + X_t, y_t = make_classification(n_samples=10, random_state=rng) + sample_weight = np.ones(len(y)) + sample_weight[: len(y) // 2] = 2 + kwargs = {"sample_weight": sample_weight} + + scorer1 = get_scorer("accuracy") + lr_cv1 = LogisticRegressionCV(scoring=scorer1) + lr_cv1.fit(X, y, **kwargs) + + scorer2 = get_scorer("accuracy") + scorer2.set_score_request(sample_weight=True) + lr_cv2 = LogisticRegressionCV(scoring=scorer2) + lr_cv2.fit(X, y, **kwargs) + + assert not np.allclose(lr_cv1.scores_[1], lr_cv2.scores_[1]) + + score_1 = lr_cv1.score(X_t, y_t, **kwargs) + score_2 = lr_cv2.score(X_t, y_t, **kwargs) + + assert not np.allclose(score_1, score_2) + + +def test_lr_cv_scores_without_enabling_metadata_routing(): + """Test that `sample_weight` is passed correctly to the scorer in + `LogisticRegressionCV.fit` and `LogisticRegressionCV.score` even + when `enable_metadata_routing=False` + """ + rng = np.random.RandomState(10) + X, y = make_classification(n_samples=10, random_state=rng) + X_t, y_t = make_classification(n_samples=10, random_state=rng) + sample_weight = np.ones(len(y)) + sample_weight[: len(y) // 2] = 2 + kwargs = {"sample_weight": sample_weight} + + with config_context(enable_metadata_routing=False): + scorer1 = get_scorer("accuracy") + lr_cv1 = LogisticRegressionCV(scoring=scorer1) + lr_cv1.fit(X, y, **kwargs) + score_1 = lr_cv1.score(X_t, y_t, **kwargs) + + with config_context(enable_metadata_routing=True): + scorer2 = get_scorer("accuracy") + scorer2.set_score_request(sample_weight=True) + lr_cv2 = LogisticRegressionCV(scoring=scorer2) + lr_cv2.fit(X, y, **kwargs) + score_2 = lr_cv2.score(X_t, y_t, **kwargs) + + assert_allclose(lr_cv1.scores_[1], lr_cv2.scores_[1]) + assert_allclose(score_1, score_2) + + +@pytest.mark.parametrize("solver", SOLVERS) +def test_zero_max_iter(solver): + # Make sure we can inspect the state of LogisticRegression right after 
+ # initialization (before the first weight update). + X, y = load_iris(return_X_y=True) + y = y == 2 + with ignore_warnings(category=ConvergenceWarning): + clf = LogisticRegression(solver=solver, max_iter=0).fit(X, y) + if solver not in ["saga", "sag"]: + # XXX: sag and saga have n_iter_ = [1]... + assert clf.n_iter_ == 0 + + if solver != "lbfgs": + # XXX: lbfgs has already started to update the coefficients... + assert_allclose(clf.coef_, np.zeros_like(clf.coef_)) + assert_allclose( + clf.decision_function(X), + np.full(shape=X.shape[0], fill_value=clf.intercept_), + ) + assert_allclose( + clf.predict_proba(X), + np.full(shape=(X.shape[0], 2), fill_value=0.5), + ) + assert clf.score(X, y) < 0.7 + + +def test_passing_params_without_enabling_metadata_routing(): + """Test that the right error message is raised when metadata params + are passed while not supported when `enable_metadata_routing=False`.""" + X, y = make_classification(n_samples=10, random_state=0) + lr_cv = LogisticRegressionCV() + msg = "is only supported if enable_metadata_routing=True" + + with config_context(enable_metadata_routing=False): + params = {"extra_param": 1.0} + + with pytest.raises(ValueError, match=msg): + lr_cv.fit(X, y, **params) + + with pytest.raises(ValueError, match=msg): + lr_cv.score(X, y, **params) + + +# TODO(1.7): remove +def test_multi_class_deprecated(): + """Check `multi_class` parameter deprecated.""" + X, y = make_classification(n_classes=3, n_samples=50, n_informative=6) + lr = LogisticRegression(multi_class="ovr") + msg = "'multi_class' was deprecated" + with pytest.warns(FutureWarning, match=msg): + lr.fit(X, y) + + lrCV = LogisticRegressionCV(multi_class="ovr") + with pytest.warns(FutureWarning, match=msg): + lrCV.fit(X, y) + + # Special warning for "binary multinomial" + X, y = make_classification(n_classes=2, n_samples=50, n_informative=6) + lr = LogisticRegression(multi_class="multinomial") + msg = "'multi_class' was deprecated.*binary problems" + with 
pytest.warns(FutureWarning, match=msg): + lr.fit(X, y) + + lrCV = LogisticRegressionCV(multi_class="multinomial") + with pytest.warns(FutureWarning, match=msg): + lrCV.fit(X, y) + + +def test_newton_cholesky_fallback_to_lbfgs(global_random_seed): + # Wide data matrix should lead to a rank-deficient Hessian matrix + # hence make the Newton-Cholesky solver raise a warning and fallback to + # lbfgs. + X, y = make_classification( + n_samples=10, n_features=20, random_state=global_random_seed + ) + C = 1e30 # very high C to nearly disable regularization + + # Check that LBFGS can converge without any warning on this problem. + lr_lbfgs = LogisticRegression(solver="lbfgs", C=C) + with warnings.catch_warnings(): + warnings.simplefilter("error") + lr_lbfgs.fit(X, y) + n_iter_lbfgs = lr_lbfgs.n_iter_[0] + + assert n_iter_lbfgs >= 1 + + # Check that the Newton-Cholesky solver raises a warning and falls back to + # LBFGS. This should converge with the same number of iterations as the + # above call of lbfgs since the Newton-Cholesky triggers the fallback + # before completing the first iteration, for the problem setting at hand. 
+ lr_nc = LogisticRegression(solver="newton-cholesky", C=C) + with ignore_warnings(category=LinAlgWarning): + lr_nc.fit(X, y) + n_iter_nc = lr_nc.n_iter_[0] + + assert n_iter_nc == n_iter_lbfgs + + # Trying to fit the same model again with a small iteration budget should + # therefore raise a ConvergenceWarning: + lr_nc_limited = LogisticRegression( + solver="newton-cholesky", C=C, max_iter=n_iter_lbfgs - 1 + ) + with ignore_warnings(category=LinAlgWarning): + with pytest.warns(ConvergenceWarning, match="lbfgs failed to converge"): + lr_nc_limited.fit(X, y) + n_iter_nc_limited = lr_nc_limited.n_iter_[0] + + assert n_iter_nc_limited == lr_nc_limited.max_iter - 1 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_omp.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_omp.py new file mode 100644 index 0000000000000000000000000000000000000000..cfdffe581e034ac33378bbd36e4dc64025fbcfcd --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_omp.py @@ -0,0 +1,273 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + + +import numpy as np +import pytest + +from sklearn.datasets import make_sparse_coded_signal +from sklearn.linear_model import ( + LinearRegression, + OrthogonalMatchingPursuit, + OrthogonalMatchingPursuitCV, + orthogonal_mp, + orthogonal_mp_gram, +) +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) + +n_samples, n_features, n_nonzero_coefs, n_targets = 25, 35, 5, 3 +y, X, gamma = make_sparse_coded_signal( + n_samples=n_targets, + n_components=n_features, + n_features=n_samples, + n_nonzero_coefs=n_nonzero_coefs, + random_state=0, +) +y, X, gamma = y.T, X.T, gamma.T +# Make X not of norm 1 for testing +X *= 10 +y *= 10 +G, Xy = np.dot(X.T, X), np.dot(X.T, y) +# this makes X (n_samples, n_features) +# 
and y (n_samples, 3) + + +def test_correct_shapes(): + assert orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape == (n_features,) + assert orthogonal_mp(X, y, n_nonzero_coefs=5).shape == (n_features, 3) + + +def test_correct_shapes_gram(): + assert orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape == (n_features,) + assert orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape == (n_features, 3) + + +def test_n_nonzero_coefs(): + assert np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)) <= 5 + assert ( + np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5, precompute=True)) + <= 5 + ) + + +def test_tol(): + tol = 0.5 + gamma = orthogonal_mp(X, y[:, 0], tol=tol) + gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True) + assert np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol + assert np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol + + +def test_with_without_gram(): + assert_array_almost_equal( + orthogonal_mp(X, y, n_nonzero_coefs=5), + orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True), + ) + + +def test_with_without_gram_tol(): + assert_array_almost_equal( + orthogonal_mp(X, y, tol=1.0), orthogonal_mp(X, y, tol=1.0, precompute=True) + ) + + +def test_unreachable_accuracy(): + assert_array_almost_equal( + orthogonal_mp(X, y, tol=0), orthogonal_mp(X, y, n_nonzero_coefs=n_features) + ) + warning_message = ( + "Orthogonal matching pursuit ended prematurely " + "due to linear dependence in the dictionary. " + "The requested precision might not have been met." 
+ ) + with pytest.warns(RuntimeWarning, match=warning_message): + assert_array_almost_equal( + orthogonal_mp(X, y, tol=0, precompute=True), + orthogonal_mp(X, y, precompute=True, n_nonzero_coefs=n_features), + ) + + +@pytest.mark.parametrize("positional_params", [(X, y), (G, Xy)]) +@pytest.mark.parametrize( + "keyword_params", + [{"n_nonzero_coefs": n_features + 1}], +) +def test_bad_input(positional_params, keyword_params): + with pytest.raises(ValueError): + orthogonal_mp(*positional_params, **keyword_params) + + +def test_perfect_signal_recovery(): + (idx,) = gamma[:, 0].nonzero() + gamma_rec = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5) + gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5) + assert_array_equal(idx, np.flatnonzero(gamma_rec)) + assert_array_equal(idx, np.flatnonzero(gamma_gram)) + assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2) + assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2) + + +def test_orthogonal_mp_gram_readonly(): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/5956 + (idx,) = gamma[:, 0].nonzero() + G_readonly = G.copy() + G_readonly.setflags(write=False) + Xy_readonly = Xy.copy() + Xy_readonly.setflags(write=False) + gamma_gram = orthogonal_mp_gram( + G_readonly, Xy_readonly[:, 0], n_nonzero_coefs=5, copy_Gram=False, copy_Xy=False + ) + assert_array_equal(idx, np.flatnonzero(gamma_gram)) + assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2) + + +def test_estimator(): + omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs) + omp.fit(X, y[:, 0]) + assert omp.coef_.shape == (n_features,) + assert omp.intercept_.shape == () + assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs + + omp.fit(X, y) + assert omp.coef_.shape == (n_targets, n_features) + assert omp.intercept_.shape == (n_targets,) + assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs + + coef_normalized = omp.coef_[0].copy() + omp.set_params(fit_intercept=True) + 
omp.fit(X, y[:, 0]) + assert_array_almost_equal(coef_normalized, omp.coef_) + + omp.set_params(fit_intercept=False) + omp.fit(X, y[:, 0]) + assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs + assert omp.coef_.shape == (n_features,) + assert omp.intercept_ == 0 + + omp.fit(X, y) + assert omp.coef_.shape == (n_targets, n_features) + assert omp.intercept_ == 0 + assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs + + +def test_estimator_n_nonzero_coefs(): + """Check `n_nonzero_coefs_` correct when `tol` is and isn't set.""" + omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs) + omp.fit(X, y[:, 0]) + assert omp.n_nonzero_coefs_ == n_nonzero_coefs + + omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs, tol=0.5) + omp.fit(X, y[:, 0]) + assert omp.n_nonzero_coefs_ is None + + +def test_identical_regressors(): + newX = X.copy() + newX[:, 1] = newX[:, 0] + gamma = np.zeros(n_features) + gamma[0] = gamma[1] = 1.0 + newy = np.dot(newX, gamma) + warning_message = ( + "Orthogonal matching pursuit ended prematurely " + "due to linear dependence in the dictionary. " + "The requested precision might not have been met." 
+ ) + with pytest.warns(RuntimeWarning, match=warning_message): + orthogonal_mp(newX, newy, n_nonzero_coefs=2) + + +def test_swapped_regressors(): + gamma = np.zeros(n_features) + # X[:, 21] should be selected first, then X[:, 0] selected second, + # which will take X[:, 21]'s place in case the algorithm does + # column swapping for optimization (which is the case at the moment) + gamma[21] = 1.0 + gamma[0] = 0.5 + new_y = np.dot(X, gamma) + new_Xy = np.dot(X.T, new_y) + gamma_hat = orthogonal_mp(X, new_y, n_nonzero_coefs=2) + gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, n_nonzero_coefs=2) + assert_array_equal(np.flatnonzero(gamma_hat), [0, 21]) + assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21]) + + +def test_no_atoms(): + y_empty = np.zeros_like(y) + Xy_empty = np.dot(X.T, y_empty) + gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, n_nonzero_coefs=1) + gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, n_nonzero_coefs=1) + assert np.all(gamma_empty == 0) + assert np.all(gamma_empty_gram == 0) + + +def test_omp_path(): + path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True) + last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False) + assert path.shape == (n_features, n_targets, 5) + assert_array_almost_equal(path[:, :, -1], last) + path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True) + last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False) + assert path.shape == (n_features, n_targets, 5) + assert_array_almost_equal(path[:, :, -1], last) + + +def test_omp_return_path_prop_with_gram(): + path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True, precompute=True) + last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False, precompute=True) + assert path.shape == (n_features, n_targets, 5) + assert_array_almost_equal(path[:, :, -1], last) + + +def test_omp_cv(): + y_ = y[:, 0] + gamma_ = gamma[:, 0] + ompcv = OrthogonalMatchingPursuitCV(fit_intercept=False, max_iter=10) + 
ompcv.fit(X, y_) + assert ompcv.n_nonzero_coefs_ == n_nonzero_coefs + assert_array_almost_equal(ompcv.coef_, gamma_) + omp = OrthogonalMatchingPursuit( + fit_intercept=False, n_nonzero_coefs=ompcv.n_nonzero_coefs_ + ) + omp.fit(X, y_) + assert_array_almost_equal(ompcv.coef_, omp.coef_) + + +def test_omp_reaches_least_squares(): + # Use small simple data; it's a sanity check but OMP can stop early + rng = check_random_state(0) + n_samples, n_features = (10, 8) + n_targets = 3 + X = rng.randn(n_samples, n_features) + Y = rng.randn(n_samples, n_targets) + omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features) + lstsq = LinearRegression() + omp.fit(X, Y) + lstsq.fit(X, Y) + assert_array_almost_equal(omp.coef_, lstsq.coef_) + + +@pytest.mark.parametrize("data_type", (np.float32, np.float64)) +def test_omp_gram_dtype_match(data_type): + # verify matching input data type and output data type + coef = orthogonal_mp_gram( + G.astype(data_type), Xy.astype(data_type), n_nonzero_coefs=5 + ) + assert coef.dtype == data_type + + +def test_omp_gram_numerical_consistency(): + # verify numericaly consistency among np.float32 and np.float64 + coef_32 = orthogonal_mp_gram( + G.astype(np.float32), Xy.astype(np.float32), n_nonzero_coefs=5 + ) + coef_64 = orthogonal_mp_gram( + G.astype(np.float32), Xy.astype(np.float64), n_nonzero_coefs=5 + ) + assert_allclose(coef_32, coef_64) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_passive_aggressive.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_passive_aggressive.py new file mode 100644 index 0000000000000000000000000000000000000000..0bcb19eb96536a430aed5662d223ebfa7c7cc544 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_passive_aggressive.py @@ -0,0 +1,278 @@ +import numpy as np +import pytest + +from sklearn.base import ClassifierMixin +from sklearn.datasets import load_iris +from sklearn.linear_model import 
PassiveAggressiveClassifier, PassiveAggressiveRegressor +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +iris = load_iris() +random_state = check_random_state(12) +indices = np.arange(iris.data.shape[0]) +random_state.shuffle(indices) +X = iris.data[indices] +y = iris.target[indices] + + +class MyPassiveAggressive(ClassifierMixin): + def __init__( + self, + C=1.0, + epsilon=0.01, + loss="hinge", + fit_intercept=True, + n_iter=1, + random_state=None, + ): + self.C = C + self.epsilon = epsilon + self.loss = loss + self.fit_intercept = fit_intercept + self.n_iter = n_iter + + def fit(self, X, y): + n_samples, n_features = X.shape + self.w = np.zeros(n_features, dtype=np.float64) + self.b = 0.0 + + for t in range(self.n_iter): + for i in range(n_samples): + p = self.project(X[i]) + if self.loss in ("hinge", "squared_hinge"): + loss = max(1 - y[i] * p, 0) + else: + loss = max(np.abs(p - y[i]) - self.epsilon, 0) + + sqnorm = np.dot(X[i], X[i]) + + if self.loss in ("hinge", "epsilon_insensitive"): + step = min(self.C, loss / sqnorm) + elif self.loss in ("squared_hinge", "squared_epsilon_insensitive"): + step = loss / (sqnorm + 1.0 / (2 * self.C)) + + if self.loss in ("hinge", "squared_hinge"): + step *= y[i] + else: + step *= np.sign(y[i] - p) + + self.w += step * X[i] + if self.fit_intercept: + self.b += step + + def project(self, X): + return np.dot(X, self.w) + self.b + + +@pytest.mark.parametrize("average", [False, True]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +def test_classifier_accuracy(csr_container, fit_intercept, average): + data = csr_container(X) if csr_container is not None else X + clf = PassiveAggressiveClassifier( + C=1.0, + max_iter=30, + fit_intercept=fit_intercept, + random_state=1, + average=average, + 
tol=None, + ) + clf.fit(data, y) + score = clf.score(data, y) + assert score > 0.79 + if average: + assert hasattr(clf, "_average_coef") + assert hasattr(clf, "_average_intercept") + assert hasattr(clf, "_standard_intercept") + assert hasattr(clf, "_standard_coef") + + +@pytest.mark.parametrize("average", [False, True]) +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +def test_classifier_partial_fit(csr_container, average): + classes = np.unique(y) + data = csr_container(X) if csr_container is not None else X + clf = PassiveAggressiveClassifier(random_state=0, average=average, max_iter=5) + for t in range(30): + clf.partial_fit(data, y, classes) + score = clf.score(data, y) + assert score > 0.79 + if average: + assert hasattr(clf, "_average_coef") + assert hasattr(clf, "_average_intercept") + assert hasattr(clf, "_standard_intercept") + assert hasattr(clf, "_standard_coef") + + +def test_classifier_refit(): + # Classifier can be retrained on different labels and features. 
+ clf = PassiveAggressiveClassifier(max_iter=5).fit(X, y) + assert_array_equal(clf.classes_, np.unique(y)) + + clf.fit(X[:, :-1], iris.target_names[y]) + assert_array_equal(clf.classes_, iris.target_names) + + +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +@pytest.mark.parametrize("loss", ("hinge", "squared_hinge")) +def test_classifier_correctness(loss, csr_container): + y_bin = y.copy() + y_bin[y != 1] = -1 + + clf1 = MyPassiveAggressive(loss=loss, n_iter=2) + clf1.fit(X, y_bin) + + data = csr_container(X) if csr_container is not None else X + clf2 = PassiveAggressiveClassifier(loss=loss, max_iter=2, shuffle=False, tol=None) + clf2.fit(data, y_bin) + + assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2) + + +@pytest.mark.parametrize( + "response_method", ["predict_proba", "predict_log_proba", "transform"] +) +def test_classifier_undefined_methods(response_method): + clf = PassiveAggressiveClassifier(max_iter=100) + with pytest.raises(AttributeError): + getattr(clf, response_method) + + +def test_class_weights(): + # Test class weights. 
+ X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y2 = [1, 1, 1, -1, -1] + + clf = PassiveAggressiveClassifier( + C=0.1, max_iter=100, class_weight=None, random_state=100 + ) + clf.fit(X2, y2) + assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) + + # we give a small weights to class 1 + clf = PassiveAggressiveClassifier( + C=0.1, max_iter=100, class_weight={1: 0.001}, random_state=100 + ) + clf.fit(X2, y2) + + # now the hyperplane should rotate clock-wise and + # the prediction on this point should shift + assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) + + +def test_partial_fit_weight_class_balanced(): + # partial_fit with class_weight='balanced' not supported + clf = PassiveAggressiveClassifier(class_weight="balanced", max_iter=100) + with pytest.raises(ValueError): + clf.partial_fit(X, y, classes=np.unique(y)) + + +def test_equal_class_weight(): + X2 = [[1, 0], [1, 0], [0, 1], [0, 1]] + y2 = [0, 0, 1, 1] + clf = PassiveAggressiveClassifier(C=0.1, tol=None, class_weight=None) + clf.fit(X2, y2) + + # Already balanced, so "balanced" weights should have no effect + clf_balanced = PassiveAggressiveClassifier(C=0.1, tol=None, class_weight="balanced") + clf_balanced.fit(X2, y2) + + clf_weighted = PassiveAggressiveClassifier( + C=0.1, tol=None, class_weight={0: 0.5, 1: 0.5} + ) + clf_weighted.fit(X2, y2) + + # should be similar up to some epsilon due to learning rate schedule + assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2) + assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2) + + +def test_wrong_class_weight_label(): + # ValueError due to wrong class_weight label. 
+ X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y2 = [1, 1, 1, -1, -1] + + clf = PassiveAggressiveClassifier(class_weight={0: 0.5}, max_iter=100) + with pytest.raises(ValueError): + clf.fit(X2, y2) + + +@pytest.mark.parametrize("average", [False, True]) +@pytest.mark.parametrize("fit_intercept", [True, False]) +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +def test_regressor_mse(csr_container, fit_intercept, average): + y_bin = y.copy() + y_bin[y != 1] = -1 + + data = csr_container(X) if csr_container is not None else X + reg = PassiveAggressiveRegressor( + C=1.0, + fit_intercept=fit_intercept, + random_state=0, + average=average, + max_iter=5, + ) + reg.fit(data, y_bin) + pred = reg.predict(data) + assert np.mean((pred - y_bin) ** 2) < 1.7 + if average: + assert hasattr(reg, "_average_coef") + assert hasattr(reg, "_average_intercept") + assert hasattr(reg, "_standard_intercept") + assert hasattr(reg, "_standard_coef") + + +@pytest.mark.parametrize("average", [False, True]) +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +def test_regressor_partial_fit(csr_container, average): + y_bin = y.copy() + y_bin[y != 1] = -1 + + data = csr_container(X) if csr_container is not None else X + reg = PassiveAggressiveRegressor(random_state=0, average=average, max_iter=100) + for t in range(50): + reg.partial_fit(data, y_bin) + pred = reg.predict(data) + assert np.mean((pred - y_bin) ** 2) < 1.7 + if average: + assert hasattr(reg, "_average_coef") + assert hasattr(reg, "_average_intercept") + assert hasattr(reg, "_standard_intercept") + assert hasattr(reg, "_standard_coef") + + +@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS]) +@pytest.mark.parametrize("loss", ("epsilon_insensitive", "squared_epsilon_insensitive")) +def test_regressor_correctness(loss, csr_container): + y_bin = y.copy() + y_bin[y != 1] = -1 + + reg1 = MyPassiveAggressive(loss=loss, n_iter=2) + reg1.fit(X, y_bin) + + 
data = csr_container(X) if csr_container is not None else X + reg2 = PassiveAggressiveRegressor(tol=None, loss=loss, max_iter=2, shuffle=False) + reg2.fit(data, y_bin) + + assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2) + + +def test_regressor_undefined_methods(): + reg = PassiveAggressiveRegressor(max_iter=100) + with pytest.raises(AttributeError): + reg.transform(X) + + +# TODO(1.7): remove +@pytest.mark.parametrize( + "Estimator", [PassiveAggressiveClassifier, PassiveAggressiveRegressor] +) +def test_passive_aggressive_deprecated_average(Estimator): + est = Estimator(average=0) + with pytest.warns(FutureWarning, match="average=0"): + est.fit(X, y) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sag.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sag.py new file mode 100644 index 0000000000000000000000000000000000000000..575838f8e8497a01c60adbb74ddad95dadc6e662 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_sag.py @@ -0,0 +1,861 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import math +import re + +import numpy as np +import pytest + +from sklearn.base import clone +from sklearn.datasets import load_iris, make_blobs, make_classification +from sklearn.linear_model import LogisticRegression, Ridge +from sklearn.linear_model._sag import get_auto_step_size +from sklearn.multiclass import OneVsRestClassifier +from sklearn.preprocessing import LabelEncoder +from sklearn.utils import check_random_state, compute_class_weight +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, +) +from sklearn.utils.extmath import row_norms +from sklearn.utils.fixes import CSR_CONTAINERS + +iris = load_iris() + + +# this is used for sag classification +def log_dloss(p, y): + z = p * y + # approximately equal and saves the computation of the log + if z > 
18.0: + return math.exp(-z) * -y + if z < -18.0: + return -y + return -y / (math.exp(z) + 1.0) + + +def log_loss(p, y): + return np.mean(np.log(1.0 + np.exp(-y * p))) + + +# this is used for sag regression +def squared_dloss(p, y): + return p - y + + +def squared_loss(p, y): + return np.mean(0.5 * (p - y) * (p - y)) + + +# function for measuring the log loss +def get_pobj(w, alpha, myX, myy, loss): + w = w.ravel() + pred = np.dot(myX, w) + p = loss(pred, myy) + p += alpha * w.dot(w) / 2.0 + return p + + +def sag( + X, + y, + step_size, + alpha, + n_iter=1, + dloss=None, + sparse=False, + sample_weight=None, + fit_intercept=True, + saga=False, +): + n_samples, n_features = X.shape[0], X.shape[1] + + weights = np.zeros(X.shape[1]) + sum_gradient = np.zeros(X.shape[1]) + gradient_memory = np.zeros((n_samples, n_features)) + + intercept = 0.0 + intercept_sum_gradient = 0.0 + intercept_gradient_memory = np.zeros(n_samples) + + rng = np.random.RandomState(77) + decay = 1.0 + seen = set() + + # sparse data has a fixed decay of .01 + if sparse: + decay = 0.01 + + for epoch in range(n_iter): + for k in range(n_samples): + idx = int(rng.rand() * n_samples) + # idx = k + entry = X[idx] + seen.add(idx) + p = np.dot(entry, weights) + intercept + gradient = dloss(p, y[idx]) + if sample_weight is not None: + gradient *= sample_weight[idx] + update = entry * gradient + alpha * weights + gradient_correction = update - gradient_memory[idx] + sum_gradient += gradient_correction + gradient_memory[idx] = update + if saga: + weights -= gradient_correction * step_size * (1 - 1.0 / len(seen)) + + if fit_intercept: + gradient_correction = gradient - intercept_gradient_memory[idx] + intercept_gradient_memory[idx] = gradient + intercept_sum_gradient += gradient_correction + gradient_correction *= step_size * (1.0 - 1.0 / len(seen)) + if saga: + intercept -= ( + step_size * intercept_sum_gradient / len(seen) * decay + ) + gradient_correction + else: + intercept -= step_size * 
intercept_sum_gradient / len(seen) * decay + + weights -= step_size * sum_gradient / len(seen) + + return weights, intercept + + +def sag_sparse( + X, + y, + step_size, + alpha, + n_iter=1, + dloss=None, + sample_weight=None, + sparse=False, + fit_intercept=True, + saga=False, + random_state=0, +): + if step_size * alpha == 1.0: + raise ZeroDivisionError( + "Sparse sag does not handle the case step_size * alpha == 1" + ) + n_samples, n_features = X.shape[0], X.shape[1] + + weights = np.zeros(n_features) + sum_gradient = np.zeros(n_features) + last_updated = np.zeros(n_features, dtype=int) + gradient_memory = np.zeros(n_samples) + rng = check_random_state(random_state) + intercept = 0.0 + intercept_sum_gradient = 0.0 + wscale = 1.0 + decay = 1.0 + seen = set() + + c_sum = np.zeros(n_iter * n_samples) + + # sparse data has a fixed decay of .01 + if sparse: + decay = 0.01 + + counter = 0 + for epoch in range(n_iter): + for k in range(n_samples): + # idx = k + idx = int(rng.rand() * n_samples) + entry = X[idx] + seen.add(idx) + + if counter >= 1: + for j in range(n_features): + if last_updated[j] == 0: + weights[j] -= c_sum[counter - 1] * sum_gradient[j] + else: + weights[j] -= ( + c_sum[counter - 1] - c_sum[last_updated[j] - 1] + ) * sum_gradient[j] + last_updated[j] = counter + + p = (wscale * np.dot(entry, weights)) + intercept + gradient = dloss(p, y[idx]) + + if sample_weight is not None: + gradient *= sample_weight[idx] + + update = entry * gradient + gradient_correction = update - (gradient_memory[idx] * entry) + sum_gradient += gradient_correction + if saga: + for j in range(n_features): + weights[j] -= ( + gradient_correction[j] + * step_size + * (1 - 1.0 / len(seen)) + / wscale + ) + + if fit_intercept: + gradient_correction = gradient - gradient_memory[idx] + intercept_sum_gradient += gradient_correction + gradient_correction *= step_size * (1.0 - 1.0 / len(seen)) + if saga: + intercept -= ( + step_size * intercept_sum_gradient / len(seen) * decay + ) + 
gradient_correction + else: + intercept -= step_size * intercept_sum_gradient / len(seen) * decay + + gradient_memory[idx] = gradient + + wscale *= 1.0 - alpha * step_size + if counter == 0: + c_sum[0] = step_size / (wscale * len(seen)) + else: + c_sum[counter] = c_sum[counter - 1] + step_size / (wscale * len(seen)) + + if counter >= 1 and wscale < 1e-9: + for j in range(n_features): + if last_updated[j] == 0: + weights[j] -= c_sum[counter] * sum_gradient[j] + else: + weights[j] -= ( + c_sum[counter] - c_sum[last_updated[j] - 1] + ) * sum_gradient[j] + last_updated[j] = counter + 1 + c_sum[counter] = 0 + weights *= wscale + wscale = 1.0 + + counter += 1 + + for j in range(n_features): + if last_updated[j] == 0: + weights[j] -= c_sum[counter - 1] * sum_gradient[j] + else: + weights[j] -= ( + c_sum[counter - 1] - c_sum[last_updated[j] - 1] + ) * sum_gradient[j] + weights *= wscale + return weights, intercept + + +def get_step_size(X, alpha, fit_intercept, classification=True): + if classification: + return 4.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + 4.0 * alpha) + else: + return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha) + + +def test_classifier_matching(): + n_samples = 20 + X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1) + # y must be 0 or 1 + alpha = 1.1 + fit_intercept = True + step_size = get_step_size(X, alpha, fit_intercept) + for solver in ["sag", "saga"]: + if solver == "sag": + n_iter = 80 + else: + # SAGA variance w.r.t. 
stream order is higher + n_iter = 300 + clf = LogisticRegression( + solver=solver, + fit_intercept=fit_intercept, + tol=1e-11, + C=1.0 / alpha / n_samples, + max_iter=n_iter, + random_state=10, + ) + clf.fit(X, y) + + weights, intercept = sag_sparse( + X, + 2 * y - 1, # y must be -1 or +1 + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + fit_intercept=fit_intercept, + saga=solver == "saga", + ) + weights2, intercept2 = sag( + X, + 2 * y - 1, # y must be -1 or +1 + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + fit_intercept=fit_intercept, + saga=solver == "saga", + ) + weights = np.atleast_2d(weights) + intercept = np.atleast_1d(intercept) + weights2 = np.atleast_2d(weights2) + intercept2 = np.atleast_1d(intercept2) + + assert_array_almost_equal(weights, clf.coef_, decimal=9) + assert_array_almost_equal(intercept, clf.intercept_, decimal=9) + assert_array_almost_equal(weights2, clf.coef_, decimal=9) + assert_array_almost_equal(intercept2, clf.intercept_, decimal=9) + + +def test_regressor_matching(): + n_samples = 10 + n_features = 5 + + rng = np.random.RandomState(10) + X = rng.normal(size=(n_samples, n_features)) + true_w = rng.normal(size=n_features) + y = X.dot(true_w) + + alpha = 1.0 + n_iter = 100 + fit_intercept = True + + step_size = get_step_size(X, alpha, fit_intercept, classification=False) + clf = Ridge( + fit_intercept=fit_intercept, + tol=0.00000000001, + solver="sag", + alpha=alpha * n_samples, + max_iter=n_iter, + ) + clf.fit(X, y) + + weights1, intercept1 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=squared_dloss, + fit_intercept=fit_intercept, + ) + weights2, intercept2 = sag( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=squared_dloss, + fit_intercept=fit_intercept, + ) + + assert_allclose(weights1, clf.coef_) + assert_allclose(intercept1, clf.intercept_) + assert_allclose(weights2, clf.coef_) + assert_allclose(intercept2, clf.intercept_) + + +@pytest.mark.filterwarnings("ignore:The 
max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_pobj_matches_logistic_regression(csr_container): + """tests if the sag pobj matches log reg""" + n_samples = 100 + alpha = 1.0 + max_iter = 20 + X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1) + + clf1 = LogisticRegression( + solver="sag", + fit_intercept=False, + tol=0.0000001, + C=1.0 / alpha / n_samples, + max_iter=max_iter, + random_state=10, + ) + clf2 = clone(clf1) + clf3 = LogisticRegression( + fit_intercept=False, + tol=0.0000001, + C=1.0 / alpha / n_samples, + max_iter=max_iter, + random_state=10, + ) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + clf3.fit(X, y) + + pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss) + pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss) + pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss) + + assert_array_almost_equal(pobj1, pobj2, decimal=4) + assert_array_almost_equal(pobj2, pobj3, decimal=4) + assert_array_almost_equal(pobj3, pobj1, decimal=4) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_pobj_matches_ridge_regression(csr_container): + """tests if the sag pobj matches ridge reg""" + n_samples = 100 + n_features = 10 + alpha = 1.0 + n_iter = 100 + fit_intercept = False + rng = np.random.RandomState(10) + X = rng.normal(size=(n_samples, n_features)) + true_w = rng.normal(size=n_features) + y = X.dot(true_w) + + clf1 = Ridge( + fit_intercept=fit_intercept, + tol=0.00000000001, + solver="sag", + alpha=alpha, + max_iter=n_iter, + random_state=42, + ) + clf2 = clone(clf1) + clf3 = Ridge( + fit_intercept=fit_intercept, + tol=0.00001, + solver="lsqr", + alpha=alpha, + max_iter=n_iter, + random_state=42, + ) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + clf3.fit(X, y) + + pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss) + pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss) + pobj3 = 
get_pobj(clf3.coef_, alpha, X, y, squared_loss) + + assert_array_almost_equal(pobj1, pobj2, decimal=4) + assert_array_almost_equal(pobj1, pobj3, decimal=4) + assert_array_almost_equal(pobj3, pobj2, decimal=4) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_regressor_computed_correctly(csr_container): + """tests if the sag regressor is computed correctly""" + alpha = 0.1 + n_features = 10 + n_samples = 40 + max_iter = 100 + tol = 0.000001 + fit_intercept = True + rng = np.random.RandomState(0) + X = rng.normal(size=(n_samples, n_features)) + w = rng.normal(size=n_features) + y = np.dot(X, w) + 2.0 + step_size = get_step_size(X, alpha, fit_intercept, classification=False) + + clf1 = Ridge( + fit_intercept=fit_intercept, + tol=tol, + solver="sag", + alpha=alpha * n_samples, + max_iter=max_iter, + random_state=rng, + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + spweights1, spintercept1 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=max_iter, + dloss=squared_dloss, + fit_intercept=fit_intercept, + random_state=rng, + ) + + spweights2, spintercept2 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=max_iter, + dloss=squared_dloss, + sparse=True, + fit_intercept=fit_intercept, + random_state=rng, + ) + + assert_array_almost_equal(clf1.coef_.ravel(), spweights1.ravel(), decimal=3) + assert_almost_equal(clf1.intercept_, spintercept1, decimal=1) + + # TODO: uncomment when sparse Ridge with intercept will be fixed (#4710) + # assert_array_almost_equal(clf2.coef_.ravel(), + # spweights2.ravel(), + # decimal=3) + # assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)''' + + +def test_get_auto_step_size(): + X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64) + alpha = 1.2 + fit_intercept = False + # sum the squares of the second sample because that's the largest + max_squared_sum = 4 + 9 + 16 + max_squared_sum_ = 
row_norms(X, squared=True).max() + n_samples = X.shape[0] + assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4) + + for saga in [True, False]: + for fit_intercept in (True, False): + if saga: + L_sqr = max_squared_sum + alpha + int(fit_intercept) + L_log = (max_squared_sum + 4.0 * alpha + int(fit_intercept)) / 4.0 + mun_sqr = min(2 * n_samples * alpha, L_sqr) + mun_log = min(2 * n_samples * alpha, L_log) + step_size_sqr = 1 / (2 * L_sqr + mun_sqr) + step_size_log = 1 / (2 * L_log + mun_log) + else: + step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept)) + step_size_log = 4.0 / ( + max_squared_sum + 4.0 * alpha + int(fit_intercept) + ) + + step_size_sqr_ = get_auto_step_size( + max_squared_sum_, + alpha, + "squared", + fit_intercept, + n_samples=n_samples, + is_saga=saga, + ) + step_size_log_ = get_auto_step_size( + max_squared_sum_, + alpha, + "log", + fit_intercept, + n_samples=n_samples, + is_saga=saga, + ) + + assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4) + assert_almost_equal(step_size_log, step_size_log_, decimal=4) + + msg = "Unknown loss function for SAG solver, got wrong instead of" + with pytest.raises(ValueError, match=msg): + get_auto_step_size(max_squared_sum_, alpha, "wrong", fit_intercept) + + +@pytest.mark.parametrize("seed", range(3)) # locally tested with 1000 seeds +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_regressor(seed, csr_container): + """tests if the sag regressor performs well""" + xmin, xmax = -5, 5 + n_samples = 300 + tol = 0.001 + max_iter = 100 + alpha = 0.1 + rng = np.random.RandomState(seed) + X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) + + # simple linear function without noise + y = 0.5 * X.ravel() + + clf1 = Ridge( + tol=tol, + solver="sag", + max_iter=max_iter, + alpha=alpha * n_samples, + random_state=rng, + ) + clf2 = clone(clf1) + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + score1 = clf1.score(X, y) + score2 = clf2.score(X, y) + 
assert score1 > 0.98 + assert score2 > 0.98 + + # simple linear function with noise + y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() + + clf1 = Ridge(tol=tol, solver="sag", max_iter=max_iter, alpha=alpha * n_samples) + clf2 = clone(clf1) + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + score1 = clf1.score(X, y) + score2 = clf2.score(X, y) + assert score1 > 0.45 + assert score2 > 0.45 + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_classifier_computed_correctly(csr_container): + """tests if the binary classifier is computed correctly""" + alpha = 0.1 + n_samples = 50 + n_iter = 50 + tol = 0.00001 + fit_intercept = True + X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1) + step_size = get_step_size(X, alpha, fit_intercept, classification=True) + classes = np.unique(y) + y_tmp = np.ones(n_samples) + y_tmp[y != classes[1]] = -1 + y = y_tmp + + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=n_iter, + tol=tol, + random_state=77, + fit_intercept=fit_intercept, + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + spweights, spintercept = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + fit_intercept=fit_intercept, + ) + spweights2, spintercept2 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + sparse=True, + fit_intercept=fit_intercept, + ) + + assert_array_almost_equal(clf1.coef_.ravel(), spweights.ravel(), decimal=2) + assert_almost_equal(clf1.intercept_, spintercept, decimal=1) + + assert_array_almost_equal(clf2.coef_.ravel(), spweights2.ravel(), decimal=2) + assert_almost_equal(clf2.intercept_, spintercept2, decimal=1) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sag_multiclass_computed_correctly(csr_container): + 
"""tests if the multiclass classifier is computed correctly""" + alpha = 0.1 + n_samples = 20 + tol = 1e-5 + max_iter = 70 + fit_intercept = True + X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0, cluster_std=0.1) + step_size = get_step_size(X, alpha, fit_intercept, classification=True) + classes = np.unique(y) + + clf1 = OneVsRestClassifier( + LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=max_iter, + tol=tol, + random_state=77, + fit_intercept=fit_intercept, + ) + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + coef1 = [] + intercept1 = [] + coef2 = [] + intercept2 = [] + for cl in classes: + y_encoded = np.ones(n_samples) + y_encoded[y != cl] = -1 + + spweights1, spintercept1 = sag_sparse( + X, + y_encoded, + step_size, + alpha, + dloss=log_dloss, + n_iter=max_iter, + fit_intercept=fit_intercept, + ) + spweights2, spintercept2 = sag_sparse( + X, + y_encoded, + step_size, + alpha, + dloss=log_dloss, + n_iter=max_iter, + sparse=True, + fit_intercept=fit_intercept, + ) + coef1.append(spweights1) + intercept1.append(spintercept1) + + coef2.append(spweights2) + intercept2.append(spintercept2) + + coef1 = np.vstack(coef1) + intercept1 = np.array(intercept1) + coef2 = np.vstack(coef2) + intercept2 = np.array(intercept2) + + for i, cl in enumerate(classes): + assert_allclose(clf1.estimators_[i].coef_.ravel(), coef1[i], rtol=1e-2) + assert_allclose(clf1.estimators_[i].intercept_, intercept1[i], rtol=1e-1) + + assert_allclose(clf2.estimators_[i].coef_.ravel(), coef2[i], rtol=1e-2) + # Note the very crude accuracy, i.e. high rtol. 
+ assert_allclose(clf2.estimators_[i].intercept_, intercept2[i], rtol=5e-1) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_classifier_results(csr_container): + """tests if classifier results match target""" + alpha = 0.1 + n_features = 20 + n_samples = 10 + tol = 0.01 + max_iter = 200 + rng = np.random.RandomState(0) + X = rng.normal(size=(n_samples, n_features)) + w = rng.normal(size=n_features) + y = np.dot(X, w) + y = np.sign(y) + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=max_iter, + tol=tol, + random_state=77, + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + pred1 = clf1.predict(X) + pred2 = clf2.predict(X) + assert_almost_equal(pred1, y, decimal=12) + assert_almost_equal(pred2, y, decimal=12) + + +@pytest.mark.filterwarnings("ignore:The max_iter was reached") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_binary_classifier_class_weight(csr_container): + """tests binary classifier with classweights for each class""" + alpha = 0.1 + n_samples = 50 + n_iter = 20 + tol = 0.00001 + fit_intercept = True + X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10, cluster_std=0.1) + step_size = get_step_size(X, alpha, fit_intercept, classification=True) + classes = np.unique(y) + y_tmp = np.ones(n_samples) + y_tmp[y != classes[1]] = -1 + y = y_tmp + + class_weight = {1: 0.45, -1: 0.55} + clf1 = LogisticRegression( + solver="sag", + C=1.0 / alpha / n_samples, + max_iter=n_iter, + tol=tol, + random_state=77, + fit_intercept=fit_intercept, + class_weight=class_weight, + ) + clf2 = clone(clf1) + + clf1.fit(X, y) + clf2.fit(csr_container(X), y) + + le = LabelEncoder() + class_weight_ = compute_class_weight(class_weight, classes=np.unique(y), y=y) + sample_weight = class_weight_[le.fit_transform(y)] + spweights, spintercept = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + sample_weight=sample_weight, + 
fit_intercept=fit_intercept, + ) + spweights2, spintercept2 = sag_sparse( + X, + y, + step_size, + alpha, + n_iter=n_iter, + dloss=log_dloss, + sparse=True, + sample_weight=sample_weight, + fit_intercept=fit_intercept, + ) + + assert_array_almost_equal(clf1.coef_.ravel(), spweights.ravel(), decimal=2) + assert_almost_equal(clf1.intercept_, spintercept, decimal=1) + + assert_array_almost_equal(clf2.coef_.ravel(), spweights2.ravel(), decimal=2) + assert_almost_equal(clf2.intercept_, spintercept2, decimal=1) + + +def test_classifier_single_class(): + """tests if ValueError is thrown with only one class""" + X = [[1, 2], [3, 4]] + y = [1, 1] + + msg = "This solver needs samples of at least 2 classes in the data" + with pytest.raises(ValueError, match=msg): + LogisticRegression(solver="sag").fit(X, y) + + +def test_step_size_alpha_error(): + X = [[0, 0], [0, 0]] + y = [1, -1] + fit_intercept = False + alpha = 1.0 + msg = re.escape( + "Current sag implementation does not handle the case" + " step_size * alpha_scaled == 1" + ) + + clf1 = LogisticRegression(solver="sag", C=1.0 / alpha, fit_intercept=fit_intercept) + with pytest.raises(ZeroDivisionError, match=msg): + clf1.fit(X, y) + + clf2 = Ridge(fit_intercept=fit_intercept, solver="sag", alpha=alpha) + with pytest.raises(ZeroDivisionError, match=msg): + clf2.fit(X, y) + + +@pytest.mark.parametrize("solver", ["sag", "saga"]) +def test_sag_classifier_raises_error(solver): + # Following #13316, the error handling behavior changed in cython sag. This + # is simply a non-regression test to make sure numerical errors are + # properly raised. 
+ + # Train a classifier on a simple problem + rng = np.random.RandomState(42) + X, y = make_classification(random_state=rng) + clf = LogisticRegression(solver=solver, random_state=rng, warm_start=True) + clf.fit(X, y) + + # Trigger a numerical error by: + # - corrupting the fitted coefficients of the classifier + # - fit it again starting from its current state thanks to warm_start + clf.coef_[:] = np.nan + + with pytest.raises(ValueError, match="Floating-point under-/overflow"): + clf.fit(X, y) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_theil_sen.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_theil_sen.py new file mode 100644 index 0000000000000000000000000000000000000000..216415f2ee9277e618c457afc0a7280c8a2a4b8a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/tests/test_theil_sen.py @@ -0,0 +1,303 @@ +""" +Testing for Theil-Sen module (sklearn.linear_model.theil_sen) +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import os +import re +import sys +from contextlib import contextmanager + +import numpy as np +import pytest +from numpy.testing import ( + assert_array_almost_equal, + assert_array_equal, + assert_array_less, +) +from scipy.linalg import norm +from scipy.optimize import fmin_bfgs + +from sklearn.exceptions import ConvergenceWarning +from sklearn.linear_model import LinearRegression, TheilSenRegressor +from sklearn.linear_model._theil_sen import ( + _breakdown_point, + _modified_weiszfeld_step, + _spatial_median, +) +from sklearn.utils._testing import assert_almost_equal + + +@contextmanager +def no_stdout_stderr(): + old_stdout = sys.stdout + old_stderr = sys.stderr + with open(os.devnull, "w") as devnull: + sys.stdout = devnull + sys.stderr = devnull + yield + devnull.flush() + sys.stdout = old_stdout + sys.stderr = old_stderr + + +def gen_toy_problem_1d(intercept=True): + random_state = 
np.random.RandomState(0) + # Linear model y = 3*x + N(2, 0.1**2) + w = 3.0 + if intercept: + c = 2.0 + n_samples = 50 + else: + c = 0.1 + n_samples = 100 + x = random_state.normal(size=n_samples) + noise = 0.1 * random_state.normal(size=n_samples) + y = w * x + c + noise + # Add some outliers + if intercept: + x[42], y[42] = (-2, 4) + x[43], y[43] = (-2.5, 8) + x[33], y[33] = (2.5, 1) + x[49], y[49] = (2.1, 2) + else: + x[42], y[42] = (-2, 4) + x[43], y[43] = (-2.5, 8) + x[53], y[53] = (2.5, 1) + x[60], y[60] = (2.1, 2) + x[72], y[72] = (1.8, -7) + return x[:, np.newaxis], y, w, c + + +def gen_toy_problem_2d(): + random_state = np.random.RandomState(0) + n_samples = 100 + # Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2) + X = random_state.normal(size=(n_samples, 2)) + w = np.array([5.0, 10.0]) + c = 1.0 + noise = 0.1 * random_state.normal(size=n_samples) + y = np.dot(X, w) + c + noise + # Add some outliers + n_outliers = n_samples // 10 + ix = random_state.randint(0, n_samples, size=n_outliers) + y[ix] = 50 * random_state.normal(size=n_outliers) + return X, y, w, c + + +def gen_toy_problem_4d(): + random_state = np.random.RandomState(0) + n_samples = 10000 + # Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2) + X = random_state.normal(size=(n_samples, 4)) + w = np.array([5.0, 10.0, 42.0, 7.0]) + c = 1.0 + noise = 0.1 * random_state.normal(size=n_samples) + y = np.dot(X, w) + c + noise + # Add some outliers + n_outliers = n_samples // 10 + ix = random_state.randint(0, n_samples, size=n_outliers) + y[ix] = 50 * random_state.normal(size=n_outliers) + return X, y, w, c + + +def test_modweiszfeld_step_1d(): + X = np.array([1.0, 2.0, 3.0]).reshape(3, 1) + # Check startvalue is element of X and solution + median = 2.0 + new_y = _modified_weiszfeld_step(X, median) + assert_array_almost_equal(new_y, median) + # Check startvalue is not the solution + y = 2.5 + new_y = _modified_weiszfeld_step(X, y) + assert_array_less(median, new_y) + assert_array_less(new_y, 
y) + # Check startvalue is not the solution but element of X + y = 3.0 + new_y = _modified_weiszfeld_step(X, y) + assert_array_less(median, new_y) + assert_array_less(new_y, y) + # Check that a single vector is identity + X = np.array([1.0, 2.0, 3.0]).reshape(1, 3) + y = X[0] + new_y = _modified_weiszfeld_step(X, y) + assert_array_equal(y, new_y) + + +def test_modweiszfeld_step_2d(): + X = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 1.0]).reshape(3, 2) + y = np.array([0.5, 0.5]) + # Check first two iterations + new_y = _modified_weiszfeld_step(X, y) + assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3])) + new_y = _modified_weiszfeld_step(X, new_y) + assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592])) + # Check fix point + y = np.array([0.21132505, 0.78867497]) + new_y = _modified_weiszfeld_step(X, y) + assert_array_almost_equal(new_y, y) + + +def test_spatial_median_1d(): + X = np.array([1.0, 2.0, 3.0]).reshape(3, 1) + true_median = 2.0 + _, median = _spatial_median(X) + assert_array_almost_equal(median, true_median) + # Test larger problem and for exact solution in 1d case + random_state = np.random.RandomState(0) + X = random_state.randint(100, size=(1000, 1)) + true_median = np.median(X.ravel()) + _, median = _spatial_median(X) + assert_array_equal(median, true_median) + + +def test_spatial_median_2d(): + X = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 1.0]).reshape(3, 2) + _, median = _spatial_median(X, max_iter=100, tol=1.0e-6) + + def cost_func(y): + dists = np.array([norm(x - y) for x in X]) + return np.sum(dists) + + # Check if median is solution of the Fermat-Weber location problem + fermat_weber = fmin_bfgs(cost_func, median, disp=False) + assert_array_almost_equal(median, fermat_weber) + # Check when maximum iteration is exceeded a warning is emitted + warning_message = "Maximum number of iterations 30 reached in spatial median." 
+ with pytest.warns(ConvergenceWarning, match=warning_message): + _spatial_median(X, max_iter=30, tol=0.0) + + +def test_theil_sen_1d(): + X, y, w, c = gen_toy_problem_1d() + # Check that Least Squares fails + lstq = LinearRegression().fit(X, y) + assert np.abs(lstq.coef_ - w) > 0.9 + # Check that Theil-Sen works + theil_sen = TheilSenRegressor(random_state=0).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, w, 1) + assert_array_almost_equal(theil_sen.intercept_, c, 1) + + +def test_theil_sen_1d_no_intercept(): + X, y, w, c = gen_toy_problem_1d(intercept=False) + # Check that Least Squares fails + lstq = LinearRegression(fit_intercept=False).fit(X, y) + assert np.abs(lstq.coef_ - w - c) > 0.5 + # Check that Theil-Sen works + theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, w + c, 1) + assert_almost_equal(theil_sen.intercept_, 0.0) + + # non-regression test for #18104 + theil_sen.score(X, y) + + +def test_theil_sen_2d(): + X, y, w, c = gen_toy_problem_2d() + # Check that Least Squares fails + lstq = LinearRegression().fit(X, y) + assert norm(lstq.coef_ - w) > 1.0 + # Check that Theil-Sen works + theil_sen = TheilSenRegressor(max_subpopulation=1e3, random_state=0).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, w, 1) + assert_array_almost_equal(theil_sen.intercept_, c, 1) + + +def test_calc_breakdown_point(): + bp = _breakdown_point(1e10, 2) + assert np.abs(bp - 1 + 1 / (np.sqrt(2))) < 1.0e-6 + + +@pytest.mark.parametrize( + "param, ExceptionCls, match", + [ + ( + {"n_subsamples": 1}, + ValueError, + re.escape("Invalid parameter since n_features+1 > n_subsamples (2 > 1)"), + ), + ( + {"n_subsamples": 101}, + ValueError, + re.escape("Invalid parameter since n_subsamples > n_samples (101 > 50)"), + ), + ], +) +def test_checksubparams_invalid_input(param, ExceptionCls, match): + X, y, w, c = gen_toy_problem_1d() + theil_sen = TheilSenRegressor(**param, random_state=0) + with 
pytest.raises(ExceptionCls, match=match): + theil_sen.fit(X, y) + + +def test_checksubparams_n_subsamples_if_less_samples_than_features(): + random_state = np.random.RandomState(0) + n_samples, n_features = 10, 20 + X = random_state.normal(size=(n_samples, n_features)) + y = random_state.normal(size=n_samples) + theil_sen = TheilSenRegressor(n_subsamples=9, random_state=0) + with pytest.raises(ValueError): + theil_sen.fit(X, y) + + +def test_subpopulation(): + X, y, w, c = gen_toy_problem_4d() + theil_sen = TheilSenRegressor(max_subpopulation=250, random_state=0).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, w, 1) + assert_array_almost_equal(theil_sen.intercept_, c, 1) + + +def test_subsamples(): + X, y, w, c = gen_toy_problem_4d() + theil_sen = TheilSenRegressor(n_subsamples=X.shape[0], random_state=0).fit(X, y) + lstq = LinearRegression().fit(X, y) + # Check for exact the same results as Least Squares + assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9) + + +def test_verbosity(): + X, y, w, c = gen_toy_problem_1d() + # Check that Theil-Sen can be verbose + with no_stdout_stderr(): + TheilSenRegressor(verbose=True, random_state=0).fit(X, y) + TheilSenRegressor(verbose=True, max_subpopulation=10, random_state=0).fit(X, y) + + +def test_theil_sen_parallel(): + X, y, w, c = gen_toy_problem_2d() + # Check that Least Squares fails + lstq = LinearRegression().fit(X, y) + assert norm(lstq.coef_ - w) > 1.0 + # Check that Theil-Sen works + theil_sen = TheilSenRegressor(n_jobs=2, random_state=0, max_subpopulation=2e3).fit( + X, y + ) + assert_array_almost_equal(theil_sen.coef_, w, 1) + assert_array_almost_equal(theil_sen.intercept_, c, 1) + + +def test_less_samples_than_features(): + random_state = np.random.RandomState(0) + n_samples, n_features = 10, 20 + X = random_state.normal(size=(n_samples, n_features)) + y = random_state.normal(size=n_samples) + # Check that Theil-Sen falls back to Least Squares if fit_intercept=False + theil_sen = 
TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y) + lstq = LinearRegression(fit_intercept=False).fit(X, y) + assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12) + # Check fit_intercept=True case. This will not be equal to the Least + # Squares solution since the intercept is calculated differently. + theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y) + y_pred = theil_sen.predict(X) + assert_array_almost_equal(y_pred, y, 12) + + +# TODO(1.8): Remove +def test_copy_X_deprecated(): + X, y, _, _ = gen_toy_problem_1d() + theil_sen = TheilSenRegressor(copy_X=True, random_state=0) + with pytest.warns(FutureWarning, match="`copy_X` was deprecated"): + theil_sen.fit(X, y) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bc33ff234b86671fa011c71ac8af0797a167caa Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1df6740b778b486c09c377ebcee129723e976fd Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7027fd8e23d9e7f93eb8d9f844f2d1218ed9d7c2 Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1e6a99986edea30a4a00e664bf32fdfe2bf2093 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96fad69cacf2d1cb3fb00e70e7454dd0c5f50970 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acafc493b752b358b76725fc2a330d7d0b7b4b22 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..523c07a74dc9cd62d0fd89b7286b2519b695df6d Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc differ diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators_metadata_routing.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators_metadata_routing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6711a6cbfe96218f10080283a89ecd8519159b7c Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators_metadata_routing.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_min_dependencies_readme.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_min_dependencies_readme.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd56ff5d4eb22f940abdf9e9880df81f68bb8077 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_min_dependencies_readme.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multioutput.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multioutput.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1314457a6275978f37cb0c506569fa637230efb Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multioutput.cpython-310.pyc differ