Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/INSTALLER +1 -0
- evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/METADATA +295 -0
- evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/RECORD +11 -0
- evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/REQUESTED +0 -0
- evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/WHEEL +4 -0
- evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/licenses/LICENSE +21 -0
- evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/ebay.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/facebook.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/weibo.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/_jit_internal.py +1510 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/_meta_registrations.py +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/_sources.py +137 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/_tensor_str.py +677 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/_torch_docs.py +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/functional.py +1978 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/overrides.py +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/random.py +175 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/return_types.pyi +172 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/serialization.py +1448 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/types.py +79 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/version.py +8 -0
- evalkit_tf437/lib/python3.10/site-packages/__pycache__/markdown2.cpython-310.pyc +3 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so +3 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_base.py +850 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.pyx +956 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py +16 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py +616 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py +908 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py +2 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py +1110 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_huber.py +358 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_omp.py +1121 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_passive_aggressive.py +573 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py +226 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py +301 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_ransac.py +731 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sag.py +370 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sag_fast.pyx.tp +647 -0
.gitattributes
CHANGED
|
@@ -1656,3 +1656,5 @@ evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libcudart.60cfec8
|
|
| 1656 |
evalkit_tf437/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1657 |
evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libnvjpeg.70530407.so.11 filter=lfs diff=lfs merge=lfs -text
|
| 1658 |
evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libpng16.7f72a3c5.so.16 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 1656 |
evalkit_tf437/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1657 |
evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libnvjpeg.70530407.so.11 filter=lfs diff=lfs merge=lfs -text
|
| 1658 |
evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libpng16.7f72a3c5.so.16 filter=lfs diff=lfs merge=lfs -text
|
| 1659 |
+
evalkit_tf437/lib/python3.10/site-packages/__pycache__/markdown2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1660 |
+
evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.3
|
| 2 |
+
Name: annotated-types
|
| 3 |
+
Version: 0.7.0
|
| 4 |
+
Summary: Reusable constraint types to use with typing.Annotated
|
| 5 |
+
Project-URL: Homepage, https://github.com/annotated-types/annotated-types
|
| 6 |
+
Project-URL: Source, https://github.com/annotated-types/annotated-types
|
| 7 |
+
Project-URL: Changelog, https://github.com/annotated-types/annotated-types/releases
|
| 8 |
+
Author-email: Adrian Garcia Badaracco <1755071+adriangb@users.noreply.github.com>, Samuel Colvin <s@muelcolvin.com>, Zac Hatfield-Dodds <zac@zhd.dev>
|
| 9 |
+
License-File: LICENSE
|
| 10 |
+
Classifier: Development Status :: 4 - Beta
|
| 11 |
+
Classifier: Environment :: Console
|
| 12 |
+
Classifier: Environment :: MacOS X
|
| 13 |
+
Classifier: Intended Audience :: Developers
|
| 14 |
+
Classifier: Intended Audience :: Information Technology
|
| 15 |
+
Classifier: License :: OSI Approved :: MIT License
|
| 16 |
+
Classifier: Operating System :: POSIX :: Linux
|
| 17 |
+
Classifier: Operating System :: Unix
|
| 18 |
+
Classifier: Programming Language :: Python :: 3 :: Only
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 24 |
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
| 25 |
+
Classifier: Typing :: Typed
|
| 26 |
+
Requires-Python: >=3.8
|
| 27 |
+
Requires-Dist: typing-extensions>=4.0.0; python_version < '3.9'
|
| 28 |
+
Description-Content-Type: text/markdown
|
| 29 |
+
|
| 30 |
+
# annotated-types
|
| 31 |
+
|
| 32 |
+
[](https://github.com/annotated-types/annotated-types/actions?query=event%3Apush+branch%3Amain+workflow%3ACI)
|
| 33 |
+
[](https://pypi.python.org/pypi/annotated-types)
|
| 34 |
+
[](https://github.com/annotated-types/annotated-types)
|
| 35 |
+
[](https://github.com/annotated-types/annotated-types/blob/main/LICENSE)
|
| 36 |
+
|
| 37 |
+
[PEP-593](https://peps.python.org/pep-0593/) added `typing.Annotated` as a way of
|
| 38 |
+
adding context-specific metadata to existing types, and specifies that
|
| 39 |
+
`Annotated[T, x]` _should_ be treated as `T` by any tool or library without special
|
| 40 |
+
logic for `x`.
|
| 41 |
+
|
| 42 |
+
This package provides metadata objects which can be used to represent common
|
| 43 |
+
constraints such as upper and lower bounds on scalar values and collection sizes,
|
| 44 |
+
a `Predicate` marker for runtime checks, and
|
| 45 |
+
descriptions of how we intend these metadata to be interpreted. In some cases,
|
| 46 |
+
we also note alternative representations which do not require this package.
|
| 47 |
+
|
| 48 |
+
## Install
|
| 49 |
+
|
| 50 |
+
```bash
|
| 51 |
+
pip install annotated-types
|
| 52 |
+
```
|
| 53 |
+
|
| 54 |
+
## Examples
|
| 55 |
+
|
| 56 |
+
```python
|
| 57 |
+
from typing import Annotated
|
| 58 |
+
from annotated_types import Gt, Len, Predicate
|
| 59 |
+
|
| 60 |
+
class MyClass:
|
| 61 |
+
age: Annotated[int, Gt(18)] # Valid: 19, 20, ...
|
| 62 |
+
# Invalid: 17, 18, "19", 19.0, ...
|
| 63 |
+
factors: list[Annotated[int, Predicate(is_prime)]] # Valid: 2, 3, 5, 7, 11, ...
|
| 64 |
+
# Invalid: 4, 8, -2, 5.0, "prime", ...
|
| 65 |
+
|
| 66 |
+
my_list: Annotated[list[int], Len(0, 10)] # Valid: [], [10, 20, 30, 40, 50]
|
| 67 |
+
# Invalid: (1, 2), ["abc"], [0] * 20
|
| 68 |
+
```
|
| 69 |
+
|
| 70 |
+
## Documentation
|
| 71 |
+
|
| 72 |
+
_While `annotated-types` avoids runtime checks for performance, users should not
|
| 73 |
+
construct invalid combinations such as `MultipleOf("non-numeric")` or `Annotated[int, Len(3)]`.
|
| 74 |
+
Downstream implementors may choose to raise an error, emit a warning, silently ignore
|
| 75 |
+
a metadata item, etc., if the metadata objects described below are used with an
|
| 76 |
+
incompatible type - or for any other reason!_
|
| 77 |
+
|
| 78 |
+
### Gt, Ge, Lt, Le
|
| 79 |
+
|
| 80 |
+
Express inclusive and/or exclusive bounds on orderable values - which may be numbers,
|
| 81 |
+
dates, times, strings, sets, etc. Note that the boundary value need not be of the
|
| 82 |
+
same type that was annotated, so long as they can be compared: `Annotated[int, Gt(1.5)]`
|
| 83 |
+
is fine, for example, and implies that the value is an integer x such that `x > 1.5`.
|
| 84 |
+
|
| 85 |
+
We suggest that implementors may also interpret `functools.partial(operator.le, 1.5)`
|
| 86 |
+
as being equivalent to `Gt(1.5)`, for users who wish to avoid a runtime dependency on
|
| 87 |
+
the `annotated-types` package.
|
| 88 |
+
|
| 89 |
+
To be explicit, these types have the following meanings:
|
| 90 |
+
|
| 91 |
+
* `Gt(x)` - value must be "Greater Than" `x` - equivalent to exclusive minimum
|
| 92 |
+
* `Ge(x)` - value must be "Greater than or Equal" to `x` - equivalent to inclusive minimum
|
| 93 |
+
* `Lt(x)` - value must be "Less Than" `x` - equivalent to exclusive maximum
|
| 94 |
+
* `Le(x)` - value must be "Less than or Equal" to `x` - equivalent to inclusive maximum
|
| 95 |
+
|
| 96 |
+
### Interval
|
| 97 |
+
|
| 98 |
+
`Interval(gt, ge, lt, le)` allows you to specify an upper and lower bound with a single
|
| 99 |
+
metadata object. `None` attributes should be ignored, and non-`None` attributes
|
| 100 |
+
treated as per the single bounds above.
|
| 101 |
+
|
| 102 |
+
### MultipleOf
|
| 103 |
+
|
| 104 |
+
`MultipleOf(multiple_of=x)` might be interpreted in two ways:
|
| 105 |
+
|
| 106 |
+
1. Python semantics, implying `value % multiple_of == 0`, or
|
| 107 |
+
2. [JSONschema semantics](https://json-schema.org/draft/2020-12/json-schema-validation.html#rfc.section.6.2.1),
|
| 108 |
+
where `int(value / multiple_of) == value / multiple_of`.
|
| 109 |
+
|
| 110 |
+
We encourage users to be aware of these two common interpretations and their
|
| 111 |
+
distinct behaviours, especially since very large or non-integer numbers make
|
| 112 |
+
it easy to cause silent data corruption due to floating-point imprecision.
|
| 113 |
+
|
| 114 |
+
We encourage libraries to carefully document which interpretation they implement.
|
| 115 |
+
|
| 116 |
+
### MinLen, MaxLen, Len
|
| 117 |
+
|
| 118 |
+
`Len()` implies that `min_length <= len(value) <= max_length` - lower and upper bounds are inclusive.
|
| 119 |
+
|
| 120 |
+
As well as `Len()` which can optionally include upper and lower bounds, we also
|
| 121 |
+
provide `MinLen(x)` and `MaxLen(y)` which are equivalent to `Len(min_length=x)`
|
| 122 |
+
and `Len(max_length=y)` respectively.
|
| 123 |
+
|
| 124 |
+
`Len`, `MinLen`, and `MaxLen` may be used with any type which supports `len(value)`.
|
| 125 |
+
|
| 126 |
+
Examples of usage:
|
| 127 |
+
|
| 128 |
+
* `Annotated[list, MaxLen(10)]` (or `Annotated[list, Len(max_length=10))`) - list must have a length of 10 or less
|
| 129 |
+
* `Annotated[str, MaxLen(10)]` - string must have a length of 10 or less
|
| 130 |
+
* `Annotated[list, MinLen(3))` (or `Annotated[list, Len(min_length=3))`) - list must have a length of 3 or more
|
| 131 |
+
* `Annotated[list, Len(4, 6)]` - list must have a length of 4, 5, or 6
|
| 132 |
+
* `Annotated[list, Len(8, 8)]` - list must have a length of exactly 8
|
| 133 |
+
|
| 134 |
+
#### Changed in v0.4.0
|
| 135 |
+
|
| 136 |
+
* `min_inclusive` has been renamed to `min_length`, no change in meaning
|
| 137 |
+
* `max_exclusive` has been renamed to `max_length`, upper bound is now **inclusive** instead of **exclusive**
|
| 138 |
+
* The recommendation that slices are interpreted as `Len` has been removed due to ambiguity and different semantic
|
| 139 |
+
meaning of the upper bound in slices vs. `Len`
|
| 140 |
+
|
| 141 |
+
See [issue #23](https://github.com/annotated-types/annotated-types/issues/23) for discussion.
|
| 142 |
+
|
| 143 |
+
### Timezone
|
| 144 |
+
|
| 145 |
+
`Timezone` can be used with a `datetime` or a `time` to express which timezones
|
| 146 |
+
are allowed. `Annotated[datetime, Timezone(None)]` must be a naive datetime.
|
| 147 |
+
`Timezone[...]` ([literal ellipsis](https://docs.python.org/3/library/constants.html#Ellipsis))
|
| 148 |
+
expresses that any timezone-aware datetime is allowed. You may also pass a specific
|
| 149 |
+
timezone string or [`tzinfo`](https://docs.python.org/3/library/datetime.html#tzinfo-objects)
|
| 150 |
+
object such as `Timezone(timezone.utc)` or `Timezone("Africa/Abidjan")` to express that you only
|
| 151 |
+
allow a specific timezone, though we note that this is often a symptom of fragile design.
|
| 152 |
+
|
| 153 |
+
#### Changed in v0.x.x
|
| 154 |
+
|
| 155 |
+
* `Timezone` accepts [`tzinfo`](https://docs.python.org/3/library/datetime.html#tzinfo-objects) objects instead of
|
| 156 |
+
`timezone`, extending compatibility to [`zoneinfo`](https://docs.python.org/3/library/zoneinfo.html) and third party libraries.
|
| 157 |
+
|
| 158 |
+
### Unit
|
| 159 |
+
|
| 160 |
+
`Unit(unit: str)` expresses that the annotated numeric value is the magnitude of
|
| 161 |
+
a quantity with the specified unit. For example, `Annotated[float, Unit("m/s")]`
|
| 162 |
+
would be a float representing a velocity in meters per second.
|
| 163 |
+
|
| 164 |
+
Please note that `annotated_types` itself makes no attempt to parse or validate
|
| 165 |
+
the unit string in any way. That is left entirely to downstream libraries,
|
| 166 |
+
such as [`pint`](https://pint.readthedocs.io) or
|
| 167 |
+
[`astropy.units`](https://docs.astropy.org/en/stable/units/).
|
| 168 |
+
|
| 169 |
+
An example of how a library might use this metadata:
|
| 170 |
+
|
| 171 |
+
```python
|
| 172 |
+
from annotated_types import Unit
|
| 173 |
+
from typing import Annotated, TypeVar, Callable, Any, get_origin, get_args
|
| 174 |
+
|
| 175 |
+
# given a type annotated with a unit:
|
| 176 |
+
Meters = Annotated[float, Unit("m")]
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
# you can cast the annotation to a specific unit type with any
|
| 180 |
+
# callable that accepts a string and returns the desired type
|
| 181 |
+
T = TypeVar("T")
|
| 182 |
+
def cast_unit(tp: Any, unit_cls: Callable[[str], T]) -> T | None:
|
| 183 |
+
if get_origin(tp) is Annotated:
|
| 184 |
+
for arg in get_args(tp):
|
| 185 |
+
if isinstance(arg, Unit):
|
| 186 |
+
return unit_cls(arg.unit)
|
| 187 |
+
return None
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
# using `pint`
|
| 191 |
+
import pint
|
| 192 |
+
pint_unit = cast_unit(Meters, pint.Unit)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
# using `astropy.units`
|
| 196 |
+
import astropy.units as u
|
| 197 |
+
astropy_unit = cast_unit(Meters, u.Unit)
|
| 198 |
+
```
|
| 199 |
+
|
| 200 |
+
### Predicate
|
| 201 |
+
|
| 202 |
+
`Predicate(func: Callable)` expresses that `func(value)` is truthy for valid values.
|
| 203 |
+
Users should prefer the statically inspectable metadata above, but if you need
|
| 204 |
+
the full power and flexibility of arbitrary runtime predicates... here it is.
|
| 205 |
+
|
| 206 |
+
For some common constraints, we provide generic types:
|
| 207 |
+
|
| 208 |
+
* `IsLower = Annotated[T, Predicate(str.islower)]`
|
| 209 |
+
* `IsUpper = Annotated[T, Predicate(str.isupper)]`
|
| 210 |
+
* `IsDigit = Annotated[T, Predicate(str.isdigit)]`
|
| 211 |
+
* `IsFinite = Annotated[T, Predicate(math.isfinite)]`
|
| 212 |
+
* `IsNotFinite = Annotated[T, Predicate(Not(math.isfinite))]`
|
| 213 |
+
* `IsNan = Annotated[T, Predicate(math.isnan)]`
|
| 214 |
+
* `IsNotNan = Annotated[T, Predicate(Not(math.isnan))]`
|
| 215 |
+
* `IsInfinite = Annotated[T, Predicate(math.isinf)]`
|
| 216 |
+
* `IsNotInfinite = Annotated[T, Predicate(Not(math.isinf))]`
|
| 217 |
+
|
| 218 |
+
so that you can write e.g. `x: IsFinite[float] = 2.0` instead of the longer
|
| 219 |
+
(but exactly equivalent) `x: Annotated[float, Predicate(math.isfinite)] = 2.0`.
|
| 220 |
+
|
| 221 |
+
Some libraries might have special logic to handle known or understandable predicates,
|
| 222 |
+
for example by checking for `str.isdigit` and using its presence to both call custom
|
| 223 |
+
logic to enforce digit-only strings, and customise some generated external schema.
|
| 224 |
+
Users are therefore encouraged to avoid indirection like `lambda s: s.lower()`, in
|
| 225 |
+
favor of introspectable methods such as `str.lower` or `re.compile("pattern").search`.
|
| 226 |
+
|
| 227 |
+
To enable basic negation of commonly used predicates like `math.isnan` without introducing introspection that makes it impossible for implementers to introspect the predicate we provide a `Not` wrapper that simply negates the predicate in an introspectable manner. Several of the predicates listed above are created in this manner.
|
| 228 |
+
|
| 229 |
+
We do not specify what behaviour should be expected for predicates that raise
|
| 230 |
+
an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently
|
| 231 |
+
skip invalid constraints, or statically raise an error; or it might try calling it
|
| 232 |
+
and then propagate or discard the resulting
|
| 233 |
+
`TypeError: descriptor 'isdigit' for 'str' objects doesn't apply to a 'int' object`
|
| 234 |
+
exception. We encourage libraries to document the behaviour they choose.
|
| 235 |
+
|
| 236 |
+
### Doc
|
| 237 |
+
|
| 238 |
+
`doc()` can be used to add documentation information in `Annotated`, for function and method parameters, variables, class attributes, return types, and any place where `Annotated` can be used.
|
| 239 |
+
|
| 240 |
+
It expects a value that can be statically analyzed, as the main use case is for static analysis, editors, documentation generators, and similar tools.
|
| 241 |
+
|
| 242 |
+
It returns a `DocInfo` class with a single attribute `documentation` containing the value passed to `doc()`.
|
| 243 |
+
|
| 244 |
+
This is the early adopter's alternative form of the [`typing-doc` proposal](https://github.com/tiangolo/fastapi/blob/typing-doc/typing_doc.md).
|
| 245 |
+
|
| 246 |
+
### Integrating downstream types with `GroupedMetadata`
|
| 247 |
+
|
| 248 |
+
Implementers may choose to provide a convenience wrapper that groups multiple pieces of metadata.
|
| 249 |
+
This can help reduce verbosity and cognitive overhead for users.
|
| 250 |
+
For example, an implementer like Pydantic might provide a `Field` or `Meta` type that accepts keyword arguments and transforms these into low-level metadata:
|
| 251 |
+
|
| 252 |
+
```python
|
| 253 |
+
from dataclasses import dataclass
|
| 254 |
+
from typing import Iterator
|
| 255 |
+
from annotated_types import GroupedMetadata, Ge
|
| 256 |
+
|
| 257 |
+
@dataclass
|
| 258 |
+
class Field(GroupedMetadata):
|
| 259 |
+
ge: int | None = None
|
| 260 |
+
description: str | None = None
|
| 261 |
+
|
| 262 |
+
def __iter__(self) -> Iterator[object]:
|
| 263 |
+
# Iterating over a GroupedMetadata object should yield annotated-types
|
| 264 |
+
# constraint metadata objects which describe it as fully as possible,
|
| 265 |
+
# and may include other unknown objects too.
|
| 266 |
+
if self.ge is not None:
|
| 267 |
+
yield Ge(self.ge)
|
| 268 |
+
if self.description is not None:
|
| 269 |
+
yield Description(self.description)
|
| 270 |
+
```
|
| 271 |
+
|
| 272 |
+
Libraries consuming annotated-types constraints should check for `GroupedMetadata` and unpack it by iterating over the object and treating the results as if they had been "unpacked" in the `Annotated` type. The same logic should be applied to the [PEP 646 `Unpack` type](https://peps.python.org/pep-0646/), so that `Annotated[T, Field(...)]`, `Annotated[T, Unpack[Field(...)]]` and `Annotated[T, *Field(...)]` are all treated consistently.
|
| 273 |
+
|
| 274 |
+
Libraries consuming annotated-types should also ignore any metadata they do not recongize that came from unpacking a `GroupedMetadata`, just like they ignore unrecognized metadata in `Annotated` itself.
|
| 275 |
+
|
| 276 |
+
Our own `annotated_types.Interval` class is a `GroupedMetadata` which unpacks itself into `Gt`, `Lt`, etc., so this is not an abstract concern. Similarly, `annotated_types.Len` is a `GroupedMetadata` which unpacks itself into `MinLen` (optionally) and `MaxLen`.
|
| 277 |
+
|
| 278 |
+
### Consuming metadata
|
| 279 |
+
|
| 280 |
+
We intend to not be prescriptive as to _how_ the metadata and constraints are used, but as an example of how one might parse constraints from types annotations see our [implementation in `test_main.py`](https://github.com/annotated-types/annotated-types/blob/f59cf6d1b5255a0fe359b93896759a180bec30ae/tests/test_main.py#L94-L103).
|
| 281 |
+
|
| 282 |
+
It is up to the implementer to determine how this metadata is used.
|
| 283 |
+
You could use the metadata for runtime type checking, for generating schemas or to generate example data, amongst other use cases.
|
| 284 |
+
|
| 285 |
+
## Design & History
|
| 286 |
+
|
| 287 |
+
This package was designed at the PyCon 2022 sprints by the maintainers of Pydantic
|
| 288 |
+
and Hypothesis, with the goal of making it as easy as possible for end-users to
|
| 289 |
+
provide more informative annotations for use by runtime libraries.
|
| 290 |
+
|
| 291 |
+
It is deliberately minimal, and following PEP-593 allows considerable downstream
|
| 292 |
+
discretion in what (if anything!) they choose to support. Nonetheless, we expect
|
| 293 |
+
that staying simple and covering _only_ the most common use-cases will give users
|
| 294 |
+
and maintainers the best experience we can. If you'd like more constraints for your
|
| 295 |
+
types - follow our lead, by defining them and documenting them downstream!
|
evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
annotated_types-0.7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
annotated_types-0.7.0.dist-info/METADATA,sha256=7ltqxksJJ0wCYFGBNIQCWTlWQGeAH0hRFdnK3CB895E,15046
|
| 3 |
+
annotated_types-0.7.0.dist-info/RECORD,,
|
| 4 |
+
annotated_types-0.7.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 5 |
+
annotated_types-0.7.0.dist-info/WHEEL,sha256=zEMcRr9Kr03x1ozGwg5v9NQBKn3kndp6LSoSlVg-jhU,87
|
| 6 |
+
annotated_types-0.7.0.dist-info/licenses/LICENSE,sha256=_hBJiEsaDZNCkB6I4H8ykl0ksxIdmXK2poBfuYJLCV0,1083
|
| 7 |
+
annotated_types/__init__.py,sha256=RynLsRKUEGI0KimXydlD1fZEfEzWwDo0Uon3zOKhG1Q,13819
|
| 8 |
+
annotated_types/__pycache__/__init__.cpython-310.pyc,,
|
| 9 |
+
annotated_types/__pycache__/test_cases.cpython-310.pyc,,
|
| 10 |
+
annotated_types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 11 |
+
annotated_types/test_cases.py,sha256=zHFX6EpcMbGJ8FzBYDbO56bPwx_DYIVSKbZM-4B3_lg,6421
|
evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/REQUESTED
ADDED
|
File without changes
|
evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: hatchling 1.24.2
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
evalkit_internvl/lib/python3.10/site-packages/annotated_types-0.7.0.dist-info/licenses/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
The MIT License (MIT)
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2022 the contributors
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (634 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/ebay.cpython-310.pyc
ADDED
|
Binary file (780 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/facebook.cpython-310.pyc
ADDED
|
Binary file (945 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/weibo.cpython-310.pyc
ADDED
|
Binary file (731 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/_jit_internal.py
ADDED
|
@@ -0,0 +1,1510 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
The weak_script annotation needs to be here instead of inside torch/jit/ so it
|
| 3 |
+
can be used in other places in torch/ (namely torch.nn) without running into
|
| 4 |
+
circular dependency problems
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import ast
|
| 8 |
+
import builtins
|
| 9 |
+
import collections
|
| 10 |
+
import contextlib
|
| 11 |
+
import enum
|
| 12 |
+
import inspect
|
| 13 |
+
import io
|
| 14 |
+
import pickle
|
| 15 |
+
import sys
|
| 16 |
+
import threading
|
| 17 |
+
import types
|
| 18 |
+
import typing
|
| 19 |
+
import warnings
|
| 20 |
+
import weakref
|
| 21 |
+
from textwrap import dedent
|
| 22 |
+
from typing import ( # noqa: F401
|
| 23 |
+
Any,
|
| 24 |
+
Callable,
|
| 25 |
+
Dict,
|
| 26 |
+
Final,
|
| 27 |
+
ForwardRef,
|
| 28 |
+
Generic,
|
| 29 |
+
get_args, # new in 3.8
|
| 30 |
+
get_origin, # new in 3.8
|
| 31 |
+
List,
|
| 32 |
+
Optional,
|
| 33 |
+
Tuple,
|
| 34 |
+
Type,
|
| 35 |
+
TypeVar,
|
| 36 |
+
Union,
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
import torch
|
| 40 |
+
|
| 41 |
+
# This is needed. `torch._jit_internal` is imported before `torch.distributed.__init__`.
|
| 42 |
+
# Explicitly ask to import `torch.distributed.__init__` first.
|
| 43 |
+
# Otherwise, "AttributeError: module 'torch' has no attribute 'distributed'" is raised.
|
| 44 |
+
import torch.distributed.rpc
|
| 45 |
+
import torch.package._mangling as package_mangling
|
| 46 |
+
from torch._awaits import _Await
|
| 47 |
+
from torch._C import _Await as CAwait, Future as CFuture
|
| 48 |
+
from torch._sources import fake_range, get_source_lines_and_file, parse_def
|
| 49 |
+
from torch.futures import Future
|
| 50 |
+
|
| 51 |
+
IS_PY39_PLUS: Final[bool] = sys.version_info >= (3, 9)
|
| 52 |
+
IS_PY310_PLUS: Final[bool] = sys.version_info >= (3, 10)
|
| 53 |
+
|
| 54 |
+
BuiltinUnionType: Union[Type, Tuple[Type, ...]]
|
| 55 |
+
if sys.version_info >= (3, 10):
|
| 56 |
+
# NOTE: IS_PY310_PLUS doesn't work with mypy.
|
| 57 |
+
# cf. https://mypy.readthedocs.io/en/stable/common_issues.html#python-version-and-system-platform-checks
|
| 58 |
+
BuiltinUnionType = types.UnionType
|
| 59 |
+
else:
|
| 60 |
+
BuiltinUnionType = () # trick: this makes isinstance short circuit.
|
| 61 |
+
|
| 62 |
+
LockType: Type
|
| 63 |
+
try:
|
| 64 |
+
import _thread
|
| 65 |
+
|
| 66 |
+
LockType = _thread.LockType
|
| 67 |
+
except ImportError:
|
| 68 |
+
import _dummy_thread
|
| 69 |
+
|
| 70 |
+
LockType = _dummy_thread.LockType
|
| 71 |
+
|
| 72 |
+
# Wrapper functions that can call either of 2 functions depending on a boolean
|
| 73 |
+
# argument
|
| 74 |
+
boolean_dispatched: "weakref.WeakKeyDictionary[Callable, Dict[str, Callable]]" = (
|
| 75 |
+
weakref.WeakKeyDictionary()
|
| 76 |
+
) # noqa: T484
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
FAKE_FILENAME_PREFIX = "__torch_jit_dataclass"
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class SourceLoader:
|
| 83 |
+
def __init__(self):
|
| 84 |
+
self.content = {}
|
| 85 |
+
|
| 86 |
+
def cache(self, fn, source):
|
| 87 |
+
self.content[fn] = source
|
| 88 |
+
|
| 89 |
+
def get_source(self, fn):
|
| 90 |
+
return self.content.get(fn)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
loader = SourceLoader()
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def createResolutionCallbackFromEnv(lookup_base):
|
| 97 |
+
"""
|
| 98 |
+
Creates a resolution callback that will look up qualified names in an
|
| 99 |
+
environment, starting with `lookup_base` for the base of any qualified
|
| 100 |
+
names, then proceeding down the lookup chain with the resolved object.
|
| 101 |
+
|
| 102 |
+
You should not use this directly, it should only be used from the other
|
| 103 |
+
createResolutionCallbackFrom* functions.
|
| 104 |
+
"""
|
| 105 |
+
|
| 106 |
+
def lookupInModule(qualified_name, module):
|
| 107 |
+
if "." in qualified_name:
|
| 108 |
+
parts = qualified_name.split(".")
|
| 109 |
+
base = parts[0]
|
| 110 |
+
remaining_pieces = ".".join(parts[1:])
|
| 111 |
+
module_value = getattr(module, base)
|
| 112 |
+
return lookupInModule(remaining_pieces, module_value)
|
| 113 |
+
else:
|
| 114 |
+
return getattr(module, qualified_name)
|
| 115 |
+
|
| 116 |
+
def parseNestedExpr(expr, module) -> Tuple[Any, int]:
|
| 117 |
+
i = 0
|
| 118 |
+
while i < len(expr) and expr[i] not in (",", "[", "]"):
|
| 119 |
+
i += 1
|
| 120 |
+
|
| 121 |
+
# Special case logic for the empty Tuple as a subscript (used
|
| 122 |
+
# in the type annotation `Tuple[()]`)
|
| 123 |
+
if expr[:i] == "()":
|
| 124 |
+
return (), i
|
| 125 |
+
|
| 126 |
+
base = lookupInModule(expr[:i].strip(), module)
|
| 127 |
+
assert base is not None, f"Unresolvable type {expr[:i]}"
|
| 128 |
+
if i == len(expr) or expr[i] != "[":
|
| 129 |
+
return base, i
|
| 130 |
+
|
| 131 |
+
assert expr[i] == "["
|
| 132 |
+
parts = []
|
| 133 |
+
while expr[i] != "]":
|
| 134 |
+
part_len = 0
|
| 135 |
+
i += 1
|
| 136 |
+
part, part_len = parseNestedExpr(expr[i:], module)
|
| 137 |
+
parts.append(part)
|
| 138 |
+
i += part_len
|
| 139 |
+
if len(parts) > 1:
|
| 140 |
+
return base[tuple(parts)], i + 1
|
| 141 |
+
else:
|
| 142 |
+
return base[parts[0]], i + 1
|
| 143 |
+
|
| 144 |
+
def parseExpr(expr, module):
|
| 145 |
+
try:
|
| 146 |
+
value, len_parsed = parseNestedExpr(expr, module)
|
| 147 |
+
assert len_parsed == len(
|
| 148 |
+
expr
|
| 149 |
+
), "whole expression was not parsed, falling back to c++ parser"
|
| 150 |
+
return value
|
| 151 |
+
except Exception:
|
| 152 |
+
"""
|
| 153 |
+
The python resolver fails in several cases in known unit tests, and is intended
|
| 154 |
+
to fall back gracefully to the c++ resolver in general. For example, python 2 style
|
| 155 |
+
annotations which are frequent in our unit tests often fail with types e.g. int not
|
| 156 |
+
resolvable from the calling frame.
|
| 157 |
+
"""
|
| 158 |
+
return None
|
| 159 |
+
|
| 160 |
+
return lambda expr: parseExpr(expr, lookup_base)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def createResolutionCallbackFromFrame(frames_up: int = 0):
|
| 164 |
+
"""
|
| 165 |
+
Creates a function which, given a string variable name,
|
| 166 |
+
returns the value of the variable in the scope of the caller of
|
| 167 |
+
the function which called createResolutionCallbackFromFrame (by default).
|
| 168 |
+
|
| 169 |
+
This is used to enable access in-scope Python variables inside
|
| 170 |
+
TorchScript fragments.
|
| 171 |
+
|
| 172 |
+
frames_up is number of additional frames to go up on the stack.
|
| 173 |
+
The default value is 0, which correspond to the frame of the caller
|
| 174 |
+
of createResolutionCallbackFromFrame. Also for example, if frames_up is set
|
| 175 |
+
to 1, then the frame of the caller's caller of createResolutionCallbackFromFrame
|
| 176 |
+
will be taken.
|
| 177 |
+
|
| 178 |
+
For example, the following program prints 2::
|
| 179 |
+
|
| 180 |
+
def bar():
|
| 181 |
+
cb = createResolutionCallbackFromFrame(1)
|
| 182 |
+
print(cb("foo"))
|
| 183 |
+
|
| 184 |
+
def baz():
|
| 185 |
+
foo = 2
|
| 186 |
+
bar()
|
| 187 |
+
|
| 188 |
+
baz()
|
| 189 |
+
"""
|
| 190 |
+
frame = inspect.currentframe()
|
| 191 |
+
i = 0
|
| 192 |
+
while i < frames_up + 1:
|
| 193 |
+
assert frame is not None
|
| 194 |
+
frame = frame.f_back
|
| 195 |
+
i += 1
|
| 196 |
+
|
| 197 |
+
assert frame is not None
|
| 198 |
+
f_locals = frame.f_locals
|
| 199 |
+
f_globals = frame.f_globals
|
| 200 |
+
|
| 201 |
+
class env:
|
| 202 |
+
def __getattr__(self, key):
|
| 203 |
+
if key in f_locals:
|
| 204 |
+
return f_locals[key]
|
| 205 |
+
elif key in f_globals:
|
| 206 |
+
return f_globals[key]
|
| 207 |
+
elif key in dir(builtins):
|
| 208 |
+
return getattr(builtins, key)
|
| 209 |
+
|
| 210 |
+
return createResolutionCallbackFromEnv(env())
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def get_closure(fn):
|
| 214 |
+
"""
|
| 215 |
+
Get a dictionary of closed over variables from a function
|
| 216 |
+
"""
|
| 217 |
+
captures = {}
|
| 218 |
+
captures.update(fn.__globals__)
|
| 219 |
+
|
| 220 |
+
for index, captured_name in enumerate(fn.__code__.co_freevars):
|
| 221 |
+
captures[captured_name] = fn.__closure__[index].cell_contents
|
| 222 |
+
|
| 223 |
+
return captures
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
# [local resolution in python]
|
| 227 |
+
# Depending on where a variable is defined, and where it is used, we may
|
| 228 |
+
# or may not be able to recover its value when recursively compiling a
|
| 229 |
+
# script function. Remember in the general case, a module or function is
|
| 230 |
+
# first defined and then later scripted. This means we do not have a
|
| 231 |
+
# chance to capture the active frames when the function is defined. Hence any
|
| 232 |
+
# name resolution has to happen later on the created closure. The way
|
| 233 |
+
# python captures type annotations restricts what we can recover. The
|
| 234 |
+
# follow example illustrates the different cases:
|
| 235 |
+
#
|
| 236 |
+
# class MyGlobalClass:
|
| 237 |
+
# ...
|
| 238 |
+
# def my_local_scope():
|
| 239 |
+
# @torch.jit.script
|
| 240 |
+
# class MyClass:
|
| 241 |
+
# ...
|
| 242 |
+
# @torch.jit.script
|
| 243 |
+
# class MyClassUsedAsVar:
|
| 244 |
+
# ...
|
| 245 |
+
# def eg(x: MyClass, y: MyGlobalClass):
|
| 246 |
+
# a_local_capture : Foo
|
| 247 |
+
# return MyClassUsedAsVar(x)
|
| 248 |
+
#
|
| 249 |
+
# MyGlobalClass is defined in the __globals__ dictionary of function
|
| 250 |
+
# 'eg', so it is always recoverable. my_local_scope introduces a new local
|
| 251 |
+
# variable scope in the function. Classes defined here are only visible as
|
| 252 |
+
# local variables. For the case of MyClassUsedAsVar, it is captured
|
| 253 |
+
# because it is used as a variable inside the body of the function, and we
|
| 254 |
+
# can resolve it using the captures returned from `get_closure`. However,
|
| 255 |
+
# the type annotations are not captured by the closure. In Python
|
| 256 |
+
# 3.0--3.9, the _value_ of MyClass and MyGlobalClass will be available as
|
| 257 |
+
# annotations on `eg``, but starting in Python 4.0, they will represented as
|
| 258 |
+
# strings and no longer present. Furthermore, since the body of `eg` does
|
| 259 |
+
# not reference those names, they do not appear in the list of closed over
|
| 260 |
+
# variables. In Python 2.x, type annotations are in comments, leading to a
|
| 261 |
+
# similar situation where their definitions are not available. We anticipate
|
| 262 |
+
# that most users will not run into this issue because their modules and
|
| 263 |
+
# functions will be defined at a global scope like MyGlobalClass. In cases
|
| 264 |
+
# where they are not, it is possible to work around issues by declaring the
|
| 265 |
+
# values global in the function.
|
| 266 |
+
# In Python 3.9 declaring class as global will make it invisible to
|
| 267 |
+
# `inspect.getsource`, see https://bugs.python.org/issue42666 .
|
| 268 |
+
# This could be worked around by manualy adding it to `global()` dictionary.
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def createResolutionCallbackFromClosure(fn):
|
| 272 |
+
"""
|
| 273 |
+
Create a resolutionCallback by introspecting the function instead of
|
| 274 |
+
looking up the stack for the enclosing scope
|
| 275 |
+
"""
|
| 276 |
+
closure = get_closure(fn)
|
| 277 |
+
|
| 278 |
+
class closure_lookup:
|
| 279 |
+
# This is a class since `closure` is a dict and it's easier in
|
| 280 |
+
# `env_helper` if everything just works with `getattr` calls
|
| 281 |
+
def __getattr__(self, key):
|
| 282 |
+
if key in closure:
|
| 283 |
+
return closure[key]
|
| 284 |
+
elif hasattr(typing, key):
|
| 285 |
+
return getattr(typing, key)
|
| 286 |
+
elif hasattr(builtins, key):
|
| 287 |
+
return getattr(builtins, key)
|
| 288 |
+
return None
|
| 289 |
+
|
| 290 |
+
return createResolutionCallbackFromEnv(closure_lookup())
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def can_compile_class(cls) -> bool:
|
| 294 |
+
# If any of the functions on a type don't have a code object, this type can't
|
| 295 |
+
# be compiled and is probably a builtin / bound from C
|
| 296 |
+
if is_ignored_fn(cls):
|
| 297 |
+
return False
|
| 298 |
+
|
| 299 |
+
# Ignore the following list of built-in classes.
|
| 300 |
+
ignored_builtin_classes = (torch.nn.Module, tuple, list, Exception)
|
| 301 |
+
if issubclass(cls, ignored_builtin_classes):
|
| 302 |
+
return False
|
| 303 |
+
|
| 304 |
+
names = cls.__dict__
|
| 305 |
+
fns = [
|
| 306 |
+
getattr(cls, name)
|
| 307 |
+
for name in names
|
| 308 |
+
if inspect.isroutine(getattr(cls, name, None))
|
| 309 |
+
]
|
| 310 |
+
has_code = [hasattr(fn, "__code__") for fn in fns]
|
| 311 |
+
return all(has_code)
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def get_callable_argument_names(fn) -> List[str]:
|
| 315 |
+
"""
|
| 316 |
+
Gets names of all POSITIONAL_OR_KEYWORD arguments for callable `fn`.
|
| 317 |
+
Returns an empty list when other types of arguments are present.
|
| 318 |
+
|
| 319 |
+
This is used by `torch.jit.trace` to assign meaningful argument names to
|
| 320 |
+
traced functions and modules.
|
| 321 |
+
|
| 322 |
+
Args:
|
| 323 |
+
fn: A callable.
|
| 324 |
+
Returns:
|
| 325 |
+
Argument names: List[str]
|
| 326 |
+
"""
|
| 327 |
+
# inspect.signature may fail, give up in that case.
|
| 328 |
+
try:
|
| 329 |
+
callable_signature = inspect.signature(fn)
|
| 330 |
+
except Exception:
|
| 331 |
+
return []
|
| 332 |
+
|
| 333 |
+
argument_names = []
|
| 334 |
+
for name, param in callable_signature.parameters.items():
|
| 335 |
+
# All four other types of arguments do not map to individual values
|
| 336 |
+
# with a keyword as name.
|
| 337 |
+
if not param.kind == param.POSITIONAL_OR_KEYWORD:
|
| 338 |
+
continue
|
| 339 |
+
|
| 340 |
+
argument_names.append(name)
|
| 341 |
+
|
| 342 |
+
return argument_names
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def get_annotation_str(annotation):
|
| 346 |
+
"""
|
| 347 |
+
Convert an AST node containing a type annotation to the string present in the source
|
| 348 |
+
that represents the same annotation.
|
| 349 |
+
"""
|
| 350 |
+
if isinstance(annotation, ast.Name):
|
| 351 |
+
return annotation.id
|
| 352 |
+
elif isinstance(annotation, ast.Attribute):
|
| 353 |
+
return ".".join([get_annotation_str(annotation.value), annotation.attr])
|
| 354 |
+
elif isinstance(annotation, ast.Subscript):
|
| 355 |
+
# In Python3.9+ subscript indicies are not wrapped in ast.Index
|
| 356 |
+
subscript_slice = annotation.slice if IS_PY39_PLUS else annotation.slice.value # type: ignore[attr-defined]
|
| 357 |
+
return f"{get_annotation_str(annotation.value)}[{get_annotation_str(subscript_slice)}]"
|
| 358 |
+
elif isinstance(annotation, ast.Tuple):
|
| 359 |
+
return ",".join([get_annotation_str(elt) for elt in annotation.elts])
|
| 360 |
+
elif isinstance(annotation, (ast.Constant, ast.NameConstant)):
|
| 361 |
+
return f"{annotation.value}"
|
| 362 |
+
|
| 363 |
+
# If an AST node is not handled here, it's probably handled in ScriptTypeParser.
|
| 364 |
+
return None
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
def get_type_hint_captures(fn):
|
| 368 |
+
"""
|
| 369 |
+
Get a dictionary containing type resolution mappings necessary to resolve types
|
| 370 |
+
for the literal annotations on 'fn'. These are not considered to be closed-over by fn
|
| 371 |
+
and must be obtained separately (e.g. using this function).
|
| 372 |
+
|
| 373 |
+
Args:
|
| 374 |
+
fn: A callable.
|
| 375 |
+
Returns:
|
| 376 |
+
A Dict[str, Any] containing a mapping from the literal annotations used on
|
| 377 |
+
fn to the Python objects they refer to.
|
| 378 |
+
"""
|
| 379 |
+
# First, try to get the source of the function. We'll need to parse it to find the actual string names
|
| 380 |
+
# that were used to annotate the types, since inspect.signature() will only return the class object that
|
| 381 |
+
# the annotation refers to, not the string name. If we can't get the source, simply return an empty dict.
|
| 382 |
+
# This may happen in cases where the function is synthesized dynamically at runtime.
|
| 383 |
+
src = loader.get_source(fn)
|
| 384 |
+
if src is None:
|
| 385 |
+
src = inspect.getsource(fn)
|
| 386 |
+
|
| 387 |
+
# Gather a dictionary of parameter name -> type, skipping any parameters whose annotated
|
| 388 |
+
# types are strings. These are only understood by TorchScript in the context of a type annotation
|
| 389 |
+
# that refers to a class in its own definition, but trying to include a mapping for this in the result
|
| 390 |
+
# function would cause infinite recursion because the class is currently being compiled.
|
| 391 |
+
# In addition, there is logic in ScriptTypeParser to handle this.
|
| 392 |
+
signature = inspect.signature(fn)
|
| 393 |
+
name_to_type = {
|
| 394 |
+
name: parameter.annotation
|
| 395 |
+
for name, parameter in signature.parameters.items()
|
| 396 |
+
if parameter.annotation is not inspect.Parameter.empty
|
| 397 |
+
and not isinstance(parameter.annotation, str)
|
| 398 |
+
}
|
| 399 |
+
|
| 400 |
+
# Then, get the literal type annotations from the function declaration
|
| 401 |
+
# by source inspection. This accounts for the case in which aliases are used
|
| 402 |
+
# to annotate the arguments (e.g device_t = torch.device, and then d: device_t).
|
| 403 |
+
# frontend.py cannot be used here because it includes _jit_internal, so use ast instead.
|
| 404 |
+
a = ast.parse(dedent(src))
|
| 405 |
+
if len(a.body) != 1 or not isinstance(a.body[0], ast.FunctionDef):
|
| 406 |
+
raise RuntimeError(f"Expected {fn} to be a function")
|
| 407 |
+
f = a.body[0]
|
| 408 |
+
|
| 409 |
+
# Prepare a dictionary of source annotation -> type, which will be the final result of this function,
|
| 410 |
+
# by using the parsed AST (f) to reconstruct source annotations as strings for each parameter and mapping
|
| 411 |
+
# them to the type object corresponding to the annotation via name_to_type using the parameter name.
|
| 412 |
+
annotation_to_type = {}
|
| 413 |
+
|
| 414 |
+
for arg in f.args.args:
|
| 415 |
+
# Get the source type annotation string for this argument if possible.
|
| 416 |
+
arg_annotation_str = (
|
| 417 |
+
get_annotation_str(arg.annotation) if arg.annotation else None
|
| 418 |
+
)
|
| 419 |
+
|
| 420 |
+
# If the argument has no annotation or get_annotation_str cannot convert it to a string,
|
| 421 |
+
# arg_annotation_str will be None. Skip this arg; ScriptTypeParser will probably handle
|
| 422 |
+
# this in the latter case.
|
| 423 |
+
if arg_annotation_str is None:
|
| 424 |
+
continue
|
| 425 |
+
|
| 426 |
+
# Insert {arg_annotation_str: type} into annotation_to_type if possible. One reason arg_name may not
|
| 427 |
+
# be present in name_to_type is that the annotation itself is a string and not a type object
|
| 428 |
+
# (common for self-refential annotations in classes). Once again, let ScriptTypeParser handle this.
|
| 429 |
+
arg_name = arg.arg
|
| 430 |
+
if arg_name in name_to_type:
|
| 431 |
+
annotation_to_type[arg_annotation_str] = name_to_type[arg_name]
|
| 432 |
+
|
| 433 |
+
# If there is a valid return annotation, include it in annotation_to_type. As with argument annotations,
|
| 434 |
+
# the literal annotation has to be convertible to a string by get_annotation_str, and the actual type
|
| 435 |
+
# of the annotation cannot be a string.
|
| 436 |
+
literal_return_annotation = get_annotation_str(f.returns)
|
| 437 |
+
valid_literal_annotation = literal_return_annotation is not None
|
| 438 |
+
return_annotation = signature.return_annotation
|
| 439 |
+
valid_return_annotation_type = (
|
| 440 |
+
return_annotation is not inspect.Parameter.empty
|
| 441 |
+
and not isinstance(return_annotation, str)
|
| 442 |
+
)
|
| 443 |
+
if valid_literal_annotation and valid_return_annotation_type:
|
| 444 |
+
annotation_to_type[literal_return_annotation] = return_annotation
|
| 445 |
+
|
| 446 |
+
return annotation_to_type
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
def createResolutionCallbackForClassMethods(cls):
|
| 450 |
+
"""
|
| 451 |
+
This looks at all the methods defined in a class and pulls their closed-over
|
| 452 |
+
variables into a dictionary and uses that to resolve variables.
|
| 453 |
+
"""
|
| 454 |
+
# cls is a type here, so `ismethod` is false since the methods on the type
|
| 455 |
+
# aren't bound to anything, so Python treats them as regular functions
|
| 456 |
+
fns = [
|
| 457 |
+
getattr(cls, name)
|
| 458 |
+
for name in cls.__dict__
|
| 459 |
+
if inspect.isroutine(getattr(cls, name))
|
| 460 |
+
]
|
| 461 |
+
# Skip built-ins, as they do not have global scope nor type hints
|
| 462 |
+
# Needed to support `enum.Enum` derived classes in Python-3.11
|
| 463 |
+
# That adds `_new_member_` property which is an alias to `__new__`
|
| 464 |
+
fns = [fn for fn in fns if not inspect.isbuiltin(fn) and hasattr(fn, "__globals__")]
|
| 465 |
+
captures = {}
|
| 466 |
+
|
| 467 |
+
for fn in fns:
|
| 468 |
+
captures.update(get_closure(fn))
|
| 469 |
+
captures.update(get_type_hint_captures(fn))
|
| 470 |
+
|
| 471 |
+
def lookup_in_class(key):
|
| 472 |
+
if key in captures:
|
| 473 |
+
return captures[key]
|
| 474 |
+
else:
|
| 475 |
+
return getattr(builtins, key, None)
|
| 476 |
+
|
| 477 |
+
return lookup_in_class
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
def boolean_dispatch(
|
| 481 |
+
arg_name, arg_index, default, if_true, if_false, module_name, func_name
|
| 482 |
+
):
|
| 483 |
+
"""
|
| 484 |
+
Dispatches to either of 2 script functions based on a boolean argument.
|
| 485 |
+
In TorchScript, the boolean argument must be constant so that the correct
|
| 486 |
+
function to use can be determined at compile time.
|
| 487 |
+
"""
|
| 488 |
+
|
| 489 |
+
def fn(*args, **kwargs):
|
| 490 |
+
dispatch_flag = default
|
| 491 |
+
if arg_name in kwargs:
|
| 492 |
+
dispatch_flag = kwargs[arg_name]
|
| 493 |
+
elif arg_index < len(args):
|
| 494 |
+
dispatch_flag = args[arg_index]
|
| 495 |
+
|
| 496 |
+
if dispatch_flag:
|
| 497 |
+
return if_true(*args, **kwargs)
|
| 498 |
+
else:
|
| 499 |
+
return if_false(*args, **kwargs)
|
| 500 |
+
|
| 501 |
+
if if_true.__doc__ is None and if_false.__doc__ is not None:
|
| 502 |
+
doc = if_false.__doc__
|
| 503 |
+
if_true.__doc__ = doc
|
| 504 |
+
elif if_false.__doc__ is None and if_true.__doc__ is not None:
|
| 505 |
+
doc = if_true.__doc__
|
| 506 |
+
if_false.__doc__ = doc
|
| 507 |
+
elif if_false.__doc__ is None and if_true.__doc__ is None:
|
| 508 |
+
# neither function has a docstring
|
| 509 |
+
doc = None
|
| 510 |
+
else:
|
| 511 |
+
raise RuntimeError("only one function can have a docstring")
|
| 512 |
+
fn.__doc__ = doc
|
| 513 |
+
|
| 514 |
+
if module_name is not None:
|
| 515 |
+
fn.__module__ = module_name
|
| 516 |
+
if func_name is not None:
|
| 517 |
+
fn.__name__ = func_name
|
| 518 |
+
|
| 519 |
+
boolean_dispatched[fn] = {
|
| 520 |
+
"if_true": if_true,
|
| 521 |
+
"if_false": if_false,
|
| 522 |
+
"index": arg_index,
|
| 523 |
+
"default": default,
|
| 524 |
+
"arg_name": arg_name,
|
| 525 |
+
}
|
| 526 |
+
return fn
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
class FunctionModifiers:
|
| 530 |
+
"""
|
| 531 |
+
Used to denote the behavior of a function in TorchScript. See export() and
|
| 532 |
+
ignore() for details.
|
| 533 |
+
"""
|
| 534 |
+
|
| 535 |
+
UNUSED = "unused (ignored and replaced with raising of an exception)"
|
| 536 |
+
IGNORE = "ignore (leave as a call to Python, cannot be torch.jit.save'd)"
|
| 537 |
+
EXPORT = "export (compile this function even if nothing calls it)"
|
| 538 |
+
DEFAULT = "default (compile if called from a exported function / forward)"
|
| 539 |
+
COPY_TO_SCRIPT_WRAPPER = (
|
| 540 |
+
"if this method is not scripted, copy the python method onto the scripted model"
|
| 541 |
+
)
|
| 542 |
+
_DROP = "_drop (function is fully ignored, declaration can be unscriptable)"
|
| 543 |
+
|
| 544 |
+
|
| 545 |
+
def export(fn):
    """Mark a method on an ``nn.Module`` as a compilation entry point.

    Decorated methods are compiled into the resulting :class:`ScriptModule`
    even when nothing else calls them. ``forward`` is treated as an entry
    point implicitly, as is anything reachable from a compiled method, so
    neither needs this decorator.

    Example::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            def implicitly_compiled_method(self, x):
                return x + 99

            # `forward` is implicitly an entry point; decorating it
            # with @torch.jit.export would be a no-op.
            def forward(self, x):
                return x + 10

            @torch.jit.export
            def another_forward(self, x):
                # Compiling this also pulls in `implicitly_compiled_method`.
                return self.implicitly_compiled_method(x)

            def unused_method(self, x):
                # Never called from compiled code and not exported,
                # so this stays uncompiled.
                return x - 20

        m = torch.jit.script(MyModule())

    Args:
        fn: the method to mark.

    Returns:
        ``fn`` itself, tagged with the EXPORT modifier.
    """
    # Tag the function so the TorchScript compiler treats it as a root.
    setattr(fn, "_torchscript_modifier", FunctionModifiers.EXPORT)  # noqa: B010
    return fn
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
def unused(fn):
    """Tell the compiler to ignore a function and replace it with an exception.

    Calls to the decorated function compile into a ``raise``; this lets a
    model keep code that is not yet TorchScript compatible while remaining
    exportable (unlike :func:`@torch.jit.ignore <torch.jit.ignore>`).

    Example::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            def __init__(self, use_memory_efficient):
                super().__init__()
                self.use_memory_efficient = use_memory_efficient

            @torch.jit.unused
            def memory_efficient(self, x):
                import pdb
                pdb.set_trace()
                return x + 10

            def forward(self, x):
                # Use not-yet-scriptable memory efficient mode
                if self.use_memory_efficient:
                    return self.memory_efficient(x)
                else:
                    return x + 10

        m = torch.jit.script(MyModule(use_memory_efficient=False))
        m.save("m.pt")

        m = torch.jit.script(MyModule(use_memory_efficient=True))
        # Calling m(...) now raises, since the unused path was taken.

    Args:
        fn: a function, method, or ``property`` to mark.

    Returns:
        The input object, with the UNUSED modifier applied (to both the
        getter and, if present, the setter when ``fn`` is a property).
    """
    # Properties carry their behavior on fget/fset, so tag those functions
    # rather than the property object itself.
    if isinstance(fn, property):
        fn.fget._torchscript_modifier = FunctionModifiers.UNUSED
        if fn.fset:
            fn.fset._torchscript_modifier = FunctionModifiers.UNUSED
        return fn

    fn._torchscript_modifier = FunctionModifiers.UNUSED
    return fn
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
# No op context manager from python side
|
| 646 |
+
class _IgnoreContextManager(contextlib.AbstractContextManager):
|
| 647 |
+
def __init__(self, **kwargs):
|
| 648 |
+
pass
|
| 649 |
+
|
| 650 |
+
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
|
| 651 |
+
pass
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
def ignore(drop=False, **kwargs):
    """
    This decorator indicates to the compiler that a function or method should
    be ignored and left as a Python function. This allows you to leave code in
    your model that is not yet TorchScript compatible. If called from TorchScript,
    ignored functions will dispatch the call to the Python interpreter. Models with ignored
    functions cannot be exported; use :func:`@torch.jit.unused <torch.jit.unused>` instead.

    Example (using ``@torch.jit.ignore`` on a method)::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            @torch.jit.ignore
            def debugger(self, x):
                import pdb
                pdb.set_trace()

            def forward(self, x):
                x += 10
                # The compiler would normally try to compile `debugger`,
                # but since it is `@ignore`d, it will be left as a call
                # to Python
                self.debugger(x)
                return x

        m = torch.jit.script(MyModule())

        # Error! The call `debugger` cannot be saved since it calls into Python
        m.save("m.pt")

    Example (using ``@torch.jit.ignore(drop=True)`` on a method):

    .. testcode::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            @torch.jit.ignore(drop=True)
            def training_method(self, x):
                import pdb
                pdb.set_trace()

            def forward(self, x):
                if self.training:
                    self.training_method(x)
                return x

        m = torch.jit.script(MyModule())

        # This is OK since `training_method` is not saved, the call is replaced
        # with a `raise`.
        m.save("m.pt")

    .. testcleanup::

        import os
        os.remove('m.pt')
    """
    if callable(drop):
        # used without any args, so drop is actually a function
        # @torch.jit.ignore
        # def fn(...):
        fn = drop
        fn._torchscript_modifier = FunctionModifiers.IGNORE
        return fn

    if not isinstance(drop, bool):
        raise RuntimeError(
            "Argument to @torch.jit.ignore must be a bool or "
            f"a function but got {drop}"
        )

    # for backwards compat
    drop_on_export = kwargs.pop("drop_on_export", None)
    if drop_on_export:
        # BUGFIX: the deprecation messages previously ended with a literal
        # "{}" left over from a removed str.format() call; drop it so users
        # don't see a stray placeholder in the warning text.
        warnings.warn(
            "ignore(drop_on_export=True) has been deprecated. TorchScript will now drop the function "
            "call on compilation. Use torch.jit.unused now.",
            category=FutureWarning,
        )

        drop = drop_on_export
    elif drop:
        warnings.warn(
            "ignore(True) has been deprecated. TorchScript will now drop the function "
            "call on compilation. Use torch.jit.unused now.",
            category=FutureWarning,
        )

    def decorator(fn):
        # drop=True degrades to the same behavior as @torch.jit.unused.
        if drop:
            fn._torchscript_modifier = FunctionModifiers.UNUSED
        else:
            fn._torchscript_modifier = FunctionModifiers.IGNORE
        return fn

    return decorator
|
| 755 |
+
|
| 756 |
+
|
| 757 |
+
def _drop(fn):
    """Mark ``fn`` as fully dropped: even its declaration may be unscriptable."""
    setattr(fn, "_torchscript_modifier", FunctionModifiers._DROP)  # noqa: B010
    return fn
|
| 760 |
+
|
| 761 |
+
|
| 762 |
+
def _copy_to_script_wrapper(fn):
    """Mark ``fn`` so the unscripted Python method is copied onto the scripted model."""
    setattr(  # noqa: B010
        fn, "_torchscript_modifier", FunctionModifiers.COPY_TO_SCRIPT_WRAPPER
    )
    return fn
|
| 765 |
+
|
| 766 |
+
|
| 767 |
+
def module_has_exports(mod):
    """Return True if any callable attribute of ``mod`` carries the EXPORT modifier."""
    for attr_name in dir(mod):
        # getattr with a default covers attributes that vanish between
        # dir() and the lookup (same effect as the hasattr guard).
        candidate = getattr(mod, attr_name, None)
        if callable(candidate) and (
            get_torchscript_modifier(candidate) is FunctionModifiers.EXPORT
        ):
            return True
    return False
|
| 775 |
+
|
| 776 |
+
|
| 777 |
+
# WARNING: should_drop is currently being used by our JIT code coverage plug-in to mark JIT'd code as covered. If you
# rename this function, please update references in tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py to
# allow JIT'd code to still be covered.
def should_drop(fn) -> bool:
    """Return True if ``fn`` is marked UNUSED or _DROP (i.e. its body is discarded)."""
    modifier = get_torchscript_modifier(fn)
    if modifier is None:
        return False
    return modifier is FunctionModifiers.UNUSED or modifier is FunctionModifiers._DROP
|
| 785 |
+
|
| 786 |
+
|
| 787 |
+
def is_ignored_fn(fn) -> bool:
    """Return True if ``fn`` carries any of the "skip compilation" modifiers."""
    modifier = get_torchscript_modifier(fn)
    return any(
        modifier is skipped
        for skipped in (
            FunctionModifiers.UNUSED,
            FunctionModifiers.IGNORE,
            FunctionModifiers._DROP,
        )
    )
|
| 794 |
+
|
| 795 |
+
|
| 796 |
+
def _is_drop_fn(fn) -> bool:
    """Return True if ``fn`` is marked with the _DROP modifier."""
    return get_torchscript_modifier(fn) is FunctionModifiers._DROP
|
| 799 |
+
|
| 800 |
+
|
| 801 |
+
def is_static_fn(cls, fn) -> bool:
    """Return True if attribute ``fn`` on ``cls`` is a staticmethod (no descriptor firing)."""
    static_attr = inspect.getattr_static(cls, fn, default=None)
    return isinstance(static_attr, staticmethod)
|
| 803 |
+
|
| 804 |
+
|
| 805 |
+
def get_static_fn(cls, fn):
    """Return the plain function wrapped by the staticmethod ``fn`` on ``cls``."""
    static_attr = inspect.getattr_static(cls, fn)
    return static_attr.__func__
|
| 807 |
+
|
| 808 |
+
|
| 809 |
+
def get_torchscript_modifier(fn):
    """Return ``fn``'s TorchScript modifier, DEFAULT if untagged, or None if not callable."""
    if not callable(fn):
        return None
    # Bound methods store the modifier on the underlying function.
    target = getattr(fn, "__func__", fn)
    return getattr(target, "_torchscript_modifier", FunctionModifiers.DEFAULT)
|
| 815 |
+
|
| 816 |
+
|
| 817 |
+
def copy_torchscript_modifier(orig, new) -> None:
    """Copy ``orig``'s TorchScript modifier onto ``new`` (no-op if none exists)."""
    modifier = get_torchscript_modifier(orig)
    if modifier is not None:
        new._torchscript_modifier = modifier
|
| 822 |
+
|
| 823 |
+
|
| 824 |
+
# overloading registration
# overloads get registered in this file, and compiled in torch/jit/__init__.py
# so that they can be imported in nn/functional.py without an import cycle

# qualified_name => list[overload_functions]
# Populated by _overload() below; consumed/cleared via _get_fn_overloads /
# _clear_fn_overloads during compilation.
_overloaded_fns: Dict[str, List[Callable]] = {}  # noqa: T484


# Usage example embedded in overload-related error messages (see
# get_overload_no_implementation_error_message and _check_overload_body).
_OVERLOAD_EXAMPLE = """
Example usage of overload function:
@torch.jit._overload
def my_function(x: type0) -> type0: # decl 1
    pass

@torch.jit._overload
def my_function(x: type1) -> type1: # decl 2
    pass

def my_function(x): # implementation
    if isinstance(x, type0):
        return x
    elif isinstance(x, type1):
        return x
"""
|
| 848 |
+
|
| 849 |
+
|
| 850 |
+
def get_overload_no_implementation_error_message(kind, obj):
    """Build the error shown when an @overload declaration has no implementation.

    Args:
        kind: a human-readable noun for the object ("function", "method").
        obj: the overloaded callable whose implementation is missing.

    Returns:
        A message including the declaration's file/line, its source, and a
        usage example.
    """
    sourcelines, file_lineno, filename = get_source_lines_and_file(obj)
    # BUGFIX: `filename` was unpacked but unused and the message hardcoded
    # "(unknown)"; report the real file the declaration came from.
    return (
        f'Implementation for the {kind} "{_qualified_name(obj)}" is missing. Please make '
        f"sure a definition is provided and defined after all overload declarations.\n"
        f'File "{filename}", line {file_lineno}:\n'
        + "".join(sourcelines)
        + "\n"
        + _OVERLOAD_EXAMPLE
    )
|
| 860 |
+
|
| 861 |
+
|
| 862 |
+
def _check_overload_body(func):
    """Validate that an @torch.jit._overload declaration body is `pass` or `...`."""
    try:
        parsed_def = parse_def(func)
    except OSError:
        # Parsing the function definition can raise an OSError if source is unavailable.
        # Since this is just an initial check, just raise a warning if this is the case.
        warnings.warn(
            f"Unable to retrieve source for @torch.jit._overload function: {func}."
        )
        return

    body = parsed_def.ast.body[0].body

    def _is_empty_stmt(node):
        # Either a `pass` statement or a bare `...` expression qualifies.
        if isinstance(node, ast.Pass):
            return True
        return isinstance(node, ast.Expr) and isinstance(node.value, ast.Ellipsis)

    if len(body) != 1 or not _is_empty_stmt(body[0]):
        msg = (
            "Only `pass` statement or `...` can be the body of overload declaration:\n"
        )
        msg += "\n".join(parsed_def.source.split("\n")[:3])
        msg += " <- Expecting `pass` or `...` here!\n" + _OVERLOAD_EXAMPLE
        raise RuntimeError(msg)
|
| 888 |
+
|
| 889 |
+
|
| 890 |
+
def _overload(func):
    """Register ``func`` as one overload declaration under its qualified name."""
    _check_overload_body(func)
    qual_name = _qualified_name(func)
    global _overloaded_fns
    # setdefault creates the overload list on first registration.
    _overloaded_fns.setdefault(qual_name, []).append(func)
    return func
|
| 900 |
+
|
| 901 |
+
|
| 902 |
+
def _get_fn_overloads(qual_name):
    """Return the registered overload list for ``qual_name``, or None."""
    return _overloaded_fns.get(qual_name)
|
| 904 |
+
|
| 905 |
+
|
| 906 |
+
def _clear_fn_overloads(qual_name) -> None:
    """Remove all registered overloads for ``qual_name`` (KeyError if absent)."""
    _overloaded_fns.pop(qual_name)
|
| 908 |
+
|
| 909 |
+
|
| 910 |
+
def get_class_name_lineno(method) -> Tuple[str, int]:
    """Return (enclosing code object name, first line number) two frames up.

    Walks past this function's frame and the _overload_method frame to land
    on the class-body frame where the decorated method is being defined.
    """
    frame = inspect.currentframe()

    # one for the get_class_name call, one for _overload_method call
    for _ in range(2):
        assert frame is not None  # narrow Optional[FrameType]
        frame = frame.f_back

    assert frame is not None  # same here
    return frame.f_code.co_name, frame.f_code.co_firstlineno
|
| 924 |
+
|
| 925 |
+
|
| 926 |
+
# At the point the decorator is applied to class methods the method
# has no reference to its owning class. _qualified_name would not include
# the class it is defined in, so any methods with the same name in the same file
# would have the same _qualified_name, even if they were defined in different
# classes. This problem only exists in python 2.
# We get around this problem by looking at the stack frame and identifying
# the class name, and throwing an error whenever overloads are used
# when modules of the same name are in the same file

# qualified_name => class name => list[overload_functions]
# Populated by _overload_method(); read back in _get_overloaded_methods().
_overloaded_methods: Dict[str, Dict[str, List[Callable]]] = {}  # noqa: T484


# (qualified_name, class name) => class_fileno
# First-seen line number per (name, class); used by _overload_method() to
# detect two same-named classes in one module, which is unsupported.
_overloaded_method_class_fileno = {}
|
| 941 |
+
|
| 942 |
+
|
| 943 |
+
def _overload_method(func):
    """Register ``func`` as a method overload, keyed by qualified and class name.

    Raises RuntimeError when two distinct classes with the same name in the
    same module both try to overload the same method name.
    """
    _check_overload_body(func)
    qual_name = _qualified_name(func)
    global _overloaded_methods
    class_name_map = _overloaded_methods.setdefault(qual_name, {})

    # NOTE: get_class_name_lineno walks a fixed number of frames, so it must
    # be called directly from this function.
    class_name, line_no = get_class_name_lineno(func)
    if class_name not in class_name_map:
        class_name_map[class_name] = []
        _overloaded_method_class_fileno[(qual_name, class_name)] = line_no
    elif _overloaded_method_class_fileno[(qual_name, class_name)] != line_no:
        raise RuntimeError(
            "Cannot currently overload the same method name in two different"
            " classes with the same name in the same module"
        )

    class_name_map[class_name].append(func)
    return func
|
| 968 |
+
|
| 969 |
+
|
| 970 |
+
def _get_overloaded_methods(method, mod_class):
    """Return registered overloads of ``method`` for ``mod_class``, or None.

    Also sanity-checks that the method's source actually lies inside the
    class's source span, which fails if the module class was redeclared.
    """
    # TODO: __name__ not set for submodules in recursive script
    if not hasattr(method, "__name__"):
        return None
    class_name_map = _overloaded_methods.get(_qualified_name(method), None)
    if class_name_map is None:
        return None
    overloads = class_name_map.get(mod_class.__name__, None)
    if overloads is None:
        return None

    method_line_no = get_source_lines_and_file(method)[1]
    class_sourcelines, class_start_line, _ = get_source_lines_and_file(mod_class)
    class_end_line = class_start_line + len(class_sourcelines)
    if not (class_start_line <= method_line_no <= class_end_line):
        raise Exception(
            "Overloads are not useable when a module is redeclared within the same file: "
            + str(method)
        )
    return overloads
|
| 991 |
+
|
| 992 |
+
|
| 993 |
+
def is_tuple(ann) -> bool:
    """Return True if ``ann`` is a parameterized tuple annotation (typing.Tuple[...] or tuple[...])."""
    if ann is Tuple:
        raise_error_container_parameter_missing("Tuple")

    # For some reason Python 3.7 violates the Type[A, B].__origin__ == Type rule
    if not hasattr(ann, "__module__"):
        return False

    origin = get_origin(ann)
    # Accept builtin generics like tuple[int, str] on 3.9+.
    if IS_PY39_PLUS and ann.__module__ == "builtins" and origin is tuple:
        return True
    return ann.__module__ == "typing" and (origin is Tuple or origin is tuple)
|
| 1005 |
+
|
| 1006 |
+
|
| 1007 |
+
def is_list(ann) -> bool:
    """Return True if ``ann`` is a parameterized list annotation (typing.List[...] or list[...])."""
    if ann is List:
        raise_error_container_parameter_missing("List")

    if not hasattr(ann, "__module__"):
        return False

    origin = get_origin(ann)
    # Accept builtin generics like list[int] on 3.9+.
    if IS_PY39_PLUS and ann.__module__ == "builtins" and origin is list:
        return True
    return ann.__module__ == "typing" and (origin is List or origin is list)
|
| 1018 |
+
|
| 1019 |
+
|
| 1020 |
+
def is_dict(ann) -> bool:
    """Return True if ``ann`` is a parameterized dict annotation (typing.Dict[...] or dict[...])."""
    if ann is Dict:
        raise_error_container_parameter_missing("Dict")

    if not hasattr(ann, "__module__"):
        return False

    origin = get_origin(ann)
    # Accept builtin generics like dict[str, int] on 3.9+.
    if IS_PY39_PLUS and ann.__module__ == "builtins" and origin is dict:
        return True
    return ann.__module__ == "typing" and (origin is Dict or origin is dict)
|
| 1031 |
+
|
| 1032 |
+
|
| 1033 |
+
def is_union(ann):
    """Return True for typing.Union[...] annotations or PEP 604 ``X | Y`` unions."""
    if ann is Union:
        raise_error_container_parameter_missing("Union")

    # PEP 604 unions (`int | str`) are instances of the builtin union type.
    if isinstance(ann, BuiltinUnionType):
        return True
    return (
        hasattr(ann, "__module__")
        and ann.__module__ == "typing"
        and get_origin(ann) is Union
    )
|
| 1042 |
+
|
| 1043 |
+
|
| 1044 |
+
def is_optional(ann):
    """Return True if ``ann`` is Optional[...] or a 2-arm union including None."""
    if ann is Optional:
        raise_error_container_parameter_missing("Optional")

    def spelled_as_optional(candidate):
        # Literal typing.Optional[...] spelling.
        return (
            hasattr(candidate, "__module__")
            and candidate.__module__ == "typing"
            and get_origin(candidate) is Optional
        )

    def union_with_none(candidate):
        # Union[X, None] / X | None: exactly two args, one of them NoneType.
        args = get_args(candidate)
        return len(args) == 2 and (None in args or type(None) in args)

    return spelled_as_optional(ann) or (is_union(ann) and union_with_none(ann))
|
| 1060 |
+
|
| 1061 |
+
|
| 1062 |
+
def is_future(ann) -> bool:
    """Return True for parameterized ``Future[...]`` annotations; bare Future is an error."""
    if ann is Future:
        raise RuntimeError(
            "Attempted to use Future without a "
            "contained type. Please add a contained type, e.g. "
            "Future[int]"
        )
    return get_origin(ann) is Future
|
| 1070 |
+
|
| 1071 |
+
|
| 1072 |
+
def is_await(ann) -> bool:
    """Return True if ``ann`` is the _Await type itself or a parameterized _Await[...]."""
    return ann is _Await or get_origin(ann) is _Await
|
| 1076 |
+
|
| 1077 |
+
|
| 1078 |
+
# RRef support is only defined when the distributed RPC module is available;
# otherwise provide a stub `is_rref_instance` that always returns False.
if torch.distributed.rpc.is_available():
    from torch._C._distributed_rpc import PyRRef
    from torch.distributed.rpc import RRef

    def is_rref(ann) -> bool:
        # Bare `RRef` (no contained type) cannot be expressed in TorchScript.
        if ann is RRef:
            raise RuntimeError(
                "Attempted to use RRef without a "
                "contained type. Please add a contained type, e.g. "
                "RRef[int]"
            )
        return get_origin(ann) is RRef

    def is_rref_instance(obj) -> bool:
        # PyRRef is the C++-backed runtime representation of an RRef.
        return isinstance(obj, PyRRef)

else:
    # NOTE: `is_rref` is deliberately undefined in this branch; callers are
    # expected to gate on rpc availability before using it.

    def is_rref_instance(obj) -> bool:
        # If the RPC module doesn't exist then RRefs don't exist either.
        return False
|
| 1099 |
+
|
| 1100 |
+
|
| 1101 |
+
def is_final(ann) -> bool:
    """Return True if ``ann`` is a typing/typing_extensions Final annotation."""
    from_typing_module = ann.__module__ in {"typing", "typing_extensions"}
    return from_typing_module and (
        get_origin(ann) is Final or isinstance(ann, type(Final))
    )
|
| 1105 |
+
|
| 1106 |
+
|
| 1107 |
+
# allows BroadcastingList instance to be subscriptable
class BroadcastingListCls:
    """Placeholder whose instances accept subscripting and yield nothing.

    Exists only so annotations like ``BroadcastingList2[int]`` parse in
    eager mode.
    """

    def __getitem__(self, types):
        return None
|
| 1111 |
+
|
| 1112 |
+
|
| 1113 |
+
# mypy doesn't support parameters on types, so we have to explicitly type each
# list size
BroadcastingList1 = BroadcastingListCls()
# BroadcastingList2 ... BroadcastingList6 all alias the same placeholder
# instance; the distinct names only exist for annotation readability.
for i in range(2, 7):
    globals()[f"BroadcastingList{i}"] = BroadcastingList1
|
| 1118 |
+
|
| 1119 |
+
|
| 1120 |
+
def is_scripting() -> bool:
    r"""Return True during TorchScript compilation and False in eager mode.

    The eager (Python) definition below always returns False; the compiler
    substitutes True when compiling. Useful together with ``@torch.jit.unused``
    to fence off code that is not yet TorchScript compatible.

    .. testcode::

        import torch

        @torch.jit.unused
        def unsupported_linear_op(x):
            return x

        def linear(x):
            if torch.jit.is_scripting():
                return torch.linear(x)
            else:
                return unsupported_linear_op(x)
    """
    return False
|
| 1140 |
+
|
| 1141 |
+
|
| 1142 |
+
# Retrieves a fully-qualified name (module hierarchy + classname) for a given obj.
def _qualified_name(obj, mangle_name=True) -> str:
    """Return the TorchScript qualified name for ``obj``.

    Args:
        obj: a function, class, ScriptFunction, or Enum member.
        mangle_name: when True, prefix/rewrite the module with "__torch__"
            to keep compiler-internal names from colliding with user values.

    Raises:
        RuntimeError: if a name or module cannot be determined, or if the
            resulting name contains a '.' and is not a valid identifier.
    """
    # This special case allows us to override the qualified name on a type.
    # It's currently used in conjunction with tracing, where we create a
    # fake module to filter only supported attributes. However, since this
    # new type is defined as a local class, we need a mechanism to override
    # its qualname so it appears correctly in the TorchScript system. Thus,
    # we set '_jit_override_qualname' with the original traced module's
    # qualified name, which is picked up here
    if hasattr(obj, "_jit_override_qualname"):
        return obj._jit_override_qualname
    # short-circuit in cases where the object already has a known qualified name
    if isinstance(obj, torch._C.ScriptFunction):
        return obj.qualified_name

    if getattr(obj, "__name__", None):
        name = obj.__name__
    # Enum classes do not have `__name__` attr, instead they have `name`.
    elif isinstance(obj, enum.Enum):
        name = obj.name
    else:
        raise RuntimeError("Could not get name of python class object")

    if name == "<lambda>":
        name = "_lambda"  # make name a valid identifier

    module_name = obj.__module__

    # If the module is actually a torchbind module, then we should short circuit
    if module_name == "torch._classes":
        return obj.qualified_name

    # The Python docs are very clear that `__module__` can be None, but I can't
    # figure out when it actually would be.
    if module_name is None:
        raise RuntimeError(
            f"Could not get qualified name for class '{name}': "
            "__module__ can't be None."
        )

    # if getattr(sys.modules[module_name], name) is not obj:
    #     raise RuntimeError(f"Could not get qualified name for class '{name}': "
    #                        f"the attr {name} on module {module_name} is not the class")

    # torch.package and TorchScript have separate mangling schemes to avoid
    # name collisions from multiple packages. To avoid them interfering with
    # each other, normalize the package mangling here.
    if package_mangling.is_mangled(module_name):
        module_name = module_name.replace("<", "_")
        module_name = module_name.replace(">", "_")

    # The PythonExceptionValue C++ class in torch/csrc/jit/python/python_sugared_value.h
    # does not need mangle the python class name.
    if mangle_name:
        # __main__ is a builtin module, so rewrite it to "__torch__".
        if module_name == "__main__":
            module_name = "__torch__"
        else:
            # Everything else gets a "__torch__" prefix to avoid name collisions
            # with the names of user values.
            module_name = "__torch__." + module_name

    if "." in name:
        raise RuntimeError(
            f"Could not get qualified name for class '{name}': "
            f"'{name}' is not a valid identifier"
        )

    return module_name + "." + name
|
| 1211 |
+
|
| 1212 |
+
|
| 1213 |
+
def _try_get_dispatched_fn(fn):
    """Return the boolean-dispatch record for ``fn``, or None if not dispatched/callable."""
    if callable(fn):
        return boolean_dispatched.get(fn)
    return None
|
| 1217 |
+
|
| 1218 |
+
|
| 1219 |
+
def _get_named_tuple_properties(
    obj, loc: Optional[torch._C._jit_tree_views.SourceRange] = None, rcb=None
):
    """Extract TorchScript-relevant metadata from a NamedTuple class.

    Args:
        obj: a NamedTuple class (subclass of tuple with ``_fields``).
        loc: source range used in error messages; a fake range if None.
        rcb: resolution callback used to resolve string/ForwardRef annotations.

    Returns:
        A tuple ``(name, fields, annotations, defaults)`` where annotations
        are TorchScript types (inferred TensorType for unannotated fields).
    """
    if loc is None:
        loc = fake_range()

    assert issubclass(obj, tuple) and hasattr(obj, "_fields")
    if hasattr(obj, "_field_defaults"):
        defaults = [
            obj._field_defaults[field]
            for field in obj._fields
            if field in obj._field_defaults
        ]
    else:
        defaults = []
    # From 3.10 the recommended accessor is inspect.get_annotations; it does
    # not inherit annotations from base classes, so query the base explicitly
    # when the class itself has none.
    if sys.version_info[:2] < (3, 10):
        obj_annotations = getattr(obj, "__annotations__", {})
    else:
        obj_annotations = inspect.get_annotations(obj)
        if len(obj_annotations) == 0 and hasattr(obj, "__base__"):
            obj_annotations = inspect.get_annotations(obj.__base__)

    annotations = []
    for field in obj._fields:
        if field not in obj_annotations:
            annotations.append(torch._C.TensorType.getInferred())
            continue
        field_type = obj_annotations[field]
        # [Note: ForwardRef annotations in NamedTuple attributes]
        # String annotations (e.g. `x: 'int'`, or anything under PEP 563)
        # surface here as ForwardRef objects rather than real types. Turning
        # the string back into a python object needs the local context the
        # NamedTuple was defined in; the resolution callback (rcb) provides
        # that lookup, which is why it is plumbed through to here instead of
        # using ForwardRef._evaluate (which would need globals()/locals()).
        if isinstance(field_type, ForwardRef) and rcb is not None:
            rcb_type = rcb(field_type.__forward_arg__)
            # rcb returns None if it can't find anything.
            if rcb_type is None:
                raise ValueError(
                    f"Unknown type annotation: '{field_type}' in NamedTuple {obj.__name__}."
                    f" Likely due to partial support for ForwardRef parameters in NamedTuples, see #95858."
                    f" Issue occurred at {loc.highlight()}"
                )
            field_type = rcb_type
        annotations.append(torch.jit.annotations.ann_to_type(field_type, loc, rcb))
    return type(obj).__name__, obj._fields, annotations, defaults
|
| 1297 |
+
|
| 1298 |
+
|
| 1299 |
+
def _create_named_tuple(
|
| 1300 |
+
t, unqual_name: str, field_names: List[str], defaults: Tuple[Any, ...]
|
| 1301 |
+
):
|
| 1302 |
+
TupleType = collections.namedtuple(unqual_name, field_names, defaults=defaults) # type: ignore[call-arg, no-redef, misc]
|
| 1303 |
+
return TupleType(*t)
|
| 1304 |
+
|
| 1305 |
+
|
| 1306 |
+
@contextlib.contextmanager
|
| 1307 |
+
def _disable_emit_hooks():
|
| 1308 |
+
hooks = torch._C._jit_get_emit_hooks()
|
| 1309 |
+
torch._C._jit_set_emit_hooks(None, None)
|
| 1310 |
+
try:
|
| 1311 |
+
yield
|
| 1312 |
+
finally:
|
| 1313 |
+
torch._C._jit_set_emit_hooks(hooks[0], hooks[1])
|
| 1314 |
+
|
| 1315 |
+
|
| 1316 |
+
def _disable_emit_hooks_decorator(_DecoratorContextManager) -> None:  # noqa: F811
    # NOTE(review): the nested functions below are defined but never attached
    # to `_DecoratorContextManager` or returned, so as written this function
    # appears to have no effect — presumably legacy scaffolding; confirm
    # before relying on it.
    def __enter__(self) -> None:
        # Save current emit hooks on the instance, then clear them.
        self.hooks = torch._C._jit_get_emit_hooks()
        torch._C._jit_set_emit_hooks(None, None)

    def __exit__(self, *args) -> None:
        # Restore the previously saved emit hooks.
        torch._C._jit_set_emit_hooks(self.hooks[0], self.hooks[1])
|
| 1323 |
+
|
| 1324 |
+
|
| 1325 |
+
def _is_exception(obj) -> bool:
|
| 1326 |
+
if not inspect.isclass(obj):
|
| 1327 |
+
return False
|
| 1328 |
+
return issubclass(obj, Exception)
|
| 1329 |
+
|
| 1330 |
+
|
| 1331 |
+
def raise_error_container_parameter_missing(target_type) -> None:
    """Raise RuntimeError for a bare container annotation lacking contained types."""
    if target_type == "Dict":
        # Dict takes two type parameters, so its message differs.
        message = (
            "Attempted to use Dict without "
            "contained types. Please add contained type, e.g. "
            "Dict[int, int]"
        )
    else:
        message = (
            f"Attempted to use {target_type} without a "
            "contained type. Please add a contained type, e.g. "
            f"{target_type}[int]"
        )
    raise RuntimeError(message)
|
| 1343 |
+
|
| 1344 |
+
|
| 1345 |
+
def check_args_exist(target_type) -> None:
    """Raise (via helper) if ``target_type`` is a bare container with no type args."""
    bare_aliases = (
        ((List, list), "List"),
        ((Tuple, tuple), "Tuple"),
        ((Dict, dict), "Dict"),
        ((None, Optional), "Optional"),
    )
    for aliases, label in bare_aliases:
        if any(target_type is alias for alias in aliases):
            raise_error_container_parameter_missing(label)
|
| 1355 |
+
|
| 1356 |
+
def check_empty_containers(obj) -> None:
    """Warn that isinstance on an empty container cannot see element types."""
    if any(obj == empty for empty in ([], {}, ())):
        warnings.warn(
            "The inner type of a container is lost when "
            "calling torch.jit.isinstance in eager mode. For "
            "example, List[int] would become list and "
            "therefore falsely return True for List[float] or"
            " List[str]."
        )
|
| 1365 |
+
|
| 1366 |
+
|
| 1367 |
+
# supports List/Dict/Tuple and Optional types
# TODO support future
def container_checker(obj, target_type) -> bool:
    """Recursively check that *obj* matches the parameterized container
    annotation *target_type* (e.g. List[int], Dict[str, List[int]],
    Optional[Tuple[int, str]]), element by element.

    Returns False for non-container annotations (origin is None); raises via
    check_args_exist for bare container annotations.
    """
    origin_type = get_origin(target_type)
    check_args_exist(target_type)
    if origin_type is None:
        # Not a parameterized generic at all — caller falls back to isinstance.
        return False
    elif origin_type is list or origin_type is List:
        check_empty_containers(obj)
        if not isinstance(obj, list):
            return False
        arg_type = get_args(target_type)[0]
        arg_origin = get_origin(arg_type)
        for el in obj:
            # check if nested container, ex: List[List[str]]
            if arg_origin:  # processes nested container, ex: List[List[str]]
                if not container_checker(el, arg_type):
                    return False
            elif not isinstance(el, arg_type):
                return False
        return True
    elif origin_type is Dict or origin_type is dict:
        check_empty_containers(obj)
        if not isinstance(obj, dict):
            return False
        key_type = get_args(target_type)[0]
        val_type = get_args(target_type)[1]
        for key, val in obj.items():
            # check if keys are of right type
            if not isinstance(key, key_type):
                return False
            val_origin = get_origin(val_type)
            if val_origin:
                # Nested container value, e.g. Dict[str, List[int]].
                if not container_checker(val, val_type):
                    return False
            elif not isinstance(val, val_type):
                return False
        return True
    elif origin_type is Tuple or origin_type is tuple:
        check_empty_containers(obj)
        if not isinstance(obj, tuple):
            return False
        arg_types = get_args(target_type)
        # Tuples are fixed-arity: the lengths must agree exactly.
        if len(obj) != len(arg_types):
            return False
        for el, el_type in zip(obj, arg_types):
            el_origin = get_origin(el_type)
            if el_origin:
                if not container_checker(el, el_type):
                    return False
            elif not isinstance(el, el_type):
                return False
        return True
    elif origin_type is Union or issubclass(
        origin_type, BuiltinUnionType
    ):  # also handles Optional
        if obj is None:  # check before recursion because None is always fine
            return True
        inner_types = get_args(target_type)
        for t in inner_types:
            t_origin = get_origin(t)
            if t_origin:
                # NOTE(review): this returns on the FIRST parameterized member
                # of the union rather than trying the remaining members —
                # preserved as-is; confirm this short-circuit is intended.
                return container_checker(obj, t)
            elif isinstance(obj, t):
                return True
        return False
|
| 1433 |
+
|
| 1434 |
+
|
| 1435 |
+
def _isinstance(obj, target_type) -> bool:
    """Eager-mode isinstance that also understands parameterized container
    annotations (List[int], Dict[str, int], Optional[...], tuples of types).
    """
    if isinstance(target_type, collections.abc.Container):
        # Only tuples of types are accepted, mirroring builtin isinstance.
        if not isinstance(target_type, tuple):
            raise RuntimeError(
                "The second argument to "
                "`torch.jit.isinstance` must be a type "
                "or a tuple of types"
            )
        return any(_isinstance(obj, candidate) for candidate in target_type)

    if get_origin(target_type):
        # Parameterized generic — delegate to the recursive container check.
        return container_checker(obj, target_type)

    # Check to handle non-typed optional origin returns as none instead
    # of as optional in 3.7-3.8
    check_args_exist(target_type)

    # handle non-containers
    return isinstance(obj, target_type)
|
| 1458 |
+
|
| 1459 |
+
|
| 1460 |
+
class _TensorExtractor(pickle.Pickler):
    """Pickler that records every tensor it visits into ``tensors`` instead of
    serializing it, and skips known tensor-free object kinds."""

    def __init__(self, *args, tensors: List[torch.Tensor], **kwargs):
        super().__init__(*args, **kwargs)
        # Output list; persistent_id appends every tensor encountered here.
        self.tensors = tensors

    def persistent_id(self, obj):
        # Record tensors and replace them with an empty persistent id so the
        # pickle stream never has to serialize their storage.
        if isinstance(obj, torch.Tensor):
            self.tensors.append(obj)
            return ""
        # Since we just want to extract tensors, we don't mind if an object is
        # unpicklable if it doesn't contain tensors, as we can just ignore/skip
        # it. To play it safe, we only do so for common objects that we're sure
        # don't contain tensors. Feel free to add new types here. Note also that
        # even if a type isn't listed here this won't block users, since they
        # can just add a __getstate__ or __reduce__ method to their class.
        if isinstance(obj, LockType):
            return ""
        # Futures and RRefs don't technically contain a value, they just offer
        # the means to access a value.
        if isinstance(obj, CFuture) or is_rref_instance(obj):
            return ""
        if isinstance(obj, CAwait):
            return ""
        if isinstance(obj, torch.cuda.Event):
            return ""
        if isinstance(obj, threading.Thread):
            return ""
        # Returning None tells pickle to serialize the object normally.
        return None
|
| 1488 |
+
|
| 1489 |
+
|
| 1490 |
+
def _extract_tensors(obj):
    r"""
    This function is exclusively called from C++.
    See ``torch/csrc/jit/python/python_ivalue.h``.

    It extracts the tensors contained in the given object, through pickling.
    """
    collected: List[torch.Tensor] = []
    # Pickling walks the full object graph; the byte stream itself is thrown
    # away — only the tensors recorded by _TensorExtractor.persistent_id matter.
    _TensorExtractor(io.BytesIO(), protocol=-1, tensors=collected).dump(obj)
    return collected
|
| 1501 |
+
|
| 1502 |
+
|
| 1503 |
+
# In Python-3.11+ typed enums (i.e. IntEnum for example) retain number of base class methods in subclass
# that were previously dropped. To preserve the behavior, explicitly drop them there
# (``_drop`` is defined elsewhere in this module — presumably it marks a method
# as invisible to TorchScript compilation; TODO confirm.)

if sys.version_info > (3, 10):
    _drop(enum.Enum.__new__)
    _drop(enum.Enum.__format__)
    _drop(enum.Enum.__repr__)
    _drop(enum.Enum.__str__)
|
evalkit_internvl/lib/python3.10/site-packages/torch/_meta_registrations.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/_sources.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ast
|
| 2 |
+
import functools
|
| 3 |
+
import inspect
|
| 4 |
+
from textwrap import dedent
|
| 5 |
+
from typing import Any, List, NamedTuple, Optional, Tuple
|
| 6 |
+
|
| 7 |
+
from torch._C import ErrorReport
|
| 8 |
+
from torch._C._jit_tree_views import SourceRangeFactory
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def get_source_lines_and_file(
    obj: Any,
    error_msg: Optional[str] = None,
) -> Tuple[List[str], int, Optional[str]]:
    """
    Wrapper around inspect.getsourcelines and inspect.getsourcefile.

    Raises OSError with a TorchScript-specific message (plus *error_msg*, if
    given) when the source is unavailable.

    Returns: (sourcelines, file_lino, filename)
    """
    try:
        filename = inspect.getsourcefile(obj)
        sourcelines, file_lineno = inspect.getsourcelines(obj)
    except OSError as e:
        parts = [
            f"Can't get source for {obj}. TorchScript requires source access in "
            "order to carry out compilation, make sure original .py files are "
            "available."
        ]
        if error_msg:
            parts.append(error_msg)
        raise OSError("\n".join(parts)) from e

    return sourcelines, file_lineno, filename
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def normalize_source_lines(sourcelines: List[str]) -> List[str]:
    """
    Re-align a function's source lines to the indentation of its ``def`` line.

    Comments and continued string literals may sit at a lower indentation than
    the function definition; every line before and after the ``def`` is given
    at least the definition's leading whitespace so the block dedents cleanly.

    Args:
        sourcelines: function source code, split into individual lines
    Returns:
        A list of source lines that have been correctly aligned
    """
    # Locate the line holding the function definition; None for e.g. lambdas.
    def_index = next(
        (i for i, line in enumerate(sourcelines) if line.lstrip().startswith("def")),
        None,
    )

    # This will happen when the function is a lambda- we won't find "def" anywhere in the source
    # lines in that case. Currently trying to JIT compile a lambda will throw an error up in
    # `parse_def()`, but we might want to handle this case in the future.
    if def_index is None:
        return sourcelines

    fn_def = sourcelines[def_index]
    # The leading-whitespace string of the `def` line.
    whitespace = fn_def.split("def")[0]

    def realign(line: str) -> str:
        # Prefixing after stripping an existing copy keeps already-aligned
        # lines unchanged and pushes under-indented ones to the def's level.
        return whitespace + line.removeprefix(whitespace)

    before = [realign(line) for line in sourcelines[:def_index]]
    after = [realign(line) for line in sourcelines[def_index + 1 :]]
    return before + [fn_def] + after
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
# Thin wrapper around SourceRangeFactory to store extra metadata
# about the function-to-be-compiled.
class SourceContext(SourceRangeFactory):
    """SourceRangeFactory plus the division semantics, filename, and function
    name of the code being compiled."""

    def __init__(
        self,
        source,
        filename,
        file_lineno,
        leading_whitespace_len,
        uses_true_division=True,
        funcname=None,
    ):
        super().__init__(source, filename, file_lineno, leading_whitespace_len)
        # Whether `/` means true division (Python 3 semantics); default True.
        self.uses_true_division = uses_true_division
        # Stored again here (beyond the base class) for direct access.
        self.filename = filename
        # Name of the function being compiled, or None if not applicable.
        self.funcname = funcname
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@functools.lru_cache(maxsize=None)
def make_source_context(*args):
    # Memoized SourceContext constructor: identical positional arguments yield
    # the same cached instance. Note the unbounded cache keeps every context
    # (and its source string) alive for the life of the process.
    return SourceContext(*args)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def fake_range():
    # A placeholder source range (offsets 0..1 of an empty, file-less source)
    # for IR nodes that have no real source location.
    return SourceContext("", None, 0, 0).make_raw_range(0, 1)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class ParsedDef(NamedTuple):
    """Result of parse_def: a function's parsed AST plus its source metadata."""

    ast: ast.Module  # module whose single body element is the FunctionDef
    ctx: SourceContext  # source-range factory for error reporting
    source: str  # normalized (re-aligned) source text
    filename: Optional[str]  # originating file, or None if unavailable
    file_lineno: int  # 1-based line of the function within that file
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def parse_def(fn):
    """Parse the source of function *fn* into a ParsedDef.

    Raises:
        OSError: if the source is unavailable (via get_source_lines_and_file).
        RuntimeError: if the source does not contain exactly one top-level
            function definition (e.g. lambdas).
    """
    sourcelines, file_lineno, filename = get_source_lines_and_file(
        fn, ErrorReport.call_stack()
    )
    sourcelines = normalize_source_lines(sourcelines)
    source = "".join(sourcelines)
    dedent_src = dedent(source)
    py_ast = ast.parse(dedent_src)
    if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef):
        # NOTE(review): the literal "(unknown)" below looks like it should be
        # the filename — confirm against upstream before relying on this text.
        raise RuntimeError(
            f"Expected a single top-level function: (unknown):{file_lineno}"
        )
    # Indentation stripped by dedent, measured on the first line only.
    leading_whitespace_len = len(source.split("\n", 1)[0]) - len(
        dedent_src.split("\n", 1)[0]
    )
    ctx = make_source_context(
        source, filename, file_lineno, leading_whitespace_len, True, fn.__name__
    )
    return ParsedDef(py_ast, ctx, source, filename, file_lineno)
|
evalkit_internvl/lib/python3.10/site-packages/torch/_tensor_str.py
ADDED
|
@@ -0,0 +1,677 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
import dataclasses
|
| 3 |
+
import math
|
| 4 |
+
import textwrap
|
| 5 |
+
from typing import Any, Dict, Optional
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch import inf
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@dataclasses.dataclass
class __PrinterOptions:
    """Container for the module-wide tensor printing options."""

    precision: int = 4  # digits after the decimal point for floats
    threshold: float = 1000  # element count above which output is summarized
    edgeitems: int = 3  # items shown at each edge when summarizing
    linewidth: int = 80  # target characters per line
    sci_mode: Optional[bool] = None  # force sci notation on/off; None = auto
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Module-wide singleton; mutated in place by set_printoptions() and read by
# the formatting helpers below.
PRINT_OPTS = __PrinterOptions()
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# We could use **kwargs, but this will give better docs
def set_printoptions(
    precision=None,
    threshold=None,
    edgeitems=None,
    linewidth=None,
    profile=None,
    sci_mode=None,
):
    r"""Set options for printing. Items shamelessly taken from NumPy

    Args:
        precision: Number of digits of precision for floating point output
            (default = 4).
        threshold: Total number of array elements which trigger summarization
            rather than full `repr` (default = 1000).
        edgeitems: Number of array items in summary at beginning and end of
            each dimension (default = 3).
        linewidth: The number of characters per line for the purpose of
            inserting line breaks (default = 80). Thresholded matrices will
            ignore this parameter.
        profile: Sane defaults for pretty printing. Can override with any of
            the above options. (any one of `default`, `short`, `full`)
        sci_mode: Enable (True) or disable (False) scientific notation. If
            None (default) is specified, the value is defined by
            `torch._tensor_str._Formatter`. This value is automatically chosen
            by the framework.

    Example::

        >>> # Limit the precision of elements
        >>> torch.set_printoptions(precision=2)
        >>> torch.tensor([1.12345])
        tensor([1.12])
        >>> # Limit the number of elements shown
        >>> torch.set_printoptions(threshold=5)
        >>> torch.arange(10)
        tensor([0, 1, 2, ..., 7, 8, 9])
        >>> # Restore defaults
        >>> torch.set_printoptions(profile='default')
        >>> torch.tensor([1.12345])
        tensor([1.1235])
        >>> torch.arange(10)
        tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Profile presets: (precision, threshold, edgeitems, linewidth).
    # An unrecognized profile string is silently ignored, as before.
    presets = {
        "default": (4, 1000, 3, 80),
        "short": (2, 1000, 2, 80),
        "full": (4, inf, 3, 80),
    }
    if profile is not None and profile in presets:
        (
            PRINT_OPTS.precision,
            PRINT_OPTS.threshold,
            PRINT_OPTS.edgeitems,
            PRINT_OPTS.linewidth,
        ) = presets[profile]

    # Explicit keyword options override whatever the profile preset chose.
    if precision is not None:
        PRINT_OPTS.precision = precision
    if threshold is not None:
        PRINT_OPTS.threshold = threshold
    if edgeitems is not None:
        PRINT_OPTS.edgeitems = edgeitems
    if linewidth is not None:
        PRINT_OPTS.linewidth = linewidth
    # sci_mode is assigned unconditionally: passing None restores auto mode.
    PRINT_OPTS.sci_mode = sci_mode
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def get_printoptions() -> Dict[str, Any]:
    r"""Gets the current options for printing, as a dictionary that
    can be passed as ``**kwargs`` to set_printoptions().
    """
    # All fields are flat scalars, so this is equivalent to dataclasses.asdict.
    return {
        field.name: getattr(PRINT_OPTS, field.name)
        for field in dataclasses.fields(PRINT_OPTS)
    }
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@contextlib.contextmanager
def printoptions(**kwargs):
    r"""Context manager that temporarily changes the print options. Accepted
    arguments are same as :func:`set_printoptions`."""
    # Snapshot the current options before applying the overrides.
    saved = get_printoptions()
    set_printoptions(**kwargs)
    try:
        yield
    finally:
        # Restore the snapshot even if the body raised.
        set_printoptions(**saved)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def tensor_totype(t):
    """Cast *t* to the widest float dtype its device supports, for safe
    intermediate arithmetic during printing."""
    if t.is_mps:
        # MPS has no float64 kernels, so stay in single precision there.
        return t.to(dtype=torch.float)
    return t.to(dtype=torch.double)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class _Formatter:
    """Computes, from one scan over a tensor's data, how each element should be
    rendered (fixed vs. scientific notation, integer-style floats) and the
    column width that right-aligns every element."""

    def __init__(self, tensor):
        self.floating_dtype = tensor.dtype.is_floating_point
        # int_mode: all finite values are whole numbers, print without decimals.
        self.int_mode = True
        self.sci_mode = False
        self.max_width = 1

        with torch.no_grad():
            tensor_view = tensor.reshape(-1)

        if not self.floating_dtype:
            # Non-float dtypes: width is just the longest str() of any element.
            for value in tensor_view:
                value_str = f"{value}"
                self.max_width = max(self.max_width, len(value_str))

        else:
            # Only finite, nonzero values participate in the mode decision.
            nonzero_finite_vals = torch.masked_select(
                tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0)
            )

            if nonzero_finite_vals.numel() == 0:
                # no valid number, do nothing
                return

            # Convert to double for easy calculation. HalfTensor overflows with 1e8, and there's no div() on CPU.
            nonzero_finite_abs = tensor_totype(nonzero_finite_vals.abs())
            nonzero_finite_min = tensor_totype(nonzero_finite_abs.min())
            nonzero_finite_max = tensor_totype(nonzero_finite_abs.max())

            for value in nonzero_finite_vals:
                if value != torch.ceil(value):
                    self.int_mode = False
                    break

            if self.int_mode:
                # in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites
                # to indicate that the tensor is of floating type. add 1 to the len to account for this.
                if (
                    nonzero_finite_max / nonzero_finite_min > 1000.0
                    or nonzero_finite_max > 1.0e8
                ):
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = f"{value:.0f}"
                        self.max_width = max(self.max_width, len(value_str) + 1)
            else:
                # Check if scientific representation should be used.
                if (
                    nonzero_finite_max / nonzero_finite_min > 1000.0
                    or nonzero_finite_max > 1.0e8
                    or nonzero_finite_min < 1.0e-4
                ):
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))

        # A user-set sci_mode (True/False) overrides the auto decision above.
        if PRINT_OPTS.sci_mode is not None:
            self.sci_mode = PRINT_OPTS.sci_mode

    def width(self):
        # Column width that fits every formatted element of the scanned tensor.
        return self.max_width

    def format(self, value):
        # Render one scalar with the mode decided in __init__, right-aligned
        # to max_width by left-padding with spaces.
        if self.floating_dtype:
            if self.sci_mode:
                ret = f"{{:{self.max_width}.{PRINT_OPTS.precision}e}}".format(value)
            elif self.int_mode:
                ret = f"{value:.0f}"
                # Trailing "." marks the value as floating point; inf/nan
                # already read as non-integers so they are left alone.
                if not (math.isinf(value) or math.isnan(value)):
                    ret += "."
            else:
                ret = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
        else:
            ret = f"{value}"
        return (self.max_width - len(ret)) * " " + ret
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def _scalar_str(self, formatter1, formatter2=None):
|
| 208 |
+
if formatter2 is not None:
|
| 209 |
+
real_str = _scalar_str(self.real, formatter1)
|
| 210 |
+
imag_str = (_scalar_str(self.imag, formatter2) + "j").lstrip()
|
| 211 |
+
# handles negative numbers, +0.0, -0.0
|
| 212 |
+
if imag_str[0] == "+" or imag_str[0] == "-":
|
| 213 |
+
return real_str + imag_str
|
| 214 |
+
else:
|
| 215 |
+
return real_str + "+" + imag_str
|
| 216 |
+
else:
|
| 217 |
+
return formatter1.format(self.item())
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def _vector_str(self, indent, summarize, formatter1, formatter2=None):
    """Format a 1-D tensor as "[a, b, ...]", wrapping at PRINT_OPTS.linewidth
    and eliding the middle when *summarize* is set. *formatter2* formats the
    imaginary part of complex tensors."""
    # length includes spaces and comma between elements
    element_length = formatter1.width() + 2
    if formatter2 is not None:
        # width for imag_formatter + an extra j for complex
        element_length += formatter2.width() + 1

    elements_per_line = max(
        1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length)))
    )

    # Default-arg binding freezes the formatters for this closure.
    def _val_formatter(val, formatter1=formatter1, formatter2=formatter2):
        if formatter2 is not None:
            real_str = formatter1.format(val.real)
            imag_str = (formatter2.format(val.imag) + "j").lstrip()
            # handles negative numbers, +0.0, -0.0
            if imag_str[0] == "+" or imag_str[0] == "-":
                return real_str + imag_str
            else:
                return real_str + "+" + imag_str
        else:
            return formatter1.format(val)

    if summarize and not PRINT_OPTS.edgeitems:
        # Deal with edge case that negative zero is zero
        data = ["..."]
    elif summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        # Show edgeitems from each end with an ellipsis in between.
        data = (
            [_val_formatter(val) for val in self[: PRINT_OPTS.edgeitems].tolist()]
            + [" ..."]
            + [_val_formatter(val) for val in self[-PRINT_OPTS.edgeitems :].tolist()]
        )
    else:
        data = [_val_formatter(val) for val in self.tolist()]

    # Chunk into rows, then join rows with a newline indented past the "[".
    data_lines = [
        data[i : i + elements_per_line] for i in range(0, len(data), elements_per_line)
    ]
    lines = [", ".join(line) for line in data_lines]
    return "[" + ("," + "\n" + " " * (indent + 1)).join(lines) + "]"
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
# formatter2 is only used for printing complex tensors.
# For complex tensors, formatter1 and formatter2 are the formatters for tensor.real
# and tensor.imag respectively
def _tensor_str_with_formatter(self, indent, summarize, formatter1, formatter2=None):
    """Recursively format an n-D tensor: scalars and vectors are delegated,
    higher dims format each slice one level deeper with indent + 1."""
    dim = self.dim()

    if dim == 0:
        return _scalar_str(self, formatter1, formatter2)

    if dim == 1:
        return _vector_str(self, indent, summarize, formatter1, formatter2)

    if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        # Keep edgeitems leading and trailing slices with "..." between them.
        slices = (
            [
                _tensor_str_with_formatter(
                    self[i], indent + 1, summarize, formatter1, formatter2
                )
                for i in range(0, PRINT_OPTS.edgeitems)
            ]
            + ["..."]
            + [
                _tensor_str_with_formatter(
                    self[i], indent + 1, summarize, formatter1, formatter2
                )
                for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))
            ]
        )
    else:
        slices = [
            _tensor_str_with_formatter(
                self[i], indent + 1, summarize, formatter1, formatter2
            )
            for i in range(0, self.size(0))
        ]

    # dim - 1 blank lines between slices mirrors NumPy's layout for >2-D.
    tensor_str = ("," + "\n" * (dim - 1) + " " * (indent + 1)).join(slices)
    return "[" + tensor_str + "]"
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def _tensor_str(self, indent):
    """Produce the bracketed data portion of a tensor's repr, normalizing the
    tensor (names, neg/conj bits, narrow float dtypes) before formatting."""
    if self.numel() == 0:
        return "[]"

    if self.has_names():
        # There are two main codepaths (possibly more) that tensor printing goes through:
        # - tensor data can fit comfortably on screen
        # - tensor data needs to be summarized
        # Some of the codepaths don't fully support named tensors, so we send in
        # an unnamed tensor to the formatting code as a workaround.
        self = self.rename(None)

    summarize = self.numel() > PRINT_OPTS.threshold

    if self._is_zerotensor():
        # Materialize the lazy zero tensor so indexing/formatting works.
        self = self.clone()

    # handle the negative bit
    if self.is_neg():
        self = self.resolve_neg()

    # Narrow float dtypes are widened to float32 purely for formatting.
    if self.dtype in [
        torch.float16,
        torch.bfloat16,
        torch.float8_e5m2,
        torch.float8_e5m2fnuz,
        torch.float8_e4m3fn,
        torch.float8_e4m3fnuz,
    ]:
        self = self.float()

    if self.dtype is torch.complex32:
        self = self.cfloat()

    if self.dtype.is_complex:
        # handle the conjugate bit
        self = self.resolve_conj()
        # Real and imaginary parts get independent width/mode decisions.
        real_formatter = _Formatter(
            get_summarized_data(self.real) if summarize else self.real
        )
        imag_formatter = _Formatter(
            get_summarized_data(self.imag) if summarize else self.imag
        )
        return _tensor_str_with_formatter(
            self, indent, summarize, real_formatter, imag_formatter
        )
    else:
        formatter = _Formatter(get_summarized_data(self) if summarize else self)
        return _tensor_str_with_formatter(self, indent, summarize, formatter)
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def _add_suffixes(tensor_str, suffixes, indent, force_newline):
    """Append ", key=value" suffixes and the closing ")" to *tensor_str*,
    wrapping onto an indented new line when PRINT_OPTS.linewidth would be
    exceeded (or unconditionally for the first suffix if *force_newline*)."""
    pieces = [tensor_str]
    # Length of the last emitted line; rfind returns -1 when there is no
    # newline, and the +1 compensates so a single-line string measures len+2.
    line_len = len(tensor_str) - tensor_str.rfind("\n") + 1
    for suffix in suffixes:
        # +2 accounts for the ", " separator.
        if force_newline or line_len + len(suffix) + 2 > PRINT_OPTS.linewidth:
            pieces.append(",\n" + " " * indent + suffix)
            line_len = indent + len(suffix)
            force_newline = False
        else:
            pieces.append(", " + suffix)
            line_len += len(suffix) + 2
    pieces.append(")")
    return "".join(pieces)
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
def get_summarized_data(self):
    """Recursively shrink *self* to only the edge items along every dimension
    (PRINT_OPTS.edgeitems per end), so the formatter only scans what will
    actually be printed."""
    dim = self.dim()
    if dim == 0:
        return self
    if dim == 1:
        if self.size(0) > 2 * PRINT_OPTS.edgeitems:
            # Keep just the leading and trailing edge items.
            return torch.cat(
                (self[: PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems :])
            )
        else:
            return self
    if not PRINT_OPTS.edgeitems:
        # edgeitems == 0: nothing is shown, return an empty same-rank tensor.
        return self.new_empty([0] * self.dim())
    elif self.size(0) > 2 * PRINT_OPTS.edgeitems:
        start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
        end = [self[i] for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))]
        return torch.stack([get_summarized_data(x) for x in (start + end)])
    else:
        return torch.stack([get_summarized_data(x) for x in self])
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def _str_intern(inp, *, tensor_contents=None):
    """Core of tensor printing: build the complete repr string for ``inp``.

    Produces ``prefix(<contents>, suffix1, suffix2, ...)`` where the prefix
    depends on the tensor's type (``tensor(``, ``nested_tensor(``, or the
    subclass name), the contents come from ``_tensor_str`` (or from the caller
    via ``tensor_contents``), and the suffixes record device/dtype/size/nnz/
    layout/grad_fn metadata that cannot be read off the printed values.

    Args:
        inp: the tensor (or tensor subclass) being printed.
        tensor_contents: optional pre-rendered contents string; when given,
            the per-layout contents rendering below is skipped.
    """
    # functorch wrappers (BatchedTensor, GradTrackingTensor, ...) use a
    # dedicated formatter.
    if torch._C._functorch.is_functorch_wrapped_tensor(inp):
        return _functorch_wrapper_str_intern(inp, tensor_contents=tensor_contents)
    is_plain_tensor = type(inp) is torch.Tensor or type(inp) is torch.nn.Parameter
    if inp.is_nested:
        prefix = "nested_tensor("
    elif is_plain_tensor:
        prefix = "tensor("
    else:
        prefix = f"{type(inp).__name__}("
    # Continuation lines are aligned just past the opening parenthesis.
    indent = len(prefix)
    suffixes = []
    custom_contents_provided = tensor_contents is not None
    if custom_contents_provided:
        tensor_str = tensor_contents

    # This is used to extract the primal value and thus disable the forward AD
    # within this function.
    # TODO(albanD) This needs to be updated when more than one level is supported
    self, tangent = torch.autograd.forward_ad.unpack_dual(inp)

    # Note [Print tensor device]:
    # A general logic here is we only print device when it doesn't match
    # the device specified in default tensor type.
    # Currently torch.set_default_tensor_type() only supports CPU/CUDA, thus
    # torch._C._get_default_device() only returns either cpu or cuda.
    # In other cases, we don't have a way to set them as default yet,
    # and we should always print out device for them.
    if (
        self.device.type != torch._C._get_default_device()
        or (
            self.device.type == "cuda"
            and torch.cuda.current_device() != self.device.index
        )
        or (self.device.type == "mps")
    ):
        suffixes.append("device='" + str(self.device) + "'")

    # Tensor printing performs tensor operations like slice, indexing, etc to make it in a
    # representable format. These operations on ipu/xla/lazy/mtia tensor results in compilations. Hence,
    # to avoid compilations, copying the tensor to cpu before printing.
    if self.device.type in ["xla", "lazy", "ipu", "mtia"]:
        self = self.to("cpu")

    # TODO: add an API to map real -> complex dtypes
    _default_complex_dtype = (
        torch.cdouble if torch.get_default_dtype() == torch.double else torch.cfloat
    )
    # dtypes implied by the printed values; these are omitted from the suffixes.
    has_default_dtype = self.dtype in (
        torch.get_default_dtype(),
        _default_complex_dtype,
        torch.int64,
        torch.bool,
    )
    if self.is_sparse:
        # Sparse COO layout: render indices and values sub-tensors plus size/nnz.
        suffixes.append("size=" + str(tuple(self.shape)))
        from torch._subclasses.fake_tensor import FakeTensor

        # nnz is not meaningful/available for meta and fake tensors.
        if not self.is_meta and not isinstance(self, FakeTensor):
            suffixes.append("nnz=" + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        if not custom_contents_provided:
            indices_prefix = "indices=tensor("
            indices = self._indices().detach()
            indices_str = _tensor_str(indices, indent + len(indices_prefix))
            if indices.numel() == 0:
                indices_str += ", size=" + str(tuple(indices.shape))
            values_prefix = "values=tensor("
            values = self._values().detach()
            values_str = _tensor_str(values, indent + len(values_prefix))
            if values.numel() == 0:
                values_str += ", size=" + str(tuple(values.shape))
            tensor_str = (
                indices_prefix
                + indices_str
                + "),\n"
                + " " * indent
                + values_prefix
                + values_str
                + ")"
            )
    elif self.layout in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }:
        # Sparse compressed layouts: render compressed/plain index tensors and values.
        suffixes.append("size=" + str(tuple(self.shape)))
        suffixes.append("nnz=" + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        if not custom_contents_provided:
            # Pick the accessor pair matching the layout (row- vs column-compressed).
            compressed_indices_method, plain_indices_method = {
                torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
                torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
                torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
                torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
            }[self.layout]
            if self.layout in {torch.sparse_csr, torch.sparse_bsr}:
                cdimname, pdimname = "row", "column"
            else:
                cdimname, pdimname = "column", "row"
            # e.g. "crow_indices=tensor(" / "col_indices=tensor(" for CSR.
            compressed_indices_prefix = f"c{cdimname[:3]}_indices=tensor("
            compressed_indices = compressed_indices_method(self).detach()
            compressed_indices_str = _tensor_str(
                compressed_indices, indent + len(compressed_indices_prefix)
            )
            if compressed_indices.numel() == 0:
                compressed_indices_str += ", size=" + str(
                    tuple(compressed_indices.shape)
                )
            plain_indices_prefix = f"{pdimname[:3]}_indices=tensor("
            plain_indices = plain_indices_method(self).detach()
            plain_indices_str = _tensor_str(
                plain_indices, indent + len(plain_indices_prefix)
            )
            if plain_indices.numel() == 0:
                plain_indices_str += ", size=" + str(tuple(plain_indices.shape))
            values_prefix = "values=tensor("
            values = self.values().detach()
            values_str = _tensor_str(values, indent + len(values_prefix))
            if values.numel() == 0:
                values_str += ", size=" + str(tuple(values.shape))
            tensor_str = (
                compressed_indices_prefix
                + compressed_indices_str
                + "),\n"
                + " " * indent
                + plain_indices_prefix
                + plain_indices_str
                + "),\n"
                + " " * indent
                + values_prefix
                + values_str
                + ")"
            )
    elif self.is_quantized:
        # Quantized tensors: print dequantized values; quantization parameters
        # go into the suffixes.
        suffixes.append("size=" + str(tuple(self.shape)))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        suffixes.append("quantization_scheme=" + str(self.qscheme()))
        if (
            self.qscheme() == torch.per_tensor_affine
            or self.qscheme() == torch.per_tensor_symmetric
        ):
            suffixes.append("scale=" + str(self.q_scale()))
            suffixes.append("zero_point=" + str(self.q_zero_point()))
        elif (
            self.qscheme() == torch.per_channel_affine
            or self.qscheme() == torch.per_channel_symmetric
            or self.qscheme() == torch.per_channel_affine_float_qparams
        ):
            suffixes.append("scale=" + str(self.q_per_channel_scales()))
            suffixes.append("zero_point=" + str(self.q_per_channel_zero_points()))
            suffixes.append("axis=" + str(self.q_per_channel_axis()))
        if not custom_contents_provided:
            tensor_str = _tensor_str(self.dequantize(), indent)
    elif self.is_nested:
        if not custom_contents_provided:

            def indented_str(s, indent):
                # Indent every line of one component tensor's repr.
                return "\n".join(f"  {line}" for line in s.split("\n"))

            # Nested tensors print as a bracketed list of their components.
            strs = ",\n".join(
                indented_str(str(t), indent + 1)
                for t in torch.ops.aten.unbind.int(self, 0)
            )
            tensor_str = f"[\n{strs}\n]"
    elif torch._is_functional_tensor(self):
        prefix = "_to_functional_tensor("
        tensor_str = repr(torch._from_functional_tensor(self))
    else:
        # Circular import problem, so we import it here
        from torch._subclasses.fake_tensor import FakeTensor

        if self.is_meta or isinstance(self, FakeTensor):
            # Meta/fake tensors carry no data; "..." stands in for the contents.
            suffixes.append("size=" + str(tuple(self.shape)))
            if self.dtype != torch.get_default_dtype():
                suffixes.append("dtype=" + str(self.dtype))
            # TODO: This implies that ellipses is valid syntax for allocating
            # a meta tensor or FakeTensor, which it could be, but it isn't right now
            if not custom_contents_provided:
                tensor_str = "..."
        else:
            if self.numel() == 0 and not self.is_sparse:
                # Explicitly print the shape if it is not (0,), to match NumPy behavior
                if self.dim() != 1:
                    suffixes.append("size=" + str(tuple(self.shape)))

                # In an empty tensor, there are no elements to infer if the dtype
                # should be int64, so it must be shown explicitly.
                if self.dtype != torch.get_default_dtype():
                    suffixes.append("dtype=" + str(self.dtype))
                if not custom_contents_provided:
                    tensor_str = "[]"
            else:
                if not PRINT_OPTS.edgeitems:
                    # edgeitems == 0 summarizes all data away, so record the
                    # size explicitly.
                    suffixes.append("size=" + str(tuple(self.shape)))

                if not has_default_dtype:
                    suffixes.append("dtype=" + str(self.dtype))

                if not custom_contents_provided:
                    if self.layout != torch.strided:
                        tensor_str = _tensor_str(self.to_dense(), indent)
                    else:
                        tensor_str = _tensor_str(self, indent)

    if self.layout != torch.strided:
        suffixes.append("layout=" + str(self.layout))

    # Use inp here to get the original grad_fn and not the one generated by the forward grad
    # unpacking.
    grad_fn_name = None
    try:
        grad_fn = inp.grad_fn
    except RuntimeError:
        # Accessing the grad_fn calls rebasing logic which would cause an error
        # if that tensor is a view created in no-grad mode modified in-place in
        # no-grad mode. See: https://github.com/pytorch/pytorch/issues/99968
        grad_fn_name = "Invalid"

    if grad_fn_name is None and grad_fn is not None:
        grad_fn_name = type(grad_fn).__name__
        if grad_fn_name == "CppFunction":
            # C++-backed nodes report their real name via grad_fn.name().
            grad_fn_name = grad_fn.name().rsplit("::", 1)[-1]

    if grad_fn_name is not None:
        suffixes.append(f"grad_fn=<{grad_fn_name}>")
    elif inp.requires_grad:
        suffixes.append("requires_grad=True")

    if self.has_names():
        suffixes.append(f"names={self.names}")

    if tangent is not None:
        suffixes.append(f"tangent={tangent}")

    string_repr = _add_suffixes(
        prefix + tensor_str, suffixes, indent, force_newline=self.is_sparse
    )

    # Check if this instance is flagged as a parameter and change the repr accordingly.
    # Unfortunately, this function has to be aware of this detail.
    # NB: This is currently skipped for plain tensor parameters to maintain BC. In the future,
    # this should be done for those as well to produce a valid repr.
    if isinstance(self, torch.nn.Parameter) and not is_plain_tensor:
        string_repr = f"Parameter({string_repr})"

    return string_repr
|
| 641 |
+
|
| 642 |
+
|
| 643 |
+
def _functorch_wrapper_str_intern(tensor, *, tensor_contents=None):
    """Build the repr for a functorch-wrapped tensor.

    Handles the three wrapper kinds (BatchedTensor, GradTrackingTensor,
    FunctionalTensor) by unwrapping one level and embedding the inner value's
    repr; raises for unknown wrapper types.
    """
    functorch = torch._C._functorch
    level = functorch.maybe_get_level(tensor)
    assert level != -1

    if functorch.is_functionaltensor(tensor):
        # The FunctionalTensorWrapper is about to be unwrapped, so sync it
        # first to ensure the inner value is up to date.
        torch._sync(tensor)

    unwrapped = functorch.get_unwrapped(tensor)
    unwrapped_repr = repr(unwrapped)
    indented = textwrap.indent(unwrapped_repr, " " * 4)

    if functorch.is_batchedtensor(tensor):
        bdim = functorch.maybe_get_bdim(tensor)
        assert bdim != -1
        return f"BatchedTensor(lvl={level}, bdim={bdim}, value=\n{indented}\n)"
    if functorch.is_gradtrackingtensor(tensor):
        return f"GradTrackingTensor(lvl={level}, value=\n{indented}\n)"
    if functorch.is_functionaltensor(tensor):
        return f"FunctionalTensor(lvl={level}, value=\\\n{unwrapped_repr})"

    raise ValueError("We don't know how to print this, please file us an issue")
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
def _str(self, *, tensor_contents=None):
    """Entry point for tensor string conversion.

    Formats the tensor with autograd recording, dispatch modes, and functorch
    all disabled, so that the slicing/indexing done during printing has no
    observable side effects.
    """
    with torch.no_grad():
        with torch.utils._python_dispatch._disable_current_modes():
            # Keep the guard alive for the duration of the call; functorch is
            # re-enabled when it is destroyed on return.
            guard = torch._C._DisableFuncTorch()  # noqa: F841
            return _str_intern(self, tensor_contents=tensor_contents)
|
evalkit_internvl/lib/python3.10/site-packages/torch/_torch_docs.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/functional.py
ADDED
|
@@ -0,0 +1,1978 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import (
|
| 2 |
+
List, Tuple, Optional, Union, Any, Sequence, TYPE_CHECKING
|
| 3 |
+
)
|
| 4 |
+
import operator
|
| 5 |
+
import itertools
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch._C import _add_docstr
|
| 9 |
+
import torch.nn.functional as F
|
| 10 |
+
from ._lowrank import svd_lowrank, pca_lowrank
|
| 11 |
+
from .overrides import (
|
| 12 |
+
has_torch_function, has_torch_function_unary, has_torch_function_variadic,
|
| 13 |
+
handle_torch_function)
|
| 14 |
+
from ._jit_internal import boolean_dispatch
|
| 15 |
+
from ._jit_internal import _overload as overload
|
| 16 |
+
|
| 17 |
+
Tensor = torch.Tensor
|
| 18 |
+
from torch import _VF
|
| 19 |
+
|
| 20 |
+
# Public API of torch.functional: names re-exported at the torch.* level.
__all__ = [
    'atleast_1d',
    'atleast_2d',
    'atleast_3d',
    'align_tensors',
    'broadcast_shapes',
    'broadcast_tensors',
    'cartesian_prod',
    'block_diag',
    'cdist',
    'chain_matmul',
    'einsum',
    'istft',
    'lu',
    'norm',
    'meshgrid',
    'pca_lowrank',
    'split',
    'stft',
    'svd_lowrank',
    'tensordot',
    'unique',
    'unique_consecutive',
    'unravel_index',
]
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def broadcast_tensors(*tensors):
    r"""broadcast_tensors(*tensors) -> List of Tensors

    Broadcasts the given tensors according to :ref:`broadcasting-semantics`.

    Args:
        *tensors: any number of tensors of the same type

    .. warning::

        Several elements of a broadcast tensor may alias a single memory
        location, so in-place operations (especially vectorized ones) can
        produce incorrect results. Clone the tensors before writing to them.

    Example::

        >>> x = torch.arange(3).view(1, 3)
        >>> y = torch.arange(2).view(2, 1)
        >>> a, b = torch.broadcast_tensors(x, y)
        >>> a.size()
        torch.Size([2, 3])
        >>> a
        tensor([[0, 1, 2],
                [0, 1, 2]])
    """
    # Python-level wrapper: exists to support variadic args and the
    # __torch_function__ override protocol.
    if not has_torch_function(tensors):
        return _VF.broadcast_tensors(tensors)  # type: ignore[attr-defined]
    return handle_torch_function(broadcast_tensors, tensors, *tensors)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def broadcast_shapes(*shapes):
    r"""broadcast_shapes(*shapes) -> Size

    Similar to :func:`broadcast_tensors` but operating purely on shapes.

    Equivalent to
    ``torch.broadcast_tensors(*map(torch.empty, shapes))[0].shape`` without
    materializing any intermediate tensors. Useful for broadcasting tensors
    that share a common batch shape but differ in their rightmost dimensions,
    e.g. mean vectors against covariance matrices.

    Example::

        >>> torch.broadcast_shapes((2,), (3, 1), (1, 1, 1))
        torch.Size([1, 3, 2])

    Args:
        \*shapes (torch.Size): Shapes of tensors.

    Returns:
        shape (torch.Size): A shape compatible with all input shapes.

    Raises:
        RuntimeError: If shapes are incompatible.
    """
    # Python-level wrapper: exists to support variadic args.
    # TODO Move this to C++ once the jit has better support for torch.Size.
    if torch.jit.is_tracing():
        # The eager implementation below would bake the concrete sizes into
        # the trace, making replays with different sizes fail; go through
        # real (zero-size-expanded) tensors instead.
        with torch.no_grad():
            scalar = torch.zeros((), device="cpu")
            expanded = [scalar.expand(shape) for shape in shapes]
            return broadcast_tensors(*expanded)[0].shape

    # Widest rank among the inputs; a bare int counts as rank 1.  Inputs of
    # unsupported type are ignored here and rejected in the second pass.
    longest = 0
    for shape in shapes:
        if isinstance(shape, (int, torch.SymInt)):
            longest = max(longest, 1)
        elif isinstance(shape, (tuple, list)):
            longest = max(longest, len(shape))
    result = [1] * longest
    for shape in shapes:
        if isinstance(shape, (int, torch.SymInt)):
            shape = (shape,)
        if not isinstance(shape, (tuple, list)):
            raise RuntimeError("Input shapes should be of type ints, a tuple of ints, or a list of ints, got ", shape)
        # Walk dimensions right-to-left, as broadcasting aligns trailing dims.
        for i in range(-1, -1 - len(shape), -1):
            if shape[i] < 0:
                raise RuntimeError(f"Trying to create tensor with negative dimension ({shape[i]}): ({shape[i]})")
            if shape[i] == 1 or shape[i] == result[i]:
                continue
            if result[i] != 1:
                raise RuntimeError("Shape mismatch: objects cannot be broadcast to a single shape")
            result[i] = shape[i]
    return torch.Size(result)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def split(
    tensor: Tensor, split_size_or_sections: Union[int, List[int]], dim: int = 0
) -> Tuple[Tensor, ...]:
    r"""Splits the tensor into chunks; every chunk is a view of the original tensor.

    When :attr:`split_size_or_sections` is an integer, :attr:`tensor` is cut
    into equally sized chunks along :attr:`dim` (the final chunk is smaller
    when the size of that dimension is not divisible by :attr:`split_size`).

    When :attr:`split_size_or_sections` is a list, :attr:`tensor` is cut into
    ``len(split_size_or_sections)`` chunks whose sizes along :attr:`dim` are
    given by the list entries.

    Args:
        tensor (Tensor): tensor to split.
        split_size_or_sections (int) or (list(int)): size of a single chunk or
            list of sizes for each chunk
        dim (int): dimension along which to split the tensor.

    Example::

        >>> a = torch.arange(10).reshape(5, 2)
        >>> a
        tensor([[0, 1],
                [2, 3],
                [4, 5],
                [6, 7],
                [8, 9]])
        >>> torch.split(a, 2)
        (tensor([[0, 1],
                 [2, 3]]),
         tensor([[4, 5],
                 [6, 7]]),
         tensor([[8, 9]]))
        >>> torch.split(a, [1, 4])
        (tensor([[0, 1]]),
         tensor([[2, 3],
                 [4, 5],
                 [6, 7],
                 [8, 9]]))
    """
    if not has_torch_function_unary(tensor):
        # Overwriting reason:
        # This dispatches to two ATen functions depending on the type of
        # split_size_or_sections. The branching code is in _tensor.py, which we
        # call here.
        return tensor.split(split_size_or_sections, dim)
    return handle_torch_function(
        split, (tensor,), tensor, split_size_or_sections, dim=dim
    )
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def einsum(*args: Any) -> Tensor:
    r"""einsum(equation, *operands) -> Tensor

    Sums the product of the elements of :attr:`operands` along dimensions
    specified by ``equation``, following the Einstein summation convention.

    Two calling conventions are accepted:

    * ``einsum(equation, *operands)`` — ``equation`` is a subscript string
      such as ``"ij,jk->ik"``. Subscripts are letters in ``[a-zA-Z]``; an
      optional ``'->'`` names the output subscripts, and an ellipsis
      ``'...'`` broadcasts the dimensions it covers.
    * the sublist format — operands interleaved with lists of integer
      subscripts in ``[0, 52)`` (``Ellipsis`` allowed), with an optional
      trailing output sublist, e.g.
      ``einsum(op1, [0, 1], op2, [1, 2], [0, 2])``.

    When three or more operands are given and ``torch.backends.opt_einsum``
    is both enabled and available, a near-optimal contraction path is
    computed with opt_einsum and forwarded to the ATen kernel; otherwise the
    operands are contracted left to right.

    Args:
        equation (str): The subscripts for the Einstein summation.
        operands (List[Tensor]): The tensors to compute the Einstein
            summation of.

    Returns:
        Tensor: the contraction result.

    Raises:
        ValueError: if fewer than two arguments are given, or a sublist
            subscript falls outside ``[0, 52)``.

    Example::

        >>> a = torch.eye(2)
        >>> torch.einsum('ij->ji', a).shape
        torch.Size([2, 2])
    """
    import torch.backends.opt_einsum as opt_einsum
    # Variadic wrapper around the ATen einsum implementations.
    if len(args) < 2:
        raise ValueError('einsum(): must specify the equation string and at least one operand, '
                         'or at least one operand and its subscripts list')

    if isinstance(args[0], torch.Tensor):
        # Sublist format: args alternate operand, subscripts-list, with an
        # optional output subscripts list at the end.  Build the equivalent
        # equation string and collect the operands.
        def to_label(code: int) -> str:
            # Ellipsis must be tested first: comparing it with integers via
            # `<=` would raise a TypeError.
            if code == Ellipsis:
                return '...'
            if 0 <= code < 26:
                return chr(ord('A') + code)
            if 26 <= code < 52:
                return chr(ord('a') + code - 26)
            raise ValueError('einsum(): subscript in subscript list is not within the valid range [0, 52)')

        equation = ','.join(''.join(map(to_label, sub)) for sub in args[1::2])

        if len(args) % 2 == 0:
            operands = args[::2]
        else:
            # Odd argument count: the trailing list names the output subscripts.
            equation = equation + '->' + ''.join(map(to_label, args[-1]))
            operands = args[:-1:2]
    else:
        equation, operands = args[0], args[1:]

    if has_torch_function(operands):
        return handle_torch_function(einsum, operands, equation, *operands)

    if len(operands) == 1 and isinstance(operands[0], (list, tuple)):
        # Legacy interface: a single list of operands.  Recurse so that any
        # __torch_function__ operand inside the list is still dispatched.
        return einsum(equation, *operands[0])

    if len(operands) <= 2 or not opt_einsum.enabled:
        # Contracting zero or one time is already optimal, and the user may
        # have disabled opt_einsum explicitly.
        return _VF.einsum(equation, operands)  # type: ignore[attr-defined]

    path = None
    if opt_einsum.is_available():
        backend = opt_einsum.get_opt_einsum()
        pairs = backend.contract_path(equation, *operands, optimize=opt_einsum.strategy)[0]
        # Flatten [(a, b), ...] into [a, b, ...] for the C++ dispatcher.
        path = [idx for pair in pairs for idx in pair]
    return _VF.einsum(equation, operands, path=path)  # type: ignore[attr-defined]
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
# Variadic wrapper; the actual work happens in _meshgrid.
if TYPE_CHECKING:
    # TorchScript cannot parse Union, so the precise annotation is only
    # visible to static type checkers (mypy).
    def meshgrid(*tensors: Union[Tensor, List[Tensor]],
                 indexing: Optional[str] = None) -> Tuple[Tensor, ...]:
        return _meshgrid(*tensors, indexing=indexing)
else:
    def meshgrid(*tensors, indexing: Optional[str] = None) -> Tuple[Tensor, ...]:
        r"""Create N-dimensional coordinate grids from N 1-D input tensors.

        Given tensors :math:`T_0 \ldots T_{N-1}` with sizes
        :math:`S_0 \ldots S_{N-1}`, returns :math:`N` tensors
        :math:`G_0 \ldots G_{N-1}`, each of shape :math:`(S_0, ..., S_{N-1})`,
        where :math:`G_i` is :math:`T_i` expanded to the full grid shape.

        .. note::
            0D inputs are treated equivalently to 1D inputs of a
            single element.

        .. warning::
            `torch.meshgrid(*tensors)` currently has the same behavior as
            `numpy.meshgrid(*arrays, indexing='ij')`; the default is planned
            to change to `indexing='xy'`
            (https://github.com/pytorch/pytorch/issues/50276 tracks this).

        .. seealso::
            :func:`torch.cartesian_prod` produces the same element-wise
            pairings, collected in a tensor of vectors.

        Args:
            tensors (list of Tensor): list of scalars or 1 dimensional
                tensors. Scalars will be treated as tensors of size
                :math:`(1,)` automatically.
            indexing: (str, optional): the indexing mode, either "xy" or
                "ij", defaults to "ij". If "xy" is selected, the first
                dimension corresponds to the cardinality of the second input
                and the second dimension corresponds to the cardinality of
                the first input. If "ij" is selected, the dimensions follow
                the order of the inputs.

        Returns:
            seq (sequence of Tensors): :math:`N` tensors, each of shape
            :math:`(S_0, ..., S_{N-1})`.

        Example::

            >>> x = torch.tensor([1, 2, 3])
            >>> y = torch.tensor([4, 5, 6])
            >>> grid_x, grid_y = torch.meshgrid(x, y, indexing='ij')
            >>> grid_x
            tensor([[1, 1, 1],
                    [2, 2, 2],
                    [3, 3, 3]])
            >>> grid_y
            tensor([[4, 5, 6],
                    [4, 5, 6],
                    [4, 5, 6]])
        """
        return _meshgrid(*tensors, indexing=indexing)
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def _meshgrid(*tensors, indexing: Optional[str]):
|
| 496 |
+
if has_torch_function(tensors):
|
| 497 |
+
return handle_torch_function(meshgrid, tensors, *tensors, indexing=indexing)
|
| 498 |
+
if len(tensors) == 1 and isinstance(tensors[0], (list, tuple)):
|
| 499 |
+
# the old interface of passing the operands as one list argument
|
| 500 |
+
tensors = tensors[0] # type: ignore[assignment]
|
| 501 |
+
|
| 502 |
+
# Continue allowing call of old method that takes no indexing
|
| 503 |
+
# kwarg for forward compatibility reasons.
|
| 504 |
+
#
|
| 505 |
+
# Remove this two weeks after landing.
|
| 506 |
+
kwargs = {} if indexing is None else {'indexing': indexing}
|
| 507 |
+
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
def stft(input: Tensor, n_fft: int, hop_length: Optional[int] = None,
         win_length: Optional[int] = None, window: Optional[Tensor] = None,
         center: bool = True, pad_mode: str = 'reflect', normalized: bool = False,
         onesided: Optional[bool] = None,
         return_complex: Optional[bool] = None) -> Tensor:
    r"""Short-time Fourier transform (STFT).

    Computes the Fourier transform of short overlapping windows of
    :attr:`input`, giving the frequency content of the signal as it changes
    over time.  Ignoring the optional batch dimension, the result is

    .. math::
        X[\omega, m] = \sum_{k = 0}^{\text{win\_length-1}}%
                            \text{window}[k]\ \text{input}[m \times \text{hop\_length} + k]\ %
                            \exp\left(- j \frac{2 \pi \cdot \omega k}{\text{n\_fft}}\right),

    where :math:`m` indexes the sliding window and :math:`\omega` the
    frequency, with :math:`0 \leq \omega < \text{n\_fft}` for
    ``onesided=False`` or
    :math:`0 \leq \omega < \lfloor \text{n\_fft} / 2 \rfloor + 1` for
    ``onesided=True``.  The interface is modeled after (but is *not* a
    drop-in replacement for) librosa's stft
    (https://librosa.org/doc/latest/generated/librosa.stft.html).

    .. warning::
        From version 1.8.0, :attr:`return_complex` must always be given
        explicitly for real inputs and ``return_complex=False`` is
        deprecated.  Strongly prefer ``return_complex=True``;
        :func:`torch.view_as_real` recovers a real tensor with an extra last
        dimension for real and imaginary components.

    .. warning::
        From version 2.1, a warning is emitted when :attr:`window` is not
        specified; the rectangular default may produce artifacts.  Consider
        tapered windows such as :func:`torch.hann_window`.

    Args:
        input (Tensor): 1-D time sequence or 2-D batch of time sequences,
            shape `(B?, L)` where `B?` is an optional batch dimension.
        n_fft (int): size of Fourier transform.
        hop_length (int, optional): distance between neighboring sliding
            window frames. Default: ``None`` (treated as ``floor(n_fft / 4)``).
        win_length (int, optional): size of window frame and STFT filter.
            Default: ``None`` (treated as :attr:`n_fft`).
        window (Tensor, optional): 1-D window of length `<= n_fft`, e.g. from
            :meth:`torch.hann_window`; padded on both sides to :attr:`n_fft`
            when shorter. Default: ``None`` (treated as all ones).
        center (bool, optional): pad :attr:`input` on both sides so the
            :math:`t`-th frame is centered at
            :math:`t \times \text{hop\_length}`. Default: ``True``.
        pad_mode (str, optional): padding method used when :attr:`center` is
            ``True``; see :meth:`torch.nn.functional.pad` for options.
            Default: ``"reflect"``.
        normalized (bool, optional): multiply the result by
            :math:`(\text{frame\_length})^{-0.5}`. Default: ``False``.
        onesided (bool, optional): return only half of the spectrum to avoid
            redundancy for real inputs (conjugate symmetry
            :math:`X[m, \omega] = X[m, \text{n\_fft} - \omega]^*`). Not
            possible for complex input or window.
            Default: ``True`` for real :attr:`input` and :attr:`window`,
            ``False`` otherwise.
        return_complex (bool, optional): return a complex tensor, or a real
            tensor with an extra last dimension for the real and imaginary
            components.

            .. versionchanged:: 2.0
                ``return_complex`` is now a required argument for real
                inputs, as the default is being transitioned to ``True``.

            .. deprecated:: 2.0
                ``return_complex=False`` is deprecated; use
                ``return_complex=True`` and :func:`torch.view_as_real` to
                recover the old output format.

    Returns:
        Tensor: STFT result of shape `(B?, N, T, C?)` where

        - `B?` is the optional batch dimension from the input
        - `N` is the number of frequency samples, `(n_fft // 2) + 1` for
          `onesided=True`, otherwise `n_fft`
        - `T` is the number of frames, `1 + L // hop_length` for
          `center=True`, otherwise `1 + (L - n_fft) // hop_length`
        - `C?` is an optional length-2 real/imaginary dimension, present
          only when `return_complex=False`
    """
    if has_torch_function_unary(input):
        return handle_torch_function(
            stft, (input,), input, n_fft, hop_length=hop_length, win_length=win_length,
            window=window, center=center, pad_mode=pad_mode, normalized=normalized,
            onesided=onesided, return_complex=return_complex)
    # Python-side center padding kept for forward compatibility with PR
    # #73432; it will be removed once that FC period is over.
    if center:
        ndim = input.dim()
        # Lift to 3-D because F.pad's 1-D padding expects (N, C, L).
        lifted_shape = [1] * (3 - ndim) + list(input.size())
        half = int(n_fft // 2)
        input = F.pad(input.view(lifted_shape), [half, half], pad_mode)
        input = input.view(input.shape[-ndim:])
    return _VF.stft(input, n_fft, hop_length, win_length, window,  # type: ignore[attr-defined]
                    normalized, onesided, return_complex)
|
| 662 |
+
|
| 663 |
+
|
| 664 |
+
# torch.istft is implemented in C++; this only attaches its Python-facing
# docstring via _add_docstr and re-exports the callable as `istft`.
istft = _add_docstr(
    torch.istft,
    "istft(input, n_fft, hop_length=None, win_length=None, window=None, center=True, "
    "normalized=False, onesided=None, length=None, return_complex=False) -> Tensor:\n"
    r"""
Inverse short time Fourier Transform. This is expected to be the inverse of :func:`~torch.stft`.

.. warning::
    From version 2.1, a warning will be provided if a :attr:`window` is
    not specified. In a future release, this attribute will be required.
    Please provide the same window used in the stft call.

It has the same parameters (+ additional optional parameter of :attr:`length`) and it should return the
least squares estimation of the original signal. The algorithm will check using the NOLA condition (
nonzero overlap).

Important consideration in the parameters :attr:`window` and :attr:`center` so that the envelop
created by the summation of all the windows is never zero at certain point in time. Specifically,
:math:`\sum_{t=-\infty}^{\infty} |w|^2[n-t\times hop\_length] \cancel{=} 0`.

Since :func:`~torch.stft` discards elements at the end of the signal if they do not fit in a frame,
``istft`` may return a shorter signal than the original signal (can occur if :attr:`center` is False
since the signal isn't padded). If `length` is given in the arguments and is longer than expected,
``istft`` will pad zeros to the end of the returned signal.

If :attr:`center` is ``True``, then there will be padding e.g. ``'constant'``, ``'reflect'``, etc.
Left padding can be trimmed off exactly because they can be calculated but right padding cannot be
calculated without additional information.

Example: Suppose the last window is:
    ``[17, 18, 0, 0, 0]`` vs ``[18, 0, 0, 0, 0]``

The :attr:`n_fft`, :attr:`hop_length`, :attr:`win_length` are all the same which prevents the calculation
of right padding. These additional values could be zeros or a reflection of the signal so providing
:attr:`length` could be useful. If :attr:`length` is ``None`` then padding will be aggressively removed
(some loss of signal).

[1] D. W. Griffin and J. S. Lim, "Signal estimation from modified short-time Fourier transform,"
IEEE Trans. ASSP, vol.32, no.2, pp.236-243, Apr. 1984.

Args:
    input (Tensor): The input tensor. Expected to be in the format of :func:`~torch.stft`,
        output. That is a complex tensor of shape `(B?, N, T)` where

        - `B?` is an optional batch dimension
        - `N` is the number of frequency samples, `(n_fft // 2) + 1`
          for onesided input, or otherwise `n_fft`.
        - `T` is the number of frames, `1 + length // hop_length` for centered stft,
          or `1 + (length - n_fft) // hop_length` otherwise.

        .. versionchanged:: 2.0
            Real datatype inputs are no longer supported. Input must now have a
            complex datatype, as returned by ``stft(..., return_complex=True)``.
    n_fft (int): Size of Fourier transform
    hop_length (Optional[int]): The distance between neighboring sliding window frames.
        (Default: ``n_fft // 4``)
    win_length (Optional[int]): The size of window frame and STFT filter. (Default: ``n_fft``)
    window (Optional[torch.Tensor]): The optional window function.
        Shape must be 1d and `<= n_fft`
        (Default: ``torch.ones(win_length)``)
    center (bool): Whether :attr:`input` was padded on both sides so that the :math:`t`-th frame is
        centered at time :math:`t \times \text{hop\_length}`.
        (Default: ``True``)
    normalized (bool): Whether the STFT was normalized. (Default: ``False``)
    onesided (Optional[bool]): Whether the STFT was onesided.
        (Default: ``True`` if `n_fft != fft_size` in the input size)
    length (Optional[int]): The amount to trim the signal by (i.e. the
        original signal length). Defaults to `(T - 1) * hop_length` for
        centered stft, or `n_fft + (T - 1) * hop_length` otherwise, where `T`
        is the number of input frames.
    return_complex (Optional[bool]):
        Whether the output should be complex, or if the input should be
        assumed to derive from a real signal and window.
        Note that this is incompatible with ``onesided=True``.
        (Default: ``False``)

Returns:
    Tensor: Least squares estimation of the original signal of shape `(B?, length)` where
        `B?` is an optional batch dimension from the input tensor.
""")
|
| 744 |
+
|
| 745 |
+
|
| 746 |
+
if TYPE_CHECKING:
    # The _unique* _impl functions return a variable number of tensors when
    # dispatched through __torch_function__ (tuple unpacking is done inside
    # the _impl rather than by its caller), so for mypy the result is Any.
    _unique_impl_out = Any
else:
    # At runtime (and for TorchScript) the _impl output is always a 3-tuple:
    # (values, inverse_indices, counts).
    _unique_impl_out = Tuple[Tensor, Tensor, Tensor]
|
| 753 |
+
|
| 754 |
+
|
| 755 |
+
def _unique_impl(input: Tensor, sorted: bool = True,
                 return_inverse: bool = False, return_counts: bool = False,
                 dim: Optional[int] = None) -> _unique_impl_out:
    r"""unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None) -> Tuple[Tensor, Tensor, Tensor]

    Returns the unique elements of the input tensor.

    .. note:: This function is different from :func:`torch.unique_consecutive` in the sense that
        this function also eliminates non-consecutive duplicate values.

    .. note:: Currently in the CUDA implementation and the CPU implementation,
        `torch.unique` always sort the tensor at the beginning regardless of the `sort` argument.
        Sorting could be slow, so if your input tensor is already sorted, it is recommended to use
        :func:`torch.unique_consecutive` which avoids the sorting.

    Args:
        input (Tensor): the input tensor
        sorted (bool): Whether to sort the unique elements in ascending order
            before returning as output.
        return_inverse (bool): Whether to also return the indices for where
            elements in the original input ended up in the returned unique list.
        return_counts (bool): Whether to also return the counts for each unique
            element.
        dim (int, optional): the dimension to operate upon. If ``None``, the
            unique of the flattened input is returned. Otherwise, each of the
            tensors indexed by the given dimension is treated as one of the
            elements to apply the unique operation upon. See examples for more
            details. Default: ``None``

    Returns:
        (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing

            - **output** (*Tensor*): the output list of unique scalar elements.
            - **inverse_indices** (*Tensor*): (optional) if
              :attr:`return_inverse` is True, there will be an additional
              returned tensor (same shape as input) representing the indices
              for where elements in the original input map to in the output;
              otherwise, this function will only return a single tensor.
            - **counts** (*Tensor*): (optional) if
              :attr:`return_counts` is True, there will be an additional
              returned tensor (same shape as output or output.size(dim),
              if dim was specified) representing the number of occurrences
              for each unique value or tensor.

    Example::

        >>> output = torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long))
        >>> output
        tensor([1, 2, 3])

        >>> output, inverse_indices = torch.unique(
        ...     torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_inverse=True)
        >>> output
        tensor([1, 2, 3])
        >>> inverse_indices
        tensor([0, 2, 1, 2])

        >>> output, inverse_indices = torch.unique(
        ...     torch.tensor([[1, 3], [2, 3]], dtype=torch.long), sorted=True, return_inverse=True)
        >>> output
        tensor([1, 2, 3])
        >>> inverse_indices
        tensor([[0, 2],
                [1, 2]])

        >>> a = torch.tensor([
        ...     [
        ...         [1, 1, 0, 0],
        ...         [1, 1, 0, 0],
        ...         [0, 0, 1, 1],
        ...     ],
        ...     [
        ...         [0, 0, 1, 1],
        ...         [0, 0, 1, 1],
        ...         [1, 1, 1, 1],
        ...     ],
        ...     [
        ...         [1, 1, 0, 0],
        ...         [1, 1, 0, 0],
        ...         [0, 0, 1, 1],
        ...     ],
        ... ])

        >>> # If we call `torch.unique(a, dim=0)`, each of the tensors `a[idx, :, :]`
        >>> # will be compared. We can see that `a[0, :, :]` and `a[2, :, :]` match
        >>> # each other, so one of them will be removed.
        >>> (a[0, :, :] == a[2, :, :]).all()
        tensor(True)
        >>> a_unique_dim0 = torch.unique(a, dim=0)
        >>> a_unique_dim0
        tensor([[[0, 0, 1, 1],
                 [0, 0, 1, 1],
                 [1, 1, 1, 1]],
                [[1, 1, 0, 0],
                 [1, 1, 0, 0],
                 [0, 0, 1, 1]]])

        >>> # Notice which sub-tensors from `a` match with the sub-tensors from
        >>> # `a_unique_dim0`:
        >>> (a_unique_dim0[0, :, :] == a[1, :, :]).all()
        tensor(True)
        >>> (a_unique_dim0[1, :, :] == a[0, :, :]).all()
        tensor(True)

        >>> # For `torch.unique(a, dim=1)`, each of the tensors `a[:, idx, :]` are
        >>> # compared. `a[:, 0, :]` and `a[:, 1, :]` match each other, so one of
        >>> # them will be removed.
        >>> (a[:, 0, :] == a[:, 1, :]).all()
        tensor(True)
        >>> torch.unique(a, dim=1)
        tensor([[[0, 0, 1, 1],
                 [1, 1, 0, 0]],
                [[1, 1, 1, 1],
                 [0, 0, 1, 1]],
                [[0, 0, 1, 1],
                 [1, 1, 0, 0]]])

        >>> # For `torch.unique(a, dim=2)`, the tensors `a[:, :, idx]` are compared.
        >>> # `a[:, :, 0]` and `a[:, :, 1]` match each other. Also, `a[:, :, 2]` and
        >>> # `a[:, :, 3]` match each other as well. So in this case, two of the
        >>> # sub-tensors will be removed.
        >>> (a[:, :, 0] == a[:, :, 1]).all()
        tensor(True)
        >>> (a[:, :, 2] == a[:, :, 3]).all()
        tensor(True)
        >>> torch.unique(a, dim=2)
        tensor([[[0, 1],
                 [0, 1],
                 [1, 0]],
                [[1, 0],
                 [1, 0],
                 [1, 1]],
                [[0, 1],
                 [0, 1],
                 [1, 0]]])
    """
    # Dispatch to __torch_function__ overrides (e.g. tensor subclasses) first.
    # Note the public `unique` (built below via boolean_dispatch) is passed as
    # the overridden function, not this private _impl.
    if has_torch_function_unary(input):
        return handle_torch_function(
            unique, (input,), input, sorted=sorted, return_inverse=return_inverse,
            return_counts=return_counts, dim=dim)

    if dim is not None:
        # Dim-wise uniqueness: slices along `dim` are the "elements" compared.
        output, inverse_indices, counts = _VF.unique_dim(
            input,
            dim,
            sorted=sorted,
            return_inverse=return_inverse,
            return_counts=return_counts,
        )
    else:
        # Element-wise uniqueness over the flattened input.
        output, inverse_indices, counts = torch._unique2(
            input,
            sorted=sorted,
            return_inverse=return_inverse,
            return_counts=return_counts,
        )
    return output, inverse_indices, counts
|
| 912 |
+
|
| 913 |
+
|
| 914 |
+
def _unique_consecutive_impl(input: Tensor, return_inverse: bool = False,
                             return_counts: bool = False,
                             dim: Optional[int] = None) -> _unique_impl_out:
    r"""Eliminates all but the first element from every consecutive group of equivalent elements.

    .. note:: This function is different from :func:`torch.unique` in the sense that this function
        only eliminates consecutive duplicate values. This semantics is similar to `std::unique`
        in C++.

    Args:
        input (Tensor): the input tensor
        return_inverse (bool): Whether to also return the indices for where
            elements in the original input ended up in the returned unique list.
        return_counts (bool): Whether to also return the counts for each unique
            element.
        dim (int): the dimension to apply unique. If ``None``, the unique of the
            flattened input is returned. default: ``None``

    Returns:
        (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing

            - **output** (*Tensor*): the output list of unique scalar elements.
            - **inverse_indices** (*Tensor*): (optional) if
              :attr:`return_inverse` is True, there will be an additional
              returned tensor (same shape as input) representing the indices
              for where elements in the original input map to in the output;
              otherwise, this function will only return a single tensor.
            - **counts** (*Tensor*): (optional) if
              :attr:`return_counts` is True, there will be an additional
              returned tensor (same shape as output or output.size(dim),
              if dim was specified) representing the number of occurrences
              for each unique value or tensor.

    Example::

        >>> x = torch.tensor([1, 1, 2, 2, 3, 1, 1, 2])
        >>> output = torch.unique_consecutive(x)
        >>> output
        tensor([1, 2, 3, 1, 2])

        >>> output, inverse_indices = torch.unique_consecutive(x, return_inverse=True)
        >>> output
        tensor([1, 2, 3, 1, 2])
        >>> inverse_indices
        tensor([0, 0, 1, 1, 2, 3, 3, 4])

        >>> output, counts = torch.unique_consecutive(x, return_counts=True)
        >>> output
        tensor([1, 2, 3, 1, 2])
        >>> counts
        tensor([2, 2, 1, 2, 1])
    """
    # __torch_function__ overrides see the public `unique_consecutive`
    # (built below via boolean_dispatch), not this private _impl.
    if has_torch_function_unary(input):
        return handle_torch_function(
            unique_consecutive, (input,), input, return_inverse=return_inverse,
            return_counts=return_counts, dim=dim)
    # Native op always returns the full 3-tuple; wrappers below trim it.
    output, inverse_indices, counts = _VF.unique_consecutive(  # type: ignore[attr-defined]
        input, return_inverse=return_inverse, return_counts=return_counts, dim=dim)
    return output, inverse_indices, counts
|
| 973 |
+
|
| 974 |
+
|
| 975 |
+
def _return_counts(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
    # type: (Tensor, bool, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]
    # boolean_dispatch target for unique(return_inverse=False, return_counts=True):
    # returns (output, counts), discarding the inverse indices.

    if has_torch_function_unary(input):
        # Overridden tensors: _unique_impl already routed through
        # handle_torch_function, which does its own unpacking.
        return _unique_impl(input, sorted, return_inverse, return_counts, dim)

    output, _, counts = _unique_impl(input, sorted, return_inverse, return_counts, dim)
    return output, counts
|
| 983 |
+
|
| 984 |
+
|
| 985 |
+
def _return_output(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
    # type: (Tensor, bool, bool, bool, Optional[int]) -> Tensor
    # boolean_dispatch target for unique(return_inverse=False, return_counts=False):
    # returns only the unique values tensor.

    if has_torch_function_unary(input):
        # Overridden tensors handle their own unpacking inside _unique_impl.
        return _unique_impl(input, sorted, return_inverse, return_counts, dim)

    output, _, _ = _unique_impl(input, sorted, return_inverse, return_counts, dim)
    return output
|
| 993 |
+
|
| 994 |
+
|
| 995 |
+
def _return_inverse(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
    # type: (Tensor, bool, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]
    # boolean_dispatch target for unique(return_inverse=True, return_counts=False):
    # returns (output, inverse_indices), discarding the counts.

    if has_torch_function_unary(input):
        # Overridden tensors handle their own unpacking inside _unique_impl.
        return _unique_impl(input, sorted, return_inverse, return_counts, dim)

    output, inverse_indices, _ = _unique_impl(input, sorted, return_inverse, return_counts, dim)
    return output, inverse_indices
|
| 1003 |
+
|
| 1004 |
+
|
| 1005 |
+
# Second-level dispatch on `return_counts` for calls with return_inverse=False:
# either (output, counts) or just output.
_return_inverse_false = boolean_dispatch(
    arg_name='return_counts',
    arg_index=3,
    default=False,
    if_true=_return_counts,
    if_false=_return_output,
    module_name=__name__,
    func_name='unique')

# Second-level dispatch on `return_counts` for calls with return_inverse=True:
# either the full 3-tuple or (output, inverse_indices).
_return_inverse_true = boolean_dispatch(
    arg_name='return_counts',
    arg_index=3,
    default=False,
    if_true=_unique_impl,
    if_false=_return_inverse,
    module_name=__name__,
    func_name='unique')

# The return type of unique depends on `return_inverse`, and `return_counts` so in order to
# resolve the output type in TorchScript we need to statically know the value of both parameters

# Top-level dispatch on `return_inverse`; combined with the two dispatchers
# above this gives TorchScript a statically-known return arity for each of the
# four (return_inverse, return_counts) combinations.
unique = boolean_dispatch(
    arg_name='return_inverse',
    arg_index=2,
    default=False,
    if_true=_return_inverse_true,
    if_false=_return_inverse_false,
    module_name=__name__,
    func_name='unique')
# Public docs come from the full implementation's docstring.
unique.__doc__ = _unique_impl.__doc__
|
| 1035 |
+
|
| 1036 |
+
|
| 1037 |
+
def _consecutive_return_counts(input, return_inverse=False, return_counts=False, dim=None):
    # type: (Tensor, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]
    # boolean_dispatch target for unique_consecutive(return_inverse=False,
    # return_counts=True): returns (output, counts).

    if has_torch_function_unary(input):
        # Overridden tensors handle their own unpacking inside the _impl.
        return _unique_consecutive_impl(input, return_inverse, return_counts, dim)

    output, _, counts = _unique_consecutive_impl(input, return_inverse, return_counts, dim)
    return output, counts
|
| 1045 |
+
|
| 1046 |
+
|
| 1047 |
+
def _consecutive_return_output(input, return_inverse=False, return_counts=False, dim=None):
    # type: (Tensor, bool, bool, Optional[int]) -> Tensor
    # boolean_dispatch target for unique_consecutive(return_inverse=False,
    # return_counts=False): returns only the deduplicated values.

    if has_torch_function_unary(input):
        # Overridden tensors handle their own unpacking inside the _impl.
        return _unique_consecutive_impl(input, return_inverse, return_counts, dim)

    output, _, _ = _unique_consecutive_impl(input, return_inverse, return_counts, dim)
    return output
|
| 1055 |
+
|
| 1056 |
+
|
| 1057 |
+
def _consecutive_return_inverse(input, return_inverse=False, return_counts=False, dim=None):
    # type: (Tensor, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]
    # boolean_dispatch target for unique_consecutive(return_inverse=True,
    # return_counts=False): returns (output, inverse_indices).

    if has_torch_function_unary(input):
        # Overridden tensors handle their own unpacking inside the _impl.
        return _unique_consecutive_impl(input, return_inverse, return_counts, dim)

    output, inverse_indices, _ = _unique_consecutive_impl(input, return_inverse, return_counts, dim)
    return output, inverse_indices
|
| 1065 |
+
|
| 1066 |
+
|
| 1067 |
+
# Second-level dispatch on `return_counts` for return_inverse=False calls.
_consecutive_return_inverse_false = boolean_dispatch(
    arg_name='return_counts',
    arg_index=1,
    default=False,
    if_true=_consecutive_return_counts,
    if_false=_consecutive_return_output,
    module_name=__name__,
    func_name='unique_consecutive')

# Second-level dispatch on `return_counts` for return_inverse=True calls.
_consecutive_return_inverse_true = boolean_dispatch(
    arg_name='return_counts',
    arg_index=1,
    default=False,
    if_true=_unique_consecutive_impl,
    if_false=_consecutive_return_inverse,
    module_name=__name__,
    func_name='unique_consecutive')

# The return type of unique depends on `return_inverse`, and `return_counts` so in order to
# resolve the output type in TorchScript we need to statically know the value of both parameters

# Top-level dispatch on `return_inverse`; mirrors the `unique` dispatch tree
# above so TorchScript can statically resolve the return arity.
unique_consecutive = boolean_dispatch(
    arg_name='return_inverse',
    arg_index=2,
    default=False,
    if_true=_consecutive_return_inverse_true,
    if_false=_consecutive_return_inverse_false,
    module_name=__name__,
    func_name='unique_consecutive')
# Public docs come from the full implementation's docstring.
unique_consecutive.__doc__ = _unique_consecutive_impl.__doc__
|
| 1097 |
+
|
| 1098 |
+
if TYPE_CHECKING:
    pass
    # There's no good way to use this type annotation without breaking JIT
    # overloads. So leave untyped for mypy for now.
else:
    # TorchScript @overload stubs: one per accepted `dims` form (int,
    # tuple-of-lists, list-of-lists, Tensor). Bodies are empty on purpose;
    # the real implementation follows below.
    @overload
    def tensordot(a, b, dims: int = 2, out: Optional[torch.Tensor] = None):
        pass

    @overload  # noqa: F811
    def tensordot(a, b, dims: Tuple[List[int], List[int]], out: Optional[torch.Tensor] = None):  # noqa: F811
        pass

    @overload  # noqa: F811
    def tensordot(a, b, dims: List[List[int]], out: Optional[torch.Tensor] = None):  # noqa: F811
        pass

    @overload  # noqa: F811
    def tensordot(a, b, dims: torch.Tensor, out: Optional[torch.Tensor] = None):  # noqa: F811
        pass
|
| 1118 |
+
|
| 1119 |
+
|
| 1120 |
+
def tensordot(a, b, dims=2, out: Optional[torch.Tensor] = None):  # noqa: F811
    r"""Returns a contraction of a and b over multiple dimensions.

    :attr:`tensordot` implements a generalized matrix product.

    Args:
      a (Tensor): Left tensor to contract
      b (Tensor): Right tensor to contract
      dims (int or Tuple[List[int], List[int]] or List[List[int]] containing two lists or Tensor): number of dimensions to
         contract or explicit lists of dimensions for :attr:`a` and
         :attr:`b` respectively

    When called with a non-negative integer argument :attr:`dims` = :math:`d`, and
    the number of dimensions of :attr:`a` and :attr:`b` is :math:`m` and :math:`n`,
    respectively, :func:`~torch.tensordot` computes

    .. math::
        r_{i_0,...,i_{m-d}, i_d,...,i_n}
          = \sum_{k_0,...,k_{d-1}} a_{i_0,...,i_{m-d},k_0,...,k_{d-1}} \times b_{k_0,...,k_{d-1}, i_d,...,i_n}.

    When called with :attr:`dims` of the list form, the given dimensions will be contracted
    in place of the last :math:`d` of :attr:`a` and the first :math:`d` of :math:`b`. The sizes
    in these dimensions must match, but :func:`~torch.tensordot` will deal with broadcasted
    dimensions.

    Examples::

        >>> a = torch.arange(60.).reshape(3, 4, 5)
        >>> b = torch.arange(24.).reshape(4, 3, 2)
        >>> torch.tensordot(a, b, dims=([1, 0], [0, 1]))
        tensor([[4400., 4730.],
                [4532., 4874.],
                [4664., 5018.],
                [4796., 5162.],
                [4928., 5306.]])

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> a = torch.randn(3, 4, 5, device='cuda')
        >>> b = torch.randn(4, 5, 6, device='cuda')
        >>> c = torch.tensordot(a, b, dims=2).cpu()
        tensor([[ 8.3504, -2.5436,  6.2922,  2.7556, -1.0732,  3.2741],
                [ 3.3161,  0.0704,  5.0187, -0.4079, -4.3126,  4.8744],
                [ 0.8223,  3.9445,  3.2168, -0.2400,  3.4117,  1.7780]])

        >>> a = torch.randn(3, 5, 4, 6)
        >>> b = torch.randn(6, 4, 5, 3)
        >>> torch.tensordot(a, b, dims=([2, 1, 3], [1, 2, 0]))
        tensor([[  7.7193,  -2.4867, -10.3204],
                [  1.5513, -14.4737,  -6.5113],
                [ -0.2850,   4.2573,  -3.5997]])
    """
    if has_torch_function_variadic(a, b):
        return handle_torch_function(tensordot, (a, b), a, b, dims=dims, out=out)

    # Reject anything that is not one of the four documented `dims` forms.
    if not isinstance(dims, (tuple, list, torch.Tensor, int, torch.SymInt)):
        raise RuntimeError("tensordot expects dims to be int or "
                           + "Tuple[List[int], List[int]] or "
                           + "List[List[int]] containing two lists, but got "
                           + f"dims={dims}")

    # Normalized per-operand contraction axes; filled by exactly one of the
    # isinstance branches below (the branches are mutually exclusive).
    dims_a: List[int] = []
    dims_b: List[int] = []

    if isinstance(dims, (tuple, list)):
        dims_a, dims_b = dims

    if isinstance(dims, torch.Tensor):
        num_elements = dims.numel()
        if num_elements > 1:
            # A 2-row tensor: row 0 lists axes of `a`, row 1 lists axes of `b`.
            assert dims.size()[0] == 2
            dims_a = torch.jit.annotate(List[int], dims[0].tolist())
            dims_b = torch.jit.annotate(List[int], dims[1].tolist())
        else:
            # A scalar tensor behaves like an integer `dims`.
            dims_val = int(dims.item())
            if dims_val < 0:
                raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}")
            dims_a = list(range(-dims_val, 0))
            dims_b = list(range(dims_val))

    if isinstance(dims, (int, torch.SymInt)):
        if dims < 0:
            raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}")
        if dims > min(a.dim(), b.dim()):
            raise RuntimeError(f"tensordot expects dims < ndim_a or ndim_b, but got dims={dims}")
        # Contract the trailing `dims` axes of `a` with the leading `dims` axes of `b`.
        dims_a = list(range(-dims, 0))
        dims_b = list(range(dims))

    if out is None:
        return _VF.tensordot(a, b, dims_a, dims_b)  # type: ignore[attr-defined]
    else:
        return _VF.tensordot(a, b, dims_a, dims_b, out=out)  # type: ignore[attr-defined]
|
| 1211 |
+
|
| 1212 |
+
|
| 1213 |
+
def cartesian_prod(*tensors: Tensor) -> Tensor:
    """Compute the cartesian product of the given sequence of 1-D tensors.

    The result enumerates, in order, every combination of one element taken
    from each input tensor — the same ordering as Python's
    `itertools.product` over the corresponding lists.

    Args:
        *tensors: any number of 1 dimensional tensors.

    Returns:
        Tensor: A tensor equivalent to converting all the input tensors into lists,
        doing `itertools.product` on these lists, and finally converting the
        resulting list into a tensor.

    Example::

        >>> import itertools
        >>> a = [1, 2, 3]
        >>> b = [4, 5]
        >>> list(itertools.product(a, b))
        [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)]
        >>> tensor_a = torch.tensor(a)
        >>> tensor_b = torch.tensor(b)
        >>> torch.cartesian_prod(tensor_a, tensor_b)
        tensor([[1, 4],
                [1, 5],
                [2, 4],
                [2, 5],
                [3, 4],
                [3, 5]])
    """
    # Variadic wrapper: fast path goes straight to the native op; tensor
    # subclasses are routed through the __torch_function__ protocol.
    if not has_torch_function(tensors):
        return _VF.cartesian_prod(tensors)  # type: ignore[attr-defined]
    return handle_torch_function(cartesian_prod, tensors, *tensors)
|
| 1246 |
+
|
| 1247 |
+
|
| 1248 |
+
def block_diag(*tensors):
    """Create a block diagonal matrix from provided tensors.

    Args:
        *tensors: One or more tensors with 0, 1, or 2 dimensions.

    Returns:
        Tensor: A 2 dimensional tensor with all the input tensors arranged in
        order such that their upper left and lower right corners are
        diagonally adjacent. All other elements are set to 0.

    Example::

        >>> import torch
        >>> A = torch.tensor([[0, 1], [1, 0]])
        >>> B = torch.tensor([[3, 4, 5], [6, 7, 8]])
        >>> C = torch.tensor(7)
        >>> D = torch.tensor([1, 2, 3])
        >>> E = torch.tensor([[4], [5], [6]])
        >>> torch.block_diag(A, B, C, D, E)
        tensor([[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 3, 4, 5, 0, 0, 0, 0, 0],
                [0, 0, 6, 7, 8, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 7, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 1, 2, 3, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 4],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 5],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 6]])
    """
    # Variadic wrapper: tensor subclasses go through __torch_function__;
    # everything else calls the native op directly.
    if not has_torch_function(tensors):
        return torch._C._VariableFunctions.block_diag(tensors)  # type: ignore[attr-defined]
    return handle_torch_function(block_diag, tensors, *tensors)
|
| 1282 |
+
|
| 1283 |
+
|
| 1284 |
+
def cdist(x1, x2, p=2., compute_mode='use_mm_for_euclid_dist_if_necessary'):
    # type: (Tensor, Tensor, float, str) -> (Tensor)
    r"""Computes batched the p-norm distance between each pair of the two collections of row vectors.

    Args:
        x1 (Tensor): input tensor of shape :math:`B \times P \times M`.
        x2 (Tensor): input tensor of shape :math:`B \times R \times M`.
        p: p value for the p-norm distance to calculate between each vector pair
            :math:`\in [0, \infty]`.
        compute_mode:
            'use_mm_for_euclid_dist_if_necessary' - will use matrix multiplication approach to calculate
            euclidean distance (p = 2) if P > 25 or R > 25
            'use_mm_for_euclid_dist' - will always use matrix multiplication approach to calculate
            euclidean distance (p = 2)
            'donot_use_mm_for_euclid_dist' - will never use matrix multiplication approach to calculate
            euclidean distance (p = 2)
            Default: use_mm_for_euclid_dist_if_necessary.

    If x1 has shape :math:`B \times P \times M` and x2 has shape :math:`B \times R \times M` then the
    output will have shape :math:`B \times P \times R`.

    This function is equivalent to `scipy.spatial.distance.cdist(input,'minkowski', p=p)`
    if :math:`p \in (0, \infty)`. When :math:`p = 0` it is equivalent to
    `scipy.spatial.distance.cdist(input, 'hamming') * M`. When :math:`p = \infty`, the closest
    scipy function is `scipy.spatial.distance.cdist(xn, lambda x, y: np.abs(x - y).max())`.

    Example:

        >>> a = torch.tensor([[0.9041, 0.0196], [-0.3108, -2.4423], [-0.4821, 1.059]])
        >>> b = torch.tensor([[-2.1763, -0.4713], [-0.6986, 1.3702]])
        >>> torch.cdist(a, b, p=2)
        tensor([[3.1193, 2.0959],
                [2.7138, 3.8322],
                [2.2830, 0.3791]])
    """
    if has_torch_function_variadic(x1, x2):
        return handle_torch_function(
            cdist, (x1, x2), x1, x2, p=p, compute_mode=compute_mode)
    # The native op encodes the euclidean-via-matmul policy as an optional
    # integer: None = heuristic, 1 = always matmul, 2 = never matmul.
    if compute_mode == 'use_mm_for_euclid_dist_if_necessary':
        return _VF.cdist(x1, x2, p, None)  # type: ignore[attr-defined]
    if compute_mode == 'use_mm_for_euclid_dist':
        return _VF.cdist(x1, x2, p, 1)  # type: ignore[attr-defined]
    if compute_mode == 'donot_use_mm_for_euclid_dist':
        return _VF.cdist(x1, x2, p, 2)  # type: ignore[attr-defined]
    raise ValueError(f"{compute_mode} is not a valid value for compute_mode")
|
| 1337 |
+
|
| 1338 |
+
|
| 1339 |
+
def atleast_1d(*tensors):
    r"""
    Returns a 1-dimensional view of each input tensor with zero dimensions.
    Input tensors with one or more dimensions are returned as-is.

    Args:
        input (Tensor or list of Tensors)

    Returns:
        output (Tensor or tuple of Tensors)

    Example::

        >>> x = torch.arange(2)
        >>> torch.atleast_1d(x)
        tensor([0, 1])
        >>> x = torch.tensor(1.)
        >>> torch.atleast_1d(x)
        tensor([1.])
        >>> torch.atleast_1d((torch.tensor(0.5), torch.tensor(1.)))
        (tensor([0.5000]), tensor([1.]))
    """
    # Variadic wrapper: a lone argument is forwarded as a single tensor
    # (returning a tensor); several arguments are forwarded as a tuple
    # (returning a tuple of tensors).
    if has_torch_function(tensors):
        return handle_torch_function(atleast_1d, tensors, *tensors)
    arg = tensors[0] if len(tensors) == 1 else tensors
    return _VF.atleast_1d(arg)  # type: ignore[attr-defined]
|
| 1373 |
+
|
| 1374 |
+
|
| 1375 |
+
def atleast_2d(*tensors):
    r"""
    Returns a 2-dimensional view of each input tensor with zero dimensions.
    Input tensors with two or more dimensions are returned as-is.

    Args:
        input (Tensor or list of Tensors)

    Returns:
        output (Tensor or tuple of Tensors)

    Example::

        >>> x = torch.tensor(1.)
        >>> torch.atleast_2d(x)
        tensor([[1.]])
        >>> x = torch.arange(4).view(2, 2)
        >>> torch.atleast_2d(x)
        tensor([[0, 1],
                [2, 3]])
        >>> torch.atleast_2d((torch.tensor(0.5), torch.tensor(1.)))
        (tensor([[0.5000]]), tensor([[1.]]))
    """
    # Variadic wrapper: a lone argument is forwarded as a single tensor
    # (returning a tensor); several arguments are forwarded as a tuple
    # (returning a tuple of tensors).
    if has_torch_function(tensors):
        return handle_torch_function(atleast_2d, tensors, *tensors)
    arg = tensors[0] if len(tensors) == 1 else tensors
    return _VF.atleast_2d(arg)  # type: ignore[attr-defined]
|
| 1411 |
+
|
| 1412 |
+
|
| 1413 |
+
def atleast_3d(*tensors):
    r"""
    Returns a 3-dimensional view of each input tensor with zero dimensions.
    Input tensors with three or more dimensions are returned as-is.

    Args:
        input (Tensor or list of Tensors)

    Returns:
        output (Tensor or tuple of Tensors)

    Example:

        >>> x = torch.tensor(0.5)
        >>> torch.atleast_3d(x)
        tensor([[[0.5000]]])
        >>> y = torch.arange(4).view(2, 2)
        >>> torch.atleast_3d(y).shape
        torch.Size([2, 2, 1])
        >>> x = torch.tensor(1).view(1, 1, 1)
        >>> torch.atleast_3d(x)
        tensor([[[1]]])
        >>> torch.atleast_3d((torch.tensor(0.5), torch.tensor(1.)))
        (tensor([[[0.5000]]]), tensor([[[1.]]]))
    """
    # Variadic wrapper: a lone argument is forwarded as a single tensor
    # (returning a tensor); several arguments are forwarded as a tuple
    # (returning a tuple of tensors).
    if has_torch_function(tensors):
        return handle_torch_function(atleast_3d, tensors, *tensors)
    arg = tensors[0] if len(tensors) == 1 else tensors
    return _VF.atleast_3d(arg)  # type: ignore[attr-defined]
|
| 1457 |
+
|
| 1458 |
+
|
| 1459 |
+
if TYPE_CHECKING:
    pass
    # There's no good way to use this type annotation; cannot rename norm() to
    # _norm_impl() in a way that doesn't break JIT overloads. So leave untyped
    # for mypy for now.
    # def norm(input: Tensor,
    #          p: Optional[Union[str, Number]] = "fro",
    #          dim: Optional[Union[int, List[int]]] = None,
    #          keepdim: bool = False,
    #          out: Optional[Tensor] = None,
    #          dtype: _dtype = None) -> Tensor:
    #     return _norm_impl(input, p, dim, keepdim, out, dtype)
else:
    # TODO: type dim as BroadcastingList when
    # https://github.com/pytorch/pytorch/issues/33782 is fixed
    # TorchScript @overload stubs covering the four (p: str|number, dim:
    # list|int) combinations; bodies are intentionally empty.
    @overload
    def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None):
        # type: (Tensor, str, Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor
        pass

    @overload  # noqa: F811
    def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None):  # noqa: F811
        # type: (Tensor, Optional[number], Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor
        pass

    @overload  # noqa: F811
    def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None):  # noqa: F811
        # type: (Tensor, Optional[number], Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor
        pass

    @overload  # noqa: F811
    def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None):  # noqa: F811
        # type: (Tensor, str, Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor
        pass
|
| 1493 |
+
|
| 1494 |
+
|
| 1495 |
+
def norm(input, p: Optional[Union[float, str]] = "fro", dim=None, keepdim=False, out=None, dtype=None):  # noqa: F811
    r"""Returns the matrix norm or vector norm of a given tensor.

    .. warning::

        torch.norm is deprecated and may be removed in a future PyTorch release.
        Its documentation and behavior may be incorrect, and it is no longer
        actively maintained.

        Use :func:`torch.linalg.vector_norm` when computing vector norms and
        :func:`torch.linalg.matrix_norm` when computing matrix norms.
        For a function with a similar behavior as this one see :func:`torch.linalg.norm`.
        Note, however, the signature for these functions is slightly different than the
        signature for ``torch.norm``.

    Args:
        input (Tensor): The input tensor. Its data type must be either a floating
            point or complex type. For complex inputs, the norm is calculated using the
            absolute value of each element. If the input is complex and neither
            :attr:`dtype` nor :attr:`out` is specified, the result's data type will
            be the corresponding floating point type (e.g. float if :attr:`input` is
            complexfloat).

        p (int, float, inf, -inf, 'fro', 'nuc', optional): the order of norm. Default: ``'fro'``
            The following norms can be calculated:

            ======  ==============  ==========================
            ord     matrix norm     vector norm
            ======  ==============  ==========================
            'fro'   Frobenius norm  --
            'nuc'   nuclear norm    --
            Number  --              sum(abs(x)**ord)**(1./ord)
            ======  ==============  ==========================

            The vector norm can be calculated across any number of dimensions.
            The corresponding dimensions of :attr:`input` are flattened into
            one dimension, and the norm is calculated on the flattened
            dimension.

            Frobenius norm produces the same result as ``p=2`` in all cases
            except when :attr:`dim` is a list of three or more dims, in which
            case Frobenius norm throws an error.

            Nuclear norm can only be calculated across exactly two dimensions.

        dim (int, tuple of ints, list of ints, optional):
            Specifies which dimension or dimensions of :attr:`input` to
            calculate the norm across. If :attr:`dim` is ``None``, the norm will
            be calculated across all dimensions of :attr:`input`. If the norm
            type indicated by :attr:`p` does not support the specified number of
            dimensions, an error will occur.
        keepdim (bool, optional): whether the output tensors have :attr:`dim`
            retained or not. Ignored if :attr:`dim` = ``None`` and
            :attr:`out` = ``None``. Default: ``False``
        out (Tensor, optional): the output tensor. Ignored if
            :attr:`dim` = ``None`` and :attr:`out` = ``None``.
        dtype (:class:`torch.dtype`, optional): the desired data type of
            returned tensor. If specified, the input tensor is casted to
            :attr:`dtype` while performing the operation. Default: None.

    .. note::
        Even though ``p='fro'`` supports any number of dimensions, the true
        mathematical definition of Frobenius norm only applies to tensors with
        exactly two dimensions. :func:`torch.linalg.matrix_norm` with ``ord='fro'``
        aligns with the mathematical definition, since it can only be applied across
        exactly two dimensions.

    Example::

        >>> import torch
        >>> a = torch.arange(9, dtype= torch.float) - 4
        >>> b = a.reshape((3, 3))
        >>> torch.norm(a)
        tensor(7.7460)
        >>> torch.norm(b)
        tensor(7.7460)
        >>> torch.norm(a, float('inf'))
        tensor(4.)
        >>> torch.norm(b, float('inf'))
        tensor(4.)
        >>> c = torch.tensor([[ 1, 2, 3], [-1, 1, 4]] , dtype=torch.float)
        >>> torch.norm(c, dim=0)
        tensor([1.4142, 2.2361, 5.0000])
        >>> torch.norm(c, dim=1)
        tensor([3.7417, 4.2426])
        >>> torch.norm(c, p=1, dim=1)
        tensor([6., 6.])
        >>> d = torch.arange(8, dtype=torch.float).reshape(2, 2, 2)
        >>> torch.norm(d, dim=(1, 2))
        tensor([ 3.7417, 11.2250])
        >>> torch.norm(d[0, :, :]), torch.norm(d[1, :, :])
        (tensor(3.7417), tensor(11.2250))
    """

    if has_torch_function_unary(input):
        return handle_torch_function(
            norm, (input,), input, p=p, dim=dim, keepdim=keepdim, out=out, dtype=dtype)

    # NB. All the repeated code and weird python is to please TorchScript.
    # For a more compact implementation see the relevant function in `_refs/__init__.py`

    # Fast path: for dense (strided) tensors on the standard backends, forward
    # directly to the maintained torch.linalg implementations.
    # We don't do this for MPS or sparse tensors
    if input.layout == torch.strided and input.device.type in \
            ("cpu", "cuda", "meta", torch.utils.backend_registration._privateuse1_backend_name):
        # Normalize `dim`: a single int becomes a one-element list; None stays None.
        if dim is not None:
            if isinstance(dim, (int, torch.SymInt)):
                _dim = [dim]
            else:
                _dim = dim
        else:
            _dim = None  # type: ignore[assignment]

        if isinstance(p, str):
            # 'fro' over <= 2 dims is mathematically the 2-norm of the flattened dims.
            if p == "fro" and (dim is None or isinstance(dim, (int, torch.SymInt)) or len(dim) <= 2):
                if out is None:
                    return torch.linalg.vector_norm(input, 2, _dim, keepdim, dtype=dtype)
                else:
                    return torch.linalg.vector_norm(input, 2, _dim, keepdim, dtype=dtype, out=out)

            # Here we either call the nuclear norm, or we call matrix_norm with some arguments
            # that will throw an error
            if _dim is None:
                _dim = list(range(input.ndim))
            if out is None:
                return torch.linalg.matrix_norm(input, p, _dim, keepdim, dtype=dtype)
            else:
                return torch.linalg.matrix_norm(input, p, _dim, keepdim, dtype=dtype, out=out)
        else:
            # NB. p should be Union[str, number], not Optional!
            _p = 2.0 if p is None else p
            if out is None:
                return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype)
            else:
                return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype, out=out)

    # Slow path (sparse tensors, MPS, other layouts): dispatch to the legacy
    # _VF kernels with the TorchScript-friendly branching below.
    ndim = input.dim()

    # catch default case
    if dim is None and out is None and dtype is None and p is not None:
        if isinstance(p, str):
            if p == "fro":
                return _VF.frobenius_norm(input, dim=(), keepdim=keepdim)
        if not isinstance(p, str):
            _dim = [i for i in range(ndim)]  # noqa: C416 TODO: rewrite as list(range(m))
            return _VF.norm(input, p, dim=_dim, keepdim=keepdim)  # type: ignore[attr-defined]

    # TODO: when https://github.com/pytorch/pytorch/issues/33782 is fixed
    # remove the overloads where dim is an int and replace with BraodcastingList1
    # and remove next four lines, replace _dim with dim
    if dim is not None:
        if isinstance(dim, (int, torch.SymInt)):
            _dim = [dim]
        else:
            _dim = dim
    else:
        _dim = None  # type: ignore[assignment]

    if isinstance(p, str):
        if p == "fro":
            if dtype is not None:
                raise ValueError("dtype argument is not supported in frobenius norm")

            if _dim is None:
                _dim = list(range(ndim))
            if out is None:
                return _VF.frobenius_norm(input, _dim, keepdim=keepdim)  # type: ignore[arg-type]
            else:
                return _VF.frobenius_norm(input, _dim, keepdim=keepdim, out=out)  # type: ignore[arg-type]
        elif p == "nuc":
            if dtype is not None:
                raise ValueError("dtype argument is not supported in nuclear norm")
            if _dim is None:
                if out is None:
                    return _VF.nuclear_norm(input, keepdim=keepdim)  # type: ignore[arg-type]
                else:
                    return _VF.nuclear_norm(input, keepdim=keepdim, out=out)  # type: ignore[arg-type]
            else:
                if out is None:
                    return _VF.nuclear_norm(input, _dim, keepdim=keepdim)  # type: ignore[arg-type]
                else:
                    return _VF.nuclear_norm(input, _dim, keepdim=keepdim, out=out)  # type: ignore[arg-type]
        raise RuntimeError(f"only valid string values are 'fro' and 'nuc', found {p}")
    else:
        if _dim is None:
            _dim = list(range(ndim))

        if out is None:
            if dtype is None:
                return _VF.norm(input, p, _dim, keepdim=keepdim)  # type: ignore[attr-defined]
            else:
                return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype)  # type: ignore[attr-defined]
        else:
            if dtype is None:
                return _VF.norm(input, p, _dim, keepdim=keepdim, out=out)  # type: ignore[attr-defined]
            else:
                return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype, out=out)  # type: ignore[attr-defined]
|
| 1691 |
+
|
| 1692 |
+
def unravel_index(indices: Tensor, shape: Union[int, Sequence[int], torch.Size]) -> List[Tensor]:
    r"""Converts a tensor of flat indices into a tuple of coordinate tensors that
    index into an arbitrary tensor of the specified shape.

    Args:
        indices (Tensor): An integer tensor containing indices into the
            flattened version of an arbitrary tensor of shape :attr:`shape`.
            All elements must be in the range ``[0, prod(shape) - 1]``.

        shape (int, sequence of ints, or torch.Size): The shape of the arbitrary
            tensor. All elements must be non-negative.

    Returns:
        tuple of Tensors: Each ``i``-th tensor in the output corresponds with
        dimension ``i`` of :attr:`shape`. Each tensor has the same shape as
        ``indices`` and contains one index into dimension ``i`` for each of the
        flat indices given by ``indices``.

    Example::

        >>> import torch
        >>> torch.unravel_index(torch.tensor(4), (3, 2))
        (tensor(2),
         tensor(0))

        >>> torch.unravel_index(torch.tensor([4, 1]), (3, 2))
        (tensor([2, 0]),
         tensor([0, 1]))

        >>> torch.unravel_index(torch.tensor([0, 1, 2, 3, 4, 5]), (3, 2))
        (tensor([0, 0, 1, 1, 2, 2]),
         tensor([0, 1, 0, 1, 0, 1]))

        >>> torch.unravel_index(torch.tensor([1234, 5678]), (10, 10, 10, 10))
        (tensor([1, 5]),
         tensor([2, 6]),
         tensor([3, 7]),
         tensor([4, 8]))

        >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (10, 10, 10, 10))
        (tensor([[1], [5]]),
         tensor([[2], [6]]),
         tensor([[3], [7]]),
         tensor([[4], [8]]))

        >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (100, 100))
        (tensor([[12], [56]]),
         tensor([[34], [78]]))
    """
    if has_torch_function_unary(indices):
        return handle_torch_function(
            unravel_index, (indices,), indices, shape=shape)
    # _unravel_index stacks the coordinates along a trailing dimension;
    # unbind(-1) splits them into one tensor per dimension of `shape`.
    res_tensor = _unravel_index(indices, shape)
    return res_tensor.unbind(-1)
|
| 1746 |
+
|
| 1747 |
+
def _unravel_index(indices: Tensor, shape: Union[int, Sequence[int]]) -> Tensor:
|
| 1748 |
+
torch._check_type(
|
| 1749 |
+
not indices.is_complex() and not indices.is_floating_point() and not indices.dtype == torch.bool,
|
| 1750 |
+
lambda: f"expected 'indices' to be integer dtype, but got {indices.dtype}")
|
| 1751 |
+
|
| 1752 |
+
torch._check_type(
|
| 1753 |
+
isinstance(shape, (int, torch.SymInt, Sequence)),
|
| 1754 |
+
lambda: f"expected 'shape' to be int or sequence of ints, but got {type(shape)}")
|
| 1755 |
+
|
| 1756 |
+
if isinstance(shape, (int, torch.SymInt)):
|
| 1757 |
+
shape = torch.Size([shape])
|
| 1758 |
+
else:
|
| 1759 |
+
for dim in shape:
|
| 1760 |
+
torch._check_type(
|
| 1761 |
+
isinstance(dim, (int, torch.SymInt)),
|
| 1762 |
+
lambda: f"expected 'shape' sequence to only contain ints, but got {type(dim)}")
|
| 1763 |
+
shape = torch.Size(shape)
|
| 1764 |
+
|
| 1765 |
+
torch._check_value(
|
| 1766 |
+
all(dim >= 0 for dim in shape),
|
| 1767 |
+
lambda: f"'shape' cannot have negative values, but got {tuple(shape)}")
|
| 1768 |
+
|
| 1769 |
+
coefs = list(reversed(list(itertools.accumulate(reversed(shape[1:] + torch.Size([1])), func=operator.mul))))
|
| 1770 |
+
return indices.unsqueeze(-1).floor_divide(
|
| 1771 |
+
torch.tensor(coefs, device=indices.device, dtype=torch.int64)
|
| 1772 |
+
) % torch.tensor(shape, device=indices.device, dtype=torch.int64)
|
| 1773 |
+
|
| 1774 |
+
def chain_matmul(*matrices, out=None):
    r"""Computes the matrix product of :math:`N` 2-D tensors.

    The multiplication order is chosen with the matrix chain-order algorithm
    (`[CLRS]`_) so that the total number of scalar operations is minimal.
    :math:`N` must be at least 2; with exactly two matrices this is a plain
    matrix-matrix product, and a single matrix is returned unchanged.

    .. warning::

        :func:`torch.chain_matmul` is deprecated and will be removed in a future
        PyTorch release. Use :func:`torch.linalg.multi_dot` instead, which
        accepts a list of two or more tensors rather than multiple arguments.

    Args:
        matrices (Tensors...): a sequence of 2 or more 2-D tensors whose product
            is to be determined.
        out (Tensor, optional): the output tensor. Ignored if :attr:`out` = ``None``.

    Returns:
        Tensor: if the :math:`i^{th}` tensor was of dimensions
        :math:`p_{i} \times p_{i + 1}`, the product has dimensions
        :math:`p_{1} \times p_{N + 1}`.

    Example::

        >>> # xdoctest: +SKIP
        >>> a, b = torch.randn(3, 4), torch.randn(4, 5)
        >>> c, d = torch.randn(5, 6), torch.randn(6, 7)
        >>> # will raise a deprecation warning
        >>> torch.chain_matmul(a, b, c, d).shape
        torch.Size([3, 7])

    .. _`[CLRS]`: https://mitpress.mit.edu/books/introduction-algorithms-third-edition
    """
    # Variadic wrapper: give tensor-like subclasses a chance to intercept via
    # __torch_function__ before any real work happens.
    if has_torch_function(matrices):
        return handle_torch_function(chain_matmul, matrices, *matrices)

    if out is not None:
        return _VF.chain_matmul(matrices, out=out)  # type: ignore[attr-defined]
    return _VF.chain_matmul(matrices)  # type: ignore[attr-defined]
|
| 1819 |
+
|
| 1820 |
+
|
| 1821 |
+
def _lu_impl(A, pivot=True, get_infos=False, out=None):
    # type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor]
    r"""Computes the LU factorization of a matrix or batches of matrices
    :attr:`A`. Returns a tuple containing the LU factorization and
    pivots of :attr:`A`. Pivoting is done if :attr:`pivot` is set to
    ``True``.

    .. warning::

        :func:`torch.lu` is deprecated in favor of :func:`torch.linalg.lu_factor`
        and :func:`torch.linalg.lu_factor_ex`. :func:`torch.lu` will be removed in a
        future PyTorch release.
        ``LU, pivots, info = torch.lu(A, compute_pivots)`` should be replaced with

        .. code:: python

            LU, pivots = torch.linalg.lu_factor(A, compute_pivots)

        ``LU, pivots, info = torch.lu(A, compute_pivots, get_infos=True)`` should be replaced with

        .. code:: python

            LU, pivots, info = torch.linalg.lu_factor_ex(A, compute_pivots)

    .. note::
        * The returned permutation matrix for every matrix in the batch is
          represented by a 1-indexed vector of size ``min(A.shape[-2], A.shape[-1])``.
          ``pivots[i] == j`` represents that in the ``i``-th step of the algorithm,
          the ``i``-th row was permuted with the ``j-1``-th row.
        * LU factorization with :attr:`pivot` = ``False`` is not available
          for CPU, and attempting to do so will throw an error. However,
          LU factorization with :attr:`pivot` = ``False`` is available for
          CUDA.
        * This function does not check if the factorization was successful
          or not if :attr:`get_infos` is ``True`` since the status of the
          factorization is present in the third element of the return tuple.
        * In the case of batches of square matrices with size less or equal
          to 32 on a CUDA device, the LU factorization is repeated for
          singular matrices due to the bug in the MAGMA library
          (see magma issue 13).
        * ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`.

    .. warning::
        The gradients of this function will only be finite when :attr:`A` is full rank.
        This is because the LU decomposition is only differentiable at full rank matrices.
        Furthermore, if :attr:`A` is close to not being full rank,
        the gradient will be numerically unstable as it depends on the computation of :math:`L^{-1}` and :math:`U^{-1}`.

    Args:
        A (Tensor): the tensor to factor of size :math:`(*, m, n)`
        pivot (bool, optional): controls whether pivoting is done. Default: ``True``
        get_infos (bool, optional): if set to ``True``, returns an info IntTensor.
                                    Default: ``False``
        out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``,
                               then the elements in the tuple are Tensor, IntTensor,
                               and IntTensor. If :attr:`get_infos` is ``False``, then the
                               elements in the tuple are Tensor, IntTensor. Default: ``None``

    Returns:
        (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing

            - **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)`

            - **pivots** (*IntTensor*): the pivots of size :math:`(*, \text{min}(m, n))`.
              ``pivots`` stores all the intermediate transpositions of rows.
              The final permutation ``perm`` could be reconstructed by
              applying ``swap(perm[i], perm[pivots[i] - 1])`` for ``i = 0, ..., pivots.size(-1) - 1``,
              where ``perm`` is initially the identity permutation of :math:`m` elements
              (essentially this is what :func:`torch.lu_unpack` is doing).

            - **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of
              size :math:`(*)` where non-zero values indicate whether factorization for the matrix or
              each minibatch has succeeded or failed

    Example::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> A = torch.randn(2, 3, 3)
        >>> A_LU, pivots = torch.lu(A)
        >>> A_LU
        tensor([[[ 1.3506,  2.5558, -0.0816],
                 [ 0.1684,  1.1551,  0.1940],
                 [ 0.1193,  0.6189, -0.5497]],

                [[ 0.4526,  1.2526, -0.3285],
                 [-0.7988,  0.7175, -0.9701],
                 [ 0.2634, -0.9255, -0.3459]]])
        >>> pivots
        tensor([[ 3,  3,  3],
                [ 3,  3,  3]], dtype=torch.int32)
        >>> A_LU, pivots, info = torch.lu(A, get_infos=True)
        >>> if info.nonzero().size(0) == 0:
        ...     print('LU factorization succeeded for all samples!')
        LU factorization succeeded for all samples!
    """
    # NOTE: this docstring is exposed publicly via `lu.__doc__` below.
    # If get_infos is True, then we don't need to check for errors and vice versa
    return torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos))
|
| 1919 |
+
|
| 1920 |
+
if TYPE_CHECKING:
    # For static type checkers, accept any sequence of tensors.
    _ListOrSeq = Sequence[Tensor]
else:
    # At runtime use List — presumably for TorchScript compatibility,
    # which does not accept Sequence in annotations. TODO confirm.
    _ListOrSeq = List[Tensor]
|
| 1924 |
+
|
| 1925 |
+
|
| 1926 |
+
def _check_list_size(out_len: int, get_infos: bool, out: _ListOrSeq) -> None:
    """Validate the user-supplied ``out`` argument of :func:`lu`.

    Args:
        out_len: ``len(out)`` as computed by the caller.
        get_infos: whether the caller expects the extra ``infos`` tensor.
        out: the ``out`` argument itself; must be a tuple or list.

    Raises:
        TypeError: if ``out`` is not a tuple/list, or has the wrong length.
    """
    # Check the container type first: the original ordering raised the
    # "expected tuple of N elements" message for a wrong-sized non-sequence,
    # which misreports the actual problem.
    if not isinstance(out, (tuple, list)):
        raise TypeError(f"argument 'out' must be tuple of Tensors, not {type(out).__name__}")
    # (LU, pivots) always, plus the optional infos tensor.
    expected_len = 2 + int(get_infos)
    if out_len != expected_len:
        raise TypeError(f"expected tuple of {expected_len} elements but got {out_len}")
|
| 1932 |
+
|
| 1933 |
+
|
| 1934 |
+
def _lu_with_infos(A, pivot=True, get_infos=False, out=None):
    # type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor, Tensor]]) -> Tuple[Tensor, Tensor, Tensor]
    """`get_infos=True` branch of :func:`lu`: returns (A_LU, pivots, infos)."""
    # Let tensor-like subclasses intercept torch.lu before any real work.
    if has_torch_function_unary(A):
        return handle_torch_function(
            lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out)
    result = _lu_impl(A, pivot, get_infos, out)
    if out is None:
        return result  # A_LU, pivots, infos
    # Copy the freshly computed factorization into the caller-provided tensors.
    _check_list_size(len(out), get_infos, out)
    for i in range(len(out)):
        out[i].resize_as_(result[i]).copy_(result[i])
    return out
|
| 1947 |
+
|
| 1948 |
+
|
| 1949 |
+
def _lu_no_infos(A, pivot=True, get_infos=False, out=None):
    # type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tensor]
    """`get_infos=False` branch of :func:`lu`: returns only (A_LU, pivots)."""
    # Let tensor-like subclasses intercept torch.lu before any real work.
    if has_torch_function_unary(A):
        return handle_torch_function(
            lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out)
    result = _lu_impl(A, pivot, get_infos, out)
    if out is None:
        # Drop the trailing infos tensor: callers asked only for LU + pivots.
        return result[0], result[1]
    # Copy the freshly computed factorization into the caller-provided tensors.
    _check_list_size(len(out), get_infos, out)
    for i in range(len(out)):
        out[i].resize_as_(result[i]).copy_(result[i])
    return out
|
| 1963 |
+
|
| 1964 |
+
# The return type of lu depends on `get_infos`, so in order to resolve the output type
# of lu in TorchScript we need to statically know the value of `get_infos`.
# boolean_dispatch builds a wrapper that routes on the `get_infos` argument
# (positional index 2, default False) to one of the two implementations above.
lu = boolean_dispatch(
    arg_name='get_infos',
    arg_index=2,
    default=False,
    if_true=_lu_with_infos,
    if_false=_lu_no_infos,
    module_name=__name__,
    func_name='lu')
# The dispatcher has no docstring of its own; reuse _lu_impl's full one.
lu.__doc__ = _lu_impl.__doc__
|
| 1975 |
+
|
| 1976 |
+
|
| 1977 |
+
def align_tensors(*tensors):
    """Placeholder for named-tensor alignment; always raises RuntimeError."""
    raise RuntimeError('`align_tensors` not yet implemented.')
|
evalkit_internvl/lib/python3.10/site-packages/torch/overrides.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/random.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
from typing import Generator
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
from torch._C import default_generator
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def set_rng_state(new_state: torch.Tensor) -> None:
    r"""Sets the random number generator state.

    .. note: This function only works for CPU. For CUDA, please use
        torch.manual_seed(seed), which works for both CPU and CUDA.

    Args:
        new_state (torch.ByteTensor): The desired state
    """
    # Delegates to the process-wide default (CPU) generator.
    default_generator.set_state(new_state)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def get_rng_state() -> torch.Tensor:
    r"""Returns the random number generator state as a `torch.ByteTensor`."""
    # State of the process-wide default (CPU) generator only; device
    # generators (CUDA etc.) have their own get_rng_state functions.
    return default_generator.get_state()
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def manual_seed(seed) -> torch._C.Generator:
    r"""Sets the seed for generating random numbers. Returns a
    `torch.Generator` object.

    Seeds the default CPU generator and, where available, the CUDA, MPS, XPU
    and privateuse1 custom-device backends as well.

    Args:
        seed (int): The desired seed. Value must be within the inclusive range
            `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
            is raised. Negative inputs are remapped to positive values with the formula
            `0xffff_ffff_ffff_ffff + seed`.
    """
    seed = int(seed)
    import torch.cuda

    # Skip device seeding inside a "bad fork" (a child process forked after
    # the device context was initialized), where touching the device is unsafe.
    if not torch.cuda._is_in_bad_fork():
        torch.cuda.manual_seed_all(seed)

    import torch.mps
    if not torch.mps._is_in_bad_fork():
        torch.mps.manual_seed(seed)

    # XPU support may be absent from this build; guard with hasattr.
    if hasattr(torch, 'xpu') and not torch.xpu._is_in_bad_fork():
        torch.xpu.manual_seed_all(seed)

    # Also forward the seed to any registered privateuse1 custom backend.
    _seed_custom_device(seed)

    return default_generator.manual_seed(seed)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def seed() -> int:
    r"""Sets the seed for generating random numbers to a non-deterministic
    random number. Returns a 64 bit number used to seed the RNG.

    Mirrors :func:`manual_seed`'s device fan-out (CUDA, MPS, XPU, custom
    backend), but lets the default generator draw the seed itself.
    """
    seed = default_generator.seed()
    import torch.cuda

    # Skip device seeding inside a "bad fork" (a child process forked after
    # the device context was initialized), where touching the device is unsafe.
    if not torch.cuda._is_in_bad_fork():
        torch.cuda.manual_seed_all(seed)

    import torch.mps
    if not torch.mps._is_in_bad_fork():
        torch.mps.manual_seed(seed)

    # XPU support may be absent from this build; guard with hasattr.
    if hasattr(torch, 'xpu') and not torch.xpu._is_in_bad_fork():
        torch.xpu.manual_seed_all(seed)

    # Also forward the seed to any registered privateuse1 custom backend.
    _seed_custom_device(seed)

    return seed
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def _seed_custom_device(seed) -> None:
|
| 77 |
+
r"""Sets the seed to generate random numbers for custom device.
|
| 78 |
+
|
| 79 |
+
Args:
|
| 80 |
+
seed (int): The desired seed.
|
| 81 |
+
|
| 82 |
+
See [Note: support the custom device with privateuse1]
|
| 83 |
+
"""
|
| 84 |
+
seed = int(seed)
|
| 85 |
+
custom_backend_name = torch._C._get_privateuse1_backend_name()
|
| 86 |
+
if hasattr(torch, custom_backend_name):
|
| 87 |
+
custom_device_mod = getattr(torch, custom_backend_name)
|
| 88 |
+
_bad_fork_name = "_is_in_bad_fork"
|
| 89 |
+
_seed_all_name = "manual_seed_all"
|
| 90 |
+
if hasattr(custom_device_mod, _bad_fork_name) and hasattr(custom_device_mod, _seed_all_name):
|
| 91 |
+
if not getattr(custom_device_mod, _bad_fork_name)():
|
| 92 |
+
getattr(custom_device_mod, _seed_all_name)(seed)
|
| 93 |
+
else:
|
| 94 |
+
message = f"Set seed for `{custom_backend_name}` device does not take effect, please add API's "
|
| 95 |
+
message += f"`{_bad_fork_name}` and `{_seed_all_name}` to `{custom_backend_name}` device module."
|
| 96 |
+
warnings.warn(message, UserWarning, stacklevel=3)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def initial_seed() -> int:
    r"""Returns the initial seed for generating random numbers as a
    Python `long`.
    """
    # Delegates to the process-wide default (CPU) generator.
    return default_generator.initial_seed()
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
# Guards fork_rng()'s one-time "many devices" warning: fork_rng checks this
# flag before warning — presumably setting it to True after the first warning
# fires (the assignment is outside this view; confirm in fork_rng's body).
_fork_rng_warned_already = False
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@contextlib.contextmanager
def fork_rng(devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices", device_type="cuda") -> Generator:
    """
    Forks the RNG, so that when you return, the RNG is reset
    to the state that it was previously in.

    Args:
        devices (iterable of Device IDs): devices for which to fork
            the RNG. CPU RNG state is always forked. By default, :meth:`fork_rng` operates
            on all devices, but will emit a warning if your machine has a lot
            of devices, since this function will run very slowly in that case.
            If you explicitly specify devices, this warning will be suppressed
        enabled (bool): if ``False``, the RNG is not forked.  This is a convenience
            argument for easily disabling the context manager without having
            to delete it and unindent your Python code under it.
        device_type (str): device type str, default is `cuda`. As for custom device,
            see details in [Note: support the custom device with privateuse1]
    """

    device_type = torch.device(device_type).type
    device_mod = getattr(torch, device_type, None)
    if device_mod is None:
        raise RuntimeError(f"torch has no module of `{device_type}`, you should register " +
                           "a module by `torch._register_device_module`.")
    global _fork_rng_warned_already

    # Internal arguments:
    #   _caller: the function which called fork_rng, which the user used
    #   _devices_kw: the devices keyword of _caller

    if not enabled:
        yield
        return

    if devices is None:
        num_devices = device_mod.device_count()
        if num_devices > 1 and not _fork_rng_warned_already:
            # Fix: the "set ..._VISIBLE_DEVICES=" line below was a plain string
            # containing the literal text "device.upper()" — it must be an
            # f-string interpolating device_type like its sibling lines.
            message = (f"{device_type.upper()} reports that you have {num_devices} available devices, and "
                       f"you have used {_caller} without explicitly specifying which devices are being used. "
                       f"For safety, we initialize *every* {device_type.upper()} device by default, which can "
                       f"be quite slow if you have a lot of {device_type.upper()}s. If you know that you are only"
                       f" making use of a few {device_type.upper()} devices, set the environment variable "
                       f"{device_type.upper()}_VISIBLE_DEVICES or the '{_devices_kw}' keyword argument of {_caller} "
                       "with the set of devices you are actually using. For example, if you are using CPU only, "
                       f"set {device_type.upper()}_VISIBLE_DEVICES= or devices=[]; if you are using device 0 only, "
                       f"set {device_type.upper()}_VISIBLE_DEVICES=0 or devices=[0]. To initialize all devices "
                       f"and suppress this warning, set the '{_devices_kw}' keyword argument to "
                       f"`range(torch.{device_type}.device_count())`.")
            warnings.warn(message)
            _fork_rng_warned_already = True
        devices = list(range(num_devices))
    else:
        # Protect against user passing us a generator; we need to traverse this
        # multiple times but a generator will be exhausted upon first traversal
        devices = list(devices)

    # Snapshot CPU state plus the per-device state for each requested device.
    cpu_rng_state = torch.get_rng_state()
    device_rng_states = []
    for device in devices:
        device_rng_states.append(device_mod.get_rng_state(device))

    try:
        yield
    finally:
        # Restore even if the body raised.
        torch.set_rng_state(cpu_rng_state)
        for device, device_rng_state in zip(devices, device_rng_states):
            device_mod.set_rng_state(device_rng_state, device)
|
evalkit_internvl/lib/python3.10/site-packages/torch/return_types.pyi
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @generated from torch/_C/return_types.pyi
|
| 2 |
+
|
| 3 |
+
from typing import (
|
| 4 |
+
Any,
|
| 5 |
+
Callable,
|
| 6 |
+
ContextManager,
|
| 7 |
+
Iterator,
|
| 8 |
+
List,
|
| 9 |
+
Literal,
|
| 10 |
+
NamedTuple,
|
| 11 |
+
Optional,
|
| 12 |
+
overload,
|
| 13 |
+
Sequence,
|
| 14 |
+
Tuple,
|
| 15 |
+
TypeVar,
|
| 16 |
+
Union,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
from torch import contiguous_format, Generator, inf, memory_format, strided, Tensor, SymInt
|
| 20 |
+
from torch.types import (
|
| 21 |
+
_bool,
|
| 22 |
+
_device,
|
| 23 |
+
_dtype,
|
| 24 |
+
_float,
|
| 25 |
+
_int,
|
| 26 |
+
_layout,
|
| 27 |
+
_qscheme,
|
| 28 |
+
_size,
|
| 29 |
+
Number,
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
# NamedTuple stubs mirroring the structseq return types of the corresponding
# torch operators.  This file is @generated (see header) — do not hand-edit
# the class bodies; regenerate from torch/_C/return_types.pyi instead.

class _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(NamedTuple):
    output: Tensor
    mask: Tensor

class _fused_moving_avg_obs_fq_helper(NamedTuple):
    output: Tensor
    mask: Tensor

class _linalg_det(NamedTuple):
    result: Tensor
    LU: Tensor
    pivots: Tensor

class _linalg_eigh(NamedTuple):
    eigenvalues: Tensor
    eigenvectors: Tensor

class _linalg_slogdet(NamedTuple):
    sign: Tensor
    logabsdet: Tensor
    LU: Tensor
    pivots: Tensor

class _linalg_solve_ex(NamedTuple):
    result: Tensor
    LU: Tensor
    pivots: Tensor
    info: Tensor

class _linalg_svd(NamedTuple):
    U: Tensor
    S: Tensor
    Vh: Tensor

class _lu_with_info(NamedTuple):
    LU: Tensor
    pivots: Tensor
    info: Tensor

class _scaled_dot_product_efficient_attention(NamedTuple):
    output: Tensor
    log_sumexp: Tensor
    philox_seed: Tensor
    philox_offset: Tensor

class _scaled_dot_product_flash_attention(NamedTuple):
    output: Tensor
    logsumexp: Tensor
    cum_seq_q: Tensor
    cum_seq_k: Tensor
    # max_q / max_k may be symbolic under torch.compile tracing, hence SymInt.
    max_q: Union[_int, SymInt]
    max_k: Union[_int, SymInt]
    philox_seed: Tensor
    philox_offset: Tensor
    debug_attn_mask: Tensor

class _unpack_dual(NamedTuple):
    primal: Tensor
    tangent: Tensor

class aminmax(NamedTuple):
    min: Tensor
    max: Tensor

class cummax(NamedTuple):
    values: Tensor
    indices: Tensor

class cummin(NamedTuple):
    values: Tensor
    indices: Tensor

class frexp(NamedTuple):
    mantissa: Tensor
    exponent: Tensor

class geqrf(NamedTuple):
    a: Tensor
    tau: Tensor

class histogram(NamedTuple):
    hist: Tensor
    bin_edges: Tensor

class histogramdd(NamedTuple):
    hist: Tensor
    bin_edges: List[Tensor]

class kthvalue(NamedTuple):
    values: Tensor
    indices: Tensor

class lu_unpack(NamedTuple):
    P: Tensor
    L: Tensor
    U: Tensor

class max(NamedTuple):
    values: Tensor
    indices: Tensor

class median(NamedTuple):
    values: Tensor
    indices: Tensor

class min(NamedTuple):
    values: Tensor
    indices: Tensor

class mode(NamedTuple):
    values: Tensor
    indices: Tensor

class nanmedian(NamedTuple):
    values: Tensor
    indices: Tensor

class qr(NamedTuple):
    Q: Tensor
    R: Tensor

class slogdet(NamedTuple):
    sign: Tensor
    logabsdet: Tensor

class sort(NamedTuple):
    values: Tensor
    indices: Tensor

class svd(NamedTuple):
    U: Tensor
    S: Tensor
    V: Tensor

class topk(NamedTuple):
    values: Tensor
    indices: Tensor

class triangular_solve(NamedTuple):
    solution: Tensor
    cloned_coefficient: Tensor
|
evalkit_internvl/lib/python3.10/site-packages/torch/serialization.py
ADDED
|
@@ -0,0 +1,1448 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import difflib
|
| 2 |
+
import os
|
| 3 |
+
import io
|
| 4 |
+
import shutil
|
| 5 |
+
import struct
|
| 6 |
+
import sys
|
| 7 |
+
import torch
|
| 8 |
+
import tarfile
|
| 9 |
+
import tempfile
|
| 10 |
+
import warnings
|
| 11 |
+
from contextlib import closing, contextmanager
|
| 12 |
+
from enum import Enum
|
| 13 |
+
from ._utils import _import_dotted_name
|
| 14 |
+
from torch._sources import get_source_lines_and_file
|
| 15 |
+
from torch.types import Storage
|
| 16 |
+
from torch.storage import _get_dtype_from_pickle_storage_type
|
| 17 |
+
from typing import Any, BinaryIO, Callable, cast, Dict, Optional, Type, Tuple, Union, IO
|
| 18 |
+
from typing_extensions import TypeAlias # Python 3.10+
|
| 19 |
+
import copyreg
|
| 20 |
+
import pickle
|
| 21 |
+
import pathlib
|
| 22 |
+
import torch._weights_only_unpickler as _weights_only_unpickler
|
| 23 |
+
|
| 24 |
+
# Default pickle protocol used when serializing (protocol 2 keeps files
# readable by older Python/torch versions).
DEFAULT_PROTOCOL = 2

# Native-endian struct sizes ('=' prefix) for the legacy binary format.
LONG_SIZE = struct.Struct('=l').size
INT_SIZE = struct.Struct('=i').size
SHORT_SIZE = struct.Struct('=h').size

# Constants of the legacy (non-zip) checkpoint format.
MAGIC_NUMBER = 0x1950a86a20f9469cfc6c
PROTOCOL_VERSION = 1001
STORAGE_KEY_SEPARATOR = ','

# Public type aliases describing what torch.save/torch.load accept.
FILE_LIKE: TypeAlias = Union[str, os.PathLike, BinaryIO, IO[bytes]]
MAP_LOCATION: TypeAlias = Optional[Union[Callable[[torch.Tensor, str], torch.Tensor], torch.device, str, Dict[str, str]]]
STORAGE: TypeAlias = Union[Storage, torch.storage.TypedStorage, torch.UntypedStorage]

__all__ = [
    'SourceChangeWarning',
    'mkdtemp',
    'register_package',
    'check_module_version_greater_or_equal',
    'validate_cuda_device',
    'validate_hpu_device',
    'location_tag',
    'default_restore_location',
    'normalize_storage_type',
    'storage_to_tensor_type',
    'save',
    'load',
    'StorageType',
    'LoadEndianness',
    'get_default_load_endianness',
    'set_default_load_endianness',
]
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class SourceChangeWarning(Warning):
    # Warning category emitted during loading when a saved object's source
    # code no longer matches the current source (presumably diffed via the
    # difflib import above — the check itself lives elsewhere in this module).
    pass
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@contextmanager
def mkdtemp():
    """Create a temporary directory, yield its path, and remove it on exit."""
    tmp_dir = tempfile.mkdtemp()
    try:
        yield tmp_dir
    finally:
        # Always clean up, even if the body raised.
        shutil.rmtree(tmp_dir)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# Registry of (priority, tagger, deserializer) triples, kept sorted by
# priority; populated via register_package() below.
_package_registry = []
|
| 72 |
+
|
| 73 |
+
class LoadEndianness(Enum):
    """Byte-order choices used as the fallback when a checkpoint carries no byteorder mark."""
    NATIVE = 1
    LITTLE = 2
    BIG = 3

# Process-wide fallback byte order; ``None`` means native byte order.
# Accessed through get_default_load_endianness / set_default_load_endianness.
_default_load_endian: Optional[LoadEndianness] = None
|
| 79 |
+
|
| 80 |
+
def get_default_load_endianness() -> Optional[LoadEndianness]:
    """Return the fallback byte order used when loading files.

    The fallback applies only when a saved checkpoint does not carry a
    byteorder mark.  ``None`` (the default) means "native" byte order.

    Returns:
        Optional[LoadEndianness]: the current fallback byte order.
    """
    return _default_load_endian
|
| 92 |
+
|
| 93 |
+
def set_default_load_endianness(endianness):
    """Set the fallback byte order used when loading files.

    The fallback applies only when a saved checkpoint does not carry a
    byteorder mark.  ``None`` restores the default ("native") behavior.

    Args:
        endianness: the new fallback byte order (a ``LoadEndianness`` or ``None``).

    Raises:
        TypeError: if ``endianness`` is neither ``None`` nor a ``LoadEndianness``.
    """
    global _default_load_endian
    valid = endianness is None or isinstance(endianness, LoadEndianness)
    if not valid:
        raise TypeError("Invalid argument type in function set_default_load_endianness")
    _default_load_endian = endianness
|
| 108 |
+
|
| 109 |
+
def _is_zipfile(f) -> bool:
|
| 110 |
+
# This is a stricter implementation than zipfile.is_zipfile().
|
| 111 |
+
# zipfile.is_zipfile() is True if the magic number appears anywhere in the
|
| 112 |
+
# binary. Since we expect the files here to be generated by torch.save or
|
| 113 |
+
# torch.jit.save, it's safe to only check the start bytes and avoid
|
| 114 |
+
# collisions and assume the zip has only 1 file.
|
| 115 |
+
# See bugs.python.org/issue28494.
|
| 116 |
+
|
| 117 |
+
start = f.tell()
|
| 118 |
+
# Read the first few bytes and match against the ZIP file signature
|
| 119 |
+
local_header_magic_number = b'PK\x03\x04'
|
| 120 |
+
read_bytes = f.read(len(local_header_magic_number))
|
| 121 |
+
f.seek(start)
|
| 122 |
+
return read_bytes == local_header_magic_number
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def register_package(
    priority: int,
    tagger: Callable[[STORAGE], Optional[str]],
    deserializer: Callable[[STORAGE, str], Optional[STORAGE]]
):
    '''
    Registers callables for tagging and deserializing storage objects with an associated priority.
    Tagging associates a device with a storage object at save time while deserializing moves a
    storage object to an appropriate device at load time. :attr:`tagger` and :attr:`deserializer`
    are run in the order given by their :attr:`priority` until a tagger/deserializer returns a
    value that is not `None`.

    To override the deserialization behavior for a device in the global registry, one can register a
    tagger with a higher priority than the existing tagger.

    This function can also be used to register a tagger and deserializer for new devices.

    Args:
        priority: Indicates the priority associated with the tagger and deserializer, where a lower
            value indicates higher priority.
        tagger: Callable that takes in a storage object and returns its tagged device as a string
            or None.
        deserializer: Callable that takes in storage object and a device string and returns a storage
            object on the appropriate device or None.

    Returns:
        `None`

    Example:
        >>> def ipu_tag(obj):
        >>>     if obj.device.type == 'ipu':
        >>>         return 'ipu'
        >>> def ipu_deserialize(obj, location):
        >>>     if location.startswith('ipu'):
        >>>         ipu = getattr(torch, "ipu", None)
        >>>         assert ipu is not None, "IPU device module is not loaded"
        >>>         assert torch.ipu.is_available(), "ipu is not available"
        >>>         return obj.ipu(location)
        >>> torch.serialization.register_package(11, ipu_tag, ipu_deserialize)
    '''
    queue_elem = (priority, tagger, deserializer)
    _package_registry.append(queue_elem)
    # Sort by priority only.  Sorting the raw tuples would fall through to
    # comparing the tagger callables whenever two priorities are equal, and
    # callables are not orderable (TypeError).  A keyed sort is also stable,
    # so registrations with equal priority keep their registration order.
    _package_registry.sort(key=lambda elem: elem[0])
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def check_module_version_greater_or_equal(module, req_version_tuple, error_if_malformed=True):
    '''
    Check if a module's version satisfies requirements

    Usually, a module's version string will be like 'x.y.z', which would be represented
    as a tuple (x, y, z), but sometimes it could be an unexpected format. If the version
    string does not match the given tuple's format up to the length of the tuple, then
    error and exit or emit a warning.

    Args:
        module: the module to check the version of
        req_version_tuple: tuple (usually of ints) representing the required version
        error_if_malformed: whether we should exit if module version string is malformed

    Returns:
        requirement_is_met: bool
    '''
    try:
        fields = module.__version__.split('.')
        # Coerce each version field to the type of the matching required
        # field, and only compare as many fields as the requirement names.
        actual_version = tuple(
            type(required)(fields[pos]) for pos, required in enumerate(req_version_tuple)
        )
        satisfied = actual_version >= req_version_tuple
    except Exception as e:
        message = (
            f"'{module.__name__}' module version string is malformed '{module.__version__}' and cannot be compared"
            f" with tuple {str(req_version_tuple)}"
        )
        if error_if_malformed:
            raise RuntimeError(message) from e
        # Best-effort mode: warn and optimistically assume the requirement holds.
        warnings.warn(message + ', but continuing assuming that requirement is met')
        satisfied = True
    return satisfied
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def _cpu_tag(obj):
|
| 210 |
+
if obj.device.type == 'cpu':
|
| 211 |
+
return 'cpu'
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def _cuda_tag(obj):
|
| 215 |
+
if obj.device.type == 'cuda':
|
| 216 |
+
return 'cuda:' + str(obj.device.index)
|
| 217 |
+
|
| 218 |
+
def _hpu_tag(obj):
    # Location tag 'hpu:<index>' for storages on Habana HPU devices;
    # implicitly returns None for anything else.
    if obj.device.type == 'hpu':
        return 'hpu:' + str(obj.device.index)
|
| 221 |
+
|
| 222 |
+
def _mps_tag(obj):
    # Location tag for Apple MPS storages (no per-device index on MPS);
    # implicitly returns None for anything else.
    if obj.device.type == 'mps':
        return 'mps'
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def _meta_tag(obj):
    # Location tag for storages on the data-less 'meta' device;
    # implicitly returns None for anything else.
    if obj.device.type == 'meta':
        return 'meta'
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def _privateuse1_tag(obj):
    # Location tag for storages on the (renameable) privateuse1 backend.
    # The tag omits the ':<index>' suffix when the device has no index.
    backend_name = torch._C._get_privateuse1_backend_name()
    if obj.device.type == backend_name:
        if obj.device.index is None:
            return backend_name
        else:
            return backend_name + ':' + str(obj.device.index)
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
def _cpu_deserialize(obj, location):
|
| 242 |
+
if location == 'cpu':
|
| 243 |
+
return obj
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def validate_cuda_device(location):
    """Resolve a 'cuda[:N]' location tag to a device index, validating availability.

    Raises:
        RuntimeError: if CUDA is unavailable or the index exceeds the
            number of visible devices.
    """
    device = torch.cuda._utils._get_device_index(location, True)

    if not torch.cuda.is_available():
        raise RuntimeError('Attempting to deserialize object on a CUDA '
                           'device but torch.cuda.is_available() is False. '
                           'If you are running on a CPU-only machine, '
                           'please use torch.load with map_location=torch.device(\'cpu\') '
                           'to map your storages to the CPU.')
    device_count = torch.cuda.device_count()
    if device >= device_count:
        raise RuntimeError('Attempting to deserialize object on CUDA device '
                           f'{device} but torch.cuda.device_count() is {device_count}. Please use '
                           'torch.load with map_location to map your storages '
                           'to an existing device.')
    return device
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def _cuda_deserialize(obj, location):
    # Restore a storage tagged 'cuda[:N]' onto the corresponding CUDA device;
    # implicitly returns None for other locations so the next registered
    # deserializer can try.
    if location.startswith('cuda'):
        device = validate_cuda_device(location)
        if getattr(obj, "_torch_load_uninitialized", False):
            # Lazy-loaded placeholder: allocate fresh (uninitialized) device
            # memory of the right size instead of copying host bytes.
            with torch.cuda.device(device):
                return torch.UntypedStorage(obj.nbytes(), device=torch.device(location))
        else:
            return obj.cuda(device)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def validate_hpu_device(location):
    """Resolve an 'hpu[:N]' location tag to a device index, validating availability.

    Raises:
        RuntimeError: if HPU is unavailable or the index exceeds the
            number of visible devices.
    """
    # The HPU module is registered externally, so look it up dynamically.
    hpu = getattr(torch, "hpu", None)
    assert hpu is not None, "HPU device module is not loaded"
    device = hpu._utils._get_device_index(location, optional=True)

    if not hpu.is_available():
        raise RuntimeError('Attempting to deserialize object on a HPU '
                           'device but torch.hpu.is_available() is False. '
                           'If you are running on a CPU-only machine, '
                           'please use torch.load with map_location=torch.device(\'cpu\') '
                           'to map your storages to the CPU.')
    device_count = hpu.device_count()
    if device >= device_count:
        raise RuntimeError('Attempting to deserialize object on HPU device '
                           f'{device} but torch.hpu.device_count() is {device_count}. Please use '
                           'torch.load with map_location to map your storages '
                           'to an existing device.')
    return device
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def _hpu_deserialize(obj, location):
    # Restore a storage tagged 'hpu[:N]' onto the corresponding HPU device;
    # implicitly returns None for other locations.
    if location.startswith('hpu'):
        hpu = getattr(torch, "hpu", None)
        assert hpu is not None, "HPU device module is not loaded"
        device = validate_hpu_device(location)
        if getattr(obj, "_torch_load_uninitialized", False):
            # Lazy-loaded placeholder: allocate fresh (uninitialized) device
            # memory of the right size instead of copying host bytes.
            with hpu.device(device):
                return torch.UntypedStorage(obj.nbytes(), device=torch.device(location))
        else:
            return obj.hpu(device)
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
def _mps_deserialize(obj, location):
    # Restore a storage tagged 'mps' onto the MPS device; implicitly
    # returns None for other locations.
    if location.startswith('mps'):
        return obj.mps()
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def _meta_deserialize(obj, location):
|
| 312 |
+
if location == 'meta':
|
| 313 |
+
return torch.UntypedStorage(obj.nbytes(), device='meta')
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
def _validate_privateuse1_device(location, backend_name):
|
| 317 |
+
'''
|
| 318 |
+
Check whether the device index of privateuse1 is valid
|
| 319 |
+
|
| 320 |
+
Register a device_module of privateuse1 by torch._register_device_module.
|
| 321 |
+
Implement the following methods in device_module like cuda:
|
| 322 |
+
device_module._utils._get_device_index(location, True),
|
| 323 |
+
device_module.device_count().
|
| 324 |
+
|
| 325 |
+
Args:
|
| 326 |
+
location: string of device
|
| 327 |
+
backend_name: the name of privateuse1, which can be renamed
|
| 328 |
+
|
| 329 |
+
Returns:
|
| 330 |
+
device_index: int
|
| 331 |
+
'''
|
| 332 |
+
if not hasattr(torch, backend_name):
|
| 333 |
+
raise RuntimeError(f'The {backend_name.upper()} device module is not registered. '
|
| 334 |
+
'If you are running on a CPU-only machine, '
|
| 335 |
+
'please use torch.load with map_location=torch.device(\'cpu\') '
|
| 336 |
+
'to map your storages to the CPU.')
|
| 337 |
+
device_module = getattr(torch, backend_name)
|
| 338 |
+
if hasattr(device_module, '_utils') and hasattr(device_module._utils, '_get_device_index'):
|
| 339 |
+
device_index = device_module._utils._get_device_index(location, True)
|
| 340 |
+
else:
|
| 341 |
+
device = torch.device(location)
|
| 342 |
+
device_index = device.index if device.index else 0
|
| 343 |
+
if hasattr(device_module, 'is_available') and not device_module.is_available():
|
| 344 |
+
raise RuntimeError(f'Attempting to deserialize object on a {backend_name.upper()} '
|
| 345 |
+
f'device but torch.{backend_name}.is_available() is False. '
|
| 346 |
+
'If you are running on a CPU-only machine, '
|
| 347 |
+
'please use torch.load with map_location=torch.device(\'cpu\') '
|
| 348 |
+
'to map your storages to the CPU.')
|
| 349 |
+
if hasattr(device_module, 'device_count'):
|
| 350 |
+
device_count = device_module.device_count()
|
| 351 |
+
if device_index >= device_count:
|
| 352 |
+
raise RuntimeError(f'Attempting to deserialize object on {backend_name.upper()} device '
|
| 353 |
+
f'{device_index} but torch.{backend_name}.device_count() is {device_count}. '
|
| 354 |
+
'Please use torch.load with map_location to map your storages '
|
| 355 |
+
'to an existing device.')
|
| 356 |
+
return device_index
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def _privateuse1_deserialize(obj, location):
|
| 360 |
+
backend_name = torch._C._get_privateuse1_backend_name()
|
| 361 |
+
if location.startswith(backend_name):
|
| 362 |
+
if not hasattr(obj, backend_name):
|
| 363 |
+
raise RuntimeError(f'Attempting to load the storages to the {backend_name.upper()} device '
|
| 364 |
+
f'but torch.storage._StorageBase.{backend_name}() or '
|
| 365 |
+
f'torch.storage.TypedStorage.{backend_name}() is not generated. '
|
| 366 |
+
'Please use torch.utils.generate_methods_for_privateuse1_backend '
|
| 367 |
+
f'to generate storage.{backend_name}() method first.')
|
| 368 |
+
device_index = _validate_privateuse1_device(location, backend_name)
|
| 369 |
+
return getattr(obj, backend_name)(device_index)
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
# Built-in location handlers, registered in priority order (lower runs first).
for _priority, _tagger, _deserializer in (
    (10, _cpu_tag, _cpu_deserialize),
    (20, _cuda_tag, _cuda_deserialize),
    (21, _mps_tag, _mps_deserialize),
    (22, _meta_tag, _meta_deserialize),
    (23, _privateuse1_tag, _privateuse1_deserialize),
    (24, _hpu_tag, _hpu_deserialize),
):
    register_package(_priority, _tagger, _deserializer)
del _priority, _tagger, _deserializer
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
def location_tag(storage: Union[Storage, torch.storage.TypedStorage, torch.UntypedStorage]):
    """Return the location tag (e.g. ``'cpu'``, ``'cuda:0'``) for *storage*.

    Registered taggers are consulted in priority order; the first truthy
    answer wins. Raises ``RuntimeError`` when no tagger recognizes the
    storage.
    """
    for _, tagger, _ in _package_registry:
        tag = tagger(storage)
        if tag:
            return tag
    raise RuntimeError("don't know how to determine data location of "
                       + torch.typename(storage))
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
def default_restore_location(storage, location):
    """Restore *storage* using the first registered deserializer that accepts
    *location*; raise ``RuntimeError`` when none does."""
    for _, _, deserializer in _package_registry:
        restored = deserializer(storage, location)
        if restored is not None:
            return restored
    raise RuntimeError("don't know how to restore data location of "
                       + torch.typename(storage) + " (tagged with "
                       + location + ")")
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def normalize_storage_type(storage_type):
    """Map a storage class to its canonical ``torch``-namespace equivalent."""
    name = storage_type.__name__
    return getattr(torch, name)
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
def storage_to_tensor_type(storage):
    """Return the tensor class paired with *storage*'s class
    (``XStorage`` -> ``XTensor`` in the same module)."""
    storage_cls = type(storage)
    module = _import_dotted_name(storage_cls.__module__)
    return getattr(module, storage_cls.__name__.replace('Storage', 'Tensor'))
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
def _is_path(name_or_buffer):
|
| 410 |
+
return isinstance(name_or_buffer, (str, pathlib.Path))
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
class _opener:
|
| 414 |
+
def __init__(self, file_like):
|
| 415 |
+
self.file_like = file_like
|
| 416 |
+
|
| 417 |
+
def __enter__(self):
|
| 418 |
+
return self.file_like
|
| 419 |
+
|
| 420 |
+
def __exit__(self, *args):
|
| 421 |
+
pass
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
class _open_file(_opener):
    """Opener that owns the file handle it creates and closes it on exit."""

    def __init__(self, name, mode):
        super().__init__(open(name, mode))

    def __exit__(self, *args):
        self.file_like.close()
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
class _open_buffer_reader(_opener):
    """Opener around a caller-supplied readable buffer.

    Verifies up front that the buffer is seekable so failures surface with a
    helpful message instead of mid-deserialization.
    """

    def __init__(self, buffer):
        super().__init__(buffer)
        _check_seekable(buffer)
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
class _open_buffer_writer(_opener):
    """Opener around a caller-supplied writable buffer.

    Flushes — but deliberately does not close — the buffer on exit, since
    the caller still owns it.
    """

    def __exit__(self, *args):
        self.file_like.flush()
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
def _open_file_like(name_or_buffer, mode):
    """Return the opener matching the destination kind and I/O direction."""
    if _is_path(name_or_buffer):
        return _open_file(name_or_buffer, mode)
    if 'w' in mode:
        return _open_buffer_writer(name_or_buffer)
    if 'r' in mode:
        return _open_buffer_reader(name_or_buffer)
    raise RuntimeError(f"Expected 'r' or 'w' in mode but got {mode}")
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
class _open_zipfile_reader(_opener):
    """Opener wrapping a PyTorch zip-archive reader over a path or buffer."""

    def __init__(self, name_or_buffer) -> None:
        super().__init__(torch._C.PyTorchFileReader(name_or_buffer))
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
class _open_zipfile_writer_file(_opener):
    """Opener that writes a PyTorch zip archive to a named file.

    ``PyTorchFileWriter`` only supports ascii filenames, so non-ascii names
    are handled by opening the file from Python and handing the writer the
    resulting stream instead.
    """

    def __init__(self, name) -> None:
        self.file_stream = None
        self.name = str(name)
        try:
            self.name.encode('ascii')
        except UnicodeEncodeError:
            # Non-ascii filename: let Python own the OS-level file and feed
            # the writer a stream.
            self.file_stream = io.FileIO(self.name, mode='w')
            super().__init__(torch._C.PyTorchFileWriter(self.file_stream))
        else:
            super().__init__(torch._C.PyTorchFileWriter(self.name))

    def __exit__(self, *args) -> None:
        self.file_like.write_end_of_file()
        if self.file_stream is not None:
            self.file_stream.close()
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
class _open_zipfile_writer_buffer(_opener):
    """Opener that writes a PyTorch zip archive into a writable buffer."""

    def __init__(self, buffer) -> None:
        if not callable(getattr(buffer, "write", None)):
            msg = f"Buffer of {str(type(buffer)).strip('<>')} has no callable attribute 'write'"
            # Distinguish "no attribute at all" from "attribute isn't callable".
            if not hasattr(buffer, "write"):
                raise AttributeError(msg)
            raise TypeError(msg)
        self.buffer = buffer
        super().__init__(torch._C.PyTorchFileWriter(buffer))

    def __exit__(self, *args) -> None:
        self.file_like.write_end_of_file()
        self.buffer.flush()
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
def _open_zipfile_writer(name_or_buffer):
    """Pick the zip-archive writer matching the destination kind."""
    container: Type[_opener]
    if _is_path(name_or_buffer):
        container = _open_zipfile_writer_file
    else:
        container = _open_zipfile_writer_buffer
    return container(name_or_buffer)
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
def _is_compressed_file(f) -> bool:
|
| 506 |
+
compress_modules = ['gzip']
|
| 507 |
+
try:
|
| 508 |
+
return f.__module__ in compress_modules
|
| 509 |
+
except AttributeError:
|
| 510 |
+
return False
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
def _should_read_directly(f):
    """
    Checks if f is a file that should be read directly. It should be read
    directly if it is backed by a real file (has a fileno) and is not a
    compressed file (e.g. gzip)
    """
    if _is_compressed_file(f):
        return False
    try:
        return f.fileno() >= 0
    except (io.UnsupportedOperation, AttributeError):
        # No usable file descriptor -> must go through the Python layer.
        return False
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
def _check_seekable(f) -> bool:
|
| 530 |
+
|
| 531 |
+
def raise_err_msg(patterns, e):
|
| 532 |
+
for p in patterns:
|
| 533 |
+
if p in str(e):
|
| 534 |
+
msg = (str(e) + ". You can only torch.load from a file that is seekable."
|
| 535 |
+
+ " Please pre-load the data into a buffer like io.BytesIO and"
|
| 536 |
+
+ " try to load from it instead.")
|
| 537 |
+
raise type(e)(msg)
|
| 538 |
+
raise e
|
| 539 |
+
|
| 540 |
+
try:
|
| 541 |
+
f.seek(f.tell())
|
| 542 |
+
return True
|
| 543 |
+
except (io.UnsupportedOperation, AttributeError) as e:
|
| 544 |
+
raise_err_msg(["seek", "tell"], e)
|
| 545 |
+
return False
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
def _check_dill_version(pickle_module) -> None:
|
| 549 |
+
'''Checks if using dill as the pickle module, and if so, checks if it is the correct version.
|
| 550 |
+
If dill version is lower than 0.3.1, a ValueError is raised.
|
| 551 |
+
|
| 552 |
+
Args:
|
| 553 |
+
pickle_module: module used for pickling metadata and objects
|
| 554 |
+
|
| 555 |
+
'''
|
| 556 |
+
if pickle_module is not None and pickle_module.__name__ == 'dill':
|
| 557 |
+
required_dill_version = (0, 3, 1)
|
| 558 |
+
if not check_module_version_greater_or_equal(pickle_module, required_dill_version, False):
|
| 559 |
+
raise ValueError((
|
| 560 |
+
"'torch' supports dill >= {}, but you have dill {}."
|
| 561 |
+
" Please upgrade dill or switch to 'pickle'"
|
| 562 |
+
).format(
|
| 563 |
+
'.'.join([str(num) for num in required_dill_version]),
|
| 564 |
+
pickle_module.__version__
|
| 565 |
+
))
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
def _check_save_filelike(f):
|
| 569 |
+
if not isinstance(f, (str, os.PathLike)) and not hasattr(f, 'write'):
|
| 570 |
+
raise AttributeError(
|
| 571 |
+
"expected 'f' to be string, path, or a file-like object with "
|
| 572 |
+
"a 'write' attribute")
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
def save(
    obj: object,
    f: FILE_LIKE,
    pickle_module: Any = pickle,
    pickle_protocol: int = DEFAULT_PROTOCOL,
    _use_new_zipfile_serialization: bool = True,
    _disable_byteorder_record: bool = False
) -> None:
    # Reference: https://github.com/pytorch/pytorch/issues/54354
    # The first line of this docstring overrides the one Sphinx generates for the
    # documentation. We need it so that Sphinx doesn't leak `pickle`s path from
    # the build environment (e.g. `<module 'pickle' from '/leaked/path').

    """save(obj, f, pickle_module=pickle, pickle_protocol=DEFAULT_PROTOCOL, _use_new_zipfile_serialization=True)

    Saves an object to a disk file.

    See also: :ref:`saving-loading-tensors`

    Args:
        obj: saved object
        f: a file-like object (has to implement write and flush) or a string or
           os.PathLike object containing a file name
        pickle_module: module used for pickling metadata and objects
        pickle_protocol: can be specified to override the default protocol

    .. note::
        A common PyTorch convention is to save tensors using .pt file extension.

    .. note::
        PyTorch preserves storage sharing across serialization. See
        :ref:`preserve-storage-sharing` for more details.

    .. note::
        The 1.6 release of PyTorch switched ``torch.save`` to use a new
        zipfile-based file format. ``torch.load`` still retains the ability to
        load files in the old format. If for any reason you want ``torch.save``
        to use the old format, pass the kwarg ``_use_new_zipfile_serialization=False``.

    Example:
        >>> # xdoctest: +SKIP("makes cwd dirty")
        >>> # Save to file
        >>> x = torch.tensor([0, 1, 2, 3, 4])
        >>> torch.save(x, 'tensor.pt')
        >>> # Save to io.BytesIO buffer
        >>> buffer = io.BytesIO()
        >>> torch.save(x, buffer)
    """
    torch._C._log_api_usage_once("torch.save")
    _check_dill_version(pickle_module)
    _check_save_filelike(f)

    # Legacy (pre-1.6) format on request; zipfile format otherwise.
    if not _use_new_zipfile_serialization:
        with _open_file_like(f, 'wb') as opened_file:
            _legacy_save(obj, opened_file, pickle_module, pickle_protocol)
        return

    with _open_zipfile_writer(f) as opened_zipfile:
        _save(obj, opened_zipfile, pickle_module, pickle_protocol, _disable_byteorder_record)
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
def _legacy_save(obj, f, pickle_module, pickle_protocol) -> None:
    """Serialize *obj* to *f* using the pre-1.6 (non-zipfile) format:
    magic number, protocol version, sys info, the pickled object, then the
    sorted storage keys followed by each storage's raw bytes."""
    import torch.nn as nn
    serialized_container_types = {}
    serialized_storages = {}

    # Loading storages that view the same data with different dtypes is not
    # supported, so record the dtype seen per storage data_ptr and fail if a
    # different dtype ever shows up for the same pointer.
    # TODO: This feature could be added in the future
    storage_dtypes: Dict[int, torch.dtype] = {}

    def persistent_id(target: Any) -> Optional[Tuple]:
        # FIXME: the pickle docs say persistent_id should only return a string,
        # but torch returns tuples; that only works in the binary protocol. See
        # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
        # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537
        if isinstance(target, type) and issubclass(target, nn.Module):
            if target in serialized_container_types:
                return None
            serialized_container_types[target] = True
            source_file = source = None
            try:
                source_lines, _, source_file = get_source_lines_and_file(target)
                source = ''.join(source_lines)
            except Exception:  # saving the source is optional, so we can ignore any errors
                warnings.warn("Couldn't retrieve source code for container of "
                              "type " + target.__name__ + ". It won't be checked "
                              "for correctness upon loading.")
            return ('module', target, source_file, source)

        if isinstance(target, torch.storage.TypedStorage) or torch.is_storage(target):
            storage: torch.UntypedStorage

            if isinstance(target, torch.storage.TypedStorage):
                # TODO: Once we decide to break serialization FC, this case
                # can be deleted
                storage = target._untyped_storage
                storage_dtype = target.dtype
                storage_type_str = target._pickle_storage_type()
                storage_type = getattr(torch, storage_type_str)
                dtype = target.dtype
                storage_numel = target._size()
            elif isinstance(target, torch.UntypedStorage):
                storage = target
                storage_dtype = torch.uint8
                storage_type = normalize_storage_type(type(target))
                dtype = torch.uint8
                storage_numel = storage.nbytes()
            else:
                raise TypeError(f'type not recognized: {type(target)}')

            # If the storage is allocated, ensure that any other saved
            # storages pointing at the same data all share its dtype.
            ptr = storage.data_ptr()
            if ptr != 0:
                if ptr in storage_dtypes:
                    if storage_dtype != storage_dtypes[ptr]:
                        raise RuntimeError(
                            'Cannot save multiple tensors or storages that '
                            'view the same data as different types')
                else:
                    storage_dtypes[ptr] = storage_dtype

            view_metadata: Optional[Tuple[str, int, int]]

            # Offset is always 0, but we keep it for backwards compatibility
            # with the old serialization format (which supported storage views)
            offset = 0
            storage_key = str(storage._cdata)
            location = location_tag(storage)

            # NOTE: known forwards-compatibility wrinkle: when a saved list
            # holds both a storage and a tensor viewing the same data, this
            # legacy format records the first occurrence's element size, so
            # older pytorch versions can mis-type the reloaded storage. Lists
            # of tensors alone stay FC as long as all views share one dtype.
            if storage_key not in serialized_storages:
                serialized_storages[storage_key] = (storage, dtype)
            # NOTE(review): this comparison is always False — a vestige of the
            # removed storage-view support. Left as-is to preserve behavior.
            is_view = storage._cdata != storage._cdata
            if is_view:
                view_metadata = (str(storage._cdata), offset, storage.nbytes())
            else:
                view_metadata = None

            return ('storage',
                    storage_type,
                    storage_key,
                    location,
                    storage_numel,
                    view_metadata)
        return None

    sys_info = dict(
        protocol_version=PROTOCOL_VERSION,
        little_endian=sys.byteorder == 'little',
        type_sizes=dict(
            short=SHORT_SIZE,
            int=INT_SIZE,
            long=LONG_SIZE,
        ),
    )

    # Header records, then the pickled object graph.
    pickle_module.dump(MAGIC_NUMBER, f, protocol=pickle_protocol)
    pickle_module.dump(PROTOCOL_VERSION, f, protocol=pickle_protocol)
    pickle_module.dump(sys_info, f, protocol=pickle_protocol)
    pickler = pickle_module.Pickler(f, protocol=pickle_protocol)
    pickler.persistent_id = persistent_id
    pickler.dump(obj)

    # Storage payloads follow, in deterministic (sorted-key) order.
    serialized_storage_keys = sorted(serialized_storages.keys())
    pickle_module.dump(serialized_storage_keys, f, protocol=pickle_protocol)
    f.flush()
    for key in serialized_storage_keys:
        storage, dtype = serialized_storages[key]
        storage._write_file(f, _should_read_directly(f), True, torch._utils._element_size(dtype))
|
| 778 |
+
|
| 779 |
+
|
| 780 |
+
def _save(obj, zip_file, pickle_module, pickle_protocol, _disable_byteorder_record):
    """Serialize *obj* into an already-open PyTorch zip archive (1.6+ format):
    ``data.pkl`` for the pickled object graph, an optional ``byteorder``
    record, and one ``data/<key>`` record per referenced storage."""
    serialized_storages = {}
    id_map: Dict[int, str] = {}

    # Loading storages that view the same data with different dtypes is not
    # supported, so record the dtype seen per storage data_ptr and fail if a
    # different dtype ever shows up for the same pointer.
    # TODO: This feature could be added in the future
    storage_dtypes: Dict[int, torch.dtype] = {}

    def persistent_id(target):
        # FIXME: the pickle docs say persistent_id should only return a string,
        # but torch returns tuples; that only works in the binary protocol. See
        # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
        # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537
        if not (isinstance(target, torch.storage.TypedStorage) or torch.is_storage(target)):
            return None

        if isinstance(target, torch.storage.TypedStorage):
            # TODO: Once we decide to break serialization FC, this case
            # can be deleted
            storage = target._untyped_storage
            storage_dtype = target.dtype
            storage_type_str = target._pickle_storage_type()
            storage_type = getattr(torch, storage_type_str)
            storage_numel = target._size()
        else:
            storage = target
            storage_dtype = torch.uint8
            storage_type = normalize_storage_type(type(target))
            storage_numel = storage.nbytes()

        # If the storage is allocated, ensure that any other saved storages
        # pointing at the same data all share its dtype.
        ptr = storage.data_ptr()
        if ptr != 0:
            if ptr in storage_dtypes:
                if storage_dtype != storage_dtypes[ptr]:
                    raise RuntimeError(
                        'Cannot save multiple tensors or storages that '
                        'view the same data as different types')
            else:
                storage_dtypes[ptr] = storage_dtype

        # Stable small-integer keys, assigned in first-seen order.
        storage_key = id_map.setdefault(storage._cdata, str(len(id_map)))
        location = location_tag(storage)
        serialized_storages[storage_key] = storage

        return ('storage',
                storage_type,
                storage_key,
                location,
                storage_numel)

    # Write the pickle data for `obj`
    data_buf = io.BytesIO()
    pickler = pickle_module.Pickler(data_buf, protocol=pickle_protocol)
    pickler.persistent_id = persistent_id
    pickler.dump(obj)
    data_value = data_buf.getvalue()
    zip_file.write_record('data.pkl', data_value, len(data_value))

    # Write byte order marker
    if not _disable_byteorder_record:
        if sys.byteorder not in ['little', 'big']:
            raise ValueError('Unknown endianness type: ' + sys.byteorder)
        zip_file.write_record('byteorder', sys.byteorder, len(sys.byteorder))

    # Write each storage to a record named data/<key> in the zip archive.
    for key in sorted(serialized_storages.keys()):
        name = f'data/{key}'
        storage = serialized_storages[key]
        # Copying to CPU first means a storage only needs to implement .cpu()
        # to be serializable; once there, its bytes go straight into the zip.
        if storage.device.type != 'cpu':
            storage = storage.cpu()
        num_bytes = storage.nbytes()
        zip_file.write_record(name, storage.data_ptr(), num_bytes)
|
| 864 |
+
|
| 865 |
+
|
| 866 |
+
def load(
    f: FILE_LIKE,
    map_location: MAP_LOCATION = None,
    pickle_module: Any = None,
    *,
    weights_only: bool = False,
    mmap: Optional[bool] = None,
    **pickle_load_args: Any
) -> Any:
    # Reference: https://github.com/pytorch/pytorch/issues/54354
    # The first line of this docstring overrides the one Sphinx generates for the
    # documentation. We need it so that Sphinx doesn't leak `pickle`s path from
    # the build environment (e.g. `<module 'pickle' from '/leaked/path').

    """load(f, map_location=None, pickle_module=pickle, *, weights_only=False, mmap=None, **pickle_load_args)

    Loads an object saved with :func:`torch.save` from a file.

    :func:`torch.load` uses Python's unpickling facilities but treats storages,
    which underlie tensors, specially. They are first deserialized on the
    CPU and are then moved to the device they were saved from. If this fails
    (e.g. because the run time system doesn't have certain devices), an exception
    is raised. However, storages can be dynamically remapped to an alternative
    set of devices using the :attr:`map_location` argument.

    If :attr:`map_location` is a callable, it will be called once for each serialized
    storage with two arguments: storage and location. The storage argument
    will be the initial deserialization of the storage, residing on the CPU.
    Each serialized storage has a location tag associated with it which
    identifies the device it was saved from, and this tag is the second
    argument passed to :attr:`map_location`. The builtin location tags are ``'cpu'``
    for CPU tensors and ``'cuda:device_id'`` (e.g. ``'cuda:2'``) for CUDA tensors.
    :attr:`map_location` should return either ``None`` or a storage. If
    :attr:`map_location` returns a storage, it will be used as the final deserialized
    object, already moved to the right device. Otherwise, :func:`torch.load` will
    fall back to the default behavior, as if :attr:`map_location` wasn't specified.

    If :attr:`map_location` is a :class:`torch.device` object or a string containing
    a device tag, it indicates the location where all tensors should be loaded.

    Otherwise, if :attr:`map_location` is a dict, it will be used to remap location tags
    appearing in the file (keys), to ones that specify where to put the
    storages (values).

    User extensions can register their own location tags and tagging and
    deserialization methods using :func:`torch.serialization.register_package`.

    Args:
        f: a file-like object (has to implement :meth:`read`, :meth:`readline`, :meth:`tell`, and :meth:`seek`),
            or a string or os.PathLike object containing a file name
        map_location: a function, :class:`torch.device`, string or a dict specifying how to remap storage
            locations
        pickle_module: module used for unpickling metadata and objects (has to
            match the :attr:`pickle_module` used to serialize file)
        weights_only: Indicates whether unpickler should be restricted to
            loading only tensors, primitive types and dictionaries
        mmap: Indicates whether the file should be mmaped rather than loading all the storages into memory.
            Typically, tensor storages in the file will first be moved from disk to CPU memory, after which they
            are moved to the location that they were tagged with when saving, or specified by ``map_location``. This
            second step is a no-op if the final location is CPU. When the ``mmap`` flag is set, instead of copying the
            tensor storages from disk to CPU memory in the first step, ``f`` is mmaped.
        pickle_load_args: (Python 3 only) optional keyword arguments passed over to
            :func:`pickle_module.load` and :func:`pickle_module.Unpickler`, e.g.,
            :attr:`errors=...`.

    .. warning::
        :func:`torch.load()` unless `weights_only` parameter is set to `True`,
        uses ``pickle`` module implicitly, which is known to be insecure.
        It is possible to construct malicious pickle data which will execute arbitrary code
        during unpickling. Never load data that could have come from an untrusted
        source in an unsafe mode, or that could have been tampered with. **Only load data you trust**.

    .. note::
        When you call :func:`torch.load()` on a file which contains GPU tensors, those tensors
        will be loaded to GPU by default. You can call ``torch.load(.., map_location='cpu')``
        and then :meth:`load_state_dict` to avoid GPU RAM surge when loading a model checkpoint.

    .. note::
        By default, we decode byte strings as ``utf-8``. This is to avoid a common error
        case ``UnicodeDecodeError: 'ascii' codec can't decode byte 0x...``
        when loading files saved by Python 2 in Python 3. If this default
        is incorrect, you may use an extra :attr:`encoding` keyword argument to specify how
        these objects should be loaded, e.g., :attr:`encoding='latin1'` decodes them
        to strings using ``latin1`` encoding, and :attr:`encoding='bytes'` keeps them
        as byte arrays which can be decoded later with ``byte_array.decode(...)``.

    Example:
        >>> # xdoctest: +SKIP("undefined filepaths")
        >>> torch.load('tensors.pt', weights_only=True)
        # Load all tensors onto the CPU
        >>> torch.load('tensors.pt', map_location=torch.device('cpu'), weights_only=True)
        # Load all tensors onto the CPU, using a function
        >>> torch.load('tensors.pt', map_location=lambda storage, loc: storage, weights_only=True)
        # Load all tensors onto GPU 1
        >>> torch.load('tensors.pt', map_location=lambda storage, loc: storage.cuda(1), weights_only=True)
        # Map tensors from GPU 1 to GPU 0
        >>> torch.load('tensors.pt', map_location={'cuda:1': 'cuda:0'}, weights_only=True)
        # Load tensor from io.BytesIO object
        # Loading from a buffer setting weights_only=False, warning this can be unsafe
        >>> with open('tensor.pt', 'rb') as f:
        ...     buffer = io.BytesIO(f.read())
        >>> torch.load(buffer, weights_only=False)
        # Load a module with 'ascii' encoding for unpickling
        # Loading from a module setting weights_only=False, warning this can be unsafe
        >>> torch.load('module.pt', encoding='ascii', weights_only=False)
    """
    torch._C._log_api_usage_once("torch.load")
    # BUGFIX: added the missing space between sentences in the error message.
    UNSAFE_MESSAGE = (
        "Weights only load failed. Re-running `torch.load` with `weights_only` set to `False`"
        " will likely succeed, but it can result in arbitrary code execution."
        " Do it only if you get the file from a trusted source. WeightsUnpickler error: "
    )
    # Add ability to force safe only weight loads via environment variable
    if os.getenv("TORCH_FORCE_WEIGHTS_ONLY_LOAD", "0").lower() in ['1', 'y', 'yes', 'true']:
        weights_only = True

    if weights_only:
        if pickle_module is not None:
            raise RuntimeError("Can not safely load weights when explicit pickle_module is specified")
    else:
        if pickle_module is None:
            pickle_module = pickle

    # make flipping default BC-compatible
    if mmap is None:
        mmap = False

    _check_dill_version(pickle_module)

    if 'encoding' not in pickle_load_args:
        pickle_load_args['encoding'] = 'utf-8'

    with _open_file_like(f, 'rb') as opened_file:
        if _is_zipfile(opened_file):
            # The zipfile reader is going to advance the current file position.
            # If we want to actually tail call to torch.jit.load, we need to
            # reset back to the original position.
            orig_position = opened_file.tell()
            overall_storage = None
            with _open_zipfile_reader(opened_file) as opened_zipfile:
                if _is_torchscript_zip(opened_zipfile):
                    warnings.warn("'torch.load' received a zip file that looks like a TorchScript archive"
                                  " dispatching to 'torch.jit.load' (call 'torch.jit.load' directly to"
                                  " silence this warning)", UserWarning)
                    opened_file.seek(orig_position)
                    return torch.jit.load(opened_file, map_location=map_location)
                if mmap:
                    # BUGFIX: accept any path-like `f` (str or pathlib.Path),
                    # consistent with `_is_path` elsewhere in this module;
                    # previously only `str` was allowed.
                    if not _is_path(f):
                        raise ValueError("f must be a file path in order to use the mmap argument")
                    size = os.path.getsize(f)
                    overall_storage = torch.UntypedStorage.from_file(os.fspath(f), False, size)
                if weights_only:
                    try:
                        return _load(opened_zipfile,
                                     map_location,
                                     _weights_only_unpickler,
                                     overall_storage=overall_storage,
                                     **pickle_load_args)
                    except RuntimeError as e:
                        raise pickle.UnpicklingError(UNSAFE_MESSAGE + str(e)) from None
                return _load(opened_zipfile,
                             map_location,
                             pickle_module,
                             overall_storage=overall_storage,
                             **pickle_load_args)
        if mmap:
            raise RuntimeError("mmap can only be used with files saved with "
                               "`torch.save(_use_new_zipfile_serialization=True), "
                               "please torch.save your checkpoint with this option in order to use mmap.")
        if weights_only:
            try:
                return _legacy_load(opened_file, map_location, _weights_only_unpickler, **pickle_load_args)
            except RuntimeError as e:
                raise pickle.UnpicklingError(UNSAFE_MESSAGE + str(e)) from None
        return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
|
| 1041 |
+
|
| 1042 |
+
|
| 1043 |
+
# Register pickling support for layout instances such as
|
| 1044 |
+
# torch.sparse_coo, etc
|
| 1045 |
+
def _get_layout(name):
|
| 1046 |
+
"""Get layout extension object from its string representation.
|
| 1047 |
+
"""
|
| 1048 |
+
cache = _get_layout.cache # type: ignore[attr-defined]
|
| 1049 |
+
if not cache:
|
| 1050 |
+
for v in torch.__dict__.values():
|
| 1051 |
+
if isinstance(v, torch.layout):
|
| 1052 |
+
cache[str(v)] = v
|
| 1053 |
+
return cache[name]
|
| 1054 |
+
|
| 1055 |
+
# There are yet not good way to type annotate function attributes https://github.com/python/mypy/issues/2087
|
| 1056 |
+
_get_layout.cache = {} # type: ignore[attr-defined]
|
| 1057 |
+
copyreg.pickle(torch.layout, lambda obj: (_get_layout, (str(obj),)))
|
| 1058 |
+
|
| 1059 |
+
|
| 1060 |
+
def _legacy_load(f, map_location, pickle_module, **pickle_load_args):
    """Deserialize a checkpoint saved in the pre-zipfile formats.

    Handles two legacy on-disk layouts:
      * the tar-based container format (delegated to the inner ``legacy_load``),
      * the sequential pickle format: magic number, protocol version, sys info,
        the pickled object graph, then raw storage bytes appended at the end.

    ``f`` is a seekable binary file-like object; ``map_location`` follows the
    same contract as in ``torch.load``.
    """
    deserialized_objects: Dict[int, Any] = {}

    restore_location = _get_restore_location(map_location)

    class UnpicklerWrapper(pickle_module.Unpickler):  # type: ignore[name-defined]

        def find_class(self, mod_name, name):
            # Swap any pickled '*Storage' class name for a lightweight
            # StorageType stand-in; unknown storage names raise KeyError and
            # fall through to the regular class lookup.
            if type(name) is str and 'Storage' in name:
                try:
                    return StorageType(name)
                except KeyError:
                    pass
            return super().find_class(mod_name, name)

    def _check_container_source(container_type, source_file, original_source):
        # Compare the class source recorded at save time against the current
        # class source and warn (optionally dumping a reverse patch) on change.
        try:
            current_source = ''.join(get_source_lines_and_file(container_type)[0])
        except Exception:  # saving the source is optional, so we can ignore any errors
            warnings.warn("Couldn't retrieve source code for container of "
                          "type " + container_type.__name__ + ". It won't be checked "
                          "for correctness upon loading.")
            return
        if original_source != current_source:
            if container_type.dump_patches:
                file_name = container_type.__name__ + '.patch'
                diff = difflib.unified_diff(current_source.split('\n'),
                                            original_source.split('\n'),
                                            source_file,
                                            source_file, lineterm="")
                lines = '\n'.join(diff)
                try:
                    # 'a+' lets an existing identical patch file be accepted;
                    # any other pre-existing content raises into the OSError path.
                    with open(file_name, 'a+') as f:
                        file_size = f.seek(0, 2)
                        f.seek(0)
                        if file_size == 0:
                            f.write(lines)
                        elif file_size != len(lines) or f.read() != lines:
                            raise OSError
                    msg = ("Saved a reverse patch to " + file_name + ". "
                           "Run `patch -p0 < " + file_name + "` to revert your "
                           "changes.")
                except OSError:
                    msg = ("Tried to save a patch, but couldn't create a "
                           "writable file " + file_name + ". Make sure it "
                           "doesn't exist and your working directory is "
                           "writable.")
            else:
                msg = ("you can retrieve the original source code by "
                       "accessing the object's source attribute or set "
                       "`torch.nn.Module.dump_patches = True` and use the "
                       "patch tool to revert the changes.")
            msg = f"source code of class '{torch.typename(container_type)}' has changed. {msg}"
            warnings.warn(msg, SourceChangeWarning)

    def legacy_load(f):
        # Loader for the oldest, tar-based format. NOTE: the parameter `f`
        # deliberately shadows the outer `f`, as do the `with open(...) as f`
        # blocks below.
        deserialized_objects: Dict[int, Any] = {}

        def persistent_load(saved_id):
            if isinstance(saved_id, tuple):
                # Ignore containers that don't have any sources saved
                if all(saved_id[1:]):
                    _check_container_source(*saved_id)
                return saved_id[0]
            return deserialized_objects[int(saved_id)]

        # NOTE(review): `mkdtemp` is used as a context manager here, so it is
        # presumably a module-local helper, not tempfile.mkdtemp — confirm.
        with closing(tarfile.open(fileobj=f, mode='r:', format=tarfile.PAX_FORMAT)) as tar, \
                mkdtemp() as tmpdir:

            tar.extract('storages', path=tmpdir)
            with open(os.path.join(tmpdir, 'storages'), 'rb', 0) as f:
                num_storages = pickle_module.load(f, **pickle_load_args)
                for i in range(num_storages):
                    args = pickle_module.load(f, **pickle_load_args)
                    key, location, storage_type = args
                    dtype = storage_type._dtype
                    obj = cast(Storage, torch.UntypedStorage)._new_with_file(f, torch._utils._element_size(dtype))
                    obj = restore_location(obj, location)
                    # TODO: Once we decide to break serialization FC, we can
                    # stop wrapping with TypedStorage
                    deserialized_objects[key] = torch.storage.TypedStorage(
                        wrap_storage=obj,
                        dtype=dtype,
                        _internal=True)

                storage_views = pickle_module.load(f, **pickle_load_args)
                for target_cdata, root_cdata, offset, numel in storage_views:
                    root = deserialized_objects[root_cdata]
                    element_size = torch._utils._element_size(root.dtype)
                    offset_bytes = offset * element_size
                    # TODO: Once we decide to break serialization FC, we can
                    # stop wrapping with TypedStorage
                    deserialized_objects[target_cdata] = torch.storage.TypedStorage(
                        wrap_storage=root._untyped_storage[offset_bytes:offset_bytes + numel * element_size],
                        dtype=root.dtype,
                        _internal=True)

            tar.extract('tensors', path=tmpdir)
            with open(os.path.join(tmpdir, 'tensors'), 'rb', 0) as f:
                num_tensors = pickle_module.load(f, **pickle_load_args)
                for _ in range(num_tensors):
                    args = pickle_module.load(f, **pickle_load_args)
                    key, storage_id, original_tensor_type = args
                    storage = deserialized_objects[storage_id]
                    # Tensor metadata is stored as little-endian binary records.
                    ndim, = struct.unpack('<i', f.read(4))
                    # skip next 4 bytes; legacy encoding treated ndim as 8 bytes
                    f.read(4)
                    numel = struct.unpack(f'<{ndim}q', f.read(8 * ndim))
                    stride = struct.unpack(f'<{ndim}q', f.read(8 * ndim))
                    storage_offset, = struct.unpack('<q', f.read(8))
                    tensor = torch.tensor([], dtype=storage.dtype).set_(
                        storage._untyped_storage, storage_offset, numel, stride)
                    deserialized_objects[key] = tensor

            pickle_file = tar.extractfile('pickle')
            unpickler = UnpicklerWrapper(pickle_file, **pickle_load_args)
            unpickler.persistent_load = persistent_load
            result = unpickler.load()
            return result

    deserialized_objects = {}

    def persistent_load(saved_id):
        # Resolver for the sequential pickle format; ids are tuples tagged
        # 'module' or 'storage'.
        assert isinstance(saved_id, tuple)
        typename = _maybe_decode_ascii(saved_id[0])
        data = saved_id[1:]

        if typename == 'module':
            # Ignore containers that don't have any sources saved
            if all(data[1:]):
                _check_container_source(*data)
            return data[0]
        elif typename == 'storage':
            storage_type, root_key, location, numel, view_metadata = data
            location = _maybe_decode_ascii(location)
            dtype = storage_type.dtype

            nbytes = numel * torch._utils._element_size(dtype)

            if root_key not in deserialized_objects:
                # Allocate an empty storage now; its bytes arrive later via
                # _set_from_file (see the loop at the end of _legacy_load).
                obj = cast(Storage, torch.UntypedStorage(nbytes))
                obj._torch_load_uninitialized = True
                # TODO: Once we decide to break serialization FC, we can
                # stop wrapping with TypedStorage
                typed_storage = torch.storage.TypedStorage(
                    wrap_storage=restore_location(obj, location),
                    dtype=dtype,
                    _internal=True)
                deserialized_objects[root_key] = typed_storage
            else:
                typed_storage = deserialized_objects[root_key]
                if typed_storage._data_ptr() == 0:
                    typed_storage = torch.storage.TypedStorage(
                        device=typed_storage._untyped_storage.device,
                        dtype=dtype,
                        _internal=True)

            if view_metadata is not None:
                # The storage is a view into a root storage: slice out the
                # byte range (element offsets scaled by the element size).
                view_key, offset, view_size = view_metadata
                offset_bytes = offset * torch._utils._element_size(dtype)
                view_size_bytes = view_size * torch._utils._element_size(dtype)
                if view_key not in deserialized_objects:
                    # TODO: Once we decide to break serialization FC, we can
                    # stop wrapping with TypedStorage
                    deserialized_objects[view_key] = torch.storage.TypedStorage(
                        wrap_storage=typed_storage._untyped_storage[offset_bytes:offset_bytes + view_size_bytes],
                        dtype=dtype,
                        _internal=True)
                res = deserialized_objects[view_key]

            else:
                res = typed_storage
            return res
        else:
            raise RuntimeError(f"Unknown saved id type: {saved_id[0]}")

    _check_seekable(f)
    f_should_read_directly = _should_read_directly(f)

    if f_should_read_directly and f.tell() == 0:
        # legacy_load requires that f has fileno()
        # only if offset is zero we can attempt the legacy tar file loader
        try:
            return legacy_load(f)
        except tarfile.TarError:
            if _is_zipfile(f):
                # .zip is used for torch.jit.save and will throw an un-pickling error here
                raise RuntimeError(
                    f"{f.name} is a zip archive (did you mean to use torch.jit.load()?)") from None
            # if not a tarfile, reset file offset and proceed
            f.seek(0)

    if not hasattr(f, 'readinto') and (3, 8, 0) <= sys.version_info < (3, 8, 2):
        raise RuntimeError(
            "torch.load does not work with file-like objects that do not implement readinto on Python 3.8.0 and 3.8.1. "
            f"Received object of type \"{type(f)}\". Please update to Python 3.8.2 or newer to restore this "
            "functionality.")

    magic_number = pickle_module.load(f, **pickle_load_args)
    if magic_number != MAGIC_NUMBER:
        raise RuntimeError("Invalid magic number; corrupt file?")
    protocol_version = pickle_module.load(f, **pickle_load_args)
    if protocol_version != PROTOCOL_VERSION:
        raise RuntimeError(f"Invalid protocol version: {protocol_version}")

    _sys_info = pickle_module.load(f, **pickle_load_args)
    unpickler = UnpicklerWrapper(f, **pickle_load_args)
    unpickler.persistent_load = persistent_load
    result = unpickler.load()

    # The raw storage payloads follow the pickle stream; fill each previously
    # allocated storage with its bytes in the recorded key order.
    deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)

    offset = f.tell() if f_should_read_directly else None
    for key in deserialized_storage_keys:
        assert key in deserialized_objects
        typed_storage = deserialized_objects[key]
        typed_storage._untyped_storage._set_from_file(
            f, offset, f_should_read_directly,
            torch._utils._element_size(typed_storage.dtype))
        if offset is not None:
            offset = f.tell()

    torch._utils._validate_loaded_sparse_tensors()

    return result
|
| 1285 |
+
|
| 1286 |
+
|
| 1287 |
+
def _maybe_decode_ascii(bytes_str: Union[bytes, str]) -> str:
|
| 1288 |
+
# When using encoding='bytes' in Py3, some **internal** keys stored as
|
| 1289 |
+
# strings in Py2 are loaded as bytes. This function decodes them with
|
| 1290 |
+
# ascii encoding, one that Py3 uses by default.
|
| 1291 |
+
#
|
| 1292 |
+
# NOTE: This should only be used on internal keys (e.g., `typename` and
|
| 1293 |
+
# `location` in `persistent_load` below!
|
| 1294 |
+
if isinstance(bytes_str, bytes):
|
| 1295 |
+
return bytes_str.decode('ascii')
|
| 1296 |
+
return bytes_str
|
| 1297 |
+
|
| 1298 |
+
|
| 1299 |
+
def _get_restore_location(map_location):
|
| 1300 |
+
if map_location is None:
|
| 1301 |
+
restore_location = default_restore_location
|
| 1302 |
+
elif isinstance(map_location, dict):
|
| 1303 |
+
def restore_location(storage, location):
|
| 1304 |
+
location = map_location.get(location, location)
|
| 1305 |
+
return default_restore_location(storage, location)
|
| 1306 |
+
elif isinstance(map_location, (str, bytes)):
|
| 1307 |
+
def restore_location(storage, location):
|
| 1308 |
+
return default_restore_location(storage, map_location)
|
| 1309 |
+
elif isinstance(map_location, torch.device):
|
| 1310 |
+
def restore_location(storage, location):
|
| 1311 |
+
return default_restore_location(storage, str(map_location))
|
| 1312 |
+
else:
|
| 1313 |
+
def restore_location(storage, location):
|
| 1314 |
+
result = map_location(storage, location)
|
| 1315 |
+
if result is None:
|
| 1316 |
+
result = default_restore_location(storage, location)
|
| 1317 |
+
return result
|
| 1318 |
+
return restore_location
|
| 1319 |
+
|
| 1320 |
+
|
| 1321 |
+
class StorageType:
    """Load-time stand-in for pickled ``*Storage`` class names.

    The ``find_class`` overrides in the loaders map any pickled storage class
    name to an instance of this class, keeping only the dtype encoded in the
    name; the legacy typed-storage classes therefore never need to exist at
    load time.
    """

    def __init__(self, name):
        # _get_dtype_from_pickle_storage_type is a module-level helper defined
        # elsewhere in this file; the callers catch KeyError from this
        # constructor, so presumably it raises KeyError for unknown names.
        self._dtype = _get_dtype_from_pickle_storage_type(name)

    @property
    def dtype(self):
        # dtype parsed from the pickled storage-class name. NOTE: the legacy
        # tar loader also reads the private `_dtype` attribute directly.
        return self._dtype

    def __str__(self):
        return f'StorageType(dtype={self.dtype})'
|
| 1331 |
+
|
| 1332 |
+
|
| 1333 |
+
def _load(zip_file, map_location, pickle_module, pickle_file='data.pkl', overall_storage=None, **pickle_load_args):
    """Deserialize an object graph from a zipfile-format checkpoint.

    ``zip_file`` is an opened archive reader; tensor payloads are fetched from
    its ``data/<key>`` records on demand via ``persistent_load``. When
    ``overall_storage`` is given (the mmap path), storages are sliced out of
    that single file-backed untyped storage instead of being read record by
    record.
    """
    restore_location = _get_restore_location(map_location)

    # Cache of already materialized storages, keyed by record key.
    loaded_storages = {}

    # check if byteswapping is needed
    byteordername = 'byteorder'
    byteorderdata = None
    if zip_file.has_record(byteordername):
        byteorderdata = zip_file.get_record(byteordername)
        if byteorderdata not in [b'little', b'big']:
            raise ValueError('Unknown endianness type: ' + byteorderdata.decode())
    elif get_default_load_endianness() == LoadEndianness.LITTLE or \
            get_default_load_endianness() is None:
        # Checkpoints without a byteorder record default to little endian.
        byteorderdata = b'little'
    elif get_default_load_endianness() == LoadEndianness.BIG:
        byteorderdata = b'big'
    elif get_default_load_endianness() == LoadEndianness.NATIVE:
        # NATIVE: leave byteorderdata as None so no swap is ever performed.
        pass
    else:
        raise ValueError('Invalid load endianness type')

    if not zip_file.has_record(byteordername) and \
            get_default_load_endianness() is None and \
            sys.byteorder == 'big':
        # Default behaviour was changed
        # See https://github.com/pytorch/pytorch/issues/101688
        warnings.warn("The default load endianness for checkpoints without a byteorder mark "
                      "on big endian machines was changed from 'native' to 'little' endian, "
                      "to avoid this behavior please use "
                      "torch.serialization.set_default_load_endianness to set "
                      "the desired default load endianness",
                      UserWarning)

    def load_tensor(dtype, numel, key, location):
        # Materialize the untyped storage for record `key`. NOTE: `numel` is a
        # byte count here — the caller (persistent_load) passes nbytes.
        name = f'data/{key}'
        if overall_storage is not None:
            # mmap path: slice a view straight out of the file-backed storage.
            storage_offset = zip_file.get_record_offset(name)
            storage = overall_storage[storage_offset:storage_offset + numel]
        else:
            storage = zip_file.get_storage_from_record(name, numel, torch.UntypedStorage)._typed_storage()._untyped_storage
        # swap here if byteswapping is needed
        if byteorderdata is not None:
            if byteorderdata.decode() != sys.byteorder:
                storage.byteswap(dtype)

        # TODO: Once we decide to break serialization FC, we can
        # stop wrapping with TypedStorage
        typed_storage = torch.storage.TypedStorage(
            wrap_storage=restore_location(storage, location),
            dtype=dtype,
            _internal=True)

        if typed_storage._data_ptr() != 0:
            # Only cache storages with a real data pointer — presumably so
            # empty (null-pointer) storages are not shared via the cache;
            # TODO confirm.
            loaded_storages[key] = typed_storage

        return typed_storage

    def persistent_load(saved_id):
        assert isinstance(saved_id, tuple)
        typename = _maybe_decode_ascii(saved_id[0])
        data = saved_id[1:]

        assert typename == 'storage', \
            f"Unknown typename for persistent_load, expected 'storage' but got '{typename}'"
        storage_type, key, location, numel = data
        if storage_type is torch.UntypedStorage:
            # Untyped storages are byte-addressed, so load them as uint8.
            dtype = torch.uint8
        else:
            dtype = storage_type.dtype

        if key in loaded_storages:
            typed_storage = loaded_storages[key]
        else:
            nbytes = numel * torch._utils._element_size(dtype)
            typed_storage = load_tensor(dtype, nbytes, key, _maybe_decode_ascii(location))

        return typed_storage

    load_module_mapping: Dict[str, str] = {
        # See https://github.com/pytorch/pytorch/pull/51633
        'torch.tensor': 'torch._tensor'
    }

    # Need to subclass Unpickler instead of directly monkey-patching the find_class method
    # because it's marked readonly in pickle.
    # The type: ignore is because mypy can't statically determine the type of this class.
    class UnpicklerWrapper(pickle_module.Unpickler):  # type: ignore[name-defined]
        # from https://stackoverflow.com/questions/13398462/unpickling-python-objects-with-a-changed-module-path/13405732
        # Lets us override the imports that pickle uses when unpickling an object.
        # This is useful for maintaining BC if we change a module path that tensor instantiation relies on.
        def find_class(self, mod_name, name):
            if type(name) is str and 'Storage' in name:
                try:
                    return StorageType(name)
                except KeyError:
                    pass
            mod_name = load_module_mapping.get(mod_name, mod_name)
            return super().find_class(mod_name, name)

    # Load the data (which may in turn use `persistent_load` to load tensors)
    data_file = io.BytesIO(zip_file.get_record(pickle_file))

    unpickler = UnpicklerWrapper(data_file, **pickle_load_args)
    unpickler.persistent_load = persistent_load
    result = unpickler.load()

    torch._utils._validate_loaded_sparse_tensors()
    torch._C._log_api_usage_metadata(
        "torch.load.metadata", {"serialization_id": zip_file.serialization_id()}
    )
    return result
|
| 1445 |
+
|
| 1446 |
+
|
| 1447 |
+
def _is_torchscript_zip(zip_file):
|
| 1448 |
+
return 'constants.pkl' in zip_file.get_all_records()
|
evalkit_internvl/lib/python3.10/site-packages/torch/types.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from typing import Any, List, Optional, Sequence, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import builtins
|
| 5 |
+
|
| 6 |
+
# Convenience aliases for common composite types that we need
# to talk about in PyTorch

# A single tensor or any sequence of tensors.
_TensorOrTensors = Union[torch.Tensor, Sequence[torch.Tensor]]
# As above, but also admitting GradientEdge handles; the string forward
# references keep torch.autograd unresolved at module import time —
# presumably to avoid an import cycle (TODO confirm).
_TensorOrTensorsOrGradEdge = Union[
    torch.Tensor, Sequence[torch.Tensor],
    "torch.autograd.graph.GradientEdge",
    Sequence["torch.autograd.graph.GradientEdge"]]

# In some cases, these basic types are shadowed by corresponding
# top-level values. The underscore variants let us refer to these
# types. See https://github.com/python/mypy/issues/4146 for why these
# workarounds are necessary
_int = builtins.int
_float = builtins.float
_bool = builtins.bool
_complex = builtins.complex

_dtype = torch.dtype
_device = torch.device
_qscheme = torch.qscheme
# Anything accepted where a size is expected: torch.Size, list, or int tuple.
_size = Union[torch.Size, List[_int], Tuple[_int, ...]]
_layout = torch.layout
_dispatchkey = Union[str, torch._C.DispatchKey]

# Meta-type for "numeric" things; matches our docs
Number = Union[builtins.int, builtins.float, builtins.bool]

# Meta-type for "device-like" things. Not to be confused with 'device' (a
# literal device object). This nomenclature is consistent with PythonArgParser.
# None means use the default device (typically CPU)
Device = Optional[Union[_device, str, _int]]
# Drop `Optional` from the module namespace now that the aliases are built.
del Optional
|
| 39 |
+
|
| 40 |
+
# Storage protocol implemented by ${Type}StorageBase classes

class Storage:
    """Structural type describing torch storage objects, for typing/casting only.

    Every method body is an empty stub; the concrete implementations live in
    the storage classes themselves. Code elsewhere uses ``cast(Storage, ...)``
    against this shape rather than instantiating it.
    """

    _cdata: int
    device: torch.device
    dtype: torch.dtype
    # Flag set by the legacy loader on freshly allocated storages whose bytes
    # have not yet been read from the checkpoint file.
    _torch_load_uninitialized: bool

    def __deepcopy__(self, memo) -> 'Storage':  # type: ignore[empty-body]
        ...

    def _new_shared(self, int) -> 'Storage':  # type: ignore[empty-body]
        ...

    def _write_file(self, f: Any, is_real_file: _bool, save_size: _bool, element_size: int) -> None:
        ...

    def element_size(self) -> int:  # type: ignore[empty-body]
        ...

    def is_shared(self) -> bool:  # type: ignore[empty-body]
        ...

    def share_memory_(self) -> 'Storage':  # type: ignore[empty-body]
        ...

    def nbytes(self) -> int:  # type: ignore[empty-body]
        ...

    def cpu(self) -> 'Storage':  # type: ignore[empty-body]
        ...

    def data_ptr(self) -> int:  # type: ignore[empty-body]
        ...

    def from_file(self, filename: str, shared: bool = False, nbytes: int = 0) -> 'Storage':  # type: ignore[empty-body]
        ...

    def _new_with_file(self, f: Any, element_size: int) -> 'Storage':  # type: ignore[empty-body]
        ...
|
evalkit_internvl/lib/python3.10/site-packages/torch/version.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
|
| 3 |
+
__all__ = ['__version__', 'debug', 'cuda', 'git_version', 'hip']
# Human-readable torch version; the '+cu118' local tag marks a CUDA 11.8 build.
__version__ = '2.2.0+cu118'
# Build flag — presumably True only for debug builds of the C++ core; confirm.
debug = False
# CUDA toolkit version the binaries were built against; None on non-CUDA builds.
cuda: Optional[str] = '11.8'
# Git commit hash of the pytorch checkout this build was produced from.
git_version = '8ac9b20d4b090c213799e81acf48a55ea8d437d6'
# ROCm/HIP version for AMD builds; None here, consistent with `cuda` being set.
hip: Optional[str] = None
|
evalkit_tf437/lib/python3.10/site-packages/__pycache__/markdown2.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3ca59ebaba93465505b3720fdc964ee8b75d1a67353000e4aee3d98c489745ea
|
| 3 |
+
size 110002
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:31608925ac5554e7279f30b6d0ba652bd63e641e4eb1b1ae2429ecb3ffc427e6
|
| 3 |
+
size 101480
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc
ADDED
|
Binary file (7.47 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc
ADDED
|
Binary file (84.2 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc
ADDED
|
Binary file (14.4 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_base.py
ADDED
|
@@ -0,0 +1,850 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Generalized Linear Models.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
# Authors: The scikit-learn developers
|
| 6 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 7 |
+
|
| 8 |
+
import numbers
|
| 9 |
+
import warnings
|
| 10 |
+
from abc import ABCMeta, abstractmethod
|
| 11 |
+
from numbers import Integral
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
import scipy.sparse as sp
|
| 15 |
+
from scipy import linalg, optimize, sparse
|
| 16 |
+
from scipy.sparse.linalg import lsqr
|
| 17 |
+
from scipy.special import expit
|
| 18 |
+
|
| 19 |
+
from ..base import (
|
| 20 |
+
BaseEstimator,
|
| 21 |
+
ClassifierMixin,
|
| 22 |
+
MultiOutputMixin,
|
| 23 |
+
RegressorMixin,
|
| 24 |
+
_fit_context,
|
| 25 |
+
)
|
| 26 |
+
from ..utils import check_array, check_random_state
|
| 27 |
+
from ..utils._array_api import (
|
| 28 |
+
_asarray_with_order,
|
| 29 |
+
_average,
|
| 30 |
+
get_namespace,
|
| 31 |
+
get_namespace_and_device,
|
| 32 |
+
indexing_dtype,
|
| 33 |
+
supported_float_dtypes,
|
| 34 |
+
)
|
| 35 |
+
from ..utils._seq_dataset import (
|
| 36 |
+
ArrayDataset32,
|
| 37 |
+
ArrayDataset64,
|
| 38 |
+
CSRDataset32,
|
| 39 |
+
CSRDataset64,
|
| 40 |
+
)
|
| 41 |
+
from ..utils.extmath import safe_sparse_dot
|
| 42 |
+
from ..utils.parallel import Parallel, delayed
|
| 43 |
+
from ..utils.sparsefuncs import mean_variance_axis
|
| 44 |
+
from ..utils.validation import _check_sample_weight, check_is_fitted, validate_data
|
| 45 |
+
|
| 46 |
+
# TODO: bayesian_ridge_regression and bayesian_regression_ard
|
| 47 |
+
# should be squashed into its respective objects.
|
| 48 |
+
|
| 49 |
+
SPARSE_INTERCEPT_DECAY = 0.01
|
| 50 |
+
# For sparse data intercept updates are scaled by this decay factor to avoid
|
| 51 |
+
# intercept oscillation.
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def make_dataset(X, y, sample_weight, random_state=None):
    """Build the sequential ``Dataset`` wrapper for dense or sparse inputs.

    Also returns the ``intercept_decay`` factor, which differs between
    sparse and dense datasets (sparse updates are damped to avoid
    intercept oscillation).

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data

    y : array-like, shape (n_samples, )
        Target values.

    sample_weight : numpy array of shape (n_samples,)
        The weight of each sample

    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset random sampling. It is not
        used for dataset shuffling.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    dataset
        The ``Dataset`` abstraction
    intercept_decay
        The intercept decay
    """
    rng = check_random_state(random_state)
    # SequentialDataset* requires a strictly positive seed, hence the lower
    # bound of 1.
    seed = rng.randint(1, np.iinfo(np.int32).max)

    # Pick the 32- or 64-bit dataset implementations matching X's dtype.
    is_float32 = X.dtype == np.float32
    csr_cls = CSRDataset32 if is_float32 else CSRDataset64
    dense_cls = ArrayDataset32 if is_float32 else ArrayDataset64

    if sp.issparse(X):
        return (
            csr_cls(X.data, X.indptr, X.indices, y, sample_weight, seed=seed),
            SPARSE_INTERCEPT_DECAY,
        )

    # The dense dataset requires a C-contiguous array.
    dense_X = np.ascontiguousarray(X)
    return dense_cls(dense_X, y, sample_weight, seed=seed), 1.0
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def _preprocess_data(
    X,
    y,
    *,
    fit_intercept,
    copy=True,
    copy_y=True,
    sample_weight=None,
    check_input=True,
):
    """Common data preprocessing for fitting linear models.

    This helper is in charge of the following steps:

    - Ensure that `sample_weight` is an array or `None`.
    - If `check_input=True`, perform standard input validation of `X`, `y`.
    - Perform copies if requested to avoid side-effects in case of inplace
      modifications of the input.

    Then, if `fit_intercept=True` this preprocessing centers both `X` and `y` as
    follows:
        - if `X` is dense, center the data and
        store the mean vector in `X_offset`.
        - if `X` is sparse, store the mean in `X_offset`
        without centering `X`. The centering is expected to be handled by the
        linear solver where appropriate.
        - in either case, always center `y` and store the mean in `y_offset`.
        - both `X_offset` and `y_offset` are always weighted by `sample_weight`
        if not set to `None`.

    If `fit_intercept=False`, no centering is performed and `X_offset`, `y_offset`
    are set to zero.

    Returns
    -------
    X_out : {ndarray, sparse matrix} of shape (n_samples, n_features)
        If copy=True a copy of the input X is triggered, otherwise operations are
        inplace.
        If input X is dense, then X_out is centered.
    y_out : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets)
        Centered version of y. Possibly performed inplace on input y depending
        on the copy_y parameter.
    X_offset : ndarray of shape (n_features,)
        The mean per column of input X.
    y_offset : float or ndarray of shape (n_features,)
    X_scale : ndarray of shape (n_features,)
        Always an array of ones. TODO: refactor the code base to make it
        possible to remove this unused variable.
    """
    # Resolve the array namespace (NumPy or an Array API library) and the
    # device the inputs live on, so outputs are allocated consistently.
    xp, _, device_ = get_namespace_and_device(X, y, sample_weight)
    n_samples, n_features = X.shape
    X_is_sparse = sp.issparse(X)

    # A scalar sample_weight is equivalent to uniform weighting: drop it.
    if isinstance(sample_weight, numbers.Number):
        sample_weight = None
    if sample_weight is not None:
        sample_weight = xp.asarray(sample_weight)

    if check_input:
        # check_array handles dtype conversion and copying in one pass.
        X = check_array(
            X, copy=copy, accept_sparse=["csr", "csc"], dtype=supported_float_dtypes(xp)
        )
        y = check_array(y, dtype=X.dtype, copy=copy_y, ensure_2d=False)
    else:
        # Caller vouches for the inputs; only align y's dtype and honor the
        # copy flags manually.
        y = xp.astype(y, X.dtype, copy=copy_y)
        if copy:
            if X_is_sparse:
                X = X.copy()
            else:
                # order="K" preserves the input's memory layout.
                X = _asarray_with_order(X, order="K", copy=True, xp=xp)

    dtype_ = X.dtype

    if fit_intercept:
        if X_is_sparse:
            # Sparse X is NOT centered here (that would densify it); only the
            # column means are computed. X_var is unused.
            X_offset, X_var = mean_variance_axis(X, axis=0, weights=sample_weight)
        else:
            X_offset = _average(X, axis=0, weights=sample_weight, xp=xp)

            # Cast before the in-place subtraction so X's dtype is preserved.
            X_offset = xp.astype(X_offset, X.dtype, copy=False)
            X -= X_offset

        # y is always centered, regardless of X's sparsity.
        y_offset = _average(y, axis=0, weights=sample_weight, xp=xp)
        y -= y_offset
    else:
        # No centering requested: report zero offsets with matching shapes.
        X_offset = xp.zeros(n_features, dtype=X.dtype, device=device_)
        if y.ndim == 1:
            y_offset = xp.asarray(0.0, dtype=dtype_, device=device_)
        else:
            y_offset = xp.zeros(y.shape[1], dtype=dtype_, device=device_)

    # XXX: X_scale is no longer needed. It is an historic artifact from the
    # time where linear model exposed the normalize parameter.
    X_scale = xp.ones(n_features, dtype=X.dtype, device=device_)
    return X, y, X_offset, y_offset, X_scale
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
# TODO: _rescale_data should be factored into _preprocess_data.
|
| 205 |
+
# Currently, the fact that sag implements its own way to deal with
|
| 206 |
+
# sample_weight makes the refactoring tricky.
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def _rescale_data(X, y, sample_weight, inplace=False):
    """Rescale data sample-wise by square root of sample_weight.

    For many linear models, this enables easy support for sample_weight because

        (y - X w)' S (y - X w)

    with S = diag(sample_weight) becomes

        ||y_rescaled - X_rescaled w||_2^2

    when setting

        y_rescaled = sqrt(S) y
        X_rescaled = sqrt(S) X

    Returns
    -------
    X_rescaled : {array-like, sparse matrix}

    y_rescaled : {array-like, sparse matrix}
    """
    # Assume that _validate_data and _check_sample_weight have been called by
    # the caller.
    xp, _ = get_namespace(X, y, sample_weight)
    n_samples = X.shape[0]
    sample_weight_sqrt = xp.sqrt(sample_weight)

    if sp.issparse(X) or sp.issparse(y):
        # Sparse inputs are rescaled via multiplication with a diagonal
        # matrix of sqrt weights; this never happens in place.
        sw_matrix = sparse.dia_matrix(
            (sample_weight_sqrt, 0), shape=(n_samples, n_samples)
        )

    if sp.issparse(X):
        X = safe_sparse_dot(sw_matrix, X)
    else:
        # `*=` mutates the caller's array; `*` allocates a new one. The
        # distinction is the whole point of the `inplace` flag.
        if inplace:
            X *= sample_weight_sqrt[:, None]
        else:
            X = X * sample_weight_sqrt[:, None]

    if sp.issparse(y):
        y = safe_sparse_dot(sw_matrix, y)
    else:
        # 1D y broadcasts directly; 2D (multi-target) y needs a column vector
        # of weights to scale every target identically.
        if inplace:
            if y.ndim == 1:
                y *= sample_weight_sqrt
            else:
                y *= sample_weight_sqrt[:, None]
        else:
            if y.ndim == 1:
                y = y * sample_weight_sqrt
            else:
                y = y * sample_weight_sqrt[:, None]
    return X, y, sample_weight_sqrt
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
class LinearModel(BaseEstimator, metaclass=ABCMeta):
    """Base class for Linear Models"""

    @abstractmethod
    def fit(self, X, y):
        """Fit model."""

    def _decision_function(self, X):
        """Compute ``X @ coef_ + intercept_`` for a fitted model."""
        check_is_fitted(self)

        X = validate_data(self, X, accept_sparse=["csr", "csc", "coo"], reset=False)
        weights = self.coef_
        # A 1D coef_ means a single target; 2D means one row per target, so
        # the matrix is transposed before the product.
        if weights.ndim == 1:
            return X @ weights + self.intercept_
        return X @ weights.T + self.intercept_

    def predict(self, X):
        """
        Predict using the linear model.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Samples.

        Returns
        -------
        C : array, shape (n_samples,)
            Returns predicted values.
        """
        return self._decision_function(X)

    def _set_intercept(self, X_offset, y_offset, X_scale):
        """Set the intercept_"""
        xp, _ = get_namespace(X_offset, y_offset, X_scale)

        if not self.fit_intercept:
            self.intercept_ = 0.0
            return

        # coef_ may carry a different dtype than X (e.g. with warm_start);
        # align it with X_scale's dtype before dividing.
        coef = xp.astype(self.coef_, X_scale.dtype, copy=False)
        self.coef_ = coef = xp.divide(coef, X_scale)

        # Undo the centering performed by _preprocess_data.
        if coef.ndim == 1:
            self.intercept_ = y_offset - X_offset @ coef
        else:
            self.intercept_ = y_offset - X_offset @ coef.T
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
|
| 322 |
+
# Maybe the n_features checking can be moved to LinearModel.
|
| 323 |
+
class LinearClassifierMixin(ClassifierMixin):
    """Mixin for linear classifiers.

    Handles prediction for sparse and dense X.
    """

    def decision_function(self, X):
        """
        Predict confidence scores for samples.

        The confidence score for a sample is proportional to the signed
        distance of that sample to the hyperplane.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data matrix for which we want to get the confidence scores.

        Returns
        -------
        scores : ndarray of shape (n_samples,) or (n_samples, n_classes)
            Confidence scores per `(n_samples, n_classes)` combination. In the
            binary case, confidence score for `self.classes_[1]` where >0 means
            this class would be predicted.
        """
        check_is_fitted(self)
        xp, _ = get_namespace(X)

        X = validate_data(self, X, accept_sparse="csr", reset=False)
        scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
        # In the binary case coef_ has a single row, producing a column of
        # shape (n_samples, 1): flatten it to the documented 1D shape.
        return (
            xp.reshape(scores, (-1,))
            if (scores.ndim > 1 and scores.shape[1] == 1)
            else scores
        )

    def predict(self, X):
        """
        Predict class labels for samples in X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data matrix for which we want to get the predictions.

        Returns
        -------
        y_pred : ndarray of shape (n_samples,)
            Vector containing the class labels for each sample.
        """
        xp, _ = get_namespace(X)
        scores = self.decision_function(X)
        if len(scores.shape) == 1:
            # Binary problem: positive score selects classes_[1].
            indices = xp.astype(scores > 0, indexing_dtype(xp))
        else:
            # Multiclass: the column with the highest score wins.
            indices = xp.argmax(scores, axis=1)

        return xp.take(self.classes_, indices, axis=0)

    def _predict_proba_lr(self, X):
        """Probability estimation for OvR logistic regression.

        Positive class probabilities are computed as
        1. / (1. + np.exp(-self.decision_function(X)));
        multiclass is handled by normalizing that over all classes.
        """
        prob = self.decision_function(X)
        # expit is applied in place: `prob` now holds sigmoid(scores).
        expit(prob, out=prob)
        if prob.ndim == 1:
            # Binary case: stack P(class 0) and P(class 1) as two columns.
            return np.vstack([1 - prob, prob]).T
        else:
            # OvR normalization, like LibLinear's predict_probability
            prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
class SparseCoefMixin:
    """Mixin for converting coef_ to and from CSR format.

    L1-regularizing estimators should inherit this.
    """

    def densify(self):
        """
        Convert coefficient matrix to dense array format.

        Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
        default format of ``coef_`` and is required for fitting, so calling
        this method is only required on models that have previously been
        sparsified; otherwise, it is a no-op.

        Returns
        -------
        self
            Fitted estimator.
        """
        check_is_fitted(
            self, msg="Estimator, %(name)s, must be fitted before densifying."
        )
        coef = self.coef_
        if sp.issparse(coef):
            self.coef_ = coef.toarray()
        return self

    def sparsify(self):
        """
        Convert coefficient matrix to sparse format.

        Converts the ``coef_`` member to a scipy.sparse matrix, which for
        L1-regularized models can be much more memory- and storage-efficient
        than the usual numpy.ndarray representation.

        The ``intercept_`` member is not converted.

        Returns
        -------
        self
            Fitted estimator.

        Notes
        -----
        For non-sparse models, i.e. when there are not many zeros in ``coef_``,
        this may actually *increase* memory usage, so use this method with
        care. A rule of thumb is that the number of zero elements, which can
        be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
        to provide significant benefits.

        After calling this method, further fitting with the partial_fit
        method (if any) will not work until you call densify.
        """
        check_is_fitted(
            self, msg="Estimator, %(name)s, must be fitted before sparsifying."
        )
        self.coef_ = sp.csr_matrix(self.coef_)
        return self
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):
    """
    Ordinary least squares Linear Regression.

    LinearRegression fits a linear model with coefficients w = (w1, ..., wp)
    to minimize the residual sum of squares between the observed targets in
    the dataset, and the targets predicted by the linear approximation.

    Parameters
    ----------
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to False, no intercept will be used in calculations
        (i.e. data is expected to be centered).

    copy_X : bool, default=True
        If True, X will be copied; else, it may be overwritten.

    n_jobs : int, default=None
        The number of jobs to use for the computation. This will only provide
        speedup in case of sufficiently large problems, that is if firstly
        `n_targets > 1` and secondly `X` is sparse or if `positive` is set
        to `True`. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context. ``-1`` means using all
        processors. See :term:`Glossary <n_jobs>` for more details.

    positive : bool, default=False
        When set to ``True``, forces the coefficients to be positive. This
        option is only supported for dense arrays.

        .. versionadded:: 0.24

    Attributes
    ----------
    coef_ : array of shape (n_features, ) or (n_targets, n_features)
        Estimated coefficients for the linear regression problem.
        If multiple targets are passed during the fit (y 2D), this
        is a 2D array of shape (n_targets, n_features), while if only
        one target is passed, this is a 1D array of length n_features.

    rank_ : int
        Rank of matrix `X`. Only available when `X` is dense.

    singular_ : array of shape (min(X, y),)
        Singular values of `X`. Only available when `X` is dense.

    intercept_ : float or array of shape (n_targets,)
        Independent term in the linear model. Set to 0.0 if
        `fit_intercept = False`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    Ridge : Ridge regression addresses some of the
        problems of Ordinary Least Squares by imposing a penalty on the
        size of the coefficients with l2 regularization.
    Lasso : The Lasso is a linear model that estimates
        sparse coefficients with l1 regularization.
    ElasticNet : Elastic-Net is a linear regression
        model trained with both l1 and l2 -norm regularization of the
        coefficients.

    Notes
    -----
    From the implementation point of view, this is just plain Ordinary
    Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares
    (scipy.optimize.nnls) wrapped as a predictor object.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import LinearRegression
    >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
    >>> # y = 1 * x_0 + 2 * x_1 + 3
    >>> y = np.dot(X, np.array([1, 2])) + 3
    >>> reg = LinearRegression().fit(X, y)
    >>> reg.score(X, y)
    1.0
    >>> reg.coef_
    array([1., 2.])
    >>> reg.intercept_
    np.float64(3.0...)
    >>> reg.predict(np.array([[3, 5]]))
    array([16.])
    """

    _parameter_constraints: dict = {
        "fit_intercept": ["boolean"],
        "copy_X": ["boolean"],
        "n_jobs": [None, Integral],
        "positive": ["boolean"],
    }

    def __init__(
        self,
        *,
        fit_intercept=True,
        copy_X=True,
        n_jobs=None,
        positive=False,
    ):
        self.fit_intercept = fit_intercept
        self.copy_X = copy_X
        self.n_jobs = n_jobs
        self.positive = positive

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """
        Fit linear model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values. Will be cast to X's dtype if necessary.

        sample_weight : array-like of shape (n_samples,), default=None
            Individual weights for each sample.

            .. versionadded:: 0.17
               parameter *sample_weight* support to LinearRegression.

        Returns
        -------
        self : object
            Fitted Estimator.
        """
        n_jobs_ = self.n_jobs

        # nnls (used when positive=True) only supports dense arrays.
        accept_sparse = False if self.positive else ["csr", "csc", "coo"]

        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=accept_sparse,
            y_numeric=True,
            multi_output=True,
            force_writeable=True,
        )

        has_sw = sample_weight is not None
        if has_sw:
            sample_weight = _check_sample_weight(
                sample_weight, X, dtype=X.dtype, ensure_non_negative=True
            )

        # Note that neither _rescale_data nor the rest of the fit method of
        # LinearRegression can benefit from in-place operations when X is a
        # sparse matrix. Therefore, let's not copy X when it is sparse.
        copy_X_in_preprocess_data = self.copy_X and not sp.issparse(X)

        X, y, X_offset, y_offset, X_scale = _preprocess_data(
            X,
            y,
            fit_intercept=self.fit_intercept,
            copy=copy_X_in_preprocess_data,
            sample_weight=sample_weight,
        )

        if has_sw:
            # Sample weight can be implemented via a simple rescaling. Note
            # that we safely do inplace rescaling when _preprocess_data has
            # already made a copy if requested.
            X, y, sample_weight_sqrt = _rescale_data(
                X, y, sample_weight, inplace=copy_X_in_preprocess_data
            )

        if self.positive:
            # Non-negative least squares, one nnls call per target.
            if y.ndim < 2:
                self.coef_ = optimize.nnls(X, y)[0]
            else:
                # scipy.optimize.nnls cannot handle y with shape (M, K)
                outs = Parallel(n_jobs=n_jobs_)(
                    delayed(optimize.nnls)(X, y[:, j]) for j in range(y.shape[1])
                )
                self.coef_ = np.vstack([out[0] for out in outs])
        elif sp.issparse(X):
            # Sparse X was not centered by _preprocess_data; the centering is
            # folded into a LinearOperator so X stays sparse.
            X_offset_scale = X_offset / X_scale

            if has_sw:
                # X has already been rescaled by sqrt(sample_weight), so the
                # centering term must be rescaled accordingly.

                def matvec(b):
                    return X.dot(b) - sample_weight_sqrt * b.dot(X_offset_scale)

                def rmatvec(b):
                    return X.T.dot(b) - X_offset_scale * b.dot(sample_weight_sqrt)

            else:

                def matvec(b):
                    return X.dot(b) - b.dot(X_offset_scale)

                def rmatvec(b):
                    return X.T.dot(b) - X_offset_scale * b.sum()

            X_centered = sparse.linalg.LinearOperator(
                shape=X.shape, matvec=matvec, rmatvec=rmatvec
            )

            if y.ndim < 2:
                self.coef_ = lsqr(X_centered, y)[0]
            else:
                # sparse_lstsq cannot handle y with shape (M, K)
                outs = Parallel(n_jobs=n_jobs_)(
                    delayed(lsqr)(X_centered, y[:, j].ravel())
                    for j in range(y.shape[1])
                )
                self.coef_ = np.vstack([out[0] for out in outs])
        else:
            # cut-off ratio for small singular values
            cond = max(X.shape) * np.finfo(X.dtype).eps
            self.coef_, _, self.rank_, self.singular_ = linalg.lstsq(X, y, cond=cond)
            # lstsq returns (n_features, n_targets); coef_ is documented as
            # (n_targets, n_features).
            self.coef_ = self.coef_.T

        if y.ndim == 1:
            self.coef_ = np.ravel(self.coef_)
        self._set_intercept(X_offset, y_offset, X_scale)
        return self

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        # Sparse input is unsupported when positive=True (nnls is dense-only).
        tags.input_tags.sparse = not self.positive
        return tags
|
| 694 |
+
|
| 695 |
+
|
| 696 |
+
def _check_precomputed_gram_matrix(
|
| 697 |
+
X, precompute, X_offset, X_scale, rtol=None, atol=1e-5
|
| 698 |
+
):
|
| 699 |
+
"""Computes a single element of the gram matrix and compares it to
|
| 700 |
+
the corresponding element of the user supplied gram matrix.
|
| 701 |
+
|
| 702 |
+
If the values do not match a ValueError will be thrown.
|
| 703 |
+
|
| 704 |
+
Parameters
|
| 705 |
+
----------
|
| 706 |
+
X : ndarray of shape (n_samples, n_features)
|
| 707 |
+
Data array.
|
| 708 |
+
|
| 709 |
+
precompute : array-like of shape (n_features, n_features)
|
| 710 |
+
User-supplied gram matrix.
|
| 711 |
+
|
| 712 |
+
X_offset : ndarray of shape (n_features,)
|
| 713 |
+
Array of feature means used to center design matrix.
|
| 714 |
+
|
| 715 |
+
X_scale : ndarray of shape (n_features,)
|
| 716 |
+
Array of feature scale factors used to normalize design matrix.
|
| 717 |
+
|
| 718 |
+
rtol : float, default=None
|
| 719 |
+
Relative tolerance; see numpy.allclose
|
| 720 |
+
If None, it is set to 1e-4 for arrays of dtype numpy.float32 and 1e-7
|
| 721 |
+
otherwise.
|
| 722 |
+
|
| 723 |
+
atol : float, default=1e-5
|
| 724 |
+
absolute tolerance; see :func`numpy.allclose`. Note that the default
|
| 725 |
+
here is more tolerant than the default for
|
| 726 |
+
:func:`numpy.testing.assert_allclose`, where `atol=0`.
|
| 727 |
+
|
| 728 |
+
Raises
|
| 729 |
+
------
|
| 730 |
+
ValueError
|
| 731 |
+
Raised when the provided Gram matrix is not consistent.
|
| 732 |
+
"""
|
| 733 |
+
|
| 734 |
+
n_features = X.shape[1]
|
| 735 |
+
f1 = n_features // 2
|
| 736 |
+
f2 = min(f1 + 1, n_features - 1)
|
| 737 |
+
|
| 738 |
+
v1 = (X[:, f1] - X_offset[f1]) * X_scale[f1]
|
| 739 |
+
v2 = (X[:, f2] - X_offset[f2]) * X_scale[f2]
|
| 740 |
+
|
| 741 |
+
expected = np.dot(v1, v2)
|
| 742 |
+
actual = precompute[f1, f2]
|
| 743 |
+
|
| 744 |
+
dtypes = [precompute.dtype, expected.dtype]
|
| 745 |
+
if rtol is None:
|
| 746 |
+
rtols = [1e-4 if dtype == np.float32 else 1e-7 for dtype in dtypes]
|
| 747 |
+
rtol = max(rtols)
|
| 748 |
+
|
| 749 |
+
if not np.isclose(expected, actual, rtol=rtol, atol=atol):
|
| 750 |
+
raise ValueError(
|
| 751 |
+
"Gram matrix passed in via 'precompute' parameter "
|
| 752 |
+
"did not pass validation when a single element was "
|
| 753 |
+
"checked - please check that it was computed "
|
| 754 |
+
f"properly. For element ({f1},{f2}) we computed "
|
| 755 |
+
f"{expected} but the user-supplied value was "
|
| 756 |
+
f"{actual}."
|
| 757 |
+
)
|
| 758 |
+
|
| 759 |
+
|
| 760 |
+
def _pre_fit(
    X,
    y,
    Xy,
    precompute,
    fit_intercept,
    copy,
    check_input=True,
    sample_weight=None,
):
    """Function used at beginning of fit in linear models with L1 or L0 penalty.

    This function applies _preprocess_data and additionally computes the gram matrix
    `precompute` as needed as well as `Xy`.
    """
    n_samples, n_features = X.shape

    if sparse.issparse(X):
        # copy is not needed here as X is not modified inplace when X is sparse
        # A precomputed Gram matrix is never used with sparse X.
        precompute = False
        X, y, X_offset, y_offset, X_scale = _preprocess_data(
            X,
            y,
            fit_intercept=fit_intercept,
            copy=False,
            check_input=check_input,
            sample_weight=sample_weight,
        )
    else:
        # copy was done in fit if necessary
        X, y, X_offset, y_offset, X_scale = _preprocess_data(
            X,
            y,
            fit_intercept=fit_intercept,
            copy=copy,
            check_input=check_input,
            sample_weight=sample_weight,
        )
        # Rescale only in dense case. Sparse cd solver directly deals with
        # sample_weight.
        if sample_weight is not None:
            # This triggers copies anyway.
            X, y, _ = _rescale_data(X, y, sample_weight=sample_weight)

    # hasattr(..., "__array__") distinguishes a user-supplied Gram matrix
    # from the bool/"auto" forms of `precompute`.
    if hasattr(precompute, "__array__"):
        if fit_intercept and not np.allclose(X_offset, np.zeros(n_features)):
            # The user's Gram matrix was computed on uncentered X, but X was
            # just centered: it is no longer valid.
            warnings.warn(
                (
                    "Gram matrix was provided but X was centered to fit "
                    "intercept: recomputing Gram matrix."
                ),
                UserWarning,
            )
            # TODO: instead of warning and recomputing, we could just center
            # the user provided Gram matrix a-posteriori (after making a copy
            # when `copy=True`).
            # recompute Gram
            precompute = "auto"
            Xy = None
        elif check_input:
            # If we're going to use the user's precomputed gram matrix, we
            # do a quick check to make sure its not totally bogus.
            _check_precomputed_gram_matrix(X, precompute, X_offset, X_scale)

    # precompute if n_samples > n_features
    if isinstance(precompute, str) and precompute == "auto":
        precompute = n_samples > n_features

    if precompute is True:
        # make sure that the 'precompute' array is contiguous.
        precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order="C")
        np.dot(X.T, X, out=precompute)

    if not hasattr(precompute, "__array__"):
        Xy = None  # cannot use Xy if precompute is not Gram

    if hasattr(precompute, "__array__") and Xy is None:
        common_dtype = np.result_type(X.dtype, y.dtype)
        if y.ndim == 1:
            # Xy is 1d, make sure it is contiguous.
            Xy = np.empty(shape=n_features, dtype=common_dtype, order="C")
            np.dot(X.T, y, out=Xy)
        else:
            # Make sure that Xy is always F contiguous even if X or y are not
            # contiguous: the goal is to make it fast to extract the data for a
            # specific target.
            n_targets = y.shape[1]
            Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order="F")
            np.dot(y.T, X, out=Xy.T)

    return X, y, X_offset, y_offset, X_scale, precompute, Xy
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_cd_fast.pyx
ADDED
|
@@ -0,0 +1,956 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
from libc.math cimport fabs
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from cython cimport floating
|
| 8 |
+
import warnings
|
| 9 |
+
from ..exceptions import ConvergenceWarning
|
| 10 |
+
|
| 11 |
+
from ..utils._cython_blas cimport (
|
| 12 |
+
_axpy, _dot, _asum, _gemv, _nrm2, _copy, _scal
|
| 13 |
+
)
|
| 14 |
+
from ..utils._cython_blas cimport ColMajor, Trans, NoTrans
|
| 15 |
+
from ..utils._typedefs cimport uint32_t
|
| 16 |
+
from ..utils._random cimport our_rand_r
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# The following two functions are shamelessly copied from the tree code.
|
| 20 |
+
|
| 21 |
+
cdef enum:
|
| 22 |
+
# Max value for our rand_r replacement (near the bottom).
|
| 23 |
+
# We don't use RAND_MAX because it's different across platforms and
|
| 24 |
+
# particularly tiny on Windows/MSVC.
|
| 25 |
+
# It corresponds to the maximum representable value for
|
| 26 |
+
# 32-bit signed integers (i.e. 2^31 - 1).
|
| 27 |
+
RAND_R_MAX = 2147483647
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
cdef inline uint32_t rand_int(uint32_t end, uint32_t* random_state) noexcept nogil:
|
| 31 |
+
"""Generate a random integer in [0; end)."""
|
| 32 |
+
return our_rand_r(random_state) % end
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
cdef inline floating fmax(floating x, floating y) noexcept nogil:
|
| 36 |
+
if x > y:
|
| 37 |
+
return x
|
| 38 |
+
return y
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
cdef inline floating fsign(floating f) noexcept nogil:
|
| 42 |
+
if f == 0:
|
| 43 |
+
return 0
|
| 44 |
+
elif f > 0:
|
| 45 |
+
return 1.0
|
| 46 |
+
else:
|
| 47 |
+
return -1.0
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
cdef floating abs_max(int n, const floating* a) noexcept nogil:
|
| 51 |
+
"""np.max(np.abs(a))"""
|
| 52 |
+
cdef int i
|
| 53 |
+
cdef floating m = fabs(a[0])
|
| 54 |
+
cdef floating d
|
| 55 |
+
for i in range(1, n):
|
| 56 |
+
d = fabs(a[i])
|
| 57 |
+
if d > m:
|
| 58 |
+
m = d
|
| 59 |
+
return m
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
cdef floating max(int n, floating* a) noexcept nogil:
|
| 63 |
+
"""np.max(a)"""
|
| 64 |
+
cdef int i
|
| 65 |
+
cdef floating m = a[0]
|
| 66 |
+
cdef floating d
|
| 67 |
+
for i in range(1, n):
|
| 68 |
+
d = a[i]
|
| 69 |
+
if d > m:
|
| 70 |
+
m = d
|
| 71 |
+
return m
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
cdef floating diff_abs_max(int n, const floating* a, floating* b) noexcept nogil:
|
| 75 |
+
"""np.max(np.abs(a - b))"""
|
| 76 |
+
cdef int i
|
| 77 |
+
cdef floating m = fabs(a[0] - b[0])
|
| 78 |
+
cdef floating d
|
| 79 |
+
for i in range(1, n):
|
| 80 |
+
d = fabs(a[i] - b[i])
|
| 81 |
+
if d > m:
|
| 82 |
+
m = d
|
| 83 |
+
return m
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def enet_coordinate_descent(
|
| 87 |
+
floating[::1] w,
|
| 88 |
+
floating alpha,
|
| 89 |
+
floating beta,
|
| 90 |
+
const floating[::1, :] X,
|
| 91 |
+
const floating[::1] y,
|
| 92 |
+
unsigned int max_iter,
|
| 93 |
+
floating tol,
|
| 94 |
+
object rng,
|
| 95 |
+
bint random=0,
|
| 96 |
+
bint positive=0
|
| 97 |
+
):
|
| 98 |
+
"""Cython version of the coordinate descent algorithm
|
| 99 |
+
for Elastic-Net regression
|
| 100 |
+
|
| 101 |
+
We minimize
|
| 102 |
+
|
| 103 |
+
(1/2) * norm(y - X w, 2)^2 + alpha norm(w, 1) + (beta/2) norm(w, 2)^2
|
| 104 |
+
|
| 105 |
+
Returns
|
| 106 |
+
-------
|
| 107 |
+
w : ndarray of shape (n_features,)
|
| 108 |
+
ElasticNet coefficients.
|
| 109 |
+
gap : float
|
| 110 |
+
Achieved dual gap.
|
| 111 |
+
tol : float
|
| 112 |
+
Equals input `tol` times `np.dot(y, y)`. The tolerance used for the dual gap.
|
| 113 |
+
n_iter : int
|
| 114 |
+
Number of coordinate descent iterations.
|
| 115 |
+
"""
|
| 116 |
+
|
| 117 |
+
if floating is float:
|
| 118 |
+
dtype = np.float32
|
| 119 |
+
else:
|
| 120 |
+
dtype = np.float64
|
| 121 |
+
|
| 122 |
+
# get the data information into easy vars
|
| 123 |
+
cdef unsigned int n_samples = X.shape[0]
|
| 124 |
+
cdef unsigned int n_features = X.shape[1]
|
| 125 |
+
|
| 126 |
+
# compute norms of the columns of X
|
| 127 |
+
cdef floating[::1] norm_cols_X = np.square(X).sum(axis=0)
|
| 128 |
+
|
| 129 |
+
# initial value of the residuals
|
| 130 |
+
cdef floating[::1] R = np.empty(n_samples, dtype=dtype)
|
| 131 |
+
cdef floating[::1] XtA = np.empty(n_features, dtype=dtype)
|
| 132 |
+
|
| 133 |
+
cdef floating tmp
|
| 134 |
+
cdef floating w_ii
|
| 135 |
+
cdef floating d_w_max
|
| 136 |
+
cdef floating w_max
|
| 137 |
+
cdef floating d_w_ii
|
| 138 |
+
cdef floating gap = tol + 1.0
|
| 139 |
+
cdef floating d_w_tol = tol
|
| 140 |
+
cdef floating dual_norm_XtA
|
| 141 |
+
cdef floating R_norm2
|
| 142 |
+
cdef floating w_norm2
|
| 143 |
+
cdef floating l1_norm
|
| 144 |
+
cdef floating const
|
| 145 |
+
cdef floating A_norm2
|
| 146 |
+
cdef unsigned int ii
|
| 147 |
+
cdef unsigned int n_iter = 0
|
| 148 |
+
cdef unsigned int f_iter
|
| 149 |
+
cdef uint32_t rand_r_state_seed = rng.randint(0, RAND_R_MAX)
|
| 150 |
+
cdef uint32_t* rand_r_state = &rand_r_state_seed
|
| 151 |
+
|
| 152 |
+
if alpha == 0 and beta == 0:
|
| 153 |
+
warnings.warn("Coordinate descent with no regularization may lead to "
|
| 154 |
+
"unexpected results and is discouraged.")
|
| 155 |
+
|
| 156 |
+
with nogil:
|
| 157 |
+
# R = y - np.dot(X, w)
|
| 158 |
+
_copy(n_samples, &y[0], 1, &R[0], 1)
|
| 159 |
+
_gemv(ColMajor, NoTrans, n_samples, n_features, -1.0, &X[0, 0],
|
| 160 |
+
n_samples, &w[0], 1, 1.0, &R[0], 1)
|
| 161 |
+
|
| 162 |
+
# tol *= np.dot(y, y)
|
| 163 |
+
tol *= _dot(n_samples, &y[0], 1, &y[0], 1)
|
| 164 |
+
|
| 165 |
+
for n_iter in range(max_iter):
|
| 166 |
+
w_max = 0.0
|
| 167 |
+
d_w_max = 0.0
|
| 168 |
+
for f_iter in range(n_features): # Loop over coordinates
|
| 169 |
+
if random:
|
| 170 |
+
ii = rand_int(n_features, rand_r_state)
|
| 171 |
+
else:
|
| 172 |
+
ii = f_iter
|
| 173 |
+
|
| 174 |
+
if norm_cols_X[ii] == 0.0:
|
| 175 |
+
continue
|
| 176 |
+
|
| 177 |
+
w_ii = w[ii] # Store previous value
|
| 178 |
+
|
| 179 |
+
if w_ii != 0.0:
|
| 180 |
+
# R += w_ii * X[:,ii]
|
| 181 |
+
_axpy(n_samples, w_ii, &X[0, ii], 1, &R[0], 1)
|
| 182 |
+
|
| 183 |
+
# tmp = (X[:,ii]*R).sum()
|
| 184 |
+
tmp = _dot(n_samples, &X[0, ii], 1, &R[0], 1)
|
| 185 |
+
|
| 186 |
+
if positive and tmp < 0:
|
| 187 |
+
w[ii] = 0.0
|
| 188 |
+
else:
|
| 189 |
+
w[ii] = (fsign(tmp) * fmax(fabs(tmp) - alpha, 0)
|
| 190 |
+
/ (norm_cols_X[ii] + beta))
|
| 191 |
+
|
| 192 |
+
if w[ii] != 0.0:
|
| 193 |
+
# R -= w[ii] * X[:,ii] # Update residual
|
| 194 |
+
_axpy(n_samples, -w[ii], &X[0, ii], 1, &R[0], 1)
|
| 195 |
+
|
| 196 |
+
# update the maximum absolute coefficient update
|
| 197 |
+
d_w_ii = fabs(w[ii] - w_ii)
|
| 198 |
+
d_w_max = fmax(d_w_max, d_w_ii)
|
| 199 |
+
|
| 200 |
+
w_max = fmax(w_max, fabs(w[ii]))
|
| 201 |
+
|
| 202 |
+
if (
|
| 203 |
+
w_max == 0.0
|
| 204 |
+
or d_w_max / w_max < d_w_tol
|
| 205 |
+
or n_iter == max_iter - 1
|
| 206 |
+
):
|
| 207 |
+
# the biggest coordinate update of this iteration was smaller
|
| 208 |
+
# than the tolerance: check the duality gap as ultimate
|
| 209 |
+
# stopping criterion
|
| 210 |
+
|
| 211 |
+
# XtA = np.dot(X.T, R) - beta * w
|
| 212 |
+
_copy(n_features, &w[0], 1, &XtA[0], 1)
|
| 213 |
+
_gemv(ColMajor, Trans,
|
| 214 |
+
n_samples, n_features, 1.0, &X[0, 0], n_samples,
|
| 215 |
+
&R[0], 1,
|
| 216 |
+
-beta, &XtA[0], 1)
|
| 217 |
+
|
| 218 |
+
if positive:
|
| 219 |
+
dual_norm_XtA = max(n_features, &XtA[0])
|
| 220 |
+
else:
|
| 221 |
+
dual_norm_XtA = abs_max(n_features, &XtA[0])
|
| 222 |
+
|
| 223 |
+
# R_norm2 = np.dot(R, R)
|
| 224 |
+
R_norm2 = _dot(n_samples, &R[0], 1, &R[0], 1)
|
| 225 |
+
|
| 226 |
+
# w_norm2 = np.dot(w, w)
|
| 227 |
+
w_norm2 = _dot(n_features, &w[0], 1, &w[0], 1)
|
| 228 |
+
|
| 229 |
+
if (dual_norm_XtA > alpha):
|
| 230 |
+
const = alpha / dual_norm_XtA
|
| 231 |
+
A_norm2 = R_norm2 * (const ** 2)
|
| 232 |
+
gap = 0.5 * (R_norm2 + A_norm2)
|
| 233 |
+
else:
|
| 234 |
+
const = 1.0
|
| 235 |
+
gap = R_norm2
|
| 236 |
+
|
| 237 |
+
l1_norm = _asum(n_features, &w[0], 1)
|
| 238 |
+
|
| 239 |
+
# np.dot(R.T, y)
|
| 240 |
+
gap += (alpha * l1_norm
|
| 241 |
+
- const * _dot(n_samples, &R[0], 1, &y[0], 1)
|
| 242 |
+
+ 0.5 * beta * (1 + const ** 2) * (w_norm2))
|
| 243 |
+
|
| 244 |
+
if gap < tol:
|
| 245 |
+
# return if we reached desired tolerance
|
| 246 |
+
break
|
| 247 |
+
|
| 248 |
+
else:
|
| 249 |
+
# for/else, runs if for doesn't end with a `break`
|
| 250 |
+
with gil:
|
| 251 |
+
message = (
|
| 252 |
+
"Objective did not converge. You might want to increase "
|
| 253 |
+
"the number of iterations, check the scale of the "
|
| 254 |
+
"features or consider increasing regularisation. "
|
| 255 |
+
f"Duality gap: {gap:.3e}, tolerance: {tol:.3e}"
|
| 256 |
+
)
|
| 257 |
+
if alpha < np.finfo(np.float64).eps:
|
| 258 |
+
message += (
|
| 259 |
+
" Linear regression models with null weight for the "
|
| 260 |
+
"l1 regularization term are more efficiently fitted "
|
| 261 |
+
"using one of the solvers implemented in "
|
| 262 |
+
"sklearn.linear_model.Ridge/RidgeCV instead."
|
| 263 |
+
)
|
| 264 |
+
warnings.warn(message, ConvergenceWarning)
|
| 265 |
+
|
| 266 |
+
return np.asarray(w), gap, tol, n_iter + 1
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def sparse_enet_coordinate_descent(
|
| 270 |
+
floating[::1] w,
|
| 271 |
+
floating alpha,
|
| 272 |
+
floating beta,
|
| 273 |
+
const floating[::1] X_data,
|
| 274 |
+
const int[::1] X_indices,
|
| 275 |
+
const int[::1] X_indptr,
|
| 276 |
+
const floating[::1] y,
|
| 277 |
+
const floating[::1] sample_weight,
|
| 278 |
+
const floating[::1] X_mean,
|
| 279 |
+
unsigned int max_iter,
|
| 280 |
+
floating tol,
|
| 281 |
+
object rng,
|
| 282 |
+
bint random=0,
|
| 283 |
+
bint positive=0,
|
| 284 |
+
):
|
| 285 |
+
"""Cython version of the coordinate descent algorithm for Elastic-Net
|
| 286 |
+
|
| 287 |
+
We minimize:
|
| 288 |
+
|
| 289 |
+
1/2 * norm(y - Z w, 2)^2 + alpha * norm(w, 1) + (beta/2) * norm(w, 2)^2
|
| 290 |
+
|
| 291 |
+
where Z = X - X_mean.
|
| 292 |
+
With sample weights sw, this becomes
|
| 293 |
+
|
| 294 |
+
1/2 * sum(sw * (y - Z w)^2, axis=0) + alpha * norm(w, 1)
|
| 295 |
+
+ (beta/2) * norm(w, 2)^2
|
| 296 |
+
|
| 297 |
+
and X_mean is the weighted average of X (per column).
|
| 298 |
+
|
| 299 |
+
Returns
|
| 300 |
+
-------
|
| 301 |
+
w : ndarray of shape (n_features,)
|
| 302 |
+
ElasticNet coefficients.
|
| 303 |
+
gap : float
|
| 304 |
+
Achieved dual gap.
|
| 305 |
+
tol : float
|
| 306 |
+
Equals input `tol` times `np.dot(y, y)`. The tolerance used for the dual gap.
|
| 307 |
+
n_iter : int
|
| 308 |
+
Number of coordinate descent iterations.
|
| 309 |
+
"""
|
| 310 |
+
# Notes for sample_weight:
|
| 311 |
+
# For dense X, one centers X and y and then rescales them by sqrt(sample_weight).
|
| 312 |
+
# Here, for sparse X, we get the sample_weight averaged center X_mean. We take care
|
| 313 |
+
# that every calculation results as if we had rescaled y and X (and therefore also
|
| 314 |
+
# X_mean) by sqrt(sample_weight) without actually calculating the square root.
|
| 315 |
+
# We work with:
|
| 316 |
+
# yw = sample_weight
|
| 317 |
+
# R = sample_weight * residual
|
| 318 |
+
# norm_cols_X = np.sum(sample_weight * (X - X_mean)**2, axis=0)
|
| 319 |
+
|
| 320 |
+
# get the data information into easy vars
|
| 321 |
+
cdef unsigned int n_samples = y.shape[0]
|
| 322 |
+
cdef unsigned int n_features = w.shape[0]
|
| 323 |
+
|
| 324 |
+
# compute norms of the columns of X
|
| 325 |
+
cdef unsigned int ii
|
| 326 |
+
cdef floating[:] norm_cols_X
|
| 327 |
+
|
| 328 |
+
cdef unsigned int startptr = X_indptr[0]
|
| 329 |
+
cdef unsigned int endptr
|
| 330 |
+
|
| 331 |
+
# initial value of the residuals
|
| 332 |
+
# R = y - Zw, weighted version R = sample_weight * (y - Zw)
|
| 333 |
+
cdef floating[::1] R
|
| 334 |
+
cdef floating[::1] XtA
|
| 335 |
+
cdef const floating[::1] yw
|
| 336 |
+
|
| 337 |
+
if floating is float:
|
| 338 |
+
dtype = np.float32
|
| 339 |
+
else:
|
| 340 |
+
dtype = np.float64
|
| 341 |
+
|
| 342 |
+
norm_cols_X = np.zeros(n_features, dtype=dtype)
|
| 343 |
+
XtA = np.zeros(n_features, dtype=dtype)
|
| 344 |
+
|
| 345 |
+
cdef floating tmp
|
| 346 |
+
cdef floating w_ii
|
| 347 |
+
cdef floating d_w_max
|
| 348 |
+
cdef floating w_max
|
| 349 |
+
cdef floating d_w_ii
|
| 350 |
+
cdef floating X_mean_ii
|
| 351 |
+
cdef floating R_sum = 0.0
|
| 352 |
+
cdef floating R_norm2
|
| 353 |
+
cdef floating w_norm2
|
| 354 |
+
cdef floating A_norm2
|
| 355 |
+
cdef floating l1_norm
|
| 356 |
+
cdef floating normalize_sum
|
| 357 |
+
cdef floating gap = tol + 1.0
|
| 358 |
+
cdef floating d_w_tol = tol
|
| 359 |
+
cdef floating dual_norm_XtA
|
| 360 |
+
cdef unsigned int jj
|
| 361 |
+
cdef unsigned int n_iter = 0
|
| 362 |
+
cdef unsigned int f_iter
|
| 363 |
+
cdef uint32_t rand_r_state_seed = rng.randint(0, RAND_R_MAX)
|
| 364 |
+
cdef uint32_t* rand_r_state = &rand_r_state_seed
|
| 365 |
+
cdef bint center = False
|
| 366 |
+
cdef bint no_sample_weights = sample_weight is None
|
| 367 |
+
cdef int kk
|
| 368 |
+
|
| 369 |
+
if no_sample_weights:
|
| 370 |
+
yw = y
|
| 371 |
+
R = y.copy()
|
| 372 |
+
else:
|
| 373 |
+
yw = np.multiply(sample_weight, y)
|
| 374 |
+
R = yw.copy()
|
| 375 |
+
|
| 376 |
+
with nogil:
|
| 377 |
+
# center = (X_mean != 0).any()
|
| 378 |
+
for ii in range(n_features):
|
| 379 |
+
if X_mean[ii]:
|
| 380 |
+
center = True
|
| 381 |
+
break
|
| 382 |
+
|
| 383 |
+
for ii in range(n_features):
|
| 384 |
+
X_mean_ii = X_mean[ii]
|
| 385 |
+
endptr = X_indptr[ii + 1]
|
| 386 |
+
normalize_sum = 0.0
|
| 387 |
+
w_ii = w[ii]
|
| 388 |
+
|
| 389 |
+
if no_sample_weights:
|
| 390 |
+
for jj in range(startptr, endptr):
|
| 391 |
+
normalize_sum += (X_data[jj] - X_mean_ii) ** 2
|
| 392 |
+
R[X_indices[jj]] -= X_data[jj] * w_ii
|
| 393 |
+
norm_cols_X[ii] = normalize_sum + \
|
| 394 |
+
(n_samples - endptr + startptr) * X_mean_ii ** 2
|
| 395 |
+
if center:
|
| 396 |
+
for jj in range(n_samples):
|
| 397 |
+
R[jj] += X_mean_ii * w_ii
|
| 398 |
+
else:
|
| 399 |
+
for jj in range(startptr, endptr):
|
| 400 |
+
tmp = sample_weight[X_indices[jj]]
|
| 401 |
+
# second term will be subtracted by loop over range(n_samples)
|
| 402 |
+
normalize_sum += (tmp * (X_data[jj] - X_mean_ii) ** 2
|
| 403 |
+
- tmp * X_mean_ii ** 2)
|
| 404 |
+
R[X_indices[jj]] -= tmp * X_data[jj] * w_ii
|
| 405 |
+
if center:
|
| 406 |
+
for jj in range(n_samples):
|
| 407 |
+
normalize_sum += sample_weight[jj] * X_mean_ii ** 2
|
| 408 |
+
R[jj] += sample_weight[jj] * X_mean_ii * w_ii
|
| 409 |
+
norm_cols_X[ii] = normalize_sum
|
| 410 |
+
startptr = endptr
|
| 411 |
+
|
| 412 |
+
# tol *= np.dot(y, y)
|
| 413 |
+
# with sample weights: tol *= y @ (sw * y)
|
| 414 |
+
tol *= _dot(n_samples, &y[0], 1, &yw[0], 1)
|
| 415 |
+
|
| 416 |
+
for n_iter in range(max_iter):
|
| 417 |
+
|
| 418 |
+
w_max = 0.0
|
| 419 |
+
d_w_max = 0.0
|
| 420 |
+
|
| 421 |
+
for f_iter in range(n_features): # Loop over coordinates
|
| 422 |
+
if random:
|
| 423 |
+
ii = rand_int(n_features, rand_r_state)
|
| 424 |
+
else:
|
| 425 |
+
ii = f_iter
|
| 426 |
+
|
| 427 |
+
if norm_cols_X[ii] == 0.0:
|
| 428 |
+
continue
|
| 429 |
+
|
| 430 |
+
startptr = X_indptr[ii]
|
| 431 |
+
endptr = X_indptr[ii + 1]
|
| 432 |
+
w_ii = w[ii] # Store previous value
|
| 433 |
+
X_mean_ii = X_mean[ii]
|
| 434 |
+
|
| 435 |
+
if w_ii != 0.0:
|
| 436 |
+
# R += w_ii * X[:,ii]
|
| 437 |
+
if no_sample_weights:
|
| 438 |
+
for jj in range(startptr, endptr):
|
| 439 |
+
R[X_indices[jj]] += X_data[jj] * w_ii
|
| 440 |
+
if center:
|
| 441 |
+
for jj in range(n_samples):
|
| 442 |
+
R[jj] -= X_mean_ii * w_ii
|
| 443 |
+
else:
|
| 444 |
+
for jj in range(startptr, endptr):
|
| 445 |
+
tmp = sample_weight[X_indices[jj]]
|
| 446 |
+
R[X_indices[jj]] += tmp * X_data[jj] * w_ii
|
| 447 |
+
if center:
|
| 448 |
+
for jj in range(n_samples):
|
| 449 |
+
R[jj] -= sample_weight[jj] * X_mean_ii * w_ii
|
| 450 |
+
|
| 451 |
+
# tmp = (X[:,ii] * R).sum()
|
| 452 |
+
tmp = 0.0
|
| 453 |
+
for jj in range(startptr, endptr):
|
| 454 |
+
tmp += R[X_indices[jj]] * X_data[jj]
|
| 455 |
+
|
| 456 |
+
if center:
|
| 457 |
+
R_sum = 0.0
|
| 458 |
+
for jj in range(n_samples):
|
| 459 |
+
R_sum += R[jj]
|
| 460 |
+
tmp -= R_sum * X_mean_ii
|
| 461 |
+
|
| 462 |
+
if positive and tmp < 0.0:
|
| 463 |
+
w[ii] = 0.0
|
| 464 |
+
else:
|
| 465 |
+
w[ii] = fsign(tmp) * fmax(fabs(tmp) - alpha, 0) \
|
| 466 |
+
/ (norm_cols_X[ii] + beta)
|
| 467 |
+
|
| 468 |
+
if w[ii] != 0.0:
|
| 469 |
+
# R -= w[ii] * X[:,ii] # Update residual
|
| 470 |
+
if no_sample_weights:
|
| 471 |
+
for jj in range(startptr, endptr):
|
| 472 |
+
R[X_indices[jj]] -= X_data[jj] * w[ii]
|
| 473 |
+
if center:
|
| 474 |
+
for jj in range(n_samples):
|
| 475 |
+
R[jj] += X_mean_ii * w[ii]
|
| 476 |
+
else:
|
| 477 |
+
for jj in range(startptr, endptr):
|
| 478 |
+
tmp = sample_weight[X_indices[jj]]
|
| 479 |
+
R[X_indices[jj]] -= tmp * X_data[jj] * w[ii]
|
| 480 |
+
if center:
|
| 481 |
+
for jj in range(n_samples):
|
| 482 |
+
R[jj] += sample_weight[jj] * X_mean_ii * w[ii]
|
| 483 |
+
|
| 484 |
+
# update the maximum absolute coefficient update
|
| 485 |
+
d_w_ii = fabs(w[ii] - w_ii)
|
| 486 |
+
d_w_max = fmax(d_w_max, d_w_ii)
|
| 487 |
+
|
| 488 |
+
w_max = fmax(w_max, fabs(w[ii]))
|
| 489 |
+
|
| 490 |
+
if w_max == 0.0 or d_w_max / w_max < d_w_tol or n_iter == max_iter - 1:
|
| 491 |
+
# the biggest coordinate update of this iteration was smaller than
|
| 492 |
+
# the tolerance: check the duality gap as ultimate stopping
|
| 493 |
+
# criterion
|
| 494 |
+
|
| 495 |
+
# sparse X.T / dense R dot product
|
| 496 |
+
if center:
|
| 497 |
+
R_sum = 0.0
|
| 498 |
+
for jj in range(n_samples):
|
| 499 |
+
R_sum += R[jj]
|
| 500 |
+
|
| 501 |
+
# XtA = X.T @ R - beta * w
|
| 502 |
+
for ii in range(n_features):
|
| 503 |
+
XtA[ii] = 0.0
|
| 504 |
+
for kk in range(X_indptr[ii], X_indptr[ii + 1]):
|
| 505 |
+
XtA[ii] += X_data[kk] * R[X_indices[kk]]
|
| 506 |
+
|
| 507 |
+
if center:
|
| 508 |
+
XtA[ii] -= X_mean[ii] * R_sum
|
| 509 |
+
XtA[ii] -= beta * w[ii]
|
| 510 |
+
|
| 511 |
+
if positive:
|
| 512 |
+
dual_norm_XtA = max(n_features, &XtA[0])
|
| 513 |
+
else:
|
| 514 |
+
dual_norm_XtA = abs_max(n_features, &XtA[0])
|
| 515 |
+
|
| 516 |
+
# R_norm2 = np.dot(R, R)
|
| 517 |
+
if no_sample_weights:
|
| 518 |
+
R_norm2 = _dot(n_samples, &R[0], 1, &R[0], 1)
|
| 519 |
+
else:
|
| 520 |
+
R_norm2 = 0.0
|
| 521 |
+
for jj in range(n_samples):
|
| 522 |
+
# R is already multiplied by sample_weight
|
| 523 |
+
if sample_weight[jj] != 0:
|
| 524 |
+
R_norm2 += (R[jj] ** 2) / sample_weight[jj]
|
| 525 |
+
|
| 526 |
+
# w_norm2 = np.dot(w, w)
|
| 527 |
+
w_norm2 = _dot(n_features, &w[0], 1, &w[0], 1)
|
| 528 |
+
if (dual_norm_XtA > alpha):
|
| 529 |
+
const = alpha / dual_norm_XtA
|
| 530 |
+
A_norm2 = R_norm2 * const**2
|
| 531 |
+
gap = 0.5 * (R_norm2 + A_norm2)
|
| 532 |
+
else:
|
| 533 |
+
const = 1.0
|
| 534 |
+
gap = R_norm2
|
| 535 |
+
|
| 536 |
+
l1_norm = _asum(n_features, &w[0], 1)
|
| 537 |
+
|
| 538 |
+
gap += (alpha * l1_norm - const * _dot(
|
| 539 |
+
n_samples,
|
| 540 |
+
&R[0], 1,
|
| 541 |
+
&y[0], 1
|
| 542 |
+
)
|
| 543 |
+
+ 0.5 * beta * (1 + const ** 2) * w_norm2)
|
| 544 |
+
|
| 545 |
+
if gap < tol:
|
| 546 |
+
# return if we reached desired tolerance
|
| 547 |
+
break
|
| 548 |
+
|
| 549 |
+
else:
|
| 550 |
+
# for/else, runs if for doesn't end with a `break`
|
| 551 |
+
with gil:
|
| 552 |
+
warnings.warn("Objective did not converge. You might want to "
|
| 553 |
+
"increase the number of iterations. Duality "
|
| 554 |
+
"gap: {}, tolerance: {}".format(gap, tol),
|
| 555 |
+
ConvergenceWarning)
|
| 556 |
+
|
| 557 |
+
return np.asarray(w), gap, tol, n_iter + 1
|
| 558 |
+
|
| 559 |
+
|
| 560 |
+
def enet_coordinate_descent_gram(
|
| 561 |
+
floating[::1] w,
|
| 562 |
+
floating alpha,
|
| 563 |
+
floating beta,
|
| 564 |
+
const floating[:, ::1] Q,
|
| 565 |
+
const floating[::1] q,
|
| 566 |
+
const floating[:] y,
|
| 567 |
+
unsigned int max_iter,
|
| 568 |
+
floating tol,
|
| 569 |
+
object rng,
|
| 570 |
+
bint random=0,
|
| 571 |
+
bint positive=0
|
| 572 |
+
):
|
| 573 |
+
"""Cython version of the coordinate descent algorithm
|
| 574 |
+
for Elastic-Net regression
|
| 575 |
+
|
| 576 |
+
We minimize
|
| 577 |
+
|
| 578 |
+
(1/2) * w^T Q w - q^T w + alpha norm(w, 1) + (beta/2) * norm(w, 2)^2
|
| 579 |
+
|
| 580 |
+
which amount to the Elastic-Net problem when:
|
| 581 |
+
Q = X^T X (Gram matrix)
|
| 582 |
+
q = X^T y
|
| 583 |
+
|
| 584 |
+
Returns
|
| 585 |
+
-------
|
| 586 |
+
w : ndarray of shape (n_features,)
|
| 587 |
+
ElasticNet coefficients.
|
| 588 |
+
gap : float
|
| 589 |
+
Achieved dual gap.
|
| 590 |
+
tol : float
|
| 591 |
+
Equals input `tol` times `np.dot(y, y)`. The tolerance used for the dual gap.
|
| 592 |
+
n_iter : int
|
| 593 |
+
Number of coordinate descent iterations.
|
| 594 |
+
"""
|
| 595 |
+
|
| 596 |
+
if floating is float:
|
| 597 |
+
dtype = np.float32
|
| 598 |
+
else:
|
| 599 |
+
dtype = np.float64
|
| 600 |
+
|
| 601 |
+
# get the data information into easy vars
|
| 602 |
+
cdef unsigned int n_features = Q.shape[0]
|
| 603 |
+
|
| 604 |
+
# initial value "Q w" which will be kept of up to date in the iterations
|
| 605 |
+
cdef floating[:] H = np.dot(Q, w)
|
| 606 |
+
|
| 607 |
+
cdef floating[:] XtA = np.zeros(n_features, dtype=dtype)
|
| 608 |
+
cdef floating tmp
|
| 609 |
+
cdef floating w_ii
|
| 610 |
+
cdef floating d_w_max
|
| 611 |
+
cdef floating w_max
|
| 612 |
+
cdef floating d_w_ii
|
| 613 |
+
cdef floating q_dot_w
|
| 614 |
+
cdef floating w_norm2
|
| 615 |
+
cdef floating gap = tol + 1.0
|
| 616 |
+
cdef floating d_w_tol = tol
|
| 617 |
+
cdef floating dual_norm_XtA
|
| 618 |
+
cdef unsigned int ii
|
| 619 |
+
cdef unsigned int n_iter = 0
|
| 620 |
+
cdef unsigned int f_iter
|
| 621 |
+
cdef uint32_t rand_r_state_seed = rng.randint(0, RAND_R_MAX)
|
| 622 |
+
cdef uint32_t* rand_r_state = &rand_r_state_seed
|
| 623 |
+
|
| 624 |
+
cdef floating y_norm2 = np.dot(y, y)
|
| 625 |
+
cdef floating* w_ptr = &w[0]
|
| 626 |
+
cdef const floating* Q_ptr = &Q[0, 0]
|
| 627 |
+
cdef const floating* q_ptr = &q[0]
|
| 628 |
+
cdef floating* H_ptr = &H[0]
|
| 629 |
+
cdef floating* XtA_ptr = &XtA[0]
|
| 630 |
+
tol = tol * y_norm2
|
| 631 |
+
|
| 632 |
+
if alpha == 0:
|
| 633 |
+
warnings.warn(
|
| 634 |
+
"Coordinate descent without L1 regularization may "
|
| 635 |
+
"lead to unexpected results and is discouraged. "
|
| 636 |
+
"Set l1_ratio > 0 to add L1 regularization."
|
| 637 |
+
)
|
| 638 |
+
|
| 639 |
+
with nogil:
|
| 640 |
+
for n_iter in range(max_iter):
|
| 641 |
+
w_max = 0.0
|
| 642 |
+
d_w_max = 0.0
|
| 643 |
+
for f_iter in range(n_features): # Loop over coordinates
|
| 644 |
+
if random:
|
| 645 |
+
ii = rand_int(n_features, rand_r_state)
|
| 646 |
+
else:
|
| 647 |
+
ii = f_iter
|
| 648 |
+
|
| 649 |
+
if Q[ii, ii] == 0.0:
|
| 650 |
+
continue
|
| 651 |
+
|
| 652 |
+
w_ii = w[ii] # Store previous value
|
| 653 |
+
|
| 654 |
+
if w_ii != 0.0:
|
| 655 |
+
# H -= w_ii * Q[ii]
|
| 656 |
+
_axpy(n_features, -w_ii, Q_ptr + ii * n_features, 1,
|
| 657 |
+
H_ptr, 1)
|
| 658 |
+
|
| 659 |
+
tmp = q[ii] - H[ii]
|
| 660 |
+
|
| 661 |
+
if positive and tmp < 0:
|
| 662 |
+
w[ii] = 0.0
|
| 663 |
+
else:
|
| 664 |
+
w[ii] = fsign(tmp) * fmax(fabs(tmp) - alpha, 0) \
|
| 665 |
+
/ (Q[ii, ii] + beta)
|
| 666 |
+
|
| 667 |
+
if w[ii] != 0.0:
|
| 668 |
+
# H += w[ii] * Q[ii] # Update H = X.T X w
|
| 669 |
+
_axpy(n_features, w[ii], Q_ptr + ii * n_features, 1,
|
| 670 |
+
H_ptr, 1)
|
| 671 |
+
|
| 672 |
+
# update the maximum absolute coefficient update
|
| 673 |
+
d_w_ii = fabs(w[ii] - w_ii)
|
| 674 |
+
if d_w_ii > d_w_max:
|
| 675 |
+
d_w_max = d_w_ii
|
| 676 |
+
|
| 677 |
+
if fabs(w[ii]) > w_max:
|
| 678 |
+
w_max = fabs(w[ii])
|
| 679 |
+
|
| 680 |
+
if w_max == 0.0 or d_w_max / w_max < d_w_tol or n_iter == max_iter - 1:
|
| 681 |
+
# the biggest coordinate update of this iteration was smaller than
|
| 682 |
+
# the tolerance: check the duality gap as ultimate stopping
|
| 683 |
+
# criterion
|
| 684 |
+
|
| 685 |
+
# q_dot_w = np.dot(w, q)
|
| 686 |
+
q_dot_w = _dot(n_features, w_ptr, 1, q_ptr, 1)
|
| 687 |
+
|
| 688 |
+
for ii in range(n_features):
|
| 689 |
+
XtA[ii] = q[ii] - H[ii] - beta * w[ii]
|
| 690 |
+
if positive:
|
| 691 |
+
dual_norm_XtA = max(n_features, XtA_ptr)
|
| 692 |
+
else:
|
| 693 |
+
dual_norm_XtA = abs_max(n_features, XtA_ptr)
|
| 694 |
+
|
| 695 |
+
# temp = np.sum(w * H)
|
| 696 |
+
tmp = 0.0
|
| 697 |
+
for ii in range(n_features):
|
| 698 |
+
tmp += w[ii] * H[ii]
|
| 699 |
+
R_norm2 = y_norm2 + tmp - 2.0 * q_dot_w
|
| 700 |
+
|
| 701 |
+
# w_norm2 = np.dot(w, w)
|
| 702 |
+
w_norm2 = _dot(n_features, &w[0], 1, &w[0], 1)
|
| 703 |
+
|
| 704 |
+
if (dual_norm_XtA > alpha):
|
| 705 |
+
const = alpha / dual_norm_XtA
|
| 706 |
+
A_norm2 = R_norm2 * (const ** 2)
|
| 707 |
+
gap = 0.5 * (R_norm2 + A_norm2)
|
| 708 |
+
else:
|
| 709 |
+
const = 1.0
|
| 710 |
+
gap = R_norm2
|
| 711 |
+
|
| 712 |
+
# The call to asum is equivalent to the L1 norm of w
|
| 713 |
+
gap += (
|
| 714 |
+
alpha * _asum(n_features, &w[0], 1)
|
| 715 |
+
- const * y_norm2
|
| 716 |
+
+ const * q_dot_w
|
| 717 |
+
+ 0.5 * beta * (1 + const ** 2) * w_norm2
|
| 718 |
+
)
|
| 719 |
+
|
| 720 |
+
if gap < tol:
|
| 721 |
+
# return if we reached desired tolerance
|
| 722 |
+
break
|
| 723 |
+
|
| 724 |
+
else:
|
| 725 |
+
# for/else, runs if for doesn't end with a `break`
|
| 726 |
+
with gil:
|
| 727 |
+
warnings.warn("Objective did not converge. You might want to "
|
| 728 |
+
"increase the number of iterations. Duality "
|
| 729 |
+
"gap: {}, tolerance: {}".format(gap, tol),
|
| 730 |
+
ConvergenceWarning)
|
| 731 |
+
|
| 732 |
+
return np.asarray(w), gap, tol, n_iter + 1
|
| 733 |
+
|
| 734 |
+
|
| 735 |
+
def enet_coordinate_descent_multi_task(
    # NOTE: W must NOT be const — it is overwritten in place below via
    # _copy/_scal on &W[0, ii]; a const memoryview cannot provide a writable
    # pointer and would not compile.
    floating[::1, :] W,
    floating l1_reg,
    floating l2_reg,
    const floating[::1, :] X,
    const floating[::1, :] Y,
    unsigned int max_iter,
    floating tol,
    object rng,
    bint random=0
):
    """Cython version of the coordinate descent algorithm
    for Elastic-Net multi-task regression

    We minimize

    0.5 * norm(Y - X W.T, 2)^2 + l1_reg ||W.T||_21 + 0.5 * l2_reg norm(W.T, 2)^2

    Parameters
    ----------
    W : F-contiguous memoryview of shape (n_tasks, n_features)
        Initial coefficients, updated in place with the solution.
    l1_reg : float
        L1 (group-lasso, l2/l1) regularization strength.
    l2_reg : float
        L2 (ridge) regularization strength.
    X : F-contiguous memoryview of shape (n_samples, n_features)
        Training data.
    Y : F-contiguous memoryview of shape (n_samples, n_tasks)
        Training targets.
    max_iter : unsigned int
        Maximum number of full coordinate-descent passes.
    tol : float
        Relative duality-gap tolerance; rescaled by ||Y||_F^2 below.
    rng : object
        RNG providing `randint`, used to seed the internal rand_r state.
    random : bint, default=0
        If non-zero, pick coordinates at random instead of cyclically.

    Returns
    -------
    W : ndarray of shape (n_tasks, n_features)
        ElasticNet coefficients.
    gap : float
        Achieved dual gap.
    tol : float
        Equals input `tol` times `np.dot(y, y)`. The tolerance used for the dual gap.
    n_iter : int
        Number of coordinate descent iterations.
    """

    if floating is float:
        dtype = np.float32
    else:
        dtype = np.float64

    # get the data information into easy vars
    cdef unsigned int n_samples = X.shape[0]
    cdef unsigned int n_features = X.shape[1]
    cdef unsigned int n_tasks = Y.shape[1]

    # to store XtA
    cdef floating[:, ::1] XtA = np.zeros((n_features, n_tasks), dtype=dtype)
    cdef floating XtA_axis1norm
    cdef floating dual_norm_XtA

    # initial value of the residuals
    cdef floating[::1, :] R = np.zeros((n_samples, n_tasks), dtype=dtype, order='F')

    cdef floating[::1] norm_cols_X = np.zeros(n_features, dtype=dtype)
    cdef floating[::1] tmp = np.zeros(n_tasks, dtype=dtype)
    cdef floating[::1] w_ii = np.zeros(n_tasks, dtype=dtype)
    cdef floating d_w_max
    cdef floating w_max
    cdef floating d_w_ii
    cdef floating nn
    cdef floating W_ii_abs_max
    cdef floating gap = tol + 1.0
    cdef floating d_w_tol = tol
    cdef floating R_norm
    cdef floating w_norm
    cdef floating ry_sum
    cdef floating l21_norm
    cdef unsigned int ii
    cdef unsigned int jj
    cdef unsigned int n_iter = 0
    cdef unsigned int f_iter
    cdef uint32_t rand_r_state_seed = rng.randint(0, RAND_R_MAX)
    cdef uint32_t* rand_r_state = &rand_r_state_seed

    cdef const floating* X_ptr = &X[0, 0]
    cdef const floating* Y_ptr = &Y[0, 0]

    if l1_reg == 0:
        warnings.warn(
            "Coordinate descent with l1_reg=0 may lead to unexpected"
            " results and is discouraged."
        )

    with nogil:
        # norm_cols_X = (np.asarray(X) ** 2).sum(axis=0)
        for ii in range(n_features):
            norm_cols_X[ii] = _nrm2(n_samples, X_ptr + ii * n_samples, 1) ** 2

        # R = Y - np.dot(X, W.T)
        _copy(n_samples * n_tasks, Y_ptr, 1, &R[0, 0], 1)
        for ii in range(n_features):
            for jj in range(n_tasks):
                if W[jj, ii] != 0:
                    _axpy(n_samples, -W[jj, ii], X_ptr + ii * n_samples, 1,
                          &R[0, jj], 1)

        # tol = tol * linalg.norm(Y, ord='fro') ** 2
        tol = tol * _nrm2(n_samples * n_tasks, Y_ptr, 1) ** 2

        for n_iter in range(max_iter):
            w_max = 0.0
            d_w_max = 0.0
            for f_iter in range(n_features):  # Loop over coordinates
                if random:
                    ii = rand_int(n_features, rand_r_state)
                else:
                    ii = f_iter

                if norm_cols_X[ii] == 0.0:
                    continue

                # w_ii = W[:, ii] # Store previous value
                _copy(n_tasks, &W[0, ii], 1, &w_ii[0], 1)

                # Using Numpy:
                # R += np.dot(X[:, ii][:, None], w_ii[None, :]) # rank 1 update
                # Using Blas Level2:
                # _ger(RowMajor, n_samples, n_tasks, 1.0,
                #      &X[0, ii], 1,
                #      &w_ii[0], 1, &R[0, 0], n_tasks)
                # Using Blas Level1 and for loop to avoid slower threads
                # for such small vectors
                for jj in range(n_tasks):
                    if w_ii[jj] != 0:
                        _axpy(n_samples, w_ii[jj], X_ptr + ii * n_samples, 1,
                              &R[0, jj], 1)

                # Using numpy:
                # tmp = np.dot(X[:, ii][None, :], R).ravel()
                # Using BLAS Level 2:
                # _gemv(RowMajor, Trans, n_samples, n_tasks, 1.0, &R[0, 0],
                #       n_tasks, &X[0, ii], 1, 0.0, &tmp[0], 1)
                # Using BLAS Level 1 (faster for small vectors like here):
                for jj in range(n_tasks):
                    tmp[jj] = _dot(n_samples, X_ptr + ii * n_samples, 1,
                                   &R[0, jj], 1)

                # nn = sqrt(np.sum(tmp ** 2))
                nn = _nrm2(n_tasks, &tmp[0], 1)

                # Block soft-thresholding (proximal step for the l2/l1 norm):
                # W[:, ii] = tmp * fmax(1. - l1_reg / nn, 0) / (norm_cols_X[ii] + l2_reg)
                _copy(n_tasks, &tmp[0], 1, &W[0, ii], 1)
                _scal(n_tasks, fmax(1. - l1_reg / nn, 0) / (norm_cols_X[ii] + l2_reg),
                      &W[0, ii], 1)

                # Using numpy:
                # R -= np.dot(X[:, ii][:, None], W[:, ii][None, :])
                # Using BLAS Level 2:
                # Update residual : rank 1 update
                # _ger(RowMajor, n_samples, n_tasks, -1.0,
                #      &X[0, ii], 1, &W[0, ii], 1,
                #      &R[0, 0], n_tasks)
                # Using BLAS Level 1 (faster for small vectors like here):
                for jj in range(n_tasks):
                    if W[jj, ii] != 0:
                        _axpy(n_samples, -W[jj, ii], X_ptr + ii * n_samples, 1,
                              &R[0, jj], 1)

                # update the maximum absolute coefficient update
                d_w_ii = diff_abs_max(n_tasks, &W[0, ii], &w_ii[0])

                if d_w_ii > d_w_max:
                    d_w_max = d_w_ii

                W_ii_abs_max = abs_max(n_tasks, &W[0, ii])
                if W_ii_abs_max > w_max:
                    w_max = W_ii_abs_max

            if w_max == 0.0 or d_w_max / w_max < d_w_tol or n_iter == max_iter - 1:
                # the biggest coordinate update of this iteration was smaller than
                # the tolerance: check the duality gap as ultimate stopping
                # criterion

                # XtA = np.dot(X.T, R) - l2_reg * W.T
                for ii in range(n_features):
                    for jj in range(n_tasks):
                        XtA[ii, jj] = _dot(
                            n_samples, X_ptr + ii * n_samples, 1, &R[0, jj], 1
                        ) - l2_reg * W[jj, ii]

                # dual_norm_XtA = np.max(np.sqrt(np.sum(XtA ** 2, axis=1)))
                dual_norm_XtA = 0.0
                for ii in range(n_features):
                    # np.sqrt(np.sum(XtA ** 2, axis=1))
                    XtA_axis1norm = _nrm2(n_tasks, &XtA[ii, 0], 1)
                    if XtA_axis1norm > dual_norm_XtA:
                        dual_norm_XtA = XtA_axis1norm

                # TODO: use squared L2 norm directly
                # R_norm = linalg.norm(R, ord='fro')
                # w_norm = linalg.norm(W, ord='fro')
                R_norm = _nrm2(n_samples * n_tasks, &R[0, 0], 1)
                w_norm = _nrm2(n_features * n_tasks, &W[0, 0], 1)
                if (dual_norm_XtA > l1_reg):
                    # Rescale the dual point to make it feasible.
                    const = l1_reg / dual_norm_XtA
                    A_norm = R_norm * const
                    gap = 0.5 * (R_norm ** 2 + A_norm ** 2)
                else:
                    const = 1.0
                    gap = R_norm ** 2

                # ry_sum = np.sum(R * y)
                ry_sum = _dot(n_samples * n_tasks, &R[0, 0], 1, &Y[0, 0], 1)

                # l21_norm = np.sqrt(np.sum(W ** 2, axis=0)).sum()
                l21_norm = 0.0
                for ii in range(n_features):
                    l21_norm += _nrm2(n_tasks, &W[0, ii], 1)

                gap += (
                    l1_reg * l21_norm
                    - const * ry_sum
                    + 0.5 * l2_reg * (1 + const ** 2) * (w_norm ** 2)
                )

                if gap <= tol:
                    # return if we reached desired tolerance
                    break
        else:
            # for/else, runs if for doesn't end with a `break`
            with gil:
                warnings.warn("Objective did not converge. You might want to "
                              "increase the number of iterations. Duality "
                              "gap: {}, tolerance: {}".format(gap, tol),
                              ConvergenceWarning)

    return np.asarray(W), gap, tol, n_iter + 1
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_coordinate_descent.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause

"""Public re-exports for the generalized linear model (GLM) regressors."""

from .glm import (
    GammaRegressor,
    PoissonRegressor,
    TweedieRegressor,
    _GeneralizedLinearRegressor,
)

__all__ = [
    "_GeneralizedLinearRegressor",
    "PoissonRegressor",
    "GammaRegressor",
    "TweedieRegressor",
]
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (359 Bytes). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc
ADDED
|
Binary file (14.2 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc
ADDED
|
Binary file (26.6 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/_newton_solver.py
ADDED
|
@@ -0,0 +1,616 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
Newton solver for Generalized Linear Models
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import warnings
|
| 9 |
+
from abc import ABC, abstractmethod
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
import scipy.linalg
|
| 13 |
+
import scipy.optimize
|
| 14 |
+
|
| 15 |
+
from ..._loss.loss import HalfSquaredError
|
| 16 |
+
from ...exceptions import ConvergenceWarning
|
| 17 |
+
from ...utils.optimize import _check_optimize_result
|
| 18 |
+
from .._linear_loss import LinearModelLoss
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class NewtonSolver(ABC):
    """Newton solver for GLMs.

    This class implements Newton/2nd-order optimization routines for GLMs. Each Newton
    iteration aims at finding the Newton step which is done by the inner solver. With
    Hessian H, gradient g and coefficients coef, one step solves:

        H @ coef_newton = -g

    For our GLM / LinearModelLoss, we have gradient g and Hessian H:

        g = X.T @ loss.gradient + l2_reg_strength * coef
        H = X.T @ diag(loss.hessian) @ X + l2_reg_strength * identity

    Backtracking line search updates coef = coef_old + t * coef_newton for some t in
    (0, 1].

    This is a base class, actual implementations (child classes) may deviate from the
    above pattern and use structure specific tricks.

    Usage pattern:
        - initialize solver: sol = NewtonSolver(...)
        - solve the problem: sol.solve(X, y, sample_weight)

    References
    ----------
    - Jorge Nocedal, Stephen J. Wright. (2006) "Numerical Optimization"
      2nd edition
      https://doi.org/10.1007/978-0-387-40065-5

    - Stephen P. Boyd, Lieven Vandenberghe. (2004) "Convex Optimization."
      Cambridge University Press, 2004.
      https://web.stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf

    Parameters
    ----------
    coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
        Initial coefficients of a linear model.
        If shape (n_classes * n_dof,), the classes of one feature are contiguous,
        i.e. one reconstructs the 2d-array via
        coef.reshape((n_classes, -1), order="F").

    linear_loss : LinearModelLoss
        The loss to be minimized.

    l2_reg_strength : float, default=0.0
        L2 regularization strength.

    tol : float, default=1e-4
        The optimization problem is solved when each of the following conditions is
        fulfilled:
        1. maximum |gradient| <= tol
        2. Newton decrement d: 1/2 * d^2 <= tol

    max_iter : int, default=100
        Maximum number of Newton steps allowed.

    n_threads : int, default=1
        Number of OpenMP threads to use for the computation of the Hessian and gradient
        of the loss function.

    Attributes
    ----------
    coef_old : ndarray of shape coef.shape
        Coefficient of previous iteration.

    coef_newton : ndarray of shape coef.shape
        Newton step.

    gradient : ndarray of shape coef.shape
        Gradient of the loss w.r.t. the coefficients.

    gradient_old : ndarray of shape coef.shape
        Gradient of previous iteration.

    loss_value : float
        Value of objective function = loss + penalty.

    loss_value_old : float
        Value of objective function of previous iteration.

    raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes)

    converged : bool
        Indicator for convergence of the solver.

    iteration : int
        Number of Newton steps, i.e. calls to inner_solve

    use_fallback_lbfgs_solve : bool
        If set to True, the solver will resort to call LBFGS to finish the optimization
        procedure in case of convergence issues.

    gradient_times_newton : float
        gradient @ coef_newton, set in inner_solve and used by line_search. If the
        Newton step is a descent direction, this is negative.
    """

    def __init__(
        self,
        *,
        coef,
        linear_loss=LinearModelLoss(base_loss=HalfSquaredError(), fit_intercept=True),
        l2_reg_strength=0.0,
        tol=1e-4,
        max_iter=100,
        n_threads=1,
        verbose=0,
    ):
        self.coef = coef
        self.linear_loss = linear_loss
        self.l2_reg_strength = l2_reg_strength
        self.tol = tol
        self.max_iter = max_iter
        self.n_threads = n_threads
        self.verbose = verbose

    def setup(self, X, y, sample_weight):
        """Precomputations

        If None, initializes:
            - self.coef
        Sets:
            - self.raw_prediction
            - self.loss_value
        """
        _, _, self.raw_prediction = self.linear_loss.weight_intercept_raw(self.coef, X)
        self.loss_value = self.linear_loss.loss(
            coef=self.coef,
            X=X,
            y=y,
            sample_weight=sample_weight,
            l2_reg_strength=self.l2_reg_strength,
            n_threads=self.n_threads,
            raw_prediction=self.raw_prediction,
        )

    @abstractmethod
    def update_gradient_hessian(self, X, y, sample_weight):
        """Update gradient and Hessian."""

    @abstractmethod
    def inner_solve(self, X, y, sample_weight):
        """Compute Newton step.

        Sets:
            - self.coef_newton
            - self.gradient_times_newton
        """

    def fallback_lbfgs_solve(self, X, y, sample_weight):
        """Fallback solver in case of emergency.

        If a solver detects convergence problems, it may fall back to this method in
        the hope to exit with success instead of raising an error.

        Sets:
            - self.coef
            - self.converged
        """
        opt_res = scipy.optimize.minimize(
            self.linear_loss.loss_gradient,
            self.coef,
            method="L-BFGS-B",
            jac=True,
            options={
                "maxiter": self.max_iter - self.iteration,
                "maxls": 50,  # default is 20
                "iprint": self.verbose - 1,
                "gtol": self.tol,
                "ftol": 64 * np.finfo(np.float64).eps,
            },
            args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads),
        )
        self.iteration += _check_optimize_result("lbfgs", opt_res)
        self.coef = opt_res.x
        self.converged = opt_res.status == 0

    def line_search(self, X, y, sample_weight):
        """Backtracking line search.

        Sets:
            - self.coef_old
            - self.coef
            - self.loss_value_old
            - self.loss_value
            - self.gradient_old
            - self.gradient
            - self.raw_prediction
        """
        # line search parameters
        beta, sigma = 0.5, 0.00048828125  # 1/2, 1/2**11
        eps = 16 * np.finfo(self.loss_value.dtype).eps
        t = 1  # step size

        # gradient_times_newton = self.gradient @ self.coef_newton
        # was computed in inner_solve.
        armijo_term = sigma * self.gradient_times_newton
        _, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw(
            self.coef_newton, X
        )

        self.coef_old = self.coef
        self.loss_value_old = self.loss_value
        self.gradient_old = self.gradient

        # np.sum(np.abs(self.gradient_old))
        sum_abs_grad_old = -1

        is_verbose = self.verbose >= 2
        if is_verbose:
            print("  Backtracking Line Search")
            print(f"    eps=16 * finfo.eps={eps}")

        for i in range(21):  # until and including t = beta**20 ~ 1e-6
            self.coef = self.coef_old + t * self.coef_newton
            raw = self.raw_prediction + t * raw_prediction_newton
            self.loss_value, self.gradient = self.linear_loss.loss_gradient(
                coef=self.coef,
                X=X,
                y=y,
                sample_weight=sample_weight,
                l2_reg_strength=self.l2_reg_strength,
                n_threads=self.n_threads,
                raw_prediction=raw,
            )
            # Note: If coef_newton is too large, loss_gradient may produce inf values,
            # potentially accompanied by a RuntimeWarning.
            # This case will be captured by the Armijo condition.

            # 1. Check Armijo / sufficient decrease condition.
            # The smaller (more negative) the better.
            loss_improvement = self.loss_value - self.loss_value_old
            check = loss_improvement <= t * armijo_term
            if is_verbose:
                print(
                    f"    line search iteration={i+1}, step size={t}\n"
                    f"      check loss improvement <= armijo term: {loss_improvement} "
                    f"<= {t * armijo_term} {check}"
                )
            if check:
                break
            # 2. Deal with relative loss differences around machine precision.
            tiny_loss = np.abs(self.loss_value_old * eps)
            check = np.abs(loss_improvement) <= tiny_loss
            if is_verbose:
                print(
                    "      check loss |improvement| <= eps * |loss_old|:"
                    f" {np.abs(loss_improvement)} <= {tiny_loss} {check}"
                )
            if check:
                if sum_abs_grad_old < 0:
                    sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1)
                # 2.1 Check sum of absolute gradients as alternative condition.
                sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1)
                check = sum_abs_grad < sum_abs_grad_old
                if is_verbose:
                    print(
                        "      check sum(|gradient|) < sum(|gradient_old|): "
                        f"{sum_abs_grad} < {sum_abs_grad_old} {check}"
                    )
                if check:
                    break

            t *= beta
        else:
            warnings.warn(
                (
                    f"Line search of Newton solver {self.__class__.__name__} at"
                    f" iteration #{self.iteration} did not converge after 21 line search"
                    " refinement iterations. It will now resort to lbfgs instead."
                ),
                ConvergenceWarning,
            )
            if self.verbose:
                print("  Line search did not converge and resorts to lbfgs instead.")
            self.use_fallback_lbfgs_solve = True
            return

        self.raw_prediction = raw
        if is_verbose:
            print(
                f"  line search successful after {i+1} iterations with "
                f"loss={self.loss_value}."
            )

    def check_convergence(self, X, y, sample_weight):
        """Check for convergence.

        Sets self.converged.
        """
        if self.verbose:
            print("  Check Convergence")
        # Note: Checking maximum relative change of coefficient <= tol is a bad
        # convergence criterion because even a large step could have brought us close
        # to the true minimum.
        # coef_step = self.coef - self.coef_old
        # change = np.max(np.abs(coef_step) / np.maximum(1, np.abs(self.coef_old)))
        # check = change <= tol

        # 1. Criterion: maximum |gradient| <= tol
        #    The gradient was already updated in line_search()
        g_max_abs = np.max(np.abs(self.gradient))
        check = g_max_abs <= self.tol
        if self.verbose:
            print(f"    1. max |gradient| {g_max_abs} <= {self.tol} {check}")
        if not check:
            return

        # 2. Criterion: For Newton decrement d, check 1/2 * d^2 <= tol
        #       d = sqrt(grad @ hessian^-1 @ grad)
        #         = sqrt(coef_newton @ hessian @ coef_newton)
        #    See Boyd, Vandenberghe (2004) "Convex Optimization" Chapter 9.5.1.
        d2 = self.coef_newton @ self.hessian @ self.coef_newton
        check = 0.5 * d2 <= self.tol
        if self.verbose:
            print(f"    2. Newton decrement {0.5 * d2} <= {self.tol} {check}")
        if not check:
            return

        if self.verbose:
            loss_value = self.linear_loss.loss(
                coef=self.coef,
                X=X,
                y=y,
                sample_weight=sample_weight,
                l2_reg_strength=self.l2_reg_strength,
                n_threads=self.n_threads,
            )
            print(f"  Solver did converge at loss = {loss_value}.")
        self.converged = True

    def finalize(self, X, y, sample_weight):
        """Finalize the solvers results.

        Some solvers may need this, others not.
        """
        pass

    def solve(self, X, y, sample_weight):
        """Solve the optimization problem.

        This is the main routine.

        Order of calls:
            self.setup()
            while iteration:
                self.update_gradient_hessian()
                self.inner_solve()
                self.line_search()
                self.check_convergence()
            self.finalize()

        Returns
        -------
        coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
            Solution of the optimization problem.
        """
        # setup usually:
        #   - initializes self.coef if needed
        #   - initializes and calculates self.raw_predictions, self.loss_value
        self.setup(X=X, y=y, sample_weight=sample_weight)

        self.iteration = 1
        self.converged = False
        self.use_fallback_lbfgs_solve = False

        while self.iteration <= self.max_iter and not self.converged:
            if self.verbose:
                print(f"Newton iter={self.iteration}")

            self.use_fallback_lbfgs_solve = False  # Fallback solver.

            # 1. Update Hessian and gradient
            self.update_gradient_hessian(X=X, y=y, sample_weight=sample_weight)

            # TODO:
            # if iteration == 1:
            # We might stop early, e.g. we already are close to the optimum,
            # usually detected by zero gradients at this stage.

            # 2. Inner solver
            #    Calculate Newton step/direction
            #    This usually sets self.coef_newton and self.gradient_times_newton.
            self.inner_solve(X=X, y=y, sample_weight=sample_weight)
            if self.use_fallback_lbfgs_solve:
                break

            # 3. Backtracking line search
            #    This usually sets self.coef_old, self.coef, self.loss_value_old
            #    self.loss_value, self.gradient_old, self.gradient,
            #    self.raw_prediction.
            self.line_search(X=X, y=y, sample_weight=sample_weight)
            if self.use_fallback_lbfgs_solve:
                break

            # 4. Check convergence
            #    Sets self.converged.
            self.check_convergence(X=X, y=y, sample_weight=sample_weight)

            # 5. Next iteration
            self.iteration += 1

        if not self.converged:
            if self.use_fallback_lbfgs_solve:
                # Note: The fallback solver circumvents check_convergence and relies on
                # the convergence checks of lbfgs instead. Enough warnings have been
                # raised on the way.
                self.fallback_lbfgs_solve(X=X, y=y, sample_weight=sample_weight)
            else:
                warnings.warn(
                    (
                        f"Newton solver did not converge after {self.iteration - 1} "
                        "iterations."
                    ),
                    ConvergenceWarning,
                )

        self.iteration -= 1
        self.finalize(X=X, y=y, sample_weight=sample_weight)
        return self.coef
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
class NewtonCholeskySolver(NewtonSolver):
    """Cholesky based Newton solver.

    Inner solver for finding the Newton step H w_newton = -g uses Cholesky based linear
    solver.
    """

    def setup(self, X, y, sample_weight):
        """Allocate gradient/hessian buffers and precompute case-distinction flags."""
        super().setup(X=X, y=y, sample_weight=sample_weight)
        if self.linear_loss.base_loss.is_multiclass:
            # Easier with ravelled arrays, e.g., for scipy.linalg.solve.
            # As with LinearModelLoss, we always are contiguous in n_classes.
            self.coef = self.coef.ravel(order="F")
        # Note that the computation of gradient in LinearModelLoss follows the shape of
        # coef.
        self.gradient = np.empty_like(self.coef)
        # But the hessian is always 2d.
        n = self.coef.size
        self.hessian = np.empty_like(self.coef, shape=(n, n))
        # To help case distinctions.
        self.is_multinomial_with_intercept = (
            self.linear_loss.base_loss.is_multiclass and self.linear_loss.fit_intercept
        )
        self.is_multinomial_no_penalty = (
            self.linear_loss.base_loss.is_multiclass and self.l2_reg_strength == 0
        )

    def update_gradient_hessian(self, X, y, sample_weight):
        """Fill self.gradient and self.hessian in-place at the current self.coef.

        Also records self.hessian_warning, which signals that the pointwise hessian
        had many negative values (handled in inner_solve by falling back to lbfgs).
        """
        _, _, self.hessian_warning = self.linear_loss.gradient_hessian(
            coef=self.coef,
            X=X,
            y=y,
            sample_weight=sample_weight,
            l2_reg_strength=self.l2_reg_strength,
            n_threads=self.n_threads,
            gradient_out=self.gradient,
            hessian_out=self.hessian,
            raw_prediction=self.raw_prediction,  # this was updated in line_search
        )

    def inner_solve(self, X, y, sample_weight):
        """Compute the Newton step self.coef_newton by solving H @ step = -g.

        Sets self.use_fallback_lbfgs_solve = True (and returns early) whenever the
        hessian is unusable: negative pointwise hessian, singular/ill-conditioned
        matrix, or a computed step that is not a descent direction.
        """
        if self.hessian_warning:
            warnings.warn(
                (
                    f"The inner solver of {self.__class__.__name__} detected a "
                    "pointwise hessian with many negative values at iteration "
                    f"#{self.iteration}. It will now resort to lbfgs instead."
                ),
                ConvergenceWarning,
            )
            if self.verbose:
                print(
                    "  The inner solver detected a pointwise Hessian with many "
                    "negative values and resorts to lbfgs instead."
                )
            self.use_fallback_lbfgs_solve = True
            return

        # Note: The following case distinction could also be shifted to the
        # implementation of HalfMultinomialLoss instead of here within the solver.
        if self.is_multinomial_no_penalty:
            # The multinomial loss is overparametrized for each unpenalized feature, so
            # at least the intercepts. This can be seen by noting that predicted
            # probabilities are invariant under shifting all coefficients of a single
            # feature j for all classes by the same amount c:
            #     coef[k, :] -> coef[k, :] + c  =>  proba stays the same
            # where we have assumned coef.shape = (n_classes, n_features).
            # Therefore, also the loss (-log-likelihood), gradient and hessian stay the
            # same, see
            # Noah Simon and Jerome Friedman and Trevor Hastie. (2013) "A Blockwise
            # Descent Algorithm for Group-penalized Multiresponse and Multinomial
            # Regression". https://doi.org/10.48550/arXiv.1311.6529
            #
            # We choose the standard approach and set all the coefficients of the last
            # class to zero, for all features including the intercept.
            n_classes = self.linear_loss.base_loss.n_classes
            n_dof = self.coef.size // n_classes  # degree of freedom per class
            n = self.coef.size - n_dof  # effective size
            self.coef[n_classes - 1 :: n_classes] = 0
            self.gradient[n_classes - 1 :: n_classes] = 0
            self.hessian[n_classes - 1 :: n_classes, :] = 0
            self.hessian[:, n_classes - 1 :: n_classes] = 0
            # We also need the reduced variants of gradient and hessian where the
            # entries set to zero are removed. For 2 features and 3 classes with
            # arbitrary values, "x" means removed:
            #     gradient = [0, 1, x, 3, 4, x]
            #
            #     hessian = [0,  1, x,  3,  4, x]
            #               [1,  7, x,  9, 10, x]
            #               [x,  x, x,  x,  x, x]
            #               [3,  9, x, 21, 22, x]
            #               [4, 10, x, 22, 28, x]
            #               [x,  x, x,  x,  x, x]
            # The following slicing triggers copies of gradient and hessian.
            gradient = self.gradient.reshape(-1, n_classes)[:, :-1].flatten()
            hessian = self.hessian.reshape(n_dof, n_classes, n_dof, n_classes)[
                :, :-1, :, :-1
            ].reshape(n, n)
        elif self.is_multinomial_with_intercept:
            # Here, only intercepts are unpenalized. We again choose the last class and
            # set its intercept to zero.
            self.coef[-1] = 0
            self.gradient[-1] = 0
            self.hessian[-1, :] = 0
            self.hessian[:, -1] = 0
            gradient, hessian = self.gradient[:-1], self.hessian[:-1, :-1]
        else:
            gradient, hessian = self.gradient, self.hessian

        try:
            with warnings.catch_warnings():
                # Promote ill-conditioning warnings to errors so the except-branch
                # below can trigger the lbfgs fallback.
                warnings.simplefilter("error", scipy.linalg.LinAlgWarning)
                self.coef_newton = scipy.linalg.solve(
                    hessian, -gradient, check_finite=False, assume_a="sym"
                )
            if self.is_multinomial_no_penalty:
                # Re-insert the zeroed last-class entries into the Newton step.
                self.coef_newton = np.c_[
                    self.coef_newton.reshape(n_dof, n_classes - 1), np.zeros(n_dof)
                ].reshape(-1)
                assert self.coef_newton.flags.f_contiguous
            elif self.is_multinomial_with_intercept:
                self.coef_newton = np.r_[self.coef_newton, 0]
            self.gradient_times_newton = self.gradient @ self.coef_newton
            if self.gradient_times_newton > 0:
                # g @ step > 0 means the step is not a descent direction.
                if self.verbose:
                    print(
                        "  The inner solver found a Newton step that is not a "
                        "descent direction and resorts to LBFGS steps instead."
                    )
                self.use_fallback_lbfgs_solve = True
                return
        except (np.linalg.LinAlgError, scipy.linalg.LinAlgWarning) as e:
            warnings.warn(
                f"The inner solver of {self.__class__.__name__} stumbled upon a "
                "singular or very ill-conditioned Hessian matrix at iteration "
                f"{self.iteration}. It will now resort to lbfgs instead.\n"
                "Further options are to use another solver or to avoid such situation "
                "in the first place. Possible remedies are removing collinear features"
                " of X or increasing the penalization strengths.\n"
                "The original Linear Algebra message was:\n" + str(e),
                scipy.linalg.LinAlgWarning,
            )
            # Possible causes:
            # 1. hess_pointwise is negative. But this is already taken care in
            #    LinearModelLoss.gradient_hessian.
            # 2. X is singular or ill-conditioned
            #    This might be the most probable cause.
            #
            # There are many possible ways to deal with this situation. Most of them
            # add, explicitly or implicitly, a matrix to the hessian to make it
            # positive definite, confer to Chapter 3.4 of Nocedal & Wright 2nd ed.
            # Instead, we resort to lbfgs.
            if self.verbose:
                print(
                    "  The inner solver stumbled upon an singular or ill-conditioned "
                    "Hessian matrix and resorts to LBFGS instead."
                )
            self.use_fallback_lbfgs_solve = True
            return

    def finalize(self, X, y, sample_weight):
        """Convert multinomial coefficients to the symmetric parametrization."""
        if self.is_multinomial_no_penalty:
            # Our convention is usually the symmetric parametrization where
            #     sum(coef[classes, features], axis=0) = 0.
            # We convert now to this convention. Note that it does not change
            # the predicted probabilities.
            n_classes = self.linear_loss.base_loss.n_classes
            self.coef = self.coef.reshape(n_classes, -1, order="F")
            self.coef -= np.mean(self.coef, axis=0)
        elif self.is_multinomial_with_intercept:
            # Only the intercept needs an update to the symmetric parametrization.
            n_classes = self.linear_loss.base_loss.n_classes
            self.coef[-n_classes:] -= np.mean(self.coef[-n_classes:])
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/glm.py
ADDED
|
@@ -0,0 +1,908 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
Generalized Linear Models with Exponential Dispersion Family
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from numbers import Integral, Real
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import scipy.optimize
|
| 12 |
+
|
| 13 |
+
from ..._loss.loss import (
|
| 14 |
+
HalfGammaLoss,
|
| 15 |
+
HalfPoissonLoss,
|
| 16 |
+
HalfSquaredError,
|
| 17 |
+
HalfTweedieLoss,
|
| 18 |
+
HalfTweedieLossIdentity,
|
| 19 |
+
)
|
| 20 |
+
from ...base import BaseEstimator, RegressorMixin, _fit_context
|
| 21 |
+
from ...utils import check_array
|
| 22 |
+
from ...utils._openmp_helpers import _openmp_effective_n_threads
|
| 23 |
+
from ...utils._param_validation import Hidden, Interval, StrOptions
|
| 24 |
+
from ...utils.optimize import _check_optimize_result
|
| 25 |
+
from ...utils.validation import _check_sample_weight, check_is_fitted, validate_data
|
| 26 |
+
from .._linear_loss import LinearModelLoss
|
| 27 |
+
from ._newton_solver import NewtonCholeskySolver, NewtonSolver
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class _GeneralizedLinearRegressor(RegressorMixin, BaseEstimator):
|
| 31 |
+
"""Regression via a penalized Generalized Linear Model (GLM).
|
| 32 |
+
|
| 33 |
+
GLMs based on a reproductive Exponential Dispersion Model (EDM) aim at fitting and
|
| 34 |
+
predicting the mean of the target y as y_pred=h(X*w) with coefficients w.
|
| 35 |
+
Therefore, the fit minimizes the following objective function with L2 priors as
|
| 36 |
+
regularizer::
|
| 37 |
+
|
| 38 |
+
1/(2*sum(s_i)) * sum(s_i * deviance(y_i, h(x_i*w)) + 1/2 * alpha * ||w||_2^2
|
| 39 |
+
|
| 40 |
+
with inverse link function h, s=sample_weight and per observation (unit) deviance
|
| 41 |
+
deviance(y_i, h(x_i*w)). Note that for an EDM, 1/2 * deviance is the negative
|
| 42 |
+
log-likelihood up to a constant (in w) term.
|
| 43 |
+
The parameter ``alpha`` corresponds to the lambda parameter in glmnet.
|
| 44 |
+
|
| 45 |
+
Instead of implementing the EDM family and a link function separately, we directly
|
| 46 |
+
use the loss functions `from sklearn._loss` which have the link functions included
|
| 47 |
+
in them for performance reasons. We pick the loss functions that implement
|
| 48 |
+
(1/2 times) EDM deviances.
|
| 49 |
+
|
| 50 |
+
Read more in the :ref:`User Guide <Generalized_linear_models>`.
|
| 51 |
+
|
| 52 |
+
.. versionadded:: 0.23
|
| 53 |
+
|
| 54 |
+
Parameters
|
| 55 |
+
----------
|
| 56 |
+
alpha : float, default=1
|
| 57 |
+
Constant that multiplies the penalty term and thus determines the
|
| 58 |
+
regularization strength. ``alpha = 0`` is equivalent to unpenalized
|
| 59 |
+
GLMs. In this case, the design matrix `X` must have full column rank
|
| 60 |
+
(no collinearities).
|
| 61 |
+
Values must be in the range `[0.0, inf)`.
|
| 62 |
+
|
| 63 |
+
fit_intercept : bool, default=True
|
| 64 |
+
Specifies if a constant (a.k.a. bias or intercept) should be
|
| 65 |
+
added to the linear predictor (X @ coef + intercept).
|
| 66 |
+
|
| 67 |
+
solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
|
| 68 |
+
Algorithm to use in the optimization problem:
|
| 69 |
+
|
| 70 |
+
'lbfgs'
|
| 71 |
+
Calls scipy's L-BFGS-B optimizer.
|
| 72 |
+
|
| 73 |
+
'newton-cholesky'
|
| 74 |
+
Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
|
| 75 |
+
iterated reweighted least squares) with an inner Cholesky based solver.
|
| 76 |
+
This solver is a good choice for `n_samples` >> `n_features`, especially
|
| 77 |
+
with one-hot encoded categorical features with rare categories. Be aware
|
| 78 |
+
that the memory usage of this solver has a quadratic dependency on
|
| 79 |
+
`n_features` because it explicitly computes the Hessian matrix.
|
| 80 |
+
|
| 81 |
+
.. versionadded:: 1.2
|
| 82 |
+
|
| 83 |
+
max_iter : int, default=100
|
| 84 |
+
The maximal number of iterations for the solver.
|
| 85 |
+
Values must be in the range `[1, inf)`.
|
| 86 |
+
|
| 87 |
+
tol : float, default=1e-4
|
| 88 |
+
Stopping criterion. For the lbfgs solver,
|
| 89 |
+
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
|
| 90 |
+
where ``g_j`` is the j-th component of the gradient (derivative) of
|
| 91 |
+
the objective function.
|
| 92 |
+
Values must be in the range `(0.0, inf)`.
|
| 93 |
+
|
| 94 |
+
warm_start : bool, default=False
|
| 95 |
+
If set to ``True``, reuse the solution of the previous call to ``fit``
|
| 96 |
+
as initialization for ``coef_`` and ``intercept_``.
|
| 97 |
+
|
| 98 |
+
verbose : int, default=0
|
| 99 |
+
For the lbfgs solver set verbose to any positive number for verbosity.
|
| 100 |
+
Values must be in the range `[0, inf)`.
|
| 101 |
+
|
| 102 |
+
Attributes
|
| 103 |
+
----------
|
| 104 |
+
coef_ : array of shape (n_features,)
|
| 105 |
+
Estimated coefficients for the linear predictor (`X @ coef_ +
|
| 106 |
+
intercept_`) in the GLM.
|
| 107 |
+
|
| 108 |
+
intercept_ : float
|
| 109 |
+
Intercept (a.k.a. bias) added to linear predictor.
|
| 110 |
+
|
| 111 |
+
n_iter_ : int
|
| 112 |
+
Actual number of iterations used in the solver.
|
| 113 |
+
|
| 114 |
+
_base_loss : BaseLoss, default=HalfSquaredError()
|
| 115 |
+
This is set during fit via `self._get_loss()`.
|
| 116 |
+
A `_base_loss` contains a specific loss function as well as the link
|
| 117 |
+
function. The loss to be minimized specifies the distributional assumption of
|
| 118 |
+
the GLM, i.e. the distribution from the EDM. Here are some examples:
|
| 119 |
+
|
| 120 |
+
======================= ======== ==========================
|
| 121 |
+
_base_loss Link Target Domain
|
| 122 |
+
======================= ======== ==========================
|
| 123 |
+
HalfSquaredError identity y any real number
|
| 124 |
+
HalfPoissonLoss log 0 <= y
|
| 125 |
+
HalfGammaLoss log 0 < y
|
| 126 |
+
HalfTweedieLoss log dependent on tweedie power
|
| 127 |
+
HalfTweedieLossIdentity identity dependent on tweedie power
|
| 128 |
+
======================= ======== ==========================
|
| 129 |
+
|
| 130 |
+
The link function of the GLM, i.e. mapping from linear predictor
|
| 131 |
+
`X @ coeff + intercept` to prediction `y_pred`. For instance, with a log link,
|
| 132 |
+
we have `y_pred = exp(X @ coeff + intercept)`.
|
| 133 |
+
"""
|
| 134 |
+
|
| 135 |
+
    # We allow for NewtonSolver classes for the "solver" parameter but do not
    # make them public in the docstrings. This facilitates testing and
    # benchmarking.
    _parameter_constraints: dict = {
        "alpha": [Interval(Real, 0.0, None, closed="left")],
        "fit_intercept": ["boolean"],
        "solver": [
            StrOptions({"lbfgs", "newton-cholesky"}),
            # Hidden(type) accepts NewtonSolver subclasses (see comment above).
            Hidden(type),
        ],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "tol": [Interval(Real, 0.0, None, closed="neither")],
        "warm_start": ["boolean"],
        "verbose": ["verbose"],
    }

    def __init__(
        self,
        *,
        alpha=1.0,
        fit_intercept=True,
        solver="lbfgs",
        max_iter=100,
        tol=1e-4,
        warm_start=False,
        verbose=0,
    ):
        # Plain parameter storage; validation is deferred to fit() via
        # _parameter_constraints (sklearn convention).
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.solver = solver
        self.max_iter = max_iter
        self.tol = tol
        self.warm_start = warm_start
        self.verbose = verbose
|
| 169 |
+
|
| 170 |
+
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """Fit a Generalized Linear Model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        self : object
            Fitted model.
        """
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=["csc", "csr"],
            dtype=[np.float64, np.float32],
            y_numeric=True,
            multi_output=False,
        )

        # required by losses
        if self.solver == "lbfgs":
            # lbfgs will force coef and therefore raw_prediction to be float64. The
            # base_loss needs y, X @ coef and sample_weight all of same dtype
            # (and contiguous).
            loss_dtype = np.float64
        else:
            # Promote to the wider of the two input dtypes, capped at float64.
            loss_dtype = min(max(y.dtype, X.dtype), np.float64)
        y = check_array(y, dtype=loss_dtype, order="C", ensure_2d=False)

        if sample_weight is not None:
            # Note that _check_sample_weight calls check_array(order="C") required by
            # losses.
            sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype)

        n_samples, n_features = X.shape
        self._base_loss = self._get_loss()

        linear_loss = LinearModelLoss(
            base_loss=self._base_loss,
            fit_intercept=self.fit_intercept,
        )

        if not linear_loss.base_loss.in_y_true_range(y):
            raise ValueError(
                "Some value(s) of y are out of the valid range of the loss"
                f" {self._base_loss.__class__.__name__!r}."
            )

        # TODO: if alpha=0 check that X is not rank deficient

        # NOTE: Rescaling of sample_weight:
        # We want to minimize
        #     obj = 1/(2 * sum(sample_weight)) * sum(sample_weight * deviance)
        #           + 1/2 * alpha * L2,
        # with
        #     deviance = 2 * loss.
        # The objective is invariant to multiplying sample_weight by a constant. We
        # could choose this constant such that sum(sample_weight) = 1 in order to end
        # up with
        #     obj = sum(sample_weight * loss) + 1/2 * alpha * L2.
        # But LinearModelLoss.loss() already computes
        #     average(loss, weights=sample_weight)
        # Thus, without rescaling, we have
        #     obj = LinearModelLoss.loss(...)

        if self.warm_start and hasattr(self, "coef_"):
            if self.fit_intercept:
                # LinearModelLoss needs intercept at the end of coefficient array.
                coef = np.concatenate((self.coef_, np.array([self.intercept_])))
            else:
                coef = self.coef_
            coef = coef.astype(loss_dtype, copy=False)
        else:
            coef = linear_loss.init_zero_coef(X, dtype=loss_dtype)
            if self.fit_intercept:
                # Start the intercept at the link-transformed weighted mean of y.
                coef[-1] = linear_loss.base_loss.link.link(
                    np.average(y, weights=sample_weight)
                )

        l2_reg_strength = self.alpha
        n_threads = _openmp_effective_n_threads()

        # Algorithms for optimization:
        # Note again that our losses implement 1/2 * deviance.
        if self.solver == "lbfgs":
            func = linear_loss.loss_gradient

            opt_res = scipy.optimize.minimize(
                func,
                coef,
                method="L-BFGS-B",
                jac=True,
                options={
                    "maxiter": self.max_iter,
                    "maxls": 50,  # default is 20
                    "iprint": self.verbose - 1,
                    "gtol": self.tol,
                    # The constant 64 was found empirically to pass the test suite.
                    # The point is that ftol is very small, but a bit larger than
                    # machine precision for float64, which is the dtype used by lbfgs.
                    "ftol": 64 * np.finfo(float).eps,
                },
                args=(X, y, sample_weight, l2_reg_strength, n_threads),
            )
            self.n_iter_ = _check_optimize_result("lbfgs", opt_res)
            coef = opt_res.x
        elif self.solver == "newton-cholesky":
            sol = NewtonCholeskySolver(
                coef=coef,
                linear_loss=linear_loss,
                l2_reg_strength=l2_reg_strength,
                tol=self.tol,
                max_iter=self.max_iter,
                n_threads=n_threads,
                verbose=self.verbose,
            )
            coef = sol.solve(X, y, sample_weight)
            self.n_iter_ = sol.iteration
        elif issubclass(self.solver, NewtonSolver):
            # Non-public escape hatch: a NewtonSolver subclass passed directly
            # (allowed via the Hidden(type) constraint, for testing/benchmarks).
            sol = self.solver(
                coef=coef,
                linear_loss=linear_loss,
                l2_reg_strength=l2_reg_strength,
                tol=self.tol,
                max_iter=self.max_iter,
                n_threads=n_threads,
            )
            coef = sol.solve(X, y, sample_weight)
            self.n_iter_ = sol.iteration
        else:
            raise ValueError(f"Invalid solver={self.solver}.")

        if self.fit_intercept:
            self.intercept_ = coef[-1]
            self.coef_ = coef[:-1]
        else:
            # set intercept to zero as the other linear models do
            self.intercept_ = 0.0
            self.coef_ = coef

        return self
|
| 322 |
+
|
| 323 |
+
def _linear_predictor(self, X):
|
| 324 |
+
"""Compute the linear_predictor = `X @ coef_ + intercept_`.
|
| 325 |
+
|
| 326 |
+
Note that we often use the term raw_prediction instead of linear predictor.
|
| 327 |
+
|
| 328 |
+
Parameters
|
| 329 |
+
----------
|
| 330 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 331 |
+
Samples.
|
| 332 |
+
|
| 333 |
+
Returns
|
| 334 |
+
-------
|
| 335 |
+
y_pred : array of shape (n_samples,)
|
| 336 |
+
Returns predicted values of linear predictor.
|
| 337 |
+
"""
|
| 338 |
+
check_is_fitted(self)
|
| 339 |
+
X = validate_data(
|
| 340 |
+
self,
|
| 341 |
+
X,
|
| 342 |
+
accept_sparse=["csr", "csc", "coo"],
|
| 343 |
+
dtype=[np.float64, np.float32],
|
| 344 |
+
ensure_2d=True,
|
| 345 |
+
allow_nd=False,
|
| 346 |
+
reset=False,
|
| 347 |
+
)
|
| 348 |
+
return X @ self.coef_ + self.intercept_
|
| 349 |
+
|
| 350 |
+
def predict(self, X):
|
| 351 |
+
"""Predict using GLM with feature matrix X.
|
| 352 |
+
|
| 353 |
+
Parameters
|
| 354 |
+
----------
|
| 355 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 356 |
+
Samples.
|
| 357 |
+
|
| 358 |
+
Returns
|
| 359 |
+
-------
|
| 360 |
+
y_pred : array of shape (n_samples,)
|
| 361 |
+
Returns predicted values.
|
| 362 |
+
"""
|
| 363 |
+
# check_array is done in _linear_predictor
|
| 364 |
+
raw_prediction = self._linear_predictor(X)
|
| 365 |
+
y_pred = self._base_loss.link.inverse(raw_prediction)
|
| 366 |
+
return y_pred
|
| 367 |
+
|
| 368 |
+
def score(self, X, y, sample_weight=None):
|
| 369 |
+
"""Compute D^2, the percentage of deviance explained.
|
| 370 |
+
|
| 371 |
+
D^2 is a generalization of the coefficient of determination R^2.
|
| 372 |
+
R^2 uses squared error and D^2 uses the deviance of this GLM, see the
|
| 373 |
+
:ref:`User Guide <regression_metrics>`.
|
| 374 |
+
|
| 375 |
+
D^2 is defined as
|
| 376 |
+
:math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
|
| 377 |
+
:math:`D_{null}` is the null deviance, i.e. the deviance of a model
|
| 378 |
+
with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
|
| 379 |
+
The mean :math:`\\bar{y}` is averaged by sample_weight.
|
| 380 |
+
Best possible score is 1.0 and it can be negative (because the model
|
| 381 |
+
can be arbitrarily worse).
|
| 382 |
+
|
| 383 |
+
Parameters
|
| 384 |
+
----------
|
| 385 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 386 |
+
Test samples.
|
| 387 |
+
|
| 388 |
+
y : array-like of shape (n_samples,)
|
| 389 |
+
True values of target.
|
| 390 |
+
|
| 391 |
+
sample_weight : array-like of shape (n_samples,), default=None
|
| 392 |
+
Sample weights.
|
| 393 |
+
|
| 394 |
+
Returns
|
| 395 |
+
-------
|
| 396 |
+
score : float
|
| 397 |
+
D^2 of self.predict(X) w.r.t. y.
|
| 398 |
+
"""
|
| 399 |
+
# TODO: Adapt link to User Guide in the docstring, once
|
| 400 |
+
# https://github.com/scikit-learn/scikit-learn/pull/22118 is merged.
|
| 401 |
+
#
|
| 402 |
+
# Note, default score defined in RegressorMixin is R^2 score.
|
| 403 |
+
# TODO: make D^2 a score function in module metrics (and thereby get
|
| 404 |
+
# input validation and so on)
|
| 405 |
+
raw_prediction = self._linear_predictor(X) # validates X
|
| 406 |
+
# required by losses
|
| 407 |
+
y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False)
|
| 408 |
+
|
| 409 |
+
if sample_weight is not None:
|
| 410 |
+
# Note that _check_sample_weight calls check_array(order="C") required by
|
| 411 |
+
# losses.
|
| 412 |
+
sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)
|
| 413 |
+
|
| 414 |
+
base_loss = self._base_loss
|
| 415 |
+
|
| 416 |
+
if not base_loss.in_y_true_range(y):
|
| 417 |
+
raise ValueError(
|
| 418 |
+
"Some value(s) of y are out of the valid range of the loss"
|
| 419 |
+
f" {base_loss.__name__}."
|
| 420 |
+
)
|
| 421 |
+
|
| 422 |
+
constant = np.average(
|
| 423 |
+
base_loss.constant_to_optimal_zero(y_true=y, sample_weight=None),
|
| 424 |
+
weights=sample_weight,
|
| 425 |
+
)
|
| 426 |
+
|
| 427 |
+
# Missing factor of 2 in deviance cancels out.
|
| 428 |
+
deviance = base_loss(
|
| 429 |
+
y_true=y,
|
| 430 |
+
raw_prediction=raw_prediction,
|
| 431 |
+
sample_weight=sample_weight,
|
| 432 |
+
n_threads=1,
|
| 433 |
+
)
|
| 434 |
+
y_mean = base_loss.link.link(np.average(y, weights=sample_weight))
|
| 435 |
+
deviance_null = base_loss(
|
| 436 |
+
y_true=y,
|
| 437 |
+
raw_prediction=np.tile(y_mean, y.shape[0]),
|
| 438 |
+
sample_weight=sample_weight,
|
| 439 |
+
n_threads=1,
|
| 440 |
+
)
|
| 441 |
+
return 1 - (deviance + constant) / (deviance_null + constant)
|
| 442 |
+
|
| 443 |
+
    def __sklearn_tags__(self):
        """Return estimator tags; marks sparse-input support and y>0 requirement."""
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        try:
            # Create instance of BaseLoss if fit wasn't called yet. This is necessary as
            # TweedieRegressor might set the used loss during fit different from
            # self._base_loss.
            base_loss = self._get_loss()
            # y must be positive iff -1 is outside the loss's valid y_true range.
            tags.target_tags.positive_only = not base_loss.in_y_true_range(-1.0)
        except (ValueError, AttributeError, TypeError):
            # This happens when the link or power parameter of TweedieRegressor is
            # invalid. We fallback on the default tags in that case.
            pass  # pragma: no cover
        return tags
|
| 457 |
+
|
| 458 |
+
def _get_loss(self):
|
| 459 |
+
"""This is only necessary because of the link and power arguments of the
|
| 460 |
+
TweedieRegressor.
|
| 461 |
+
|
| 462 |
+
Note that we do not need to pass sample_weight to the loss class as this is
|
| 463 |
+
only needed to set loss.constant_hessian on which GLMs do not rely.
|
| 464 |
+
"""
|
| 465 |
+
return HalfSquaredError()
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
class PoissonRegressor(_GeneralizedLinearRegressor):
|
| 469 |
+
"""Generalized Linear Model with a Poisson distribution.
|
| 470 |
+
|
| 471 |
+
This regressor uses the 'log' link function.
|
| 472 |
+
|
| 473 |
+
Read more in the :ref:`User Guide <Generalized_linear_models>`.
|
| 474 |
+
|
| 475 |
+
.. versionadded:: 0.23
|
| 476 |
+
|
| 477 |
+
Parameters
|
| 478 |
+
----------
|
| 479 |
+
alpha : float, default=1
|
| 480 |
+
Constant that multiplies the L2 penalty term and determines the
|
| 481 |
+
regularization strength. ``alpha = 0`` is equivalent to unpenalized
|
| 482 |
+
GLMs. In this case, the design matrix `X` must have full column rank
|
| 483 |
+
(no collinearities).
|
| 484 |
+
Values of `alpha` must be in the range `[0.0, inf)`.
|
| 485 |
+
|
| 486 |
+
fit_intercept : bool, default=True
|
| 487 |
+
Specifies if a constant (a.k.a. bias or intercept) should be
|
| 488 |
+
added to the linear predictor (`X @ coef + intercept`).
|
| 489 |
+
|
| 490 |
+
solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
|
| 491 |
+
Algorithm to use in the optimization problem:
|
| 492 |
+
|
| 493 |
+
'lbfgs'
|
| 494 |
+
Calls scipy's L-BFGS-B optimizer.
|
| 495 |
+
|
| 496 |
+
'newton-cholesky'
|
| 497 |
+
Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
|
| 498 |
+
iterated reweighted least squares) with an inner Cholesky based solver.
|
| 499 |
+
This solver is a good choice for `n_samples` >> `n_features`, especially
|
| 500 |
+
with one-hot encoded categorical features with rare categories. Be aware
|
| 501 |
+
that the memory usage of this solver has a quadratic dependency on
|
| 502 |
+
`n_features` because it explicitly computes the Hessian matrix.
|
| 503 |
+
|
| 504 |
+
.. versionadded:: 1.2
|
| 505 |
+
|
| 506 |
+
max_iter : int, default=100
|
| 507 |
+
The maximal number of iterations for the solver.
|
| 508 |
+
Values must be in the range `[1, inf)`.
|
| 509 |
+
|
| 510 |
+
tol : float, default=1e-4
|
| 511 |
+
Stopping criterion. For the lbfgs solver,
|
| 512 |
+
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
|
| 513 |
+
where ``g_j`` is the j-th component of the gradient (derivative) of
|
| 514 |
+
the objective function.
|
| 515 |
+
Values must be in the range `(0.0, inf)`.
|
| 516 |
+
|
| 517 |
+
warm_start : bool, default=False
|
| 518 |
+
If set to ``True``, reuse the solution of the previous call to ``fit``
|
| 519 |
+
as initialization for ``coef_`` and ``intercept_`` .
|
| 520 |
+
|
| 521 |
+
verbose : int, default=0
|
| 522 |
+
For the lbfgs solver set verbose to any positive number for verbosity.
|
| 523 |
+
Values must be in the range `[0, inf)`.
|
| 524 |
+
|
| 525 |
+
Attributes
|
| 526 |
+
----------
|
| 527 |
+
coef_ : array of shape (n_features,)
|
| 528 |
+
Estimated coefficients for the linear predictor (`X @ coef_ +
|
| 529 |
+
intercept_`) in the GLM.
|
| 530 |
+
|
| 531 |
+
intercept_ : float
|
| 532 |
+
Intercept (a.k.a. bias) added to linear predictor.
|
| 533 |
+
|
| 534 |
+
n_features_in_ : int
|
| 535 |
+
Number of features seen during :term:`fit`.
|
| 536 |
+
|
| 537 |
+
.. versionadded:: 0.24
|
| 538 |
+
|
| 539 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 540 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 541 |
+
has feature names that are all strings.
|
| 542 |
+
|
| 543 |
+
.. versionadded:: 1.0
|
| 544 |
+
|
| 545 |
+
n_iter_ : int
|
| 546 |
+
Actual number of iterations used in the solver.
|
| 547 |
+
|
| 548 |
+
See Also
|
| 549 |
+
--------
|
| 550 |
+
TweedieRegressor : Generalized Linear Model with a Tweedie distribution.
|
| 551 |
+
|
| 552 |
+
Examples
|
| 553 |
+
--------
|
| 554 |
+
>>> from sklearn import linear_model
|
| 555 |
+
>>> clf = linear_model.PoissonRegressor()
|
| 556 |
+
>>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
|
| 557 |
+
>>> y = [12, 17, 22, 21]
|
| 558 |
+
>>> clf.fit(X, y)
|
| 559 |
+
PoissonRegressor()
|
| 560 |
+
>>> clf.score(X, y)
|
| 561 |
+
np.float64(0.990...)
|
| 562 |
+
>>> clf.coef_
|
| 563 |
+
array([0.121..., 0.158...])
|
| 564 |
+
>>> clf.intercept_
|
| 565 |
+
np.float64(2.088...)
|
| 566 |
+
>>> clf.predict([[1, 1], [3, 4]])
|
| 567 |
+
array([10.676..., 21.875...])
|
| 568 |
+
"""
|
| 569 |
+
|
| 570 |
+
_parameter_constraints: dict = {
|
| 571 |
+
**_GeneralizedLinearRegressor._parameter_constraints
|
| 572 |
+
}
|
| 573 |
+
|
| 574 |
+
def __init__(
|
| 575 |
+
self,
|
| 576 |
+
*,
|
| 577 |
+
alpha=1.0,
|
| 578 |
+
fit_intercept=True,
|
| 579 |
+
solver="lbfgs",
|
| 580 |
+
max_iter=100,
|
| 581 |
+
tol=1e-4,
|
| 582 |
+
warm_start=False,
|
| 583 |
+
verbose=0,
|
| 584 |
+
):
|
| 585 |
+
super().__init__(
|
| 586 |
+
alpha=alpha,
|
| 587 |
+
fit_intercept=fit_intercept,
|
| 588 |
+
solver=solver,
|
| 589 |
+
max_iter=max_iter,
|
| 590 |
+
tol=tol,
|
| 591 |
+
warm_start=warm_start,
|
| 592 |
+
verbose=verbose,
|
| 593 |
+
)
|
| 594 |
+
|
| 595 |
+
def _get_loss(self):
|
| 596 |
+
return HalfPoissonLoss()
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
class GammaRegressor(_GeneralizedLinearRegressor):
|
| 600 |
+
"""Generalized Linear Model with a Gamma distribution.
|
| 601 |
+
|
| 602 |
+
This regressor uses the 'log' link function.
|
| 603 |
+
|
| 604 |
+
Read more in the :ref:`User Guide <Generalized_linear_models>`.
|
| 605 |
+
|
| 606 |
+
.. versionadded:: 0.23
|
| 607 |
+
|
| 608 |
+
Parameters
|
| 609 |
+
----------
|
| 610 |
+
alpha : float, default=1
|
| 611 |
+
Constant that multiplies the L2 penalty term and determines the
|
| 612 |
+
regularization strength. ``alpha = 0`` is equivalent to unpenalized
|
| 613 |
+
GLMs. In this case, the design matrix `X` must have full column rank
|
| 614 |
+
(no collinearities).
|
| 615 |
+
Values of `alpha` must be in the range `[0.0, inf)`.
|
| 616 |
+
|
| 617 |
+
fit_intercept : bool, default=True
|
| 618 |
+
Specifies if a constant (a.k.a. bias or intercept) should be
|
| 619 |
+
added to the linear predictor `X @ coef_ + intercept_`.
|
| 620 |
+
|
| 621 |
+
solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
|
| 622 |
+
Algorithm to use in the optimization problem:
|
| 623 |
+
|
| 624 |
+
'lbfgs'
|
| 625 |
+
Calls scipy's L-BFGS-B optimizer.
|
| 626 |
+
|
| 627 |
+
'newton-cholesky'
|
| 628 |
+
Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
|
| 629 |
+
iterated reweighted least squares) with an inner Cholesky based solver.
|
| 630 |
+
This solver is a good choice for `n_samples` >> `n_features`, especially
|
| 631 |
+
with one-hot encoded categorical features with rare categories. Be aware
|
| 632 |
+
that the memory usage of this solver has a quadratic dependency on
|
| 633 |
+
`n_features` because it explicitly computes the Hessian matrix.
|
| 634 |
+
|
| 635 |
+
.. versionadded:: 1.2
|
| 636 |
+
|
| 637 |
+
max_iter : int, default=100
|
| 638 |
+
The maximal number of iterations for the solver.
|
| 639 |
+
Values must be in the range `[1, inf)`.
|
| 640 |
+
|
| 641 |
+
tol : float, default=1e-4
|
| 642 |
+
Stopping criterion. For the lbfgs solver,
|
| 643 |
+
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
|
| 644 |
+
where ``g_j`` is the j-th component of the gradient (derivative) of
|
| 645 |
+
the objective function.
|
| 646 |
+
Values must be in the range `(0.0, inf)`.
|
| 647 |
+
|
| 648 |
+
warm_start : bool, default=False
|
| 649 |
+
If set to ``True``, reuse the solution of the previous call to ``fit``
|
| 650 |
+
as initialization for `coef_` and `intercept_`.
|
| 651 |
+
|
| 652 |
+
verbose : int, default=0
|
| 653 |
+
For the lbfgs solver set verbose to any positive number for verbosity.
|
| 654 |
+
Values must be in the range `[0, inf)`.
|
| 655 |
+
|
| 656 |
+
Attributes
|
| 657 |
+
----------
|
| 658 |
+
coef_ : array of shape (n_features,)
|
| 659 |
+
Estimated coefficients for the linear predictor (`X @ coef_ +
|
| 660 |
+
intercept_`) in the GLM.
|
| 661 |
+
|
| 662 |
+
intercept_ : float
|
| 663 |
+
Intercept (a.k.a. bias) added to linear predictor.
|
| 664 |
+
|
| 665 |
+
n_features_in_ : int
|
| 666 |
+
Number of features seen during :term:`fit`.
|
| 667 |
+
|
| 668 |
+
.. versionadded:: 0.24
|
| 669 |
+
|
| 670 |
+
n_iter_ : int
|
| 671 |
+
Actual number of iterations used in the solver.
|
| 672 |
+
|
| 673 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 674 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 675 |
+
has feature names that are all strings.
|
| 676 |
+
|
| 677 |
+
.. versionadded:: 1.0
|
| 678 |
+
|
| 679 |
+
See Also
|
| 680 |
+
--------
|
| 681 |
+
PoissonRegressor : Generalized Linear Model with a Poisson distribution.
|
| 682 |
+
TweedieRegressor : Generalized Linear Model with a Tweedie distribution.
|
| 683 |
+
|
| 684 |
+
Examples
|
| 685 |
+
--------
|
| 686 |
+
>>> from sklearn import linear_model
|
| 687 |
+
>>> clf = linear_model.GammaRegressor()
|
| 688 |
+
>>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
|
| 689 |
+
>>> y = [19, 26, 33, 30]
|
| 690 |
+
>>> clf.fit(X, y)
|
| 691 |
+
GammaRegressor()
|
| 692 |
+
>>> clf.score(X, y)
|
| 693 |
+
np.float64(0.773...)
|
| 694 |
+
>>> clf.coef_
|
| 695 |
+
array([0.072..., 0.066...])
|
| 696 |
+
>>> clf.intercept_
|
| 697 |
+
np.float64(2.896...)
|
| 698 |
+
>>> clf.predict([[1, 0], [2, 8]])
|
| 699 |
+
array([19.483..., 35.795...])
|
| 700 |
+
"""
|
| 701 |
+
|
| 702 |
+
_parameter_constraints: dict = {
|
| 703 |
+
**_GeneralizedLinearRegressor._parameter_constraints
|
| 704 |
+
}
|
| 705 |
+
|
| 706 |
+
def __init__(
|
| 707 |
+
self,
|
| 708 |
+
*,
|
| 709 |
+
alpha=1.0,
|
| 710 |
+
fit_intercept=True,
|
| 711 |
+
solver="lbfgs",
|
| 712 |
+
max_iter=100,
|
| 713 |
+
tol=1e-4,
|
| 714 |
+
warm_start=False,
|
| 715 |
+
verbose=0,
|
| 716 |
+
):
|
| 717 |
+
super().__init__(
|
| 718 |
+
alpha=alpha,
|
| 719 |
+
fit_intercept=fit_intercept,
|
| 720 |
+
solver=solver,
|
| 721 |
+
max_iter=max_iter,
|
| 722 |
+
tol=tol,
|
| 723 |
+
warm_start=warm_start,
|
| 724 |
+
verbose=verbose,
|
| 725 |
+
)
|
| 726 |
+
|
| 727 |
+
def _get_loss(self):
|
| 728 |
+
return HalfGammaLoss()
|
| 729 |
+
|
| 730 |
+
|
| 731 |
+
class TweedieRegressor(_GeneralizedLinearRegressor):
|
| 732 |
+
"""Generalized Linear Model with a Tweedie distribution.
|
| 733 |
+
|
| 734 |
+
This estimator can be used to model different GLMs depending on the
|
| 735 |
+
``power`` parameter, which determines the underlying distribution.
|
| 736 |
+
|
| 737 |
+
Read more in the :ref:`User Guide <Generalized_linear_models>`.
|
| 738 |
+
|
| 739 |
+
.. versionadded:: 0.23
|
| 740 |
+
|
| 741 |
+
Parameters
|
| 742 |
+
----------
|
| 743 |
+
power : float, default=0
|
| 744 |
+
The power determines the underlying target distribution according
|
| 745 |
+
to the following table:
|
| 746 |
+
|
| 747 |
+
+-------+------------------------+
|
| 748 |
+
| Power | Distribution |
|
| 749 |
+
+=======+========================+
|
| 750 |
+
| 0 | Normal |
|
| 751 |
+
+-------+------------------------+
|
| 752 |
+
| 1 | Poisson |
|
| 753 |
+
+-------+------------------------+
|
| 754 |
+
| (1,2) | Compound Poisson Gamma |
|
| 755 |
+
+-------+------------------------+
|
| 756 |
+
| 2 | Gamma |
|
| 757 |
+
+-------+------------------------+
|
| 758 |
+
| 3 | Inverse Gaussian |
|
| 759 |
+
+-------+------------------------+
|
| 760 |
+
|
| 761 |
+
For ``0 < power < 1``, no distribution exists.
|
| 762 |
+
|
| 763 |
+
alpha : float, default=1
|
| 764 |
+
Constant that multiplies the L2 penalty term and determines the
|
| 765 |
+
regularization strength. ``alpha = 0`` is equivalent to unpenalized
|
| 766 |
+
GLMs. In this case, the design matrix `X` must have full column rank
|
| 767 |
+
(no collinearities).
|
| 768 |
+
Values of `alpha` must be in the range `[0.0, inf)`.
|
| 769 |
+
|
| 770 |
+
fit_intercept : bool, default=True
|
| 771 |
+
Specifies if a constant (a.k.a. bias or intercept) should be
|
| 772 |
+
added to the linear predictor (`X @ coef + intercept`).
|
| 773 |
+
|
| 774 |
+
link : {'auto', 'identity', 'log'}, default='auto'
|
| 775 |
+
The link function of the GLM, i.e. mapping from linear predictor
|
| 776 |
+
`X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets
|
| 777 |
+
the link depending on the chosen `power` parameter as follows:
|
| 778 |
+
|
| 779 |
+
- 'identity' for ``power <= 0``, e.g. for the Normal distribution
|
| 780 |
+
- 'log' for ``power > 0``, e.g. for Poisson, Gamma and Inverse Gaussian
|
| 781 |
+
distributions
|
| 782 |
+
|
| 783 |
+
solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
|
| 784 |
+
Algorithm to use in the optimization problem:
|
| 785 |
+
|
| 786 |
+
'lbfgs'
|
| 787 |
+
Calls scipy's L-BFGS-B optimizer.
|
| 788 |
+
|
| 789 |
+
'newton-cholesky'
|
| 790 |
+
Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
|
| 791 |
+
iterated reweighted least squares) with an inner Cholesky based solver.
|
| 792 |
+
This solver is a good choice for `n_samples` >> `n_features`, especially
|
| 793 |
+
with one-hot encoded categorical features with rare categories. Be aware
|
| 794 |
+
that the memory usage of this solver has a quadratic dependency on
|
| 795 |
+
`n_features` because it explicitly computes the Hessian matrix.
|
| 796 |
+
|
| 797 |
+
.. versionadded:: 1.2
|
| 798 |
+
|
| 799 |
+
max_iter : int, default=100
|
| 800 |
+
The maximal number of iterations for the solver.
|
| 801 |
+
Values must be in the range `[1, inf)`.
|
| 802 |
+
|
| 803 |
+
tol : float, default=1e-4
|
| 804 |
+
Stopping criterion. For the lbfgs solver,
|
| 805 |
+
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
|
| 806 |
+
where ``g_j`` is the j-th component of the gradient (derivative) of
|
| 807 |
+
the objective function.
|
| 808 |
+
Values must be in the range `(0.0, inf)`.
|
| 809 |
+
|
| 810 |
+
warm_start : bool, default=False
|
| 811 |
+
If set to ``True``, reuse the solution of the previous call to ``fit``
|
| 812 |
+
as initialization for ``coef_`` and ``intercept_`` .
|
| 813 |
+
|
| 814 |
+
verbose : int, default=0
|
| 815 |
+
For the lbfgs solver set verbose to any positive number for verbosity.
|
| 816 |
+
Values must be in the range `[0, inf)`.
|
| 817 |
+
|
| 818 |
+
Attributes
|
| 819 |
+
----------
|
| 820 |
+
coef_ : array of shape (n_features,)
|
| 821 |
+
Estimated coefficients for the linear predictor (`X @ coef_ +
|
| 822 |
+
intercept_`) in the GLM.
|
| 823 |
+
|
| 824 |
+
intercept_ : float
|
| 825 |
+
Intercept (a.k.a. bias) added to linear predictor.
|
| 826 |
+
|
| 827 |
+
n_iter_ : int
|
| 828 |
+
Actual number of iterations used in the solver.
|
| 829 |
+
|
| 830 |
+
n_features_in_ : int
|
| 831 |
+
Number of features seen during :term:`fit`.
|
| 832 |
+
|
| 833 |
+
.. versionadded:: 0.24
|
| 834 |
+
|
| 835 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 836 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 837 |
+
has feature names that are all strings.
|
| 838 |
+
|
| 839 |
+
.. versionadded:: 1.0
|
| 840 |
+
|
| 841 |
+
See Also
|
| 842 |
+
--------
|
| 843 |
+
PoissonRegressor : Generalized Linear Model with a Poisson distribution.
|
| 844 |
+
GammaRegressor : Generalized Linear Model with a Gamma distribution.
|
| 845 |
+
|
| 846 |
+
Examples
|
| 847 |
+
--------
|
| 848 |
+
>>> from sklearn import linear_model
|
| 849 |
+
>>> clf = linear_model.TweedieRegressor()
|
| 850 |
+
>>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
|
| 851 |
+
>>> y = [2, 3.5, 5, 5.5]
|
| 852 |
+
>>> clf.fit(X, y)
|
| 853 |
+
TweedieRegressor()
|
| 854 |
+
>>> clf.score(X, y)
|
| 855 |
+
np.float64(0.839...)
|
| 856 |
+
>>> clf.coef_
|
| 857 |
+
array([0.599..., 0.299...])
|
| 858 |
+
>>> clf.intercept_
|
| 859 |
+
np.float64(1.600...)
|
| 860 |
+
>>> clf.predict([[1, 1], [3, 4]])
|
| 861 |
+
array([2.500..., 4.599...])
|
| 862 |
+
"""
|
| 863 |
+
|
| 864 |
+
_parameter_constraints: dict = {
|
| 865 |
+
**_GeneralizedLinearRegressor._parameter_constraints,
|
| 866 |
+
"power": [Interval(Real, None, None, closed="neither")],
|
| 867 |
+
"link": [StrOptions({"auto", "identity", "log"})],
|
| 868 |
+
}
|
| 869 |
+
|
| 870 |
+
def __init__(
|
| 871 |
+
self,
|
| 872 |
+
*,
|
| 873 |
+
power=0.0,
|
| 874 |
+
alpha=1.0,
|
| 875 |
+
fit_intercept=True,
|
| 876 |
+
link="auto",
|
| 877 |
+
solver="lbfgs",
|
| 878 |
+
max_iter=100,
|
| 879 |
+
tol=1e-4,
|
| 880 |
+
warm_start=False,
|
| 881 |
+
verbose=0,
|
| 882 |
+
):
|
| 883 |
+
super().__init__(
|
| 884 |
+
alpha=alpha,
|
| 885 |
+
fit_intercept=fit_intercept,
|
| 886 |
+
solver=solver,
|
| 887 |
+
max_iter=max_iter,
|
| 888 |
+
tol=tol,
|
| 889 |
+
warm_start=warm_start,
|
| 890 |
+
verbose=verbose,
|
| 891 |
+
)
|
| 892 |
+
self.link = link
|
| 893 |
+
self.power = power
|
| 894 |
+
|
| 895 |
+
def _get_loss(self):
|
| 896 |
+
if self.link == "auto":
|
| 897 |
+
if self.power <= 0:
|
| 898 |
+
# identity link
|
| 899 |
+
return HalfTweedieLossIdentity(power=self.power)
|
| 900 |
+
else:
|
| 901 |
+
# log link
|
| 902 |
+
return HalfTweedieLoss(power=self.power)
|
| 903 |
+
|
| 904 |
+
if self.link == "log":
|
| 905 |
+
return HalfTweedieLoss(power=self.power)
|
| 906 |
+
|
| 907 |
+
if self.link == "identity":
|
| 908 |
+
return HalfTweedieLossIdentity(power=self.power)
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (190 Bytes). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc
ADDED
|
Binary file (23.8 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_glm/tests/test_glm.py
ADDED
|
@@ -0,0 +1,1110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
import itertools
|
| 5 |
+
import warnings
|
| 6 |
+
from functools import partial
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import pytest
|
| 10 |
+
import scipy
|
| 11 |
+
from numpy.testing import assert_allclose
|
| 12 |
+
from scipy import linalg
|
| 13 |
+
from scipy.optimize import minimize, root
|
| 14 |
+
|
| 15 |
+
from sklearn._loss import HalfBinomialLoss, HalfPoissonLoss, HalfTweedieLoss
|
| 16 |
+
from sklearn._loss.link import IdentityLink, LogLink
|
| 17 |
+
from sklearn.base import clone
|
| 18 |
+
from sklearn.datasets import make_low_rank_matrix, make_regression
|
| 19 |
+
from sklearn.exceptions import ConvergenceWarning
|
| 20 |
+
from sklearn.linear_model import (
|
| 21 |
+
GammaRegressor,
|
| 22 |
+
PoissonRegressor,
|
| 23 |
+
Ridge,
|
| 24 |
+
TweedieRegressor,
|
| 25 |
+
)
|
| 26 |
+
from sklearn.linear_model._glm import _GeneralizedLinearRegressor
|
| 27 |
+
from sklearn.linear_model._glm._newton_solver import NewtonCholeskySolver
|
| 28 |
+
from sklearn.linear_model._linear_loss import LinearModelLoss
|
| 29 |
+
from sklearn.metrics import d2_tweedie_score, mean_poisson_deviance
|
| 30 |
+
from sklearn.model_selection import train_test_split
|
| 31 |
+
|
| 32 |
+
# Solvers exercised by the parametrized solver tests in this module.
SOLVERS = ["lbfgs", "newton-cholesky"]
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class BinomialRegressor(_GeneralizedLinearRegressor):
|
| 36 |
+
def _get_loss(self):
|
| 37 |
+
return HalfBinomialLoss()
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _special_minimize(fun, grad, x, tol_NM, tol):
|
| 41 |
+
# Find good starting point by Nelder-Mead
|
| 42 |
+
res_NM = minimize(
|
| 43 |
+
fun, x, method="Nelder-Mead", options={"xatol": tol_NM, "fatol": tol_NM}
|
| 44 |
+
)
|
| 45 |
+
# Now refine via root finding on the gradient of the function, which is
|
| 46 |
+
# more precise than minimizing the function itself.
|
| 47 |
+
res = root(
|
| 48 |
+
grad,
|
| 49 |
+
res_NM.x,
|
| 50 |
+
method="lm",
|
| 51 |
+
options={"ftol": tol, "xtol": tol, "gtol": tol},
|
| 52 |
+
)
|
| 53 |
+
return res.x
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@pytest.fixture(scope="module")
|
| 57 |
+
def regression_data():
|
| 58 |
+
X, y = make_regression(
|
| 59 |
+
n_samples=107, n_features=10, n_informative=80, noise=0.5, random_state=2
|
| 60 |
+
)
|
| 61 |
+
return X, y
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@pytest.fixture(
|
| 65 |
+
params=itertools.product(
|
| 66 |
+
["long", "wide"],
|
| 67 |
+
[
|
| 68 |
+
BinomialRegressor(),
|
| 69 |
+
PoissonRegressor(),
|
| 70 |
+
GammaRegressor(),
|
| 71 |
+
# TweedieRegressor(power=3.0), # too difficult
|
| 72 |
+
# TweedieRegressor(power=0, link="log"), # too difficult
|
| 73 |
+
TweedieRegressor(power=1.5),
|
| 74 |
+
],
|
| 75 |
+
),
|
| 76 |
+
ids=lambda param: f"{param[0]}-{param[1]}",
|
| 77 |
+
)
|
| 78 |
+
def glm_dataset(global_random_seed, request):
|
| 79 |
+
"""Dataset with GLM solutions, well conditioned X.
|
| 80 |
+
|
| 81 |
+
This is inspired by ols_ridge_dataset in test_ridge.py.
|
| 82 |
+
|
| 83 |
+
The construction is based on the SVD decomposition of X = U S V'.
|
| 84 |
+
|
| 85 |
+
Parameters
|
| 86 |
+
----------
|
| 87 |
+
type : {"long", "wide"}
|
| 88 |
+
If "long", then n_samples > n_features.
|
| 89 |
+
If "wide", then n_features > n_samples.
|
| 90 |
+
model : a GLM model
|
| 91 |
+
|
| 92 |
+
For "wide", we return the minimum norm solution:
|
| 93 |
+
|
| 94 |
+
min ||w||_2 subject to w = argmin deviance(X, y, w)
|
| 95 |
+
|
| 96 |
+
Note that the deviance is always minimized if y = inverse_link(X w) is possible to
|
| 97 |
+
achieve, which it is in the wide data case. Therefore, we can construct the
|
| 98 |
+
solution with minimum norm like (wide) OLS:
|
| 99 |
+
|
| 100 |
+
min ||w||_2 subject to link(y) = raw_prediction = X w
|
| 101 |
+
|
| 102 |
+
Returns
|
| 103 |
+
-------
|
| 104 |
+
model : GLM model
|
| 105 |
+
X : ndarray
|
| 106 |
+
Last column of 1, i.e. intercept.
|
| 107 |
+
y : ndarray
|
| 108 |
+
coef_unpenalized : ndarray
|
| 109 |
+
Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in
|
| 110 |
+
case of ambiguity)
|
| 111 |
+
Last coefficient is intercept.
|
| 112 |
+
coef_penalized : ndarray
|
| 113 |
+
GLM solution with alpha=l2_reg_strength=1, i.e.
|
| 114 |
+
min 1/n * sum(loss) + ||w[:-1]||_2^2.
|
| 115 |
+
Last coefficient is intercept.
|
| 116 |
+
l2_reg_strength : float
|
| 117 |
+
Always equal 1.
|
| 118 |
+
"""
|
| 119 |
+
data_type, model = request.param
|
| 120 |
+
# Make larger dim more than double as big as the smaller one.
|
| 121 |
+
# This helps when constructing singular matrices like (X, X).
|
| 122 |
+
if data_type == "long":
|
| 123 |
+
n_samples, n_features = 12, 4
|
| 124 |
+
else:
|
| 125 |
+
n_samples, n_features = 4, 12
|
| 126 |
+
k = min(n_samples, n_features)
|
| 127 |
+
rng = np.random.RandomState(global_random_seed)
|
| 128 |
+
X = make_low_rank_matrix(
|
| 129 |
+
n_samples=n_samples,
|
| 130 |
+
n_features=n_features,
|
| 131 |
+
effective_rank=k,
|
| 132 |
+
tail_strength=0.1,
|
| 133 |
+
random_state=rng,
|
| 134 |
+
)
|
| 135 |
+
X[:, -1] = 1 # last columns acts as intercept
|
| 136 |
+
U, s, Vt = linalg.svd(X, full_matrices=False)
|
| 137 |
+
assert np.all(s > 1e-3) # to be sure
|
| 138 |
+
assert np.max(s) / np.min(s) < 100 # condition number of X
|
| 139 |
+
|
| 140 |
+
if data_type == "long":
|
| 141 |
+
coef_unpenalized = rng.uniform(low=1, high=3, size=n_features)
|
| 142 |
+
coef_unpenalized *= rng.choice([-1, 1], size=n_features)
|
| 143 |
+
raw_prediction = X @ coef_unpenalized
|
| 144 |
+
else:
|
| 145 |
+
raw_prediction = rng.uniform(low=-3, high=3, size=n_samples)
|
| 146 |
+
# minimum norm solution min ||w||_2 such that raw_prediction = X w:
|
| 147 |
+
# w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction
|
| 148 |
+
coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction
|
| 149 |
+
|
| 150 |
+
linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True)
|
| 151 |
+
sw = np.full(shape=n_samples, fill_value=1 / n_samples)
|
| 152 |
+
y = linear_loss.base_loss.link.inverse(raw_prediction)
|
| 153 |
+
|
| 154 |
+
# Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with
|
| 155 |
+
# optimizer. Note that the problem is well conditioned such that we get accurate
|
| 156 |
+
# results.
|
| 157 |
+
l2_reg_strength = 1
|
| 158 |
+
fun = partial(
|
| 159 |
+
linear_loss.loss,
|
| 160 |
+
X=X[:, :-1],
|
| 161 |
+
y=y,
|
| 162 |
+
sample_weight=sw,
|
| 163 |
+
l2_reg_strength=l2_reg_strength,
|
| 164 |
+
)
|
| 165 |
+
grad = partial(
|
| 166 |
+
linear_loss.gradient,
|
| 167 |
+
X=X[:, :-1],
|
| 168 |
+
y=y,
|
| 169 |
+
sample_weight=sw,
|
| 170 |
+
l2_reg_strength=l2_reg_strength,
|
| 171 |
+
)
|
| 172 |
+
coef_penalized_with_intercept = _special_minimize(
|
| 173 |
+
fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14
|
| 174 |
+
)
|
| 175 |
+
|
| 176 |
+
linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False)
|
| 177 |
+
fun = partial(
|
| 178 |
+
linear_loss.loss,
|
| 179 |
+
X=X[:, :-1],
|
| 180 |
+
y=y,
|
| 181 |
+
sample_weight=sw,
|
| 182 |
+
l2_reg_strength=l2_reg_strength,
|
| 183 |
+
)
|
| 184 |
+
grad = partial(
|
| 185 |
+
linear_loss.gradient,
|
| 186 |
+
X=X[:, :-1],
|
| 187 |
+
y=y,
|
| 188 |
+
sample_weight=sw,
|
| 189 |
+
l2_reg_strength=l2_reg_strength,
|
| 190 |
+
)
|
| 191 |
+
coef_penalized_without_intercept = _special_minimize(
|
| 192 |
+
fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14
|
| 193 |
+
)
|
| 194 |
+
|
| 195 |
+
# To be sure
|
| 196 |
+
assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm(
|
| 197 |
+
coef_unpenalized
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
return (
|
| 201 |
+
model,
|
| 202 |
+
X,
|
| 203 |
+
y,
|
| 204 |
+
coef_unpenalized,
|
| 205 |
+
coef_penalized_with_intercept,
|
| 206 |
+
coef_penalized_without_intercept,
|
| 207 |
+
l2_reg_strength,
|
| 208 |
+
)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
@pytest.mark.parametrize("solver", SOLVERS)
|
| 212 |
+
@pytest.mark.parametrize("fit_intercept", [False, True])
|
| 213 |
+
def test_glm_regression(solver, fit_intercept, glm_dataset):
|
| 214 |
+
"""Test that GLM converges for all solvers to correct solution.
|
| 215 |
+
|
| 216 |
+
We work with a simple constructed data set with known solution.
|
| 217 |
+
"""
|
| 218 |
+
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
|
| 219 |
+
params = dict(
|
| 220 |
+
alpha=alpha,
|
| 221 |
+
fit_intercept=fit_intercept,
|
| 222 |
+
solver=solver,
|
| 223 |
+
tol=1e-12,
|
| 224 |
+
max_iter=1000,
|
| 225 |
+
)
|
| 226 |
+
|
| 227 |
+
model = clone(model).set_params(**params)
|
| 228 |
+
X = X[:, :-1] # remove intercept
|
| 229 |
+
if fit_intercept:
|
| 230 |
+
coef = coef_with_intercept
|
| 231 |
+
intercept = coef[-1]
|
| 232 |
+
coef = coef[:-1]
|
| 233 |
+
else:
|
| 234 |
+
coef = coef_without_intercept
|
| 235 |
+
intercept = 0
|
| 236 |
+
|
| 237 |
+
model.fit(X, y)
|
| 238 |
+
|
| 239 |
+
rtol = 5e-5 if solver == "lbfgs" else 1e-9
|
| 240 |
+
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
|
| 241 |
+
assert_allclose(model.coef_, coef, rtol=rtol)
|
| 242 |
+
|
| 243 |
+
# Same with sample_weight.
|
| 244 |
+
model = (
|
| 245 |
+
clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0]))
|
| 246 |
+
)
|
| 247 |
+
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
|
| 248 |
+
assert_allclose(model.coef_, coef, rtol=rtol)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
@pytest.mark.parametrize("solver", SOLVERS)
|
| 252 |
+
@pytest.mark.parametrize("fit_intercept", [True, False])
|
| 253 |
+
def test_glm_regression_hstacked_X(solver, fit_intercept, glm_dataset):
|
| 254 |
+
"""Test that GLM converges for all solvers to correct solution on hstacked data.
|
| 255 |
+
|
| 256 |
+
We work with a simple constructed data set with known solution.
|
| 257 |
+
Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2.
|
| 258 |
+
For long X, [X, X] is still a long but singular matrix.
|
| 259 |
+
"""
|
| 260 |
+
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
|
| 261 |
+
n_samples, n_features = X.shape
|
| 262 |
+
params = dict(
|
| 263 |
+
alpha=alpha / 2,
|
| 264 |
+
fit_intercept=fit_intercept,
|
| 265 |
+
solver=solver,
|
| 266 |
+
tol=1e-12,
|
| 267 |
+
max_iter=1000,
|
| 268 |
+
)
|
| 269 |
+
|
| 270 |
+
model = clone(model).set_params(**params)
|
| 271 |
+
X = X[:, :-1] # remove intercept
|
| 272 |
+
X = 0.5 * np.concatenate((X, X), axis=1)
|
| 273 |
+
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1)
|
| 274 |
+
if fit_intercept:
|
| 275 |
+
coef = coef_with_intercept
|
| 276 |
+
intercept = coef[-1]
|
| 277 |
+
coef = coef[:-1]
|
| 278 |
+
else:
|
| 279 |
+
coef = coef_without_intercept
|
| 280 |
+
intercept = 0
|
| 281 |
+
|
| 282 |
+
with warnings.catch_warnings():
|
| 283 |
+
# XXX: Investigate if the ConvergenceWarning that can appear in some
|
| 284 |
+
# cases should be considered a bug or not. In the mean time we don't
|
| 285 |
+
# fail when the assertions below pass irrespective of the presence of
|
| 286 |
+
# the warning.
|
| 287 |
+
warnings.simplefilter("ignore", ConvergenceWarning)
|
| 288 |
+
model.fit(X, y)
|
| 289 |
+
|
| 290 |
+
rtol = 2e-4 if solver == "lbfgs" else 5e-9
|
| 291 |
+
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
|
| 292 |
+
assert_allclose(model.coef_, np.r_[coef, coef], rtol=rtol)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
@pytest.mark.parametrize("solver", SOLVERS)
|
| 296 |
+
@pytest.mark.parametrize("fit_intercept", [True, False])
|
| 297 |
+
def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset):
|
| 298 |
+
"""Test that GLM converges for all solvers to correct solution on vstacked data.
|
| 299 |
+
|
| 300 |
+
We work with a simple constructed data set with known solution.
|
| 301 |
+
Fit on [X] with alpha is the same as fit on [X], [y]
|
| 302 |
+
[X], [y] with 1 * alpha.
|
| 303 |
+
It is the same alpha as the average loss stays the same.
|
| 304 |
+
For wide X, [X', X'] is a singular matrix.
|
| 305 |
+
"""
|
| 306 |
+
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
|
| 307 |
+
n_samples, n_features = X.shape
|
| 308 |
+
params = dict(
|
| 309 |
+
alpha=alpha,
|
| 310 |
+
fit_intercept=fit_intercept,
|
| 311 |
+
solver=solver,
|
| 312 |
+
tol=1e-12,
|
| 313 |
+
max_iter=1000,
|
| 314 |
+
)
|
| 315 |
+
|
| 316 |
+
model = clone(model).set_params(**params)
|
| 317 |
+
X = X[:, :-1] # remove intercept
|
| 318 |
+
X = np.concatenate((X, X), axis=0)
|
| 319 |
+
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
|
| 320 |
+
y = np.r_[y, y]
|
| 321 |
+
if fit_intercept:
|
| 322 |
+
coef = coef_with_intercept
|
| 323 |
+
intercept = coef[-1]
|
| 324 |
+
coef = coef[:-1]
|
| 325 |
+
else:
|
| 326 |
+
coef = coef_without_intercept
|
| 327 |
+
intercept = 0
|
| 328 |
+
model.fit(X, y)
|
| 329 |
+
|
| 330 |
+
rtol = 3e-5 if solver == "lbfgs" else 5e-9
|
| 331 |
+
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
|
| 332 |
+
assert_allclose(model.coef_, coef, rtol=rtol)
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
@pytest.mark.parametrize("solver", SOLVERS)
|
| 336 |
+
@pytest.mark.parametrize("fit_intercept", [True, False])
|
| 337 |
+
def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset):
|
| 338 |
+
"""Test that unpenalized GLM converges for all solvers to correct solution.
|
| 339 |
+
|
| 340 |
+
We work with a simple constructed data set with known solution.
|
| 341 |
+
Note: This checks the minimum norm solution for wide X, i.e.
|
| 342 |
+
n_samples < n_features:
|
| 343 |
+
min ||w||_2 subject to w = argmin deviance(X, y, w)
|
| 344 |
+
"""
|
| 345 |
+
model, X, y, coef, _, _, _ = glm_dataset
|
| 346 |
+
n_samples, n_features = X.shape
|
| 347 |
+
alpha = 0 # unpenalized
|
| 348 |
+
params = dict(
|
| 349 |
+
alpha=alpha,
|
| 350 |
+
fit_intercept=fit_intercept,
|
| 351 |
+
solver=solver,
|
| 352 |
+
tol=1e-12,
|
| 353 |
+
max_iter=1000,
|
| 354 |
+
)
|
| 355 |
+
|
| 356 |
+
model = clone(model).set_params(**params)
|
| 357 |
+
if fit_intercept:
|
| 358 |
+
X = X[:, :-1] # remove intercept
|
| 359 |
+
intercept = coef[-1]
|
| 360 |
+
coef = coef[:-1]
|
| 361 |
+
else:
|
| 362 |
+
intercept = 0
|
| 363 |
+
|
| 364 |
+
with warnings.catch_warnings():
|
| 365 |
+
if solver.startswith("newton") and n_samples < n_features:
|
| 366 |
+
# The newton solvers should warn and automatically fallback to LBFGS
|
| 367 |
+
# in this case. The model should still converge.
|
| 368 |
+
warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
|
| 369 |
+
# XXX: Investigate if the ConvergenceWarning that can appear in some
|
| 370 |
+
# cases should be considered a bug or not. In the mean time we don't
|
| 371 |
+
# fail when the assertions below pass irrespective of the presence of
|
| 372 |
+
# the warning.
|
| 373 |
+
warnings.filterwarnings("ignore", category=ConvergenceWarning)
|
| 374 |
+
model.fit(X, y)
|
| 375 |
+
|
| 376 |
+
# FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails
|
| 377 |
+
# for the wide/fat case with n_features > n_samples. Most current GLM solvers do
|
| 378 |
+
# NOT return the minimum norm solution with fit_intercept=True.
|
| 379 |
+
if n_samples > n_features:
|
| 380 |
+
rtol = 5e-5 if solver == "lbfgs" else 1e-7
|
| 381 |
+
assert model.intercept_ == pytest.approx(intercept)
|
| 382 |
+
assert_allclose(model.coef_, coef, rtol=rtol)
|
| 383 |
+
else:
|
| 384 |
+
# As it is an underdetermined problem, prediction = y. The following shows that
|
| 385 |
+
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
|
| 386 |
+
rtol = 5e-5
|
| 387 |
+
if solver == "newton-cholesky":
|
| 388 |
+
rtol = 5e-4
|
| 389 |
+
assert_allclose(model.predict(X), y, rtol=rtol)
|
| 390 |
+
|
| 391 |
+
norm_solution = np.linalg.norm(np.r_[intercept, coef])
|
| 392 |
+
norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
|
| 393 |
+
if solver == "newton-cholesky":
|
| 394 |
+
# XXX: This solver shows random behaviour. Sometimes it finds solutions
|
| 395 |
+
# with norm_model <= norm_solution! So we check conditionally.
|
| 396 |
+
if norm_model < (1 + 1e-12) * norm_solution:
|
| 397 |
+
assert model.intercept_ == pytest.approx(intercept)
|
| 398 |
+
assert_allclose(model.coef_, coef, rtol=rtol)
|
| 399 |
+
elif solver == "lbfgs" and fit_intercept:
|
| 400 |
+
# But it is not the minimum norm solution. Otherwise the norms would be
|
| 401 |
+
# equal.
|
| 402 |
+
assert norm_model > (1 + 1e-12) * norm_solution
|
| 403 |
+
|
| 404 |
+
# See https://github.com/scikit-learn/scikit-learn/issues/23670.
|
| 405 |
+
# Note: Even adding a tiny penalty does not give the minimal norm solution.
|
| 406 |
+
# XXX: We could have naively expected LBFGS to find the minimal norm
|
| 407 |
+
# solution by adding a very small penalty. Even that fails for a reason we
|
| 408 |
+
# do not properly understand at this point.
|
| 409 |
+
else:
|
| 410 |
+
# When `fit_intercept=False`, LBFGS naturally converges to the minimum norm
|
| 411 |
+
# solution on this problem.
|
| 412 |
+
# XXX: Do we have any theoretical guarantees why this should be the case?
|
| 413 |
+
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
|
| 414 |
+
assert_allclose(model.coef_, coef, rtol=rtol)
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
@pytest.mark.parametrize("solver", SOLVERS)
|
| 418 |
+
@pytest.mark.parametrize("fit_intercept", [True, False])
|
| 419 |
+
def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset):
|
| 420 |
+
"""Test that unpenalized GLM converges for all solvers to correct solution.
|
| 421 |
+
|
| 422 |
+
We work with a simple constructed data set with known solution.
|
| 423 |
+
GLM fit on [X] is the same as fit on [X, X]/2.
|
| 424 |
+
For long X, [X, X] is a singular matrix and we check against the minimum norm
|
| 425 |
+
solution:
|
| 426 |
+
min ||w||_2 subject to w = argmin deviance(X, y, w)
|
| 427 |
+
"""
|
| 428 |
+
model, X, y, coef, _, _, _ = glm_dataset
|
| 429 |
+
n_samples, n_features = X.shape
|
| 430 |
+
alpha = 0 # unpenalized
|
| 431 |
+
params = dict(
|
| 432 |
+
alpha=alpha,
|
| 433 |
+
fit_intercept=fit_intercept,
|
| 434 |
+
solver=solver,
|
| 435 |
+
tol=1e-12,
|
| 436 |
+
max_iter=1000,
|
| 437 |
+
)
|
| 438 |
+
|
| 439 |
+
model = clone(model).set_params(**params)
|
| 440 |
+
if fit_intercept:
|
| 441 |
+
intercept = coef[-1]
|
| 442 |
+
coef = coef[:-1]
|
| 443 |
+
if n_samples > n_features:
|
| 444 |
+
X = X[:, :-1] # remove intercept
|
| 445 |
+
X = 0.5 * np.concatenate((X, X), axis=1)
|
| 446 |
+
else:
|
| 447 |
+
# To know the minimum norm solution, we keep one intercept column and do
|
| 448 |
+
# not divide by 2. Later on, we must take special care.
|
| 449 |
+
X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]]
|
| 450 |
+
else:
|
| 451 |
+
intercept = 0
|
| 452 |
+
X = 0.5 * np.concatenate((X, X), axis=1)
|
| 453 |
+
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
|
| 454 |
+
|
| 455 |
+
with warnings.catch_warnings():
|
| 456 |
+
if solver.startswith("newton"):
|
| 457 |
+
# The newton solvers should warn and automatically fallback to LBFGS
|
| 458 |
+
# in this case. The model should still converge.
|
| 459 |
+
warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
|
| 460 |
+
# XXX: Investigate if the ConvergenceWarning that can appear in some
|
| 461 |
+
# cases should be considered a bug or not. In the mean time we don't
|
| 462 |
+
# fail when the assertions below pass irrespective of the presence of
|
| 463 |
+
# the warning.
|
| 464 |
+
warnings.filterwarnings("ignore", category=ConvergenceWarning)
|
| 465 |
+
model.fit(X, y)
|
| 466 |
+
|
| 467 |
+
if fit_intercept and n_samples < n_features:
|
| 468 |
+
# Here we take special care.
|
| 469 |
+
model_intercept = 2 * model.intercept_
|
| 470 |
+
model_coef = 2 * model.coef_[:-1] # exclude the other intercept term.
|
| 471 |
+
# For minimum norm solution, we would have
|
| 472 |
+
# assert model.intercept_ == pytest.approx(model.coef_[-1])
|
| 473 |
+
else:
|
| 474 |
+
model_intercept = model.intercept_
|
| 475 |
+
model_coef = model.coef_
|
| 476 |
+
|
| 477 |
+
if n_samples > n_features:
|
| 478 |
+
assert model_intercept == pytest.approx(intercept)
|
| 479 |
+
rtol = 1e-4
|
| 480 |
+
assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol)
|
| 481 |
+
else:
|
| 482 |
+
# As it is an underdetermined problem, prediction = y. The following shows that
|
| 483 |
+
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
|
| 484 |
+
rtol = 1e-6 if solver == "lbfgs" else 5e-6
|
| 485 |
+
assert_allclose(model.predict(X), y, rtol=rtol)
|
| 486 |
+
if (solver == "lbfgs" and fit_intercept) or solver == "newton-cholesky":
|
| 487 |
+
# Same as in test_glm_regression_unpenalized.
|
| 488 |
+
# But it is not the minimum norm solution. Otherwise the norms would be
|
| 489 |
+
# equal.
|
| 490 |
+
norm_solution = np.linalg.norm(
|
| 491 |
+
0.5 * np.r_[intercept, intercept, coef, coef]
|
| 492 |
+
)
|
| 493 |
+
norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
|
| 494 |
+
assert norm_model > (1 + 1e-12) * norm_solution
|
| 495 |
+
# For minimum norm solution, we would have
|
| 496 |
+
# assert model.intercept_ == pytest.approx(model.coef_[-1])
|
| 497 |
+
else:
|
| 498 |
+
assert model_intercept == pytest.approx(intercept, rel=5e-6)
|
| 499 |
+
assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4)
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
@pytest.mark.parametrize("solver", SOLVERS)
|
| 503 |
+
@pytest.mark.parametrize("fit_intercept", [True, False])
|
| 504 |
+
def test_glm_regression_unpenalized_vstacked_X(solver, fit_intercept, glm_dataset):
|
| 505 |
+
"""Test that unpenalized GLM converges for all solvers to correct solution.
|
| 506 |
+
|
| 507 |
+
We work with a simple constructed data set with known solution.
|
| 508 |
+
GLM fit on [X] is the same as fit on [X], [y]
|
| 509 |
+
[X], [y].
|
| 510 |
+
For wide X, [X', X'] is a singular matrix and we check against the minimum norm
|
| 511 |
+
solution:
|
| 512 |
+
min ||w||_2 subject to w = argmin deviance(X, y, w)
|
| 513 |
+
"""
|
| 514 |
+
model, X, y, coef, _, _, _ = glm_dataset
|
| 515 |
+
n_samples, n_features = X.shape
|
| 516 |
+
alpha = 0 # unpenalized
|
| 517 |
+
params = dict(
|
| 518 |
+
alpha=alpha,
|
| 519 |
+
fit_intercept=fit_intercept,
|
| 520 |
+
solver=solver,
|
| 521 |
+
tol=1e-12,
|
| 522 |
+
max_iter=1000,
|
| 523 |
+
)
|
| 524 |
+
|
| 525 |
+
model = clone(model).set_params(**params)
|
| 526 |
+
if fit_intercept:
|
| 527 |
+
X = X[:, :-1] # remove intercept
|
| 528 |
+
intercept = coef[-1]
|
| 529 |
+
coef = coef[:-1]
|
| 530 |
+
else:
|
| 531 |
+
intercept = 0
|
| 532 |
+
X = np.concatenate((X, X), axis=0)
|
| 533 |
+
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
|
| 534 |
+
y = np.r_[y, y]
|
| 535 |
+
|
| 536 |
+
with warnings.catch_warnings():
|
| 537 |
+
if solver.startswith("newton") and n_samples < n_features:
|
| 538 |
+
# The newton solvers should warn and automatically fallback to LBFGS
|
| 539 |
+
# in this case. The model should still converge.
|
| 540 |
+
warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
|
| 541 |
+
# XXX: Investigate if the ConvergenceWarning that can appear in some
|
| 542 |
+
# cases should be considered a bug or not. In the mean time we don't
|
| 543 |
+
# fail when the assertions below pass irrespective of the presence of
|
| 544 |
+
# the warning.
|
| 545 |
+
warnings.filterwarnings("ignore", category=ConvergenceWarning)
|
| 546 |
+
model.fit(X, y)
|
| 547 |
+
|
| 548 |
+
if n_samples > n_features:
|
| 549 |
+
rtol = 5e-5 if solver == "lbfgs" else 1e-6
|
| 550 |
+
assert model.intercept_ == pytest.approx(intercept)
|
| 551 |
+
assert_allclose(model.coef_, coef, rtol=rtol)
|
| 552 |
+
else:
|
| 553 |
+
# As it is an underdetermined problem, prediction = y. The following shows that
|
| 554 |
+
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
|
| 555 |
+
rtol = 1e-6 if solver == "lbfgs" else 5e-6
|
| 556 |
+
assert_allclose(model.predict(X), y, rtol=rtol)
|
| 557 |
+
|
| 558 |
+
norm_solution = np.linalg.norm(np.r_[intercept, coef])
|
| 559 |
+
norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
|
| 560 |
+
if solver == "newton-cholesky":
|
| 561 |
+
# XXX: This solver shows random behaviour. Sometimes it finds solutions
|
| 562 |
+
# with norm_model <= norm_solution! So we check conditionally.
|
| 563 |
+
if not (norm_model > (1 + 1e-12) * norm_solution):
|
| 564 |
+
assert model.intercept_ == pytest.approx(intercept)
|
| 565 |
+
assert_allclose(model.coef_, coef, rtol=1e-4)
|
| 566 |
+
elif solver == "lbfgs" and fit_intercept:
|
| 567 |
+
# Same as in test_glm_regression_unpenalized.
|
| 568 |
+
# But it is not the minimum norm solution. Otherwise the norms would be
|
| 569 |
+
# equal.
|
| 570 |
+
assert norm_model > (1 + 1e-12) * norm_solution
|
| 571 |
+
else:
|
| 572 |
+
rtol = 1e-5 if solver == "newton-cholesky" else 1e-4
|
| 573 |
+
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
|
| 574 |
+
assert_allclose(model.coef_, coef, rtol=rtol)
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
def test_sample_weights_validation():
|
| 578 |
+
"""Test the raised errors in the validation of sample_weight."""
|
| 579 |
+
# scalar value but not positive
|
| 580 |
+
X = [[1]]
|
| 581 |
+
y = [1]
|
| 582 |
+
weights = 0
|
| 583 |
+
glm = _GeneralizedLinearRegressor()
|
| 584 |
+
|
| 585 |
+
# Positive weights are accepted
|
| 586 |
+
glm.fit(X, y, sample_weight=1)
|
| 587 |
+
|
| 588 |
+
# 2d array
|
| 589 |
+
weights = [[0]]
|
| 590 |
+
with pytest.raises(ValueError, match="must be 1D array or scalar"):
|
| 591 |
+
glm.fit(X, y, weights)
|
| 592 |
+
|
| 593 |
+
# 1d but wrong length
|
| 594 |
+
weights = [1, 0]
|
| 595 |
+
msg = r"sample_weight.shape == \(2,\), expected \(1,\)!"
|
| 596 |
+
with pytest.raises(ValueError, match=msg):
|
| 597 |
+
glm.fit(X, y, weights)
|
| 598 |
+
|
| 599 |
+
|
| 600 |
+
@pytest.mark.parametrize(
|
| 601 |
+
"glm",
|
| 602 |
+
[
|
| 603 |
+
TweedieRegressor(power=3),
|
| 604 |
+
PoissonRegressor(),
|
| 605 |
+
GammaRegressor(),
|
| 606 |
+
TweedieRegressor(power=1.5),
|
| 607 |
+
],
|
| 608 |
+
)
|
| 609 |
+
def test_glm_wrong_y_range(glm):
|
| 610 |
+
y = np.array([-1, 2])
|
| 611 |
+
X = np.array([[1], [1]])
|
| 612 |
+
msg = r"Some value\(s\) of y are out of the valid range of the loss"
|
| 613 |
+
with pytest.raises(ValueError, match=msg):
|
| 614 |
+
glm.fit(X, y)
|
| 615 |
+
|
| 616 |
+
|
| 617 |
+
@pytest.mark.parametrize("fit_intercept", [False, True])
|
| 618 |
+
def test_glm_identity_regression(fit_intercept):
|
| 619 |
+
"""Test GLM regression with identity link on a simple dataset."""
|
| 620 |
+
coef = [1.0, 2.0]
|
| 621 |
+
X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
|
| 622 |
+
y = np.dot(X, coef)
|
| 623 |
+
glm = _GeneralizedLinearRegressor(
|
| 624 |
+
alpha=0,
|
| 625 |
+
fit_intercept=fit_intercept,
|
| 626 |
+
tol=1e-12,
|
| 627 |
+
)
|
| 628 |
+
if fit_intercept:
|
| 629 |
+
glm.fit(X[:, 1:], y)
|
| 630 |
+
assert_allclose(glm.coef_, coef[1:], rtol=1e-10)
|
| 631 |
+
assert_allclose(glm.intercept_, coef[0], rtol=1e-10)
|
| 632 |
+
else:
|
| 633 |
+
glm.fit(X, y)
|
| 634 |
+
assert_allclose(glm.coef_, coef, rtol=1e-12)
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
@pytest.mark.parametrize("fit_intercept", [False, True])
|
| 638 |
+
@pytest.mark.parametrize("alpha", [0.0, 1.0])
|
| 639 |
+
@pytest.mark.parametrize(
|
| 640 |
+
"GLMEstimator", [_GeneralizedLinearRegressor, PoissonRegressor, GammaRegressor]
|
| 641 |
+
)
|
| 642 |
+
def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator):
|
| 643 |
+
"""Test that the impact of sample_weight is consistent"""
|
| 644 |
+
rng = np.random.RandomState(0)
|
| 645 |
+
n_samples, n_features = 10, 5
|
| 646 |
+
|
| 647 |
+
X = rng.rand(n_samples, n_features)
|
| 648 |
+
y = rng.rand(n_samples)
|
| 649 |
+
glm_params = dict(alpha=alpha, fit_intercept=fit_intercept)
|
| 650 |
+
|
| 651 |
+
glm = GLMEstimator(**glm_params).fit(X, y)
|
| 652 |
+
coef = glm.coef_.copy()
|
| 653 |
+
|
| 654 |
+
# sample_weight=np.ones(..) should be equivalent to sample_weight=None
|
| 655 |
+
sample_weight = np.ones(y.shape)
|
| 656 |
+
glm.fit(X, y, sample_weight=sample_weight)
|
| 657 |
+
assert_allclose(glm.coef_, coef, rtol=1e-12)
|
| 658 |
+
|
| 659 |
+
# sample_weight are normalized to 1 so, scaling them has no effect
|
| 660 |
+
sample_weight = 2 * np.ones(y.shape)
|
| 661 |
+
glm.fit(X, y, sample_weight=sample_weight)
|
| 662 |
+
assert_allclose(glm.coef_, coef, rtol=1e-12)
|
| 663 |
+
|
| 664 |
+
# setting one element of sample_weight to 0 is equivalent to removing
|
| 665 |
+
# the corresponding sample
|
| 666 |
+
sample_weight = np.ones(y.shape)
|
| 667 |
+
sample_weight[-1] = 0
|
| 668 |
+
glm.fit(X, y, sample_weight=sample_weight)
|
| 669 |
+
coef1 = glm.coef_.copy()
|
| 670 |
+
glm.fit(X[:-1], y[:-1])
|
| 671 |
+
assert_allclose(glm.coef_, coef1, rtol=1e-12)
|
| 672 |
+
|
| 673 |
+
# check that multiplying sample_weight by 2 is equivalent
|
| 674 |
+
# to repeating corresponding samples twice
|
| 675 |
+
X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)
|
| 676 |
+
y2 = np.concatenate([y, y[: n_samples // 2]])
|
| 677 |
+
sample_weight_1 = np.ones(len(y))
|
| 678 |
+
sample_weight_1[: n_samples // 2] = 2
|
| 679 |
+
|
| 680 |
+
glm1 = GLMEstimator(**glm_params).fit(X, y, sample_weight=sample_weight_1)
|
| 681 |
+
|
| 682 |
+
glm2 = GLMEstimator(**glm_params).fit(X2, y2, sample_weight=None)
|
| 683 |
+
assert_allclose(glm1.coef_, glm2.coef_)
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
@pytest.mark.parametrize("solver", SOLVERS)
|
| 687 |
+
@pytest.mark.parametrize("fit_intercept", [True, False])
|
| 688 |
+
@pytest.mark.parametrize(
|
| 689 |
+
"estimator",
|
| 690 |
+
[
|
| 691 |
+
PoissonRegressor(),
|
| 692 |
+
GammaRegressor(),
|
| 693 |
+
TweedieRegressor(power=3.0),
|
| 694 |
+
TweedieRegressor(power=0, link="log"),
|
| 695 |
+
TweedieRegressor(power=1.5),
|
| 696 |
+
TweedieRegressor(power=4.5),
|
| 697 |
+
],
|
| 698 |
+
)
|
| 699 |
+
def test_glm_log_regression(solver, fit_intercept, estimator):
|
| 700 |
+
"""Test GLM regression with log link on a simple dataset."""
|
| 701 |
+
coef = [0.2, -0.1]
|
| 702 |
+
X = np.array([[0, 1, 2, 3, 4], [1, 1, 1, 1, 1]]).T
|
| 703 |
+
y = np.exp(np.dot(X, coef))
|
| 704 |
+
glm = clone(estimator).set_params(
|
| 705 |
+
alpha=0,
|
| 706 |
+
fit_intercept=fit_intercept,
|
| 707 |
+
solver=solver,
|
| 708 |
+
tol=1e-8,
|
| 709 |
+
)
|
| 710 |
+
if fit_intercept:
|
| 711 |
+
res = glm.fit(X[:, :-1], y)
|
| 712 |
+
assert_allclose(res.coef_, coef[:-1], rtol=1e-6)
|
| 713 |
+
assert_allclose(res.intercept_, coef[-1], rtol=1e-6)
|
| 714 |
+
else:
|
| 715 |
+
res = glm.fit(X, y)
|
| 716 |
+
assert_allclose(res.coef_, coef, rtol=2e-6)
|
| 717 |
+
|
| 718 |
+
|
| 719 |
+
@pytest.mark.parametrize("solver", SOLVERS)
|
| 720 |
+
@pytest.mark.parametrize("fit_intercept", [True, False])
|
| 721 |
+
def test_warm_start(solver, fit_intercept, global_random_seed):
|
| 722 |
+
n_samples, n_features = 100, 10
|
| 723 |
+
X, y = make_regression(
|
| 724 |
+
n_samples=n_samples,
|
| 725 |
+
n_features=n_features,
|
| 726 |
+
n_informative=n_features - 2,
|
| 727 |
+
bias=fit_intercept * 1.0,
|
| 728 |
+
noise=1.0,
|
| 729 |
+
random_state=global_random_seed,
|
| 730 |
+
)
|
| 731 |
+
y = np.abs(y) # Poisson requires non-negative targets.
|
| 732 |
+
alpha = 1
|
| 733 |
+
params = {
|
| 734 |
+
"solver": solver,
|
| 735 |
+
"fit_intercept": fit_intercept,
|
| 736 |
+
"tol": 1e-10,
|
| 737 |
+
}
|
| 738 |
+
|
| 739 |
+
glm1 = PoissonRegressor(warm_start=False, max_iter=1000, alpha=alpha, **params)
|
| 740 |
+
glm1.fit(X, y)
|
| 741 |
+
|
| 742 |
+
glm2 = PoissonRegressor(warm_start=True, max_iter=1, alpha=alpha, **params)
|
| 743 |
+
# As we intentionally set max_iter=1 such that the solver should raise a
|
| 744 |
+
# ConvergenceWarning.
|
| 745 |
+
with pytest.warns(ConvergenceWarning):
|
| 746 |
+
glm2.fit(X, y)
|
| 747 |
+
|
| 748 |
+
linear_loss = LinearModelLoss(
|
| 749 |
+
base_loss=glm1._get_loss(),
|
| 750 |
+
fit_intercept=fit_intercept,
|
| 751 |
+
)
|
| 752 |
+
sw = np.full_like(y, fill_value=1 / n_samples)
|
| 753 |
+
|
| 754 |
+
objective_glm1 = linear_loss.loss(
|
| 755 |
+
coef=np.r_[glm1.coef_, glm1.intercept_] if fit_intercept else glm1.coef_,
|
| 756 |
+
X=X,
|
| 757 |
+
y=y,
|
| 758 |
+
sample_weight=sw,
|
| 759 |
+
l2_reg_strength=alpha,
|
| 760 |
+
)
|
| 761 |
+
objective_glm2 = linear_loss.loss(
|
| 762 |
+
coef=np.r_[glm2.coef_, glm2.intercept_] if fit_intercept else glm2.coef_,
|
| 763 |
+
X=X,
|
| 764 |
+
y=y,
|
| 765 |
+
sample_weight=sw,
|
| 766 |
+
l2_reg_strength=alpha,
|
| 767 |
+
)
|
| 768 |
+
assert objective_glm1 < objective_glm2
|
| 769 |
+
|
| 770 |
+
glm2.set_params(max_iter=1000)
|
| 771 |
+
glm2.fit(X, y)
|
| 772 |
+
# The two models are not exactly identical since the lbfgs solver
|
| 773 |
+
# computes the approximate hessian from previous iterations, which
|
| 774 |
+
# will not be strictly identical in the case of a warm start.
|
| 775 |
+
rtol = 2e-4 if solver == "lbfgs" else 1e-9
|
| 776 |
+
assert_allclose(glm1.coef_, glm2.coef_, rtol=rtol)
|
| 777 |
+
assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-5)
|
| 778 |
+
|
| 779 |
+
|
| 780 |
+
@pytest.mark.parametrize("n_samples, n_features", [(100, 10), (10, 100)])
|
| 781 |
+
@pytest.mark.parametrize("fit_intercept", [True, False])
|
| 782 |
+
@pytest.mark.parametrize("sample_weight", [None, True])
|
| 783 |
+
def test_normal_ridge_comparison(
|
| 784 |
+
n_samples, n_features, fit_intercept, sample_weight, request
|
| 785 |
+
):
|
| 786 |
+
"""Compare with Ridge regression for Normal distributions."""
|
| 787 |
+
test_size = 10
|
| 788 |
+
X, y = make_regression(
|
| 789 |
+
n_samples=n_samples + test_size,
|
| 790 |
+
n_features=n_features,
|
| 791 |
+
n_informative=n_features - 2,
|
| 792 |
+
noise=0.5,
|
| 793 |
+
random_state=42,
|
| 794 |
+
)
|
| 795 |
+
|
| 796 |
+
if n_samples > n_features:
|
| 797 |
+
ridge_params = {"solver": "svd"}
|
| 798 |
+
else:
|
| 799 |
+
ridge_params = {"solver": "saga", "max_iter": 1000000, "tol": 1e-7}
|
| 800 |
+
|
| 801 |
+
(
|
| 802 |
+
X_train,
|
| 803 |
+
X_test,
|
| 804 |
+
y_train,
|
| 805 |
+
y_test,
|
| 806 |
+
) = train_test_split(X, y, test_size=test_size, random_state=0)
|
| 807 |
+
|
| 808 |
+
alpha = 1.0
|
| 809 |
+
if sample_weight is None:
|
| 810 |
+
sw_train = None
|
| 811 |
+
alpha_ridge = alpha * n_samples
|
| 812 |
+
else:
|
| 813 |
+
sw_train = np.random.RandomState(0).rand(len(y_train))
|
| 814 |
+
alpha_ridge = alpha * sw_train.sum()
|
| 815 |
+
|
| 816 |
+
# GLM has 1/(2*n) * Loss + 1/2*L2, Ridge has Loss + L2
|
| 817 |
+
ridge = Ridge(
|
| 818 |
+
alpha=alpha_ridge,
|
| 819 |
+
random_state=42,
|
| 820 |
+
fit_intercept=fit_intercept,
|
| 821 |
+
**ridge_params,
|
| 822 |
+
)
|
| 823 |
+
ridge.fit(X_train, y_train, sample_weight=sw_train)
|
| 824 |
+
|
| 825 |
+
glm = _GeneralizedLinearRegressor(
|
| 826 |
+
alpha=alpha,
|
| 827 |
+
fit_intercept=fit_intercept,
|
| 828 |
+
max_iter=300,
|
| 829 |
+
tol=1e-5,
|
| 830 |
+
)
|
| 831 |
+
glm.fit(X_train, y_train, sample_weight=sw_train)
|
| 832 |
+
assert glm.coef_.shape == (X.shape[1],)
|
| 833 |
+
assert_allclose(glm.coef_, ridge.coef_, atol=5e-5)
|
| 834 |
+
assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5)
|
| 835 |
+
assert_allclose(glm.predict(X_train), ridge.predict(X_train), rtol=2e-4)
|
| 836 |
+
assert_allclose(glm.predict(X_test), ridge.predict(X_test), rtol=2e-4)
|
| 837 |
+
|
| 838 |
+
|
| 839 |
+
@pytest.mark.parametrize("solver", ["lbfgs", "newton-cholesky"])
|
| 840 |
+
def test_poisson_glmnet(solver):
|
| 841 |
+
"""Compare Poisson regression with L2 regularization and LogLink to glmnet"""
|
| 842 |
+
# library("glmnet")
|
| 843 |
+
# options(digits=10)
|
| 844 |
+
# df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2))
|
| 845 |
+
# x <- data.matrix(df[,c("a", "b")])
|
| 846 |
+
# y <- df$y
|
| 847 |
+
# fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson",
|
| 848 |
+
# standardize=F, thresh=1e-10, nlambda=10000)
|
| 849 |
+
# coef(fit, s=1)
|
| 850 |
+
# (Intercept) -0.12889386979
|
| 851 |
+
# a 0.29019207995
|
| 852 |
+
# b 0.03741173122
|
| 853 |
+
X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T
|
| 854 |
+
y = np.array([0, 1, 1, 2])
|
| 855 |
+
glm = PoissonRegressor(
|
| 856 |
+
alpha=1,
|
| 857 |
+
fit_intercept=True,
|
| 858 |
+
tol=1e-7,
|
| 859 |
+
max_iter=300,
|
| 860 |
+
solver=solver,
|
| 861 |
+
)
|
| 862 |
+
glm.fit(X, y)
|
| 863 |
+
assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5)
|
| 864 |
+
assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5)
|
| 865 |
+
|
| 866 |
+
|
| 867 |
+
def test_convergence_warning(regression_data):
|
| 868 |
+
X, y = regression_data
|
| 869 |
+
|
| 870 |
+
est = _GeneralizedLinearRegressor(max_iter=1, tol=1e-20)
|
| 871 |
+
with pytest.warns(ConvergenceWarning):
|
| 872 |
+
est.fit(X, y)
|
| 873 |
+
|
| 874 |
+
|
| 875 |
+
@pytest.mark.parametrize(
|
| 876 |
+
"name, link_class", [("identity", IdentityLink), ("log", LogLink)]
|
| 877 |
+
)
|
| 878 |
+
def test_tweedie_link_argument(name, link_class):
|
| 879 |
+
"""Test GLM link argument set as string."""
|
| 880 |
+
y = np.array([0.1, 0.5]) # in range of all distributions
|
| 881 |
+
X = np.array([[1], [2]])
|
| 882 |
+
glm = TweedieRegressor(power=1, link=name).fit(X, y)
|
| 883 |
+
assert isinstance(glm._base_loss.link, link_class)
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
@pytest.mark.parametrize(
|
| 887 |
+
"power, expected_link_class",
|
| 888 |
+
[
|
| 889 |
+
(0, IdentityLink), # normal
|
| 890 |
+
(1, LogLink), # poisson
|
| 891 |
+
(2, LogLink), # gamma
|
| 892 |
+
(3, LogLink), # inverse-gaussian
|
| 893 |
+
],
|
| 894 |
+
)
|
| 895 |
+
def test_tweedie_link_auto(power, expected_link_class):
|
| 896 |
+
"""Test that link='auto' delivers the expected link function"""
|
| 897 |
+
y = np.array([0.1, 0.5]) # in range of all distributions
|
| 898 |
+
X = np.array([[1], [2]])
|
| 899 |
+
glm = TweedieRegressor(link="auto", power=power).fit(X, y)
|
| 900 |
+
assert isinstance(glm._base_loss.link, expected_link_class)
|
| 901 |
+
|
| 902 |
+
|
| 903 |
+
@pytest.mark.parametrize("power", [0, 1, 1.5, 2, 3])
|
| 904 |
+
@pytest.mark.parametrize("link", ["log", "identity"])
|
| 905 |
+
def test_tweedie_score(regression_data, power, link):
|
| 906 |
+
"""Test that GLM score equals d2_tweedie_score for Tweedie losses."""
|
| 907 |
+
X, y = regression_data
|
| 908 |
+
# make y positive
|
| 909 |
+
y = np.abs(y) + 1.0
|
| 910 |
+
glm = TweedieRegressor(power=power, link=link).fit(X, y)
|
| 911 |
+
assert glm.score(X, y) == pytest.approx(
|
| 912 |
+
d2_tweedie_score(y, glm.predict(X), power=power)
|
| 913 |
+
)
|
| 914 |
+
|
| 915 |
+
|
| 916 |
+
@pytest.mark.parametrize(
|
| 917 |
+
"estimator, value",
|
| 918 |
+
[
|
| 919 |
+
(PoissonRegressor(), True),
|
| 920 |
+
(GammaRegressor(), True),
|
| 921 |
+
(TweedieRegressor(power=1.5), True),
|
| 922 |
+
(TweedieRegressor(power=0), False),
|
| 923 |
+
],
|
| 924 |
+
)
|
| 925 |
+
def test_tags(estimator, value):
|
| 926 |
+
assert estimator.__sklearn_tags__().target_tags.positive_only is value
|
| 927 |
+
|
| 928 |
+
|
| 929 |
+
def test_linalg_warning_with_newton_solver(global_random_seed):
    """Newton-Cholesky must warn on a singular Hessian and fall back to LBFGS.

    A collinear design makes the Hessian singular; the solver is expected to
    emit a ``scipy.linalg.LinAlgWarning`` and still converge to a solution of
    the same quality as LBFGS / Newton on the original well-conditioned data.
    """
    newton_solver = "newton-cholesky"
    rng = np.random.RandomState(global_random_seed)
    # Use at least 20 samples to reduce the likelihood of getting a degenerate
    # dataset for any global_random_seed.
    X_orig = rng.normal(size=(20, 3))
    # Poisson targets generated from a log-linear model with unit coefficients.
    y = rng.poisson(
        np.exp(X_orig @ np.ones(X_orig.shape[1])), size=X_orig.shape[0]
    ).astype(np.float64)

    # Collinear variation of the same input features.
    X_collinear = np.hstack([X_orig] * 10)

    # Let's consider the deviance of a constant baseline on this problem.
    baseline_pred = np.full_like(y, y.mean())
    constant_model_deviance = mean_poisson_deviance(y, baseline_pred)
    assert constant_model_deviance > 1.0

    # No warning raised on well-conditioned design, even without regularization.
    tol = 1e-10
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(X_orig, y)
    original_newton_deviance = mean_poisson_deviance(y, reg.predict(X_orig))

    # On this dataset, we should have enough data points to not make it
    # possible to get a near zero deviance (for any of the admissible
    # random seeds). This will make it easier to interpret meaning of rtol in
    # the subsequent assertions:
    assert original_newton_deviance > 0.2

    # We check that the model could successfully fit information in X_orig to
    # improve upon the constant baseline by a large margin (when evaluated on
    # the training set).
    assert constant_model_deviance - original_newton_deviance > 0.1

    # LBFGS is robust to a collinear design because its approximation of the
    # Hessian is Symmetric Positive Definite by construction. Let's record its
    # solution.
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        reg = PoissonRegressor(solver="lbfgs", alpha=0.0, tol=tol).fit(X_collinear, y)
    collinear_lbfgs_deviance = mean_poisson_deviance(y, reg.predict(X_collinear))

    # The LBFGS solution on the collinear data is expected to reach a comparable
    # solution to the Newton solution on the original data.
    rtol = 1e-6
    assert collinear_lbfgs_deviance == pytest.approx(original_newton_deviance, rel=rtol)

    # Fitting a Newton solver on the collinear version of the training data
    # without regularization should raise an informative warning and fallback
    # to the LBFGS solver.
    msg = (
        "The inner solver of .*Newton.*Solver stumbled upon a singular or very "
        "ill-conditioned Hessian matrix"
    )
    with pytest.warns(scipy.linalg.LinAlgWarning, match=msg):
        reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(
            X_collinear, y
        )
    # As a result we should still automatically converge to a good solution.
    collinear_newton_deviance = mean_poisson_deviance(y, reg.predict(X_collinear))
    assert collinear_newton_deviance == pytest.approx(
        original_newton_deviance, rel=rtol
    )

    # Increasing the regularization slightly should make the problem go away:
    with warnings.catch_warnings():
        warnings.simplefilter("error", scipy.linalg.LinAlgWarning)
        reg = PoissonRegressor(solver=newton_solver, alpha=1e-10).fit(X_collinear, y)

    # The slightly penalized model on the collinear data should be close enough
    # to the unpenalized model on the original data.
    penalized_collinear_newton_deviance = mean_poisson_deviance(
        y, reg.predict(X_collinear)
    )
    assert penalized_collinear_newton_deviance == pytest.approx(
        original_newton_deviance, rel=rtol
    )
|
| 1008 |
+
|
| 1009 |
+
|
| 1010 |
+
@pytest.mark.parametrize("verbose", [0, 1, 2])
def test_newton_solver_verbosity(capsys, verbose):
    """Test the std output of verbose newton solvers."""
    y = np.array([1, 2], dtype=float)
    X = np.array([[1.0, 0], [0, 1]], dtype=float)
    linear_loss = LinearModelLoss(base_loss=HalfPoissonLoss(), fit_intercept=False)
    sol = NewtonCholeskySolver(
        coef=linear_loss.init_zero_coef(X),
        linear_loss=linear_loss,
        l2_reg_strength=0,
        verbose=verbose,
    )
    sol.solve(X, y, None)  # returns array([0., 0.69314758])
    captured = capsys.readouterr()

    if verbose == 0:
        # With verbose=0 the solver must stay completely silent.
        assert captured.out == ""
    else:
        # Expected progress/convergence messages for verbose >= 1.
        msg = [
            "Newton iter=1",
            "Check Convergence",
            "1. max |gradient|",
            "2. Newton decrement",
            "Solver did converge at loss = ",
        ]
        for m in msg:
            assert m in captured.out

    if verbose >= 2:
        # Line-search details are only printed at the highest verbosity.
        msg = ["Backtracking Line Search", "line search iteration="]
        for m in msg:
            assert m in captured.out

    # Set the Newton solver to a state with a completely wrong Newton step.
    sol = NewtonCholeskySolver(
        coef=linear_loss.init_zero_coef(X),
        linear_loss=linear_loss,
        l2_reg_strength=0,
        verbose=verbose,
    )
    sol.setup(X=X, y=y, sample_weight=None)
    sol.iteration = 1
    sol.update_gradient_hessian(X=X, y=y, sample_weight=None)
    # A Newton step pointing away from the optimum forces line-search failure.
    sol.coef_newton = np.array([1.0, 0])
    sol.gradient_times_newton = sol.gradient @ sol.coef_newton
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", ConvergenceWarning)
        sol.line_search(X=X, y=y, sample_weight=None)
        captured = capsys.readouterr()
    if verbose >= 1:
        assert (
            "Line search did not converge and resorts to lbfgs instead." in captured.out
        )

    # Set the Newton solver to a state with bad Newton step such that the loss
    # improvement in line search is tiny.
    sol = NewtonCholeskySolver(
        coef=np.array([1e-12, 0.69314758]),
        linear_loss=linear_loss,
        l2_reg_strength=0,
        verbose=verbose,
    )
    sol.setup(X=X, y=y, sample_weight=None)
    sol.iteration = 1
    sol.update_gradient_hessian(X=X, y=y, sample_weight=None)
    sol.coef_newton = np.array([1e-6, 0])
    sol.gradient_times_newton = sol.gradient @ sol.coef_newton
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", ConvergenceWarning)
        sol.line_search(X=X, y=y, sample_weight=None)
        captured = capsys.readouterr()
    if verbose >= 2:
        # All three line-search termination checks should be reported.
        msg = [
            "line search iteration=",
            "check loss improvement <= armijo term:",
            "check loss |improvement| <= eps * |loss_old|:",
            "check sum(|gradient|) < sum(|gradient_old|):",
        ]
        for m in msg:
            assert m in captured.out

    # Test for a case with negative hessian. We badly initialize coef for a Tweedie
    # loss with non-canonical link, e.g. Inverse Gaussian deviance with a log link.
    linear_loss = LinearModelLoss(
        base_loss=HalfTweedieLoss(power=3), fit_intercept=False
    )
    sol = NewtonCholeskySolver(
        coef=linear_loss.init_zero_coef(X) + 1,
        linear_loss=linear_loss,
        l2_reg_strength=0,
        verbose=verbose,
    )
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", ConvergenceWarning)
        sol.solve(X, y, None)
        captured = capsys.readouterr()
    if verbose >= 1:
        assert (
            "The inner solver detected a pointwise Hessian with many negative values"
            " and resorts to lbfgs instead." in captured.out
        )
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_huber.py
ADDED
|
@@ -0,0 +1,358 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
from numbers import Integral, Real
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from scipy import optimize
|
| 8 |
+
|
| 9 |
+
from ..base import BaseEstimator, RegressorMixin, _fit_context
|
| 10 |
+
from ..utils._mask import axis0_safe_slice
|
| 11 |
+
from ..utils._param_validation import Interval
|
| 12 |
+
from ..utils.extmath import safe_sparse_dot
|
| 13 |
+
from ..utils.optimize import _check_optimize_result
|
| 14 |
+
from ..utils.validation import _check_sample_weight, validate_data
|
| 15 |
+
from ._base import LinearModel
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None):
    """Returns the Huber loss and the gradient.

    Parameters
    ----------
    w : ndarray, shape (n_features + 1,) or (n_features + 2,)
        Feature vector.
        w[:n_features] gives the coefficients
        w[-1] gives the scale factor and if the intercept is fit w[-2]
        gives the intercept factor.

    X : ndarray of shape (n_samples, n_features)
        Input data.

    y : ndarray of shape (n_samples,)
        Target vector.

    epsilon : float
        Robustness of the Huber estimator.

    alpha : float
        Regularization parameter.

    sample_weight : ndarray of shape (n_samples,), default=None
        Weight assigned to each sample.

    Returns
    -------
    loss : float
        Huber loss.

    gradient : ndarray, shape (len(w))
        Returns the derivative of the Huber loss with respect to each
        coefficient, intercept and the scale as a vector.
    """
    _, n_features = X.shape
    # The intercept is fit iff w carries one extra entry beyond coef + scale.
    fit_intercept = n_features + 2 == w.shape[0]
    if fit_intercept:
        intercept = w[-2]
    sigma = w[-1]
    w = w[:n_features]
    # Effective sample count = total weight (plain count when weights are 1).
    n_samples = np.sum(sample_weight)

    # Calculate the values where |y - X'w -c / sigma| > epsilon
    # The values above this threshold are outliers.
    linear_loss = y - safe_sparse_dot(X, w)
    if fit_intercept:
        linear_loss -= intercept
    abs_linear_loss = np.abs(linear_loss)
    outliers_mask = abs_linear_loss > epsilon * sigma

    # Calculate the linear loss due to the outliers.
    # This is equal to (2 * M * |y - X'w -c / sigma| - M**2) * sigma
    outliers = abs_linear_loss[outliers_mask]
    num_outliers = np.count_nonzero(outliers_mask)
    n_non_outliers = X.shape[0] - num_outliers

    # n_sq_outliers includes the weight give to the outliers while
    # num_outliers is just the number of outliers.
    outliers_sw = sample_weight[outliers_mask]
    n_sw_outliers = np.sum(outliers_sw)
    outlier_loss = (
        2.0 * epsilon * np.sum(outliers_sw * outliers)
        - sigma * n_sw_outliers * epsilon**2
    )

    # Calculate the quadratic loss due to the non-outliers.-
    # This is equal to |(y - X'w - c)**2 / sigma**2| * sigma
    non_outliers = linear_loss[~outliers_mask]
    weighted_non_outliers = sample_weight[~outliers_mask] * non_outliers
    weighted_loss = np.dot(weighted_non_outliers.T, non_outliers)
    squared_loss = weighted_loss / sigma

    if fit_intercept:
        grad = np.zeros(n_features + 2)
    else:
        grad = np.zeros(n_features + 1)

    # Gradient due to the squared loss.
    X_non_outliers = -axis0_safe_slice(X, ~outliers_mask, n_non_outliers)
    grad[:n_features] = (
        2.0 / sigma * safe_sparse_dot(weighted_non_outliers, X_non_outliers)
    )

    # Gradient due to the linear loss.
    # signed_outliers is +/-1 per outlier, the sign of its residual.
    signed_outliers = np.ones_like(outliers)
    signed_outliers_mask = linear_loss[outliers_mask] < 0
    signed_outliers[signed_outliers_mask] = -1.0
    X_outliers = axis0_safe_slice(X, outliers_mask, num_outliers)
    sw_outliers = sample_weight[outliers_mask] * signed_outliers
    grad[:n_features] -= 2.0 * epsilon * (safe_sparse_dot(sw_outliers, X_outliers))

    # Gradient due to the penalty.
    grad[:n_features] += alpha * 2.0 * w

    # Gradient due to sigma.
    grad[-1] = n_samples
    grad[-1] -= n_sw_outliers * epsilon**2
    grad[-1] -= squared_loss / sigma

    # Gradient due to the intercept.
    if fit_intercept:
        grad[-2] = -2.0 * np.sum(weighted_non_outliers) / sigma
        grad[-2] -= 2.0 * epsilon * np.sum(sw_outliers)

    loss = n_samples * sigma + squared_loss + outlier_loss
    loss += alpha * np.dot(w, w)
    return loss, grad
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class HuberRegressor(LinearModel, RegressorMixin, BaseEstimator):
    """L2-regularized linear regression model that is robust to outliers.

    The Huber Regressor optimizes the squared loss for the samples where
    ``|(y - Xw - c) / sigma| < epsilon`` and the absolute loss for the samples
    where ``|(y - Xw - c) / sigma| > epsilon``, where the model coefficients
    ``w``, the intercept ``c`` and the scale ``sigma`` are parameters
    to be optimized. The parameter `sigma` makes sure that if `y` is scaled up
    or down by a certain factor, one does not need to rescale `epsilon` to
    achieve the same robustness. Note that this does not take into account
    the fact that the different features of `X` may be of different scales.

    The Huber loss function has the advantage of not being heavily influenced
    by the outliers while not completely ignoring their effect.

    Read more in the :ref:`User Guide <huber_regression>`

    .. versionadded:: 0.18

    Parameters
    ----------
    epsilon : float, default=1.35
        The parameter epsilon controls the number of samples that should be
        classified as outliers. The smaller the epsilon, the more robust it is
        to outliers. Epsilon must be in the range `[1, inf)`.

    max_iter : int, default=100
        Maximum number of iterations that
        ``scipy.optimize.minimize(method="L-BFGS-B")`` should run for.

    alpha : float, default=0.0001
        Strength of the squared L2 regularization. Note that the penalty is
        equal to ``alpha * ||w||^2``.
        Must be in the range `[0, inf)`.

    warm_start : bool, default=False
        This is useful if the stored attributes of a previously used model
        has to be reused. If set to False, then the coefficients will
        be rewritten for every call to fit.
        See :term:`the Glossary <warm_start>`.

    fit_intercept : bool, default=True
        Whether or not to fit the intercept. This can be set to False
        if the data is already centered around the origin.

    tol : float, default=1e-05
        The iteration will stop when
        ``max{|proj g_i | i = 1, ..., n}`` <= ``tol``
        where pg_i is the i-th component of the projected gradient.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        Features got by optimizing the L2-regularized Huber loss.

    intercept_ : float
        Bias.

    scale_ : float
        The value by which ``|y - Xw - c|`` is scaled down.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_iter_ : int
        Number of iterations that
        ``scipy.optimize.minimize(method="L-BFGS-B")`` has run for.

        .. versionchanged:: 0.20

            In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
            ``max_iter``. ``n_iter_`` will now report at most ``max_iter``.

    outliers_ : array, shape (n_samples,)
        A boolean mask which is set to True where the samples are identified
        as outliers.

    See Also
    --------
    RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm.
    TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model.
    SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.

    References
    ----------
    .. [1] Peter J. Huber, Elvezio M. Ronchetti, Robust Statistics
           Concomitant scale estimates, p. 172
    .. [2] Art B. Owen (2006), `A robust hybrid of lasso and ridge regression.
           <https://artowen.su.domains/reports/hhu.pdf>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import HuberRegressor, LinearRegression
    >>> from sklearn.datasets import make_regression
    >>> rng = np.random.RandomState(0)
    >>> X, y, coef = make_regression(
    ...     n_samples=200, n_features=2, noise=4.0, coef=True, random_state=0)
    >>> X[:4] = rng.uniform(10, 20, (4, 2))
    >>> y[:4] = rng.uniform(10, 20, 4)
    >>> huber = HuberRegressor().fit(X, y)
    >>> huber.score(X, y)
    -7.284...
    >>> huber.predict(X[:1,])
    array([806.7200...])
    >>> linear = LinearRegression().fit(X, y)
    >>> print("True coefficients:", coef)
    True coefficients: [20.4923... 34.1698...]
    >>> print("Huber coefficients:", huber.coef_)
    Huber coefficients: [17.7906... 31.0106...]
    >>> print("Linear Regression coefficients:", linear.coef_)
    Linear Regression coefficients: [-1.9221... 7.0226...]
    """

    # Declarative constraints checked by the @_fit_context machinery.
    _parameter_constraints: dict = {
        "epsilon": [Interval(Real, 1.0, None, closed="left")],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "alpha": [Interval(Real, 0, None, closed="left")],
        "warm_start": ["boolean"],
        "fit_intercept": ["boolean"],
        "tol": [Interval(Real, 0.0, None, closed="left")],
    }

    def __init__(
        self,
        *,
        epsilon=1.35,
        max_iter=100,
        alpha=0.0001,
        warm_start=False,
        fit_intercept=True,
        tol=1e-05,
    ):
        self.epsilon = epsilon
        self.max_iter = max_iter
        self.alpha = alpha
        self.warm_start = warm_start
        self.fit_intercept = fit_intercept
        self.tol = tol

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : array-like, shape (n_samples,)
            Target vector relative to X.

        sample_weight : array-like, shape (n_samples,)
            Weight given to each sample.

        Returns
        -------
        self : object
            Fitted `HuberRegressor` estimator.
        """
        X, y = validate_data(
            self,
            X,
            y,
            copy=False,
            accept_sparse=["csr"],
            y_numeric=True,
            dtype=[np.float64, np.float32],
        )

        sample_weight = _check_sample_weight(sample_weight, X)

        # Warm start reuses [coef, intercept, scale] from the previous fit as
        # the L-BFGS-B starting point; otherwise start from zeros.
        if self.warm_start and hasattr(self, "coef_"):
            parameters = np.concatenate((self.coef_, [self.intercept_, self.scale_]))
        else:
            if self.fit_intercept:
                parameters = np.zeros(X.shape[1] + 2)
            else:
                parameters = np.zeros(X.shape[1] + 1)
            # Make sure to initialize the scale parameter to a strictly
            # positive value:
            parameters[-1] = 1

        # Sigma or the scale factor should be non-negative.
        # Setting it to be zero might cause undefined bounds hence we set it
        # to a value close to zero.
        bounds = np.tile([-np.inf, np.inf], (parameters.shape[0], 1))
        bounds[-1][0] = np.finfo(np.float64).eps * 10

        opt_res = optimize.minimize(
            _huber_loss_and_gradient,
            parameters,
            method="L-BFGS-B",
            jac=True,
            args=(X, y, self.epsilon, self.alpha, sample_weight),
            options={"maxiter": self.max_iter, "gtol": self.tol, "iprint": -1},
            bounds=bounds,
        )

        parameters = opt_res.x

        # status == 2 means the L-BFGS-B routine aborted (e.g. bad gradient).
        if opt_res.status == 2:
            raise ValueError(
                "HuberRegressor convergence failed: l-BFGS-b solver terminated with %s"
                % opt_res.message
            )
        self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
        self.scale_ = parameters[-1]
        if self.fit_intercept:
            self.intercept_ = parameters[-2]
        else:
            self.intercept_ = 0.0
        self.coef_ = parameters[: X.shape[1]]

        # Samples whose absolute residual exceeds scale * epsilon are the
        # ones the Huber loss treated linearly, i.e. the outliers.
        residual = np.abs(y - safe_sparse_dot(X, self.coef_) - self.intercept_)
        self.outliers_ = residual > self.scale_ * self.epsilon
        return self

    def __sklearn_tags__(self):
        """Declare estimator capabilities; this estimator accepts sparse X."""
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        return tags
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_omp.py
ADDED
|
@@ -0,0 +1,1121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Orthogonal matching pursuit algorithms"""
|
| 2 |
+
|
| 3 |
+
# Authors: The scikit-learn developers
|
| 4 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
|
| 6 |
+
import warnings
|
| 7 |
+
from math import sqrt
|
| 8 |
+
from numbers import Integral, Real
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
from scipy import linalg
|
| 12 |
+
from scipy.linalg.lapack import get_lapack_funcs
|
| 13 |
+
|
| 14 |
+
from ..base import MultiOutputMixin, RegressorMixin, _fit_context
|
| 15 |
+
from ..model_selection import check_cv
|
| 16 |
+
from ..utils import Bunch, as_float_array, check_array
|
| 17 |
+
from ..utils._param_validation import Interval, StrOptions, validate_params
|
| 18 |
+
from ..utils.metadata_routing import (
|
| 19 |
+
MetadataRouter,
|
| 20 |
+
MethodMapping,
|
| 21 |
+
_raise_for_params,
|
| 22 |
+
_routing_enabled,
|
| 23 |
+
process_routing,
|
| 24 |
+
)
|
| 25 |
+
from ..utils.parallel import Parallel, delayed
|
| 26 |
+
from ..utils.validation import validate_data
|
| 27 |
+
from ._base import LinearModel, _pre_fit
|
| 28 |
+
|
| 29 |
+
# Warning message emitted when OMP stops before reaching the requested
# number of atoms (or tolerance) because the selected dictionary columns
# turned out to be linearly dependent.
premature = (
    "Orthogonal matching pursuit ended prematurely due to linear"
    " dependence in the dictionary. The requested precision might"
    " not have been met."
)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False):
    """Orthogonal Matching Pursuit step using the Cholesky decomposition.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Input dictionary. Columns are assumed to have unit norm.

    y : ndarray of shape (n_samples,)
        Input targets.

    n_nonzero_coefs : int
        Targeted number of non-zero elements.

    tol : float, default=None
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_X : bool, default=True
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path : bool, default=False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : ndarray of shape (n_nonzero_coefs,)
        Non-zero elements of the solution.

    idx : ndarray of shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector.

    coef : ndarray of shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.

    n_active : int
        Number of active features at convergence.
    """
    # The algorithm swaps columns of X in place below, so it must own a
    # Fortran-ordered (column-major) copy unless the caller allows overwriting.
    if copy_X:
        X = X.copy("F")
    else:  # even if we are allowed to overwrite, still copy it if bad order
        X = np.asfortranarray(X)

    min_float = np.finfo(X.dtype).eps
    # BLAS/LAPACK routines matched to X's dtype: vector norm, column swap,
    # and Cholesky triangular solve (potrs).
    nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (X,))
    (potrs,) = get_lapack_funcs(("potrs",), (X,))

    alpha = np.dot(X.T, y)
    residual = y
    gamma = np.empty(0)
    n_active = 0
    indices = np.arange(X.shape[1])  # keeping track of swapping

    # With a tolerance-based stop the path may in principle use every feature.
    max_features = X.shape[1] if tol is not None else n_nonzero_coefs

    # L holds the (growing) lower-triangular Cholesky factor of the Gram
    # matrix of the active atoms. Entries above the diagonal are never read.
    L = np.empty((max_features, max_features), dtype=X.dtype)

    if return_path:
        coefs = np.empty_like(L)

    while True:
        # Greedy step: atom most correlated with the current residual.
        lam = np.argmax(np.abs(np.dot(X.T, residual)))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # atom already selected or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break

        if n_active > 0:
            # Updates the Cholesky decomposition of X' X
            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
            linalg.solve_triangular(
                L[:n_active, :n_active],
                L[n_active, :n_active],
                trans=0,
                lower=1,
                overwrite_b=True,
                check_finite=False,
            )
            v = nrm2(L[n_active, :n_active]) ** 2
            Lkk = linalg.norm(X[:, lam]) ** 2 - v
            if Lkk <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = sqrt(Lkk)
        else:
            L[0, 0] = linalg.norm(X[:, lam])

        # Move the selected atom into the active block by swapping columns
        # of X (in place, via BLAS swap) and the matching bookkeeping arrays.
        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        n_active += 1

        # solves LL'x = X'y as a composition of two triangular systems
        gamma, _ = potrs(
            L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False
        )

        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        residual = y - np.dot(X[:, :n_active], gamma)
        if tol is not None and nrm2(residual) ** 2 <= tol:
            break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def _gram_omp(
    Gram,
    Xy,
    n_nonzero_coefs,
    tol_0=None,
    tol=None,
    copy_Gram=True,
    copy_Xy=True,
    return_path=False,
):
    """Orthogonal Matching Pursuit step on a precomputed Gram matrix.

    This function uses the Cholesky decomposition method.

    Parameters
    ----------
    Gram : ndarray of shape (n_features, n_features)
        Gram matrix of the input data matrix.

    Xy : ndarray of shape (n_features,)
        Input targets.

    n_nonzero_coefs : int
        Targeted number of non-zero elements.

    tol_0 : float, default=None
        Squared norm of y, required if tol is not None.

    tol : float, default=None
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_Gram : bool, default=True
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy : bool, default=True
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.

    return_path : bool, default=False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : ndarray of shape (n_nonzero_coefs,)
        Non-zero elements of the solution.

    idx : ndarray of shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector.

    coefs : ndarray of shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.

    n_active : int
        Number of active features at convergence.
    """
    # Rows/columns of Gram are swapped in place below, so own a
    # Fortran-ordered copy unless the caller allows overwriting.
    Gram = Gram.copy("F") if copy_Gram else np.asfortranarray(Gram)

    # Xy is permuted in place as atoms are selected; copy when requested or
    # when the input array is read-only.
    if copy_Xy or not Xy.flags.writeable:
        Xy = Xy.copy()

    min_float = np.finfo(Gram.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (Gram,))
    (potrs,) = get_lapack_funcs(("potrs",), (Gram,))

    indices = np.arange(len(Gram))  # keeping track of swapping
    alpha = Xy
    tol_curr = tol_0  # running estimate of the residual's squared norm
    delta = 0
    gamma = np.empty(0)
    n_active = 0

    max_features = len(Gram) if tol is not None else n_nonzero_coefs

    # Growing lower-triangular Cholesky factor of the active atoms' Gram
    # matrix; entries above the diagonal are never read.
    L = np.empty((max_features, max_features), dtype=Gram.dtype)

    L[0, 0] = 1.0  # placeholder; overwritten on the first selection
    if return_path:
        coefs = np.empty_like(L)

    while True:
        lam = np.argmax(np.abs(alpha))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # selected same atom twice, or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=3)
            break
        if n_active > 0:
            # Update the Cholesky factor with the newly selected atom.
            L[n_active, :n_active] = Gram[lam, :n_active]
            linalg.solve_triangular(
                L[:n_active, :n_active],
                L[n_active, :n_active],
                trans=0,
                lower=1,
                overwrite_b=True,
                check_finite=False,
            )
            v = nrm2(L[n_active, :n_active]) ** 2
            Lkk = Gram[lam, lam] - v
            if Lkk <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=3)
                break
            L[n_active, n_active] = sqrt(Lkk)
        else:
            L[0, 0] = sqrt(Gram[lam, lam])

        # Move the selected atom into the active block: swap both the row
        # and the column of Gram (keeping it symmetric) plus bookkeeping.
        Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
        Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
        n_active += 1
        # solves LL'x = X'y as a composition of two triangular systems
        gamma, _ = potrs(
            L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False
        )
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        beta = np.dot(Gram[:, :n_active], gamma)
        alpha = Xy - beta
        if tol is not None:
            # Incrementally track the residual's squared norm without ever
            # forming the residual itself (batch OMP trick).
            tol_curr += delta
            delta = np.inner(gamma, beta[:n_active])
            tol_curr -= delta
            if abs(tol_curr) <= tol:
                break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
@validate_params(
    {
        "X": ["array-like"],
        "y": [np.ndarray],
        "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
        "tol": [Interval(Real, 0, None, closed="left"), None],
        "precompute": ["boolean", StrOptions({"auto"})],
        "copy_X": ["boolean"],
        "return_path": ["boolean"],
        "return_n_iter": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def orthogonal_mp(
    X,
    y,
    *,
    n_nonzero_coefs=None,
    tol=None,
    precompute=False,
    copy_X=True,
    return_path=False,
    return_n_iter=False,
):
    r"""Orthogonal Matching Pursuit (OMP).

    Solves n_targets Orthogonal Matching Pursuit problems.
    An instance of the problem has the form:

    When parametrized by the number of non-zero coefficients using
    `n_nonzero_coefs`:
    argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}

    When parametrized by error using the parameter `tol`:
    argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol

    Read more in the :ref:`User Guide <omp>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input data. Columns are assumed to have unit norm.

    y : ndarray of shape (n_samples,) or (n_samples, n_targets)
        Input targets.

    n_nonzero_coefs : int, default=None
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol : float, default=None
        Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs.

    precompute : 'auto' or bool, default=False
        Whether to perform precomputations. Improves performance when n_targets
        or n_samples is very large.

    copy_X : bool, default=True
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path : bool, default=False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    Returns
    -------
    coef : ndarray of shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis generates coefficients in increasing order
        of active features.

    n_iters : array-like or int
        Number of active features across every target. Returned only if
        `return_n_iter` is set to True.

    See Also
    --------
    OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model.
    orthogonal_mp_gram : Solve OMP problems using Gram matrix and the product X.T * y.
    lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
    sklearn.decomposition.sparse_encode : Sparse coding.

    Notes
    -----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf

    Examples
    --------
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.linear_model import orthogonal_mp
    >>> X, y = make_regression(noise=4, random_state=0)
    >>> coef = orthogonal_mp(X, y)
    >>> coef.shape
    (100,)
    >>> X[:1,] @ coef
    array([-78.68...])
    """
    # check_array already made a fresh Fortran-ordered copy when copy_X is
    # True, so further copies inside _cholesky_omp are unnecessary...
    X = check_array(X, order="F", copy=copy_X)
    copy_X = False
    if y.ndim == 1:
        y = y.reshape(-1, 1)
    y = check_array(y)
    if y.shape[1] > 1:  # subsequent targets will be affected
        # ...unless there are multiple targets: _cholesky_omp swaps columns
        # of X in place, which would corrupt X for the next target.
        copy_X = True
    if n_nonzero_coefs is None and tol is None:
        # default for n_nonzero_coefs is 0.1 * n_features
        # but at least one.
        n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
    if tol is None and n_nonzero_coefs > X.shape[1]:
        raise ValueError(
            "The number of atoms cannot be more than the number of features"
        )
    if precompute == "auto":
        precompute = X.shape[0] > X.shape[1]
    if precompute:
        # Delegate to the Gram-based solver on X'X and X'y.
        G = np.dot(X.T, X)
        G = np.asfortranarray(G)
        Xy = np.dot(X.T, y)
        if tol is not None:
            norms_squared = np.sum((y**2), axis=0)
        else:
            norms_squared = None
        return orthogonal_mp_gram(
            G,
            Xy,
            n_nonzero_coefs=n_nonzero_coefs,
            tol=tol,
            norms_squared=norms_squared,
            copy_Gram=copy_X,
            copy_Xy=False,
            return_path=return_path,
        )

    if return_path:
        coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
    else:
        coef = np.zeros((X.shape[1], y.shape[1]))
    n_iters = []

    # Solve one OMP problem per target column.
    for k in range(y.shape[1]):
        out = _cholesky_omp(
            X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path
        )
        if return_path:
            _, idx, coefs, n_iter = out
            coef = coef[:, :, : len(idx)]
            for n_active, x in enumerate(coefs.T):
                coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1]
        else:
            x, idx, n_iter = out
            coef[idx, k] = x
        n_iters.append(n_iter)

    if y.shape[1] == 1:
        n_iters = n_iters[0]

    if return_n_iter:
        return np.squeeze(coef), n_iters
    else:
        return np.squeeze(coef)
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
@validate_params(
    {
        "Gram": ["array-like"],
        "Xy": ["array-like"],
        "n_nonzero_coefs": [Interval(Integral, 0, None, closed="neither"), None],
        "tol": [Interval(Real, 0, None, closed="left"), None],
        "norms_squared": ["array-like", None],
        "copy_Gram": ["boolean"],
        "copy_Xy": ["boolean"],
        "return_path": ["boolean"],
        "return_n_iter": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def orthogonal_mp_gram(
    Gram,
    Xy,
    *,
    n_nonzero_coefs=None,
    tol=None,
    norms_squared=None,
    copy_Gram=True,
    copy_Xy=True,
    return_path=False,
    return_n_iter=False,
):
    """Gram Orthogonal Matching Pursuit (OMP).

    Solves n_targets Orthogonal Matching Pursuit problems using only
    the Gram matrix X.T * X and the product X.T * y.

    Read more in the :ref:`User Guide <omp>`.

    Parameters
    ----------
    Gram : array-like of shape (n_features, n_features)
        Gram matrix of the input data: `X.T * X`.

    Xy : array-like of shape (n_features,) or (n_features, n_targets)
        Input targets multiplied by `X`: `X.T * y`.

    n_nonzero_coefs : int, default=None
        Desired number of non-zero entries in the solution. If `None` (by
        default) this value is set to 10% of n_features.

    tol : float, default=None
        Maximum squared norm of the residual. If not `None`,
        overrides `n_nonzero_coefs`.

    norms_squared : array-like of shape (n_targets,), default=None
        Squared L2 norms of the lines of `y`. Required if `tol` is not None.

    copy_Gram : bool, default=True
        Whether the gram matrix must be copied by the algorithm. A `False`
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy : bool, default=True
        Whether the covariance vector `Xy` must be copied by the algorithm.
        If `False`, it may be overwritten.

    return_path : bool, default=False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    Returns
    -------
    coef : ndarray of shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        `(n_features, n_features)` or `(n_features, n_targets, n_features)` and
        iterating over the last axis yields coefficients in increasing order
        of active features.

    n_iters : list or int
        Number of active features across every target. Returned only if
        `return_n_iter` is set to True.

    See Also
    --------
    OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP).
    orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
    lars_path : Compute Least Angle Regression or Lasso path using
        LARS algorithm.
    sklearn.decomposition.sparse_encode : Generic sparse coding.
        Each column of the result is the solution to a Lasso problem.

    Notes
    -----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf

    Examples
    --------
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.linear_model import orthogonal_mp_gram
    >>> X, y = make_regression(noise=4, random_state=0)
    >>> coef = orthogonal_mp_gram(X.T @ X, X.T @ y)
    >>> coef.shape
    (100,)
    >>> X[:1,] @ coef
    array([-78.68...])
    """
    Gram = check_array(Gram, order="F", copy=copy_Gram)
    Xy = np.asarray(Xy)
    if Xy.ndim > 1 and Xy.shape[1] > 1:
        # or subsequent target will be affected
        copy_Gram = True
    if Xy.ndim == 1:
        # Normalize the single-target case to 2-D / one-element list form.
        Xy = Xy[:, np.newaxis]
        if tol is not None:
            norms_squared = [norms_squared]
    if copy_Xy or not Xy.flags.writeable:
        # Make the copy once instead of many times in _gram_omp itself.
        Xy = Xy.copy()

    if n_nonzero_coefs is None and tol is None:
        n_nonzero_coefs = int(0.1 * len(Gram))
    if tol is not None and norms_squared is None:
        raise ValueError(
            "Gram OMP needs the precomputed norms in order "
            "to evaluate the error sum of squares."
        )
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > len(Gram):
        raise ValueError(
            "The number of atoms cannot be more than the number of features"
        )

    if return_path:
        coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype)
    else:
        coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype)

    # Solve one OMP problem per target column.
    n_iters = []
    for k in range(Xy.shape[1]):
        out = _gram_omp(
            Gram,
            Xy[:, k],
            n_nonzero_coefs,
            norms_squared[k] if tol is not None else None,
            tol,
            copy_Gram=copy_Gram,
            copy_Xy=False,
            return_path=return_path,
        )
        if return_path:
            _, idx, coefs, n_iter = out
            coef = coef[:, :, : len(idx)]
            for n_active, x in enumerate(coefs.T):
                coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1]
        else:
            x, idx, n_iter = out
            coef[idx, k] = x
        n_iters.append(n_iter)

    if Xy.shape[1] == 1:
        n_iters = n_iters[0]

    if return_n_iter:
        return np.squeeze(coef), n_iters
    else:
        return np.squeeze(coef)
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
class OrthogonalMatchingPursuit(MultiOutputMixin, RegressorMixin, LinearModel):
    """Orthogonal Matching Pursuit model (OMP).

    Read more in the :ref:`User Guide <omp>`.

    Parameters
    ----------
    n_nonzero_coefs : int, default=None
        Desired number of non-zero entries in the solution. Ignored if `tol` is set.
        When `None` and `tol` is also `None`, this value is either set to 10% of
        `n_features` or 1, whichever is greater.

    tol : float, default=None
        Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs.

    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).

    precompute : 'auto' or bool, default='auto'
        Whether to use a precomputed Gram and Xy matrix to speed up
        calculations. Improves performance when :term:`n_targets` or
        :term:`n_samples` is very large. Note that if you already have such
        matrices, you can pass them directly to the fit method.

    Attributes
    ----------
    coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formula).

    intercept_ : float or ndarray of shape (n_targets,)
        Independent term in decision function.

    n_iter_ : int or array-like
        Number of active features across every target.

    n_nonzero_coefs_ : int or None
        The number of non-zero coefficients in the solution or `None` when `tol` is
        set. If `n_nonzero_coefs` is None and `tol` is None this value is either set
        to 10% of `n_features` or 1, whichever is greater.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
    orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit
        problems using only the Gram matrix X.T * X and the product X.T * y.
    lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
    Lars : Least Angle Regression model a.k.a. LAR.
    LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
    sklearn.decomposition.sparse_encode : Generic sparse coding.
        Each column of the result is the solution to a Lasso problem.
    OrthogonalMatchingPursuitCV : Cross-validated
        Orthogonal Matching Pursuit model (OMP).

    Notes
    -----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf

    Examples
    --------
    >>> from sklearn.linear_model import OrthogonalMatchingPursuit
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(noise=4, random_state=0)
    >>> reg = OrthogonalMatchingPursuit().fit(X, y)
    >>> reg.score(X, y)
    0.9991...
    >>> reg.predict(X[:1,])
    array([-78.3854...])
    """

    # Declarative constraints checked by the `_fit_context` decorator.
    _parameter_constraints: dict = {
        "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
        "tol": [Interval(Real, 0, None, closed="left"), None],
        "fit_intercept": ["boolean"],
        "precompute": [StrOptions({"auto"}), "boolean"],
    }

    def __init__(
        self,
        *,
        n_nonzero_coefs=None,
        tol=None,
        fit_intercept=True,
        precompute="auto",
    ):
        self.n_nonzero_coefs = n_nonzero_coefs
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.precompute = precompute

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values. Will be cast to X's dtype if necessary.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X, y = validate_data(self, X, y, multi_output=True, y_numeric=True)
        n_features = X.shape[1]

        # Center/scale data and, depending on `precompute`, build the Gram
        # matrix and X'y. `Gram is False` means no precomputation was done.
        X, y, X_offset, y_offset, X_scale, Gram, Xy = _pre_fit(
            X, y, None, self.precompute, self.fit_intercept, copy=True
        )

        if y.ndim == 1:
            y = y[:, np.newaxis]

        if self.n_nonzero_coefs is None and self.tol is None:
            # default for n_nonzero_coefs is 0.1 * n_features
            # but at least one.
            self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
        elif self.tol is not None:
            # tol takes precedence; the atom count is then data-dependent.
            self.n_nonzero_coefs_ = None
        else:
            self.n_nonzero_coefs_ = self.n_nonzero_coefs

        if Gram is False:
            coef_, self.n_iter_ = orthogonal_mp(
                X,
                y,
                n_nonzero_coefs=self.n_nonzero_coefs_,
                tol=self.tol,
                precompute=False,
                copy_X=True,
                return_n_iter=True,
            )
        else:
            norms_sq = np.sum(y**2, axis=0) if self.tol is not None else None

            coef_, self.n_iter_ = orthogonal_mp_gram(
                Gram,
                Xy=Xy,
                n_nonzero_coefs=self.n_nonzero_coefs_,
                tol=self.tol,
                norms_squared=norms_sq,
                copy_Gram=True,
                copy_Xy=True,
                return_n_iter=True,
            )
        # Solvers return (n_features, n_targets); sklearn stores the
        # transpose, (n_targets, n_features).
        self.coef_ = coef_.T
        self._set_intercept(X_offset, y_offset, X_scale)
        return self
|
| 817 |
+
|
| 818 |
+
|
| 819 |
+
def _omp_path_residues(
    X_train,
    y_train,
    X_test,
    y_test,
    copy=True,
    fit_intercept=True,
    max_iter=100,
):
    """Compute the residues on left-out data for a full LARS path.

    Parameters
    ----------
    X_train : ndarray of shape (n_samples, n_features)
        The data to fit the LARS on.

    y_train : ndarray of shape (n_samples)
        The target variable to fit LARS on.

    X_test : ndarray of shape (n_samples, n_features)
        The data to compute the residues on.

    y_test : ndarray of shape (n_samples)
        The target variable to compute the residues on.

    copy : bool, default=True
        Whether X_train, X_test, y_train and y_test should be copied. If
        False, they may be overwritten.

    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).

    max_iter : int, default=100
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 100 by default.

    Returns
    -------
    residues : ndarray of shape (n_samples, max_features)
        Residues of the prediction on the test data.
    """
    if copy:
        X_train, y_train = X_train.copy(), y_train.copy()
        X_test, y_test = X_test.copy(), y_test.copy()

    if fit_intercept:
        # Center both splits using statistics of the training fold only.
        feature_means = X_train.mean(axis=0)
        X_train -= feature_means
        X_test -= feature_means
        target_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= target_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= target_mean

    # Full coefficient path: one column per step of the greedy selection.
    path = orthogonal_mp(
        X_train,
        y_train,
        n_nonzero_coefs=max_iter,
        tol=None,
        precompute=False,
        copy_X=False,
        return_path=True,
    )
    if path.ndim == 1:
        path = path.reshape(-1, 1)

    # Shape (n_steps, n_test_samples): prediction error at every path step.
    return np.dot(path.T, X_test.T) - y_test
|
| 892 |
+
|
| 893 |
+
|
| 894 |
+
class OrthogonalMatchingPursuitCV(RegressorMixin, LinearModel):
    """Cross-validated Orthogonal Matching Pursuit model (OMP).

    See glossary entry for :term:`cross-validation estimator`.

    Read more in the :ref:`User Guide <omp>`.

    Parameters
    ----------
    copy : bool, default=True
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).

    max_iter : int, default=None
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 10% of ``n_features`` but at least 5 if available.

    cv : int, cross-validation generator or iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.

    n_jobs : int, default=None
        Number of CPUs to use during the cross validation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    verbose : bool or int, default=False
        Sets the verbosity amount.

    Attributes
    ----------
    intercept_ : float or ndarray of shape (n_targets,)
        Independent term in decision function.

    coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the problem formulation).

    n_nonzero_coefs_ : int
        Estimated number of non-zero coefficients giving the best mean squared
        error over the cross-validation folds.

    n_iter_ : int or array-like
        Number of active features across every target for the model refit with
        the best hyperparameters got by cross-validating across all folds.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
    orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit
        problems using only the Gram matrix X.T * X and the product X.T * y.
    lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
    Lars : Least Angle Regression model a.k.a. LAR.
    LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
    OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP).
    LarsCV : Cross-validated Least Angle Regression model.
    LassoLarsCV : Cross-validated Lasso model fit with Least Angle Regression.
    sklearn.decomposition.sparse_encode : Generic sparse coding.
        Each column of the result is the solution to a Lasso problem.

    Notes
    -----
    In `fit`, once the optimal number of non-zero coefficients is found through
    cross-validation, the model is fit again using the entire training set.

    Examples
    --------
    >>> from sklearn.linear_model import OrthogonalMatchingPursuitCV
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=100, n_informative=10,
    ...                        noise=4, random_state=0)
    >>> reg = OrthogonalMatchingPursuitCV(cv=5).fit(X, y)
    >>> reg.score(X, y)
    0.9991...
    >>> reg.n_nonzero_coefs_
    np.int64(10)
    >>> reg.predict(X[:1,])
    array([-78.3854...])
    """

    _parameter_constraints: dict = {
        "copy": ["boolean"],
        "fit_intercept": ["boolean"],
        "max_iter": [Interval(Integral, 0, None, closed="left"), None],
        "cv": ["cv_object"],
        "n_jobs": [Integral, None],
        "verbose": ["verbose"],
    }

    def __init__(
        self,
        *,
        copy=True,
        fit_intercept=True,
        max_iter=None,
        cv=None,
        n_jobs=None,
        verbose=False,
    ):
        self.copy = copy
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.cv = cv
        self.n_jobs = n_jobs
        self.verbose = verbose

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, **fit_params):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values. Will be cast to X's dtype if necessary.

        **fit_params : dict
            Parameters to pass to the underlying splitter.

            .. versionadded:: 1.4
                Only available if `enable_metadata_routing=True`,
                which can be set by using
                ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        _raise_for_params(fit_params, self, "fit")

        X, y = validate_data(self, X, y, y_numeric=True, ensure_min_features=2)
        X = as_float_array(X, copy=False, ensure_all_finite=False)
        cv = check_cv(self.cv, classifier=False)
        if _routing_enabled():
            routed_params = process_routing(self, "fit", **fit_params)
        else:
            # TODO(SLEP6): remove when metadata routing cannot be disabled.
            routed_params = Bunch()
            routed_params.splitter = Bunch(split={})
        # When max_iter is unset (None/0), default to 10% of n_features,
        # but at least 5 and never more than n_features.
        max_iter = (
            min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
            if not self.max_iter
            else self.max_iter
        )
        # One entry per CV fold; each is the residue matrix returned by
        # _omp_path_residues, with one row per step of the OMP path.
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_omp_path_residues)(
                X[train],
                y[train],
                X[test],
                y[test],
                self.copy,
                self.fit_intercept,
                max_iter,
            )
            for train, test in cv.split(X, **routed_params.splitter.split)
        )

        # Folds may stop at different path lengths; truncate all of them to
        # the shortest path before averaging the per-step MSE across folds.
        min_early_stop = min(fold.shape[0] for fold in cv_paths)
        mse_folds = np.array(
            [(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths]
        )
        # Path step k corresponds to k + 1 non-zero coefficients.
        best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
        self.n_nonzero_coefs_ = best_n_nonzero_coefs
        # Refit on the full training set with the selected sparsity level.
        omp = OrthogonalMatchingPursuit(
            n_nonzero_coefs=best_n_nonzero_coefs,
            fit_intercept=self.fit_intercept,
        ).fit(X, y)

        self.coef_ = omp.coef_
        self.intercept_ = omp.intercept_
        self.n_iter_ = omp.n_iter_
        return self

    def get_metadata_routing(self):
        """Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        .. versionadded:: 1.4

        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        # Fit params are routed only to the CV splitter's `split` method.
        router = MetadataRouter(owner=self.__class__.__name__).add(
            splitter=self.cv,
            method_mapping=MethodMapping().add(caller="fit", callee="split"),
        )
        return router
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_passive_aggressive.py
ADDED
|
@@ -0,0 +1,573 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
from numbers import Real
|
| 5 |
+
|
| 6 |
+
from ..base import _fit_context
|
| 7 |
+
from ..utils._param_validation import Interval, StrOptions
|
| 8 |
+
from ._stochastic_gradient import DEFAULT_EPSILON, BaseSGDClassifier, BaseSGDRegressor
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class PassiveAggressiveClassifier(BaseSGDClassifier):
    """Passive Aggressive Classifier.

    Read more in the :ref:`User Guide <passive_aggressive>`.

    Parameters
    ----------
    C : float, default=1.0
        Maximum step size (regularization). Defaults to 1.0.

    fit_intercept : bool, default=True
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered.

    max_iter : int, default=1000
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        :meth:`~sklearn.linear_model.PassiveAggressiveClassifier.partial_fit` method.

        .. versionadded:: 0.19

    tol : float or None, default=1e-3
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol).

        .. versionadded:: 0.19

    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to True, it will automatically set aside
        a stratified fraction of training data as validation and terminate
        training when validation score is not improving by at least `tol` for
        `n_iter_no_change` consecutive epochs.

        .. versionadded:: 0.20

    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True.

        .. versionadded:: 0.20

    n_iter_no_change : int, default=5
        Number of iterations with no improvement to wait before early stopping.

        .. versionadded:: 0.20

    shuffle : bool, default=True
        Whether or not the training data should be shuffled after each epoch.

    verbose : int, default=0
        The verbosity level.

    loss : str, default="hinge"
        The loss function to be used:
        hinge: equivalent to PA-I in the reference paper.
        squared_hinge: equivalent to PA-II in the reference paper.

    n_jobs : int or None, default=None
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    random_state : int, RandomState instance, default=None
        Used to shuffle the training data, when ``shuffle`` is set to
        ``True``. Pass an int for reproducible output across multiple
        function calls.
        See :term:`Glossary <random_state>`.

    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.

        Repeatedly calling fit or partial_fit when warm_start is True can
        result in a different solution than when calling fit a single time
        because of the way the data is shuffled.

    class_weight : dict, {class_label: weight} or "balanced" or None, \
            default=None
        Preset for the class_weight fit parameter.

        Weights associated with classes. If not given, all classes
        are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

        .. versionadded:: 0.17
           parameter *class_weight* to automatically weight samples.

    average : bool or int, default=False
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So average=10 will begin averaging after seeing 10 samples.

        .. versionadded:: 0.19
           parameter *average* to use weights averaging in SGD.

    Attributes
    ----------
    coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
            (n_classes, n_features)
        Weights assigned to the features.

    intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.
        For multiclass fits, it is the maximum over every binary fit.

    classes_ : ndarray of shape (n_classes,)
        The unique classes labels.

    t_ : int
        Number of weight updates performed during training.
        Same as ``(n_iter_ * n_samples + 1)``.

    See Also
    --------
    SGDClassifier : Incrementally trained logistic regression.
    Perceptron : Linear perceptron classifier.

    References
    ----------
    Online Passive-Aggressive Algorithms
    <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)

    Examples
    --------
    >>> from sklearn.linear_model import PassiveAggressiveClassifier
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_features=4, random_state=0)
    >>> clf = PassiveAggressiveClassifier(max_iter=1000, random_state=0,
    ... tol=1e-3)
    >>> clf.fit(X, y)
    PassiveAggressiveClassifier(random_state=0)
    >>> print(clf.coef_)
    [[0.26642044 0.45070924 0.67251877 0.64185414]]
    >>> print(clf.intercept_)
    [1.84127814]
    >>> print(clf.predict([[0, 0, 0, 0]]))
    [1]
    """

    # Inherit the SGD constraints and narrow `loss`/add `C` for the PA variant.
    _parameter_constraints: dict = {
        **BaseSGDClassifier._parameter_constraints,
        "loss": [StrOptions({"hinge", "squared_hinge"})],
        "C": [Interval(Real, 0, None, closed="right")],
    }

    def __init__(
        self,
        *,
        C=1.0,
        fit_intercept=True,
        max_iter=1000,
        tol=1e-3,
        early_stopping=False,
        validation_fraction=0.1,
        n_iter_no_change=5,
        shuffle=True,
        verbose=0,
        loss="hinge",
        n_jobs=None,
        random_state=None,
        warm_start=False,
        class_weight=None,
        average=False,
    ):
        # Passive-aggressive is plain SGD with no penalty term and a fixed
        # base step size (eta0=1.0); everything else is forwarded verbatim.
        super().__init__(
            penalty=None,
            fit_intercept=fit_intercept,
            max_iter=max_iter,
            tol=tol,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change,
            shuffle=shuffle,
            verbose=verbose,
            random_state=random_state,
            eta0=1.0,
            warm_start=warm_start,
            class_weight=class_weight,
            average=average,
            n_jobs=n_jobs,
        )

        self.C = C
        self.loss = loss

    @_fit_context(prefer_skip_nested_validation=True)
    def partial_fit(self, X, y, classes=None):
        """Fit linear model with Passive Aggressive algorithm.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Subset of the training data.

        y : array-like of shape (n_samples,)
            Subset of the target values.

        classes : ndarray of shape (n_classes,)
            Classes across all calls to partial_fit.
            Can be obtained by via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        # Extra validation only on the very first call (before classes_ exists).
        if not hasattr(self, "classes_"):
            self._more_validate_params(for_partial_fit=True)

            if self.class_weight == "balanced":
                raise ValueError(
                    "class_weight 'balanced' is not supported for "
                    "partial_fit. For 'balanced' weights, use "
                    "`sklearn.utils.compute_class_weight` with "
                    "`class_weight='balanced'`. In place of y you "
                    "can use a large enough subset of the full "
                    "training set target to properly estimate the "
                    "class frequency distributions. Pass the "
                    "resulting weights as the class_weight "
                    "parameter."
                )

        # The PA-I / PA-II variant is selected via the learning-rate schedule
        # ("pa1" for hinge, "pa2" for squared_hinge); the SGD machinery itself
        # always receives loss="hinge".
        lr = "pa1" if self.loss == "hinge" else "pa2"
        return self._partial_fit(
            X,
            y,
            alpha=1.0,
            C=self.C,
            loss="hinge",
            learning_rate=lr,
            max_iter=1,
            classes=classes,
            sample_weight=None,
            coef_init=None,
            intercept_init=None,
        )

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, coef_init=None, intercept_init=None):
        """Fit linear model with Passive Aggressive algorithm.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values.

        coef_init : ndarray of shape (n_classes, n_features)
            The initial coefficients to warm-start the optimization.

        intercept_init : ndarray of shape (n_classes,)
            The initial intercept to warm-start the optimization.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        self._more_validate_params()

        # Same mapping as in partial_fit: "pa1" = PA-I, "pa2" = PA-II.
        lr = "pa1" if self.loss == "hinge" else "pa2"
        return self._fit(
            X,
            y,
            alpha=1.0,
            C=self.C,
            loss="hinge",
            learning_rate=lr,
            coef_init=coef_init,
            intercept_init=intercept_init,
        )
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
class PassiveAggressiveRegressor(BaseSGDRegressor):
|
| 315 |
+
"""Passive Aggressive Regressor.
|
| 316 |
+
|
| 317 |
+
Read more in the :ref:`User Guide <passive_aggressive>`.
|
| 318 |
+
|
| 319 |
+
Parameters
|
| 320 |
+
----------
|
| 321 |
+
|
| 322 |
+
C : float, default=1.0
|
| 323 |
+
Maximum step size (regularization). Defaults to 1.0.
|
| 324 |
+
|
| 325 |
+
fit_intercept : bool, default=True
|
| 326 |
+
Whether the intercept should be estimated or not. If False, the
|
| 327 |
+
data is assumed to be already centered. Defaults to True.
|
| 328 |
+
|
| 329 |
+
max_iter : int, default=1000
|
| 330 |
+
The maximum number of passes over the training data (aka epochs).
|
| 331 |
+
It only impacts the behavior in the ``fit`` method, and not the
|
| 332 |
+
:meth:`~sklearn.linear_model.PassiveAggressiveRegressor.partial_fit` method.
|
| 333 |
+
|
| 334 |
+
.. versionadded:: 0.19
|
| 335 |
+
|
| 336 |
+
tol : float or None, default=1e-3
|
| 337 |
+
The stopping criterion. If it is not None, the iterations will stop
|
| 338 |
+
when (loss > previous_loss - tol).
|
| 339 |
+
|
| 340 |
+
.. versionadded:: 0.19
|
| 341 |
+
|
| 342 |
+
early_stopping : bool, default=False
|
| 343 |
+
Whether to use early stopping to terminate training when validation.
|
| 344 |
+
score is not improving. If set to True, it will automatically set aside
|
| 345 |
+
a fraction of training data as validation and terminate
|
| 346 |
+
training when validation score is not improving by at least tol for
|
| 347 |
+
n_iter_no_change consecutive epochs.
|
| 348 |
+
|
| 349 |
+
.. versionadded:: 0.20
|
| 350 |
+
|
| 351 |
+
validation_fraction : float, default=0.1
|
| 352 |
+
The proportion of training data to set aside as validation set for
|
| 353 |
+
early stopping. Must be between 0 and 1.
|
| 354 |
+
Only used if early_stopping is True.
|
| 355 |
+
|
| 356 |
+
.. versionadded:: 0.20
|
| 357 |
+
|
| 358 |
+
n_iter_no_change : int, default=5
|
| 359 |
+
Number of iterations with no improvement to wait before early stopping.
|
| 360 |
+
|
| 361 |
+
.. versionadded:: 0.20
|
| 362 |
+
|
| 363 |
+
shuffle : bool, default=True
|
| 364 |
+
Whether or not the training data should be shuffled after each epoch.
|
| 365 |
+
|
| 366 |
+
verbose : int, default=0
|
| 367 |
+
The verbosity level.
|
| 368 |
+
|
| 369 |
+
loss : str, default="epsilon_insensitive"
|
| 370 |
+
The loss function to be used:
|
| 371 |
+
epsilon_insensitive: equivalent to PA-I in the reference paper.
|
| 372 |
+
squared_epsilon_insensitive: equivalent to PA-II in the reference
|
| 373 |
+
paper.
|
| 374 |
+
|
| 375 |
+
epsilon : float, default=0.1
|
| 376 |
+
If the difference between the current prediction and the correct label
|
| 377 |
+
is below this threshold, the model is not updated.
|
| 378 |
+
|
| 379 |
+
random_state : int, RandomState instance, default=None
|
| 380 |
+
Used to shuffle the training data, when ``shuffle`` is set to
|
| 381 |
+
``True``. Pass an int for reproducible output across multiple
|
| 382 |
+
function calls.
|
| 383 |
+
See :term:`Glossary <random_state>`.
|
| 384 |
+
|
| 385 |
+
warm_start : bool, default=False
|
| 386 |
+
When set to True, reuse the solution of the previous call to fit as
|
| 387 |
+
initialization, otherwise, just erase the previous solution.
|
| 388 |
+
See :term:`the Glossary <warm_start>`.
|
| 389 |
+
|
| 390 |
+
Repeatedly calling fit or partial_fit when warm_start is True can
|
| 391 |
+
result in a different solution than when calling fit a single time
|
| 392 |
+
because of the way the data is shuffled.
|
| 393 |
+
|
| 394 |
+
average : bool or int, default=False
|
| 395 |
+
When set to True, computes the averaged SGD weights and stores the
|
| 396 |
+
result in the ``coef_`` attribute. If set to an int greater than 1,
|
| 397 |
+
averaging will begin once the total number of samples seen reaches
|
| 398 |
+
average. So average=10 will begin averaging after seeing 10 samples.
|
| 399 |
+
|
| 400 |
+
.. versionadded:: 0.19
|
| 401 |
+
parameter *average* to use weights averaging in SGD.
|
| 402 |
+
|
| 403 |
+
Attributes
|
| 404 |
+
----------
|
| 405 |
+
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
|
| 406 |
+
n_features]
|
| 407 |
+
Weights assigned to the features.
|
| 408 |
+
|
| 409 |
+
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
|
| 410 |
+
Constants in decision function.
|
| 411 |
+
|
| 412 |
+
n_features_in_ : int
|
| 413 |
+
Number of features seen during :term:`fit`.
|
| 414 |
+
|
| 415 |
+
.. versionadded:: 0.24
|
| 416 |
+
|
| 417 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 418 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 419 |
+
has feature names that are all strings.
|
| 420 |
+
|
| 421 |
+
.. versionadded:: 1.0
|
| 422 |
+
|
| 423 |
+
n_iter_ : int
|
| 424 |
+
The actual number of iterations to reach the stopping criterion.
|
| 425 |
+
|
| 426 |
+
t_ : int
|
| 427 |
+
Number of weight updates performed during training.
|
| 428 |
+
Same as ``(n_iter_ * n_samples + 1)``.
|
| 429 |
+
|
| 430 |
+
See Also
|
| 431 |
+
--------
|
| 432 |
+
SGDRegressor : Linear model fitted by minimizing a regularized
|
| 433 |
+
empirical loss with SGD.
|
| 434 |
+
|
| 435 |
+
References
|
| 436 |
+
----------
|
| 437 |
+
Online Passive-Aggressive Algorithms
|
| 438 |
+
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
|
| 439 |
+
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006).
|
| 440 |
+
|
| 441 |
+
Examples
|
| 442 |
+
--------
|
| 443 |
+
>>> from sklearn.linear_model import PassiveAggressiveRegressor
|
| 444 |
+
>>> from sklearn.datasets import make_regression
|
| 445 |
+
|
| 446 |
+
>>> X, y = make_regression(n_features=4, random_state=0)
|
| 447 |
+
>>> regr = PassiveAggressiveRegressor(max_iter=100, random_state=0,
|
| 448 |
+
... tol=1e-3)
|
| 449 |
+
>>> regr.fit(X, y)
|
| 450 |
+
PassiveAggressiveRegressor(max_iter=100, random_state=0)
|
| 451 |
+
>>> print(regr.coef_)
|
| 452 |
+
[20.48736655 34.18818427 67.59122734 87.94731329]
|
| 453 |
+
>>> print(regr.intercept_)
|
| 454 |
+
[-0.02306214]
|
| 455 |
+
>>> print(regr.predict([[0, 0, 0, 0]]))
|
| 456 |
+
[-0.02306214]
|
| 457 |
+
"""
|
| 458 |
+
|
| 459 |
+
    # Declarative validation rules for the constructor parameters: start
    # from the parent regressor's rules, then narrow `loss` to the two
    # passive-aggressive variants and describe the PA-specific `C` and
    # `epsilon` ranges (C strictly positive, epsilon non-negative).
    _parameter_constraints: dict = {
        **BaseSGDRegressor._parameter_constraints,
        "loss": [StrOptions({"epsilon_insensitive", "squared_epsilon_insensitive"})],
        "C": [Interval(Real, 0, None, closed="right")],
        "epsilon": [Interval(Real, 0, None, closed="left")],
    }
|
| 465 |
+
|
| 466 |
+
def __init__(
|
| 467 |
+
self,
|
| 468 |
+
*,
|
| 469 |
+
C=1.0,
|
| 470 |
+
fit_intercept=True,
|
| 471 |
+
max_iter=1000,
|
| 472 |
+
tol=1e-3,
|
| 473 |
+
early_stopping=False,
|
| 474 |
+
validation_fraction=0.1,
|
| 475 |
+
n_iter_no_change=5,
|
| 476 |
+
shuffle=True,
|
| 477 |
+
verbose=0,
|
| 478 |
+
loss="epsilon_insensitive",
|
| 479 |
+
epsilon=DEFAULT_EPSILON,
|
| 480 |
+
random_state=None,
|
| 481 |
+
warm_start=False,
|
| 482 |
+
average=False,
|
| 483 |
+
):
|
| 484 |
+
super().__init__(
|
| 485 |
+
penalty=None,
|
| 486 |
+
l1_ratio=0,
|
| 487 |
+
epsilon=epsilon,
|
| 488 |
+
eta0=1.0,
|
| 489 |
+
fit_intercept=fit_intercept,
|
| 490 |
+
max_iter=max_iter,
|
| 491 |
+
tol=tol,
|
| 492 |
+
early_stopping=early_stopping,
|
| 493 |
+
validation_fraction=validation_fraction,
|
| 494 |
+
n_iter_no_change=n_iter_no_change,
|
| 495 |
+
shuffle=shuffle,
|
| 496 |
+
verbose=verbose,
|
| 497 |
+
random_state=random_state,
|
| 498 |
+
warm_start=warm_start,
|
| 499 |
+
average=average,
|
| 500 |
+
)
|
| 501 |
+
self.C = C
|
| 502 |
+
self.loss = loss
|
| 503 |
+
|
| 504 |
+
@_fit_context(prefer_skip_nested_validation=True)
|
| 505 |
+
def partial_fit(self, X, y):
|
| 506 |
+
"""Fit linear model with Passive Aggressive algorithm.
|
| 507 |
+
|
| 508 |
+
Parameters
|
| 509 |
+
----------
|
| 510 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 511 |
+
Subset of training data.
|
| 512 |
+
|
| 513 |
+
y : numpy array of shape [n_samples]
|
| 514 |
+
Subset of target values.
|
| 515 |
+
|
| 516 |
+
Returns
|
| 517 |
+
-------
|
| 518 |
+
self : object
|
| 519 |
+
Fitted estimator.
|
| 520 |
+
"""
|
| 521 |
+
if not hasattr(self, "coef_"):
|
| 522 |
+
self._more_validate_params(for_partial_fit=True)
|
| 523 |
+
|
| 524 |
+
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
|
| 525 |
+
return self._partial_fit(
|
| 526 |
+
X,
|
| 527 |
+
y,
|
| 528 |
+
alpha=1.0,
|
| 529 |
+
C=self.C,
|
| 530 |
+
loss="epsilon_insensitive",
|
| 531 |
+
learning_rate=lr,
|
| 532 |
+
max_iter=1,
|
| 533 |
+
sample_weight=None,
|
| 534 |
+
coef_init=None,
|
| 535 |
+
intercept_init=None,
|
| 536 |
+
)
|
| 537 |
+
|
| 538 |
+
@_fit_context(prefer_skip_nested_validation=True)
|
| 539 |
+
def fit(self, X, y, coef_init=None, intercept_init=None):
|
| 540 |
+
"""Fit linear model with Passive Aggressive algorithm.
|
| 541 |
+
|
| 542 |
+
Parameters
|
| 543 |
+
----------
|
| 544 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 545 |
+
Training data.
|
| 546 |
+
|
| 547 |
+
y : numpy array of shape [n_samples]
|
| 548 |
+
Target values.
|
| 549 |
+
|
| 550 |
+
coef_init : array, shape = [n_features]
|
| 551 |
+
The initial coefficients to warm-start the optimization.
|
| 552 |
+
|
| 553 |
+
intercept_init : array, shape = [1]
|
| 554 |
+
The initial intercept to warm-start the optimization.
|
| 555 |
+
|
| 556 |
+
Returns
|
| 557 |
+
-------
|
| 558 |
+
self : object
|
| 559 |
+
Fitted estimator.
|
| 560 |
+
"""
|
| 561 |
+
self._more_validate_params()
|
| 562 |
+
|
| 563 |
+
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
|
| 564 |
+
return self._fit(
|
| 565 |
+
X,
|
| 566 |
+
y,
|
| 567 |
+
alpha=1.0,
|
| 568 |
+
C=self.C,
|
| 569 |
+
loss="epsilon_insensitive",
|
| 570 |
+
learning_rate=lr,
|
| 571 |
+
coef_init=coef_init,
|
| 572 |
+
intercept_init=intercept_init,
|
| 573 |
+
)
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_perceptron.py
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
from numbers import Real
|
| 5 |
+
|
| 6 |
+
from ..utils._param_validation import Interval, StrOptions
|
| 7 |
+
from ._stochastic_gradient import BaseSGDClassifier
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Perceptron(BaseSGDClassifier):
    """Linear perceptron classifier.

    This estimator is a thin wrapper around
    :class:`~sklearn.linear_model.SGDClassifier` with the `loss` and
    `learning_rate` parameters pinned as::

        SGDClassifier(loss="perceptron", learning_rate="constant")

    Every other parameter listed below is forwarded unchanged to
    :class:`~sklearn.linear_model.SGDClassifier`.

    Read more in the :ref:`User Guide <perceptron>`.

    Parameters
    ----------

    penalty : {'l2','l1','elasticnet'}, default=None
        The penalty (aka regularization term) to be used.

    alpha : float, default=0.0001
        Constant that multiplies the regularization term if regularization is
        used.

    l1_ratio : float, default=0.15
        The Elastic Net mixing parameter, with `0 <= l1_ratio <= 1`.
        `l1_ratio=0` corresponds to L2 penalty, `l1_ratio=1` to L1.
        Only used if `penalty='elasticnet'`.

        .. versionadded:: 0.24

    fit_intercept : bool, default=True
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered.

    max_iter : int, default=1000
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        :meth:`partial_fit` method.

        .. versionadded:: 0.19

    tol : float or None, default=1e-3
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol).

        .. versionadded:: 0.19

    shuffle : bool, default=True
        Whether or not the training data should be shuffled after each epoch.

    verbose : int, default=0
        The verbosity level.

    eta0 : float, default=1
        Constant by which the updates are multiplied.

    n_jobs : int, default=None
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    random_state : int, RandomState instance or None, default=0
        Used to shuffle the training data, when ``shuffle`` is set to
        ``True``. Pass an int for reproducible output across multiple
        function calls.
        See :term:`Glossary <random_state>`.

    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to True, it will automatically set aside
        a stratified fraction of training data as validation and terminate
        training when validation score is not improving by at least `tol` for
        `n_iter_no_change` consecutive epochs.

        .. versionadded:: 0.20

    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True.

        .. versionadded:: 0.20

    n_iter_no_change : int, default=5
        Number of iterations with no improvement to wait before early stopping.

        .. versionadded:: 0.20

    class_weight : dict, {class_label: weight} or "balanced", default=None
        Preset for the class_weight fit parameter.

        Weights associated with classes. If not given, all classes
        are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution. See
        :term:`the Glossary <warm_start>`.

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        The unique classes labels.

    coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
            (n_classes, n_features)
        Weights assigned to the features.

    intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.
        For multiclass fits, it is the maximum over every binary fit.

    t_ : int
        Number of weight updates performed during training.
        Same as ``(n_iter_ * n_samples + 1)``.

    See Also
    --------
    sklearn.linear_model.SGDClassifier : Linear classifiers
        (SVM, logistic regression, etc.) with SGD training.

    Notes
    -----
    ``Perceptron`` shares its underlying implementation with
    ``SGDClassifier``; ``Perceptron()`` is equivalent to
    `SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant",
    penalty=None)`.

    References
    ----------
    https://en.wikipedia.org/wiki/Perceptron and references therein.

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.linear_model import Perceptron
    >>> X, y = load_digits(return_X_y=True)
    >>> clf = Perceptron(tol=1e-3, random_state=0)
    >>> clf.fit(X, y)
    Perceptron()
    >>> clf.score(X, y)
    0.939...
    """

    # Start from the parent's constraint table, dropping the two entries this
    # wrapper does not expose (`loss` is fixed, `average` is unsupported),
    # then add the perceptron-specific parameter rules.
    _parameter_constraints: dict = {
        key: value
        for key, value in BaseSGDClassifier._parameter_constraints.items()
        if key not in ("loss", "average")
    }
    _parameter_constraints.update(
        {
            "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None],
            "alpha": [Interval(Real, 0, None, closed="left")],
            "l1_ratio": [Interval(Real, 0, 1, closed="both")],
            "eta0": [Interval(Real, 0, None, closed="left")],
        }
    )

    def __init__(
        self,
        *,
        penalty=None,
        alpha=0.0001,
        l1_ratio=0.15,
        fit_intercept=True,
        max_iter=1000,
        tol=1e-3,
        shuffle=True,
        verbose=0,
        eta0=1.0,
        n_jobs=None,
        random_state=0,
        early_stopping=False,
        validation_fraction=0.1,
        n_iter_no_change=5,
        class_weight=None,
        warm_start=False,
    ):
        # Collect the user-facing parameters that pass straight through to
        # the SGD machinery.
        forwarded = dict(
            penalty=penalty,
            alpha=alpha,
            l1_ratio=l1_ratio,
            fit_intercept=fit_intercept,
            max_iter=max_iter,
            tol=tol,
            shuffle=shuffle,
            verbose=verbose,
            random_state=random_state,
            eta0=eta0,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change,
            warm_start=warm_start,
            class_weight=class_weight,
            n_jobs=n_jobs,
        )
        # Pin the loss and learning-rate schedule that define the perceptron
        # algorithm; power_t is irrelevant under a constant schedule but is
        # still passed explicitly to the base class.
        super().__init__(
            loss="perceptron",
            learning_rate="constant",
            power_t=0.5,
            **forwarded,
        )
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_quantile.py
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
import warnings
|
| 5 |
+
from numbers import Real
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
from scipy import sparse
|
| 9 |
+
from scipy.optimize import linprog
|
| 10 |
+
|
| 11 |
+
from ..base import BaseEstimator, RegressorMixin, _fit_context
|
| 12 |
+
from ..exceptions import ConvergenceWarning
|
| 13 |
+
from ..utils import _safe_indexing
|
| 14 |
+
from ..utils._param_validation import Interval, StrOptions
|
| 15 |
+
from ..utils.fixes import parse_version, sp_version
|
| 16 |
+
from ..utils.validation import _check_sample_weight, validate_data
|
| 17 |
+
from ._base import LinearModel
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class QuantileRegressor(LinearModel, RegressorMixin, BaseEstimator):
    """Linear regression model that predicts conditional quantiles.

    The linear :class:`QuantileRegressor` optimizes the pinball loss for a
    desired `quantile` and is robust to outliers.

    This model uses an L1 regularization like
    :class:`~sklearn.linear_model.Lasso`.

    Read more in the :ref:`User Guide <quantile_regression>`.

    .. versionadded:: 1.0

    Parameters
    ----------
    quantile : float, default=0.5
        The quantile that the model tries to predict. It must be strictly
        between 0 and 1. If 0.5 (default), the model predicts the 50%
        quantile, i.e. the median.

    alpha : float, default=1.0
        Regularization constant that multiplies the L1 penalty term.

    fit_intercept : bool, default=True
        Whether or not to fit the intercept.

    solver : {'highs-ds', 'highs-ipm', 'highs', 'interior-point', \
            'revised simplex'}, default='highs'
        Method used by :func:`scipy.optimize.linprog` to solve the linear
        programming formulation.

        It is recommended to use the highs methods because
        they are the fastest ones. Solvers "highs-ds", "highs-ipm" and "highs"
        support sparse input data and, in fact, always convert to sparse csc.

        From `scipy>=1.11.0`, "interior-point" is not available anymore.

        .. versionchanged:: 1.4
           The default of `solver` changed to `"highs"` in version 1.4.

    solver_options : dict, default=None
        Additional parameters passed to :func:`scipy.optimize.linprog` as
        options. If `None` and if `solver='interior-point'`, then
        `{"lstsq": True}` is passed to :func:`scipy.optimize.linprog` for the
        sake of stability.

    Attributes
    ----------
    coef_ : array of shape (n_features,)
        Estimated coefficients for the features.

    intercept_ : float
        The intercept of the model, aka bias term.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_iter_ : int
        The actual number of iterations performed by the solver.

    See Also
    --------
    Lasso : The Lasso is a linear model that estimates sparse coefficients
        with l1 regularization.
    HuberRegressor : Linear regression model that is robust to outliers.

    Examples
    --------
    >>> from sklearn.linear_model import QuantileRegressor
    >>> import numpy as np
    >>> n_samples, n_features = 10, 2
    >>> rng = np.random.RandomState(0)
    >>> y = rng.randn(n_samples)
    >>> X = rng.randn(n_samples, n_features)
    >>> # the two following lines are optional in practice
    >>> from sklearn.utils.fixes import sp_version, parse_version
    >>> reg = QuantileRegressor(quantile=0.8).fit(X, y)
    >>> np.mean(y <= reg.predict(X))
    np.float64(0.8)
    """

    # Declarative validation rules for the constructor parameters.
    _parameter_constraints: dict = {
        "quantile": [Interval(Real, 0, 1, closed="neither")],
        "alpha": [Interval(Real, 0, None, closed="left")],
        "fit_intercept": ["boolean"],
        "solver": [
            StrOptions(
                {
                    "highs-ds",
                    "highs-ipm",
                    "highs",
                    "interior-point",
                    "revised simplex",
                }
            ),
        ],
        "solver_options": [dict, None],
    }

    def __init__(
        self,
        *,
        quantile=0.5,
        alpha=1.0,
        fit_intercept=True,
        solver="highs",
        solver_options=None,
    ):
        # Store parameters unchanged; validation happens at fit time.
        self.quantile = quantile
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.solver = solver
        self.solver_options = solver_options

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=["csc", "csr", "coo"],
            y_numeric=True,
            multi_output=False,
        )
        sample_weight = _check_sample_weight(sample_weight, X)

        # n_params counts the optimization variables on the coefficient side
        # (features, plus one slot for the intercept when it is fitted).
        n_features = X.shape[1]
        n_params = n_features

        if self.fit_intercept:
            n_params += 1
            # Note that centering y and X with _preprocess_data does not work
            # for quantile regression.

        # The objective is defined as 1/n * sum(pinball loss) + alpha * L1.
        # So we rescale the penalty term, which is equivalent.
        alpha = np.sum(sample_weight) * self.alpha

        if self.solver == "interior-point" and sp_version >= parse_version("1.11.0"):
            raise ValueError(
                f"Solver {self.solver} is not anymore available in SciPy >= 1.11.0."
            )

        if sparse.issparse(X) and self.solver not in ["highs", "highs-ds", "highs-ipm"]:
            raise ValueError(
                f"Solver {self.solver} does not support sparse X. "
                "Use solver 'highs' for example."
            )
        # make default solver more stable
        if self.solver_options is None and self.solver == "interior-point":
            solver_options = {"lstsq": True}
        else:
            solver_options = self.solver_options

        # After rescaling alpha, the minimization problem is
        #     min sum(pinball loss) + alpha * L1
        # Use linear programming formulation of quantile regression
        #     min_x c x
        #           A_eq x = b_eq
        #                0 <= x
        # x = (s0, s, t0, t, u, v) = slack variables >= 0
        # intercept = s0 - t0
        # coef = s - t
        # c = (0, alpha * 1_p, 0, alpha * 1_p, quantile * 1_n, (1-quantile) * 1_n)
        # residual = y - X@coef - intercept = u - v
        # A_eq = (1_n, X, -1_n, -X, diag(1_n), -diag(1_n))
        # b_eq = y
        # p = n_features
        # n = n_samples
        # 1_n = vector of length n with entries equal one
        # see https://stats.stackexchange.com/questions/384909/
        #
        # Filtering out zero sample weights from the beginning makes life
        # easier for the linprog solver.
        indices = np.nonzero(sample_weight)[0]
        n_indices = len(indices)  # use n_mask instead of n_samples
        if n_indices < len(sample_weight):
            sample_weight = sample_weight[indices]
            X = _safe_indexing(X, indices)
            y = _safe_indexing(y, indices)
        # Cost vector: L1 penalty weight on the coefficient slacks, then the
        # sample-weighted pinball-loss weights on the residual slacks u, v.
        c = np.concatenate(
            [
                np.full(2 * n_params, fill_value=alpha),
                sample_weight * self.quantile,
                sample_weight * (1 - self.quantile),
            ]
        )
        if self.fit_intercept:
            # do not penalize the intercept
            c[0] = 0
            c[n_params] = 0

        if self.solver in ["highs", "highs-ds", "highs-ipm"]:
            # Note that highs methods always use a sparse CSC memory layout internally,
            # even for optimization problems parametrized using dense numpy arrays.
            # Therefore, we work with CSC matrices as early as possible to limit
            # unnecessary repeated memory copies.
            eye = sparse.eye(n_indices, dtype=X.dtype, format="csc")
            if self.fit_intercept:
                ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype))
                A_eq = sparse.hstack([ones, X, -ones, -X, eye, -eye], format="csc")
            else:
                A_eq = sparse.hstack([X, -X, eye, -eye], format="csc")
        else:
            eye = np.eye(n_indices)
            if self.fit_intercept:
                ones = np.ones((n_indices, 1))
                A_eq = np.concatenate([ones, X, -ones, -X, eye, -eye], axis=1)
            else:
                A_eq = np.concatenate([X, -X, eye, -eye], axis=1)

        # Equality-constraint right-hand side: the (possibly filtered) targets.
        b_eq = y

        result = linprog(
            c=c,
            A_eq=A_eq,
            b_eq=b_eq,
            method=self.solver,
            options=solver_options,
        )
        solution = result.x
        if not result.success:
            # Non-fatal: warn with the solver's status so the user can react,
            # but still expose whatever (possibly degenerate) solution linprog
            # returned.
            failure = {
                1: "Iteration limit reached.",
                2: "Problem appears to be infeasible.",
                3: "Problem appears to be unbounded.",
                4: "Numerical difficulties encountered.",
            }
            warnings.warn(
                "Linear programming for QuantileRegressor did not succeed.\n"
                f"Status is {result.status}: "
                + failure.setdefault(result.status, "unknown reason")
                + "\n"
                + "Result message of linprog:\n"
                + result.message,
                ConvergenceWarning,
            )

        # positive slack - negative slack
        # solution is an array with (params_pos, params_neg, u, v)
        params = solution[:n_params] - solution[n_params : 2 * n_params]

        self.n_iter_ = result.nit

        if self.fit_intercept:
            # First slot of `params` is the intercept (see the x layout above).
            self.coef_ = params[1:]
            self.intercept_ = params[0]
        else:
            self.coef_ = params
            self.intercept_ = 0.0
        return self

    def __sklearn_tags__(self):
        # Advertise sparse-input support; the fit method enforces that sparse
        # X is only accepted with the "highs" family of solvers.
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        return tags
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_ransac.py
ADDED
|
@@ -0,0 +1,731 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
import warnings
|
| 5 |
+
from numbers import Integral, Real
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
from ..base import (
|
| 10 |
+
BaseEstimator,
|
| 11 |
+
MetaEstimatorMixin,
|
| 12 |
+
MultiOutputMixin,
|
| 13 |
+
RegressorMixin,
|
| 14 |
+
_fit_context,
|
| 15 |
+
clone,
|
| 16 |
+
)
|
| 17 |
+
from ..exceptions import ConvergenceWarning
|
| 18 |
+
from ..utils import check_consistent_length, check_random_state, get_tags
|
| 19 |
+
from ..utils._bunch import Bunch
|
| 20 |
+
from ..utils._param_validation import (
|
| 21 |
+
HasMethods,
|
| 22 |
+
Interval,
|
| 23 |
+
Options,
|
| 24 |
+
RealNotInt,
|
| 25 |
+
StrOptions,
|
| 26 |
+
)
|
| 27 |
+
from ..utils.metadata_routing import (
|
| 28 |
+
MetadataRouter,
|
| 29 |
+
MethodMapping,
|
| 30 |
+
_raise_for_params,
|
| 31 |
+
_routing_enabled,
|
| 32 |
+
process_routing,
|
| 33 |
+
)
|
| 34 |
+
from ..utils.random import sample_without_replacement
|
| 35 |
+
from ..utils.validation import (
|
| 36 |
+
_check_method_params,
|
| 37 |
+
_check_sample_weight,
|
| 38 |
+
_deprecate_positional_args,
|
| 39 |
+
check_is_fitted,
|
| 40 |
+
has_fit_parameter,
|
| 41 |
+
validate_data,
|
| 42 |
+
)
|
| 43 |
+
from ._base import LinearRegression
|
| 44 |
+
|
| 45 |
+
_EPSILON = np.spacing(1)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
|
| 49 |
+
"""Determine number trials such that at least one outlier-free subset is
|
| 50 |
+
sampled for the given inlier/outlier ratio.
|
| 51 |
+
|
| 52 |
+
Parameters
|
| 53 |
+
----------
|
| 54 |
+
n_inliers : int
|
| 55 |
+
Number of inliers in the data.
|
| 56 |
+
|
| 57 |
+
n_samples : int
|
| 58 |
+
Total number of samples in the data.
|
| 59 |
+
|
| 60 |
+
min_samples : int
|
| 61 |
+
Minimum number of samples chosen randomly from original data.
|
| 62 |
+
|
| 63 |
+
probability : float
|
| 64 |
+
Probability (confidence) that one outlier-free sample is generated.
|
| 65 |
+
|
| 66 |
+
Returns
|
| 67 |
+
-------
|
| 68 |
+
trials : int
|
| 69 |
+
Number of trials.
|
| 70 |
+
|
| 71 |
+
"""
|
| 72 |
+
inlier_ratio = n_inliers / float(n_samples)
|
| 73 |
+
nom = max(_EPSILON, 1 - probability)
|
| 74 |
+
denom = max(_EPSILON, 1 - inlier_ratio**min_samples)
|
| 75 |
+
if nom == 1:
|
| 76 |
+
return 0
|
| 77 |
+
if denom == 1:
|
| 78 |
+
return float("inf")
|
| 79 |
+
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class RANSACRegressor(
|
| 83 |
+
MetaEstimatorMixin,
|
| 84 |
+
RegressorMixin,
|
| 85 |
+
MultiOutputMixin,
|
| 86 |
+
BaseEstimator,
|
| 87 |
+
):
|
| 88 |
+
"""RANSAC (RANdom SAmple Consensus) algorithm.
|
| 89 |
+
|
| 90 |
+
RANSAC is an iterative algorithm for the robust estimation of parameters
|
| 91 |
+
from a subset of inliers from the complete data set.
|
| 92 |
+
|
| 93 |
+
Read more in the :ref:`User Guide <ransac_regression>`.
|
| 94 |
+
|
| 95 |
+
Parameters
|
| 96 |
+
----------
|
| 97 |
+
estimator : object, default=None
|
| 98 |
+
Base estimator object which implements the following methods:
|
| 99 |
+
|
| 100 |
+
* `fit(X, y)`: Fit model to given training data and target values.
|
| 101 |
+
* `score(X, y)`: Returns the mean accuracy on the given test data,
|
| 102 |
+
which is used for the stop criterion defined by `stop_score`.
|
| 103 |
+
Additionally, the score is used to decide which of two equally
|
| 104 |
+
large consensus sets is chosen as the better one.
|
| 105 |
+
* `predict(X)`: Returns predicted values using the linear model,
|
| 106 |
+
which is used to compute residual error using loss function.
|
| 107 |
+
|
| 108 |
+
If `estimator` is None, then
|
| 109 |
+
:class:`~sklearn.linear_model.LinearRegression` is used for
|
| 110 |
+
target values of dtype float.
|
| 111 |
+
|
| 112 |
+
Note that the current implementation only supports regression
|
| 113 |
+
estimators.
|
| 114 |
+
|
| 115 |
+
min_samples : int (>= 1) or float ([0, 1]), default=None
|
| 116 |
+
Minimum number of samples chosen randomly from original data. Treated
|
| 117 |
+
as an absolute number of samples for `min_samples >= 1`, treated as a
|
| 118 |
+
relative number `ceil(min_samples * X.shape[0])` for
|
| 119 |
+
`min_samples < 1`. This is typically chosen as the minimal number of
|
| 120 |
+
samples necessary to estimate the given `estimator`. By default a
|
| 121 |
+
:class:`~sklearn.linear_model.LinearRegression` estimator is assumed and
|
| 122 |
+
`min_samples` is chosen as ``X.shape[1] + 1``. This parameter is highly
|
| 123 |
+
dependent upon the model, so if a `estimator` other than
|
| 124 |
+
:class:`~sklearn.linear_model.LinearRegression` is used, the user must
|
| 125 |
+
provide a value.
|
| 126 |
+
|
| 127 |
+
residual_threshold : float, default=None
|
| 128 |
+
Maximum residual for a data sample to be classified as an inlier.
|
| 129 |
+
By default the threshold is chosen as the MAD (median absolute
|
| 130 |
+
deviation) of the target values `y`. Points whose residuals are
|
| 131 |
+
strictly equal to the threshold are considered as inliers.
|
| 132 |
+
|
| 133 |
+
is_data_valid : callable, default=None
|
| 134 |
+
This function is called with the randomly selected data before the
|
| 135 |
+
model is fitted to it: `is_data_valid(X, y)`. If its return value is
|
| 136 |
+
False the current randomly chosen sub-sample is skipped.
|
| 137 |
+
|
| 138 |
+
is_model_valid : callable, default=None
|
| 139 |
+
This function is called with the estimated model and the randomly
|
| 140 |
+
selected data: `is_model_valid(model, X, y)`. If its return value is
|
| 141 |
+
False the current randomly chosen sub-sample is skipped.
|
| 142 |
+
Rejecting samples with this function is computationally costlier than
|
| 143 |
+
with `is_data_valid`. `is_model_valid` should therefore only be used if
|
| 144 |
+
the estimated model is needed for making the rejection decision.
|
| 145 |
+
|
| 146 |
+
max_trials : int, default=100
|
| 147 |
+
Maximum number of iterations for random sample selection.
|
| 148 |
+
|
| 149 |
+
max_skips : int, default=np.inf
|
| 150 |
+
Maximum number of iterations that can be skipped due to finding zero
|
| 151 |
+
inliers or invalid data defined by ``is_data_valid`` or invalid models
|
| 152 |
+
defined by ``is_model_valid``.
|
| 153 |
+
|
| 154 |
+
.. versionadded:: 0.19
|
| 155 |
+
|
| 156 |
+
stop_n_inliers : int, default=np.inf
|
| 157 |
+
Stop iteration if at least this number of inliers are found.
|
| 158 |
+
|
| 159 |
+
stop_score : float, default=np.inf
|
| 160 |
+
Stop iteration if score is greater equal than this threshold.
|
| 161 |
+
|
| 162 |
+
stop_probability : float in range [0, 1], default=0.99
|
| 163 |
+
RANSAC iteration stops if at least one outlier-free set of the training
|
| 164 |
+
data is sampled in RANSAC. This requires to generate at least N
|
| 165 |
+
samples (iterations)::
|
| 166 |
+
|
| 167 |
+
N >= log(1 - probability) / log(1 - e**m)
|
| 168 |
+
|
| 169 |
+
where the probability (confidence) is typically set to high value such
|
| 170 |
+
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
|
| 171 |
+
the total number of samples.
|
| 172 |
+
|
| 173 |
+
loss : str, callable, default='absolute_error'
|
| 174 |
+
String inputs, 'absolute_error' and 'squared_error' are supported which
|
| 175 |
+
find the absolute error and squared error per sample respectively.
|
| 176 |
+
|
| 177 |
+
If ``loss`` is a callable, then it should be a function that takes
|
| 178 |
+
two arrays as inputs, the true and predicted value and returns a 1-D
|
| 179 |
+
array with the i-th value of the array corresponding to the loss
|
| 180 |
+
on ``X[i]``.
|
| 181 |
+
|
| 182 |
+
If the loss on a sample is greater than the ``residual_threshold``,
|
| 183 |
+
then this sample is classified as an outlier.
|
| 184 |
+
|
| 185 |
+
.. versionadded:: 0.18
|
| 186 |
+
|
| 187 |
+
random_state : int, RandomState instance, default=None
|
| 188 |
+
The generator used to initialize the centers.
|
| 189 |
+
Pass an int for reproducible output across multiple function calls.
|
| 190 |
+
See :term:`Glossary <random_state>`.
|
| 191 |
+
|
| 192 |
+
Attributes
|
| 193 |
+
----------
|
| 194 |
+
estimator_ : object
|
| 195 |
+
Final model fitted on the inliers predicted by the "best" model found
|
| 196 |
+
during RANSAC sampling (copy of the `estimator` object).
|
| 197 |
+
|
| 198 |
+
n_trials_ : int
|
| 199 |
+
Number of random selection trials until one of the stop criteria is
|
| 200 |
+
met. It is always ``<= max_trials``.
|
| 201 |
+
|
| 202 |
+
inlier_mask_ : bool array of shape [n_samples]
|
| 203 |
+
Boolean mask of inliers classified as ``True``.
|
| 204 |
+
|
| 205 |
+
n_skips_no_inliers_ : int
|
| 206 |
+
Number of iterations skipped due to finding zero inliers.
|
| 207 |
+
|
| 208 |
+
.. versionadded:: 0.19
|
| 209 |
+
|
| 210 |
+
n_skips_invalid_data_ : int
|
| 211 |
+
Number of iterations skipped due to invalid data defined by
|
| 212 |
+
``is_data_valid``.
|
| 213 |
+
|
| 214 |
+
.. versionadded:: 0.19
|
| 215 |
+
|
| 216 |
+
n_skips_invalid_model_ : int
|
| 217 |
+
Number of iterations skipped due to an invalid model defined by
|
| 218 |
+
``is_model_valid``.
|
| 219 |
+
|
| 220 |
+
.. versionadded:: 0.19
|
| 221 |
+
|
| 222 |
+
n_features_in_ : int
|
| 223 |
+
Number of features seen during :term:`fit`.
|
| 224 |
+
|
| 225 |
+
.. versionadded:: 0.24
|
| 226 |
+
|
| 227 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 228 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 229 |
+
has feature names that are all strings.
|
| 230 |
+
|
| 231 |
+
.. versionadded:: 1.0
|
| 232 |
+
|
| 233 |
+
See Also
|
| 234 |
+
--------
|
| 235 |
+
HuberRegressor : Linear regression model that is robust to outliers.
|
| 236 |
+
TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model.
|
| 237 |
+
SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.
|
| 238 |
+
|
| 239 |
+
References
|
| 240 |
+
----------
|
| 241 |
+
.. [1] https://en.wikipedia.org/wiki/RANSAC
|
| 242 |
+
.. [2] https://www.sri.com/wp-content/uploads/2021/12/ransac-publication.pdf
|
| 243 |
+
.. [3] https://bmva-archive.org.uk/bmvc/2009/Papers/Paper355/Paper355.pdf
|
| 244 |
+
|
| 245 |
+
Examples
|
| 246 |
+
--------
|
| 247 |
+
>>> from sklearn.linear_model import RANSACRegressor
|
| 248 |
+
>>> from sklearn.datasets import make_regression
|
| 249 |
+
>>> X, y = make_regression(
|
| 250 |
+
... n_samples=200, n_features=2, noise=4.0, random_state=0)
|
| 251 |
+
>>> reg = RANSACRegressor(random_state=0).fit(X, y)
|
| 252 |
+
>>> reg.score(X, y)
|
| 253 |
+
0.9885...
|
| 254 |
+
>>> reg.predict(X[:1,])
|
| 255 |
+
array([-31.9417...])
|
| 256 |
+
|
| 257 |
+
For a more detailed example, see
|
| 258 |
+
:ref:`sphx_glr_auto_examples_linear_model_plot_ransac.py`
|
| 259 |
+
""" # noqa: E501
|
| 260 |
+
|
| 261 |
+
_parameter_constraints: dict = {
|
| 262 |
+
"estimator": [HasMethods(["fit", "score", "predict"]), None],
|
| 263 |
+
"min_samples": [
|
| 264 |
+
Interval(Integral, 1, None, closed="left"),
|
| 265 |
+
Interval(RealNotInt, 0, 1, closed="both"),
|
| 266 |
+
None,
|
| 267 |
+
],
|
| 268 |
+
"residual_threshold": [Interval(Real, 0, None, closed="left"), None],
|
| 269 |
+
"is_data_valid": [callable, None],
|
| 270 |
+
"is_model_valid": [callable, None],
|
| 271 |
+
"max_trials": [
|
| 272 |
+
Interval(Integral, 0, None, closed="left"),
|
| 273 |
+
Options(Real, {np.inf}),
|
| 274 |
+
],
|
| 275 |
+
"max_skips": [
|
| 276 |
+
Interval(Integral, 0, None, closed="left"),
|
| 277 |
+
Options(Real, {np.inf}),
|
| 278 |
+
],
|
| 279 |
+
"stop_n_inliers": [
|
| 280 |
+
Interval(Integral, 0, None, closed="left"),
|
| 281 |
+
Options(Real, {np.inf}),
|
| 282 |
+
],
|
| 283 |
+
"stop_score": [Interval(Real, None, None, closed="both")],
|
| 284 |
+
"stop_probability": [Interval(Real, 0, 1, closed="both")],
|
| 285 |
+
"loss": [StrOptions({"absolute_error", "squared_error"}), callable],
|
| 286 |
+
"random_state": ["random_state"],
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
def __init__(
    self,
    estimator=None,
    *,
    min_samples=None,
    residual_threshold=None,
    is_data_valid=None,
    is_model_valid=None,
    max_trials=100,
    max_skips=np.inf,
    stop_n_inliers=np.inf,
    stop_score=np.inf,
    stop_probability=0.99,
    loss="absolute_error",
    random_state=None,
):
    # Store constructor arguments verbatim and defer all validation to
    # `fit` (scikit-learn convention, so get_params/set_params round-trip
    # without side effects).
    self.estimator = estimator
    self.min_samples = min_samples
    self.residual_threshold = residual_threshold
    self.is_data_valid = is_data_valid
    self.is_model_valid = is_model_valid
    self.max_trials = max_trials
    self.max_skips = max_skips
    self.stop_n_inliers = stop_n_inliers
    self.stop_score = stop_score
    self.stop_probability = stop_probability
    self.random_state = random_state
    self.loss = loss
|
| 317 |
+
|
| 318 |
+
@_fit_context(
    # RansacRegressor.estimator is not validated yet
    prefer_skip_nested_validation=False
)
# TODO(1.7): remove `sample_weight` from the signature after deprecation
# cycle; for backwards compatibility: pop it from `fit_params` before the
# `_raise_for_params` check and reinsert it after the check
@_deprecate_positional_args(version="1.7")
def fit(self, X, y, *, sample_weight=None, **fit_params):
    """Fit estimator using RANSAC algorithm.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.

    y : array-like of shape (n_samples,) or (n_samples, n_targets)
        Target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Individual weights for each sample. An error is raised if
        `sample_weight` is passed and the wrapped estimator's `fit`
        method does not support it.

        .. versionadded:: 0.18

    **fit_params : dict
        Parameters routed to the `fit` method of the sub-estimator via the
        metadata routing API.

        .. versionadded:: 1.5

            Only available if
            `sklearn.set_config(enable_metadata_routing=True)` is set. See
            :ref:`Metadata Routing User Guide <metadata_routing>` for more
            details.

    Returns
    -------
    self : object
        Fitted `RANSACRegressor` estimator.

    Raises
    ------
    ValueError
        If no valid consensus set could be found. This occurs if
        `is_data_valid` and `is_model_valid` return False for all
        `max_trials` randomly chosen sub-samples.
    """
    # Need to validate separately here. We can't pass multi_output=True
    # because that would allow y to be csr. Delay expensive finiteness
    # check to the estimator's own input validation.
    _raise_for_params(fit_params, self, "fit")
    check_X_params = dict(accept_sparse="csr", ensure_all_finite=False)
    check_y_params = dict(ensure_2d=False)
    X, y = validate_data(
        self, X, y, validate_separately=(check_X_params, check_y_params)
    )
    check_consistent_length(X, y)

    # Work on a clone so the user-provided estimator is never mutated.
    if self.estimator is not None:
        estimator = clone(self.estimator)
    else:
        estimator = LinearRegression()

    # Resolve `min_samples` to an absolute sample count.
    if self.min_samples is None:
        if not isinstance(estimator, LinearRegression):
            raise ValueError(
                "`min_samples` needs to be explicitly set when estimator "
                "is not a LinearRegression."
            )
        min_samples = X.shape[1] + 1
    elif 0 < self.min_samples < 1:
        # Fraction of the dataset, rounded up.
        min_samples = np.ceil(self.min_samples * X.shape[0])
    elif self.min_samples >= 1:
        min_samples = self.min_samples
    if min_samples > X.shape[0]:
        raise ValueError(
            "`min_samples` may not be larger than number "
            "of samples: n_samples = %d." % (X.shape[0])
        )

    if self.residual_threshold is None:
        # MAD (median absolute deviation) of the targets as a robust
        # default inlier threshold.
        residual_threshold = np.median(np.abs(y - np.median(y)))
    else:
        residual_threshold = self.residual_threshold

    # Build a per-sample loss; multi-output targets are reduced over the
    # target axis so each sample yields a single residual.
    if self.loss == "absolute_error":
        if y.ndim == 1:
            loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
        else:
            loss_function = lambda y_true, y_pred: np.sum(
                np.abs(y_true - y_pred), axis=1
            )
    elif self.loss == "squared_error":
        if y.ndim == 1:
            loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
        else:
            loss_function = lambda y_true, y_pred: np.sum(
                (y_true - y_pred) ** 2, axis=1
            )

    elif callable(self.loss):
        loss_function = self.loss

    random_state = check_random_state(self.random_state)

    try:  # Not all estimators accept a random_state
        estimator.set_params(random_state=random_state)
    except ValueError:
        pass

    estimator_fit_has_sample_weight = has_fit_parameter(estimator, "sample_weight")
    estimator_name = type(estimator).__name__
    if sample_weight is not None and not estimator_fit_has_sample_weight:
        raise ValueError(
            "%s does not support sample_weight. Sample"
            " weights are only used for the calibration"
            " itself." % estimator_name
        )

    # Reinsert sample_weight into fit_params for the routing machinery
    # (see the TODO note on the decorators above).
    if sample_weight is not None:
        fit_params["sample_weight"] = sample_weight

    if _routing_enabled():
        routed_params = process_routing(self, "fit", **fit_params)
    else:
        routed_params = Bunch()
        routed_params.estimator = Bunch(fit={}, predict={}, score={})
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X)
            routed_params.estimator.fit = {"sample_weight": sample_weight}

    # Best consensus set found so far; `n_inliers_best = 1` makes any
    # non-empty consensus set an improvement.
    n_inliers_best = 1
    score_best = -np.inf
    inlier_mask_best = None
    X_inlier_best = None
    y_inlier_best = None
    inlier_best_idxs_subset = None
    self.n_skips_no_inliers_ = 0
    self.n_skips_invalid_data_ = 0
    self.n_skips_invalid_model_ = 0

    # number of data samples
    n_samples = X.shape[0]
    sample_idxs = np.arange(n_samples)

    self.n_trials_ = 0
    max_trials = self.max_trials
    while self.n_trials_ < max_trials:
        self.n_trials_ += 1

        # Give up once the cumulative number of skipped iterations
        # exceeds the allowed budget.
        if (
            self.n_skips_no_inliers_
            + self.n_skips_invalid_data_
            + self.n_skips_invalid_model_
        ) > self.max_skips:
            break

        # choose random sample set
        subset_idxs = sample_without_replacement(
            n_samples, min_samples, random_state=random_state
        )
        X_subset = X[subset_idxs]
        y_subset = y[subset_idxs]

        # check if random sample set is valid
        if self.is_data_valid is not None and not self.is_data_valid(
            X_subset, y_subset
        ):
            self.n_skips_invalid_data_ += 1
            continue

        # cut `fit_params` down to `subset_idxs`
        fit_params_subset = _check_method_params(
            X, params=routed_params.estimator.fit, indices=subset_idxs
        )

        # fit model for current random sample set
        estimator.fit(X_subset, y_subset, **fit_params_subset)

        # check if estimated model is valid
        if self.is_model_valid is not None and not self.is_model_valid(
            estimator, X_subset, y_subset
        ):
            self.n_skips_invalid_model_ += 1
            continue

        # residuals of all data for current random sample model
        y_pred = estimator.predict(X)
        residuals_subset = loss_function(y, y_pred)

        # classify data into inliers and outliers; samples exactly at the
        # threshold count as inliers
        inlier_mask_subset = residuals_subset <= residual_threshold
        n_inliers_subset = np.sum(inlier_mask_subset)

        # less inliers -> skip current random sample
        if n_inliers_subset < n_inliers_best:
            self.n_skips_no_inliers_ += 1
            continue

        # extract inlier data set
        inlier_idxs_subset = sample_idxs[inlier_mask_subset]
        X_inlier_subset = X[inlier_idxs_subset]
        y_inlier_subset = y[inlier_idxs_subset]

        # cut `fit_params` down to `inlier_idxs_subset`
        score_params_inlier_subset = _check_method_params(
            X, params=routed_params.estimator.score, indices=inlier_idxs_subset
        )

        # score of inlier data set
        score_subset = estimator.score(
            X_inlier_subset,
            y_inlier_subset,
            **score_params_inlier_subset,
        )

        # same number of inliers but worse score -> skip current random
        # sample
        if n_inliers_subset == n_inliers_best and score_subset < score_best:
            continue

        # save current random sample as best sample
        n_inliers_best = n_inliers_subset
        score_best = score_subset
        inlier_mask_best = inlier_mask_subset
        X_inlier_best = X_inlier_subset
        y_inlier_best = y_inlier_subset
        inlier_best_idxs_subset = inlier_idxs_subset

        # Shrink the trial budget as the inlier ratio estimate improves
        # (classic adaptive RANSAC stopping rule).
        max_trials = min(
            max_trials,
            _dynamic_max_trials(
                n_inliers_best, n_samples, min_samples, self.stop_probability
            ),
        )

        # break if sufficient number of inliers or score is reached
        if n_inliers_best >= self.stop_n_inliers or score_best >= self.stop_score:
            break

    # if none of the iterations met the required criteria
    if inlier_mask_best is None:
        if (
            self.n_skips_no_inliers_
            + self.n_skips_invalid_data_
            + self.n_skips_invalid_model_
        ) > self.max_skips:
            raise ValueError(
                "RANSAC skipped more iterations than `max_skips` without"
                " finding a valid consensus set. Iterations were skipped"
                " because each randomly chosen sub-sample failed the"
                " passing criteria. See estimator attributes for"
                " diagnostics (n_skips*)."
            )
        else:
            raise ValueError(
                "RANSAC could not find a valid consensus set. All"
                " `max_trials` iterations were skipped because each"
                " randomly chosen sub-sample failed the passing criteria."
                " See estimator attributes for diagnostics (n_skips*)."
            )
    else:
        if (
            self.n_skips_no_inliers_
            + self.n_skips_invalid_data_
            + self.n_skips_invalid_model_
        ) > self.max_skips:
            warnings.warn(
                (
                    "RANSAC found a valid consensus set but exited"
                    " early due to skipping more iterations than"
                    " `max_skips`. See estimator attributes for"
                    " diagnostics (n_skips*)."
                ),
                ConvergenceWarning,
            )

    # estimate final model using all inliers
    fit_params_best_idxs_subset = _check_method_params(
        X, params=routed_params.estimator.fit, indices=inlier_best_idxs_subset
    )

    estimator.fit(X_inlier_best, y_inlier_best, **fit_params_best_idxs_subset)

    self.estimator_ = estimator
    self.inlier_mask_ = inlier_mask_best
    return self
|
| 608 |
+
|
| 609 |
+
def predict(self, X, **params):
    """Predict using the estimated model.

    This is a wrapper for `estimator_.predict(X)`.

    Parameters
    ----------
    X : {array-like or sparse matrix} of shape (n_samples, n_features)
        Input data.

    **params : dict
        Parameters routed to the `predict` method of the sub-estimator via
        the metadata routing API.

        .. versionadded:: 1.5

            Only available if
            `sklearn.set_config(enable_metadata_routing=True)` is set. See
            :ref:`Metadata Routing User Guide <metadata_routing>` for more
            details.

    Returns
    -------
    y : array, shape = [n_samples] or [n_samples, n_targets]
        Returns predicted values.
    """
    check_is_fitted(self)
    # Skip the finiteness check here; the wrapped estimator performs its
    # own input validation.
    X = validate_data(
        self,
        X,
        ensure_all_finite=False,
        accept_sparse=True,
        reset=False,
    )

    _raise_for_params(params, self, "predict")

    predict_params = (
        process_routing(self, "predict", **params).estimator["predict"]
        if _routing_enabled()
        else {}
    )

    return self.estimator_.predict(X, **predict_params)
|
| 654 |
+
|
| 655 |
+
def score(self, X, y, **params):
    """Return the score of the prediction.

    This is a wrapper for `estimator_.score(X, y)`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.

    y : array-like of shape (n_samples,) or (n_samples, n_targets)
        Target values.

    **params : dict
        Parameters routed to the `score` method of the sub-estimator via
        the metadata routing API.

        .. versionadded:: 1.5

            Only available if
            `sklearn.set_config(enable_metadata_routing=True)` is set. See
            :ref:`Metadata Routing User Guide <metadata_routing>` for more
            details.

    Returns
    -------
    z : float
        Score of the prediction.
    """
    check_is_fitted(self)
    # Skip the finiteness check here; the wrapped estimator performs its
    # own input validation.
    X = validate_data(
        self,
        X,
        ensure_all_finite=False,
        accept_sparse=True,
        reset=False,
    )

    _raise_for_params(params, self, "score")

    score_params = (
        process_routing(self, "score", **params).estimator["score"]
        if _routing_enabled()
        else {}
    )

    return self.estimator_.score(X, y, **score_params)
|
| 700 |
+
|
| 701 |
+
def get_metadata_routing(self):
|
| 702 |
+
"""Get metadata routing of this object.
|
| 703 |
+
|
| 704 |
+
Please check :ref:`User Guide <metadata_routing>` on how the routing
|
| 705 |
+
mechanism works.
|
| 706 |
+
|
| 707 |
+
.. versionadded:: 1.5
|
| 708 |
+
|
| 709 |
+
Returns
|
| 710 |
+
-------
|
| 711 |
+
routing : MetadataRouter
|
| 712 |
+
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
|
| 713 |
+
routing information.
|
| 714 |
+
"""
|
| 715 |
+
router = MetadataRouter(owner=self.__class__.__name__).add(
|
| 716 |
+
estimator=self.estimator,
|
| 717 |
+
method_mapping=MethodMapping()
|
| 718 |
+
.add(caller="fit", callee="fit")
|
| 719 |
+
.add(caller="fit", callee="score")
|
| 720 |
+
.add(caller="score", callee="score")
|
| 721 |
+
.add(caller="predict", callee="predict"),
|
| 722 |
+
)
|
| 723 |
+
return router
|
| 724 |
+
|
| 725 |
+
def __sklearn_tags__(self):
|
| 726 |
+
tags = super().__sklearn_tags__()
|
| 727 |
+
if self.estimator is None:
|
| 728 |
+
tags.input_tags.sparse = True # default estimator is LinearRegression
|
| 729 |
+
else:
|
| 730 |
+
tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse
|
| 731 |
+
return tags
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_ridge.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sag.py
ADDED
|
@@ -0,0 +1,370 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Solvers for Ridge and LogisticRegression using SAG algorithm"""
|
| 2 |
+
|
| 3 |
+
# Authors: The scikit-learn developers
|
| 4 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
|
| 6 |
+
import warnings
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
from ..exceptions import ConvergenceWarning
|
| 11 |
+
from ..utils import check_array
|
| 12 |
+
from ..utils.extmath import row_norms
|
| 13 |
+
from ..utils.validation import _check_sample_weight
|
| 14 |
+
from ._base import make_dataset
|
| 15 |
+
from ._sag_fast import sag32, sag64
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def get_auto_step_size(
    max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=None, is_saga=False
):
    """Compute automatic step size for SAG solver.

    The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max sum of squares for over all samples.

    Parameters
    ----------
    max_squared_sum : float
        Maximum squared sum of X over samples.

    alpha_scaled : float
        Constant that multiplies the regularization term, scaled by
        1. / n_samples, the number of samples.

    loss : {'log', 'squared', 'multinomial'}
        The loss function used in SAG solver.

    fit_intercept : bool
        Specifies if a constant (a.k.a. bias or intercept) will be
        added to the decision function.

    n_samples : int, default=None
        Number of rows in X. Useful if is_saga=True.

    is_saga : bool, default=False
        Whether to return step size for the SAGA algorithm or the SAG
        algorithm.

    Returns
    -------
    step_size : float
        Step size used in SAG solver.

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document

    :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    "SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives" <1407.0202>`
    """
    if loss in ("log", "multinomial"):
        # smoothness constant of the (multinomial) logistic loss: the loss
        # gradient is 1/4-Lipschitz, plus the L2 term's contribution
        L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
    elif loss == "squared":
        # inverse Lipschitz constant for squared loss
        L = max_squared_sum + int(fit_intercept) + alpha_scaled
    else:
        # Fix: name every supported loss ('multinomial' was previously
        # missing from this message, making it misleading).
        raise ValueError(
            "Unknown loss function for SAG solver, got %s instead of 'log', "
            "'squared' or 'multinomial'" % loss
        )
    if is_saga:
        # SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
        # See Defazio et al. 2014
        mun = min(2 * n_samples * alpha_scaled, L)
        step = 1.0 / (2 * L + mun)
    else:
        # SAG theoretical step size is 1/16L but it is recommended to use 1 / L
        # see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
        # slide 65
        step = 1.0 / L
    return step
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def sag_solver(
    X,
    y,
    sample_weight=None,
    loss="log",
    alpha=1.0,
    beta=0.0,
    max_iter=1000,
    tol=0.001,
    verbose=0,
    random_state=None,
    check_input=True,
    max_squared_sum=None,
    warm_start_mem=None,
    is_saga=False,
):
    """SAG solver for Ridge and LogisticRegression.

    Stochastic Average Gradient: one sample's gradient is refreshed per
    iteration while the running average over all samples drives the model
    update, with a constant learning rate. With ``is_saga=True`` the SAGA
    variant is used, which behaves better in the first epochs and supports
    an L1 penalty (``beta``).

    IMPORTANT NOTE: 'sag' converges much faster on columns sharing the same
    scale; consider sklearn.preprocessing.StandardScaler before fitting.

    Works with dense numpy arrays or sparse scipy matrices of floating point
    values, minimizing the squared or log loss with an L2 penalty
    (squared euclidean norm) shrinking the weights toward zero.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.

    y : ndarray of shape (n_samples,)
        Target values. Must be label encoded (see preprocessing.LabelEncoder)
        for loss='multinomial', and in [0, 1] for loss='log'.

    sample_weight : array-like of shape (n_samples,), default=None
        Weights applied to individual samples (1. for unweighted).

    loss : {'log', 'squared', 'multinomial'}, default='log'
        Loss to optimize: binary logistic loss (LogisticRegression),
        squared loss (Ridge), or multinomial logistic loss.

        .. versionadded:: 0.18
           *loss='multinomial'*

    alpha : float, default=1.
        L2 regularization term ``(0.5 * alpha * || W ||_F^2)``.

    beta : float, default=0.
        L1 regularization term ``(beta * || W ||_1)``; only applied when
        ``is_saga`` is True.

    max_iter : int, default=1000
        Maximum number of passes over the training data.

    tol : float, default=0.001
        Stop when max(change in weights) / max(weights) < tol.

    verbose : int, default=0
        The verbosity level.

    random_state : int, RandomState instance or None, default=None
        Used when shuffling the data. See :term:`Glossary <random_state>`.

    check_input : bool, default=True
        If False, X and y are not validated.

    max_squared_sum : float, default=None
        Maximum squared row norm of X; computed here if None. Precompute it
        to speed up cross validation.

    warm_start_mem : dict, default=None
        Warm-start state, with keys 'coef' (intercept appended as last row
        when fitted), 'gradient_memory', 'sum_gradient',
        'intercept_sum_gradient', 'seen' and 'num_seen'.

    is_saga : bool, default=False
        Whether to use the SAGA algorithm instead of SAG.

    Returns
    -------
    coef_ : ndarray of shape (n_features,)
        Weight vector.

    n_iter_ : int
        The number of full passes on all samples.

    warm_start_mem : dict
        Updated warm-start state; 'coef' holds the fitted result, possibly
        with the fitted intercept appended.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(n_samples, n_features)
    >>> y = rng.randn(n_samples)
    >>> clf = linear_model.Ridge(solver='sag')
    >>> clf.fit(X, y)
    Ridge(solver='sag')

    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.LogisticRegression(solver='sag')
    >>> clf.fit(X, y)
    LogisticRegression(solver='sag')

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document

    :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    "SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives" <1407.0202>`

    See Also
    --------
    Ridge, SGDRegressor, ElasticNet, Lasso, SVR,
    LogisticRegression, SGDClassifier, LinearSVC, Perceptron
    """
    if warm_start_mem is None:
        warm_start_mem = {}
    # Ridge passes max_iter=None; substitute the solver default.
    if max_iter is None:
        max_iter = 1000

    if check_input:
        allowed_dtypes = [np.float64, np.float32]
        X = check_array(X, dtype=allowed_dtypes, accept_sparse="csr", order="C")
        y = check_array(y, dtype=allowed_dtypes, ensure_2d=False, order="C")

    n_samples, n_features = X.shape[0], X.shape[1]
    # Following the SGD convention, penalties are scaled by n_samples.
    alpha_scaled = float(alpha) / n_samples
    beta_scaled = float(beta) / n_samples

    # For loss == 'multinomial', y is expected to be label encoded.
    n_classes = int(y.max()) + 1 if loss == "multinomial" else 1

    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

    def _from_warm_start(key, make_default):
        # Fetch a warm-start entry, materializing the zero-initialized
        # default only when the key is absent.
        if key in warm_start_mem:
            return warm_start_mem[key]
        return make_default()

    coef_init = _from_warm_start(
        "coef",
        # when starting from scratch, assume fit_intercept is False
        lambda: np.zeros((n_features, n_classes), dtype=X.dtype, order="C"),
    )

    # A warm-started coef may carry the intercept in its last row. Note that
    # Ridge centers the data before fitting, so there fit_intercept=False.
    fit_intercept = coef_init.shape[0] == (n_features + 1)
    if fit_intercept:
        intercept_init = coef_init[-1, :]
        coef_init = coef_init[:-1, :]
    else:
        intercept_init = np.zeros(n_classes, dtype=X.dtype)

    intercept_sum_gradient = _from_warm_start(
        "intercept_sum_gradient", lambda: np.zeros(n_classes, dtype=X.dtype)
    )
    gradient_memory_init = _from_warm_start(
        "gradient_memory",
        lambda: np.zeros((n_samples, n_classes), dtype=X.dtype, order="C"),
    )
    sum_gradient_init = _from_warm_start(
        "sum_gradient",
        lambda: np.zeros((n_features, n_classes), dtype=X.dtype, order="C"),
    )
    seen_init = _from_warm_start(
        "seen", lambda: np.zeros(n_samples, dtype=np.int32, order="C")
    )
    num_seen_init = _from_warm_start("num_seen", lambda: 0)

    dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)

    if max_squared_sum is None:
        max_squared_sum = row_norms(X, squared=True).max()
    step_size = get_auto_step_size(
        max_squared_sum,
        alpha_scaled,
        loss,
        fit_intercept,
        n_samples=n_samples,
        is_saga=is_saga,
    )
    if step_size * alpha_scaled == 1:
        raise ZeroDivisionError(
            "Current sag implementation does not handle "
            "the case step_size * alpha_scaled == 1"
        )

    # Dispatch to the dtype-specialized Cython routine.
    sag_fn = sag64 if X.dtype == np.float64 else sag32
    num_seen, n_iter_ = sag_fn(
        dataset,
        coef_init,
        intercept_init,
        n_samples,
        n_features,
        n_classes,
        tol,
        max_iter,
        loss,
        step_size,
        alpha_scaled,
        beta_scaled,
        sum_gradient_init,
        gradient_memory_init,
        seen_init,
        num_seen_init,
        fit_intercept,
        intercept_sum_gradient,
        intercept_decay,
        is_saga,
        verbose,
    )

    if n_iter_ == max_iter:
        warnings.warn(
            "The max_iter was reached which means the coef_ did not converge",
            ConvergenceWarning,
        )

    if fit_intercept:
        coef_init = np.vstack((coef_init, intercept_init))

    warm_start_mem = {
        "coef": coef_init,
        "sum_gradient": sum_gradient_init,
        "intercept_sum_gradient": intercept_sum_gradient,
        "gradient_memory": gradient_memory_init,
        "seen": seen_init,
        "num_seen": num_seen,
    }

    coef_ = coef_init.T if loss == "multinomial" else coef_init[:, 0]

    return coef_, n_iter_, warm_start_mem
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_sag_fast.pyx.tp
ADDED
|
@@ -0,0 +1,647 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{{py:
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
Template file for easily generate fused types consistent code using Tempita
|
| 6 |
+
(https://github.com/cython/cython/blob/master/Cython/Tempita/_tempita.py).
|
| 7 |
+
|
| 8 |
+
Generated file: sag_fast.pyx
|
| 9 |
+
|
| 10 |
+
Each class is duplicated for all dtypes (float and double). The keywords
|
| 11 |
+
between double braces are substituted during the build.
|
| 12 |
+
|
| 13 |
+
Authors: Danny Sullivan <dbsullivan23@gmail.com>
|
| 14 |
+
Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
|
| 15 |
+
Arthur Mensch <arthur.mensch@m4x.org
|
| 16 |
+
Arthur Imbert <arthurimbert05@gmail.com>
|
| 17 |
+
Joan Massich <mailsik@gmail.com>
|
| 18 |
+
|
| 19 |
+
License: BSD 3 clause
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
# name_suffix, c_type, np_type
|
| 23 |
+
dtypes = [('64', 'double', 'np.float64'),
|
| 24 |
+
('32', 'float', 'np.float32')]
|
| 25 |
+
|
| 26 |
+
}}
|
| 27 |
+
"""SAG and SAGA implementation"""
|
| 28 |
+
|
| 29 |
+
import numpy as np
|
| 30 |
+
from libc.math cimport exp, fabs, isfinite, log
|
| 31 |
+
from libc.time cimport time, time_t
|
| 32 |
+
from libc.stdio cimport printf
|
| 33 |
+
|
| 34 |
+
from .._loss._loss cimport (
|
| 35 |
+
CyLossFunction,
|
| 36 |
+
CyHalfBinomialLoss,
|
| 37 |
+
CyHalfMultinomialLoss,
|
| 38 |
+
CyHalfSquaredError,
|
| 39 |
+
)
|
| 40 |
+
from ..utils._seq_dataset cimport SequentialDataset32, SequentialDataset64
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
{{for name_suffix, c_type, np_type in dtypes}}

# Return the larger of x and y. Declared cdef inline, noexcept and nogil so
# it can be used inside the GIL-free solver loops; one specialization is
# generated per dtype (float32 and float64) by the Tempita loop.
cdef inline {{c_type}} fmax{{name_suffix}}({{c_type}} x, {{c_type}} y) noexcept nogil:
    if x > y:
        return x
    return y

{{endfor}}
|
| 51 |
+
|
| 52 |
+
{{for name_suffix, c_type, np_type in dtypes}}

# Soft-thresholding operator: sign(x) * max(|x| - shrinkage, 0), written
# branch-free as a difference of two fmax calls. This is the proximal
# operator applied for the L1 penalty in the SAGA variant.
cdef inline {{c_type}} _soft_thresholding{{name_suffix}}({{c_type}} x, {{c_type}} shrinkage) noexcept nogil:
    return fmax{{name_suffix}}(x - shrinkage, 0) - fmax{{name_suffix}}(- x - shrinkage, 0)

{{endfor}}
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
{{for name_suffix, c_type, np_type in dtypes}}

def sag{{name_suffix}}(
    SequentialDataset{{name_suffix}} dataset,
    {{c_type}}[:, ::1] weights_array,
    {{c_type}}[::1] intercept_array,
    int n_samples,
    int n_features,
    int n_classes,
    double tol,
    int max_iter,
    str loss_function,
    double step_size,
    double alpha,
    double beta,
    {{c_type}}[:, ::1] sum_gradient_init,
    {{c_type}}[:, ::1] gradient_memory_init,
    bint[::1] seen_init,
    int num_seen,
    bint fit_intercept,
    {{c_type}}[::1] intercept_sum_gradient_init,
    double intercept_decay,
    bint saga,
    bint verbose
):
    """Stochastic Average Gradient (SAG) and SAGA solvers.

    Used in Ridge and LogisticRegression.

    Some implementation details:

    - Just-in-time (JIT) update: In SAG(A), the average-gradient update is
    collinear with the drawn sample X_i. Therefore, if the data is sparse, the
    random sample X_i will change the average gradient only on features j where
    X_ij != 0. In some cases, the average gradient on feature j might change
    only after k random samples with no change. In these cases, instead of
    applying k times the same gradient step on feature j, we apply the gradient
    step only once, scaled by k. This is called the "just-in-time update", and
    it is performed in `lagged_update{{name_suffix}}`. This function also
    applies the proximal operator after the gradient step (if L1 regularization
    is used in SAGA).

    - Weight scale: In SAG(A), the weights are scaled down at each iteration
    due to the L2 regularization. To avoid updating all the weights at each
    iteration, the weight scale is factored out in a separate variable `wscale`
    which is only used in the JIT update. When this variable is too small, it
    is reset for numerical stability using the function
    `scale_weights{{name_suffix}}`. This reset requires applying all remaining
    JIT updates. This reset is also performed every `n_samples` iterations
    before each convergence check, so when the algorithm stops, we are sure
    that there is no remaining JIT updates.

    Reference
    ---------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document
    (section 4.3)

    :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    "SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives" <1407.0202>`
    """
    # the data pointer for x, the current sample
    cdef {{c_type}} *x_data_ptr = NULL
    # the index pointer for the column of the data
    cdef int *x_ind_ptr = NULL
    # the number of non-zero features for current sample
    cdef int xnnz = -1
    # the label value for current sample
    cdef {{c_type}} y
    # the sample weight
    cdef {{c_type}} sample_weight

    # helper variable for indexes
    cdef int f_idx, s_idx, feature_ind, class_ind, j
    # the number of passes through all samples
    cdef int n_iter = 0
    # helper to track iterations through samples
    cdef int sample_itr
    # the index (row number) of the current sample
    cdef int sample_ind

    # the maximum change in weights, used to compute stopping criteria
    cdef {{c_type}} max_change
    # a holder variable for the max weight, used to compute stopping criteria
    cdef {{c_type}} max_weight

    # the start time of the fit
    cdef time_t start_time
    # the end time of the fit
    cdef time_t end_time

    # precomputation since the step size does not change in this implementation
    cdef {{c_type}} wscale_update = 1.0 - step_size * alpha

    # the pointer to the coef_ or weights
    cdef {{c_type}}* weights = &weights_array[0, 0]

    # the sum of gradients for each feature
    cdef {{c_type}}* sum_gradient = &sum_gradient_init[0, 0]

    # the previously seen gradient for each sample
    cdef {{c_type}}* gradient_memory = &gradient_memory_init[0, 0]

    # the cumulative sums needed for JIT params
    cdef {{c_type}}[::1] cumulative_sums = np.empty(n_samples, dtype={{np_type}}, order="c")

    # the index for the last time this feature was updated
    cdef int[::1] feature_hist = np.zeros(n_features, dtype=np.int32, order="c")

    # the previous weights to use to compute stopping criteria
    cdef {{c_type}}[:, ::1] previous_weights_array = np.zeros((n_features, n_classes), dtype={{np_type}}, order="c")
    cdef {{c_type}}* previous_weights = &previous_weights_array[0, 0]

    # per-class raw prediction for the current sample
    cdef {{c_type}}[::1] prediction = np.zeros(n_classes, dtype={{np_type}}, order="c")

    # per-class gradient for the current sample
    cdef {{c_type}}[::1] gradient = np.zeros(n_classes, dtype={{np_type}}, order="c")

    # Intermediate variable that needs declaration since cython cannot infer when templating
    cdef {{c_type}} val

    # Bias correction term in saga
    cdef {{c_type}} gradient_correction

    # running scale factor lazily applied to the weights (see docstring)
    cdef {{c_type}} wscale = 1.0

    # return value (-1 if an error occurred, 0 otherwise)
    cdef int status = 0

    # the cumulative sums for each iteration for the sparse implementation
    cumulative_sums[0] = 0.0

    # the multiplicative scale needed for JIT params
    cdef {{c_type}}[::1] cumulative_sums_prox
    cdef {{c_type}}* cumulative_sums_prox_ptr

    # the proximal (L1) step is only used by SAGA with beta > 0
    cdef bint prox = beta > 0 and saga

    # Loss function to optimize
    cdef CyLossFunction loss
    # Whether the loss function is multinomial
    cdef bint multinomial = False
    # Multinomial loss function
    cdef CyHalfMultinomialLoss multiloss

    if loss_function == "multinomial":
        multinomial = True
        multiloss = CyHalfMultinomialLoss()
    elif loss_function == "log":
        loss = CyHalfBinomialLoss()
    elif loss_function == "squared":
        loss = CyHalfSquaredError()
    else:
        raise ValueError("Invalid loss parameter: got %s instead of "
                         "one of ('log', 'squared', 'multinomial')"
                         % loss_function)

    if prox:
        cumulative_sums_prox = np.empty(n_samples, dtype={{np_type}}, order="c")
        cumulative_sums_prox_ptr = &cumulative_sums_prox[0]
    else:
        cumulative_sums_prox = None
        cumulative_sums_prox_ptr = NULL

    with nogil:
        start_time = time(NULL)
        for n_iter in range(max_iter):
            for sample_itr in range(n_samples):
                # extract a random sample
                sample_ind = dataset.random(&x_data_ptr, &x_ind_ptr, &xnnz, &y, &sample_weight)

                # cached index for gradient_memory
                s_idx = sample_ind * n_classes

                # update the number of samples seen and the seen array
                if seen_init[sample_ind] == 0:
                    num_seen += 1
                    seen_init[sample_ind] = 1

                # make the weight updates (just-in-time gradient step, and prox operator)
                if sample_itr > 0:
                    status = lagged_update{{name_suffix}}(
                        weights=weights,
                        wscale=wscale,
                        xnnz=xnnz,
                        n_samples=n_samples,
                        n_classes=n_classes,
                        sample_itr=sample_itr,
                        cumulative_sums=&cumulative_sums[0],
                        cumulative_sums_prox=cumulative_sums_prox_ptr,
                        feature_hist=&feature_hist[0],
                        prox=prox,
                        sum_gradient=sum_gradient,
                        x_ind_ptr=x_ind_ptr,
                        reset=False,
                        n_iter=n_iter
                    )
                    if status == -1:
                        break

                # find the current prediction
                predict_sample{{name_suffix}}(
                    x_data_ptr=x_data_ptr,
                    x_ind_ptr=x_ind_ptr,
                    xnnz=xnnz,
                    w_data_ptr=weights,
                    wscale=wscale,
                    intercept=&intercept_array[0],
                    prediction=&prediction[0],
                    n_classes=n_classes
                )

                # compute the gradient for this sample, given the prediction
                if multinomial:
                    multiloss.cy_gradient(
                        y_true=y,
                        raw_prediction=prediction,
                        sample_weight=sample_weight,
                        gradient_out=gradient,
                    )
                else:
                    gradient[0] = loss.cy_gradient(y, prediction[0]) * sample_weight

                # L2 regularization by simply rescaling the weights
                wscale *= wscale_update

                # make the updates to the sum of gradients
                for j in range(xnnz):
                    feature_ind = x_ind_ptr[j]
                    val = x_data_ptr[j]
                    f_idx = feature_ind * n_classes
                    for class_ind in range(n_classes):
                        gradient_correction = \
                            val * (gradient[class_ind] -
                                   gradient_memory[s_idx + class_ind])
                        if saga:
                            # Note that this is not the main gradient step,
                            # which is performed just-in-time in lagged_update.
                            # This part is done outside the JIT update
                            # as it does not depend on the average gradient.
                            # The prox operator is applied after the JIT update
                            weights[f_idx + class_ind] -= \
                                (gradient_correction * step_size
                                 * (1 - 1. / num_seen) / wscale)
                        sum_gradient[f_idx + class_ind] += gradient_correction

                # fit the intercept
                if fit_intercept:
                    for class_ind in range(n_classes):
                        gradient_correction = (gradient[class_ind] -
                                               gradient_memory[s_idx + class_ind])
                        intercept_sum_gradient_init[class_ind] += gradient_correction
                        gradient_correction *= step_size * (1. - 1. / num_seen)
                        if saga:
                            intercept_array[class_ind] -= \
                                (step_size * intercept_sum_gradient_init[class_ind] /
                                 num_seen * intercept_decay) + gradient_correction
                        else:
                            intercept_array[class_ind] -= \
                                (step_size * intercept_sum_gradient_init[class_ind] /
                                 num_seen * intercept_decay)

                        # check to see that the intercept is not inf or NaN
                        if not isfinite(intercept_array[class_ind]):
                            status = -1
                            break
                    # Break from the n_samples outer loop if an error happened
                    # in the fit_intercept n_classes inner loop
                    if status == -1:
                        break

                # update the gradient memory for this sample
                for class_ind in range(n_classes):
                    gradient_memory[s_idx + class_ind] = gradient[class_ind]

                if sample_itr == 0:
                    cumulative_sums[0] = step_size / (wscale * num_seen)
                    if prox:
                        cumulative_sums_prox[0] = step_size * beta / wscale
                else:
                    cumulative_sums[sample_itr] = \
                        (cumulative_sums[sample_itr - 1] +
                         step_size / (wscale * num_seen))
                    if prox:
                        cumulative_sums_prox[sample_itr] = \
                            (cumulative_sums_prox[sample_itr - 1] +
                             step_size * beta / wscale)
                # If wscale gets too small, we need to reset the scale.
                # This also resets the just-in-time update system.
                if wscale < 1e-9:
                    if verbose:
                        with gil:
                            print("rescaling...")
                    status = scale_weights{{name_suffix}}(
                        weights=weights,
                        wscale=&wscale,
                        n_features=n_features,
                        n_samples=n_samples,
                        n_classes=n_classes,
                        sample_itr=sample_itr,
                        cumulative_sums=&cumulative_sums[0],
                        cumulative_sums_prox=cumulative_sums_prox_ptr,
                        feature_hist=&feature_hist[0],
                        prox=prox,
                        sum_gradient=sum_gradient,
                        n_iter=n_iter
                    )
                    if status == -1:
                        break

            # Break from the n_iter outer loop if an error happened in the
            # n_samples inner loop
            if status == -1:
                break

            # We scale the weights every n_samples iterations and reset the
            # just-in-time update system for numerical stability.
            # Because this reset is done before every convergence check, we are
            # sure there is no remaining lagged update when the algorithm stops.
            status = scale_weights{{name_suffix}}(
                weights=weights,
                wscale=&wscale,
                n_features=n_features,
                n_samples=n_samples,
                n_classes=n_classes,
                sample_itr=n_samples - 1,
                cumulative_sums=&cumulative_sums[0],
                cumulative_sums_prox=cumulative_sums_prox_ptr,
                feature_hist=&feature_hist[0],
                prox=prox,
                sum_gradient=sum_gradient,
                n_iter=n_iter
            )
            if status == -1:
                break

            # check if the stopping criteria is reached
            max_change = 0.0
            max_weight = 0.0
            for idx in range(n_features * n_classes):
                max_weight = fmax{{name_suffix}}(max_weight, fabs(weights[idx]))
                max_change = fmax{{name_suffix}}(max_change, fabs(weights[idx] - previous_weights[idx]))
                previous_weights[idx] = weights[idx]
            if ((max_weight != 0 and max_change / max_weight <= tol)
                    or max_weight == 0 and max_change == 0):
                if verbose:
                    end_time = time(NULL)
                    with gil:
                        print("convergence after %d epochs took %d seconds" %
                              (n_iter + 1, end_time - start_time))
                break
            elif verbose:
                printf('Epoch %d, change: %.8g\n', n_iter + 1,
                       max_change / max_weight)
    n_iter += 1
    # We do the error treatment here based on error code in status to avoid
    # re-acquiring the GIL within the cython code, which slows the computation
    # when the sag/saga solver is used concurrently in multiple Python threads.
    if status == -1:
        raise ValueError(("Floating-point under-/overflow occurred at epoch"
                          " #%d. Scaling input data with StandardScaler or"
                          " MinMaxScaler might help.") % n_iter)

    if verbose and n_iter >= max_iter:
        end_time = time(NULL)
        print(("max_iter reached after %d seconds") %
              (end_time - start_time))

    return num_seen, n_iter

{{endfor}}
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
{{for name_suffix, c_type, np_type in dtypes}}

cdef int scale_weights{{name_suffix}}(
    {{c_type}}* weights,
    {{c_type}}* wscale,
    int n_features,
    int n_samples,
    int n_classes,
    int sample_itr,
    {{c_type}}* cumulative_sums,
    {{c_type}}* cumulative_sums_prox,
    int* feature_hist,
    bint prox,
    {{c_type}}* sum_gradient,
    int n_iter
) noexcept nogil:
    """Scale the weights and reset wscale to 1.0 for numerical stability, and
    reset the just-in-time (JIT) update system.

    See `sag{{name_suffix}}`'s docstring about the JIT update system.

    wscale = (1 - step_size * alpha) ** (n_iter * n_samples + sample_itr)
    can become very small, so we reset it every n_samples iterations to 1.0 for
    numerical stability. To be able to scale, we first need to update every
    coefficients and reset the just-in-time update system.
    This also limits the size of `cumulative_sums`.

    Returns 0 on success, -1 if a non-finite weight was produced.
    """

    cdef int status
    # Positional call to lagged_update: xnnz=n_features (update every
    # feature, not just the current sample's non-zeros), x_ind_ptr=NULL
    # (unused when reset=True), reset=True.
    status = lagged_update{{name_suffix}}(
        weights,
        wscale[0],
        n_features,
        n_samples,
        n_classes,
        sample_itr + 1,
        cumulative_sums,
        cumulative_sums_prox,
        feature_hist,
        prox,
        sum_gradient,
        NULL,
        True,
        n_iter
    )
    # if lagged update succeeded, reset wscale to 1.0
    if status == 0:
        wscale[0] = 1.0
    return status

{{endfor}}
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
{{for name_suffix, c_type, np_type in dtypes}}

cdef int lagged_update{{name_suffix}}(
    {{c_type}}* weights,
    {{c_type}} wscale,
    int xnnz,
    int n_samples,
    int n_classes,
    int sample_itr,
    {{c_type}}* cumulative_sums,
    {{c_type}}* cumulative_sums_prox,
    int* feature_hist,
    bint prox,
    {{c_type}}* sum_gradient,
    int* x_ind_ptr,
    bint reset,
    int n_iter
) noexcept nogil:
    """Hard perform the JIT updates for non-zero features of present sample.

    See `sag{{name_suffix}}`'s docstring about the JIT update system.

    The updates that await are kept in memory using cumulative_sums,
    cumulative_sums_prox, wscale and feature_hist. See original SAGA paper
    (Defazio et al. 2014) for details. If reset=True, we also reset wscale to
    1 (this is done at the end of each epoch).

    Returns 0 on success, -1 if a non-finite weight was produced.
    """
    cdef int feature_ind, class_ind, idx, f_idx, lagged_ind, last_update_ind
    cdef {{c_type}} cum_sum, grad_step, prox_step, cum_sum_prox
    for feature_ind in range(xnnz):
        if not reset:
            # NOTE: the loop variable is deliberately overwritten here: when
            # not resetting, we iterate over the sample's non-zero feature
            # indices rather than over 0..xnnz-1 directly.
            feature_ind = x_ind_ptr[feature_ind]
        f_idx = feature_ind * n_classes

        # accumulated (gradient / prox) step sizes since this feature's last
        # update, read from the cumulative sum arrays
        cum_sum = cumulative_sums[sample_itr - 1]
        if prox:
            cum_sum_prox = cumulative_sums_prox[sample_itr - 1]
        if feature_hist[feature_ind] != 0:
            cum_sum -= cumulative_sums[feature_hist[feature_ind] - 1]
            if prox:
                cum_sum_prox -= cumulative_sums_prox[feature_hist[feature_ind] - 1]
        if not prox:
            # no L1: apply all pending gradient steps in one scaled step
            for class_ind in range(n_classes):
                idx = f_idx + class_ind
                weights[idx] -= cum_sum * sum_gradient[idx]
                if reset:
                    weights[idx] *= wscale
                    if not isfinite(weights[idx]):
                        # returning here does not require the gil as the return
                        # type is a C integer
                        return -1
        else:
            for class_ind in range(n_classes):
                idx = f_idx + class_ind
                if fabs(sum_gradient[idx] * cum_sum) < cum_sum_prox:
                    # In this case, we can perform all the gradient steps and
                    # all the proximal steps in this order, which is more
                    # efficient than unrolling all the lagged updates.
                    # Idea taken from scikit-learn-contrib/lightning.
                    weights[idx] -= cum_sum * sum_gradient[idx]
                    weights[idx] = _soft_thresholding{{name_suffix}}(weights[idx],
                                                                    cum_sum_prox)
                else:
                    # otherwise, unroll the individual lagged updates, newest
                    # to oldest, interleaving gradient and proximal steps
                    last_update_ind = feature_hist[feature_ind]
                    if last_update_ind == -1:
                        last_update_ind = sample_itr - 1
                    for lagged_ind in range(sample_itr - 1,
                                            last_update_ind - 1, -1):
                        if lagged_ind > 0:
                            grad_step = (cumulative_sums[lagged_ind]
                                         - cumulative_sums[lagged_ind - 1])
                            prox_step = (cumulative_sums_prox[lagged_ind]
                                         - cumulative_sums_prox[lagged_ind - 1])
                        else:
                            grad_step = cumulative_sums[lagged_ind]
                            prox_step = cumulative_sums_prox[lagged_ind]
                        weights[idx] -= sum_gradient[idx] * grad_step
                        weights[idx] = _soft_thresholding{{name_suffix}}(weights[idx],
                                                                        prox_step)

                if reset:
                    weights[idx] *= wscale
                    # check to see that the weight is not inf or NaN
                    if not isfinite(weights[idx]):
                        return -1
        if reset:
            feature_hist[feature_ind] = sample_itr % n_samples
        else:
            feature_hist[feature_ind] = sample_itr

    if reset:
        cumulative_sums[sample_itr - 1] = 0.0
        if prox:
            cumulative_sums_prox[sample_itr - 1] = 0.0

    return 0

{{endfor}}
|
| 590 |
+
|
| 591 |
+
|
| 592 |
+
{{for name_suffix, c_type, np_type in dtypes}}

cdef void predict_sample{{name_suffix}}(
    {{c_type}}* x_data_ptr,
    int* x_ind_ptr,
    int xnnz,
    {{c_type}}* w_data_ptr,
    {{c_type}} wscale,
    {{c_type}}* intercept,
    {{c_type}}* prediction,
    int n_classes
) noexcept nogil:
    """Compute the prediction given sparse sample x and dense weight w.

    Parameters
    ----------
    x_data_ptr : pointer
        Pointer to the data of the sample x

    x_ind_ptr : pointer
        Pointer to the indices of the sample x

    xnnz : int
        Number of non-zero element in the sample x

    w_data_ptr : pointer
        Pointer to the data of the weights w

    wscale : {{c_type}}
        Scale of the weights w

    intercept : pointer
        Pointer to the intercept

    prediction : pointer
        Pointer to store the resulting prediction

    n_classes : int
        Number of classes in multinomial case. Equals 1 in binary case.

    """
    cdef int feature_ind, class_ind, j
    cdef {{c_type}} innerprod

    for class_ind in range(n_classes):
        innerprod = 0.0
        # Compute the dot product only on non-zero elements of x
        # (weights are stored row-major as [feature, class])
        for j in range(xnnz):
            feature_ind = x_ind_ptr[j]
            innerprod += (w_data_ptr[feature_ind * n_classes + class_ind] *
                          x_data_ptr[j])

        # apply the lazily factored-out weight scale, then add the intercept
        prediction[class_ind] = wscale * innerprod + intercept[class_ind]


{{endfor}}
|