Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +2 -0
- pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/INSTALLER +1 -0
- pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/LICENSE +20 -0
- pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/METADATA +68 -0
- pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/RECORD +15 -0
- pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/REQUESTED +0 -0
- pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/WHEEL +5 -0
- pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/top_level.txt +1 -0
- pllava/lib/python3.10/site-packages/nvidia_cublas_cu12-12.4.5.8.dist-info/INSTALLER +1 -0
- pllava/lib/python3.10/site-packages/nvidia_cublas_cu12-12.4.5.8.dist-info/License.txt +1568 -0
- pllava/lib/python3.10/site-packages/nvidia_cublas_cu12-12.4.5.8.dist-info/RECORD +24 -0
- pllava/lib/python3.10/site-packages/nvidia_cublas_cu12-12.4.5.8.dist-info/REQUESTED +0 -0
- pllava/lib/python3.10/site-packages/nvidia_cublas_cu12-12.4.5.8.dist-info/WHEEL +5 -0
- pllava/lib/python3.10/site-packages/nvidia_cublas_cu12-12.4.5.8.dist-info/top_level.txt +1 -0
- pllava/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so +0 -0
- pllava/lib/python3.10/site-packages/torch/_VF.py +31 -0
- pllava/lib/python3.10/site-packages/torch/_VF.pyi +0 -0
- pllava/lib/python3.10/site-packages/torch/__config__.py +23 -0
- pllava/lib/python3.10/site-packages/torch/__init__.py +2665 -0
- pllava/lib/python3.10/site-packages/torch/_appdirs.py +667 -0
- pllava/lib/python3.10/site-packages/torch/_classes.py +56 -0
- pllava/lib/python3.10/site-packages/torch/_compile.py +38 -0
- pllava/lib/python3.10/site-packages/torch/_custom_ops.py +324 -0
- pllava/lib/python3.10/site-packages/torch/_deploy.py +104 -0
- pllava/lib/python3.10/site-packages/torch/_guards.py +925 -0
- pllava/lib/python3.10/site-packages/torch/_linalg_utils.py +150 -0
- pllava/lib/python3.10/site-packages/torch/_lowrank.py +294 -0
- pllava/lib/python3.10/site-packages/torch/_ops.py +1355 -0
- pllava/lib/python3.10/site-packages/torch/_python_dispatcher.py +182 -0
- pllava/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_refs/_conversions.py +119 -0
- pllava/lib/python3.10/site-packages/torch/_refs/fft.py +590 -0
- pllava/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py +309 -0
- pllava/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py +1279 -0
- pllava/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_refs/special/__init__.py +236 -0
- pllava/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/_size_docs.py +39 -0
- pllava/lib/python3.10/site-packages/torch/_streambase.py +46 -0
- pllava/lib/python3.10/site-packages/torch/_tensor_docs.py +0 -0
- pllava/lib/python3.10/site-packages/torch/_torch_docs.py +0 -0
- pllava/lib/python3.10/site-packages/torch/_vmap_internals.py +245 -0
- pllava/lib/python3.10/site-packages/torch/_weights_only_unpickler.py +426 -0
- pllava/lib/python3.10/site-packages/torch/autograd/__pycache__/function.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/autograd/__pycache__/gradcheck.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/autograd/__pycache__/graph.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_legacy.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/autograd/__pycache__/variable.cpython-310.pyc +0 -0
- pllava/lib/python3.10/site-packages/torch/autograd/function.py +844 -0
.gitattributes
CHANGED
|
@@ -299,3 +299,5 @@ pllava/lib/python3.10/site-packages/sympy/matrices/tests/__pycache__/test_matrix
|
|
| 299 |
pllava/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_spin.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 300 |
pllava/lib/python3.10/site-packages/sympy/polys/__pycache__/polytools.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 301 |
pllava/lib/python3.10/site-packages/sympy/polys/tests/__pycache__/test_polytools.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 299 |
pllava/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_spin.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 300 |
pllava/lib/python3.10/site-packages/sympy/polys/__pycache__/polytools.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 301 |
pllava/lib/python3.10/site-packages/sympy/polys/tests/__pycache__/test_polytools.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 302 |
+
pllava/lib/python3.10/site-packages/torchvision.libs/libjpeg.ceea7512.so.62 filter=lfs diff=lfs merge=lfs -text
|
| 303 |
+
pllava/lib/python3.10/site-packages/torchvision.libs/libz.5f199d92.so.1 filter=lfs diff=lfs merge=lfs -text
|
pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
This package contains a modified version of ca-bundle.crt:
|
| 2 |
+
|
| 3 |
+
ca-bundle.crt -- Bundle of CA Root Certificates
|
| 4 |
+
|
| 5 |
+
This is a bundle of X.509 certificates of public Certificate Authorities
|
| 6 |
+
(CA). These were automatically extracted from Mozilla's root certificates
|
| 7 |
+
file (certdata.txt). This file can be found in the mozilla source tree:
|
| 8 |
+
https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt
|
| 9 |
+
It contains the certificates in PEM format and therefore
|
| 10 |
+
can be directly used with curl / libcurl / php_curl, or with
|
| 11 |
+
an Apache+mod_ssl webserver for SSL client authentication.
|
| 12 |
+
Just configure this file as the SSLCACertificateFile.#
|
| 13 |
+
|
| 14 |
+
***** BEGIN LICENSE BLOCK *****
|
| 15 |
+
This Source Code Form is subject to the terms of the Mozilla Public License,
|
| 16 |
+
v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
|
| 17 |
+
one at http://mozilla.org/MPL/2.0/.
|
| 18 |
+
|
| 19 |
+
***** END LICENSE BLOCK *****
|
| 20 |
+
@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
|
pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/METADATA
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: certifi
|
| 3 |
+
Version: 2024.12.14
|
| 4 |
+
Summary: Python package for providing Mozilla's CA Bundle.
|
| 5 |
+
Home-page: https://github.com/certifi/python-certifi
|
| 6 |
+
Author: Kenneth Reitz
|
| 7 |
+
Author-email: me@kennethreitz.com
|
| 8 |
+
License: MPL-2.0
|
| 9 |
+
Project-URL: Source, https://github.com/certifi/python-certifi
|
| 10 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 11 |
+
Classifier: Intended Audience :: Developers
|
| 12 |
+
Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
|
| 13 |
+
Classifier: Natural Language :: English
|
| 14 |
+
Classifier: Programming Language :: Python
|
| 15 |
+
Classifier: Programming Language :: Python :: 3
|
| 16 |
+
Classifier: Programming Language :: Python :: 3 :: Only
|
| 17 |
+
Classifier: Programming Language :: Python :: 3.6
|
| 18 |
+
Classifier: Programming Language :: Python :: 3.7
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 24 |
+
Classifier: Programming Language :: Python :: 3.13
|
| 25 |
+
Requires-Python: >=3.6
|
| 26 |
+
License-File: LICENSE
|
| 27 |
+
|
| 28 |
+
Certifi: Python SSL Certificates
|
| 29 |
+
================================
|
| 30 |
+
|
| 31 |
+
Certifi provides Mozilla's carefully curated collection of Root Certificates for
|
| 32 |
+
validating the trustworthiness of SSL certificates while verifying the identity
|
| 33 |
+
of TLS hosts. It has been extracted from the `Requests`_ project.
|
| 34 |
+
|
| 35 |
+
Installation
|
| 36 |
+
------------
|
| 37 |
+
|
| 38 |
+
``certifi`` is available on PyPI. Simply install it with ``pip``::
|
| 39 |
+
|
| 40 |
+
$ pip install certifi
|
| 41 |
+
|
| 42 |
+
Usage
|
| 43 |
+
-----
|
| 44 |
+
|
| 45 |
+
To reference the installed certificate authority (CA) bundle, you can use the
|
| 46 |
+
built-in function::
|
| 47 |
+
|
| 48 |
+
>>> import certifi
|
| 49 |
+
|
| 50 |
+
>>> certifi.where()
|
| 51 |
+
'/usr/local/lib/python3.7/site-packages/certifi/cacert.pem'
|
| 52 |
+
|
| 53 |
+
Or from the command line::
|
| 54 |
+
|
| 55 |
+
$ python -m certifi
|
| 56 |
+
/usr/local/lib/python3.7/site-packages/certifi/cacert.pem
|
| 57 |
+
|
| 58 |
+
Enjoy!
|
| 59 |
+
|
| 60 |
+
.. _`Requests`: https://requests.readthedocs.io/en/master/
|
| 61 |
+
|
| 62 |
+
Addition/Removal of Certificates
|
| 63 |
+
--------------------------------
|
| 64 |
+
|
| 65 |
+
Certifi does not support any addition/removal or other modification of the
|
| 66 |
+
CA trust store content. This project is intended to provide a reliable and
|
| 67 |
+
highly portable root of trust to python deployments. Look to upstream projects
|
| 68 |
+
for methods to use alternate trust.
|
pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/RECORD
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
certifi-2024.12.14.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
certifi-2024.12.14.dist-info/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989
|
| 3 |
+
certifi-2024.12.14.dist-info/METADATA,sha256=z71eRGTFszr4qsHenZ_vG2Fd5bV9PBWmJgShthc8IkY,2274
|
| 4 |
+
certifi-2024.12.14.dist-info/RECORD,,
|
| 5 |
+
certifi-2024.12.14.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
certifi-2024.12.14.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
|
| 7 |
+
certifi-2024.12.14.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
|
| 8 |
+
certifi/__init__.py,sha256=LqjNcwt1sYSS3uhPXrf6jJzVCuHtNVpuirg5rb7mVm8,94
|
| 9 |
+
certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
|
| 10 |
+
certifi/__pycache__/__init__.cpython-310.pyc,,
|
| 11 |
+
certifi/__pycache__/__main__.cpython-310.pyc,,
|
| 12 |
+
certifi/__pycache__/core.cpython-310.pyc,,
|
| 13 |
+
certifi/cacert.pem,sha256=gHiXJU84Oif0XkT0llbzeKurIUHt5DpK08JCCll90j8,294769
|
| 14 |
+
certifi/core.py,sha256=qRDDFyXVJwTB_EmoGppaXU_R9qCZvhl-EzxPMuV3nTA,4426
|
| 15 |
+
certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/REQUESTED
ADDED
|
File without changes
|
pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: setuptools (75.6.0)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
| 5 |
+
|
pllava/lib/python3.10/site-packages/certifi-2024.12.14.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
certifi
|
pllava/lib/python3.10/site-packages/nvidia_cublas_cu12-12.4.5.8.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
pllava/lib/python3.10/site-packages/nvidia_cublas_cu12-12.4.5.8.dist-info/License.txt
ADDED
|
@@ -0,0 +1,1568 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
End User License Agreement
|
| 2 |
+
--------------------------
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
Preface
|
| 6 |
+
-------
|
| 7 |
+
|
| 8 |
+
The Software License Agreement in Chapter 1 and the Supplement
|
| 9 |
+
in Chapter 2 contain license terms and conditions that govern
|
| 10 |
+
the use of NVIDIA software. By accepting this agreement, you
|
| 11 |
+
agree to comply with all the terms and conditions applicable
|
| 12 |
+
to the product(s) included herein.
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
NVIDIA Driver
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
Description
|
| 19 |
+
|
| 20 |
+
This package contains the operating system driver and
|
| 21 |
+
fundamental system software components for NVIDIA GPUs.
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
NVIDIA CUDA Toolkit
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
Description
|
| 28 |
+
|
| 29 |
+
The NVIDIA CUDA Toolkit provides command-line and graphical
|
| 30 |
+
tools for building, debugging and optimizing the performance
|
| 31 |
+
of applications accelerated by NVIDIA GPUs, runtime and math
|
| 32 |
+
libraries, and documentation including programming guides,
|
| 33 |
+
user manuals, and API references.
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
Default Install Location of CUDA Toolkit
|
| 37 |
+
|
| 38 |
+
Windows platform:
|
| 39 |
+
|
| 40 |
+
%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
|
| 41 |
+
|
| 42 |
+
Linux platform:
|
| 43 |
+
|
| 44 |
+
/usr/local/cuda-#.#
|
| 45 |
+
|
| 46 |
+
Mac platform:
|
| 47 |
+
|
| 48 |
+
/Developer/NVIDIA/CUDA-#.#
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
NVIDIA CUDA Samples
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
Description
|
| 55 |
+
|
| 56 |
+
This package includes over 100+ CUDA examples that demonstrate
|
| 57 |
+
various CUDA programming principles, and efficient CUDA
|
| 58 |
+
implementation of algorithms in specific application domains.
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
Default Install Location of CUDA Samples
|
| 62 |
+
|
| 63 |
+
Windows platform:
|
| 64 |
+
|
| 65 |
+
%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
|
| 66 |
+
|
| 67 |
+
Linux platform:
|
| 68 |
+
|
| 69 |
+
/usr/local/cuda-#.#/samples
|
| 70 |
+
|
| 71 |
+
and
|
| 72 |
+
|
| 73 |
+
$HOME/NVIDIA_CUDA-#.#_Samples
|
| 74 |
+
|
| 75 |
+
Mac platform:
|
| 76 |
+
|
| 77 |
+
/Developer/NVIDIA/CUDA-#.#/samples
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
NVIDIA Nsight Visual Studio Edition (Windows only)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
Description
|
| 84 |
+
|
| 85 |
+
NVIDIA Nsight Development Platform, Visual Studio Edition is a
|
| 86 |
+
development environment integrated into Microsoft Visual
|
| 87 |
+
Studio that provides tools for debugging, profiling, analyzing
|
| 88 |
+
and optimizing your GPU computing and graphics applications.
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
Default Install Location of Nsight Visual Studio Edition
|
| 92 |
+
|
| 93 |
+
Windows platform:
|
| 94 |
+
|
| 95 |
+
%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
1. License Agreement for NVIDIA Software Development Kits
|
| 99 |
+
---------------------------------------------------------
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
Release Date: July 26, 2018
|
| 103 |
+
---------------------------
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
Important NoticeRead before downloading, installing,
|
| 107 |
+
copying or using the licensed software:
|
| 108 |
+
-------------------------------------------------------
|
| 109 |
+
|
| 110 |
+
This license agreement, including exhibits attached
|
| 111 |
+
("Agreement”) is a legal agreement between you and NVIDIA
|
| 112 |
+
Corporation ("NVIDIA") and governs your use of a NVIDIA
|
| 113 |
+
software development kit (“SDK”).
|
| 114 |
+
|
| 115 |
+
Each SDK has its own set of software and materials, but here
|
| 116 |
+
is a description of the types of items that may be included in
|
| 117 |
+
a SDK: source code, header files, APIs, data sets and assets
|
| 118 |
+
(examples include images, textures, models, scenes, videos,
|
| 119 |
+
native API input/output files), binary software, sample code,
|
| 120 |
+
libraries, utility programs, programming code and
|
| 121 |
+
documentation.
|
| 122 |
+
|
| 123 |
+
This Agreement can be accepted only by an adult of legal age
|
| 124 |
+
of majority in the country in which the SDK is used.
|
| 125 |
+
|
| 126 |
+
If you are entering into this Agreement on behalf of a company
|
| 127 |
+
or other legal entity, you represent that you have the legal
|
| 128 |
+
authority to bind the entity to this Agreement, in which case
|
| 129 |
+
“you” will mean the entity you represent.
|
| 130 |
+
|
| 131 |
+
If you don’t have the required age or authority to accept
|
| 132 |
+
this Agreement, or if you don’t accept all the terms and
|
| 133 |
+
conditions of this Agreement, do not download, install or use
|
| 134 |
+
the SDK.
|
| 135 |
+
|
| 136 |
+
You agree to use the SDK only for purposes that are permitted
|
| 137 |
+
by (a) this Agreement, and (b) any applicable law, regulation
|
| 138 |
+
or generally accepted practices or guidelines in the relevant
|
| 139 |
+
jurisdictions.
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
1.1. License
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
1.1.1. License Grant
|
| 146 |
+
|
| 147 |
+
Subject to the terms of this Agreement, NVIDIA hereby grants
|
| 148 |
+
you a non-exclusive, non-transferable license, without the
|
| 149 |
+
right to sublicense (except as expressly provided in this
|
| 150 |
+
Agreement) to:
|
| 151 |
+
|
| 152 |
+
1. Install and use the SDK,
|
| 153 |
+
|
| 154 |
+
2. Modify and create derivative works of sample source code
|
| 155 |
+
delivered in the SDK, and
|
| 156 |
+
|
| 157 |
+
3. Distribute those portions of the SDK that are identified
|
| 158 |
+
in this Agreement as distributable, as incorporated in
|
| 159 |
+
object code format into a software application that meets
|
| 160 |
+
the distribution requirements indicated in this Agreement.
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
1.1.2. Distribution Requirements
|
| 164 |
+
|
| 165 |
+
These are the distribution requirements for you to exercise
|
| 166 |
+
the distribution grant:
|
| 167 |
+
|
| 168 |
+
1. Your application must have material additional
|
| 169 |
+
functionality, beyond the included portions of the SDK.
|
| 170 |
+
|
| 171 |
+
2. The distributable portions of the SDK shall only be
|
| 172 |
+
accessed by your application.
|
| 173 |
+
|
| 174 |
+
3. The following notice shall be included in modifications
|
| 175 |
+
and derivative works of sample source code distributed:
|
| 176 |
+
“This software contains source code provided by NVIDIA
|
| 177 |
+
Corporation.”
|
| 178 |
+
|
| 179 |
+
4. Unless a developer tool is identified in this Agreement
|
| 180 |
+
as distributable, it is delivered for your internal use
|
| 181 |
+
only.
|
| 182 |
+
|
| 183 |
+
5. The terms under which you distribute your application
|
| 184 |
+
must be consistent with the terms of this Agreement,
|
| 185 |
+
including (without limitation) terms relating to the
|
| 186 |
+
license grant and license restrictions and protection of
|
| 187 |
+
NVIDIA’s intellectual property rights. Additionally, you
|
| 188 |
+
agree that you will protect the privacy, security and
|
| 189 |
+
legal rights of your application users.
|
| 190 |
+
|
| 191 |
+
6. You agree to notify NVIDIA in writing of any known or
|
| 192 |
+
suspected distribution or use of the SDK not in compliance
|
| 193 |
+
with the requirements of this Agreement, and to enforce
|
| 194 |
+
the terms of your agreements with respect to distributed
|
| 195 |
+
SDK.
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
1.1.3. Authorized Users
|
| 199 |
+
|
| 200 |
+
You may allow employees and contractors of your entity or of
|
| 201 |
+
your subsidiary(ies) to access and use the SDK from your
|
| 202 |
+
secure network to perform work on your behalf.
|
| 203 |
+
|
| 204 |
+
If you are an academic institution you may allow users
|
| 205 |
+
enrolled or employed by the academic institution to access and
|
| 206 |
+
use the SDK from your secure network.
|
| 207 |
+
|
| 208 |
+
You are responsible for the compliance with the terms of this
|
| 209 |
+
Agreement by your authorized users. If you become aware that
|
| 210 |
+
your authorized users didn’t follow the terms of this
|
| 211 |
+
Agreement, you agree to take reasonable steps to resolve the
|
| 212 |
+
non-compliance and prevent new occurrences.
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
1.1.4. Pre-Release SDK
|
| 216 |
+
|
| 217 |
+
The SDK versions identified as alpha, beta, preview or
|
| 218 |
+
otherwise as pre-release, may not be fully functional, may
|
| 219 |
+
contain errors or design flaws, and may have reduced or
|
| 220 |
+
different security, privacy, accessibility, availability, and
|
| 221 |
+
reliability standards relative to commercial versions of
|
| 222 |
+
NVIDIA software and materials. Use of a pre-release SDK may
|
| 223 |
+
result in unexpected results, loss of data, project delays or
|
| 224 |
+
other unpredictable damage or loss.
|
| 225 |
+
|
| 226 |
+
You may use a pre-release SDK at your own risk, understanding
|
| 227 |
+
that pre-release SDKs are not intended for use in production
|
| 228 |
+
or business-critical systems.
|
| 229 |
+
|
| 230 |
+
NVIDIA may choose not to make available a commercial version
|
| 231 |
+
of any pre-release SDK. NVIDIA may also choose to abandon
|
| 232 |
+
development and terminate the availability of a pre-release
|
| 233 |
+
SDK at any time without liability.
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
1.1.5. Updates
|
| 237 |
+
|
| 238 |
+
NVIDIA may, at its option, make available patches, workarounds
|
| 239 |
+
or other updates to this SDK. Unless the updates are provided
|
| 240 |
+
with their separate governing terms, they are deemed part of
|
| 241 |
+
the SDK licensed to you as provided in this Agreement. You
|
| 242 |
+
agree that the form and content of the SDK that NVIDIA
|
| 243 |
+
provides may change without prior notice to you. While NVIDIA
|
| 244 |
+
generally maintains compatibility between versions, NVIDIA may
|
| 245 |
+
in some cases make changes that introduce incompatibilities in
|
| 246 |
+
future versions of the SDK.
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
1.1.6. Third Party Licenses
|
| 250 |
+
|
| 251 |
+
The SDK may come bundled with, or otherwise include or be
|
| 252 |
+
distributed with, third party software licensed by a NVIDIA
|
| 253 |
+
supplier and/or open source software provided under an open
|
| 254 |
+
source license. Use of third party software is subject to the
|
| 255 |
+
third-party license terms, or in the absence of third party
|
| 256 |
+
terms, the terms of this Agreement. Copyright to third party
|
| 257 |
+
software is held by the copyright holders indicated in the
|
| 258 |
+
third-party software or license.
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
1.1.7. Reservation of Rights
|
| 262 |
+
|
| 263 |
+
NVIDIA reserves all rights, title, and interest in and to the
|
| 264 |
+
SDK, not expressly granted to you under this Agreement.
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
1.2. Limitations
|
| 268 |
+
|
| 269 |
+
The following license limitations apply to your use of the
|
| 270 |
+
SDK:
|
| 271 |
+
|
| 272 |
+
1. You may not reverse engineer, decompile or disassemble,
|
| 273 |
+
or remove copyright or other proprietary notices from any
|
| 274 |
+
portion of the SDK or copies of the SDK.
|
| 275 |
+
|
| 276 |
+
2. Except as expressly provided in this Agreement, you may
|
| 277 |
+
not copy, sell, rent, sublicense, transfer, distribute,
|
| 278 |
+
modify, or create derivative works of any portion of the
|
| 279 |
+
SDK. For clarity, you may not distribute or sublicense the
|
| 280 |
+
SDK as a stand-alone product.
|
| 281 |
+
|
| 282 |
+
3. Unless you have an agreement with NVIDIA for this
|
| 283 |
+
purpose, you may not indicate that an application created
|
| 284 |
+
with the SDK is sponsored or endorsed by NVIDIA.
|
| 285 |
+
|
| 286 |
+
4. You may not bypass, disable, or circumvent any
|
| 287 |
+
encryption, security, digital rights management or
|
| 288 |
+
authentication mechanism in the SDK.
|
| 289 |
+
|
| 290 |
+
5. You may not use the SDK in any manner that would cause it
|
| 291 |
+
to become subject to an open source software license. As
|
| 292 |
+
examples, licenses that require as a condition of use,
|
| 293 |
+
modification, and/or distribution that the SDK be:
|
| 294 |
+
|
| 295 |
+
a. Disclosed or distributed in source code form;
|
| 296 |
+
|
| 297 |
+
b. Licensed for the purpose of making derivative works;
|
| 298 |
+
or
|
| 299 |
+
|
| 300 |
+
c. Redistributable at no charge.
|
| 301 |
+
|
| 302 |
+
6. Unless you have an agreement with NVIDIA for this
|
| 303 |
+
purpose, you may not use the SDK with any system or
|
| 304 |
+
application where the use or failure of the system or
|
| 305 |
+
application can reasonably be expected to threaten or
|
| 306 |
+
result in personal injury, death, or catastrophic loss.
|
| 307 |
+
Examples include use in avionics, navigation, military,
|
| 308 |
+
medical, life support or other life critical applications.
|
| 309 |
+
NVIDIA does not design, test or manufacture the SDK for
|
| 310 |
+
these critical uses and NVIDIA shall not be liable to you
|
| 311 |
+
or any third party, in whole or in part, for any claims or
|
| 312 |
+
damages arising from such uses.
|
| 313 |
+
|
| 314 |
+
7. You agree to defend, indemnify and hold harmless NVIDIA
|
| 315 |
+
and its affiliates, and their respective employees,
|
| 316 |
+
contractors, agents, officers and directors, from and
|
| 317 |
+
against any and all claims, damages, obligations, losses,
|
| 318 |
+
liabilities, costs or debt, fines, restitutions and
|
| 319 |
+
expenses (including but not limited to attorney’s fees
|
| 320 |
+
and costs incident to establishing the right of
|
| 321 |
+
indemnification) arising out of or related to your use of
|
| 322 |
+
the SDK outside of the scope of this Agreement, or not in
|
| 323 |
+
compliance with its terms.
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
1.3. Ownership
|
| 327 |
+
|
| 328 |
+
1. NVIDIA or its licensors hold all rights, title and
|
| 329 |
+
interest in and to the SDK and its modifications and
|
| 330 |
+
derivative works, including their respective intellectual
|
| 331 |
+
property rights, subject to your rights described in this
|
| 332 |
+
section. This SDK may include software and materials from
|
| 333 |
+
NVIDIA’s licensors, and these licensors are intended
|
| 334 |
+
third party beneficiaries that may enforce this Agreement
|
| 335 |
+
with respect to their intellectual property rights.
|
| 336 |
+
|
| 337 |
+
2. You hold all rights, title and interest in and to your
|
| 338 |
+
applications and your derivative works of the sample
|
| 339 |
+
source code delivered in the SDK, including their
|
| 340 |
+
respective intellectual property rights, subject to
|
| 341 |
+
NVIDIA’s rights described in this section.
|
| 342 |
+
|
| 343 |
+
3. You may, but don’t have to, provide to NVIDIA
|
| 344 |
+
suggestions, feature requests or other feedback regarding
|
| 345 |
+
the SDK, including possible enhancements or modifications
|
| 346 |
+
to the SDK. For any feedback that you voluntarily provide,
|
| 347 |
+
you hereby grant NVIDIA and its affiliates a perpetual,
|
| 348 |
+
non-exclusive, worldwide, irrevocable license to use,
|
| 349 |
+
reproduce, modify, license, sublicense (through multiple
|
| 350 |
+
tiers of sublicensees), and distribute (through multiple
|
| 351 |
+
tiers of distributors) it without the payment of any
|
| 352 |
+
royalties or fees to you. NVIDIA will use feedback at its
|
| 353 |
+
choice. NVIDIA is constantly looking for ways to improve
|
| 354 |
+
its products, so you may send feedback to NVIDIA through
|
| 355 |
+
the developer portal at https://developer.nvidia.com.
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
1.4. No Warranties
|
| 359 |
+
|
| 360 |
+
THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
|
| 361 |
+
FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
|
| 362 |
+
ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
|
| 363 |
+
OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
|
| 364 |
+
BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
|
| 365 |
+
FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
|
| 366 |
+
ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
|
| 367 |
+
WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
|
| 368 |
+
DEALING OR COURSE OF TRADE.
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
1.5. Limitation of Liability
|
| 372 |
+
|
| 373 |
+
TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
|
| 374 |
+
AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
|
| 375 |
+
PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
|
| 376 |
+
OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
|
| 377 |
+
PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
|
| 378 |
+
WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
|
| 379 |
+
WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
|
| 380 |
+
OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
|
| 381 |
+
PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
|
| 382 |
+
LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
|
| 383 |
+
TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
|
| 384 |
+
AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
|
| 385 |
+
NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
|
| 386 |
+
LIMIT.
|
| 387 |
+
|
| 388 |
+
These exclusions and limitations of liability shall apply
|
| 389 |
+
regardless if NVIDIA or its affiliates have been advised of
|
| 390 |
+
the possibility of such damages, and regardless of whether a
|
| 391 |
+
remedy fails its essential purpose. These exclusions and
|
| 392 |
+
limitations of liability form an essential basis of the
|
| 393 |
+
bargain between the parties, and, absent any of these
|
| 394 |
+
exclusions or limitations of liability, the provisions of this
|
| 395 |
+
Agreement, including, without limitation, the economic terms,
|
| 396 |
+
would be substantially different.
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
1.6. Termination
|
| 400 |
+
|
| 401 |
+
1. This Agreement will continue to apply until terminated by
|
| 402 |
+
either you or NVIDIA as described below.
|
| 403 |
+
|
| 404 |
+
2. If you want to terminate this Agreement, you may do so by
|
| 405 |
+
stopping to use the SDK.
|
| 406 |
+
|
| 407 |
+
3. NVIDIA may, at any time, terminate this Agreement if:
|
| 408 |
+
|
| 409 |
+
a. (i) you fail to comply with any term of this
|
| 410 |
+
Agreement and the non-compliance is not fixed within
|
| 411 |
+
thirty (30) days following notice from NVIDIA (or
|
| 412 |
+
immediately if you violate NVIDIA’s intellectual
|
| 413 |
+
property rights);
|
| 414 |
+
|
| 415 |
+
b. (ii) you commence or participate in any legal
|
| 416 |
+
proceeding against NVIDIA with respect to the SDK; or
|
| 417 |
+
|
| 418 |
+
c. (iii) NVIDIA decides to no longer provide the SDK in
|
| 419 |
+
a country or, in NVIDIA’s sole discretion, the
|
| 420 |
+
continued use of it is no longer commercially viable.
|
| 421 |
+
|
| 422 |
+
4. Upon any termination of this Agreement, you agree to
|
| 423 |
+
promptly discontinue use of the SDK and destroy all copies
|
| 424 |
+
in your possession or control. Your prior distributions in
|
| 425 |
+
accordance with this Agreement are not affected by the
|
| 426 |
+
termination of this Agreement. Upon written request, you
|
| 427 |
+
will certify in writing that you have complied with your
|
| 428 |
+
commitments under this section. Upon any termination of
|
| 429 |
+
this Agreement all provisions survive except for the
|
| 430 |
+
license grant provisions.
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
1.7. General
|
| 434 |
+
|
| 435 |
+
If you wish to assign this Agreement or your rights and
|
| 436 |
+
obligations, including by merger, consolidation, dissolution
|
| 437 |
+
or operation of law, contact NVIDIA to ask for permission. Any
|
| 438 |
+
attempted assignment not approved by NVIDIA in writing shall
|
| 439 |
+
be void and of no effect. NVIDIA may assign, delegate or
|
| 440 |
+
transfer this Agreement and its rights and obligations, and if
|
| 441 |
+
to a non-affiliate you will be notified.
|
| 442 |
+
|
| 443 |
+
You agree to cooperate with NVIDIA and provide reasonably
|
| 444 |
+
requested information to verify your compliance with this
|
| 445 |
+
Agreement.
|
| 446 |
+
|
| 447 |
+
This Agreement will be governed in all respects by the laws of
|
| 448 |
+
the United States and of the State of Delaware as those laws
|
| 449 |
+
are applied to contracts entered into and performed entirely
|
| 450 |
+
within Delaware by Delaware residents, without regard to the
|
| 451 |
+
conflicts of laws principles. The United Nations Convention on
|
| 452 |
+
Contracts for the International Sale of Goods is specifically
|
| 453 |
+
disclaimed. You agree to all terms of this Agreement in the
|
| 454 |
+
English language.
|
| 455 |
+
|
| 456 |
+
The state or federal courts residing in Santa Clara County,
|
| 457 |
+
California shall have exclusive jurisdiction over any dispute
|
| 458 |
+
or claim arising out of this Agreement. Notwithstanding this,
|
| 459 |
+
you agree that NVIDIA shall still be allowed to apply for
|
| 460 |
+
injunctive remedies or an equivalent type of urgent legal
|
| 461 |
+
relief in any jurisdiction.
|
| 462 |
+
|
| 463 |
+
If any court of competent jurisdiction determines that any
|
| 464 |
+
provision of this Agreement is illegal, invalid or
|
| 465 |
+
unenforceable, such provision will be construed as limited to
|
| 466 |
+
the extent necessary to be consistent with and fully
|
| 467 |
+
enforceable under the law and the remaining provisions will
|
| 468 |
+
remain in full force and effect. Unless otherwise specified,
|
| 469 |
+
remedies are cumulative.
|
| 470 |
+
|
| 471 |
+
Each party acknowledges and agrees that the other is an
|
| 472 |
+
independent contractor in the performance of this Agreement.
|
| 473 |
+
|
| 474 |
+
The SDK has been developed entirely at private expense and is
|
| 475 |
+
“commercial items” consisting of “commercial computer
|
| 476 |
+
software” and “commercial computer software
|
| 477 |
+
documentation” provided with RESTRICTED RIGHTS. Use,
|
| 478 |
+
duplication or disclosure by the U.S. Government or a U.S.
|
| 479 |
+
Government subcontractor is subject to the restrictions in
|
| 480 |
+
this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
|
| 481 |
+
in subparagraphs (c)(1) and (2) of the Commercial Computer
|
| 482 |
+
Software - Restricted Rights clause at FAR 52.227-19, as
|
| 483 |
+
applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
|
| 484 |
+
Expressway, Santa Clara, CA 95051.
|
| 485 |
+
|
| 486 |
+
The SDK is subject to United States export laws and
|
| 487 |
+
regulations. You agree that you will not ship, transfer or
|
| 488 |
+
export the SDK into any country, or use the SDK in any manner,
|
| 489 |
+
prohibited by the United States Bureau of Industry and
|
| 490 |
+
Security or economic sanctions regulations administered by the
|
| 491 |
+
U.S. Department of Treasury’s Office of Foreign Assets
|
| 492 |
+
Control (OFAC), or any applicable export laws, restrictions or
|
| 493 |
+
regulations. These laws include restrictions on destinations,
|
| 494 |
+
end users and end use. By accepting this Agreement, you
|
| 495 |
+
confirm that you are not a resident or citizen of any country
|
| 496 |
+
currently embargoed by the U.S. and that you are not otherwise
|
| 497 |
+
prohibited from receiving the SDK.
|
| 498 |
+
|
| 499 |
+
Any notice delivered by NVIDIA to you under this Agreement
|
| 500 |
+
will be delivered via mail, email or fax. You agree that any
|
| 501 |
+
notices that NVIDIA sends you electronically will satisfy any
|
| 502 |
+
legal communication requirements. Please direct your legal
|
| 503 |
+
notices or other correspondence to NVIDIA Corporation, 2788
|
| 504 |
+
San Tomas Expressway, Santa Clara, California 95051, United
|
| 505 |
+
States of America, Attention: Legal Department.
|
| 506 |
+
|
| 507 |
+
This Agreement and any exhibits incorporated into this
|
| 508 |
+
Agreement constitute the entire agreement of the parties with
|
| 509 |
+
respect to the subject matter of this Agreement and supersede
|
| 510 |
+
all prior negotiations or documentation exchanged between the
|
| 511 |
+
parties relating to this SDK license. Any additional and/or
|
| 512 |
+
conflicting terms on documents issued by you are null, void,
|
| 513 |
+
and invalid. Any amendment or waiver under this Agreement
|
| 514 |
+
shall be in writing and signed by representatives of both
|
| 515 |
+
parties.
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
2. CUDA Toolkit Supplement to Software License Agreement for
|
| 519 |
+
NVIDIA Software Development Kits
|
| 520 |
+
------------------------------------------------------------
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
Release date: August 16, 2018
|
| 524 |
+
-----------------------------
|
| 525 |
+
|
| 526 |
+
The terms in this supplement govern your use of the NVIDIA
|
| 527 |
+
CUDA Toolkit SDK under the terms of your license agreement
|
| 528 |
+
(“Agreement”) as modified by this supplement. Capitalized
|
| 529 |
+
terms used but not defined below have the meaning assigned to
|
| 530 |
+
them in the Agreement.
|
| 531 |
+
|
| 532 |
+
This supplement is an exhibit to the Agreement and is
|
| 533 |
+
incorporated as an integral part of the Agreement. In the
|
| 534 |
+
event of conflict between the terms in this supplement and the
|
| 535 |
+
terms in the Agreement, the terms in this supplement govern.
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
2.1. License Scope
|
| 539 |
+
|
| 540 |
+
The SDK is licensed for you to develop applications only for
|
| 541 |
+
use in systems with NVIDIA GPUs.
|
| 542 |
+
|
| 543 |
+
|
| 544 |
+
2.2. Distribution
|
| 545 |
+
|
| 546 |
+
The portions of the SDK that are distributable under the
|
| 547 |
+
Agreement are listed in Attachment A.
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
2.3. Operating Systems
|
| 551 |
+
|
| 552 |
+
Those portions of the SDK designed exclusively for use on the
|
| 553 |
+
Linux or FreeBSD operating systems, or other operating systems
|
| 554 |
+
derived from the source code to these operating systems, may
|
| 555 |
+
be copied and redistributed for use in accordance with this
|
| 556 |
+
Agreement, provided that the object code files are not
|
| 557 |
+
modified in any way (except for unzipping of compressed
|
| 558 |
+
files).
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
2.4. Audio and Video Encoders and Decoders
|
| 562 |
+
|
| 563 |
+
You acknowledge and agree that it is your sole responsibility
|
| 564 |
+
to obtain any additional third-party licenses required to
|
| 565 |
+
make, have made, use, have used, sell, import, and offer for
|
| 566 |
+
sale your products or services that include or incorporate any
|
| 567 |
+
third-party software and content relating to audio and/or
|
| 568 |
+
video encoders and decoders from, including but not limited
|
| 569 |
+
to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
|
| 570 |
+
MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
|
| 571 |
+
under this Agreement any necessary patent or other rights with
|
| 572 |
+
respect to any audio and/or video encoders and decoders.
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
2.5. Licensing
|
| 576 |
+
|
| 577 |
+
If the distribution terms in this Agreement are not suitable
|
| 578 |
+
for your organization, or for any questions regarding this
|
| 579 |
+
Agreement, please contact NVIDIA at
|
| 580 |
+
nvidia-compute-license-questions@nvidia.com.
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
2.6. Attachment A
|
| 584 |
+
|
| 585 |
+
The following portions of the SDK are distributable under the
|
| 586 |
+
Agreement:
|
| 587 |
+
|
| 588 |
+
Component
|
| 589 |
+
|
| 590 |
+
CUDA Runtime
|
| 591 |
+
|
| 592 |
+
Windows
|
| 593 |
+
|
| 594 |
+
cudart.dll, cudart_static.lib, cudadevrt.lib
|
| 595 |
+
|
| 596 |
+
Mac OSX
|
| 597 |
+
|
| 598 |
+
libcudart.dylib, libcudart_static.a, libcudadevrt.a
|
| 599 |
+
|
| 600 |
+
Linux
|
| 601 |
+
|
| 602 |
+
libcudart.so, libcudart_static.a, libcudadevrt.a
|
| 603 |
+
|
| 604 |
+
Android
|
| 605 |
+
|
| 606 |
+
libcudart.so, libcudart_static.a, libcudadevrt.a
|
| 607 |
+
|
| 608 |
+
Component
|
| 609 |
+
|
| 610 |
+
CUDA FFT Library
|
| 611 |
+
|
| 612 |
+
Windows
|
| 613 |
+
|
| 614 |
+
cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
|
| 615 |
+
|
| 616 |
+
Mac OSX
|
| 617 |
+
|
| 618 |
+
libcufft.dylib, libcufft_static.a, libcufftw.dylib,
|
| 619 |
+
libcufftw_static.a
|
| 620 |
+
|
| 621 |
+
Linux
|
| 622 |
+
|
| 623 |
+
libcufft.so, libcufft_static.a, libcufftw.so,
|
| 624 |
+
libcufftw_static.a
|
| 625 |
+
|
| 626 |
+
Android
|
| 627 |
+
|
| 628 |
+
libcufft.so, libcufft_static.a, libcufftw.so,
|
| 629 |
+
libcufftw_static.a
|
| 630 |
+
|
| 631 |
+
Component
|
| 632 |
+
|
| 633 |
+
CUDA BLAS Library
|
| 634 |
+
|
| 635 |
+
Windows
|
| 636 |
+
|
| 637 |
+
cublas.dll, cublasLt.dll
|
| 638 |
+
|
| 639 |
+
Mac OSX
|
| 640 |
+
|
| 641 |
+
libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
|
| 642 |
+
libcublasLt_static.a
|
| 643 |
+
|
| 644 |
+
Linux
|
| 645 |
+
|
| 646 |
+
libcublas.so, libcublasLt.so, libcublas_static.a,
|
| 647 |
+
libcublasLt_static.a
|
| 648 |
+
|
| 649 |
+
Android
|
| 650 |
+
|
| 651 |
+
libcublas.so, libcublasLt.so, libcublas_static.a,
|
| 652 |
+
libcublasLt_static.a
|
| 653 |
+
|
| 654 |
+
Component
|
| 655 |
+
|
| 656 |
+
NVIDIA "Drop-in" BLAS Library
|
| 657 |
+
|
| 658 |
+
Windows
|
| 659 |
+
|
| 660 |
+
nvblas.dll
|
| 661 |
+
|
| 662 |
+
Mac OSX
|
| 663 |
+
|
| 664 |
+
libnvblas.dylib
|
| 665 |
+
|
| 666 |
+
Linux
|
| 667 |
+
|
| 668 |
+
libnvblas.so
|
| 669 |
+
|
| 670 |
+
Component
|
| 671 |
+
|
| 672 |
+
CUDA Sparse Matrix Library
|
| 673 |
+
|
| 674 |
+
Windows
|
| 675 |
+
|
| 676 |
+
cusparse.dll, cusparse.lib
|
| 677 |
+
|
| 678 |
+
Mac OSX
|
| 679 |
+
|
| 680 |
+
libcusparse.dylib, libcusparse_static.a
|
| 681 |
+
|
| 682 |
+
Linux
|
| 683 |
+
|
| 684 |
+
libcusparse.so, libcusparse_static.a
|
| 685 |
+
|
| 686 |
+
Android
|
| 687 |
+
|
| 688 |
+
libcusparse.so, libcusparse_static.a
|
| 689 |
+
|
| 690 |
+
Component
|
| 691 |
+
|
| 692 |
+
CUDA Linear Solver Library
|
| 693 |
+
|
| 694 |
+
Windows
|
| 695 |
+
|
| 696 |
+
cusolver.dll, cusolver.lib
|
| 697 |
+
|
| 698 |
+
Mac OSX
|
| 699 |
+
|
| 700 |
+
libcusolver.dylib, libcusolver_static.a
|
| 701 |
+
|
| 702 |
+
Linux
|
| 703 |
+
|
| 704 |
+
libcusolver.so, libcusolver_static.a
|
| 705 |
+
|
| 706 |
+
Android
|
| 707 |
+
|
| 708 |
+
libcusolver.so, libcusolver_static.a
|
| 709 |
+
|
| 710 |
+
Component
|
| 711 |
+
|
| 712 |
+
CUDA Random Number Generation Library
|
| 713 |
+
|
| 714 |
+
Windows
|
| 715 |
+
|
| 716 |
+
curand.dll, curand.lib
|
| 717 |
+
|
| 718 |
+
Mac OSX
|
| 719 |
+
|
| 720 |
+
libcurand.dylib, libcurand_static.a
|
| 721 |
+
|
| 722 |
+
Linux
|
| 723 |
+
|
| 724 |
+
libcurand.so, libcurand_static.a
|
| 725 |
+
|
| 726 |
+
Android
|
| 727 |
+
|
| 728 |
+
libcurand.so, libcurand_static.a
|
| 729 |
+
|
| 730 |
+
Component
|
| 731 |
+
|
| 732 |
+
CUDA Accelerated Graph Library
|
| 733 |
+
|
| 734 |
+
Component
|
| 735 |
+
|
| 736 |
+
NVIDIA Performance Primitives Library
|
| 737 |
+
|
| 738 |
+
Windows
|
| 739 |
+
|
| 740 |
+
nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
|
| 741 |
+
nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
|
| 742 |
+
nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
|
| 743 |
+
nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
|
| 744 |
+
nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
|
| 745 |
+
|
| 746 |
+
Mac OSX
|
| 747 |
+
|
| 748 |
+
libnppc.dylib, libnppc_static.a, libnppial.dylib,
|
| 749 |
+
libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
|
| 750 |
+
libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
|
| 751 |
+
libnppidei_static.a, libnppif.dylib, libnppif_static.a,
|
| 752 |
+
libnppig.dylib, libnppig_static.a, libnppim.dylib,
|
| 753 |
+
libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
|
| 754 |
+
libnpps.dylib, libnpps_static.a
|
| 755 |
+
|
| 756 |
+
Linux
|
| 757 |
+
|
| 758 |
+
libnppc.so, libnppc_static.a, libnppial.so,
|
| 759 |
+
libnppial_static.a, libnppicc.so, libnppicc_static.a,
|
| 760 |
+
libnppicom.so, libnppicom_static.a, libnppidei.so,
|
| 761 |
+
libnppidei_static.a, libnppif.so, libnppif_static.a
|
| 762 |
+
libnppig.so, libnppig_static.a, libnppim.so,
|
| 763 |
+
libnppim_static.a, libnppist.so, libnppist_static.a,
|
| 764 |
+
libnppisu.so, libnppisu_static.a, libnppitc.so
|
| 765 |
+
libnppitc_static.a, libnpps.so, libnpps_static.a
|
| 766 |
+
|
| 767 |
+
Android
|
| 768 |
+
|
| 769 |
+
libnppc.so, libnppc_static.a, libnppial.so,
|
| 770 |
+
libnppial_static.a, libnppicc.so, libnppicc_static.a,
|
| 771 |
+
libnppicom.so, libnppicom_static.a, libnppidei.so,
|
| 772 |
+
libnppidei_static.a, libnppif.so, libnppif_static.a
|
| 773 |
+
libnppig.so, libnppig_static.a, libnppim.so,
|
| 774 |
+
libnppim_static.a, libnppist.so, libnppist_static.a,
|
| 775 |
+
libnppisu.so, libnppisu_static.a, libnppitc.so
|
| 776 |
+
libnppitc_static.a, libnpps.so, libnpps_static.a
|
| 777 |
+
|
| 778 |
+
Component
|
| 779 |
+
|
| 780 |
+
NVIDIA JPEG Library
|
| 781 |
+
|
| 782 |
+
Linux
|
| 783 |
+
|
| 784 |
+
libnvjpeg.so, libnvjpeg_static.a
|
| 785 |
+
|
| 786 |
+
Component
|
| 787 |
+
|
| 788 |
+
Internal common library required for statically linking to
|
| 789 |
+
cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
|
| 790 |
+
|
| 791 |
+
Mac OSX
|
| 792 |
+
|
| 793 |
+
libculibos.a
|
| 794 |
+
|
| 795 |
+
Linux
|
| 796 |
+
|
| 797 |
+
libculibos.a
|
| 798 |
+
|
| 799 |
+
Component
|
| 800 |
+
|
| 801 |
+
NVIDIA Runtime Compilation Library and Header
|
| 802 |
+
|
| 803 |
+
All
|
| 804 |
+
|
| 805 |
+
nvrtc.h
|
| 806 |
+
|
| 807 |
+
Windows
|
| 808 |
+
|
| 809 |
+
nvrtc.dll, nvrtc-builtins.dll
|
| 810 |
+
|
| 811 |
+
Mac OSX
|
| 812 |
+
|
| 813 |
+
libnvrtc.dylib, libnvrtc-builtins.dylib
|
| 814 |
+
|
| 815 |
+
Linux
|
| 816 |
+
|
| 817 |
+
libnvrtc.so, libnvrtc-builtins.so
|
| 818 |
+
|
| 819 |
+
Component
|
| 820 |
+
|
| 821 |
+
NVIDIA Optimizing Compiler Library
|
| 822 |
+
|
| 823 |
+
Windows
|
| 824 |
+
|
| 825 |
+
nvvm.dll
|
| 826 |
+
|
| 827 |
+
Mac OSX
|
| 828 |
+
|
| 829 |
+
libnvvm.dylib
|
| 830 |
+
|
| 831 |
+
Linux
|
| 832 |
+
|
| 833 |
+
libnvvm.so
|
| 834 |
+
|
| 835 |
+
Component
|
| 836 |
+
|
| 837 |
+
NVIDIA Common Device Math Functions Library
|
| 838 |
+
|
| 839 |
+
Windows
|
| 840 |
+
|
| 841 |
+
libdevice.10.bc
|
| 842 |
+
|
| 843 |
+
Mac OSX
|
| 844 |
+
|
| 845 |
+
libdevice.10.bc
|
| 846 |
+
|
| 847 |
+
Linux
|
| 848 |
+
|
| 849 |
+
libdevice.10.bc
|
| 850 |
+
|
| 851 |
+
Component
|
| 852 |
+
|
| 853 |
+
CUDA Occupancy Calculation Header Library
|
| 854 |
+
|
| 855 |
+
All
|
| 856 |
+
|
| 857 |
+
cuda_occupancy.h
|
| 858 |
+
|
| 859 |
+
Component
|
| 860 |
+
|
| 861 |
+
CUDA Half Precision Headers
|
| 862 |
+
|
| 863 |
+
All
|
| 864 |
+
|
| 865 |
+
cuda_fp16.h, cuda_fp16.hpp
|
| 866 |
+
|
| 867 |
+
Component
|
| 868 |
+
|
| 869 |
+
CUDA Profiling Tools Interface (CUPTI) Library
|
| 870 |
+
|
| 871 |
+
Windows
|
| 872 |
+
|
| 873 |
+
cupti.dll
|
| 874 |
+
|
| 875 |
+
Mac OSX
|
| 876 |
+
|
| 877 |
+
libcupti.dylib
|
| 878 |
+
|
| 879 |
+
Linux
|
| 880 |
+
|
| 881 |
+
libcupti.so
|
| 882 |
+
|
| 883 |
+
Component
|
| 884 |
+
|
| 885 |
+
NVIDIA Tools Extension Library
|
| 886 |
+
|
| 887 |
+
Windows
|
| 888 |
+
|
| 889 |
+
nvToolsExt.dll, nvToolsExt.lib
|
| 890 |
+
|
| 891 |
+
Mac OSX
|
| 892 |
+
|
| 893 |
+
libnvToolsExt.dylib
|
| 894 |
+
|
| 895 |
+
Linux
|
| 896 |
+
|
| 897 |
+
libnvToolsExt.so
|
| 898 |
+
|
| 899 |
+
Component
|
| 900 |
+
|
| 901 |
+
NVIDIA CUDA Driver Libraries
|
| 902 |
+
|
| 903 |
+
Linux
|
| 904 |
+
|
| 905 |
+
libcuda.so, libnvidia-fatbinaryloader.so,
|
| 906 |
+
libnvidia-ptxjitcompiler.so
|
| 907 |
+
|
| 908 |
+
The NVIDIA CUDA Driver Libraries are only distributable in
|
| 909 |
+
applications that meet this criteria:
|
| 910 |
+
|
| 911 |
+
1. The application was developed starting from a NVIDIA CUDA
|
| 912 |
+
container obtained from Docker Hub or the NVIDIA GPU
|
| 913 |
+
Cloud, and
|
| 914 |
+
|
| 915 |
+
2. The resulting application is packaged as a Docker
|
| 916 |
+
container and distributed to users on Docker Hub or the
|
| 917 |
+
NVIDIA GPU Cloud only.
|
| 918 |
+
|
| 919 |
+
|
| 920 |
+
2.7. Attachment B
|
| 921 |
+
|
| 922 |
+
|
| 923 |
+
Additional Licensing Obligations
|
| 924 |
+
|
| 925 |
+
The following third party components included in the SOFTWARE
|
| 926 |
+
are licensed to Licensee pursuant to the following terms and
|
| 927 |
+
conditions:
|
| 928 |
+
|
| 929 |
+
1. Licensee's use of the GDB third party component is
|
| 930 |
+
subject to the terms and conditions of GNU GPL v3:
|
| 931 |
+
|
| 932 |
+
This product includes copyrighted third-party software licensed
|
| 933 |
+
under the terms of the GNU General Public License v3 ("GPL v3").
|
| 934 |
+
All third-party software packages are copyright by their respective
|
| 935 |
+
authors. GPL v3 terms and conditions are hereby incorporated into
|
| 936 |
+
the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
|
| 937 |
+
|
| 938 |
+
Consistent with these licensing requirements, the software
|
| 939 |
+
listed below is provided under the terms of the specified
|
| 940 |
+
open source software licenses. To obtain source code for
|
| 941 |
+
software provided under licenses that require
|
| 942 |
+
redistribution of source code, including the GNU General
|
| 943 |
+
Public License (GPL) and GNU Lesser General Public License
|
| 944 |
+
(LGPL), contact oss-requests@nvidia.com. This offer is
|
| 945 |
+
valid for a period of three (3) years from the date of the
|
| 946 |
+
distribution of this product by NVIDIA CORPORATION.
|
| 947 |
+
|
| 948 |
+
Component License
|
| 949 |
+
CUDA-GDB GPL v3
|
| 950 |
+
|
| 951 |
+
2. Licensee represents and warrants that any and all third
|
| 952 |
+
party licensing and/or royalty payment obligations in
|
| 953 |
+
connection with Licensee's use of the H.264 video codecs
|
| 954 |
+
are solely the responsibility of Licensee.
|
| 955 |
+
|
| 956 |
+
3. Licensee's use of the Thrust library is subject to the
|
| 957 |
+
terms and conditions of the Apache License Version 2.0.
|
| 958 |
+
All third-party software packages are copyright by their
|
| 959 |
+
respective authors. Apache License Version 2.0 terms and
|
| 960 |
+
conditions are hereby incorporated into the Agreement by
|
| 961 |
+
this reference.
|
| 962 |
+
http://www.apache.org/licenses/LICENSE-2.0.html
|
| 963 |
+
|
| 964 |
+
In addition, Licensee acknowledges the following notice:
|
| 965 |
+
Thrust includes source code from the Boost Iterator,
|
| 966 |
+
Tuple, System, and Random Number libraries.
|
| 967 |
+
|
| 968 |
+
Boost Software License - Version 1.0 - August 17th, 2003
|
| 969 |
+
. . . .
|
| 970 |
+
|
| 971 |
+
Permission is hereby granted, free of charge, to any person or
|
| 972 |
+
organization obtaining a copy of the software and accompanying
|
| 973 |
+
documentation covered by this license (the "Software") to use,
|
| 974 |
+
reproduce, display, distribute, execute, and transmit the Software,
|
| 975 |
+
and to prepare derivative works of the Software, and to permit
|
| 976 |
+
third-parties to whom the Software is furnished to do so, all
|
| 977 |
+
subject to the following:
|
| 978 |
+
|
| 979 |
+
The copyright notices in the Software and this entire statement,
|
| 980 |
+
including the above license grant, this restriction and the following
|
| 981 |
+
disclaimer, must be included in all copies of the Software, in whole
|
| 982 |
+
or in part, and all derivative works of the Software, unless such
|
| 983 |
+
copies or derivative works are solely in the form of machine-executable
|
| 984 |
+
object code generated by a source language processor.
|
| 985 |
+
|
| 986 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
| 987 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
| 988 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
|
| 989 |
+
NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
|
| 990 |
+
ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
|
| 991 |
+
OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
|
| 992 |
+
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
| 993 |
+
OTHER DEALINGS IN THE SOFTWARE.
|
| 994 |
+
|
| 995 |
+
4. Licensee's use of the LLVM third party component is
|
| 996 |
+
subject to the following terms and conditions:
|
| 997 |
+
|
| 998 |
+
======================================================
|
| 999 |
+
LLVM Release License
|
| 1000 |
+
======================================================
|
| 1001 |
+
University of Illinois/NCSA
|
| 1002 |
+
Open Source License
|
| 1003 |
+
|
| 1004 |
+
Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
|
| 1005 |
+
All rights reserved.
|
| 1006 |
+
|
| 1007 |
+
Developed by:
|
| 1008 |
+
|
| 1009 |
+
LLVM Team
|
| 1010 |
+
|
| 1011 |
+
University of Illinois at Urbana-Champaign
|
| 1012 |
+
|
| 1013 |
+
http://llvm.org
|
| 1014 |
+
|
| 1015 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 1016 |
+
of this software and associated documentation files (the "Software"), to
|
| 1017 |
+
deal with the Software without restriction, including without limitation the
|
| 1018 |
+
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
| 1019 |
+
sell copies of the Software, and to permit persons to whom the Software is
|
| 1020 |
+
furnished to do so, subject to the following conditions:
|
| 1021 |
+
|
| 1022 |
+
* Redistributions of source code must retain the above copyright notice,
|
| 1023 |
+
this list of conditions and the following disclaimers.
|
| 1024 |
+
|
| 1025 |
+
* Redistributions in binary form must reproduce the above copyright
|
| 1026 |
+
notice, this list of conditions and the following disclaimers in the
|
| 1027 |
+
documentation and/or other materials provided with the distribution.
|
| 1028 |
+
|
| 1029 |
+
* Neither the names of the LLVM Team, University of Illinois at Urbana-
|
| 1030 |
+
Champaign, nor the names of its contributors may be used to endorse or
|
| 1031 |
+
promote products derived from this Software without specific prior
|
| 1032 |
+
written permission.
|
| 1033 |
+
|
| 1034 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 1035 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 1036 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
| 1037 |
+
THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
| 1038 |
+
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
| 1039 |
+
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
| 1040 |
+
DEALINGS WITH THE SOFTWARE.
|
| 1041 |
+
|
| 1042 |
+
5. Licensee's use (e.g. nvprof) of the PCRE third party
|
| 1043 |
+
component is subject to the following terms and
|
| 1044 |
+
conditions:
|
| 1045 |
+
|
| 1046 |
+
------------
|
| 1047 |
+
PCRE LICENCE
|
| 1048 |
+
------------
|
| 1049 |
+
PCRE is a library of functions to support regular expressions whose syntax
|
| 1050 |
+
and semantics are as close as possible to those of the Perl 5 language.
|
| 1051 |
+
Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
|
| 1052 |
+
specified below. The documentation for PCRE, supplied in the "doc"
|
| 1053 |
+
directory, is distributed under the same terms as the software itself. The
|
| 1054 |
+
basic library functions are written in C and are freestanding. Also
|
| 1055 |
+
included in the distribution is a set of C++ wrapper functions, and a just-
|
| 1056 |
+
in-time compiler that can be used to optimize pattern matching. These are
|
| 1057 |
+
both optional features that can be omitted when the library is built.
|
| 1058 |
+
|
| 1059 |
+
THE BASIC LIBRARY FUNCTIONS
|
| 1060 |
+
---------------------------
|
| 1061 |
+
Written by: Philip Hazel
|
| 1062 |
+
Email local part: ph10
|
| 1063 |
+
Email domain: cam.ac.uk
|
| 1064 |
+
University of Cambridge Computing Service,
|
| 1065 |
+
Cambridge, England.
|
| 1066 |
+
Copyright (c) 1997-2012 University of Cambridge
|
| 1067 |
+
All rights reserved.
|
| 1068 |
+
|
| 1069 |
+
PCRE JUST-IN-TIME COMPILATION SUPPORT
|
| 1070 |
+
-------------------------------------
|
| 1071 |
+
Written by: Zoltan Herczeg
|
| 1072 |
+
Email local part: hzmester
|
| 1073 |
+
Emain domain: freemail.hu
|
| 1074 |
+
Copyright(c) 2010-2012 Zoltan Herczeg
|
| 1075 |
+
All rights reserved.
|
| 1076 |
+
|
| 1077 |
+
STACK-LESS JUST-IN-TIME COMPILER
|
| 1078 |
+
--------------------------------
|
| 1079 |
+
Written by: Zoltan Herczeg
|
| 1080 |
+
Email local part: hzmester
|
| 1081 |
+
Emain domain: freemail.hu
|
| 1082 |
+
Copyright(c) 2009-2012 Zoltan Herczeg
|
| 1083 |
+
All rights reserved.
|
| 1084 |
+
|
| 1085 |
+
THE C++ WRAPPER FUNCTIONS
|
| 1086 |
+
-------------------------
|
| 1087 |
+
Contributed by: Google Inc.
|
| 1088 |
+
Copyright (c) 2007-2012, Google Inc.
|
| 1089 |
+
All rights reserved.
|
| 1090 |
+
|
| 1091 |
+
THE "BSD" LICENCE
|
| 1092 |
+
-----------------
|
| 1093 |
+
Redistribution and use in source and binary forms, with or without
|
| 1094 |
+
modification, are permitted provided that the following conditions are met:
|
| 1095 |
+
|
| 1096 |
+
* Redistributions of source code must retain the above copyright notice,
|
| 1097 |
+
this list of conditions and the following disclaimer.
|
| 1098 |
+
|
| 1099 |
+
* Redistributions in binary form must reproduce the above copyright
|
| 1100 |
+
notice, this list of conditions and the following disclaimer in the
|
| 1101 |
+
documentation and/or other materials provided with the distribution.
|
| 1102 |
+
|
| 1103 |
+
* Neither the name of the University of Cambridge nor the name of Google
|
| 1104 |
+
Inc. nor the names of their contributors may be used to endorse or
|
| 1105 |
+
promote products derived from this software without specific prior
|
| 1106 |
+
written permission.
|
| 1107 |
+
|
| 1108 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 1109 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 1110 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 1111 |
+
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
| 1112 |
+
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
| 1113 |
+
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
| 1114 |
+
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 1115 |
+
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
| 1116 |
+
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
| 1117 |
+
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
| 1118 |
+
POSSIBILITY OF SUCH DAMAGE.
|
| 1119 |
+
|
| 1120 |
+
6. Some of the cuBLAS library routines were written by or
|
| 1121 |
+
derived from code written by Vasily Volkov and are subject
|
| 1122 |
+
to the Modified Berkeley Software Distribution License as
|
| 1123 |
+
follows:
|
| 1124 |
+
|
| 1125 |
+
Copyright (c) 2007-2009, Regents of the University of California
|
| 1126 |
+
|
| 1127 |
+
All rights reserved.
|
| 1128 |
+
|
| 1129 |
+
Redistribution and use in source and binary forms, with or without
|
| 1130 |
+
modification, are permitted provided that the following conditions are
|
| 1131 |
+
met:
|
| 1132 |
+
* Redistributions of source code must retain the above copyright
|
| 1133 |
+
notice, this list of conditions and the following disclaimer.
|
| 1134 |
+
* Redistributions in binary form must reproduce the above
|
| 1135 |
+
copyright notice, this list of conditions and the following
|
| 1136 |
+
disclaimer in the documentation and/or other materials provided
|
| 1137 |
+
with the distribution.
|
| 1138 |
+
* Neither the name of the University of California, Berkeley nor
|
| 1139 |
+
the names of its contributors may be used to endorse or promote
|
| 1140 |
+
products derived from this software without specific prior
|
| 1141 |
+
written permission.
|
| 1142 |
+
|
| 1143 |
+
THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
|
| 1144 |
+
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 1145 |
+
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 1146 |
+
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
|
| 1147 |
+
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
| 1148 |
+
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 1149 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
| 1150 |
+
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
| 1151 |
+
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
|
| 1152 |
+
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
| 1153 |
+
POSSIBILITY OF SUCH DAMAGE.
|
| 1154 |
+
|
| 1155 |
+
7. Some of the cuBLAS library routines were written by or
|
| 1156 |
+
derived from code written by Davide Barbieri and are
|
| 1157 |
+
subject to the Modified Berkeley Software Distribution
|
| 1158 |
+
License as follows:
|
| 1159 |
+
|
| 1160 |
+
Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
|
| 1161 |
+
|
| 1162 |
+
All rights reserved.
|
| 1163 |
+
|
| 1164 |
+
Redistribution and use in source and binary forms, with or without
|
| 1165 |
+
modification, are permitted provided that the following conditions are
|
| 1166 |
+
met:
|
| 1167 |
+
* Redistributions of source code must retain the above copyright
|
| 1168 |
+
notice, this list of conditions and the following disclaimer.
|
| 1169 |
+
* Redistributions in binary form must reproduce the above
|
| 1170 |
+
copyright notice, this list of conditions and the following
|
| 1171 |
+
disclaimer in the documentation and/or other materials provided
|
| 1172 |
+
with the distribution.
|
| 1173 |
+
* The name of the author may not be used to endorse or promote
|
| 1174 |
+
products derived from this software without specific prior
|
| 1175 |
+
written permission.
|
| 1176 |
+
|
| 1177 |
+
THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
|
| 1178 |
+
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 1179 |
+
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 1180 |
+
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
|
| 1181 |
+
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
| 1182 |
+
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 1183 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
| 1184 |
+
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
| 1185 |
+
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
|
| 1186 |
+
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
| 1187 |
+
POSSIBILITY OF SUCH DAMAGE.
|
| 1188 |
+
|
| 1189 |
+
8. Some of the cuBLAS library routines were derived from
|
| 1190 |
+
code developed by the University of Tennessee and are
|
| 1191 |
+
subject to the Modified Berkeley Software Distribution
|
| 1192 |
+
License as follows:
|
| 1193 |
+
|
| 1194 |
+
Copyright (c) 2010 The University of Tennessee.
|
| 1195 |
+
|
| 1196 |
+
All rights reserved.
|
| 1197 |
+
|
| 1198 |
+
Redistribution and use in source and binary forms, with or without
|
| 1199 |
+
modification, are permitted provided that the following conditions are
|
| 1200 |
+
met:
|
| 1201 |
+
* Redistributions of source code must retain the above copyright
|
| 1202 |
+
notice, this list of conditions and the following disclaimer.
|
| 1203 |
+
* Redistributions in binary form must reproduce the above
|
| 1204 |
+
copyright notice, this list of conditions and the following
|
| 1205 |
+
disclaimer listed in this license in the documentation and/or
|
| 1206 |
+
other materials provided with the distribution.
|
| 1207 |
+
* Neither the name of the copyright holders nor the names of its
|
| 1208 |
+
contributors may be used to endorse or promote products derived
|
| 1209 |
+
from this software without specific prior written permission.
|
| 1210 |
+
|
| 1211 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 1212 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 1213 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 1214 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 1215 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 1216 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 1217 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 1218 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 1219 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 1220 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 1221 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 1222 |
+
|
| 1223 |
+
9. Some of the cuBLAS library routines were written by or
|
| 1224 |
+
derived from code written by Jonathan Hogg and are subject
|
| 1225 |
+
to the Modified Berkeley Software Distribution License as
|
| 1226 |
+
follows:
|
| 1227 |
+
|
| 1228 |
+
Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
|
| 1229 |
+
|
| 1230 |
+
All rights reserved.
|
| 1231 |
+
|
| 1232 |
+
Redistribution and use in source and binary forms, with or without
|
| 1233 |
+
modification, are permitted provided that the following conditions are
|
| 1234 |
+
met:
|
| 1235 |
+
* Redistributions of source code must retain the above copyright
|
| 1236 |
+
notice, this list of conditions and the following disclaimer.
|
| 1237 |
+
* Redistributions in binary form must reproduce the above
|
| 1238 |
+
copyright notice, this list of conditions and the following
|
| 1239 |
+
disclaimer in the documentation and/or other materials provided
|
| 1240 |
+
with the distribution.
|
| 1241 |
+
* Neither the name of the STFC nor the names of its contributors
|
| 1242 |
+
may be used to endorse or promote products derived from this
|
| 1243 |
+
software without specific prior written permission.
|
| 1244 |
+
|
| 1245 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 1246 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 1247 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 1248 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
|
| 1249 |
+
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
| 1250 |
+
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
| 1251 |
+
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
| 1252 |
+
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
| 1253 |
+
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
| 1254 |
+
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
|
| 1255 |
+
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 1256 |
+
|
| 1257 |
+
10. Some of the cuBLAS library routines were written by or
|
| 1258 |
+
derived from code written by Ahmad M. Abdelfattah, David
|
| 1259 |
+
Keyes, and Hatem Ltaief, and are subject to the Apache
|
| 1260 |
+
License, Version 2.0, as follows:
|
| 1261 |
+
|
| 1262 |
+
-- (C) Copyright 2013 King Abdullah University of Science and Technology
|
| 1263 |
+
Authors:
|
| 1264 |
+
Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa)
|
| 1265 |
+
David Keyes (david.keyes@kaust.edu.sa)
|
| 1266 |
+
Hatem Ltaief (hatem.ltaief@kaust.edu.sa)
|
| 1267 |
+
|
| 1268 |
+
Redistribution and use in source and binary forms, with or without
|
| 1269 |
+
modification, are permitted provided that the following conditions
|
| 1270 |
+
are met:
|
| 1271 |
+
|
| 1272 |
+
* Redistributions of source code must retain the above copyright
|
| 1273 |
+
notice, this list of conditions and the following disclaimer.
|
| 1274 |
+
* Redistributions in binary form must reproduce the above copyright
|
| 1275 |
+
notice, this list of conditions and the following disclaimer in the
|
| 1276 |
+
documentation and/or other materials provided with the distribution.
|
| 1277 |
+
* Neither the name of the King Abdullah University of Science and
|
| 1278 |
+
Technology nor the names of its contributors may be used to endorse
|
| 1279 |
+
or promote products derived from this software without specific prior
|
| 1280 |
+
written permission.
|
| 1281 |
+
|
| 1282 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 1283 |
+
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 1284 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 1285 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 1286 |
+
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 1287 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 1288 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 1289 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 1290 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 1291 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 1292 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
|
| 1293 |
+
|
| 1294 |
+
11. Some of the cuSPARSE library routines were written by or
|
| 1295 |
+
derived from code written by Li-Wen Chang and are subject
|
| 1296 |
+
to the NCSA Open Source License as follows:
|
| 1297 |
+
|
| 1298 |
+
Copyright (c) 2012, University of Illinois.
|
| 1299 |
+
|
| 1300 |
+
All rights reserved.
|
| 1301 |
+
|
| 1302 |
+
Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
|
| 1303 |
+
|
| 1304 |
+
Permission is hereby granted, free of charge, to any person obtaining
|
| 1305 |
+
a copy of this software and associated documentation files (the
|
| 1306 |
+
"Software"), to deal with the Software without restriction, including
|
| 1307 |
+
without limitation the rights to use, copy, modify, merge, publish,
|
| 1308 |
+
distribute, sublicense, and/or sell copies of the Software, and to
|
| 1309 |
+
permit persons to whom the Software is furnished to do so, subject to
|
| 1310 |
+
the following conditions:
|
| 1311 |
+
* Redistributions of source code must retain the above copyright
|
| 1312 |
+
notice, this list of conditions and the following disclaimer.
|
| 1313 |
+
* Redistributions in binary form must reproduce the above
|
| 1314 |
+
copyright notice, this list of conditions and the following
|
| 1315 |
+
disclaimers in the documentation and/or other materials provided
|
| 1316 |
+
with the distribution.
|
| 1317 |
+
* Neither the names of IMPACT Group, University of Illinois, nor
|
| 1318 |
+
the names of its contributors may be used to endorse or promote
|
| 1319 |
+
products derived from this Software without specific prior
|
| 1320 |
+
written permission.
|
| 1321 |
+
|
| 1322 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
| 1323 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
| 1324 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
| 1325 |
+
NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
|
| 1326 |
+
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
| 1327 |
+
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
| 1328 |
+
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
|
| 1329 |
+
SOFTWARE.
|
| 1330 |
+
|
| 1331 |
+
12. Some of the cuRAND library routines were written by or
|
| 1332 |
+
derived from code written by Mutsuo Saito and Makoto
|
| 1333 |
+
Matsumoto and are subject to the following license:
|
| 1334 |
+
|
| 1335 |
+
Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
|
| 1336 |
+
University. All rights reserved.
|
| 1337 |
+
|
| 1338 |
+
Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
|
| 1339 |
+
University and University of Tokyo. All rights reserved.
|
| 1340 |
+
|
| 1341 |
+
Redistribution and use in source and binary forms, with or without
|
| 1342 |
+
modification, are permitted provided that the following conditions are
|
| 1343 |
+
met:
|
| 1344 |
+
* Redistributions of source code must retain the above copyright
|
| 1345 |
+
notice, this list of conditions and the following disclaimer.
|
| 1346 |
+
* Redistributions in binary form must reproduce the above
|
| 1347 |
+
copyright notice, this list of conditions and the following
|
| 1348 |
+
disclaimer in the documentation and/or other materials provided
|
| 1349 |
+
with the distribution.
|
| 1350 |
+
* Neither the name of the Hiroshima University nor the names of
|
| 1351 |
+
its contributors may be used to endorse or promote products
|
| 1352 |
+
derived from this software without specific prior written
|
| 1353 |
+
permission.
|
| 1354 |
+
|
| 1355 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 1356 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 1357 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 1358 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 1359 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 1360 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 1361 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 1362 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 1363 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 1364 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 1365 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 1366 |
+
|
| 1367 |
+
13. Some of the cuRAND library routines were derived from
|
| 1368 |
+
code developed by D. E. Shaw Research and are subject to
|
| 1369 |
+
the following license:
|
| 1370 |
+
|
| 1371 |
+
Copyright 2010-2011, D. E. Shaw Research.
|
| 1372 |
+
|
| 1373 |
+
All rights reserved.
|
| 1374 |
+
|
| 1375 |
+
Redistribution and use in source and binary forms, with or without
|
| 1376 |
+
modification, are permitted provided that the following conditions are
|
| 1377 |
+
met:
|
| 1378 |
+
* Redistributions of source code must retain the above copyright
|
| 1379 |
+
notice, this list of conditions, and the following disclaimer.
|
| 1380 |
+
* Redistributions in binary form must reproduce the above
|
| 1381 |
+
copyright notice, this list of conditions, and the following
|
| 1382 |
+
disclaimer in the documentation and/or other materials provided
|
| 1383 |
+
with the distribution.
|
| 1384 |
+
* Neither the name of D. E. Shaw Research nor the names of its
|
| 1385 |
+
contributors may be used to endorse or promote products derived
|
| 1386 |
+
from this software without specific prior written permission.
|
| 1387 |
+
|
| 1388 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 1389 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 1390 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 1391 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 1392 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 1393 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 1394 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 1395 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 1396 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 1397 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 1398 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 1399 |
+
|
| 1400 |
+
14. Some of the Math library routines were written by or
|
| 1401 |
+
derived from code developed by Norbert Juffa and are
|
| 1402 |
+
subject to the following license:
|
| 1403 |
+
|
| 1404 |
+
Copyright (c) 2015-2017, Norbert Juffa
|
| 1405 |
+
All rights reserved.
|
| 1406 |
+
|
| 1407 |
+
Redistribution and use in source and binary forms, with or without
|
| 1408 |
+
modification, are permitted provided that the following conditions
|
| 1409 |
+
are met:
|
| 1410 |
+
|
| 1411 |
+
1. Redistributions of source code must retain the above copyright
|
| 1412 |
+
notice, this list of conditions and the following disclaimer.
|
| 1413 |
+
|
| 1414 |
+
2. Redistributions in binary form must reproduce the above copyright
|
| 1415 |
+
notice, this list of conditions and the following disclaimer in the
|
| 1416 |
+
documentation and/or other materials provided with the distribution.
|
| 1417 |
+
|
| 1418 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 1419 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 1420 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 1421 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 1422 |
+
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 1423 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 1424 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 1425 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 1426 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 1427 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 1428 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 1429 |
+
|
| 1430 |
+
15. Licensee's use of the lz4 third party component is
|
| 1431 |
+
subject to the following terms and conditions:
|
| 1432 |
+
|
| 1433 |
+
Copyright (C) 2011-2013, Yann Collet.
|
| 1434 |
+
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
|
| 1435 |
+
|
| 1436 |
+
Redistribution and use in source and binary forms, with or without
|
| 1437 |
+
modification, are permitted provided that the following conditions are
|
| 1438 |
+
met:
|
| 1439 |
+
|
| 1440 |
+
* Redistributions of source code must retain the above copyright
|
| 1441 |
+
notice, this list of conditions and the following disclaimer.
|
| 1442 |
+
* Redistributions in binary form must reproduce the above
|
| 1443 |
+
copyright notice, this list of conditions and the following disclaimer
|
| 1444 |
+
in the documentation and/or other materials provided with the
|
| 1445 |
+
distribution.
|
| 1446 |
+
|
| 1447 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 1448 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 1449 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 1450 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 1451 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 1452 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 1453 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 1454 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 1455 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 1456 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 1457 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 1458 |
+
|
| 1459 |
+
16. The NPP library uses code from the Boost Math Toolkit,
|
| 1460 |
+
and is subject to the following license:
|
| 1461 |
+
|
| 1462 |
+
Boost Software License - Version 1.0 - August 17th, 2003
|
| 1463 |
+
. . . .
|
| 1464 |
+
|
| 1465 |
+
Permission is hereby granted, free of charge, to any person or
|
| 1466 |
+
organization obtaining a copy of the software and accompanying
|
| 1467 |
+
documentation covered by this license (the "Software") to use,
|
| 1468 |
+
reproduce, display, distribute, execute, and transmit the Software,
|
| 1469 |
+
and to prepare derivative works of the Software, and to permit
|
| 1470 |
+
third-parties to whom the Software is furnished to do so, all
|
| 1471 |
+
subject to the following:
|
| 1472 |
+
|
| 1473 |
+
The copyright notices in the Software and this entire statement,
|
| 1474 |
+
including the above license grant, this restriction and the following
|
| 1475 |
+
disclaimer, must be included in all copies of the Software, in whole
|
| 1476 |
+
or in part, and all derivative works of the Software, unless such
|
| 1477 |
+
copies or derivative works are solely in the form of machine-executable
|
| 1478 |
+
object code generated by a source language processor.
|
| 1479 |
+
|
| 1480 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
| 1481 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
| 1482 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
|
| 1483 |
+
NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
|
| 1484 |
+
ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
|
| 1485 |
+
OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
|
| 1486 |
+
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
| 1487 |
+
OTHER DEALINGS IN THE SOFTWARE.
|
| 1488 |
+
|
| 1489 |
+
17. Portions of the Nsight Eclipse Edition is subject to the
|
| 1490 |
+
following license:
|
| 1491 |
+
|
| 1492 |
+
The Eclipse Foundation makes available all content in this plug-in
|
| 1493 |
+
("Content"). Unless otherwise indicated below, the Content is provided
|
| 1494 |
+
to you under the terms and conditions of the Eclipse Public License
|
| 1495 |
+
Version 1.0 ("EPL"). A copy of the EPL is available at http://
|
| 1496 |
+
www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
|
| 1497 |
+
will mean the Content.
|
| 1498 |
+
|
| 1499 |
+
If you did not receive this Content directly from the Eclipse
|
| 1500 |
+
Foundation, the Content is being redistributed by another party
|
| 1501 |
+
("Redistributor") and different terms and conditions may apply to your
|
| 1502 |
+
use of any object code in the Content. Check the Redistributor's
|
| 1503 |
+
license that was provided with the Content. If no such license exists,
|
| 1504 |
+
contact the Redistributor. Unless otherwise indicated below, the terms
|
| 1505 |
+
and conditions of the EPL still apply to any source code in the
|
| 1506 |
+
Content and such source code may be obtained at http://www.eclipse.org.
|
| 1507 |
+
|
| 1508 |
+
18. Some of the cuBLAS library routines uses code from
|
| 1509 |
+
OpenAI, which is subject to the following license:
|
| 1510 |
+
|
| 1511 |
+
License URL
|
| 1512 |
+
https://github.com/openai/openai-gemm/blob/master/LICENSE
|
| 1513 |
+
|
| 1514 |
+
License Text
|
| 1515 |
+
The MIT License
|
| 1516 |
+
|
| 1517 |
+
Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
|
| 1518 |
+
|
| 1519 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 1520 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 1521 |
+
in the Software without restriction, including without limitation the rights
|
| 1522 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 1523 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 1524 |
+
furnished to do so, subject to the following conditions:
|
| 1525 |
+
|
| 1526 |
+
The above copyright notice and this permission notice shall be included in
|
| 1527 |
+
all copies or substantial portions of the Software.
|
| 1528 |
+
|
| 1529 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 1530 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 1531 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 1532 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 1533 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 1534 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
| 1535 |
+
THE SOFTWARE.
|
| 1536 |
+
|
| 1537 |
+
19. Licensee's use of the Visual Studio Setup Configuration
|
| 1538 |
+
Samples is subject to the following license:
|
| 1539 |
+
|
| 1540 |
+
The MIT License (MIT)
|
| 1541 |
+
Copyright (C) Microsoft Corporation. All rights reserved.
|
| 1542 |
+
|
| 1543 |
+
Permission is hereby granted, free of charge, to any person
|
| 1544 |
+
obtaining a copy of this software and associated documentation
|
| 1545 |
+
files (the "Software"), to deal in the Software without restriction,
|
| 1546 |
+
including without limitation the rights to use, copy, modify, merge,
|
| 1547 |
+
publish, distribute, sublicense, and/or sell copies of the Software,
|
| 1548 |
+
and to permit persons to whom the Software is furnished to do so,
|
| 1549 |
+
subject to the following conditions:
|
| 1550 |
+
|
| 1551 |
+
The above copyright notice and this permission notice shall be included
|
| 1552 |
+
in all copies or substantial portions of the Software.
|
| 1553 |
+
|
| 1554 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
| 1555 |
+
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 1556 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 1557 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 1558 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 1559 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
| 1560 |
+
|
| 1561 |
+
20. Licensee's use of linmath.h header for CPU functions for
|
| 1562 |
+
GL vector/matrix operations from lunarG is subject to the
|
| 1563 |
+
Apache License Version 2.0.
|
| 1564 |
+
|
| 1565 |
+
21. The DX12-CUDA sample uses the d3dx12.h header, which is
|
| 1566 |
+
subject to the MIT license .
|
| 1567 |
+
|
| 1568 |
+
-----------------
|
pllava/lib/python3.10/site-packages/nvidia_cublas_cu12-12.4.5.8.dist-info/RECORD
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 2 |
+
nvidia/__pycache__/__init__.cpython-310.pyc,,
|
| 3 |
+
nvidia/cublas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 4 |
+
nvidia/cublas/__pycache__/__init__.cpython-310.pyc,,
|
| 5 |
+
nvidia/cublas/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc,,
|
| 7 |
+
nvidia/cublas/include/cublas.h,sha256=a0lLqy-k47NuwyDjuueC3W0Mpc908MTU7o5sMJqE-1w,41246
|
| 8 |
+
nvidia/cublas/include/cublasLt.h,sha256=51KyHQc7T9rxmVfNimP9O6vka8JqBdebjZKCWKZakt4,77626
|
| 9 |
+
nvidia/cublas/include/cublasXt.h,sha256=CW9dyXYGSUW1wEXrVVyhU6OxBK1PUvMoYdVGlQT7L9A,37380
|
| 10 |
+
nvidia/cublas/include/cublas_api.h,sha256=XRArlgDy_4hWuEt8XafRsE9KRJ5XVo06Nh113cgg-7o,370663
|
| 11 |
+
nvidia/cublas/include/cublas_v2.h,sha256=qxMdB5jb97luEfw61LEAB-Wlr8A9DLBvO4rRypDCNKw,15460
|
| 12 |
+
nvidia/cublas/include/nvblas.h,sha256=dXCLR-2oUiJFzLsDtIAK09m42ct4G0HWdYzBUuDPXpc,23341
|
| 13 |
+
nvidia/cublas/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 14 |
+
nvidia/cublas/lib/__pycache__/__init__.cpython-310.pyc,,
|
| 15 |
+
nvidia/cublas/lib/libcublas.so.12,sha256=TMRVJkSaldOYU4l4XphbeDJGDqPbF4vJ6ZY74hEfvug,109604768
|
| 16 |
+
nvidia/cublas/lib/libcublasLt.so.12,sha256=RKgTqi2giDD5CD-B0Otz8a5AUqTZsLDeSAqPbNnrMHg,441938896
|
| 17 |
+
nvidia/cublas/lib/libnvblas.so.12,sha256=fCpY3FQVQgg5IwHQ_j1ToSDkwevquegM6R_plIuurck,757496
|
| 18 |
+
nvidia_cublas_cu12-12.4.5.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 19 |
+
nvidia_cublas_cu12-12.4.5.8.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
|
| 20 |
+
nvidia_cublas_cu12-12.4.5.8.dist-info/METADATA,sha256=FtdQvmVmrqzO9Vp7VbNtbQWUxXF45arMsnGnwYdlZuc,1505
|
| 21 |
+
nvidia_cublas_cu12-12.4.5.8.dist-info/RECORD,,
|
| 22 |
+
nvidia_cublas_cu12-12.4.5.8.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 23 |
+
nvidia_cublas_cu12-12.4.5.8.dist-info/WHEEL,sha256=XDTs3wIbcE-BcRO08VJlZpA6z9OaC1mOKPCGGGwuM2g,109
|
| 24 |
+
nvidia_cublas_cu12-12.4.5.8.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
|
pllava/lib/python3.10/site-packages/nvidia_cublas_cu12-12.4.5.8.dist-info/REQUESTED
ADDED
|
File without changes
|
pllava/lib/python3.10/site-packages/nvidia_cublas_cu12-12.4.5.8.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.42.0)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-manylinux2014_x86_64
|
| 5 |
+
|
pllava/lib/python3.10/site-packages/nvidia_cublas_cu12-12.4.5.8.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
nvidia
|
pllava/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (37.9 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_VF.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This makes the functions in torch._C._VariableFunctions available as
|
| 3 |
+
torch._VF.<funcname>
|
| 4 |
+
without mypy being able to find them.
|
| 5 |
+
|
| 6 |
+
A subset of those functions are mapped to ATen functions in
|
| 7 |
+
torch/jit/_builtins.py
|
| 8 |
+
|
| 9 |
+
See https://github.com/pytorch/pytorch/issues/21478 for the reason for
|
| 10 |
+
introducing torch._VF
|
| 11 |
+
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import sys
|
| 15 |
+
import types
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class VFModule(types.ModuleType):
|
| 21 |
+
vf: types.ModuleType
|
| 22 |
+
|
| 23 |
+
def __init__(self, name: str):
|
| 24 |
+
super().__init__(name)
|
| 25 |
+
self.vf = torch._C._VariableFunctions
|
| 26 |
+
|
| 27 |
+
def __getattr__(self, name: str) -> object:
|
| 28 |
+
return getattr(self.vf, name)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
sys.modules[__name__] = VFModule(__name__)
|
pllava/lib/python3.10/site-packages/torch/_VF.pyi
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pllava/lib/python3.10/site-packages/torch/__config__.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def show():
|
| 6 |
+
"""
|
| 7 |
+
Return a human-readable string with descriptions of the
|
| 8 |
+
configuration of PyTorch.
|
| 9 |
+
"""
|
| 10 |
+
return torch._C._show_config()
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# TODO: In principle, we could provide more structured version/config
|
| 14 |
+
# information here. For now only CXX_FLAGS is exposed, as Timer
|
| 15 |
+
# uses them.
|
| 16 |
+
def _cxx_flags():
|
| 17 |
+
"""Returns the CXX_FLAGS used when building PyTorch."""
|
| 18 |
+
return torch._C._cxx_flags()
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def parallel_info():
|
| 22 |
+
r"""Returns detailed string with parallelization settings"""
|
| 23 |
+
return torch._C._parallel_info()
|
pllava/lib/python3.10/site-packages/torch/__init__.py
ADDED
|
@@ -0,0 +1,2665 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
The torch package contains data structures for multi-dimensional
|
| 3 |
+
tensors and defines mathematical operations over these tensors.
|
| 4 |
+
Additionally, it provides many utilities for efficient serialization of
|
| 5 |
+
Tensors and arbitrary types, and other useful utilities.
|
| 6 |
+
|
| 7 |
+
It has a CUDA counterpart, that enables you to run your tensor computations
|
| 8 |
+
on an NVIDIA GPU with compute capability >= 3.0.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
# mypy: allow-untyped-defs
|
| 12 |
+
|
| 13 |
+
import builtins
|
| 14 |
+
import ctypes
|
| 15 |
+
import glob
|
| 16 |
+
import importlib
|
| 17 |
+
import inspect
|
| 18 |
+
import math
|
| 19 |
+
import os
|
| 20 |
+
import platform
|
| 21 |
+
import sys
|
| 22 |
+
import textwrap
|
| 23 |
+
import threading
|
| 24 |
+
from typing import (
|
| 25 |
+
Any as _Any,
|
| 26 |
+
Callable as _Callable,
|
| 27 |
+
Dict as _Dict,
|
| 28 |
+
Optional as _Optional,
|
| 29 |
+
overload as _overload,
|
| 30 |
+
Set as _Set,
|
| 31 |
+
Tuple as _Tuple,
|
| 32 |
+
Type as _Type,
|
| 33 |
+
TYPE_CHECKING,
|
| 34 |
+
TypeVar as _TypeVar,
|
| 35 |
+
Union as _Union,
|
| 36 |
+
)
|
| 37 |
+
from typing_extensions import ParamSpec as _ParamSpec, TypeGuard as _TypeGuard
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
if TYPE_CHECKING:
|
| 41 |
+
from .types import IntLikeType
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# multipy/deploy is setting this import before importing torch, this is the most
|
| 45 |
+
# reliable way we have to detect if we're running within deploy.
|
| 46 |
+
# https://github.com/pytorch/multipy/blob/d60f34ad38c371e441fe7ffdb77a3c3dda5a5d19/multipy/runtime/interpreter/interpreter_impl.cpp#L134-L137
|
| 47 |
+
def _running_with_deploy() -> builtins.bool:
|
| 48 |
+
return sys.modules.get("torch._meta_registrations", None) is object
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
from torch._utils import (
|
| 52 |
+
_functionalize_sync as _sync,
|
| 53 |
+
_import_dotted_name,
|
| 54 |
+
classproperty,
|
| 55 |
+
)
|
| 56 |
+
from torch._utils_internal import (
|
| 57 |
+
get_file_path,
|
| 58 |
+
prepare_multiprocessing_environment,
|
| 59 |
+
USE_GLOBAL_DEPS,
|
| 60 |
+
USE_RTLD_GLOBAL_WITH_LIBTORCH,
|
| 61 |
+
)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# TODO(torch_deploy) figure out how to freeze version.py in fbcode build
|
| 65 |
+
if _running_with_deploy():
|
| 66 |
+
__version__ = "torch-deploy-1.8"
|
| 67 |
+
else:
|
| 68 |
+
from torch.torch_version import __version__ as __version__
|
| 69 |
+
|
| 70 |
+
__all__ = [
|
| 71 |
+
"BoolStorage",
|
| 72 |
+
"BoolTensor",
|
| 73 |
+
"ByteStorage",
|
| 74 |
+
"ByteTensor",
|
| 75 |
+
"CharStorage",
|
| 76 |
+
"CharTensor",
|
| 77 |
+
"DoubleStorage",
|
| 78 |
+
"DoubleTensor",
|
| 79 |
+
"FloatStorage",
|
| 80 |
+
"FloatTensor",
|
| 81 |
+
"GradScaler",
|
| 82 |
+
"IntStorage",
|
| 83 |
+
"IntTensor",
|
| 84 |
+
"LongStorage",
|
| 85 |
+
"LongTensor",
|
| 86 |
+
"ShortStorage",
|
| 87 |
+
"ShortTensor",
|
| 88 |
+
"SymBool",
|
| 89 |
+
"SymFloat",
|
| 90 |
+
"SymInt",
|
| 91 |
+
"Tensor",
|
| 92 |
+
"TypedStorage",
|
| 93 |
+
"UntypedStorage",
|
| 94 |
+
"are_deterministic_algorithms_enabled",
|
| 95 |
+
"autocast",
|
| 96 |
+
"chunk",
|
| 97 |
+
"compile",
|
| 98 |
+
"cond",
|
| 99 |
+
"enable_grad",
|
| 100 |
+
"export",
|
| 101 |
+
"get_default_device",
|
| 102 |
+
"get_deterministic_debug_mode",
|
| 103 |
+
"get_device_module",
|
| 104 |
+
"get_float32_matmul_precision",
|
| 105 |
+
"get_rng_state",
|
| 106 |
+
"inference_mode",
|
| 107 |
+
"initial_seed",
|
| 108 |
+
"is_deterministic_algorithms_warn_only_enabled",
|
| 109 |
+
"is_storage",
|
| 110 |
+
"is_tensor",
|
| 111 |
+
"is_warn_always_enabled",
|
| 112 |
+
"load",
|
| 113 |
+
"lobpcg",
|
| 114 |
+
"manual_seed",
|
| 115 |
+
"matmul",
|
| 116 |
+
"no_grad",
|
| 117 |
+
"rand",
|
| 118 |
+
"randn",
|
| 119 |
+
"save",
|
| 120 |
+
"seed",
|
| 121 |
+
"set_default_device",
|
| 122 |
+
"set_default_tensor_type",
|
| 123 |
+
"set_deterministic_debug_mode",
|
| 124 |
+
"set_float32_matmul_precision",
|
| 125 |
+
"set_printoptions",
|
| 126 |
+
"set_rng_state",
|
| 127 |
+
"set_warn_always",
|
| 128 |
+
"split",
|
| 129 |
+
"stack",
|
| 130 |
+
"sym_float",
|
| 131 |
+
"sym_int",
|
| 132 |
+
"sym_ite",
|
| 133 |
+
"sym_max",
|
| 134 |
+
"sym_min",
|
| 135 |
+
"sym_not",
|
| 136 |
+
"typename",
|
| 137 |
+
"unravel_index",
|
| 138 |
+
"use_deterministic_algorithms",
|
| 139 |
+
"vmap",
|
| 140 |
+
]
|
| 141 |
+
|
| 142 |
+
# Please keep this list sorted
|
| 143 |
+
assert __all__ == sorted(__all__)
|
| 144 |
+
|
| 145 |
+
################################################################################
|
| 146 |
+
# Load the extension module
|
| 147 |
+
################################################################################
|
| 148 |
+
|
| 149 |
+
if sys.platform == "win32":
|
| 150 |
+
|
| 151 |
+
def _load_dll_libraries() -> None:
|
| 152 |
+
import sysconfig
|
| 153 |
+
|
| 154 |
+
from torch.version import cuda as cuda_version
|
| 155 |
+
|
| 156 |
+
pfiles_path = os.getenv("ProgramFiles", r"C:\Program Files")
|
| 157 |
+
py_dll_path = os.path.join(sys.exec_prefix, "Library", "bin")
|
| 158 |
+
th_dll_path = os.path.join(os.path.dirname(__file__), "lib")
|
| 159 |
+
usebase_path = os.path.join(
|
| 160 |
+
sysconfig.get_config_var("userbase"), "Library", "bin"
|
| 161 |
+
)
|
| 162 |
+
|
| 163 |
+
# When users create a virtualenv that inherits the base environment,
|
| 164 |
+
# we will need to add the corresponding library directory into
|
| 165 |
+
# DLL search directories. Otherwise, it will rely on `PATH` which
|
| 166 |
+
# is dependent on user settings.
|
| 167 |
+
if sys.exec_prefix != sys.base_exec_prefix:
|
| 168 |
+
base_py_dll_path = os.path.join(sys.base_exec_prefix, "Library", "bin")
|
| 169 |
+
else:
|
| 170 |
+
base_py_dll_path = ""
|
| 171 |
+
|
| 172 |
+
dll_paths = [
|
| 173 |
+
p
|
| 174 |
+
for p in (th_dll_path, py_dll_path, base_py_dll_path, usebase_path)
|
| 175 |
+
if os.path.exists(p)
|
| 176 |
+
]
|
| 177 |
+
|
| 178 |
+
if not builtins.any(
|
| 179 |
+
os.path.exists(os.path.join(p, "nvToolsExt64_1.dll")) for p in dll_paths
|
| 180 |
+
):
|
| 181 |
+
nvtoolsext_dll_path = os.path.join(
|
| 182 |
+
os.getenv(
|
| 183 |
+
"NVTOOLSEXT_PATH",
|
| 184 |
+
os.path.join(pfiles_path, "NVIDIA Corporation", "NvToolsExt"),
|
| 185 |
+
),
|
| 186 |
+
"bin",
|
| 187 |
+
"x64",
|
| 188 |
+
)
|
| 189 |
+
else:
|
| 190 |
+
nvtoolsext_dll_path = ""
|
| 191 |
+
|
| 192 |
+
if cuda_version and builtins.all(
|
| 193 |
+
not glob.glob(os.path.join(p, "cudart64*.dll")) for p in dll_paths
|
| 194 |
+
):
|
| 195 |
+
cuda_version_1 = cuda_version.replace(".", "_")
|
| 196 |
+
cuda_path_var = "CUDA_PATH_V" + cuda_version_1
|
| 197 |
+
default_path = os.path.join(
|
| 198 |
+
pfiles_path, "NVIDIA GPU Computing Toolkit", "CUDA", f"v{cuda_version}"
|
| 199 |
+
)
|
| 200 |
+
cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), "bin")
|
| 201 |
+
else:
|
| 202 |
+
cuda_path = ""
|
| 203 |
+
|
| 204 |
+
dll_paths.extend(
|
| 205 |
+
p for p in (nvtoolsext_dll_path, cuda_path) if os.path.exists(p)
|
| 206 |
+
)
|
| 207 |
+
|
| 208 |
+
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
|
| 209 |
+
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
|
| 210 |
+
prev_error_mode = kernel32.SetErrorMode(0x0001)
|
| 211 |
+
|
| 212 |
+
kernel32.LoadLibraryW.restype = ctypes.c_void_p
|
| 213 |
+
if with_load_library_flags:
|
| 214 |
+
kernel32.LoadLibraryExW.restype = ctypes.c_void_p
|
| 215 |
+
|
| 216 |
+
for dll_path in dll_paths:
|
| 217 |
+
os.add_dll_directory(dll_path)
|
| 218 |
+
|
| 219 |
+
try:
|
| 220 |
+
ctypes.CDLL("vcruntime140.dll")
|
| 221 |
+
ctypes.CDLL("msvcp140.dll")
|
| 222 |
+
ctypes.CDLL("vcruntime140_1.dll")
|
| 223 |
+
except OSError:
|
| 224 |
+
print(
|
| 225 |
+
textwrap.dedent(
|
| 226 |
+
"""
|
| 227 |
+
Microsoft Visual C++ Redistributable is not installed, this may lead to the DLL load failure.
|
| 228 |
+
It can be downloaded at https://aka.ms/vs/16/release/vc_redist.x64.exe
|
| 229 |
+
"""
|
| 230 |
+
).strip()
|
| 231 |
+
)
|
| 232 |
+
|
| 233 |
+
dlls = glob.glob(os.path.join(th_dll_path, "*.dll"))
|
| 234 |
+
path_patched = False
|
| 235 |
+
for dll in dlls:
|
| 236 |
+
is_loaded = False
|
| 237 |
+
if with_load_library_flags:
|
| 238 |
+
res = kernel32.LoadLibraryExW(dll, None, 0x00001100)
|
| 239 |
+
last_error = ctypes.get_last_error()
|
| 240 |
+
if res is None and last_error != 126:
|
| 241 |
+
err = ctypes.WinError(last_error)
|
| 242 |
+
err.strerror += (
|
| 243 |
+
f' Error loading "{dll}" or one of its dependencies.'
|
| 244 |
+
)
|
| 245 |
+
raise err
|
| 246 |
+
elif res is not None:
|
| 247 |
+
is_loaded = True
|
| 248 |
+
if not is_loaded:
|
| 249 |
+
if not path_patched:
|
| 250 |
+
os.environ["PATH"] = ";".join(dll_paths + [os.environ["PATH"]])
|
| 251 |
+
path_patched = True
|
| 252 |
+
res = kernel32.LoadLibraryW(dll)
|
| 253 |
+
if res is None:
|
| 254 |
+
err = ctypes.WinError(ctypes.get_last_error())
|
| 255 |
+
err.strerror += (
|
| 256 |
+
f' Error loading "{dll}" or one of its dependencies.'
|
| 257 |
+
)
|
| 258 |
+
raise err
|
| 259 |
+
|
| 260 |
+
kernel32.SetErrorMode(prev_error_mode)
|
| 261 |
+
|
| 262 |
+
_load_dll_libraries()
|
| 263 |
+
del _load_dll_libraries
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def _preload_cuda_deps(lib_folder: str, lib_name: str) -> None:
|
| 267 |
+
"""Preloads cuda deps if they could not be found otherwise."""
|
| 268 |
+
# Should only be called on Linux if default path resolution have failed
|
| 269 |
+
assert platform.system() == "Linux", "Should only be called on Linux"
|
| 270 |
+
|
| 271 |
+
lib_path = None
|
| 272 |
+
for path in sys.path:
|
| 273 |
+
nvidia_path = os.path.join(path, "nvidia")
|
| 274 |
+
if not os.path.exists(nvidia_path):
|
| 275 |
+
continue
|
| 276 |
+
candidate_lib_paths = glob.glob(
|
| 277 |
+
os.path.join(nvidia_path, lib_folder, "lib", lib_name)
|
| 278 |
+
)
|
| 279 |
+
if candidate_lib_paths and not lib_path:
|
| 280 |
+
lib_path = candidate_lib_paths[0]
|
| 281 |
+
if lib_path:
|
| 282 |
+
break
|
| 283 |
+
if not lib_path:
|
| 284 |
+
raise ValueError(f"{lib_name} not found in the system path {sys.path}")
|
| 285 |
+
ctypes.CDLL(lib_path)
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
# See Note [Global dependencies]
|
| 289 |
+
def _load_global_deps() -> None:
|
| 290 |
+
if _running_with_deploy() or platform.system() == "Windows":
|
| 291 |
+
return
|
| 292 |
+
|
| 293 |
+
# Determine the file extension based on the platform
|
| 294 |
+
lib_ext = ".dylib" if platform.system() == "Darwin" else ".so"
|
| 295 |
+
lib_name = f"libtorch_global_deps{lib_ext}"
|
| 296 |
+
here = os.path.abspath(__file__)
|
| 297 |
+
global_deps_lib_path = os.path.join(os.path.dirname(here), "lib", lib_name)
|
| 298 |
+
|
| 299 |
+
try:
|
| 300 |
+
ctypes.CDLL(global_deps_lib_path, mode=ctypes.RTLD_GLOBAL)
|
| 301 |
+
except OSError as err:
|
| 302 |
+
# Can only happen for wheel with cuda libs as PYPI deps
|
| 303 |
+
# As PyTorch is not purelib, but nvidia-*-cu12 is
|
| 304 |
+
cuda_libs: _Dict[str, str] = {
|
| 305 |
+
"cublas": "libcublas.so.*[0-9]",
|
| 306 |
+
"cudnn": "libcudnn.so.*[0-9]",
|
| 307 |
+
"cuda_nvrtc": "libnvrtc.so.*[0-9]",
|
| 308 |
+
"cuda_runtime": "libcudart.so.*[0-9]",
|
| 309 |
+
"cuda_cupti": "libcupti.so.*[0-9]",
|
| 310 |
+
"cufft": "libcufft.so.*[0-9]",
|
| 311 |
+
"curand": "libcurand.so.*[0-9]",
|
| 312 |
+
"nvjitlink": "libnvJitLink.so.*[0-9]",
|
| 313 |
+
"cusparse": "libcusparse.so.*[0-9]",
|
| 314 |
+
"cusolver": "libcusolver.so.*[0-9]",
|
| 315 |
+
"nccl": "libnccl.so.*[0-9]",
|
| 316 |
+
"nvtx": "libnvToolsExt.so.*[0-9]",
|
| 317 |
+
}
|
| 318 |
+
is_cuda_lib_err = [
|
| 319 |
+
lib for lib in cuda_libs.values() if lib.split(".")[0] in err.args[0]
|
| 320 |
+
]
|
| 321 |
+
if not is_cuda_lib_err:
|
| 322 |
+
raise err
|
| 323 |
+
for lib_folder, lib_name in cuda_libs.items():
|
| 324 |
+
_preload_cuda_deps(lib_folder, lib_name)
|
| 325 |
+
ctypes.CDLL(global_deps_lib_path, mode=ctypes.RTLD_GLOBAL)
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv("TORCH_USE_RTLD_GLOBAL")) and (
|
| 329 |
+
_running_with_deploy() or platform.system() != "Windows"
|
| 330 |
+
):
|
| 331 |
+
# Do it the hard way. You might want to load libtorch with RTLD_GLOBAL in a
|
| 332 |
+
# few circumstances:
|
| 333 |
+
#
|
| 334 |
+
# 1. You're in a build environment (e.g., fbcode) where
|
| 335 |
+
# libtorch_global_deps is not available, but you still need
|
| 336 |
+
# to get mkl to link in with RTLD_GLOBAL or it will just
|
| 337 |
+
# not work.
|
| 338 |
+
#
|
| 339 |
+
# 2. You're trying to run PyTorch under UBSAN and you need
|
| 340 |
+
# to ensure that only one copy of libtorch is loaded, so
|
| 341 |
+
# vptr checks work properly
|
| 342 |
+
#
|
| 343 |
+
# If you're using this setting, you must verify that all the libraries
|
| 344 |
+
# you load consistently use the same libstdc++, or you may have
|
| 345 |
+
# mysterious segfaults.
|
| 346 |
+
#
|
| 347 |
+
old_flags = sys.getdlopenflags()
|
| 348 |
+
sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_LAZY)
|
| 349 |
+
|
| 350 |
+
from torch._C import * # noqa: F403
|
| 351 |
+
|
| 352 |
+
sys.setdlopenflags(old_flags)
|
| 353 |
+
del old_flags
|
| 354 |
+
|
| 355 |
+
else:
|
| 356 |
+
# Easy way. You want this most of the time, because it will prevent
|
| 357 |
+
# C++ symbols from libtorch clobbering C++ symbols from other
|
| 358 |
+
# libraries, leading to mysterious segfaults.
|
| 359 |
+
#
|
| 360 |
+
# If building in an environment where libtorch_global_deps isn't available
|
| 361 |
+
# like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will
|
| 362 |
+
# want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False
|
| 363 |
+
#
|
| 364 |
+
# See Note [Global dependencies]
|
| 365 |
+
if USE_GLOBAL_DEPS:
|
| 366 |
+
_load_global_deps()
|
| 367 |
+
from torch._C import * # noqa: F403
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
class SymInt:
|
| 371 |
+
"""
|
| 372 |
+
Like an int (including magic methods), but redirects all operations on the
|
| 373 |
+
wrapped node. This is used in particular to symbolically record operations
|
| 374 |
+
in the symbolic shape workflow.
|
| 375 |
+
"""
|
| 376 |
+
|
| 377 |
+
def __init__(self, node):
|
| 378 |
+
# This field MUST be named node; C++ binding code assumes that this
|
| 379 |
+
# class has a field named node that stores SymNode
|
| 380 |
+
self.node = node
|
| 381 |
+
|
| 382 |
+
def __bool__(self):
|
| 383 |
+
return builtins.bool(self != 0)
|
| 384 |
+
|
| 385 |
+
def __int__(self):
|
| 386 |
+
return self.node.int_()
|
| 387 |
+
|
| 388 |
+
def __index__(self):
|
| 389 |
+
return self.node.int_()
|
| 390 |
+
|
| 391 |
+
# Magic methods installed by torch.fx.experimental.sym_node
|
| 392 |
+
|
| 393 |
+
def __round__(self, ndigits=None):
|
| 394 |
+
return self
|
| 395 |
+
|
| 396 |
+
def __truediv__(self, other):
|
| 397 |
+
if isinstance(other, (builtins.float, SymFloat)):
|
| 398 |
+
return sym_float(self).__float_truediv__(other)
|
| 399 |
+
if not isinstance(other, (builtins.int, SymInt)):
|
| 400 |
+
return NotImplemented
|
| 401 |
+
return self.__int_truediv__(other)
|
| 402 |
+
|
| 403 |
+
def __rtruediv__(self, other):
|
| 404 |
+
if isinstance(other, (builtins.float, SymFloat)):
|
| 405 |
+
return sym_float(self).__rfloat_truediv__(other)
|
| 406 |
+
if not isinstance(other, (builtins.int, SymInt)):
|
| 407 |
+
return NotImplemented
|
| 408 |
+
return self.__rint_truediv__(other)
|
| 409 |
+
|
| 410 |
+
def __floordiv__(self, other):
|
| 411 |
+
if isinstance(other, (builtins.float, SymFloat)):
|
| 412 |
+
return sym_float(math.floor(sym_float(self) / other))
|
| 413 |
+
if not isinstance(other, (builtins.int, SymInt)):
|
| 414 |
+
return NotImplemented
|
| 415 |
+
return self.__int_floordiv__(other)
|
| 416 |
+
|
| 417 |
+
def __rfloordiv__(self, other):
|
| 418 |
+
if isinstance(other, (builtins.float, SymFloat)):
|
| 419 |
+
return sym_float(math.floor(other / sym_float(self)))
|
| 420 |
+
if not isinstance(other, (builtins.int, SymInt)):
|
| 421 |
+
return NotImplemented
|
| 422 |
+
return self.__rint_floordiv__(other)
|
| 423 |
+
|
| 424 |
+
# nb: complex is impossible to handle correctly lol, with
|
| 425 |
+
# negative base and integral float need to diverge semantics and
|
| 426 |
+
# just always return complex. Neener neener pretend this problem
|
| 427 |
+
# doesn't exist
|
| 428 |
+
def __pow__(self, other):
|
| 429 |
+
if isinstance(other, (builtins.float, SymFloat)):
|
| 430 |
+
return sym_float(self).__pow__(other)
|
| 431 |
+
if not isinstance(other, (builtins.int, SymInt)):
|
| 432 |
+
return NotImplemented
|
| 433 |
+
# Guards! This guard is necessary because we need to know it to
|
| 434 |
+
# determine the output type of this operation
|
| 435 |
+
if other >= 0:
|
| 436 |
+
return self.__pow_by_natural__(other)
|
| 437 |
+
else:
|
| 438 |
+
# Mercifully, when the exponent is negative, Python just promotes
|
| 439 |
+
# to doubles and does a float pow:
|
| 440 |
+
#
|
| 441 |
+
# if (Py_SIZE(b) < 0 && c == NULL) {
|
| 442 |
+
# /* if exponent is negative and there's no modulus:
|
| 443 |
+
# return a float. This works because we know
|
| 444 |
+
# that this calls float_pow() which converts its
|
| 445 |
+
# arguments to double. */
|
| 446 |
+
# Py_DECREF(a);
|
| 447 |
+
# Py_DECREF(b);
|
| 448 |
+
# return PyFloat_Type.tp_as_number->nb_power(v, w, x);
|
| 449 |
+
# }
|
| 450 |
+
return sym_float(self).__pow__(sym_float(other))
|
| 451 |
+
|
| 452 |
+
def __rpow__(self, other):
|
| 453 |
+
if isinstance(other, (builtins.float, SymFloat)):
|
| 454 |
+
return sym_float(self).__rpow__(other)
|
| 455 |
+
if not isinstance(other, (builtins.int, SymInt)):
|
| 456 |
+
return NotImplemented
|
| 457 |
+
if self >= 0: # self is exponent
|
| 458 |
+
return self.__rpow_by_natural__(other)
|
| 459 |
+
else:
|
| 460 |
+
return sym_float(self).__rpow__(sym_float(other))
|
| 461 |
+
|
| 462 |
+
def __eq__(self, other: object) -> builtins.bool:
|
| 463 |
+
raise TypeError("type stub not overridden")
|
| 464 |
+
|
| 465 |
+
def __lt__(self, other) -> builtins.bool:
|
| 466 |
+
raise TypeError("type stub not overridden")
|
| 467 |
+
|
| 468 |
+
def __gt__(self, other) -> builtins.bool:
|
| 469 |
+
raise TypeError("type stub not overridden")
|
| 470 |
+
|
| 471 |
+
def __le__(self, other) -> builtins.bool:
|
| 472 |
+
raise TypeError("type stub not overridden")
|
| 473 |
+
|
| 474 |
+
def __ge__(self, other) -> builtins.bool:
|
| 475 |
+
raise TypeError("type stub not overridden")
|
| 476 |
+
|
| 477 |
+
def __add__(self, other) -> "SymInt":
|
| 478 |
+
raise TypeError("type stub not overridden")
|
| 479 |
+
|
| 480 |
+
def __mod__(self, other: "IntLikeType") -> "SymInt":
|
| 481 |
+
raise TypeError("type stub not overridden")
|
| 482 |
+
|
| 483 |
+
def __mul__(self, other) -> "SymInt":
|
| 484 |
+
raise TypeError("type stub not overridden")
|
| 485 |
+
|
| 486 |
+
def __pow_by_natural__(self, other) -> "SymInt":
|
| 487 |
+
raise TypeError("type stub not overridden")
|
| 488 |
+
|
| 489 |
+
def __rpow_by_natural__(self, other) -> "SymInt":
|
| 490 |
+
raise TypeError("type stub not overridden")
|
| 491 |
+
|
| 492 |
+
def __int_truediv__(self, other) -> "SymFloat":
|
| 493 |
+
raise TypeError("type stub not overridden")
|
| 494 |
+
|
| 495 |
+
def __rint_truediv__(self, other) -> "SymFloat":
|
| 496 |
+
raise TypeError("type stub not overridden")
|
| 497 |
+
|
| 498 |
+
def __int_floordiv__(self, other) -> "SymFloat":
|
| 499 |
+
raise TypeError("type stub not overridden")
|
| 500 |
+
|
| 501 |
+
def __rint_floordiv__(self, other) -> "SymFloat":
|
| 502 |
+
raise TypeError("type stub not overridden")
|
| 503 |
+
|
| 504 |
+
def __sym_max__(self, other):
|
| 505 |
+
raise TypeError("type stub not overridden")
|
| 506 |
+
|
| 507 |
+
def __sym_min__(self, other):
|
| 508 |
+
raise TypeError("type stub not overridden")
|
| 509 |
+
|
| 510 |
+
def __sym_float__(self):
|
| 511 |
+
raise TypeError("type stub not overridden")
|
| 512 |
+
|
| 513 |
+
def __neg__(self):
|
| 514 |
+
raise TypeError("type stub not overridden")
|
| 515 |
+
|
| 516 |
+
def __sub__(self, other: "IntLikeType") -> "SymInt":
|
| 517 |
+
raise TypeError("type stub not overridden")
|
| 518 |
+
|
| 519 |
+
def __repr__(self):
|
| 520 |
+
return self.node._graph_repr()
|
| 521 |
+
|
| 522 |
+
def _sympy_(self):
|
| 523 |
+
return self.node.expr
|
| 524 |
+
|
| 525 |
+
def __hash__(self) -> builtins.int:
|
| 526 |
+
if self.node.is_nested_int():
|
| 527 |
+
return hash(self.node.nested_int())
|
| 528 |
+
else:
|
| 529 |
+
# We could support constant SymInts as well, but not doing it for now
|
| 530 |
+
raise TypeError("unhashable type: non-nested SymInt")
|
| 531 |
+
# TODO: Force specialization
|
| 532 |
+
# This can't be done because the TypeError here is load bearing
|
| 533 |
+
# for einops
|
| 534 |
+
# https://github.com/arogozhnikov/einops/blob/6181e1e95dc58c00a3143c1726da1c6ee0463164/einops/einops.py#L237
|
| 535 |
+
# return hash(builtins.int(self))
|
| 536 |
+
|
| 537 |
+
def as_integer_ratio(self) -> _Tuple["SymInt", builtins.int]:
|
| 538 |
+
"""Represent this int as an exact integer ratio"""
|
| 539 |
+
return self, 1
|
| 540 |
+
|
| 541 |
+
def bit_length(self) -> builtins.int:
|
| 542 |
+
# TODO: A more relaxed guard is possible here, where you guard to
|
| 543 |
+
# allow all integer quantities which would result in the same bit
|
| 544 |
+
# length. We can also just make a dedicated Sympy function for
|
| 545 |
+
# computing this quantity and represent it symbolically.
|
| 546 |
+
return builtins.int(self).bit_length()
|
| 547 |
+
|
| 548 |
+
def conjugate(self) -> "SymInt":
|
| 549 |
+
return self
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
class SymFloat:
|
| 553 |
+
"""
|
| 554 |
+
Like an float (including magic methods), but redirects all operations on the
|
| 555 |
+
wrapped node. This is used in particular to symbolically record operations
|
| 556 |
+
in the symbolic shape workflow.
|
| 557 |
+
"""
|
| 558 |
+
|
| 559 |
+
def __init__(self, node):
|
| 560 |
+
# This field MUST be named node; C++ binding code assumes that this
|
| 561 |
+
# class has a field named node that stores SymNode
|
| 562 |
+
self.node = node
|
| 563 |
+
|
| 564 |
+
def __truediv__(self, other):
|
| 565 |
+
if not isinstance(other, (builtins.int, builtins.float, SymInt, SymFloat)):
|
| 566 |
+
return NotImplemented
|
| 567 |
+
return self.__float_truediv__(sym_float(other))
|
| 568 |
+
|
| 569 |
+
def __rtruediv__(self, other):
|
| 570 |
+
if not isinstance(other, (builtins.int, builtins.float, SymInt, SymFloat)):
|
| 571 |
+
return NotImplemented
|
| 572 |
+
return self.__rfloat_truediv__(sym_float(other))
|
| 573 |
+
|
| 574 |
+
def __floordiv__(self, other):
|
| 575 |
+
if not isinstance(other, (builtins.int, builtins.float, SymInt, SymFloat)):
|
| 576 |
+
return NotImplemented
|
| 577 |
+
return sym_float(math.floor(self / sym_float(other)))
|
| 578 |
+
|
| 579 |
+
def __rfloordiv__(self, other):
|
| 580 |
+
if not isinstance(other, (builtins.int, builtins.float, SymInt, SymFloat)):
|
| 581 |
+
return NotImplemented
|
| 582 |
+
return sym_float(math.floor(sym_float(other) / self))
|
| 583 |
+
|
| 584 |
+
def __bool__(self):
|
| 585 |
+
return self.node.bool_()
|
| 586 |
+
|
| 587 |
+
def __float__(self):
|
| 588 |
+
return self.node.guard_float("", 0)
|
| 589 |
+
|
| 590 |
+
# Symbolic power does NOT work with negative base, this is to avoid
|
| 591 |
+
# potential complex outputs
|
| 592 |
+
def __pow__(self, other):
|
| 593 |
+
if not isinstance(other, (builtins.int, builtins.float, SymInt, SymFloat)):
|
| 594 |
+
return NotImplemented
|
| 595 |
+
torch._check(self >= 0)
|
| 596 |
+
return self.__float_pow__(other)
|
| 597 |
+
|
| 598 |
+
def __rpow__(self, other):
|
| 599 |
+
if not isinstance(other, (builtins.int, builtins.float, SymInt, SymFloat)):
|
| 600 |
+
return NotImplemented
|
| 601 |
+
torch._check(other >= 0)
|
| 602 |
+
return self.__rfloat_pow__(other)
|
| 603 |
+
|
| 604 |
+
# Magic methods installed by torch.fx.experimental.sym_node
|
| 605 |
+
|
| 606 |
+
def __eq__(self, other: object) -> builtins.bool:
|
| 607 |
+
raise TypeError("type stub not overridden")
|
| 608 |
+
|
| 609 |
+
def __lt__(self, other) -> builtins.bool:
|
| 610 |
+
raise TypeError("type stub not overridden")
|
| 611 |
+
|
| 612 |
+
def __gt__(self, other) -> builtins.bool:
|
| 613 |
+
raise TypeError("type stub not overridden")
|
| 614 |
+
|
| 615 |
+
def __le__(self, other) -> builtins.bool:
|
| 616 |
+
raise TypeError("type stub not overridden")
|
| 617 |
+
|
| 618 |
+
def __ge__(self, other) -> builtins.bool:
|
| 619 |
+
raise TypeError("type stub not overridden")
|
| 620 |
+
|
| 621 |
+
def __float_pow__(self, other) -> "SymFloat":
|
| 622 |
+
raise TypeError("type stub not overridden")
|
| 623 |
+
|
| 624 |
+
def __rfloat_pow__(self, other) -> "SymFloat":
|
| 625 |
+
raise TypeError("type stub not overridden")
|
| 626 |
+
|
| 627 |
+
def __float_truediv__(self, other) -> "SymFloat":
|
| 628 |
+
raise TypeError("type stub not overridden")
|
| 629 |
+
|
| 630 |
+
def __rfloat_truediv__(self, other) -> "SymFloat":
|
| 631 |
+
raise TypeError("type stub not overridden")
|
| 632 |
+
|
| 633 |
+
def __trunc__(self):
|
| 634 |
+
raise TypeError("type stub not overridden")
|
| 635 |
+
|
| 636 |
+
def __sym_max__(self, other):
|
| 637 |
+
raise TypeError("type stub not overridden")
|
| 638 |
+
|
| 639 |
+
def __sym_min__(self, other):
|
| 640 |
+
raise TypeError("type stub not overridden")
|
| 641 |
+
|
| 642 |
+
def __sym_int__(self):
|
| 643 |
+
raise TypeError("type stub not overridden")
|
| 644 |
+
|
| 645 |
+
def is_integer(self):
|
| 646 |
+
"""Return True if the float is an integer."""
|
| 647 |
+
raise TypeError("type stub not overridden")
|
| 648 |
+
|
| 649 |
+
def as_integer_ratio(self) -> _Tuple[builtins.int, builtins.int]:
|
| 650 |
+
"""Represent this float as an exact integer ratio"""
|
| 651 |
+
return builtins.float(self).as_integer_ratio()
|
| 652 |
+
|
| 653 |
+
def __repr__(self):
|
| 654 |
+
return self.node._graph_repr()
|
| 655 |
+
|
| 656 |
+
def _sympy_(self):
|
| 657 |
+
return self.node.expr
|
| 658 |
+
|
| 659 |
+
def __hash__(self):
|
| 660 |
+
return hash(builtins.float(self))
|
| 661 |
+
|
| 662 |
+
|
| 663 |
+
class SymBool:
|
| 664 |
+
"""
|
| 665 |
+
Like an bool (including magic methods), but redirects all operations on the
|
| 666 |
+
wrapped node. This is used in particular to symbolically record operations
|
| 667 |
+
in the symbolic shape workflow.
|
| 668 |
+
|
| 669 |
+
Unlike regular bools, regular boolean operators will force extra guards instead
|
| 670 |
+
of symbolically evaluate. Use the bitwise operators instead to handle this.
|
| 671 |
+
"""
|
| 672 |
+
|
| 673 |
+
def __init__(self, node):
|
| 674 |
+
# This field MUST be named node; C++ binding code assumes that this
|
| 675 |
+
# class has a field named node that stores SymNode
|
| 676 |
+
self.node = node
|
| 677 |
+
|
| 678 |
+
def __bool__(self):
|
| 679 |
+
return self.node.bool_()
|
| 680 |
+
|
| 681 |
+
def __int__(self):
|
| 682 |
+
return builtins.int(self.node.bool_())
|
| 683 |
+
|
| 684 |
+
# Magic methods installed by torch.fx.experimental.sym_node
|
| 685 |
+
def __and__(self, other) -> "SymBool":
|
| 686 |
+
raise TypeError("type stub not overridden")
|
| 687 |
+
|
| 688 |
+
def __or__(self, other) -> "SymBool":
|
| 689 |
+
raise TypeError("type stub not overridden")
|
| 690 |
+
|
| 691 |
+
# We very carefully define __sym_not__, and not a number of other
|
| 692 |
+
# plausible alternatives:
|
| 693 |
+
#
|
| 694 |
+
# - We do not override __not__ because this is not a real magic
|
| 695 |
+
# method; you cannot override the meaning of the not builtin in
|
| 696 |
+
# Python. We use the name 'sym_not' to clarify that in user code you
|
| 697 |
+
# cannot use the builtin not or operator.not_ or operator.__not__ and
|
| 698 |
+
# hit this magic method; you must use our custom sym_not operator.
|
| 699 |
+
#
|
| 700 |
+
# - We do not override the __invert__ method because SymBool is
|
| 701 |
+
# meant to be usable in situations where bool is expected. However,
|
| 702 |
+
# bitwise negation ~a does the wrong thing with booleans (because
|
| 703 |
+
# bool is a subclass of int, so ~1 = -2 which is not falseish.)
|
| 704 |
+
# This would be a giant footgun, so we get around it by defining
|
| 705 |
+
# our own operator. Note that bitwise and/or do the right thing,
|
| 706 |
+
# so we reuse the conventional operators there for readability.
|
| 707 |
+
#
|
| 708 |
+
def __sym_not__(self) -> "SymBool":
|
| 709 |
+
raise TypeError("type stub not overridden")
|
| 710 |
+
|
| 711 |
+
def __sym_ite__(self, then_val, else_val):
|
| 712 |
+
raise TypeError("type stub not overridden")
|
| 713 |
+
|
| 714 |
+
def __eq__(self, other) -> builtins.bool:
|
| 715 |
+
raise TypeError("type stub not overridden")
|
| 716 |
+
|
| 717 |
+
def __repr__(self):
|
| 718 |
+
return self.node._graph_repr()
|
| 719 |
+
|
| 720 |
+
def _sympy_(self):
|
| 721 |
+
return self.node.expr
|
| 722 |
+
|
| 723 |
+
def __hash__(self):
|
| 724 |
+
if self.node.is_constant():
|
| 725 |
+
return hash(self.node.bool_())
|
| 726 |
+
else:
|
| 727 |
+
# Force specialization
|
| 728 |
+
return hash(builtins.bool(self))
|
| 729 |
+
|
| 730 |
+
|
| 731 |
+
def sym_not(a):
|
| 732 |
+
r"""SymInt-aware utility for logical negation.
|
| 733 |
+
|
| 734 |
+
Args:
|
| 735 |
+
a (SymBool or bool): Object to negate
|
| 736 |
+
"""
|
| 737 |
+
import sympy
|
| 738 |
+
|
| 739 |
+
if overrides.has_torch_function_unary(a):
|
| 740 |
+
return overrides.handle_torch_function(sym_not, (a,), a)
|
| 741 |
+
if hasattr(a, "__sym_not__"):
|
| 742 |
+
return a.__sym_not__()
|
| 743 |
+
if isinstance(a, sympy.Basic):
|
| 744 |
+
return ~a # type: ignore[operator]
|
| 745 |
+
return not a
|
| 746 |
+
|
| 747 |
+
|
| 748 |
+
def sym_float(a):
|
| 749 |
+
r"""SymInt-aware utility for float casting.
|
| 750 |
+
|
| 751 |
+
Args:
|
| 752 |
+
a (SymInt, SymFloat, or object): Object to cast
|
| 753 |
+
"""
|
| 754 |
+
if overrides.has_torch_function_unary(a):
|
| 755 |
+
return overrides.handle_torch_function(sym_float, (a,), a)
|
| 756 |
+
if isinstance(a, SymFloat):
|
| 757 |
+
return a
|
| 758 |
+
elif hasattr(a, "__sym_float__"):
|
| 759 |
+
return a.__sym_float__()
|
| 760 |
+
return builtins.float(a) # type: ignore[operator]
|
| 761 |
+
|
| 762 |
+
|
| 763 |
+
def sym_int(a):
|
| 764 |
+
r"""SymInt-aware utility for int casting.
|
| 765 |
+
|
| 766 |
+
Args:
|
| 767 |
+
a (SymInt, SymFloat, or object): Object to cast
|
| 768 |
+
"""
|
| 769 |
+
if overrides.has_torch_function_unary(a):
|
| 770 |
+
return overrides.handle_torch_function(sym_int, (a,), a)
|
| 771 |
+
if isinstance(a, SymInt):
|
| 772 |
+
return a
|
| 773 |
+
elif isinstance(a, SymFloat):
|
| 774 |
+
return math.trunc(a)
|
| 775 |
+
return builtins.int(a) # type: ignore[operator]
|
| 776 |
+
|
| 777 |
+
|
| 778 |
+
def sym_max(a, b):
|
| 779 |
+
"""
|
| 780 |
+
SymInt-aware utility for max which avoids branching on a < b.
|
| 781 |
+
Unlike builtins.max(), this only works for int/float, and it always
|
| 782 |
+
promotes to float if any argument is float (unlike builtins.max, which
|
| 783 |
+
will faithfully preserve the type of the input argument).
|
| 784 |
+
"""
|
| 785 |
+
if overrides.has_torch_function((a, b)):
|
| 786 |
+
return overrides.handle_torch_function(sym_max, (a, b), a, b)
|
| 787 |
+
if isinstance(a, (SymInt, SymFloat)):
|
| 788 |
+
return a.__sym_max__(b)
|
| 789 |
+
elif isinstance(b, (SymInt, SymFloat)):
|
| 790 |
+
# Due to promotion semantics, this is operator is commutative:
|
| 791 |
+
# max(1, 1.0) === max(1.0, 1) === 1.0
|
| 792 |
+
return b.__sym_max__(a)
|
| 793 |
+
# TODO: Probably can make bool work too, just lazy
|
| 794 |
+
|
| 795 |
+
all_types, float_types = __all_and_float_types()
|
| 796 |
+
|
| 797 |
+
assert isinstance(a, all_types), type(a)
|
| 798 |
+
assert isinstance(b, all_types), type(b)
|
| 799 |
+
if isinstance(a, float_types) or isinstance(b, float_types):
|
| 800 |
+
return builtins.float(builtins.max(a, b))
|
| 801 |
+
else:
|
| 802 |
+
return builtins.max(a, b)
|
| 803 |
+
|
| 804 |
+
|
| 805 |
+
def __all_and_float_types() -> _Tuple[_Tuple[_Type, ...], _Tuple[_Type, ...]]:
|
| 806 |
+
try:
|
| 807 |
+
import numpy as np
|
| 808 |
+
|
| 809 |
+
all_types: _Tuple[_Type, ...] = (
|
| 810 |
+
np.integer,
|
| 811 |
+
np.floating,
|
| 812 |
+
builtins.int,
|
| 813 |
+
builtins.float,
|
| 814 |
+
)
|
| 815 |
+
float_types: _Tuple[_Type, ...] = (np.floating, builtins.float)
|
| 816 |
+
except ModuleNotFoundError:
|
| 817 |
+
all_types = (builtins.int, builtins.float)
|
| 818 |
+
float_types = (builtins.float,)
|
| 819 |
+
|
| 820 |
+
return all_types, float_types
|
| 821 |
+
|
| 822 |
+
|
| 823 |
+
def sym_min(a, b):
|
| 824 |
+
"""SymInt-aware utility for min()."""
|
| 825 |
+
if overrides.has_torch_function((a, b)):
|
| 826 |
+
return overrides.handle_torch_function(sym_min, (a, b), a, b)
|
| 827 |
+
if isinstance(a, (SymInt, SymFloat)):
|
| 828 |
+
return a.__sym_min__(b)
|
| 829 |
+
elif isinstance(b, (SymInt, SymFloat)):
|
| 830 |
+
return b.__sym_min__(a)
|
| 831 |
+
|
| 832 |
+
all_types, float_types = __all_and_float_types()
|
| 833 |
+
|
| 834 |
+
assert isinstance(a, all_types), type(a)
|
| 835 |
+
assert isinstance(b, all_types), type(b)
|
| 836 |
+
if isinstance(a, float_types) or isinstance(b, float_types):
|
| 837 |
+
return builtins.float(builtins.min(a, b))
|
| 838 |
+
else:
|
| 839 |
+
return builtins.min(a, b)
|
| 840 |
+
|
| 841 |
+
|
| 842 |
+
# Drop in replacement for math.sqrt, math.sin, math.cos etc
|
| 843 |
+
def _get_sym_math_fn(name):
|
| 844 |
+
def fn(a):
|
| 845 |
+
if overrides.has_torch_function_unary(a):
|
| 846 |
+
return overrides.handle_torch_function(fn, (a,), a)
|
| 847 |
+
if hasattr(a, f"__sym_{name}__"):
|
| 848 |
+
return getattr(a, f"__sym_{name}__")()
|
| 849 |
+
return getattr(math, name)(a)
|
| 850 |
+
|
| 851 |
+
return fn
|
| 852 |
+
|
| 853 |
+
|
| 854 |
+
__fn, __name, __sym_name = None, "", ""
|
| 855 |
+
for __name in (
|
| 856 |
+
"sqrt",
|
| 857 |
+
"cos",
|
| 858 |
+
"cosh",
|
| 859 |
+
"sin",
|
| 860 |
+
"sinh",
|
| 861 |
+
"tan",
|
| 862 |
+
"tanh",
|
| 863 |
+
"asin",
|
| 864 |
+
"acos",
|
| 865 |
+
"atan",
|
| 866 |
+
):
|
| 867 |
+
__sym_name = f"_sym_{__name}"
|
| 868 |
+
__fn = _get_sym_math_fn(__name)
|
| 869 |
+
__fn.__qualname__ = __fn.__name__ = __sym_name
|
| 870 |
+
globals()[__sym_name] = __fn
|
| 871 |
+
|
| 872 |
+
del __fn, __name, __sym_name, _get_sym_math_fn
|
| 873 |
+
|
| 874 |
+
# Adding temporary shortcut
|
| 875 |
+
sym_sqrt = globals()["_sym_sqrt"]
|
| 876 |
+
__all__.append("sym_sqrt")
|
| 877 |
+
|
| 878 |
+
|
| 879 |
+
def sym_ite(b, t, f):
|
| 880 |
+
if overrides.has_torch_function((b, t, f)):
|
| 881 |
+
return overrides.handle_torch_function(sym_ite, (b, t, f), b, t, f)
|
| 882 |
+
assert isinstance(b, (SymBool, builtins.bool)) and type(t) == type(f)
|
| 883 |
+
if isinstance(b, SymBool):
|
| 884 |
+
return b.__sym_ite__(t, f)
|
| 885 |
+
return t if b else f
|
| 886 |
+
|
| 887 |
+
|
| 888 |
+
# Check to see if we can load C extensions, and if not provide some guidance
# on what the problem might be.
try:
    # _initExtension is chosen (arbitrarily) as a sentinel.
    from torch._C import _initExtension
except ImportError:
    import torch._C as _C_for_compiled_check

    # A pure-Python `torch/_C` package reports `__file__ is None` on
    # Python 3.7+; that indicates the source tree shadowed the compiled
    # extension. Any other cause is unknown, so re-raise unchanged.
    if _C_for_compiled_check.__file__ is not None:
        raise
    raise ImportError(
        textwrap.dedent(
            """
            Failed to load PyTorch C extensions:
            It appears that PyTorch has loaded the `torch/_C` folder
            of the PyTorch repository rather than the C extensions which
            are expected in the `torch._C` namespace. This can occur when
            using the `install` workflow. e.g.
                $ python setup.py install && python -c "import torch"

            This error can generally be solved using the `develop` workflow
                $ python setup.py develop && python -c "import torch"  # This should succeed
            or by running Python from a different directory.
            """
        ).strip()
    ) from None

# The torch._C submodule is already loaded via `from torch._C import *` above
# Make an explicit reference to the _C submodule to appease linters
from torch import _C as _C
|
| 919 |
+
|
| 920 |
+
|
| 921 |
+
# Walk the C extension's public names, re-export each from `torch`, and
# normalize the reported `__module__` so help()/pickling point at "torch".
__name, __obj = "", None
for __name in dir(_C):
    if __name[0] == "_" or __name.endswith("Base"):
        if __name == "TensorBase":
            # issue 109438 / pr 109940. Prevent TensorBase from being copied into torch.
            delattr(sys.modules[__name__], __name)
        continue
    __all__.append(__name)
    __obj = getattr(_C, __name)
    if callable(__obj) or inspect.isclass(__obj):
        if __obj.__module__ != __name__:  # "torch"
            # TODO: fix their module from C++ side
            if __name not in {
                "DisableTorchFunctionSubclass",
                "DisableTorchFunction",
                "Generator",
            }:
                __obj.__module__ = __name__  # "torch"

del __name, __obj
|
| 940 |
+
|
| 941 |
+
if not TYPE_CHECKING:
    # issue 38137 and python issue 43367. Submodules of a C extension are
    # non-standard, and attributes of those submodules cannot be pickled since
    # pickle expect to be able to import them as "from _C.sub import attr"
    # which fails with "_C is not a package
    def _import_extension_to_sys_modules(module, memo=None):
        """Recursively register `module`'s extension submodules in sys.modules."""
        memo = set() if memo is None else memo
        if module in memo:
            return  # already visited; guards against reference cycles
        memo.add(module)
        prefix = module.__name__
        for attr_name in dir(module):
            member = getattr(module, attr_name)
            if not inspect.ismodule(member):
                continue
            member_name = getattr(member, "__name__", "")
            if member_name.startswith(prefix):
                sys.modules.setdefault(member_name, member)
                # Recurse for submodules (e.g., `_C._dynamo.eval_frame`)
                _import_extension_to_sys_modules(member, memo)

    _import_extension_to_sys_modules(_C)
    del _import_extension_to_sys_modules
|
| 963 |
+
|
| 964 |
+
################################################################################
|
| 965 |
+
# Define basic utilities
|
| 966 |
+
################################################################################
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
def typename(obj: _Any, /) -> str:
    """Return a fully qualified string naming the type of ``obj``.

    Tensors report their legacy type string (e.g. ``'torch.LongTensor'``).
    Anything else is rendered as ``module.qualname``; the module prefix is
    dropped for builtins and module-less objects.

    Args:
        obj (object): The object whose type to represent

    Returns:
        str: the type of the object `o`

    Example:
        >>> x = torch.tensor([1, 2, 3])
        >>> torch.typename(x)
        'torch.LongTensor'
        >>> torch.typename(torch.nn.Parameter)
        'torch.nn.parameter.Parameter'
    """
    if isinstance(obj, torch.Tensor):
        return obj.type()

    module = getattr(obj, "__module__", "") or ""
    if hasattr(obj, "__qualname__"):
        qualname = obj.__qualname__
    elif hasattr(obj, "__name__"):
        qualname = obj.__name__
    else:
        # Plain instance: describe its class instead.
        module = obj.__class__.__module__ or ""
        qualname = obj.__class__.__qualname__

    return qualname if module in {"", "builtins"} else f"{module}.{qualname}"
|
| 1002 |
+
|
| 1003 |
+
|
| 1004 |
+
def is_tensor(obj: _Any, /) -> _TypeGuard["torch.Tensor"]:
|
| 1005 |
+
r"""Returns True if `obj` is a PyTorch tensor.
|
| 1006 |
+
|
| 1007 |
+
Note that this function is simply doing ``isinstance(obj, Tensor)``.
|
| 1008 |
+
Using that ``isinstance`` check is better for typechecking with mypy,
|
| 1009 |
+
and more explicit - so it's recommended to use that instead of
|
| 1010 |
+
``is_tensor``.
|
| 1011 |
+
|
| 1012 |
+
Args:
|
| 1013 |
+
obj (object): Object to test
|
| 1014 |
+
Example::
|
| 1015 |
+
|
| 1016 |
+
>>> x = torch.tensor([1, 2, 3])
|
| 1017 |
+
>>> torch.is_tensor(x)
|
| 1018 |
+
True
|
| 1019 |
+
|
| 1020 |
+
"""
|
| 1021 |
+
return isinstance(obj, torch.Tensor)
|
| 1022 |
+
|
| 1023 |
+
|
| 1024 |
+
def is_storage(obj: _Any, /) -> _TypeGuard[_Union["TypedStorage", "UntypedStorage"]]:
|
| 1025 |
+
r"""Returns True if `obj` is a PyTorch storage object.
|
| 1026 |
+
|
| 1027 |
+
Args:
|
| 1028 |
+
obj (Object): Object to test
|
| 1029 |
+
"""
|
| 1030 |
+
return type(obj) in _storage_classes
|
| 1031 |
+
|
| 1032 |
+
|
| 1033 |
+
# Thread-local holder for the DeviceContext installed by set_default_device();
# each thread sees its own `device_context` attribute (absent until set).
_GLOBAL_DEVICE_CONTEXT = threading.local()
|
| 1034 |
+
|
| 1035 |
+
|
| 1036 |
+
def get_default_device() -> "torch.device":
    r"""Return the default :class:`torch.device` new tensors are allocated on.

    Reflects the most recent :func:`torch.set_default_device` call on this
    thread; when no default has been installed, the CPU device is returned.
    """
    global _GLOBAL_DEVICE_CONTEXT

    if not hasattr(_GLOBAL_DEVICE_CONTEXT, "device_context"):
        # set_default_device() has never run on this thread.
        return torch.device("cpu")

    device = _GLOBAL_DEVICE_CONTEXT.device_context.device
    if device.index is not None:
        return device
    # TODO: Call like get_device_index() method corresponding to
    # each device type
    # Index-less default (e.g. plain "cuda"): materialize an empty tensor to
    # learn which concrete device the allocator currently targets.
    return torch.tensor([]).device
|
| 1050 |
+
|
| 1051 |
+
|
| 1052 |
+
def set_default_device(
|
| 1053 |
+
device: _Optional[_Union["torch.device", str, builtins.int]],
|
| 1054 |
+
) -> None:
|
| 1055 |
+
"""Sets the default ``torch.Tensor`` to be allocated on ``device``. This
|
| 1056 |
+
does not affect factory function calls which are called with an explicit
|
| 1057 |
+
``device`` argument. Factory calls will be performed as if they
|
| 1058 |
+
were passed ``device`` as an argument.
|
| 1059 |
+
|
| 1060 |
+
To only temporarily change the default device instead of setting it
|
| 1061 |
+
globally, use ``with torch.device(device):`` instead.
|
| 1062 |
+
|
| 1063 |
+
The default device is initially ``cpu``. If you set the default tensor
|
| 1064 |
+
device to another device (e.g., ``cuda``) without a device index, tensors
|
| 1065 |
+
will be allocated on whatever the current device for the device type,
|
| 1066 |
+
even after :func:`torch.cuda.set_device` is called.
|
| 1067 |
+
|
| 1068 |
+
.. warning::
|
| 1069 |
+
|
| 1070 |
+
This function imposes a slight performance cost on every Python
|
| 1071 |
+
call to the torch API (not just factory functions). If this
|
| 1072 |
+
is causing problems for you, please comment on
|
| 1073 |
+
https://github.com/pytorch/pytorch/issues/92701
|
| 1074 |
+
|
| 1075 |
+
.. note::
|
| 1076 |
+
|
| 1077 |
+
This doesn't affect functions that create tensors that share the same memory as the input, like:
|
| 1078 |
+
:func:`torch.from_numpy` and :func:`torch.frombuffer`
|
| 1079 |
+
|
| 1080 |
+
Args:
|
| 1081 |
+
device (device or string): the device to set as default
|
| 1082 |
+
|
| 1083 |
+
Example::
|
| 1084 |
+
|
| 1085 |
+
>>> # xdoctest: +SKIP("requires cuda, changes global state")
|
| 1086 |
+
>>> torch.get_default_device()
|
| 1087 |
+
device(type='cpu')
|
| 1088 |
+
>>> torch.set_default_device('cuda') # current device is 0
|
| 1089 |
+
>>> torch.get_default_device()
|
| 1090 |
+
device(type='cuda', index=0)
|
| 1091 |
+
>>> torch.set_default_device('cuda')
|
| 1092 |
+
>>> torch.cuda.set_device('cuda:1') # current device is 1
|
| 1093 |
+
>>> torch.get_default_device()
|
| 1094 |
+
device(type='cuda', index=1)
|
| 1095 |
+
>>> torch.set_default_device('cuda:1')
|
| 1096 |
+
>>> torch.get_default_device()
|
| 1097 |
+
device(type='cuda', index=1)
|
| 1098 |
+
|
| 1099 |
+
"""
|
| 1100 |
+
global _GLOBAL_DEVICE_CONTEXT
|
| 1101 |
+
if hasattr(_GLOBAL_DEVICE_CONTEXT, "device_context"):
|
| 1102 |
+
device_context = _GLOBAL_DEVICE_CONTEXT.device_context
|
| 1103 |
+
if device_context is not None:
|
| 1104 |
+
device_context.__exit__(None, None, None)
|
| 1105 |
+
|
| 1106 |
+
if device is None:
|
| 1107 |
+
device_context = None
|
| 1108 |
+
else:
|
| 1109 |
+
from torch.utils._device import DeviceContext
|
| 1110 |
+
|
| 1111 |
+
device_context = DeviceContext(device)
|
| 1112 |
+
device_context.__enter__()
|
| 1113 |
+
_GLOBAL_DEVICE_CONTEXT.device_context = device_context
|
| 1114 |
+
|
| 1115 |
+
|
| 1116 |
+
def set_default_tensor_type(t: _Union[_Type["torch.Tensor"], str], /) -> None:
    r"""
    .. warning::

        This function is deprecated as of PyTorch 2.1, please use
        :func:`torch.set_default_dtype()` and :func:`torch.set_default_device()`
        as alternatives.

    Set the default ``torch.Tensor`` type to the floating point tensor type
    ``t`` (initially ``torch.FloatTensor``). That type also becomes the
    default floating point type for inference in :func:`torch.tensor`.

    Args:
        t (type or string): the floating point tensor type or its name

    Example::

        >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
        >>> torch.set_default_tensor_type(torch.DoubleTensor)
        >>> torch.tensor([1.2, 3]).dtype  # a new floating point tensor
        torch.float64

    """
    # A dotted name such as "torch.FloatTensor" is resolved to the class first.
    if isinstance(t, str):
        t = _import_dotted_name(t)
    _C._set_default_tensor_type(t)
|
| 1145 |
+
|
| 1146 |
+
|
| 1147 |
+
def set_default_dtype(d: "torch.dtype", /) -> None:
    r"""Set the default floating point dtype to :attr:`d`.

    Only floating point dtypes are accepted; other dtypes make torch raise.
    The default (initially ``torch.float32``) governs:

    1. The matching default complex dtype: float16 -> complex32,
       float32 -> complex64, float64 -> complex128. bfloat16 raises, since it
       has no corresponding complex type.
    2. Dtype inference for tensors constructed from Python floats or complex
       Python numbers.
    3. Type promotion between bool/integer tensors and Python float or
       complex scalars.

    Args:
        d (:class:`torch.dtype`): the floating point dtype to make the default.

    Example:
        >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
        >>> torch.set_default_dtype(torch.float64)
        >>> torch.tensor([1.2, 3]).dtype  # a new floating point tensor
        torch.float64
        >>> torch.tensor([1.2, 3j]).dtype  # a new complex tensor
        torch.complex128

    """
    _C._set_default_dtype(d)
|
| 1198 |
+
|
| 1199 |
+
|
| 1200 |
+
def use_deterministic_algorithms(
    mode: builtins.bool,
    *,
    warn_only: builtins.bool = False,
) -> None:
    r"""Globally toggle the requirement that PyTorch ops behave deterministically.

    When ``mode=True``, operations use a deterministic algorithm whenever one
    is available, and raise :class:`RuntimeError` (or merely warn, with
    ``warn_only=True``) when only nondeterministic implementations exist.
    When ``mode=False``, operations may freely use nondeterministic
    algorithms. Deterministic algorithms tend to be slower.

    .. note:: This setting alone is not always enough to make an application
        reproducible. Refer to :ref:`reproducibility` for more information,
        including the full lists of affected operations and the
        ``CUBLAS_WORKSPACE_CONFIG`` environment variable required on
        CUDA 10.2+ for :func:`torch.mm`, :func:`torch.mv`, and
        :func:`torch.bmm`.

    .. note:: :func:`torch.set_deterministic_debug_mode` offers an
        alternative interface for this feature.

    .. note:: This flag does not detect or prevent nondeterminism caused by
        in-place operations on tensors with internal memory overlap, or by
        passing such a tensor as the :attr:`out` argument of an operation.

    Args:
        mode (:class:`bool`): If True, makes potentially nondeterministic
            operations switch to a deterministic algorithm or throw a runtime
            error. If False, allows nondeterministic operations.

    Keyword args:
        warn_only (:class:`bool`, optional): If True, operations that do not
            have a deterministic implementation will throw a warning instead of
            an error. Default: ``False``

    Example::

        >>> # xdoctest: +SKIP
        >>> torch.use_deterministic_algorithms(True)
        >>> torch.randn(10, device='cuda').kthvalue(1)
        ...
        RuntimeError: kthvalue CUDA does not have a deterministic implementation...
    """
    _C._set_deterministic_algorithms(mode, warn_only=warn_only)
|
| 1340 |
+
|
| 1341 |
+
|
| 1342 |
+
def are_deterministic_algorithms_enabled() -> builtins.bool:
    r"""Report whether the global deterministic flag is on.

    See :func:`torch.use_deterministic_algorithms` for the flag's meaning.
    """
    return _C._get_deterministic_algorithms()
|
| 1347 |
+
|
| 1348 |
+
|
| 1349 |
+
def is_deterministic_algorithms_warn_only_enabled() -> builtins.bool:
    r"""Report whether the global deterministic flag is in warn-only mode.

    See :func:`torch.use_deterministic_algorithms` for details.
    """
    return _C._get_deterministic_algorithms_warn_only()
|
| 1355 |
+
|
| 1356 |
+
|
| 1357 |
+
def set_deterministic_debug_mode(debug_mode: _Union[builtins.int, str]) -> None:
    r"""Set the debug mode for deterministic operations.

    .. note:: This is an alternative interface for
        :func:`torch.use_deterministic_algorithms`. Refer to that function's
        documentation for details about affected operations.

    Args:
        debug_mode(str or int): If "default" or 0, don't error or warn on
            nondeterministic operations. If "warn" or 1, warn on
            nondeterministic operations. If "error" or 2, error on
            nondeterministic operations.
    """

    # NOTE: builtins.int is used here because int in this scope resolves
    # to torch.int
    if not isinstance(debug_mode, (builtins.int, str)):
        raise TypeError(f"debug_mode must be str or int, but got {type(debug_mode)}")

    if isinstance(debug_mode, str):
        # Map the symbolic names onto the numeric levels handled below.
        name_to_level = {"default": 0, "warn": 1, "error": 2}
        if debug_mode not in name_to_level:
            raise RuntimeError(
                "invalid value of debug_mode, expected one of `default`, "
                f"`warn`, `error`, but got {debug_mode}"
            )
        debug_mode = name_to_level[debug_mode]

    if debug_mode == 0:
        _C._set_deterministic_algorithms(False)
    elif debug_mode == 1:
        _C._set_deterministic_algorithms(True, warn_only=True)
    elif debug_mode == 2:
        _C._set_deterministic_algorithms(True)
    else:
        raise RuntimeError(
            "invalid value of debug_mode, expected 0, 1, or 2, " f"but got {debug_mode}"
        )
|
| 1399 |
+
|
| 1400 |
+
|
| 1401 |
+
def get_deterministic_debug_mode() -> builtins.int:
    r"""Return the current deterministic-operations debug level (0, 1, or 2).

    See :func:`torch.set_deterministic_debug_mode` for the meaning of each
    level.
    """
    if not _C._get_deterministic_algorithms():
        return 0
    # Flag is on: distinguish warn-only (1) from hard-error (2) mode.
    return 1 if _C._get_deterministic_algorithms_warn_only() else 2
|
| 1414 |
+
|
| 1415 |
+
|
| 1416 |
+
def get_float32_matmul_precision() -> str:
    r"""Return the current float32 matmul precision setting.

    See :func:`torch.set_float32_matmul_precision` for the possible values
    and their meaning.
    """
    return _C._get_float32_matmul_precision()
|
| 1421 |
+
|
| 1422 |
+
|
| 1423 |
+
def set_float32_matmul_precision(precision: str) -> None:
    r"""Choose the internal precision of float32 matrix multiplications.

    Running float32 matmuls in lower internal precision can significantly
    increase performance, often with negligible accuracy loss. Three settings
    are supported:

    * ``"highest"`` (default): compute internally in true float32
      (24 mantissa bits with 23 bits explicitly stored).
    * ``"high"``: use the TensorFloat32 datatype (10 mantissa bits explicitly
      stored), or decompose each float32 operand into a sum of two bfloat16
      numbers, when the appropriate fast kernels are available; otherwise
      fall back to "highest". The bfloat16 decomposition keeps only the
      three most significant of the nine bfloat16 partial products; see
      [Henry2019]_ for the full algorithm.
    * ``"medium"``: use bfloat16 internally (8 mantissa bits with 7 bits
      explicitly stored) when a fast kernel exists, otherwise behave like
      "high".

    .. [Henry2019] http://arxiv.org/abs/1904.06376

    .. note::

        This does not change the output dtype of float32 matmuls — only how
        the internal computation is performed.

    .. note::

        This does not change the precision of convolution operations. Other
        flags, like `torch.backends.cudnn.allow_tf32`, may control the
        precision of convolution operations.

    .. note::

        This flag currently only affects one native device type: CUDA.
        "high" or "medium" is equivalent to setting
        `torch.backends.cuda.matmul.allow_tf32 = True`; "highest" (the
        default) is equivalent to setting it to False.

    Args:
        precision(str): can be set to "highest" (default), "high", or "medium" (see above).

    """
    _C._set_float32_matmul_precision(precision)
|
| 1487 |
+
|
| 1488 |
+
|
| 1489 |
+
def set_warn_always(b: builtins.bool, /) -> None:
    r"""Control whether PyTorch repeats warnings that are normally shown once.

    By default (``False``) some PyTorch warnings fire only once per process,
    to avoid excessive warning noise. Passing ``True`` forces those warnings
    to appear on every occurrence, which can help while debugging.

    Args:
        b (:class:`bool`): If True, force warnings to always be emitted
            If False, set to the default behaviour
    """
    _C._set_warnAlways(b)
|
| 1500 |
+
|
| 1501 |
+
|
| 1502 |
+
def is_warn_always_enabled() -> builtins.bool:
    r"""Report whether the global warn_always flag is on.

    See :func:`torch.set_warn_always` for the flag's meaning.
    """
    return _C._get_warnAlways()
|
| 1507 |
+
|
| 1508 |
+
|
| 1509 |
+
################################################################################
|
| 1510 |
+
# Define error checking functions
|
| 1511 |
+
################################################################################
|
| 1512 |
+
|
| 1513 |
+
# These error checking functions must be kept consistent with their C++
|
| 1514 |
+
# equivalents. Their C++ equivalents are mentioned where applicable.
|
| 1515 |
+
|
| 1516 |
+
|
| 1517 |
+
def _check_with(
|
| 1518 |
+
error_type,
|
| 1519 |
+
cond: _Union[builtins.bool, SymBool],
|
| 1520 |
+
message: _Callable[[], str],
|
| 1521 |
+
): # noqa: F811
|
| 1522 |
+
if not isinstance(cond, (builtins.bool, SymBool)):
|
| 1523 |
+
raise TypeError(f"cond must be a bool, but got {type(cond)}")
|
| 1524 |
+
|
| 1525 |
+
from torch.fx.experimental.symbolic_shapes import expect_true
|
| 1526 |
+
|
| 1527 |
+
if expect_true(cond):
|
| 1528 |
+
return
|
| 1529 |
+
|
| 1530 |
+
# error_type must be a subclass of Exception and not subclass of Warning
|
| 1531 |
+
assert issubclass(error_type, Exception) and not issubclass(error_type, Warning)
|
| 1532 |
+
|
| 1533 |
+
if message is None:
|
| 1534 |
+
message_evaluated = (
|
| 1535 |
+
"Expected cond to be True, but got False. (Could this error "
|
| 1536 |
+
"message be improved? If so, please report an enhancement request "
|
| 1537 |
+
"to PyTorch.)"
|
| 1538 |
+
)
|
| 1539 |
+
|
| 1540 |
+
else:
|
| 1541 |
+
if not callable(message):
|
| 1542 |
+
raise TypeError("message must be a callable")
|
| 1543 |
+
|
| 1544 |
+
message_evaluated = str(message())
|
| 1545 |
+
|
| 1546 |
+
raise error_type(message_evaluated)
|
| 1547 |
+
|
| 1548 |
+
|
| 1549 |
+
def _check(cond, message=None): # noqa: F811
|
| 1550 |
+
r"""Throws error containing an optional message if the specified condition
|
| 1551 |
+
is False.
|
| 1552 |
+
|
| 1553 |
+
Error type: ``RuntimeError``
|
| 1554 |
+
|
| 1555 |
+
C++ equivalent: ``TORCH_CHECK``
|
| 1556 |
+
|
| 1557 |
+
Args:
|
| 1558 |
+
cond (:class:`bool`): If False, throw error
|
| 1559 |
+
|
| 1560 |
+
message (Callable, optional): Callable that returns either a string or
|
| 1561 |
+
an object that has a ``__str__()`` method to be used as the error
|
| 1562 |
+
message. Default: ``None``
|
| 1563 |
+
"""
|
| 1564 |
+
_check_with(RuntimeError, cond, message)
|
| 1565 |
+
|
| 1566 |
+
|
| 1567 |
+
def _check_is_size(i, message=None):
    """Verify that ``i`` is a legitimate size, i.e. that it is non-negative.

    Prefer this over ``_check(i >= 0)``: knowing that ``i`` is a size lets the
    symbolic-shapes machinery draw further inferences when ``i`` is an
    unbacked SymInt.

    NB: Do NOT use this where ``-1`` would be a valid value (meaning "infer
    the size from context", or wrap-around/truncation semantics). Reserve it
    for honest to goodness sizes only.
    """
    # The expect_true side of the check happens inside _check.
    _check(i >= 0, message)
    from torch.fx.experimental.symbolic_shapes import _advise_is_size

    # Record the size-ness hint for unbacked symbolic ints.
    _advise_is_size(i)
|
| 1582 |
+
|
| 1583 |
+
|
| 1584 |
+
def _check_index(cond, message=None):  # noqa: F811
    r"""Raise an ``IndexError`` (with an optional message) when ``cond`` is False.

    Error type: ``IndexError``

    C++ equivalent: ``TORCH_CHECK_INDEX``

    Args:
        cond (:class:`bool`): Condition to verify; the error is raised when it
            is False.

        message (Callable, optional): Callable returning a string, or any
            object with a ``__str__()`` method, used as the error message.
            Default: ``None``
    """
    _check_with(IndexError, cond, message)
|
| 1600 |
+
|
| 1601 |
+
|
| 1602 |
+
def _check_value(cond, message=None):  # noqa: F811
    r"""Raise a ``ValueError`` (with an optional message) when ``cond`` is False.

    Error type: ``ValueError``

    C++ equivalent: ``TORCH_CHECK_VALUE``

    Args:
        cond (:class:`bool`): Condition to verify; the error is raised when it
            is False.

        message (Callable, optional): Callable returning a string, or any
            object with a ``__str__()`` method, used as the error message.
            Default: ``None``
    """
    _check_with(ValueError, cond, message)
|
| 1618 |
+
|
| 1619 |
+
|
| 1620 |
+
def _check_type(cond, message=None):  # noqa: F811
    r"""Raise a ``TypeError`` (with an optional message) when ``cond`` is False.

    Error type: ``TypeError``

    C++ equivalent: ``TORCH_CHECK_TYPE``

    Args:
        cond (:class:`bool`): Condition to verify; the error is raised when it
            is False.

        message (Callable, optional): Callable returning a string, or any
            object with a ``__str__()`` method, used as the error message.
            Default: ``None``
    """
    _check_with(TypeError, cond, message)
|
| 1636 |
+
|
| 1637 |
+
|
| 1638 |
+
def _check_not_implemented(cond, message=None):  # noqa: F811
    r"""Raise a ``NotImplementedError`` (with an optional message) when ``cond``
    is False.

    Error type: ``NotImplementedError``

    C++ equivalent: ``TORCH_CHECK_NOT_IMPLEMENTED``

    Args:
        cond (:class:`bool`): Condition to verify; the error is raised when it
            is False.

        message (Callable, optional): Callable returning a string, or any
            object with a ``__str__()`` method, used as the error message.
            Default: ``None``
    """
    _check_with(NotImplementedError, cond, message)
|
| 1654 |
+
|
| 1655 |
+
|
| 1656 |
+
def _check_tensor_all_with(error_type, cond, message=None):  # noqa: F811
    """Tensor-valued variant of ``_check_with``: every element of the boolean
    tensor ``cond`` must be True, otherwise ``error_type`` is raised."""
    # Validate the condition tensor before reducing it.
    if not is_tensor(cond):
        raise TypeError(f"cond must be a tensor, but got {type(cond)}")

    if cond.dtype != torch.bool:
        raise TypeError(f"cond tensor must have dtype torch.bool, but got {cond.dtype}")

    # Reduce with "all" and defer to the scalar checker.
    _check_with(error_type, cond._is_all_true().item(), message)  # type: ignore[arg-type]
|
| 1664 |
+
|
| 1665 |
+
|
| 1666 |
+
# C++ equivalent: `TORCH_CHECK_TENSOR_ALL`
def _check_tensor_all(cond, message=None):  # noqa: F811
    r"""Raise a ``RuntimeError`` (with an optional message) unless every element
    of ``cond`` is True.

    Error type: ``RuntimeError``

    C++ equivalent: ``TORCH_CHECK_TENSOR_ALL``

    Args:
        cond (:class:`torch.Tensor`): Tensor of dtype ``torch.bool``; the error
            is raised if any element is ``False``.

        message (Callable, optional): Callable returning a string, or any
            object with a ``__str__()`` method, used as the error message.
            Default: ``None``
    """
    _check_tensor_all_with(RuntimeError, cond, message)
|
| 1684 |
+
|
| 1685 |
+
|
| 1686 |
+
################################################################################
# Define numeric constants
################################################################################

# For Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) and
# NumPy consistency (https://numpy.org/devdocs/reference/constants.html)
from math import e, inf, nan, pi


# Array-API alias: indexing with `newaxis` inserts a new dimension.
newaxis: None = None

__all__.extend(["e", "pi", "nan", "inf", "newaxis"])

################################################################################
# Define Storage and Tensor classes
################################################################################

from torch._tensor import Tensor  # usort: skip

# needs to be after torch.Tensor is defined to avoid circular dependencies
from torch import storage as storage  # usort: skip
from torch.storage import (
    _LegacyStorage,
    _StorageBase,
    _warn_typed_storage_removal,
    TypedStorage,
    UntypedStorage,
)


# NOTE: New <type>Storage classes should never be added. When adding a new
# dtype, use torch.storage.TypedStorage directly.
|
| 1718 |
+
# Each legacy <Type>Storage class below is a deprecated shim over
# TypedStorage: reading the public `dtype` classproperty emits a deprecation
# warning via _warn_typed_storage_removal, while the private `_dtype`
# classproperty exposes the underlying torch dtype without warning.
class ByteStorage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.uint8


class DoubleStorage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.double


class FloatStorage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.float


class HalfStorage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.half


class LongStorage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.long


class IntStorage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.int


class ShortStorage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.short


class CharStorage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.int8


class BoolStorage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.bool


class BFloat16Storage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.bfloat16


class ComplexDoubleStorage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.cdouble


class ComplexFloatStorage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.cfloat


class QUInt8Storage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.quint8


class QInt8Storage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.qint8


class QInt32Storage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.qint32


class QUInt4x2Storage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.quint4x2


class QUInt2x4Storage(_LegacyStorage):
    @classproperty
    def dtype(self):
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.quint2x4
|
| 1903 |
+
|
| 1904 |
+
|
| 1905 |
+
# Registry of all storage classes; handed to _C._init_names below so the C++
# side can report proper class names.
_storage_classes: _Set[_Type[_Union[TypedStorage, UntypedStorage]]] = {
    UntypedStorage,
    DoubleStorage,
    FloatStorage,
    LongStorage,
    IntStorage,
    ShortStorage,
    CharStorage,
    ByteStorage,
    HalfStorage,
    BoolStorage,
    QUInt8Storage,
    QInt8Storage,
    QInt32Storage,
    BFloat16Storage,
    ComplexFloatStorage,
    ComplexDoubleStorage,
    QUInt4x2Storage,
    QUInt2x4Storage,
    TypedStorage,
}

# The _tensor_classes set is initialized by the call to initialize_python_bindings.
_tensor_classes: _Set[_Type["torch.Tensor"]] = set()

# If you edit these imports, please update torch/__init__.py.in as well
from torch import amp as amp, random as random, serialization as serialization
from torch._tensor_str import set_printoptions
from torch.amp import autocast, GradScaler
from torch.random import get_rng_state, initial_seed, manual_seed, seed, set_rng_state
from torch.serialization import load, save
|
| 1936 |
+
|
| 1937 |
+
|
| 1938 |
+
################################################################################
|
| 1939 |
+
# Initialize extension
|
| 1940 |
+
################################################################################
|
| 1941 |
+
|
| 1942 |
+
|
| 1943 |
+
# Shared memory manager needs to know the exact location of manager executable
def _manager_path():
    """Return the path of the torch_shm_manager binary as UTF-8 bytes.

    Returns an empty byte string under torch::deploy or on Windows, where the
    shared-memory manager is not used.
    """
    if _running_with_deploy() or platform.system() == "Windows":
        return b""
    path = get_file_path("torch", "bin", "torch_shm_manager")
    # Side effect: sets up env vars the multiprocessing machinery needs.
    prepare_multiprocessing_environment(get_file_path("torch"))
    if not os.path.exists(path):
        raise RuntimeError("Unable to find torch_shm_manager at " + path)
    # _C._initExtension expects the path encoded as bytes.
    return path.encode("utf-8")
|
| 1952 |
+
|
| 1953 |
+
|
| 1954 |
+
_C._initExtension(_manager_path())

# The manager path is only needed once, at extension-init time.
del _manager_path

# Appease the type checker: it can't deal with direct setting of globals().
# Note that we will see "too many" functions when reexporting this way; there
# is not a good way to fix this problem. Perhaps, try to redesign VariableFunctions
# so that this import is good enough
if TYPE_CHECKING:
    # Some type signatures pulled in from _VariableFunctions here clash with
    # signatures already imported. For now these clashes are ignored; see
    # PR #43339 for details.
    from torch._C._VariableFunctions import *  # type: ignore[assignment, misc] # noqa: F403
|
| 1967 |
+
|
| 1968 |
+
# Fixup segment_reduce visibility
_segment_reduce = segment_reduce
del segment_reduce  # noqa: F821

# Ops not to be exposed in `torch` namespace,
# mostly helper ops.
PRIVATE_OPS = ("unique_dim",)

# Re-export every C-level variable function on the torch namespace, tracking
# which ones are public in __all__.
__name, __obj = "", None
for __name in dir(_C._VariableFunctions):
    if __name.startswith("__") or __name in PRIVATE_OPS:
        continue
    __obj = getattr(_C._VariableFunctions, __name)
    __obj.__module__ = __name__  # "torch"
    # Hide some APIs that should not be public
    if __name == "segment_reduce":
        # TODO: Once the undocumented FC window is passed, remove the line below
        globals()[__name] = __obj
        __name = "_" + __name
    globals()[__name] = __obj
    if not __name.startswith("_"):
        __all__.append(__name)

del __name, __obj
|
| 1992 |
+
|
| 1993 |
+
################################################################################
# Add torch.dtype instances to the public API
################################################################################

import torch


# Every dtype object (torch.float32, torch.int64, ...) becomes public API.
__all__.extend(
    name for name in dir(torch) if isinstance(getattr(torch, name), torch.dtype)
)

################################################################################
# Import TorchDynamo's lazy APIs to avoid circular dependencies
################################################################################

# needs to be before from torch.functional import * to avoid circular dependencies
from torch._compile import _disable_dynamo  # usort: skip

################################################################################
# Import interface functions defined in Python
################################################################################

# needs to be after the above ATen bindings so we can overwrite from Python side
from torch import _VF as _VF, functional as functional  # usort: skip
from torch.functional import *  # usort: skip # noqa: F403

################################################################################
# Remove unnecessary members
################################################################################

del _StorageBase
del _LegacyStorage
|
| 2025 |
+
|
| 2026 |
+
################################################################################
|
| 2027 |
+
# Define _assert
|
| 2028 |
+
################################################################################
|
| 2029 |
+
|
| 2030 |
+
|
| 2031 |
+
# needs to be before the submodule imports to avoid circular dependencies
|
| 2032 |
+
def _assert(condition, message):
|
| 2033 |
+
r"""A wrapper around Python's assert which is symbolically traceable."""
|
| 2034 |
+
if type(condition) is not torch.Tensor and overrides.has_torch_function(
|
| 2035 |
+
(condition,)
|
| 2036 |
+
):
|
| 2037 |
+
return overrides.handle_torch_function(
|
| 2038 |
+
_assert, (condition,), condition, message
|
| 2039 |
+
)
|
| 2040 |
+
assert condition, message
|
| 2041 |
+
|
| 2042 |
+
|
| 2043 |
+
################################################################################
# Import most common subpackages
################################################################################

# Use the redundant form so that type checkers know that these are a part of
# the public API. The "regular" import lines are there solely for the runtime
# side effect of adding to the imported module's members for other users.

# needs to be before import torch.nn as nn to avoid circular dependencies
from torch.autograd import (  # usort: skip
    enable_grad as enable_grad,
    inference_mode as inference_mode,
    no_grad as no_grad,
    set_grad_enabled as set_grad_enabled,
)

from torch import (
    __config__ as __config__,
    __future__ as __future__,
    _awaits as _awaits,
    autograd as autograd,
    backends as backends,
    cpu as cpu,
    cuda as cuda,
    distributed as distributed,
    distributions as distributions,
    fft as fft,
    futures as futures,
    hub as hub,
    jit as jit,
    linalg as linalg,
    mps as mps,
    mtia as mtia,
    multiprocessing as multiprocessing,
    nested as nested,
    nn as nn,
    optim as optim,
    overrides as overrides,
    profiler as profiler,
    sparse as sparse,
    special as special,
    testing as testing,
    types as types,
    utils as utils,
    xpu as xpu,
)
from torch.signal import windows as windows
|
| 2090 |
+
|
| 2091 |
+
|
| 2092 |
+
# Quantized, sparse, AO, etc. should be last to get imported, as nothing
# is expected to depend on them.
from torch import ao as ao  # usort: skip

# nn.quant* depends on ao -- so should be after those.
import torch.nn.intrinsic
import torch.nn.qat
import torch.nn.quantizable
import torch.nn.quantized


# Give the C++ side the Python storage class names (used in error messages).
_C._init_names(list(_storage_classes))

# attach docstrings to torch and tensor functions
from torch import _size_docs, _storage_docs, _tensor_docs, _torch_docs


# The *_docs modules attach docstrings as an import side effect; the module
# objects themselves are not needed afterwards.
del _torch_docs, _tensor_docs, _storage_docs, _size_docs
|
| 2110 |
+
|
| 2111 |
+
|
| 2112 |
+
def compiled_with_cxx11_abi() -> builtins.bool:
    r"""Return True if PyTorch was built with ``_GLIBCXX_USE_CXX11_ABI=1``."""
    return _C._GLIBCXX_USE_CXX11_ABI
|
| 2115 |
+
|
| 2116 |
+
|
| 2117 |
+
from torch import _library as _library, _ops as _ops


# Import the ops and classes "namespace"
from torch._ops import ops as ops  # usort: skip
from torch._classes import classes as classes  # usort: skip

# Make `torch.ops` / `torch.classes` importable as submodules.
sys.modules.setdefault(f"{__name__}.ops", ops)
sys.modules.setdefault(f"{__name__}.classes", classes)

# quantization depends on torch.fx and torch.ops
# Import quantization
from torch import quantization as quantization  # usort: skip

# Import the quasi random sampler
from torch import quasirandom as quasirandom  # usort: skip

# If you are seeing this, it means that this call site was not checked if
# the memory format could be preserved, and it was switched to old default
# behaviour of contiguous
legacy_contiguous_format = contiguous_format  # defined by _C._initExtension()

# Register fork handler to initialize OpenMP in child processes (see gh-28389)
from torch.multiprocessing._atfork import register_after_fork


register_after_fork(torch.get_num_threads)
del register_after_fork

# Import tools that require fully imported torch (for applying
# torch.jit.script as a decorator, for instance):
from torch._lobpcg import lobpcg as lobpcg


# These were previously defined in native_functions.yaml and appeared on the
# `torch` namespace, but we moved them to c10 dispatch to facilitate custom
# class usage. We add these lines here to preserve backward compatibility.
quantized_lstm = ops.aten.quantized_lstm
quantized_gru = ops.aten.quantized_gru

# Import experimental masked operations support. See
# [RFC-0016](https://github.com/pytorch/rfcs/pull/27) for more
# information.
from torch import masked as masked

# Import removed ops with error message about removal
from torch._linalg_utils import (  # type: ignore[misc]
    _symeig as symeig,
    eig,
    lstsq,
    matrix_rank,
    solve,
)
from torch.utils.dlpack import from_dlpack, to_dlpack
|
| 2171 |
+
|
| 2172 |
+
|
| 2173 |
+
class _TorchCompileInductorWrapper:
|
| 2174 |
+
compiler_name = "inductor"
|
| 2175 |
+
|
| 2176 |
+
def __init__(self, mode, options, dynamic):
|
| 2177 |
+
self.config: _Dict[str, _Any] = {}
|
| 2178 |
+
self.dynamic = dynamic
|
| 2179 |
+
self.apply_mode(mode)
|
| 2180 |
+
self.apply_options(options)
|
| 2181 |
+
|
| 2182 |
+
if self.config.get("triton.cudagraphs", False):
|
| 2183 |
+
os.environ["DISABLE_CUPTI_LAZY_REINIT"] = "1"
|
| 2184 |
+
# FIXME: CUDA Graph does not work well with CUPTI teardown.
|
| 2185 |
+
# 1) crashes on 1st lazy CUPTI re-init after teardown (CUDA 11)
|
| 2186 |
+
# 2) crashes on 2nd non-lazy CUPTI re-init after teardown (CUDA 12)
|
| 2187 |
+
# Workaround: turn off CUPTI teardown when using CUDA Graphs.
|
| 2188 |
+
os.environ["TEARDOWN_CUPTI"] = "0"
|
| 2189 |
+
|
| 2190 |
+
def __eq__(self, other):
|
| 2191 |
+
return (
|
| 2192 |
+
isinstance(other, _TorchCompileInductorWrapper)
|
| 2193 |
+
and self.config == other.config
|
| 2194 |
+
and self.dynamic == other.dynamic
|
| 2195 |
+
)
|
| 2196 |
+
|
| 2197 |
+
def apply_mode(self, mode: _Optional[str]):
|
| 2198 |
+
if mode is None or mode == "default":
|
| 2199 |
+
pass
|
| 2200 |
+
elif mode in {"reduce-overhead", "max-autotune", "max-autotune-no-cudagraphs"}:
|
| 2201 |
+
from torch._inductor import list_mode_options
|
| 2202 |
+
|
| 2203 |
+
self.apply_options(list_mode_options(mode, self.dynamic))
|
| 2204 |
+
else:
|
| 2205 |
+
raise RuntimeError(
|
| 2206 |
+
f"Unrecognized mode={mode}, should be one of: default, reduce-overhead, max-autotune, max-autotune-no-cudagraphs"
|
| 2207 |
+
)
|
| 2208 |
+
|
| 2209 |
+
def apply_options(self, options: _Optional[_Dict[str, _Any]]):
|
| 2210 |
+
if not options:
|
| 2211 |
+
return
|
| 2212 |
+
|
| 2213 |
+
from torch._inductor import config
|
| 2214 |
+
|
| 2215 |
+
current_config: _Dict[str, _Any] = config.shallow_copy_dict()
|
| 2216 |
+
|
| 2217 |
+
for key, val in options.items():
|
| 2218 |
+
attr_name = key.replace("-", "_")
|
| 2219 |
+
if attr_name not in current_config:
|
| 2220 |
+
raise RuntimeError(
|
| 2221 |
+
f"Unexpected optimization option {key}, known options are {list(current_config.keys())}"
|
| 2222 |
+
)
|
| 2223 |
+
if type(val) is not type(current_config[attr_name]):
|
| 2224 |
+
val_type_str = type(val).__name__
|
| 2225 |
+
expected_type_str = type(current_config[attr_name]).__name__
|
| 2226 |
+
raise RuntimeError(
|
| 2227 |
+
f"Unexpected type of attr {key}, got {val_type_str} should be {expected_type_str}"
|
| 2228 |
+
)
|
| 2229 |
+
self.config[attr_name] = val
|
| 2230 |
+
|
| 2231 |
+
def __call__(self, model_, inputs_):
|
| 2232 |
+
from torch._inductor.compile_fx import compile_fx
|
| 2233 |
+
|
| 2234 |
+
return compile_fx(model_, inputs_, config_patches=self.config)
|
| 2235 |
+
|
| 2236 |
+
def get_compiler_config(self):
|
| 2237 |
+
from torch._inductor.compile_fx import get_patched_config_dict
|
| 2238 |
+
|
| 2239 |
+
return get_patched_config_dict(config_patches=self.config)
|
| 2240 |
+
|
| 2241 |
+
def reset(self):
|
| 2242 |
+
from torch._inductor import config
|
| 2243 |
+
|
| 2244 |
+
if "triton.cudagraphs" in self.config or config.triton.cudagraphs:
|
| 2245 |
+
if self.config.get("triton.cudagraphs", True):
|
| 2246 |
+
from torch._inductor.cudagraph_trees import reset_cudagraph_trees
|
| 2247 |
+
|
| 2248 |
+
reset_cudagraph_trees()
|
| 2249 |
+
|
| 2250 |
+
|
| 2251 |
+
class _TorchCompileWrapper:
|
| 2252 |
+
def __init__(self, backend, mode, options, dynamic):
|
| 2253 |
+
from torch._dynamo.backends.registry import lookup_backend
|
| 2254 |
+
|
| 2255 |
+
if isinstance(backend, str):
|
| 2256 |
+
self.compiler_name = backend
|
| 2257 |
+
elif hasattr(backend, "__name__"):
|
| 2258 |
+
self.compiler_name = backend.__name__
|
| 2259 |
+
else:
|
| 2260 |
+
self.compiler_name = str(backend)
|
| 2261 |
+
self.dynamic = dynamic
|
| 2262 |
+
self.compiler_fn = lookup_backend(backend)
|
| 2263 |
+
self.kwargs = {}
|
| 2264 |
+
# only pass the args if they non-empty
|
| 2265 |
+
if mode and mode != "default":
|
| 2266 |
+
self.kwargs["mode"] = mode
|
| 2267 |
+
if options:
|
| 2268 |
+
self.kwargs["options"] = options
|
| 2269 |
+
|
| 2270 |
+
def __eq__(self, other):
|
| 2271 |
+
return (
|
| 2272 |
+
isinstance(other, _TorchCompileWrapper)
|
| 2273 |
+
and self.compiler_fn == other.compiler_fn
|
| 2274 |
+
and self.kwargs == other.kwargs
|
| 2275 |
+
and self.dynamic == other.dynamic
|
| 2276 |
+
)
|
| 2277 |
+
|
| 2278 |
+
def __call__(self, model_, inputs_):
|
| 2279 |
+
return self.compiler_fn(model_, inputs_, **self.kwargs)
|
| 2280 |
+
|
| 2281 |
+
def reset(self):
|
| 2282 |
+
if hasattr(self.compiler_fn, "reset"):
|
| 2283 |
+
self.compiler_fn.reset()
|
| 2284 |
+
|
| 2285 |
+
|
| 2286 |
+
# ParamSpec/TypeVar pair used to give torch.compile a signature-preserving
# (decorator-friendly) static type.
_InputT = _ParamSpec("_InputT")
_RetT = _TypeVar("_RetT")
|
| 2288 |
+
|
| 2289 |
+
|
| 2290 |
+
# Typing overloads for torch.compile: called with a model it returns the
# compiled callable directly; called with model=None it acts as a decorator
# factory returning a callable that compiles the decorated function.
@_overload
def compile(
    model: _Callable[_InputT, _RetT],
    *,
    fullgraph: builtins.bool = False,
    dynamic: _Optional[builtins.bool] = None,
    backend: _Union[str, _Callable] = "inductor",
    mode: _Union[str, None] = None,
    options: _Optional[_Dict[str, _Union[str, builtins.int, builtins.bool]]] = None,
    disable: builtins.bool = False,
) -> _Callable[_InputT, _RetT]: ...


@_overload
def compile(
    model: None = None,
    *,
    fullgraph: builtins.bool = False,
    dynamic: _Optional[builtins.bool] = None,
    backend: _Union[str, _Callable] = "inductor",
    mode: _Union[str, None] = None,
    options: _Optional[_Dict[str, _Union[str, builtins.int, builtins.bool]]] = None,
    disable: builtins.bool = False,
) -> _Callable[[_Callable[_InputT, _RetT]], _Callable[_InputT, _RetT]]: ...
|
| 2314 |
+
|
| 2315 |
+
|
| 2316 |
+
def compile(
|
| 2317 |
+
model: _Optional[_Callable] = None,
|
| 2318 |
+
*,
|
| 2319 |
+
fullgraph: builtins.bool = False,
|
| 2320 |
+
dynamic: _Optional[builtins.bool] = None,
|
| 2321 |
+
backend: _Union[str, _Callable] = "inductor",
|
| 2322 |
+
mode: _Union[str, None] = None,
|
| 2323 |
+
options: _Optional[_Dict[str, _Union[str, builtins.int, builtins.bool]]] = None,
|
| 2324 |
+
disable: builtins.bool = False,
|
| 2325 |
+
) -> _Union[
|
| 2326 |
+
_Callable[[_Callable[_InputT, _RetT]], _Callable[_InputT, _RetT]],
|
| 2327 |
+
_Callable[_InputT, _RetT],
|
| 2328 |
+
]:
|
| 2329 |
+
"""
|
| 2330 |
+
Optimizes given model/function using TorchDynamo and specified backend.
|
| 2331 |
+
If you are compiling an :class:`torch.nn.Module`, you can also use :meth:`torch.nn.Module.compile`
|
| 2332 |
+
to compile the module inplace without changing its structure.
|
| 2333 |
+
|
| 2334 |
+
Concretely, for every frame executed within the compiled region, we will attempt
|
| 2335 |
+
to compile it and cache the compiled result on the code object for future
|
| 2336 |
+
use. A single frame may be compiled multiple times if previous compiled
|
| 2337 |
+
results are not applicable for subsequent calls (this is called a "guard
|
| 2338 |
+
failure), you can use TORCH_LOGS=guards to debug these situations.
|
| 2339 |
+
Multiple compiled results can be associated with a frame up to
|
| 2340 |
+
``torch._dynamo.config.cache_size_limit``, which defaults to 8; at which
|
| 2341 |
+
point we will fall back to eager. Note that compile caches are per
|
| 2342 |
+
*code object*, not frame; if you dynamically create multiple copies of a
|
| 2343 |
+
function, they will all share the same code cache.
|
| 2344 |
+
|
| 2345 |
+
Args:
|
| 2346 |
+
model (Callable): Module/function to optimize
|
| 2347 |
+
fullgraph (bool): If False (default), torch.compile attempts to discover compileable regions
|
| 2348 |
+
in the function that it will optimize. If True, then we require that the entire function be
|
| 2349 |
+
capturable into a single graph. If this is not possible (that is, if there are graph breaks),
|
| 2350 |
+
then this will raise an error.
|
| 2351 |
+
dynamic (bool or None): Use dynamic shape tracing. When this is True, we will up-front attempt
|
| 2352 |
+
to generate a kernel that is as dynamic as possible to avoid recompilations when
|
| 2353 |
+
sizes change. This may not always work as some operations/optimizations will
|
| 2354 |
+
force specialization; use TORCH_LOGS=dynamic to debug overspecialization.
|
| 2355 |
+
When this is False, we will NEVER generate dynamic kernels, we will always specialize.
|
| 2356 |
+
By default (None), we automatically detect if dynamism has occurred and compile a more
|
| 2357 |
+
dynamic kernel upon recompile.
|
| 2358 |
+
backend (str or Callable): backend to be used
|
| 2359 |
+
|
| 2360 |
+
- "inductor" is the default backend, which is a good balance between performance and overhead
|
| 2361 |
+
|
| 2362 |
+
- Non experimental in-tree backends can be seen with `torch._dynamo.list_backends()`
|
| 2363 |
+
|
| 2364 |
+
- Experimental or debug in-tree backends can be seen with `torch._dynamo.list_backends(None)`
|
| 2365 |
+
|
| 2366 |
+
- To register an out-of-tree custom backend:
|
| 2367 |
+
https://pytorch.org/docs/main/torch.compiler_custom_backends.html#registering-custom-backends
|
| 2368 |
+
mode (str): Can be either "default", "reduce-overhead", "max-autotune" or "max-autotune-no-cudagraphs"
|
| 2369 |
+
|
| 2370 |
+
- "default" is the default mode, which is a good balance between performance and overhead
|
| 2371 |
+
|
| 2372 |
+
- "reduce-overhead" is a mode that reduces the overhead of python with CUDA graphs,
|
| 2373 |
+
useful for small batches. Reduction of overhead can come at the cost of more memory
|
| 2374 |
+
usage, as we will cache the workspace memory required for the invocation so that we
|
| 2375 |
+
do not have to reallocate it on subsequent runs. Reduction of overhead is not guaranteed
|
| 2376 |
+
to work; today, we only reduce overhead for CUDA only graphs which do not mutate inputs.
|
| 2377 |
+
There are other circumstances where CUDA graphs are not applicable; use TORCH_LOG=perf_hints
|
| 2378 |
+
to debug.
|
| 2379 |
+
|
| 2380 |
+
- "max-autotune" is a mode that leverages Triton or template based matrix multiplications
|
| 2381 |
+
on supported devices and Triton based convolutions on GPU.
|
| 2382 |
+
It enables CUDA graphs by default on GPU.
|
| 2383 |
+
|
| 2384 |
+
- "max-autotune-no-cudagraphs" is a mode similar to "max-autotune" but without CUDA graphs
|
| 2385 |
+
|
| 2386 |
+
- To see the exact configs that each mode sets you can call `torch._inductor.list_mode_options()`
|
| 2387 |
+
|
| 2388 |
+
options (dict): A dictionary of options to pass to the backend. Some notable ones to try out are
|
| 2389 |
+
|
| 2390 |
+
- `epilogue_fusion` which fuses pointwise ops into templates. Requires `max_autotune` to also be set
|
| 2391 |
+
|
| 2392 |
+
- `max_autotune` which will profile to pick the best matmul configuration
|
| 2393 |
+
|
| 2394 |
+
- `fallback_random` which is useful when debugging accuracy issues
|
| 2395 |
+
|
| 2396 |
+
- `shape_padding` which pads matrix shapes to better align loads on GPUs especially for tensor cores
|
| 2397 |
+
|
| 2398 |
+
- `triton.cudagraphs` which will reduce the overhead of python with CUDA graphs
|
| 2399 |
+
|
| 2400 |
+
- `trace.enabled` which is the most useful debugging flag to turn on
|
| 2401 |
+
|
| 2402 |
+
- `trace.graph_diagram` which will show you a picture of your graph after fusion
|
| 2403 |
+
|
| 2404 |
+
- For inductor you can see the full list of configs that it supports by calling `torch._inductor.list_options()`
|
| 2405 |
+
disable (bool): Turn torch.compile() into a no-op for testing
|
| 2406 |
+
|
| 2407 |
+
Example::
|
| 2408 |
+
|
| 2409 |
+
@torch.compile(options={"triton.cudagraphs": True}, fullgraph=True)
|
| 2410 |
+
def foo(x):
|
| 2411 |
+
return torch.sin(x) + torch.cos(x)
|
| 2412 |
+
|
| 2413 |
+
"""
|
| 2414 |
+
_C._log_api_usage_once("torch.compile")
|
| 2415 |
+
if sys.version_info >= (3, 13):
|
| 2416 |
+
raise RuntimeError("Dynamo is not supported on Python 3.13+")
|
| 2417 |
+
|
| 2418 |
+
# Decorator mode
|
| 2419 |
+
if model is None:
|
| 2420 |
+
|
| 2421 |
+
def fn(model: _Callable[_InputT, _RetT]) -> _Callable[_InputT, _RetT]:
|
| 2422 |
+
if model is None:
|
| 2423 |
+
raise RuntimeError("Model can't be None")
|
| 2424 |
+
return compile(
|
| 2425 |
+
model,
|
| 2426 |
+
fullgraph=fullgraph,
|
| 2427 |
+
dynamic=dynamic,
|
| 2428 |
+
backend=backend,
|
| 2429 |
+
mode=mode,
|
| 2430 |
+
options=options,
|
| 2431 |
+
disable=disable,
|
| 2432 |
+
)
|
| 2433 |
+
|
| 2434 |
+
return fn
|
| 2435 |
+
|
| 2436 |
+
if mode is not None and options is not None:
|
| 2437 |
+
raise RuntimeError(
|
| 2438 |
+
"Either mode or options can be specified, but both can't be specified at the same time."
|
| 2439 |
+
)
|
| 2440 |
+
if mode is None and options is None:
|
| 2441 |
+
mode = "default"
|
| 2442 |
+
if backend == "inductor":
|
| 2443 |
+
backend = _TorchCompileInductorWrapper(mode, options, dynamic)
|
| 2444 |
+
else:
|
| 2445 |
+
backend = _TorchCompileWrapper(backend, mode, options, dynamic)
|
| 2446 |
+
|
| 2447 |
+
return torch._dynamo.optimize(
|
| 2448 |
+
backend=backend,
|
| 2449 |
+
nopython=fullgraph,
|
| 2450 |
+
dynamic=dynamic,
|
| 2451 |
+
disable=disable,
|
| 2452 |
+
)(model) # type: ignore[return-value]
|
| 2453 |
+
|
| 2454 |
+
|
| 2455 |
+
def _register_device_module(device_type, module):
|
| 2456 |
+
r"""Register an external runtime module of the specific :attr:`device_type`
|
| 2457 |
+
supported by torch.
|
| 2458 |
+
|
| 2459 |
+
After the :attr:`module` is registered correctly, the user can refer
|
| 2460 |
+
the external runtime module as part of torch with attribute torch.xxx.
|
| 2461 |
+
"""
|
| 2462 |
+
# Make sure the device_type represent a supported device type for torch.
|
| 2463 |
+
device_type = torch.device(device_type).type
|
| 2464 |
+
m = sys.modules[__name__]
|
| 2465 |
+
if hasattr(m, device_type):
|
| 2466 |
+
raise RuntimeError(
|
| 2467 |
+
f"The runtime module of '{device_type}' has already "
|
| 2468 |
+
f"been registered with '{getattr(m, device_type)}'"
|
| 2469 |
+
)
|
| 2470 |
+
setattr(m, device_type, module)
|
| 2471 |
+
torch_module_name = ".".join([__name__, device_type])
|
| 2472 |
+
sys.modules[torch_module_name] = module
|
| 2473 |
+
|
| 2474 |
+
|
| 2475 |
+
from torch import (
|
| 2476 |
+
export as export,
|
| 2477 |
+
func as func,
|
| 2478 |
+
library as library,
|
| 2479 |
+
return_types as return_types,
|
| 2480 |
+
)
|
| 2481 |
+
from torch._higher_order_ops import cond as cond, while_loop as while_loop
|
| 2482 |
+
from torch.func import vmap as vmap
|
| 2483 |
+
|
| 2484 |
+
|
| 2485 |
+
if not TYPE_CHECKING:
|
| 2486 |
+
from torch import _meta_registrations
|
| 2487 |
+
|
| 2488 |
+
# Enable CUDA Sanitizer
|
| 2489 |
+
if "TORCH_CUDA_SANITIZER" in os.environ:
|
| 2490 |
+
import torch.cuda._sanitizer as csan
|
| 2491 |
+
|
| 2492 |
+
csan.enable_cuda_sanitizer()
|
| 2493 |
+
|
| 2494 |
+
# Populate magic methods on SymInt and SymFloat
|
| 2495 |
+
import torch.fx.experimental.sym_node
|
| 2496 |
+
|
| 2497 |
+
|
| 2498 |
+
# Register MPS specific decomps
|
| 2499 |
+
torch.backends.mps._init()
|
| 2500 |
+
|
| 2501 |
+
if not _running_with_deploy():
|
| 2502 |
+
from torch import compiler as compiler
|
| 2503 |
+
|
| 2504 |
+
class _TritonLibrary:
|
| 2505 |
+
lib = torch.library.Library("triton", "DEF")
|
| 2506 |
+
ops_table: _Dict[_Tuple[str, str], _Callable] = {}
|
| 2507 |
+
|
| 2508 |
+
@classmethod
|
| 2509 |
+
def registerOp(cls, op_key, full_schema, op_impl, dispatch_key):
|
| 2510 |
+
if (op_key, dispatch_key) not in cls.ops_table:
|
| 2511 |
+
cls.lib.define(full_schema)
|
| 2512 |
+
cls.lib.impl("triton::" + op_key, op_impl, dispatch_key)
|
| 2513 |
+
cls.ops_table[(op_key, dispatch_key)] = op_impl
|
| 2514 |
+
|
| 2515 |
+
return cls.ops_table[(op_key, dispatch_key)]
|
| 2516 |
+
|
| 2517 |
+
|
| 2518 |
+
# Deprecated attributes
|
| 2519 |
+
_deprecated_attrs = {
|
| 2520 |
+
"has_mps": torch.backends.mps.is_built,
|
| 2521 |
+
"has_cuda": torch.backends.cuda.is_built,
|
| 2522 |
+
"has_cudnn": torch.backends.cudnn.is_available,
|
| 2523 |
+
"has_mkldnn": torch.backends.mkldnn.is_available,
|
| 2524 |
+
}
|
| 2525 |
+
|
| 2526 |
+
if TYPE_CHECKING:
|
| 2527 |
+
# Import the following modules during type checking to enable code intelligence features,
|
| 2528 |
+
# such as auto-completion in tools like pylance, even when these modules are not explicitly
|
| 2529 |
+
# imported in user code.
|
| 2530 |
+
from torch import (
|
| 2531 |
+
_dynamo as _dynamo,
|
| 2532 |
+
_inductor as _inductor,
|
| 2533 |
+
_subclasses as _subclasses,
|
| 2534 |
+
onnx as onnx,
|
| 2535 |
+
)
|
| 2536 |
+
|
| 2537 |
+
else:
|
| 2538 |
+
_lazy_modules = {
|
| 2539 |
+
"_dynamo",
|
| 2540 |
+
"_inductor",
|
| 2541 |
+
"_export",
|
| 2542 |
+
# ONNX must be imported after _dynamo, _ops, _subclasses, fx, func and jit
|
| 2543 |
+
"onnx",
|
| 2544 |
+
}
|
| 2545 |
+
|
| 2546 |
+
def __getattr__(name):
|
| 2547 |
+
# Deprecated attrs
|
| 2548 |
+
replacement = _deprecated_attrs.get(name)
|
| 2549 |
+
if replacement is not None:
|
| 2550 |
+
import warnings
|
| 2551 |
+
|
| 2552 |
+
warnings.warn(
|
| 2553 |
+
f"'{name}' is deprecated, please use '{replacement.__module__}.{replacement.__name__}()'",
|
| 2554 |
+
stacklevel=2,
|
| 2555 |
+
)
|
| 2556 |
+
return replacement()
|
| 2557 |
+
|
| 2558 |
+
# Lazy modules
|
| 2559 |
+
if name in _lazy_modules:
|
| 2560 |
+
return importlib.import_module(f".{name}", __name__)
|
| 2561 |
+
|
| 2562 |
+
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
| 2563 |
+
|
| 2564 |
+
|
| 2565 |
+
def get_device_module(device: _Optional[_Union[torch.device, str]] = None):
|
| 2566 |
+
"""
|
| 2567 |
+
Returns the module associated with a given device(e.g., torch.device('cuda'), "mtia:0", "xpu", ...).
|
| 2568 |
+
If no device is given, return the module for the current accelerator or CPU if none is present.
|
| 2569 |
+
"""
|
| 2570 |
+
if isinstance(device, torch.device):
|
| 2571 |
+
device_module_name = device.type
|
| 2572 |
+
elif isinstance(device, str):
|
| 2573 |
+
device_module_name = torch.device(device).type
|
| 2574 |
+
elif device is None:
|
| 2575 |
+
# Using default accelerator type. If no accelerator is available, it automatically returns CPU device.
|
| 2576 |
+
device_module_name = torch._C._get_accelerator().type
|
| 2577 |
+
else:
|
| 2578 |
+
raise RuntimeError(
|
| 2579 |
+
f"Invalid value of device '{device}', expect torch.device, str, or None"
|
| 2580 |
+
)
|
| 2581 |
+
device_module = getattr(torch, device_module_name, None)
|
| 2582 |
+
if device_module is None:
|
| 2583 |
+
raise RuntimeError(
|
| 2584 |
+
f"Device '{device_module_name}' does not have a corresponding module registered as 'torch.{device_module_name}'."
|
| 2585 |
+
)
|
| 2586 |
+
return device_module
|
| 2587 |
+
|
| 2588 |
+
|
| 2589 |
+
def _constrain_as_size(
|
| 2590 |
+
symbol,
|
| 2591 |
+
min: _Optional[builtins.int] = None,
|
| 2592 |
+
max: _Optional[builtins.int] = None,
|
| 2593 |
+
):
|
| 2594 |
+
"""
|
| 2595 |
+
This indicates that a given int is size-like, and can be used in any context where a size is expected.
|
| 2596 |
+
You will typically use this when reading out integers from Tensors, e.g., max.item() or lengths.tolist()
|
| 2597 |
+
which then need to be used as tensor constructors. Providing these assertions to PyTorch can help resolve
|
| 2598 |
+
GuardOnDataDependentSymNode errors upon export, since we cannot guard on unbacked SymInts.
|
| 2599 |
+
|
| 2600 |
+
This function has unusual semantics in some circumstances in framework
|
| 2601 |
+
code, we will treat this int as >= 2 (when we do a size-oblivious guard).
|
| 2602 |
+
This makes it easier to use the unbacked int in size contexts,
|
| 2603 |
+
as we will often attempt to guard on a size being zero/one
|
| 2604 |
+
(e.g., when computing the contiguity of a tensor, or testing if
|
| 2605 |
+
broadcasting can occur), which will not work on unbacked SymInts.
|
| 2606 |
+
However, if we conservatively assume that the size is not zero/one, we will
|
| 2607 |
+
end up with a graph that will still work even if the size is zero/one.
|
| 2608 |
+
|
| 2609 |
+
For more details, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit
|
| 2610 |
+
```
|
| 2611 |
+
"""
|
| 2612 |
+
torch.sym_constrain_range_for_size(symbol, min=min, max=max)
|
| 2613 |
+
|
| 2614 |
+
|
| 2615 |
+
from torch import _logging
|
| 2616 |
+
|
| 2617 |
+
|
| 2618 |
+
_logging._init_logs()
|
| 2619 |
+
|
| 2620 |
+
|
| 2621 |
+
def _import_device_backends():
|
| 2622 |
+
"""
|
| 2623 |
+
Leverage the Python plugin mechanism to load out-of-the-tree device extensions.
|
| 2624 |
+
See this RFC: https://github.com/pytorch/pytorch/issues/122468
|
| 2625 |
+
"""
|
| 2626 |
+
from importlib.metadata import entry_points
|
| 2627 |
+
|
| 2628 |
+
group_name = "torch.backends"
|
| 2629 |
+
if sys.version_info < (3, 10):
|
| 2630 |
+
backend_extensions = entry_points().get(group_name, ())
|
| 2631 |
+
else:
|
| 2632 |
+
backend_extensions = entry_points(group=group_name)
|
| 2633 |
+
|
| 2634 |
+
for backend_extension in backend_extensions:
|
| 2635 |
+
try:
|
| 2636 |
+
# Load the extension
|
| 2637 |
+
entrypoint = backend_extension.load()
|
| 2638 |
+
# Call the entrypoint
|
| 2639 |
+
entrypoint()
|
| 2640 |
+
except Exception as err:
|
| 2641 |
+
raise RuntimeError(
|
| 2642 |
+
f"Failed to load the backend extension: {backend_extension.name}. "
|
| 2643 |
+
f"You can disable extension auto-loading with TORCH_DEVICE_BACKEND_AUTOLOAD=0."
|
| 2644 |
+
) from err
|
| 2645 |
+
|
| 2646 |
+
|
| 2647 |
+
def _is_device_backend_autoload_enabled() -> builtins.bool:
|
| 2648 |
+
"""
|
| 2649 |
+
Whether autoloading out-of-the-tree device extensions is enabled.
|
| 2650 |
+
The switch depends on the value of the environment variable
|
| 2651 |
+
`TORCH_DEVICE_BACKEND_AUTOLOAD`.
|
| 2652 |
+
|
| 2653 |
+
Returns:
|
| 2654 |
+
bool: Whether to enable autoloading the extensions. Enabled by default.
|
| 2655 |
+
|
| 2656 |
+
Examples:
|
| 2657 |
+
>>> torch._is_device_backend_autoload_enabled()
|
| 2658 |
+
True
|
| 2659 |
+
"""
|
| 2660 |
+
# enabled by default
|
| 2661 |
+
return os.getenv("TORCH_DEVICE_BACKEND_AUTOLOAD", "1") == "1"
|
| 2662 |
+
|
| 2663 |
+
|
| 2664 |
+
if _is_device_backend_autoload_enabled():
|
| 2665 |
+
_import_device_backends()
|
pllava/lib/python3.10/site-packages/torch/_appdirs.py
ADDED
|
@@ -0,0 +1,667 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# -*- coding: utf-8 -*-
|
| 3 |
+
# Copyright (c) 2005-2010 ActiveState Software Inc.
|
| 4 |
+
# Copyright (c) 2013 Eddy Petrișor
|
| 5 |
+
|
| 6 |
+
# flake8: noqa
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
This file is directly from
|
| 10 |
+
https://github.com/ActiveState/appdirs/blob/3fe6a83776843a46f20c2e5587afcffe05e03b39/appdirs.py
|
| 11 |
+
|
| 12 |
+
The license of https://github.com/ActiveState/appdirs copied below:
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# This is the MIT license
|
| 16 |
+
|
| 17 |
+
Copyright (c) 2010 ActiveState Software Inc.
|
| 18 |
+
|
| 19 |
+
Permission is hereby granted, free of charge, to any person obtaining a
|
| 20 |
+
copy of this software and associated documentation files (the
|
| 21 |
+
"Software"), to deal in the Software without restriction, including
|
| 22 |
+
without limitation the rights to use, copy, modify, merge, publish,
|
| 23 |
+
distribute, sublicense, and/or sell copies of the Software, and to
|
| 24 |
+
permit persons to whom the Software is furnished to do so, subject to
|
| 25 |
+
the following conditions:
|
| 26 |
+
|
| 27 |
+
The above copyright notice and this permission notice shall be included
|
| 28 |
+
in all copies or substantial portions of the Software.
|
| 29 |
+
|
| 30 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
| 31 |
+
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
| 32 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
| 33 |
+
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
| 34 |
+
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
| 35 |
+
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
| 36 |
+
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
"""Utilities for determining application-specific dirs.
|
| 40 |
+
|
| 41 |
+
See <https://github.com/ActiveState/appdirs> for details and usage.
|
| 42 |
+
"""
|
| 43 |
+
# Dev Notes:
|
| 44 |
+
# - MSDN on where to store app data files:
|
| 45 |
+
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
|
| 46 |
+
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
|
| 47 |
+
# - XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
|
| 48 |
+
|
| 49 |
+
__version__ = "1.4.4"
|
| 50 |
+
__version_info__ = tuple(int(segment) for segment in __version__.split("."))
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
import os
|
| 54 |
+
import sys
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
unicode = str
|
| 58 |
+
|
| 59 |
+
if sys.platform.startswith("java"):
|
| 60 |
+
import platform
|
| 61 |
+
|
| 62 |
+
os_name = platform.java_ver()[3][0]
|
| 63 |
+
if os_name.startswith("Windows"): # "Windows XP", "Windows 7", etc.
|
| 64 |
+
system = "win32"
|
| 65 |
+
elif os_name.startswith("Mac"): # "Mac OS X", etc.
|
| 66 |
+
system = "darwin"
|
| 67 |
+
else: # "Linux", "SunOS", "FreeBSD", etc.
|
| 68 |
+
# Setting this to "linux2" is not ideal, but only Windows or Mac
|
| 69 |
+
# are actually checked for and the rest of the module expects
|
| 70 |
+
# *sys.platform* style strings.
|
| 71 |
+
system = "linux2"
|
| 72 |
+
else:
|
| 73 |
+
system = sys.platform
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
|
| 77 |
+
r"""Return full path to the user-specific data dir for this application.
|
| 78 |
+
|
| 79 |
+
"appname" is the name of application.
|
| 80 |
+
If None, just the system directory is returned.
|
| 81 |
+
"appauthor" (only used on Windows) is the name of the
|
| 82 |
+
appauthor or distributing body for this application. Typically
|
| 83 |
+
it is the owning company name. This falls back to appname. You may
|
| 84 |
+
pass False to disable it.
|
| 85 |
+
"version" is an optional version path element to append to the
|
| 86 |
+
path. You might want to use this if you want multiple versions
|
| 87 |
+
of your app to be able to run independently. If used, this
|
| 88 |
+
would typically be "<major>.<minor>".
|
| 89 |
+
Only applied when appname is present.
|
| 90 |
+
"roaming" (boolean, default False) can be set True to use the Windows
|
| 91 |
+
roaming appdata directory. That means that for users on a Windows
|
| 92 |
+
network setup for roaming profiles, this user data will be
|
| 93 |
+
sync'd on login. See
|
| 94 |
+
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
|
| 95 |
+
for a discussion of issues.
|
| 96 |
+
|
| 97 |
+
Typical user data directories are:
|
| 98 |
+
Mac OS X: ~/Library/Application Support/<AppName>
|
| 99 |
+
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
|
| 100 |
+
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
|
| 101 |
+
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
|
| 102 |
+
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
|
| 103 |
+
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
|
| 104 |
+
|
| 105 |
+
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
|
| 106 |
+
That means, by default "~/.local/share/<AppName>".
|
| 107 |
+
"""
|
| 108 |
+
if system == "win32":
|
| 109 |
+
if appauthor is None:
|
| 110 |
+
appauthor = appname
|
| 111 |
+
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
|
| 112 |
+
path = os.path.normpath(_get_win_folder(const))
|
| 113 |
+
if appname:
|
| 114 |
+
if appauthor is not False:
|
| 115 |
+
path = os.path.join(path, appauthor, appname)
|
| 116 |
+
else:
|
| 117 |
+
path = os.path.join(path, appname)
|
| 118 |
+
elif system == "darwin":
|
| 119 |
+
path = os.path.expanduser("~/Library/Application Support/")
|
| 120 |
+
if appname:
|
| 121 |
+
path = os.path.join(path, appname)
|
| 122 |
+
else:
|
| 123 |
+
path = os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/share"))
|
| 124 |
+
if appname:
|
| 125 |
+
path = os.path.join(path, appname)
|
| 126 |
+
if appname and version:
|
| 127 |
+
path = os.path.join(path, version)
|
| 128 |
+
return path
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
|
| 132 |
+
r"""Return full path to the user-shared data dir for this application.
|
| 133 |
+
|
| 134 |
+
"appname" is the name of application.
|
| 135 |
+
If None, just the system directory is returned.
|
| 136 |
+
"appauthor" (only used on Windows) is the name of the
|
| 137 |
+
appauthor or distributing body for this application. Typically
|
| 138 |
+
it is the owning company name. This falls back to appname. You may
|
| 139 |
+
pass False to disable it.
|
| 140 |
+
"version" is an optional version path element to append to the
|
| 141 |
+
path. You might want to use this if you want multiple versions
|
| 142 |
+
of your app to be able to run independently. If used, this
|
| 143 |
+
would typically be "<major>.<minor>".
|
| 144 |
+
Only applied when appname is present.
|
| 145 |
+
"multipath" is an optional parameter only applicable to *nix
|
| 146 |
+
which indicates that the entire list of data dirs should be
|
| 147 |
+
returned. By default, the first item from XDG_DATA_DIRS is
|
| 148 |
+
returned, or '/usr/local/share/<AppName>',
|
| 149 |
+
if XDG_DATA_DIRS is not set
|
| 150 |
+
|
| 151 |
+
Typical site data directories are:
|
| 152 |
+
Mac OS X: /Library/Application Support/<AppName>
|
| 153 |
+
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
|
| 154 |
+
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
|
| 155 |
+
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
|
| 156 |
+
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
|
| 157 |
+
|
| 158 |
+
For Unix, this is using the $XDG_DATA_DIRS[0] default.
|
| 159 |
+
|
| 160 |
+
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
|
| 161 |
+
"""
|
| 162 |
+
if system == "win32":
|
| 163 |
+
if appauthor is None:
|
| 164 |
+
appauthor = appname
|
| 165 |
+
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
|
| 166 |
+
if appname:
|
| 167 |
+
if appauthor is not False:
|
| 168 |
+
path = os.path.join(path, appauthor, appname)
|
| 169 |
+
else:
|
| 170 |
+
path = os.path.join(path, appname)
|
| 171 |
+
elif system == "darwin":
|
| 172 |
+
path = os.path.expanduser("/Library/Application Support")
|
| 173 |
+
if appname:
|
| 174 |
+
path = os.path.join(path, appname)
|
| 175 |
+
else:
|
| 176 |
+
# XDG default for $XDG_DATA_DIRS
|
| 177 |
+
# only first, if multipath is False
|
| 178 |
+
path = os.getenv(
|
| 179 |
+
"XDG_DATA_DIRS", os.pathsep.join(["/usr/local/share", "/usr/share"])
|
| 180 |
+
)
|
| 181 |
+
pathlist = [
|
| 182 |
+
os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)
|
| 183 |
+
]
|
| 184 |
+
if appname:
|
| 185 |
+
if version:
|
| 186 |
+
appname = os.path.join(appname, version)
|
| 187 |
+
pathlist = [os.sep.join([x, appname]) for x in pathlist]
|
| 188 |
+
|
| 189 |
+
if multipath:
|
| 190 |
+
path = os.pathsep.join(pathlist)
|
| 191 |
+
else:
|
| 192 |
+
path = pathlist[0]
|
| 193 |
+
return path
|
| 194 |
+
|
| 195 |
+
if appname and version:
|
| 196 |
+
path = os.path.join(path, version)
|
| 197 |
+
return path
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
|
| 201 |
+
r"""Return full path to the user-specific config dir for this application.
|
| 202 |
+
|
| 203 |
+
"appname" is the name of application.
|
| 204 |
+
If None, just the system directory is returned.
|
| 205 |
+
"appauthor" (only used on Windows) is the name of the
|
| 206 |
+
appauthor or distributing body for this application. Typically
|
| 207 |
+
it is the owning company name. This falls back to appname. You may
|
| 208 |
+
pass False to disable it.
|
| 209 |
+
"version" is an optional version path element to append to the
|
| 210 |
+
path. You might want to use this if you want multiple versions
|
| 211 |
+
of your app to be able to run independently. If used, this
|
| 212 |
+
would typically be "<major>.<minor>".
|
| 213 |
+
Only applied when appname is present.
|
| 214 |
+
"roaming" (boolean, default False) can be set True to use the Windows
|
| 215 |
+
roaming appdata directory. That means that for users on a Windows
|
| 216 |
+
network setup for roaming profiles, this user data will be
|
| 217 |
+
sync'd on login. See
|
| 218 |
+
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
|
| 219 |
+
for a discussion of issues.
|
| 220 |
+
|
| 221 |
+
Typical user config directories are:
|
| 222 |
+
Mac OS X: ~/Library/Preferences/<AppName>
|
| 223 |
+
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
|
| 224 |
+
Win *: same as user_data_dir
|
| 225 |
+
|
| 226 |
+
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
|
| 227 |
+
That means, by default "~/.config/<AppName>".
|
| 228 |
+
"""
|
| 229 |
+
if system == "win32":
|
| 230 |
+
path = user_data_dir(appname, appauthor, None, roaming)
|
| 231 |
+
elif system == "darwin":
|
| 232 |
+
path = os.path.expanduser("~/Library/Preferences/")
|
| 233 |
+
if appname:
|
| 234 |
+
path = os.path.join(path, appname)
|
| 235 |
+
else:
|
| 236 |
+
path = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
|
| 237 |
+
if appname:
|
| 238 |
+
path = os.path.join(path, appname)
|
| 239 |
+
if appname and version:
|
| 240 |
+
path = os.path.join(path, version)
|
| 241 |
+
return path
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return full path to the user-shared config dir for this application.

    "appname": name of the application; if None the bare system directory
        is returned.
    "appauthor" (Windows only): owning company or distributing body;
        falls back to appname; pass False to disable.
    "version": optional version path element (typically "<major>.<minor>");
        only applied when appname is present.
    "multipath" (*nix only): when True return the entire list of config
        dirs (os.pathsep-joined); otherwise only the first entry of
        $XDG_CONFIG_DIRS (default '/etc/xdg/<AppName>').

    Typical site config directories:
        Mac OS X: same as site_data_dir
        Unix:     /etc/xdg/<AppName> or each $XDG_CONFIG_DIRS entry
        Win *:    same as site_data_dir
        Vista:    (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)

    WARNING: Do not use this on Windows. See the Vista note above for why.
    """
    if system == "win32":
        # Shares a base with site_data_dir; version is appended only when an
        # application name was supplied.
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
        return path

    if system == "darwin":
        base = os.path.expanduser("/Library/Preferences")
        return os.path.join(base, appname) if appname else base

    # *nix: honor $XDG_CONFIG_DIRS (os.pathsep-separated), default /etc/xdg.
    raw = os.getenv("XDG_CONFIG_DIRS", "/etc/xdg")
    candidates = [
        os.path.expanduser(entry.rstrip(os.sep)) for entry in raw.split(os.pathsep)
    ]
    if appname:
        suffix = os.path.join(appname, version) if version else appname
        candidates = [os.sep.join([base, suffix]) for base in candidates]

    return os.pathsep.join(candidates) if multipath else candidates[0]
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific cache dir for this application.

    "appname": name of the application; if None the bare system directory
        is returned.
    "appauthor" (Windows only): owning company or distributing body;
        falls back to appname; pass False to disable.
    "version": optional version path element (typically "<major>.<minor>");
        only applied when appname is present.
    "opinion": set False to suppress appending "Cache" to the base app
        data dir on Windows.

    Typical user cache directories:
        Mac OS X: ~/Library/Caches/<AppName>
        Unix:     ~/.cache/<AppName> (XDG default)
        Win XP:   C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
        Vista:    C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

    On Windows, apps typically put cache data *under* `CSIDL_LOCAL_APPDATA`
    (e.g. ...\Acme\SuperApp\Cache\1.0); hence the opinionated "Cache" suffix,
    which `opinion=False` disables.
    """
    if system == "win32":
        author = appname if appauthor is None else appauthor
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            parts = [appname] if author is False else [author, appname]
            path = os.path.join(path, *parts)
            if opinion:
                path = os.path.join(path, "Cache")
    elif system == "darwin":
        base = os.path.expanduser("~/Library/Caches")
        path = os.path.join(base, appname) if appname else base
    else:
        base = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
        path = os.path.join(base, appname) if appname else base

    if appname and version:
        path = os.path.join(path, version)
    return path
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific state dir for this application.

    "appname": name of the application; if None the bare system directory
        is returned.
    "appauthor" (Windows only): owning company or distributing body;
        falls back to appname; pass False to disable.
    "version": optional version path element (typically "<major>.<minor>");
        only applied when appname is present.
    "roaming" (Windows only, default False): use the roaming appdata
        directory so the data syncs on login for roaming profiles. See
        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>.

    Typical user state directories:
        Mac OS X: same as user_data_dir
        Unix:     ~/.local/state/<AppName>  # or in $XDG_STATE_HOME, if defined
        Win *:    same as user_data_dir

    For Unix this follows the Debian proposal
    <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
    extending the XDG spec with $XDG_STATE_HOME.
    """
    if system in ("win32", "darwin"):
        # No dedicated "state" location on these platforms; reuse data dir.
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        base = os.getenv("XDG_STATE_HOME", os.path.expanduser("~/.local/state"))
        path = os.path.join(base, appname) if appname else base

    if appname and version:
        path = os.path.join(path, version)
    return path
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific log dir for this application.

    "appname": name of the application; if None the bare system directory
        is returned.
    "appauthor" (Windows only): owning company or distributing body;
        falls back to appname; pass False to disable.
    "version": optional version path element (typically "<major>.<minor>");
        only applied when appname is present.
    "opinion": set False to disable appending "Logs" to the Windows base
        app data dir and "log" to the Unix base cache dir.

    Typical user log directories:
        Mac OS X: ~/Library/Logs/<AppName>
        Unix:     ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
        Win XP:   C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
        Vista:    C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs

    OPINION: this function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
    value for Windows and "log" to the user cache dir for Unix; disable
    with `opinion=False`.
    """
    if system == "darwin":
        # BUG FIX: previously os.path.join(..., appname) raised TypeError for
        # appname=None; the docstring promises the bare system dir instead.
        path = os.path.expanduser("~/Library/Logs")
        if appname:
            path = os.path.join(path, appname)
    elif system == "win32":
        path = user_data_dir(appname, appauthor, version)
        # version is already embedded by user_data_dir; don't append it again.
        version = False
        if opinion:
            path = os.path.join(path, "Logs")
    else:
        path = user_cache_dir(appname, appauthor, version)
        # version is already embedded by user_cache_dir; don't append it again.
        version = False
        if opinion:
            path = os.path.join(path, "log")
    if appname and version:
        path = os.path.join(path, version)
    return path
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
class AppDirs(object):
    """Convenience wrapper bundling the module-level *_dir functions.

    Stores the common arguments once and exposes each directory as a
    read-only property.
    """

    def __init__(self, appname=None, appauthor=None, version=None, roaming=False, multipath=False):
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming
        self.multipath = multipath

    @property
    def user_data_dir(self):
        """User-specific data directory."""
        return user_data_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming)

    @property
    def site_data_dir(self):
        """Shared (site-wide) data directory."""
        return site_data_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath)

    @property
    def user_config_dir(self):
        """User-specific config directory."""
        return user_config_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming)

    @property
    def site_config_dir(self):
        """Shared (site-wide) config directory."""
        return site_config_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath)

    @property
    def user_cache_dir(self):
        """User-specific cache directory."""
        return user_cache_dir(self.appname, self.appauthor, version=self.version)

    @property
    def user_state_dir(self):
        """User-specific state directory."""
        return user_state_dir(self.appname, self.appauthor, version=self.version)

    @property
    def user_log_dir(self):
        """User-specific log directory."""
        return user_log_dir(self.appname, self.appauthor, version=self.version)
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
# ---- internal support stuff
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
def _get_win_folder_from_registry(csidl_name):
    """Resolve a CSIDL folder via the Windows registry.

    This is a fallback technique at best: it is unclear whether the
    registry gives the correct answer for all CSIDL_* names.
    """
    import winreg as _winreg

    value_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
    )
    folder, _value_type = _winreg.QueryValueEx(key, value_name)
    return folder
|
| 520 |
+
|
| 521 |
+
|
| 522 |
+
def _get_win_folder_with_pywin32(csidl_name):
    """Resolve a CSIDL folder using the pywin32 shell bindings."""
    from win32com.shell import shell, shellcon

    folder = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does not
    # return unicode strings when there is unicode data in the path.
    # NOTE(review): `unicode` is a Py2 builtin; presumably it is aliased to
    # `str` earlier in this module for Py3 — confirm against the file head.
    try:
        folder = unicode(folder)

        # Downgrade to the short path name if high-bit chars are present.
        # See <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        if any(ord(ch) > 255 for ch in folder):
            try:
                import win32api

                folder = win32api.GetShortPathName(folder)
            except ImportError:
                pass
    except UnicodeError:
        pass
    return folder
|
| 549 |
+
|
| 550 |
+
|
| 551 |
+
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL folder by calling SHGetFolderPathW via ctypes."""
    import ctypes

    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to the short path name if high-bit chars are present.
    # See <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(ch) > 255 for ch in buf):
        alt = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, alt, 1024):
            buf = alt

    return buf.value
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
def _get_win_folder_with_jna(csidl_name):
    """Resolve a CSIDL folder via JNA (only available under Jython on Windows)."""
    import array

    from com.sun import jna
    from com.sun.jna.platform import win32

    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros("c", buf_size)
    win32.Shell32.INSTANCE.SHGetFolderPath(
        None,
        getattr(win32.ShlObj, csidl_name),
        None,
        win32.ShlObj.SHGFP_TYPE_CURRENT,
        buf,
    )
    folder = jna.Native.toString(buf.tostring()).rstrip("\0")

    # Downgrade to the short path name if high-bit chars are present.
    # See <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(ch) > 255 for ch in folder):
        buf = array.zeros("c", buf_size)
        if win32.Kernel32.INSTANCE.GetShortPathName(folder, buf, buf_size):
            folder = jna.Native.toString(buf.tostring()).rstrip("\0")

    return folder
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
if system == "win32":
    # Pick the best available backend for resolving Windows shell folders,
    # tried in order of preference:
    #   1. pywin32 (win32com.shell)
    #   2. ctypes (SHGetFolderPathW)
    #   3. JNA (Jython on Windows)
    #   4. registry lookup (fallback; least reliable)
    try:
        import win32com.shell

        _get_win_folder = _get_win_folder_with_pywin32
    except ImportError:
        try:
            from ctypes import windll

            _get_win_folder = _get_win_folder_with_ctypes
        except ImportError:
            try:
                import com.sun.jna

                _get_win_folder = _get_win_folder_with_jna
            except ImportError:
                _get_win_folder = _get_win_folder_from_registry
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
# ---- self test code
|
| 632 |
+
|
| 633 |
+
if __name__ == "__main__":
    appname = "MyApp"
    appauthor = "MyCompany"

    props = (
        "user_data_dir",
        "user_config_dir",
        "user_cache_dir",
        "user_state_dir",
        "user_log_dir",
        "site_data_dir",
        "site_config_dir",
    )

    print(f"-- app dirs {__version__} --")

    # Exercise each directory property under the argument combinations
    # users are most likely to pass.
    scenarios = (
        ("-- app dirs (with optional 'version')", AppDirs(appname, appauthor, version="1.0")),
        ("\n-- app dirs (without optional 'version')", AppDirs(appname, appauthor)),
        ("\n-- app dirs (without optional 'appauthor')", AppDirs(appname)),
        ("\n-- app dirs (with disabled 'appauthor')", AppDirs(appname, appauthor=False)),
    )
    for header, dirs in scenarios:
        print(header)
        for prop in props:
            print(f"{prop}: {getattr(dirs, prop)}")
|
pllava/lib/python3.10/site-packages/torch/_classes.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import types
|
| 3 |
+
|
| 4 |
+
import torch._C
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class _ClassNamespace(types.ModuleType):
    """Lazy proxy module for one namespace under ``torch.classes``.

    Attribute access resolves registered custom classes through the C++
    registry; unknown names raise ``RuntimeError``.
    """

    def __init__(self, name):
        super().__init__("torch.classes" + name)
        self.name = name

    def __getattr__(self, attr):
        # Look the class up lazily; only reached when normal attribute
        # lookup fails, so registered wrappers are fetched on demand.
        wrapper = torch._C._get_custom_class_python_wrapper(self.name, attr)
        if wrapper is None:
            raise RuntimeError(f"Class {self.name}.{attr} not registered!")
        return wrapper
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class _Classes(types.ModuleType):
    """The ``torch.classes`` pseudo-module.

    Attribute access materializes a ``_ClassNamespace`` per namespace and
    caches it on the instance, so each namespace is built only once.
    """

    __file__ = "_classes.py"

    def __init__(self) -> None:
        super().__init__("torch.classes")

    def __getattr__(self, name):
        # Cache on the instance so subsequent lookups bypass __getattr__.
        ns = _ClassNamespace(name)
        setattr(self, name, ns)
        return ns

    @property
    def loaded_libraries(self):
        return torch.ops.loaded_libraries

    def load_library(self, path):
        """
        Loads a shared library from the given path into the current process.

        The library being loaded may run global initialization code to register
        custom classes with the PyTorch JIT runtime. Compile your class and its
        static registration code into a shared object, then call
        ``torch.classes.load_library('path/to/libcustom.so')`` to load it.

        After loading, the path is added to ``torch.classes.loaded_libraries``,
        a set that may be inspected for all libraries loaded this way.

        Args:
            path (str): A path to a shared library to load.
        """
        torch.ops.load_library(path)


# The classes "namespace"
classes = _Classes()
|
pllava/lib/python3.10/site-packages/torch/_compile.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
"""
|
| 3 |
+
APIs related to torch.compile which lazily import torch._dynamo to avoid
|
| 4 |
+
circular dependencies.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import functools
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def _disable_dynamo(fn=None, recursive=True):
    """
    Torch-internal equivalent of ``torch._dynamo.disable``; external users
    should use ``torch._dynamo.disable`` directly. Avoids circular imports
    by deferring the ``torch._dynamo`` import from decoration time to the
    first call of the decorated function.
    """
    if fn is None:
        # Decorator-factory usage: @_disable_dynamo(recursive=False).
        # The returned partial expects the target function as its argument.
        return functools.partial(_disable_dynamo, recursive=recursive)

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        # Build and cache the disabled callable on first invocation so the
        # torch._dynamo import (and disable() overhead) is paid only once.
        disabled = getattr(fn, "__dynamo_disable", None)
        if disabled is None:
            import torch._dynamo

            disabled = torch._dynamo.disable(fn, recursive)
            fn.__dynamo_disable = disabled

        return disabled(*args, **kwargs)

    return wrapper
|
pllava/lib/python3.10/site-packages/torch/_custom_ops.py
ADDED
|
@@ -0,0 +1,324 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import inspect
|
| 3 |
+
|
| 4 |
+
from torch._custom_op.impl import (
|
| 5 |
+
_custom_op_with_schema,
|
| 6 |
+
_find_custom_op,
|
| 7 |
+
infer_schema,
|
| 8 |
+
parse_qualname,
|
| 9 |
+
validate_namespace,
|
| 10 |
+
)
|
| 11 |
+
from torch.library import get_ctx
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Public surface of the legacy torch._custom_ops helper APIs.
__all__ = [
    "custom_op",
    "impl",
    "impl_abstract",
    "get_ctx",
    "impl_save_for_backward",
    "impl_backward",
]
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def custom_op(qualname, func_or_schema=None):
|
| 25 |
+
r"""Register a new custom operator
|
| 26 |
+
|
| 27 |
+
In PyTorch, defining an op (short for "operator") is a two step-process:
|
| 28 |
+
- we need to define the op (by providing an operator name and schema)
|
| 29 |
+
- we need to implement behavior for how the operator interacts with
|
| 30 |
+
various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc.
|
| 31 |
+
|
| 32 |
+
This entrypoint defines the custom operator (the first step)
|
| 33 |
+
you must then perform the second step by calling various
|
| 34 |
+
``impl_*`` APIs.
|
| 35 |
+
|
| 36 |
+
This API may be used as a decorator (see examples).
|
| 37 |
+
|
| 38 |
+
For a detailed guide on custom ops, please see
|
| 39 |
+
https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
|
| 40 |
+
|
| 41 |
+
Arguments:
|
| 42 |
+
qualname (str): Should be a string that looks like
|
| 43 |
+
"namespace::operator_name". Operators in PyTorch need a namespace to
|
| 44 |
+
avoid name collisions; a given operator may only be created once.
|
| 45 |
+
If you are writing a Python library, we recommend the namespace to
|
| 46 |
+
be the name of your top-level module.
|
| 47 |
+
func_or_schema (Union[Callable, str]): Each PyTorch operator needs a
|
| 48 |
+
schema that tells PyTorch the types of the inputs/outputs.
|
| 49 |
+
If this is a Callable, we will automatically infer the schema from
|
| 50 |
+
the type annotations on the function (see examples). Otherwise,
|
| 51 |
+
if you don't want to use type annotations, you may provide us the
|
| 52 |
+
schema string.
|
| 53 |
+
|
| 54 |
+
Example::
|
| 55 |
+
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
|
| 56 |
+
>>> import torch
|
| 57 |
+
>>> import numpy as np
|
| 58 |
+
>>> from torch import Tensor
|
| 59 |
+
>>>
|
| 60 |
+
>>> # Step 1: define the custom op.
|
| 61 |
+
>>> # We need to provide the API a "prototype function"
|
| 62 |
+
>>> # (a function that returns NotImplementedError), from which
|
| 63 |
+
>>> # we will infer the types of the inputs and outputs.
|
| 64 |
+
>>> @torch._custom_ops.custom_op("mylibrary::numpy_sin")
|
| 65 |
+
>>> def numpy_sin(x: Tensor) -> Tensor:
|
| 66 |
+
>>> raise NotImplementedError
|
| 67 |
+
>>>
|
| 68 |
+
>>> # The custom op is now accessible via the torch.ops module:
|
| 69 |
+
>>> torch.ops.mylibrary.numpy_sin
|
| 70 |
+
>>>
|
| 71 |
+
>>> # Step 2: Register an implementation for various PyTorch subsystems
|
| 72 |
+
>>>
|
| 73 |
+
>>> # Register an implementation for CPU tensors
|
| 74 |
+
>>> @torch._custom_ops.impl("mylibrary::numpy_sin", device_types="cpu")
|
| 75 |
+
>>> def numpy_sin_impl_cpu(x):
|
| 76 |
+
>>> return torch.from_numpy(np.sin(x.numpy()))
|
| 77 |
+
>>>
|
| 78 |
+
>>> # Register an implementation for CUDA tensors
|
| 79 |
+
>>> @torch._custom_ops.impl("mylibrary::numpy_sin", device_types="cuda")
|
| 80 |
+
>>> def numpy_sin_impl_cuda(x):
|
| 81 |
+
>>> return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device)
|
| 82 |
+
>>>
|
| 83 |
+
>>> x = torch.randn(3)
|
| 84 |
+
>>> torch.ops.mylibrary.numpy_sin(x) # calls numpy_sin_impl_cpu
|
| 85 |
+
>>>
|
| 86 |
+
>>> x_cuda = x.cuda()
|
| 87 |
+
>>> torch.ops.mylibrary.numpy_sin(x) # calls numpy_sin_impl_cuda
|
| 88 |
+
|
| 89 |
+
"""
|
| 90 |
+
ns, name = parse_qualname(qualname)
|
| 91 |
+
validate_namespace(ns)
|
| 92 |
+
|
| 93 |
+
def inner(func):
|
| 94 |
+
if not inspect.isfunction(func):
|
| 95 |
+
raise ValueError(
|
| 96 |
+
f"custom_op(...)(func): Expected `func` to be a Python "
|
| 97 |
+
f"function, got: {type(func)}"
|
| 98 |
+
)
|
| 99 |
+
|
| 100 |
+
if func.__name__ != name:
|
| 101 |
+
raise ValueError(
|
| 102 |
+
f"custom_op(qualname='{qualname}', ...)(func): expected `func` "
|
| 103 |
+
f"to have name '{name}' but got '{func.__name__}'. "
|
| 104 |
+
f"Please either change the name of `func` or the qualname that "
|
| 105 |
+
f"is passed to `custom_op`"
|
| 106 |
+
)
|
| 107 |
+
|
| 108 |
+
schema = infer_schema(func, mutates_args=())
|
| 109 |
+
_custom_op_with_schema(qualname, schema)
|
| 110 |
+
return func
|
| 111 |
+
|
| 112 |
+
if func_or_schema is None:
|
| 113 |
+
return inner
|
| 114 |
+
if isinstance(func_or_schema, str):
|
| 115 |
+
_custom_op_with_schema(qualname, func_or_schema)
|
| 116 |
+
else:
|
| 117 |
+
return inner(func_or_schema)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def impl(qualname, *, device_types=("cpu", "cuda"), func=None):
|
| 121 |
+
r"""Register an implementation for a device type for this custom op.
|
| 122 |
+
|
| 123 |
+
If the op is passed multiple Tensor inputs with different device
|
| 124 |
+
types, it will dispatch to the registered implementation for the highest
|
| 125 |
+
priority device type among those present.
|
| 126 |
+
The supported device types, in order of priority, are {'cuda', 'cpu'}.
|
| 127 |
+
|
| 128 |
+
This API may be used as a decorator (see examples).
|
| 129 |
+
|
| 130 |
+
For a detailed guide on custom ops, please see
|
| 131 |
+
https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
|
| 132 |
+
|
| 133 |
+
Arguments:
|
| 134 |
+
device_types (str or Iterable[str]): the device type(s) to register the function for.
|
| 135 |
+
|
| 136 |
+
Example::
|
| 137 |
+
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
|
| 138 |
+
>>> import torch
|
| 139 |
+
>>> import numpy as np
|
| 140 |
+
>>> from torch import Tensor
|
| 141 |
+
>>>
|
| 142 |
+
>>> # Step 1: define the custom op.
|
| 143 |
+
>>> # We need to provide the API a "prototype function"
|
| 144 |
+
>>> # (a function that returns NotImplementedError), from which
|
| 145 |
+
>>> # we will infer the types of the inputs and outputs.
|
| 146 |
+
>>> @torch._custom_ops.custom_op("mylibrary::numpy_cos")
|
| 147 |
+
>>> def numpy_cos(x: Tensor) -> Tensor:
|
| 148 |
+
>>> raise NotImplementedError
|
| 149 |
+
>>>
|
| 150 |
+
>>> # The custom op is now accessible via the torch.ops module:
|
| 151 |
+
>>> torch.ops.mylibrary.numpy_cos
|
| 152 |
+
>>>
|
| 153 |
+
>>> # Step 2: Register an implementation for various PyTorch subsystems
|
| 154 |
+
>>>
|
| 155 |
+
>>> # Register an implementation for CPU tensors
|
| 156 |
+
>>> @torch._custom_ops.impl("mylibrary::numpy_cos", device_types="cpu")
|
| 157 |
+
>>> def numpy_cos_impl_cpu(x):
|
| 158 |
+
>>> return torch.from_numpy(np.cos(x.numpy()))
|
| 159 |
+
>>>
|
| 160 |
+
>>> # Register an implementation for CUDA tensors
|
| 161 |
+
>>> @torch._custom_ops.impl("mylibrary::numpy_cos", device_types="cuda")
|
| 162 |
+
>>> def numpy_cos_impl_cuda(x):
|
| 163 |
+
>>> return torch.from_numpy(np.cos(x.cpu().numpy())).to(x.device)
|
| 164 |
+
>>>
|
| 165 |
+
>>> x = torch.randn(3)
|
| 166 |
+
>>> torch.ops.mylibrary.numpy_cos(x) # calls numpy_cos_impl_cpu
|
| 167 |
+
>>>
|
| 168 |
+
>>> x_cuda = x.cuda()
|
| 169 |
+
>>> torch.ops.mylibrary.numpy_cos(x) # calls numpy_cos_impl_cuda
|
| 170 |
+
|
| 171 |
+
"""
|
| 172 |
+
|
| 173 |
+
def inner(func):
|
| 174 |
+
custom_op = _find_custom_op(qualname, also_check_torch_library=True)
|
| 175 |
+
custom_op.impl(device_types, _stacklevel=3)(func)
|
| 176 |
+
return func
|
| 177 |
+
|
| 178 |
+
if func is None:
|
| 179 |
+
return inner
|
| 180 |
+
return inner(func)
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def impl_abstract(qualname, *, func=None):
|
| 184 |
+
r"""Register an abstract implementation for this operator.
|
| 185 |
+
|
| 186 |
+
An "abstract implementation" specifies the behavior of this operator on
|
| 187 |
+
Tensors that carry no data. Given some input Tensors with certain properties
|
| 188 |
+
(sizes/strides/storage_offset/device), it specifies what the properties of
|
| 189 |
+
the output Tensors are.
|
| 190 |
+
|
| 191 |
+
The abstract implementation has the same signature as the operator.
|
| 192 |
+
It is run for both FakeTensors and meta tensors. To write an abstract
|
| 193 |
+
implementation, assume that all Tensor inputs to the operator are
|
| 194 |
+
regular CPU/CUDA/Meta tensors, but they do not have storage, and
|
| 195 |
+
you are trying to return regular CPU/CUDA/Meta tensor(s) as output.
|
| 196 |
+
The abstract implementation must consist of only PyTorch operations
|
| 197 |
+
(and may not directly access the storage or data of any input or
|
| 198 |
+
intermediate Tensors).
|
| 199 |
+
|
| 200 |
+
This API may be used as a decorator (see examples).
|
| 201 |
+
|
| 202 |
+
For a detailed guide on custom ops, please see
|
| 203 |
+
https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
|
| 204 |
+
|
| 205 |
+
Examples::
|
| 206 |
+
>>> import numpy as np
|
| 207 |
+
>>> from torch import Tensor
|
| 208 |
+
>>>
|
| 209 |
+
>>> # Example 1: an operator without data-dependent output shape
|
| 210 |
+
>>> @torch._custom_ops.custom_op("mylibrary::custom_linear")
|
| 211 |
+
>>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor:
|
| 212 |
+
>>> raise NotImplementedError
|
| 213 |
+
>>>
|
| 214 |
+
>>> @torch._custom_ops.impl_abstract("mylibrary::custom_linear")
|
| 215 |
+
>>> def custom_linear_abstract(x, weight):
|
| 216 |
+
>>> assert x.dim() == 2
|
| 217 |
+
>>> assert weight.dim() == 2
|
| 218 |
+
>>> assert bias.dim() == 1
|
| 219 |
+
>>> assert x.shape[1] == weight.shape[1]
|
| 220 |
+
>>> assert weight.shape[0] == bias.shape[0]
|
| 221 |
+
>>> assert x.device == weight.device
|
| 222 |
+
>>>
|
| 223 |
+
>>> return (x @ weight.t()) + bias
|
| 224 |
+
>>>
|
| 225 |
+
>>> # Example 2: an operator with data-dependent output shape
|
| 226 |
+
>>> @torch._custom_ops.custom_op('mylibrary::custom_nonzero')
|
| 227 |
+
>>> def custom_nonzero(x: Tensor) -> Tensor:
|
| 228 |
+
>>> ...
|
| 229 |
+
>>>
|
| 230 |
+
>>> @torch._custom_ops.impl_abstract("mylibrary::custom_nonzero")
|
| 231 |
+
>>> def custom_nonzero_abstract(x):
|
| 232 |
+
>>> # Number of nonzero-elements is data-dependent.
|
| 233 |
+
>>> # Since we cannot peek at the data in an abstract impl,
|
| 234 |
+
>>> # we use the ctx object to construct a new symint that
|
| 235 |
+
>>> # represents the data-dependent size.
|
| 236 |
+
>>> ctx = torch._custom_ops.get_ctx()
|
| 237 |
+
>>> nnz = ctx.create_unbacked_symint()
|
| 238 |
+
>>> shape = [x.dim(), nnz]
|
| 239 |
+
>>> result = x.new_empty(shape, dtype=torch.long)
|
| 240 |
+
>>> return result
|
| 241 |
+
>>>
|
| 242 |
+
>>> @torch._custom_ops.impl("mylibrary::custom_nonzero")
|
| 243 |
+
>>> def custom_nonzero_impl(x):
|
| 244 |
+
>>> x_np = to_numpy(x)
|
| 245 |
+
>>> res = np.stack(np.nonzero(x_np), axis=1)
|
| 246 |
+
>>> # unbacked symbolic ints in PyTorch must be >= 2, so we
|
| 247 |
+
>>> # constrain the range to at least 2
|
| 248 |
+
>>> if res.shape[0] <= 1:
|
| 249 |
+
>>> raise RuntimeError("not supported")
|
| 250 |
+
>>> return torch.tensor(res, device=x.device)
|
| 251 |
+
|
| 252 |
+
"""
|
| 253 |
+
import torch.library
|
| 254 |
+
|
| 255 |
+
return torch.library.register_fake(qualname, func, _stacklevel=2)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def impl_save_for_backward(qualname, *, func=None):
|
| 259 |
+
r"""Register a function that tells us what to save for backward.
|
| 260 |
+
|
| 261 |
+
Please see :func:`impl_backward` for more details.
|
| 262 |
+
"""
|
| 263 |
+
|
| 264 |
+
def inner(func):
|
| 265 |
+
custom_op = _find_custom_op(qualname, also_check_torch_library=True)
|
| 266 |
+
custom_op.impl_save_for_backward(_stacklevel=3)(func)
|
| 267 |
+
return func
|
| 268 |
+
|
| 269 |
+
if func is None:
|
| 270 |
+
return inner
|
| 271 |
+
return inner(func)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def impl_backward(qualname, output_differentiability=None, *, func=None):
|
| 275 |
+
r"""Registers a backward formula for an operator.
|
| 276 |
+
|
| 277 |
+
In order for an operator to work with autograd, you need to register
|
| 278 |
+
a backward formula. There are two pieces to this:
|
| 279 |
+
1. You must give us a function to specify what to save for backward.
|
| 280 |
+
Call this the "save for backward" function.
|
| 281 |
+
2. You must give us a function that computes gradients. Call this the
|
| 282 |
+
"backward" function.
|
| 283 |
+
|
| 284 |
+
Use `impl_save_for_backward` to define a "save for backward" function
|
| 285 |
+
that specifies what gets saved for backward. The function should accept
|
| 286 |
+
two arguments ``(inputs, output)`` and return the quantities to be saved
|
| 287 |
+
for backward.
|
| 288 |
+
|
| 289 |
+
During runtime, when you call the operator in a forwards pass, PyTorch
|
| 290 |
+
will invoke the "save for backward" function with the inputs and output
|
| 291 |
+
of the operator.
|
| 292 |
+
|
| 293 |
+
Use `impl_backward` to define the "backward" function. The backward
|
| 294 |
+
function must accept ``(ctx, saved, *grads)``:
|
| 295 |
+
- ``ctx`` is a context object where we may provide information
|
| 296 |
+
- ``saved`` is exactly what gets returned from the "save for backward"
|
| 297 |
+
function
|
| 298 |
+
- ``grads`` is one or more gradients. The number of gradients matches
|
| 299 |
+
the number of outputs of the operator.
|
| 300 |
+
|
| 301 |
+
The backward function must return a dict that maps the name of
|
| 302 |
+
an input to the operator to its corresponding gradient. All inputs that
|
| 303 |
+
were declared to be Tensors in the operator definition must be accounted
|
| 304 |
+
for in the dict. The gradient may be a Tensor or None.
|
| 305 |
+
|
| 306 |
+
For a detailed guide on custom ops, please see
|
| 307 |
+
https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
|
| 308 |
+
|
| 309 |
+
"""
|
| 310 |
+
|
| 311 |
+
def inner(func):
|
| 312 |
+
custom_op = _find_custom_op(qualname, also_check_torch_library=True)
|
| 313 |
+
custom_op.impl_backward(output_differentiability, _stacklevel=3)(func)
|
| 314 |
+
return func
|
| 315 |
+
|
| 316 |
+
if func is None:
|
| 317 |
+
return inner
|
| 318 |
+
return inner(func)
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
def _destroy(qualname):
|
| 322 |
+
"""De-registers a custom op. For testing purposes only"""
|
| 323 |
+
custom_op = _find_custom_op(qualname)
|
| 324 |
+
custom_op._destroy()
|
pllava/lib/python3.10/site-packages/torch/_deploy.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import io
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch.package import Importer, OrderedImporter, PackageImporter, sys_importer
|
| 6 |
+
from torch.package._package_pickler import create_pickler
|
| 7 |
+
from torch.package._package_unpickler import PackageUnpickler
|
| 8 |
+
from torch.serialization import _maybe_decode_ascii
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _save_storages(importer, obj):
|
| 12 |
+
serialized_storages = []
|
| 13 |
+
serialized_dtypes = []
|
| 14 |
+
|
| 15 |
+
importer = importer if isinstance(importer, torch.package.PackageImporter) else None
|
| 16 |
+
importers: Importer
|
| 17 |
+
if importer is not None:
|
| 18 |
+
importers = OrderedImporter(importer, sys_importer)
|
| 19 |
+
else:
|
| 20 |
+
importers = sys_importer
|
| 21 |
+
|
| 22 |
+
def persistent_id(obj):
|
| 23 |
+
if torch.is_storage(obj) or isinstance(obj, torch.storage.TypedStorage):
|
| 24 |
+
if isinstance(obj, torch.storage.TypedStorage):
|
| 25 |
+
# TODO: Once we decide to break serialization FC, we can
|
| 26 |
+
# remove this case
|
| 27 |
+
dtype = obj.dtype
|
| 28 |
+
else:
|
| 29 |
+
dtype = torch.uint8
|
| 30 |
+
|
| 31 |
+
serialized_storages.append(obj)
|
| 32 |
+
serialized_dtypes.append(dtype)
|
| 33 |
+
return ("storage", len(serialized_storages) - 1)
|
| 34 |
+
|
| 35 |
+
if hasattr(obj, "__reduce_deploy__"):
|
| 36 |
+
if _serialized_reduces.get(id(obj)) is None:
|
| 37 |
+
_serialized_reduces[id(obj)] = (
|
| 38 |
+
"reduce_deploy",
|
| 39 |
+
id(obj),
|
| 40 |
+
*obj.__reduce_deploy__(importers),
|
| 41 |
+
)
|
| 42 |
+
return _serialized_reduces[id(obj)]
|
| 43 |
+
|
| 44 |
+
return None
|
| 45 |
+
|
| 46 |
+
# Write the pickle data for `obj`
|
| 47 |
+
data_buf = io.BytesIO()
|
| 48 |
+
pickler = create_pickler(data_buf, importers)
|
| 49 |
+
pickler.persistent_id = persistent_id
|
| 50 |
+
pickler.dump(obj)
|
| 51 |
+
data_value = data_buf.getvalue()
|
| 52 |
+
return (
|
| 53 |
+
data_value,
|
| 54 |
+
serialized_storages,
|
| 55 |
+
serialized_dtypes,
|
| 56 |
+
importer.zip_reader if importer else None,
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def _load_storages(id, zip_reader, obj_bytes, serialized_storages, serialized_dtypes):
|
| 61 |
+
def persistent_load(saved_id):
|
| 62 |
+
assert isinstance(saved_id, tuple)
|
| 63 |
+
typename = _maybe_decode_ascii(saved_id[0])
|
| 64 |
+
data = saved_id[1:]
|
| 65 |
+
|
| 66 |
+
if typename == "storage":
|
| 67 |
+
# TODO: Once we decide to break serialization FC, we can
|
| 68 |
+
# stop wrapping with TypedStorage
|
| 69 |
+
storage = serialized_storages[data[0]]
|
| 70 |
+
dtype = serialized_dtypes[data[0]]
|
| 71 |
+
return torch.storage.TypedStorage(
|
| 72 |
+
wrap_storage=storage.untyped(), dtype=dtype
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
if typename == "reduce_deploy":
|
| 76 |
+
reduce_id, func, args = data
|
| 77 |
+
if reduce_id not in _loaded_reduces:
|
| 78 |
+
_loaded_reduces[reduce_id] = func(_raw_packages[zip_reader], *args)
|
| 79 |
+
return _loaded_reduces[reduce_id]
|
| 80 |
+
|
| 81 |
+
return None
|
| 82 |
+
|
| 83 |
+
importer: Importer
|
| 84 |
+
if zip_reader is not None:
|
| 85 |
+
importer = OrderedImporter(_get_package(zip_reader), sys_importer)
|
| 86 |
+
else:
|
| 87 |
+
importer = sys_importer
|
| 88 |
+
|
| 89 |
+
unpickler = PackageUnpickler(importer, io.BytesIO(obj_bytes))
|
| 90 |
+
unpickler.persistent_load = persistent_load # type: ignore[method-assign]
|
| 91 |
+
result = _deploy_objects[id] = unpickler.load()
|
| 92 |
+
return result
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def _get_package(zip_reader):
|
| 96 |
+
if zip_reader not in _raw_packages:
|
| 97 |
+
_raw_packages[zip_reader] = PackageImporter(zip_reader)
|
| 98 |
+
return _raw_packages[zip_reader]
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
_raw_packages: dict = {}
|
| 102 |
+
_deploy_objects: dict = {}
|
| 103 |
+
_serialized_reduces: dict = {}
|
| 104 |
+
_loaded_reduces: dict = {}
|
pllava/lib/python3.10/site-packages/torch/_guards.py
ADDED
|
@@ -0,0 +1,925 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import contextlib
|
| 5 |
+
import dataclasses
|
| 6 |
+
import enum
|
| 7 |
+
import functools
|
| 8 |
+
import logging
|
| 9 |
+
import threading
|
| 10 |
+
import traceback
|
| 11 |
+
import unittest.mock
|
| 12 |
+
import weakref
|
| 13 |
+
from abc import abstractmethod
|
| 14 |
+
from contextlib import contextmanager
|
| 15 |
+
from typing import (
|
| 16 |
+
Any,
|
| 17 |
+
Callable,
|
| 18 |
+
Dict,
|
| 19 |
+
Generic,
|
| 20 |
+
List,
|
| 21 |
+
NamedTuple,
|
| 22 |
+
Optional,
|
| 23 |
+
Set,
|
| 24 |
+
Tuple,
|
| 25 |
+
TYPE_CHECKING,
|
| 26 |
+
TypeVar,
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
from torch._C._dynamo.eval_frame import set_context_frame # noqa: F401
|
| 30 |
+
from torch.utils import _pytree as pytree
|
| 31 |
+
from torch.utils._traceback import CapturedTraceback
|
| 32 |
+
from torch.utils.weak import WeakTensorKeyDictionary
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
log = logging.getLogger(__name__)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
if TYPE_CHECKING:
|
| 39 |
+
import sympy
|
| 40 |
+
|
| 41 |
+
# Import the following modules during type checking to enable code intelligence features,
|
| 42 |
+
# such as auto-completion in tools like pylance, even when these modules are not explicitly
|
| 43 |
+
# imported in user code.
|
| 44 |
+
import torch
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
"""
|
| 48 |
+
torch._guards is the definitional source of truth for general purpose guard structures.
|
| 49 |
+
|
| 50 |
+
An important thing to keep in mind here is the preservation of layering. There should be no dynamo notions,
|
| 51 |
+
and no guard installation notions here.
|
| 52 |
+
"""
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class CompileId(NamedTuple):
|
| 56 |
+
frame_id: int
|
| 57 |
+
# This id is per-frame, and counts how many times we've compiled this
|
| 58 |
+
# frame. This could have been a global id but having this be per-frame
|
| 59 |
+
# gives you a better intuitive sense for how many recompiles have occurred
|
| 60 |
+
# so far.
|
| 61 |
+
frame_compile_id: int
|
| 62 |
+
# TODO: consider also tracking the recompilation count
|
| 63 |
+
|
| 64 |
+
def __str__(self):
|
| 65 |
+
return f"{self.frame_id}/{self.frame_compile_id}"
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class TraceId(NamedTuple):
|
| 69 |
+
compile_id: CompileId
|
| 70 |
+
# This starts off as 0, and every time we restart analysis it goes
|
| 71 |
+
# up by one
|
| 72 |
+
attempt: int
|
| 73 |
+
|
| 74 |
+
def __str__(self):
|
| 75 |
+
if self.attempt == 0:
|
| 76 |
+
return str(self.compile_id)
|
| 77 |
+
else:
|
| 78 |
+
return f"{self.compile_id}_{self.attempt}"
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class GuardSource(enum.Enum):
|
| 82 |
+
LOCAL = 0
|
| 83 |
+
GLOBAL = 1
|
| 84 |
+
LOCAL_SPECIALIZED_NN_MODULE = 2
|
| 85 |
+
GLOBAL_SPECIALIZED_NN_MODULE = 3
|
| 86 |
+
CONSTANT = 4
|
| 87 |
+
RANDOM_VALUE = 5
|
| 88 |
+
SHAPE_ENV = 6
|
| 89 |
+
LOCAL_FSDP_MODULE = 7
|
| 90 |
+
GLOBAL_FSDP_MODULE = 8
|
| 91 |
+
BACKWARD_STATE = 9
|
| 92 |
+
EPHEMERAL = 10
|
| 93 |
+
SYNTHETIC_LOCAL = 11
|
| 94 |
+
LOCAL_UNSPECIALIZED_NN_MODULE = 12
|
| 95 |
+
GLOBAL_UNSPECIALIZED_NN_MODULE = 13
|
| 96 |
+
LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE = 14
|
| 97 |
+
GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE = 15
|
| 98 |
+
|
| 99 |
+
def is_fsdp_module(self) -> bool:
|
| 100 |
+
return self in (GuardSource.GLOBAL_FSDP_MODULE, GuardSource.LOCAL_FSDP_MODULE)
|
| 101 |
+
|
| 102 |
+
def is_specialized_nn_module(self) -> bool:
|
| 103 |
+
return (
|
| 104 |
+
self
|
| 105 |
+
in (
|
| 106 |
+
GuardSource.GLOBAL_SPECIALIZED_NN_MODULE,
|
| 107 |
+
GuardSource.LOCAL_SPECIALIZED_NN_MODULE,
|
| 108 |
+
)
|
| 109 |
+
# TODO (anijain2305) - Investigate why is_fsdp_module required.
|
| 110 |
+
or self.is_fsdp_module()
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
def is_unspecialized_nn_module(self) -> bool:
|
| 114 |
+
return self in (
|
| 115 |
+
GuardSource.GLOBAL_UNSPECIALIZED_NN_MODULE,
|
| 116 |
+
GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE,
|
| 117 |
+
GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
|
| 118 |
+
GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
|
| 119 |
+
)
|
| 120 |
+
|
| 121 |
+
def is_unspecialized_builtin_nn_module(self) -> bool:
|
| 122 |
+
return self in (
|
| 123 |
+
GuardSource.GLOBAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
|
| 124 |
+
GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
def is_local(self):
|
| 128 |
+
return self in (
|
| 129 |
+
GuardSource.LOCAL,
|
| 130 |
+
GuardSource.LOCAL_SPECIALIZED_NN_MODULE,
|
| 131 |
+
GuardSource.LOCAL_FSDP_MODULE,
|
| 132 |
+
GuardSource.LOCAL_UNSPECIALIZED_NN_MODULE,
|
| 133 |
+
GuardSource.LOCAL_UNSPECIALIZED_BUILTIN_NN_MODULE,
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
"""
|
| 138 |
+
Base class for a "GuardBuilder" role.
|
| 139 |
+
|
| 140 |
+
The GuardBuilderBase role is to represent a scope within which to build a guard. The name is a little
|
| 141 |
+
confusing, as its not a builder, but for the sake of avoiding a lot of renames and keeping the original reference
|
| 142 |
+
to torchdynamo's GuardBuilder.
|
| 143 |
+
|
| 144 |
+
Note: create_fn is invoked with a GuardBuilderBase and a Guard. A GuardBuilder is chosen based
|
| 145 |
+
on GuardSource's select function.
|
| 146 |
+
|
| 147 |
+
There is value in keeping this GuardBuilderBase empty to keep layering clean.
|
| 148 |
+
"""
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
class GuardBuilderBase:
|
| 152 |
+
pass
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class ShapeGuard(NamedTuple):
|
| 156 |
+
expr: sympy.Expr
|
| 157 |
+
stack: CapturedTraceback
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
@dataclasses.dataclass
|
| 161 |
+
class Guard:
|
| 162 |
+
# originating_source is the source that called the make_guard method to
|
| 163 |
+
# construct this guard object. The property name specifies what exactly it
|
| 164 |
+
# is the guard is guarding on. The meaning of the name is dependent on the
|
| 165 |
+
# create_fn; you must look at the use-site inside create_fn to know what
|
| 166 |
+
# name means.
|
| 167 |
+
#
|
| 168 |
+
# That being said, although you might think this is just a "name", name is
|
| 169 |
+
# usually an arbitrary Python expression that will be evaluated with all
|
| 170 |
+
# globals (and locals, if you create a LOCAL guard) to extract the Python
|
| 171 |
+
# object that we want to perform guard tests on. This evaluation
|
| 172 |
+
# typically happens in GuardBuilder.eval. In these cases, name is
|
| 173 |
+
# typically produced by originating_source.name() (not to be confused with
|
| 174 |
+
# GuardSource - the property source).
|
| 175 |
+
#
|
| 176 |
+
# Occasionally, name is not a valid Python expression; sometimes
|
| 177 |
+
# it is meaningless. Example create_fns that are like this include
|
| 178 |
+
# GRAD_MODE and SHAPE_ENV.
|
| 179 |
+
originating_source: Source
|
| 180 |
+
create_fn: Callable[[GuardBuilderBase, Guard], None]
|
| 181 |
+
|
| 182 |
+
# Export only. These values are written to at time of guard check_fn creation.
|
| 183 |
+
guard_types: Optional[List[str]] = None
|
| 184 |
+
code_list: Optional[List[str]] = None
|
| 185 |
+
obj_weakref: Optional[object] = None
|
| 186 |
+
guarded_class_weakref: Optional[type] = None
|
| 187 |
+
|
| 188 |
+
stack: Optional[CapturedTraceback] = None
|
| 189 |
+
user_stack: Optional[traceback.StackSummary] = None
|
| 190 |
+
_hash: Optional[int] = None
|
| 191 |
+
|
| 192 |
+
def __hash__(self):
|
| 193 |
+
if self._hash is None:
|
| 194 |
+
self._hash = hash((self.name, self.source, id(self.create_fn)))
|
| 195 |
+
return self._hash
|
| 196 |
+
|
| 197 |
+
def sort_key(self):
|
| 198 |
+
# Put the duplicate input guards at the end. The duplicate guards have
|
| 199 |
+
# two sources while guard.name only considers one source.
|
| 200 |
+
from torch._dynamo.guards import GuardBuilder
|
| 201 |
+
|
| 202 |
+
is_duplicate_input = (
|
| 203 |
+
isinstance(self.create_fn, functools.partial)
|
| 204 |
+
and self.create_fn.func is GuardBuilder.DUPLICATE_INPUT
|
| 205 |
+
)
|
| 206 |
+
return (
|
| 207 |
+
is_duplicate_input,
|
| 208 |
+
self.source.value if self.source else -1,
|
| 209 |
+
len(self.name),
|
| 210 |
+
self.name,
|
| 211 |
+
self.inner_create_fn().__code__.co_firstlineno,
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
def __lt__(self, other):
|
| 215 |
+
return self.sort_key() < other.sort_key()
|
| 216 |
+
|
| 217 |
+
def inner_create_fn(self):
|
| 218 |
+
if isinstance(self.create_fn, functools.partial):
|
| 219 |
+
return self.create_fn.func
|
| 220 |
+
else:
|
| 221 |
+
return self.create_fn
|
| 222 |
+
|
| 223 |
+
@property
|
| 224 |
+
def name(self) -> str:
|
| 225 |
+
return self.originating_source.name()
|
| 226 |
+
|
| 227 |
+
@property
|
| 228 |
+
def source(self) -> GuardSource:
|
| 229 |
+
return self.originating_source.guard_source()
|
| 230 |
+
|
| 231 |
+
@staticmethod
|
| 232 |
+
def weakref_to_str(obj_weakref):
|
| 233 |
+
"""
|
| 234 |
+
This is a workaround of a Python weakref bug.
|
| 235 |
+
|
| 236 |
+
`obj_weakref` is instance returned by `weakref.ref`,
|
| 237 |
+
`str(obj_weakref)` is buggy if the original obj overrides __getattr__, e.g:
|
| 238 |
+
|
| 239 |
+
class MyConfig(dict):
|
| 240 |
+
def __getattr__(self, x):
|
| 241 |
+
return self[x]
|
| 242 |
+
|
| 243 |
+
obj = MyConfig(offset=5)
|
| 244 |
+
obj_weakref = weakref.ref(obj)
|
| 245 |
+
str(obj_weakref) # raise error: KeyError: '__name__'
|
| 246 |
+
"""
|
| 247 |
+
if isinstance(obj_weakref, weakref.ReferenceType):
|
| 248 |
+
obj = obj_weakref()
|
| 249 |
+
if obj is not None:
|
| 250 |
+
return f"<weakref at {hex(id(obj_weakref))}; to '{obj.__class__.__name__}' at {hex(id(obj))}>"
|
| 251 |
+
else:
|
| 252 |
+
return f"<weakref at {hex(id(obj_weakref))}; dead>"
|
| 253 |
+
else:
|
| 254 |
+
return str(obj_weakref)
|
| 255 |
+
|
| 256 |
+
def __repr__(self):
|
| 257 |
+
s = f"""
|
| 258 |
+
{self.source.name.lower() if self.source else ""} {repr(self.name)} {self.inner_create_fn().__name__}
|
| 259 |
+
{{
|
| 260 |
+
'guard_types': {self.guard_types},
|
| 261 |
+
'code': {self.code_list},
|
| 262 |
+
'obj_weakref': {self.weakref_to_str(self.obj_weakref)}
|
| 263 |
+
'guarded_class': {self.guarded_class_weakref}
|
| 264 |
+
}}
|
| 265 |
+
"""
|
| 266 |
+
return s
|
| 267 |
+
|
| 268 |
+
def __str__(self):
|
| 269 |
+
output = f"Name: {repr(self.name)}\n"
|
| 270 |
+
source = self.source.name.lower() if self.source else ""
|
| 271 |
+
output += f" Source: {source}\n"
|
| 272 |
+
output += f" Create Function: {self.inner_create_fn().__name__}\n"
|
| 273 |
+
output += f" Guard Types: {self.guard_types}\n"
|
| 274 |
+
output += f" Code List: {self.code_list}\n"
|
| 275 |
+
output += f" Object Weakref: {self.weakref_to_str(self.obj_weakref)}\n"
|
| 276 |
+
output += f" Guarded Class Weakref: {self.guarded_class_weakref}\n"
|
| 277 |
+
return output
|
| 278 |
+
|
| 279 |
+
def create(self, builder: GuardBuilderBase):
|
| 280 |
+
try:
|
| 281 |
+
return self.create_fn(builder, self)
|
| 282 |
+
except Exception:
|
| 283 |
+
log.exception("Error while creating guard:\n%s", str(self).rstrip())
|
| 284 |
+
if self.stack:
|
| 285 |
+
log.error("Created at:\n%s", "".join(self.stack.format()[-4:]).rstrip())
|
| 286 |
+
raise
|
| 287 |
+
|
| 288 |
+
def is_specialized_nn_module(self):
|
| 289 |
+
return self.source.is_specialized_nn_module()
|
| 290 |
+
|
| 291 |
+
def is_fsdp_module(self):
|
| 292 |
+
return self.source.is_fsdp_module()
|
| 293 |
+
|
| 294 |
+
def is_local(self):
|
| 295 |
+
return self.source.is_local()
|
| 296 |
+
|
| 297 |
+
def set_export_info(self, guard_type, guarded_class, code_list, obj_weakref):
|
| 298 |
+
if not self.guard_types:
|
| 299 |
+
self.guard_types = []
|
| 300 |
+
|
| 301 |
+
self.guard_types.append(guard_type)
|
| 302 |
+
|
| 303 |
+
assert self.guarded_class_weakref in (
|
| 304 |
+
guarded_class,
|
| 305 |
+
None,
|
| 306 |
+
), "Guarded class id must be identical, or None"
|
| 307 |
+
self.guarded_class_weakref = guarded_class
|
| 308 |
+
|
| 309 |
+
if not self.code_list:
|
| 310 |
+
self.code_list = code_list
|
| 311 |
+
else:
|
| 312 |
+
self.code_list.extend(code_list)
|
| 313 |
+
|
| 314 |
+
# Some objects are ephemeral, e.g., list[slice(1, 2)]. If we have
|
| 315 |
+
# multiple guards on the same object, the weakref can die between the
|
| 316 |
+
# invocation of set_export_info calls. So a dead weakref is also
|
| 317 |
+
# acceptable.
|
| 318 |
+
assert (
|
| 319 |
+
self.obj_weakref in (obj_weakref, None)
|
| 320 |
+
or callable(self.obj_weakref)
|
| 321 |
+
and self.obj_weakref() is None
|
| 322 |
+
), "Guarded object must be identical, None or ephemeral (dead weakref)"
|
| 323 |
+
self.obj_weakref = obj_weakref
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
T = TypeVar("T")
|
| 327 |
+
|
| 328 |
+
"""
|
| 329 |
+
Parent structure for guard env expressions.
|
| 330 |
+
A GuardEnvExpr can have any subtype.
|
| 331 |
+
Note: All subtypes must be handled exhaustively in
|
| 332 |
+
torch._dynamo.guards._parse_guard_env_guards to avoid a RuntimeError.
|
| 333 |
+
"""
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
@dataclasses.dataclass
|
| 337 |
+
class GuardEnvExpr:
|
| 338 |
+
pass
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
"""
|
| 342 |
+
A class representing a pair of duplicate inputs.
|
| 343 |
+
input_pos_a and input_pos_b are input positions we have deduped.
|
| 344 |
+
"""
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
@dataclasses.dataclass
|
| 348 |
+
class DuplicateInputs(GuardEnvExpr):
|
| 349 |
+
input_source_a: Source
|
| 350 |
+
input_source_b: Source
|
| 351 |
+
|
| 352 |
+
def __post_init__(self):
|
| 353 |
+
assert self.input_source_a != self.input_source_b
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
"""
|
| 357 |
+
Checkpointable is an interface for driving state snapshotting, left purposely vague for now.
|
| 358 |
+
|
| 359 |
+
copy_graphstate() -> T, a somewhat legacy name, is expected to emit a snapshot of any type that
|
| 360 |
+
can also be taken in at restore_graphstate(T) calls.
|
| 361 |
+
|
| 362 |
+
When to snapshot, is, at the moment, an implementation detail of upstream callers. Checkpointable
|
| 363 |
+
does not provide any garuantees around consistency, idempotency, or safety of calling its APIs, yet.
|
| 364 |
+
|
| 365 |
+
In the future, it will have a closer coupling to a generic Checkpoint management system.
|
| 366 |
+
"""
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
class Checkpointable(Generic[T]):
    """Interface for driving state snapshotting.

    copy_graphstate() emits a snapshot of type T that can later be fed
    back into restore_graphstate(T). When to snapshot is an
    implementation detail of upstream callers.
    """

    @abstractmethod
    def copy_graphstate(self) -> T: ...

    @abstractmethod
    def restore_graphstate(self, state: T): ...
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
class GuardsCheckpointState:
    """
    The GuardCheckpointState - it is the T of Checkpointable[T] for GuardsContext
    """

    dynamo_guards: Set[Guard] = set()

    def __init__(self, dynamo_guards):
        self.dynamo_guards = dynamo_guards

    def diff(self, other):
        """
        Produces a delta against another GuardsCheckpointState.

        Returns None if no delta is found, otherwise, return a set() of mismatched
        Guard type objects.
        """
        delta = self.dynamo_guards.difference(other.dynamo_guards)
        # An empty delta set is reported as None.
        return delta or None

    def __eq__(self, other):
        return self.diff(other) is None
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
class ModuleContextCheckpointState:
    """Snapshot of a ModuleContext: a mapping of module key names to modules."""

    nn_modules: Dict[str, torch.nn.Module] = {}

    def __init__(self, nn_modules):
        self.nn_modules = nn_modules

    def diff(self, other):
        """
        Produces a delta against another ModuleContextCheckpointState.

        Returns None if no delta is found, otherwise, return a set() of mismatched
        module key names.
        """
        # Keys present here but absent in `other`; empty delta reports None.
        missing = self.nn_modules.keys() - other.nn_modules.keys()
        return missing or None

    def __eq__(self, other):
        return self.diff(other) is None
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
class ModuleContext(Checkpointable[ModuleContextCheckpointState]):
    """Checkpointable mapping of names to modules accumulated during tracing."""

    def __init__(self) -> None:
        # name -> module; values typed Any here although the checkpoint
        # state declares torch.nn.Module.
        self.nn_modules: Dict[str, Any] = {}

    def copy_graphstate(self):
        # Shallow copy so later mutation of self.nn_modules does not
        # alter the snapshot.
        return ModuleContextCheckpointState(dict(self.nn_modules))

    def restore_graphstate(self, state):
        # NB: takes ownership of the dict held by `state`.
        assert isinstance(state, ModuleContextCheckpointState)
        self.nn_modules = state.nn_modules
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
class GlobalContextCheckpointState:
    """Snapshot of a GlobalContext: a mapping of global state key names."""

    global_state: Dict[str, Tuple[Callable, ...]] = {}

    def __init__(self, global_states):
        self.global_state = global_states

    def diff(self, other):
        """
        Produces a delta against another GlobalContextCheckpointState.

        Returns None if no delta is found, otherwise, return a set() of mismatched
        global key names.
        """
        # Keys present here but absent in `other`; empty delta reports None.
        extra = self.global_state.keys() - other.global_state.keys()
        return extra or None

    def __eq__(self, other):
        return self.diff(other) is None
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
class GlobalContext(Checkpointable[GlobalContextCheckpointState]):
    """
    This keeps track of the global torch state during tracing of a function.
    For example, torch.is_grad_enabled.
    """

    # Exact set of keys global_state must contain by the time
    # restore_graphstate is invoked (enforced by the assert there).
    _supported_global_states = {
        "grad_enabled",
        "torch_function_enabled",
        "autocast_enabled",
        "autocast_cpu_enabled",
        "autocast_gpu_dtype",
        "autocast_cpu_dtype",
        "autocast_cache_enabled",
    }

    def __init__(self) -> None:
        # state name -> (setter, args payload); see restore_graphstate.
        self.global_state: Dict[str, Tuple[Callable, ...]] = {}

    def copy_graphstate(self):
        # Shallow copy so the snapshot is isolated from later mutation.
        return GlobalContextCheckpointState(dict(self.global_state))

    def restore_graphstate(self, state):
        assert isinstance(state, GlobalContextCheckpointState)
        self.global_state = state.global_state
        assert (
            len(self.global_state) == len(self._supported_global_states)
            and set(self.global_state.keys()) == self._supported_global_states
        ), "Global state mismatch"
        # Re-apply each saved state by calling its setter with the saved
        # payload (passed as a single argument, as-is).
        for func, args in self.global_state.values():
            func(args)
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
"""
|
| 493 |
+
A GuardsContext is a checkpointable representation of all the guards in the current tracing
|
| 494 |
+
context. Its lifecycle is bound 1:1 to the tracing context, and it should never be instantiated
|
| 495 |
+
directly outside of it. For passing around internal state representations of this object,
|
| 496 |
+
prefer to extract them with copy_graphstate to produce a GuardsCheckpointState.
|
| 497 |
+
"""
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
# Like a Set[Guard] but will record the user stack on all guards at the
|
| 501 |
+
# time they were installed at their destination
|
| 502 |
+
class GuardsSet:
    """A Set[Guard]-like container that records debug/user stacks on each
    guard at the time it is installed at its destination."""

    def __init__(self, inner=None):
        self.inner = set() if inner is None else inner

    def __iter__(self):
        return iter(self.inner)

    def __len__(self):
        return len(self.inner)

    # Subtraction along with bool is typically used to determine the delta of
    # added guards between checkpoints for higher order ops
    def __sub__(self, other):
        return GuardsSet(self.inner - other.inner)

    def __bool__(self):
        return bool(self.inner)

    def add(self, guard: Guard, *, collect_debug_stack=True, skip=0):
        if guard in self.inner:
            return
        if collect_debug_stack:
            if guard.stack is None:
                # +1 skips this frame itself.
                guard.stack = CapturedTraceback.extract(skip=1 + skip)
            if guard.user_stack is None:
                guard.user_stack = TracingContext.extract_stack()
        self.inner.add(guard)

    def update(self, *others: Set[Guard]):
        for group in others:
            for g in group:
                self.add(g, skip=1)

    def remove_guards_with_source(self, source):
        """Delete all guards with a given source"""
        self.inner = {g for g in self.inner if g.originating_source != source}
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
class GuardsContext(Checkpointable[GuardsCheckpointState]):
    """Holds the guards accumulated for the current tracing context."""

    def __init__(self) -> None:
        self.dynamo_guards: GuardsSet = GuardsSet()
        self.aotautograd_guards: List[GuardEnvExpr] = []

    def copy_graphstate(self):
        # Only the dynamo guards are snapshotted; aotautograd_guards are
        # not part of the checkpoint state.
        return GuardsCheckpointState(set(self.dynamo_guards.inner))

    def restore_graphstate(self, state):
        # NB: "steals" the passed in state
        assert isinstance(state, GuardsCheckpointState)
        self.dynamo_guards = GuardsSet(state.dynamo_guards)
|
| 554 |
+
|
| 555 |
+
|
| 556 |
+
_TLS = threading.local()
|
| 557 |
+
|
| 558 |
+
"""
|
| 559 |
+
TracingContext is the source of truth for all currently accumulated information
|
| 560 |
+
needed to trace. Its lifecycle is kept 1:1 when using TorchDynamo, but other systems
|
| 561 |
+
are open to managing their own TracingContext with that in mind.
|
| 562 |
+
|
| 563 |
+
The purpose of TracingContext is not to be a dumping ground, or god object, but rather to avoid
|
| 564 |
+
having to plumb complex subsystems across multiple verticals.
|
| 565 |
+
|
| 566 |
+
Ex: A common example is guard accumulation between dynamo, shape_env, aot_autograd, and inductor.
|
| 567 |
+
Accessing the current tracing context via
|
| 568 |
+
TracingContext.get() allows users to accumulate their own guards for processing, without needing to know how
|
| 569 |
+
to plumb objects back up to where frame interpretation happened.
|
| 570 |
+
|
| 571 |
+
Note that you can end up with multiple TracingContext for a single compilation
|
| 572 |
+
of a frame, as we reset the TracingContext whenever we restart analysis.
|
| 573 |
+
CompileContext is a more overarching context that encompasses multiple restarts.
|
| 574 |
+
"""
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
class CompileContext:
    """Thread-local context for one compilation: tracks the CompileId and
    the current attempt number (restarts)."""

    @staticmethod
    def get() -> CompileContext:
        # Asserts a context has been installed on this thread (the
        # attribute is set by compile_context() below).
        assert _TLS.compile_context is not None
        return _TLS.compile_context

    @staticmethod
    def try_get() -> Optional[CompileContext]:
        # Non-throwing variant: None when no context is installed.
        return getattr(_TLS, "compile_context", None)

    def __init__(self, compile_id):
        assert compile_id is None or isinstance(compile_id, CompileId)
        self.compile_id: Optional[CompileId] = compile_id
        # Attempt counter consumed by current_trace_id; presumably bumped
        # externally on restart analysis — not mutated in this class.
        self.attempt = 0

    @staticmethod
    def current_compile_id():
        # Returns the compile id of the installed context, or None.
        self = CompileContext.try_get()
        if self is None:
            return None
        return self.compile_id

    @staticmethod
    def current_trace_id():
        # Returns TraceId(compile_id, attempt), or None when no context
        # (or no compile id) is available.
        self = CompileContext.try_get()
        if self is None:
            return None
        if self.compile_id is None:
            return None
        return TraceId(self.compile_id, self.attempt)
|
| 607 |
+
|
| 608 |
+
|
| 609 |
+
class TracingContext:
    """
    Provides the currently installed TracingContext, or None.

    Note that it is a staticmethod, and invocations outside of `with tracing()` (see below), are valid but
    will return None.
    """

    @staticmethod
    def try_get() -> Optional[TracingContext]:
        # None when no context is installed on this thread.
        return getattr(_TLS, "tracing_context", None)

    @staticmethod
    def get() -> TracingContext:
        if ctx := TracingContext.try_get():
            return ctx
        raise RuntimeError(
            "TracingContext.get() must be called within an ongoing trace."
        )

    def __init__(self, fake_mode):
        self.guards_context = GuardsContext()
        self.module_context = ModuleContext()
        self.global_context = GlobalContext()
        self.fake_mode = fake_mode
        self.frame_summary_stack = []
        # This is morally part of frame_summary_stack, but it is kept separate
        # for clarity. As we process a frame, this variable gets updated
        # to keep track of what line we are in the function. We make a
        # function call, this gets cleared and the frame location is pushed
        # to frame_summary_stack (prepping this variable for the inner frame's
        # progress)
        self.loc_in_frame = None
        # this is only set after aot_autograd
        self.fw_metadata = None
        # this is only set after aot_autograd
        self.aot_graph_name = None
        self.params_flat = None
        # this is for extended return calling convention from backend
        # compiler to aot_autograd
        # Per output, what the compiler specified stride of the output is,
        # or None if no stride is known. This is always the HINT, it
        # is never a SymInt (it would be better if it was a SymInt, but
        # I can't conveniently get this from Inductor atm. Also, be
        # careful not to accidentally induce guards on the SymInt if
        # you ever do change this in aot_autograd.py; you should check
        # on permutations preferentially.)
        self.output_strides: Optional[List[Optional[Tuple[int, ...]]]] = None
        # When this is True, whenever we encounter an int in Dynamo tracing,
        # we will (1) force unspec it and (2) force it as a size-like unbacked
        # integer. This is currently used when processing certain lists of
        # ints that are known to be size-like and may have 0/1 entries that we
        # must not specialize on.
        self.force_unspec_int_unbacked_size_like = False
        # See note [Tensor Fakification and Symbol Caching]
        self.tensor_to_context = WeakTensorKeyDictionary()

        # If this true, Aot Autograd will return output Fake Tensors with appropriate
        # meta on the first invocation
        # see note: [Returning Fake Tensors on First AOT Autograd Call]
        self.fakify_first_call = False

    def clear(self):
        # Look at the note in output_graph.py in function `save_global_state`
        # for the context on clearing global context.
        self.global_context.global_state = {}

    @staticmethod
    @contextmanager
    def patch(**kwargs):
        """Temporarily set attributes of the current TracingContext,
        restoring the prior values on exit."""
        prior = {}
        ctx = TracingContext.get()

        for key in kwargs.keys():
            # AttributeError on invalid entry (getattr with no default)
            prior[key] = getattr(ctx, key)
        for key, val in kwargs.items():
            setattr(ctx, key, val)
        try:
            yield
        finally:
            for key, val in prior.items():
                setattr(ctx, key, val)

    @staticmethod
    def extract_stack():
        """Return the current user-frame stack as a traceback.StackSummary;
        empty when no TracingContext is installed."""
        self = TracingContext.try_get()
        if self is None:
            return traceback.StackSummary()
        stack = self.frame_summary_stack
        if self.loc_in_frame is not None:
            # Append the in-progress location of the innermost frame.
            stack = stack + [self.loc_in_frame]
        return traceback.StackSummary.from_list(stack)

    # Call this when you want to call into some code that isn't necessarily
    # associated with the current frame state
    @staticmethod
    @contextlib.contextmanager
    def clear_frame():
        tc = TracingContext.get()
        with unittest.mock.patch.object(
            tc, "frame_summary_stack", []
        ), unittest.mock.patch.object(tc, "loc_in_frame", None):
            try:
                yield
            except Exception as e:
                # Prevent real_stack from getting attached
                #
                # The invariant is that if an Exception has real_stack, we've
                # appropriately attached a user stack and we no longer need to
                # attach anything. Because we cannot conveniently interpose
                # when an exception is thrown, we instead interpose everywhere
                # we set what the user stack is set (using the context
                # manager). However, our compiler stack does "tail calls"
                # (when it calls into user compiler), at which point the
                # parent exception frames would incorrectly attach an
                # incorrect frame.
                #
                # However, if, somehow, someone raised an exception with this
                # scope that had a stack (for example, because they are
                # restoring the user stack state appropriately as they process
                # node by node), we should respect it. Thus, we cannot
                # unconditionally set None.
                if not hasattr(e, "real_stack"):
                    e.real_stack = None  # type: ignore[attr-defined]
                raise

    @staticmethod
    @contextlib.contextmanager
    def current_frame(frame_summary):
        # frame_summary can be None to solely take advantage of real_stack
        # attachment to thrown exceptions
        tc = TracingContext.get()
        if frame_summary is not None:
            tc.frame_summary_stack.append(frame_summary)
        old = tc.loc_in_frame
        tc.loc_in_frame = None
        try:
            yield
        except Exception as e:
            # Attach the user stack once; see clear_frame for the invariant.
            if not hasattr(e, "real_stack"):
                e.real_stack = tc.extract_stack()  # type: ignore[attr-defined]
            raise
        finally:
            if frame_summary is not None:
                tc.frame_summary_stack.pop()
            tc.loc_in_frame = old

    @staticmethod
    @contextlib.contextmanager
    def report_output_strides():
        """Yield a fresh list that collects output strides during the scope;
        yields None when no TracingContext is installed."""
        tc = TracingContext.try_get()
        if tc is None:
            yield None
            return
        old_output_strides = tc.output_strides
        tc.output_strides = []
        try:
            yield tc.output_strides
        finally:
            tc.output_strides = old_output_strides

    @staticmethod
    def set_current_loc(filename, lineno, frame_name):
        # lookup_line=False defers reading source text until rendered.
        TracingContext.get().loc_in_frame = traceback.FrameSummary(
            filename, lineno, frame_name, lookup_line=False
        )
|
| 776 |
+
|
| 777 |
+
|
| 778 |
+
@contextmanager
def compile_context(context: Optional[CompileContext]):
    """Install `context` as the thread-local CompileContext for the scope,
    restoring the previous one (and reporting the frame) on exit."""
    prev_context = getattr(_TLS, "compile_context", None)
    _TLS.compile_context = context
    try:
        yield context
    finally:
        if context is not None and context.compile_id is not None:
            cid = context.compile_id
            set_context_frame((cid.frame_id, cid.frame_compile_id, context.attempt))
        _TLS.compile_context = prev_context
|
| 795 |
+
|
| 796 |
+
|
| 797 |
+
@contextmanager
def tracing(context: Optional[TracingContext]):
    """
    This function installs the passed in tracing context as a dynamic scoped
    global variable.

    Calls to TracingContext.try_get() while not under a `with tracing()`
    context will return None (TracingContext.get() raises instead).
    """
    old_context = getattr(_TLS, "tracing_context", None)
    _TLS.tracing_context = context
    try:
        yield context
    except Exception as e:
        # Attach the user stack to the exception once, unless an inner
        # scope already did (see TracingContext.clear_frame/current_frame).
        if not hasattr(e, "real_stack") and context is not None:
            e.real_stack = context.extract_stack()  # type: ignore[attr-defined]
        raise
    finally:
        # Clean up any shape env state before uninstalling the context.
        if (
            context is not None
            and context.fake_mode is not None
            and context.fake_mode.shape_env is not None
        ):
            context.fake_mode.shape_env.cleanup()
        _TLS.tracing_context = old_context
|
| 822 |
+
|
| 823 |
+
|
| 824 |
+
# Subclasses can be found in torch/_dynamo/source.py
|
| 825 |
+
# TODO(voz): Consider a toplevel torch/_source.py
|
| 826 |
+
@dataclasses.dataclass(frozen=True)
class Source:
    """Base class describing the origin of a traced value.

    Subclasses can be found in torch/_dynamo/source.py.
    """

    def is_dict_key(self):
        return False

    def is_ephemeral(self):
        return False

    def reconstruct(self, codegen):
        raise NotImplementedError

    def guard_source(self) -> GuardSource:
        raise NotImplementedError

    def name(self) -> str:
        raise NotImplementedError

    def make_guard(self, fn) -> Guard:
        # Constants cannot be guarded on.
        if self.guard_source() is not GuardSource.CONSTANT:
            return Guard(self, fn)
        raise NotImplementedError

    def is_specialized_nn_module(self) -> bool:
        return self.guard_source().is_specialized_nn_module()

    def subguards_allowed(self):
        """True if you can guard on attributes of this"""
        return self.guard_source() != GuardSource.SYNTHETIC_LOCAL
|
| 854 |
+
|
| 855 |
+
|
| 856 |
+
# Subclasses can be found in torch/_dynamo/source.py
|
| 857 |
+
@dataclasses.dataclass(frozen=True)
class ChainedSource(Source):
    """A Source wrapping a base Source; recursive queries delegate to it.

    Subclasses can be found in torch/_dynamo/source.py.
    """

    base: Source

    def is_dict_key(self):
        # Recurse until you either hit a ConstDictKey or a Source
        return self.base.is_dict_key()

    def is_ephemeral(self):
        return self.base.is_ephemeral()
|
| 867 |
+
|
| 868 |
+
|
| 869 |
+
def detect_fake_mode(inputs: Any = None):
    """
    Attempts to "detect" what the current fake mode is.  If there is one ambiently
    available from TracingContext, we preferentially use that.  Otherwise, we
    heuristically detect the fake mode via the following sources, in order of
    priority:

        - Currently active fake mode on stack
        - Fake mode associated with passed in tensors (inputs does not
          have to be flattened)

    Returns None when no candidate fake mode is found; asserts that all
    detected candidates are the same object.
    """
    from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode

    # (mode, description, index) candidates, collected in priority order.
    fake_modes = []

    if context := TracingContext.try_get():
        fake_mode = context.fake_mode
        if fake_mode is not None:
            fake_modes.append((fake_mode, "tracing context", 0))

    from torch.utils._python_dispatch import _get_current_dispatch_mode_stack

    # Innermost (top of stack) first.
    for i, m in enumerate(reversed(_get_current_dispatch_mode_stack())):
        if isinstance(m, FakeTensorMode):
            fake_modes.append((m, "active fake mode", i))

    flat_inputs = pytree.tree_leaves(inputs)
    for i, flat_input in enumerate(flat_inputs):
        if isinstance(flat_input, FakeTensor):
            fake_modes.append((flat_input.fake_mode, "fake tensor input", i))

    if fake_modes:
        # Every detected mode must be identical to the highest-priority one.
        fake_mode, desc1, i1 = fake_modes[0]
        for m, desc2, i2 in fake_modes[1:]:
            assert fake_mode is m, (
                f"fake mode ({fake_mode}) from {desc1} {i1} doesn't match mode ({m}) from {desc2} {i2}\n\n"
                f"fake mode from {desc1} {i1} allocated at:\n{fake_mode.stack}\n"
                f"fake mode from {desc2} {i2} allocated at:\n{m.stack}"
            )
        return fake_mode
    else:
        return None
|
| 911 |
+
|
| 912 |
+
|
| 913 |
+
def active_fake_mode():
    """
    Inspects the dispatch mode stack for an active fake mode and returns it.
    Returns None if no fake mode is active.
    """
    from torch._subclasses.fake_tensor import FakeTensorMode
    from torch.utils._python_dispatch import _get_current_dispatch_mode_stack

    # Walk the stack from innermost to outermost mode.
    for mode in reversed(_get_current_dispatch_mode_stack()):
        if isinstance(mode, FakeTensorMode):
            return mode

    return None
|
pllava/lib/python3.10/site-packages/torch/_linalg_utils.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
"""Various linear algebra utility methods for internal use."""
|
| 3 |
+
|
| 4 |
+
from typing import Optional, Tuple
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch import Tensor
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def is_sparse(A):
    """Check if tensor A is a sparse (COO layout) tensor.

    Raises TypeError when A is not a torch.Tensor.
    """
    if not isinstance(A, torch.Tensor):
        error_str = "expected Tensor"
        if not torch.jit.is_scripting():
            # Include the offending type outside TorchScript.
            error_str += f" but got {type(A)}"
        raise TypeError(error_str)
    return A.layout == torch.sparse_coo
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def get_floating_dtype(A):
    """Return the floating point dtype of tensor A.

    Integer (and any other non-fp16/32/64) types map to float32.
    """
    floating_dtypes = (torch.float16, torch.float32, torch.float64)
    return A.dtype if A.dtype in floating_dtypes else torch.float32
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
    """Multiply two matrices.

    If A is None, return B. A can be sparse or dense. B is always
    dense.
    """
    if A is None:
        return B
    # Pick the sparse or dense kernel based on A's layout.
    mm = torch.sparse.mm if is_sparse(A) else torch.matmul
    return mm(A, B)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def bform(X: Tensor, A: Optional[Tensor], Y: Tensor) -> Tensor:
    """Return bilinear form of matrices: :math:`X^T A Y`."""
    inner = matmul(A, Y)
    return matmul(X.mT, inner)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def qform(A: Optional[Tensor], S: Tensor):
    """Return quadratic form :math:`S^T A S`."""
    # Quadratic form is the bilinear form with X == Y == S.
    return bform(S, A, S)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def basis(A):
    """Return orthogonal basis of A columns."""
    Q, _ = torch.linalg.qr(A)
    return Q
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def symeig(A: Tensor, largest: Optional[bool] = False) -> Tuple[Tensor, Tensor]:
    """Return eigenpairs of A with specified ordering.

    Eigenvalues come back ascending (torch.linalg.eigh order) unless
    `largest` is truthy, in which case the order is reversed. A `largest`
    of None counts as False.
    """
    want_largest = bool(largest)
    E, Z = torch.linalg.eigh(A, UPLO="U")
    # assuming that E is ordered ascending by eigh
    if want_largest:
        E = E.flip(-1)
        Z = Z.flip(-1)
    return E, Z
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
# These functions were deprecated and removed
|
| 73 |
+
# This nice error message can be removed in version 1.13+
|
| 74 |
+
def matrix_rank(input, tol=None, symmetric=False, *, out=None) -> Tensor:
    """Removed function stub; always raises with migration guidance."""
    message = (
        "This function was deprecated since version 1.9 and is now removed.\n"
        "Please use the `torch.linalg.matrix_rank` function instead. "
        "The parameter 'symmetric' was renamed in `torch.linalg.matrix_rank()` to 'hermitian'."
    )
    raise RuntimeError(message)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def solve(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]:
    """Removed function stub; always raises with migration guidance."""
    message = (
        "This function was deprecated since version 1.9 and is now removed. "
        "`torch.solve` is deprecated in favor of `torch.linalg.solve`. "
        "`torch.linalg.solve` has its arguments reversed and does not return the LU factorization.\n\n"
        "To get the LU factorization see `torch.lu`, which can be used with `torch.lu_solve` or `torch.lu_unpack`.\n"
        "X = torch.solve(B, A).solution "
        "should be replaced with:\n"
        "X = torch.linalg.solve(A, B)"
    )
    raise RuntimeError(message)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def lstsq(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]:
    """Removed function stub; always raises with migration guidance."""
    message = (
        "This function was deprecated since version 1.9 and is now removed. "
        "`torch.lstsq` is deprecated in favor of `torch.linalg.lstsq`.\n"
        "`torch.linalg.lstsq` has reversed arguments and does not return the QR decomposition in "
        "the returned tuple (although it returns other information about the problem).\n\n"
        "To get the QR decomposition consider using `torch.linalg.qr`.\n\n"
        "The returned solution in `torch.lstsq` stored the residuals of the solution in the "
        "last m - n columns of the returned value whenever m > n. In torch.linalg.lstsq, "
        "the residuals are in the field 'residuals' of the returned named tuple.\n\n"
        "The unpacking of the solution, as in\n"
        "X, _ = torch.lstsq(B, A).solution[:A.size(1)]\n"
        "should be replaced with:\n"
        "X = torch.linalg.lstsq(A, B).solution"
    )
    raise RuntimeError(message)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def _symeig(
|
| 112 |
+
input,
|
| 113 |
+
eigenvectors=False,
|
| 114 |
+
upper=True,
|
| 115 |
+
*,
|
| 116 |
+
out=None,
|
| 117 |
+
) -> Tuple[Tensor, Tensor]:
|
| 118 |
+
raise RuntimeError(
|
| 119 |
+
"This function was deprecated since version 1.9 and is now removed. "
|
| 120 |
+
"The default behavior has changed from using the upper triangular portion of the matrix by default "
|
| 121 |
+
"to using the lower triangular portion.\n\n"
|
| 122 |
+
"L, _ = torch.symeig(A, upper=upper) "
|
| 123 |
+
"should be replaced with:\n"
|
| 124 |
+
"L = torch.linalg.eigvalsh(A, UPLO='U' if upper else 'L')\n\n"
|
| 125 |
+
"and\n\n"
|
| 126 |
+
"L, V = torch.symeig(A, eigenvectors=True) "
|
| 127 |
+
"should be replaced with:\n"
|
| 128 |
+
"L, V = torch.linalg.eigh(A, UPLO='U' if upper else 'L')"
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def eig(
    self: Tensor,
    eigenvectors: bool = False,
    *,
    e=None,
    v=None,
) -> Tuple[Tensor, Tensor]:
    """Removed function stub; always raises with migration guidance."""
    message = (
        "This function was deprecated since version 1.9 and is now removed. "
        "`torch.linalg.eig` returns complex tensors of dtype `cfloat` or `cdouble` rather than real tensors "
        "mimicking complex tensors.\n\n"
        "L, _ = torch.eig(A) "
        "should be replaced with:\n"
        "L_complex = torch.linalg.eigvals(A)\n\n"
        "and\n\n"
        "L, V = torch.eig(A, eigenvectors=True) "
        "should be replaced with:\n"
        "L_complex, V_complex = torch.linalg.eig(A)"
    )
    raise RuntimeError(message)
|
pllava/lib/python3.10/site-packages/torch/_lowrank.py
ADDED
|
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Implement various linear algebra algorithms for low rank matrices."""
|
| 2 |
+
|
| 3 |
+
__all__ = ["svd_lowrank", "pca_lowrank"]
|
| 4 |
+
|
| 5 |
+
from typing import Optional, Tuple
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch import _linalg_utils as _utils, Tensor
|
| 9 |
+
from torch.overrides import handle_torch_function, has_torch_function
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def get_approximate_basis(
    A: Tensor,
    q: int,
    niter: Optional[int] = 2,
    M: Optional[Tensor] = None,
) -> Tensor:
    """Return a tensor :math:`Q` with :math:`q` orthonormal columns such
    that :math:`Q Q^H A` approximates :math:`A` (or, when ``M`` is given,
    such that :math:`Q Q^H (A - M)` approximates :math:`A - M`), without
    ever materializing a tensor of the size of :math:`A` or :math:`M`.

    .. note:: The implementation follows Algorithm 4.4 from
              Halko et al., 2009.

    .. note:: For an adequate approximation of a k-rank matrix
              :math:`A`, where k is not known in advance but could be
              estimated, choose ``q`` such that
              :math:`k <= q <= min(2*k, m, n)`. For large low-rank
              matrices, take :math:`q = k + 5..10`. If k is relatively
              small compared to :math:`min(m, n)`, choosing
              :math:`q = k + 0..2` may be sufficient.

    .. note:: This routine draws random numbers; to obtain repeatable
              results, reset the seed of the pseudorandom number
              generator.

    Args:
        A (Tensor): the input tensor of size :math:`(*, m, n)`.
        q (int): the dimension of the subspace spanned by the columns
            of :math:`Q`.
        niter (int, optional): the number of subspace iterations to
            conduct; must be a nonnegative integer. In most cases the
            default value 2 is more than enough.
        M (Tensor, optional): the input tensor's mean of size
            :math:`(*, m, n)`.

    References:
        - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
          structure with randomness: probabilistic algorithms for
          constructing approximate matrix decompositions,
          arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
          `arXiv <http://arxiv.org/abs/0909.4061>`_).
    """
    n_iterations = 2 if niter is None else niter
    dtype = A.dtype if A.is_complex() else _utils.get_floating_dtype(A)
    mm = _utils.matmul

    def orthonormal_image(mat, mean, block):
        # Multiply ``mat`` (implicitly reduced by ``mean``) with ``block``
        # and orthonormalize the result's columns via a thin QR.
        product = mm(mat, block)
        if mean is not None:
            product = product - mm(mean, block)
        return torch.linalg.qr(product).Q

    # Random test matrix used to sample the range of A (resp. A - M).
    omega = torch.randn(A.shape[-1], q, dtype=dtype, device=A.device)

    # NOTE: torch.geqrf + torch.ormqr would be faster here, but geqrf is
    # not differentiable, so we stick with torch.linalg.qr.
    Q = orthonormal_image(A, M, omega)
    for _ in range(n_iterations):
        # One round of subspace (power) iteration: map through A^H, then A,
        # re-orthonormalizing after each product for numerical stability.
        Q = orthonormal_image(A.mH, None if M is None else M.mH, Q)
        Q = orthonormal_image(A, M, Q)
    return Q
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def svd_lowrank(
    A: Tensor,
    q: Optional[int] = 6,
    niter: Optional[int] = 2,
    M: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""Return the singular value decomposition ``(U, S, V)`` of a matrix,
    batches of matrices, or a sparse matrix :math:`A` such that
    :math:`A \approx U \operatorname{diag}(S) V^{\text{H}}`. When :math:`M`
    is supplied, the SVD is computed for :math:`A - M` instead.

    .. note:: The implementation follows Algorithm 5.1 from
              Halko et al., 2009.

    .. note:: For an adequate approximation of a k-rank matrix
              :math:`A`, where k is not known in advance but could be
              estimated, choose ``q`` such that
              :math:`k <= q <= min(2*k, m, n)`. For large low-rank
              matrices, take :math:`q = k + 5..10`. If k is relatively
              small compared to :math:`min(m, n)`, choosing
              :math:`q = k + 0..2` may be sufficient.

    .. note:: This is a randomized method. To obtain repeatable results,
              set the seed for the pseudorandom number generator.

    .. note:: In general, prefer the full-rank SVD implementation
              :func:`torch.linalg.svd` for dense matrices due to its 10x
              higher performance characteristics. The low-rank SVD is
              useful for huge sparse matrices that
              :func:`torch.linalg.svd` cannot handle.

    Args:
        A (Tensor): the input tensor of size :math:`(*, m, n)`.
        q (int, optional): a slightly overestimated rank of A.
        niter (int, optional): the number of subspace iterations to
            conduct; must be a nonnegative integer, and defaults to 2.
        M (Tensor, optional): the input tensor's mean of size
            :math:`(*, m, n)`, which will be broadcast to the size of A
            in this function.

    References:
        - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
          structure with randomness: probabilistic algorithms for
          constructing approximate matrix decompositions,
          arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
          `arXiv <https://arxiv.org/abs/0909.4061>`_).
    """
    if not torch.jit.is_scripting():
        # __torch_function__ protocol: when any operand is a tensor-like
        # subclass (not a plain Tensor / None), defer to its handler.
        operands = (A, M)
        all_plain = all(type(t) in (torch.Tensor, type(None)) for t in operands)
        if not all_plain and has_torch_function(operands):
            return handle_torch_function(
                svd_lowrank, operands, A, q=q, niter=niter, M=M
            )
    return _svd_lowrank(A, q=q, niter=niter, M=M)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def _svd_lowrank(
    A: Tensor,
    q: Optional[int] = 6,
    niter: Optional[int] = 2,
    M: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
    """Randomized low-rank SVD of ``A`` (or ``A - M`` when ``M`` is given).

    Implements Algorithm 5.1 of Halko et al., 2009: project onto an
    approximate range basis, take the exact SVD of the small projected
    matrix, then lift the left factor back to the original space.
    """
    rank = 6 if q is None else q
    m, n = A.shape[-2:]
    mm = _utils.matmul
    if M is not None:
        M = M.broadcast_to(A.size())

    # Work with a tall matrix: conjugate-transpose wide inputs so that the
    # small SVD below is taken of a (rank x min(m, n))-shaped matrix.
    transposed = m < n
    if transposed:
        A = A.mH
        if M is not None:
            M = M.mH

    # Orthonormal basis Q approximating the range of A (resp. A - M).
    Q = get_approximate_basis(A, rank, niter=niter, M=M)
    # Project into the subspace; B is small, so its full SVD is cheap.
    B = mm(Q.mH, A)
    if M is not None:
        B = B - mm(Q.mH, M)
    U, S, Vh = torch.linalg.svd(B, full_matrices=False)
    V = Vh.mH
    # Lift the left singular vectors back to the original space.
    U = Q.matmul(U)

    # Undo the tall/wide swap: for the transposed problem the roles of the
    # left and right singular vectors are exchanged.
    if transposed:
        U, V = V, U

    return U, S, V
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def pca_lowrank(
    A: Tensor,
    q: Optional[int] = None,
    center: bool = True,
    niter: int = 2,
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""Performs linear Principal Component Analysis (PCA) on a low-rank
    matrix, batches of such matrices, or sparse matrix.

    This function returns a namedtuple ``(U, S, V)`` which is the
    nearly optimal approximation of a singular value decomposition of
    a centered matrix :math:`A` such that :math:`A \approx U \operatorname{diag}(S) V^{\text{H}}`

    .. note:: The relation of ``(U, S, V)`` to PCA is as follows:

        - :math:`A` is a data matrix with ``m`` samples and
          ``n`` features

        - the :math:`V` columns represent the principal directions

        - :math:`S ** 2 / (m - 1)` contains the eigenvalues of
          :math:`A^T A / (m - 1)` which is the covariance of
          ``A`` when ``center=True`` is provided.

        - ``matmul(A, V[:, :k])`` projects data to the first k
          principal components

    .. note:: Different from the standard SVD, the size of returned
        matrices depend on the specified rank and q
        values as follows:

        - :math:`U` is m x q matrix

        - :math:`S` is q-vector

        - :math:`V` is n x q matrix

    .. note:: To obtain repeatable results, reset the seed for the
        pseudorandom number generator

    Args:

        A (Tensor): the input tensor of size :math:`(*, m, n)`

        q (int, optional): a slightly overestimated rank of
            :math:`A`. By default, ``q = min(6, m,
            n)``.

        center (bool, optional): if True, center the input tensor,
            otherwise, assume that the input is
            centered.

        niter (int, optional): the number of subspace iterations to
            conduct; niter must be a nonnegative
            integer, and defaults to 2.

    References::

        - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
          structure with randomness: probabilistic algorithms for
          constructing approximate matrix decompositions,
          arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
          `arXiv <http://arxiv.org/abs/0909.4061>`_).

    """

    if not torch.jit.is_scripting():
        # __torch_function__ protocol: tensor-like subclasses get a chance
        # to intercept the call.
        if type(A) is not torch.Tensor and has_torch_function((A,)):
            return handle_torch_function(
                pca_lowrank, (A,), A, q=q, center=center, niter=niter
            )

    (m, n) = A.shape[-2:]

    if q is None:
        q = min(6, m, n)
    elif not (q >= 0 and q <= min(m, n)):
        raise ValueError(
            f"q(={q}) must be non-negative integer and not greater than min(m, n)={min(m, n)}"
        )
    if not (niter >= 0):
        raise ValueError(f"niter(={niter}) must be non-negative integer")

    dtype = _utils.get_floating_dtype(A)

    if not center:
        # Caller asserts the data is already centered: plain randomized SVD.
        return _svd_lowrank(A, q, niter=niter, M=None)

    if _utils.is_sparse(A):
        if len(A.shape) != 2:
            raise ValueError("pca_lowrank input is expected to be 2-dimensional tensor")
        # Column means of the sparse matrix (a sparse 1-D result of length n).
        c = torch.sparse.sum(A, dim=(-2,)) / m
        # reshape c into an (n, 1) sparse matrix C_t whose row indices are
        # the nonzero column positions of c (second index row stays zero),
        # so the dense (m, n) mean matrix M = (C_t @ ones(1, m)).mT can be
        # formed without densifying A itself.
        column_indices = c.indices()[0]
        indices = torch.zeros(
            2,
            len(column_indices),
            dtype=column_indices.dtype,
            device=column_indices.device,
        )
        indices[0] = column_indices
        C_t = torch.sparse_coo_tensor(
            indices, c.values(), (n, 1), dtype=dtype, device=A.device
        )

        ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)
        M = torch.sparse.mm(C_t, ones_m1_t).mT
        # Centering happens implicitly inside _svd_lowrank via the M argument.
        return _svd_lowrank(A, q, niter=niter, M=M)
    else:
        # Dense input: subtract the per-column mean directly.
        C = A.mean(dim=(-2,), keepdim=True)
        return _svd_lowrank(A - C, q, niter=niter, M=None)
|
pllava/lib/python3.10/site-packages/torch/_ops.py
ADDED
|
@@ -0,0 +1,1355 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import abc
|
| 3 |
+
import contextlib
|
| 4 |
+
import ctypes
|
| 5 |
+
import importlib
|
| 6 |
+
import inspect
|
| 7 |
+
import sys
|
| 8 |
+
import types
|
| 9 |
+
from typing import Any, Callable, Dict, List, Set, Type, Union
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
import torch.utils._pytree as pytree
|
| 13 |
+
from torch import _utils_internal
|
| 14 |
+
from torch._C import _dispatch_is_included_in_alias as is_included_in_alias, DispatchKey
|
| 15 |
+
from torch._functorch.pyfunctorch import dispatch_functorch
|
| 16 |
+
from torch.utils._python_dispatch import TorchDispatchMode
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# Cache the capability check once at import time: dlopen-flag control only
# exists where the interpreter exposes both the getter and the setter
# (it does not on e.g. Windows).
_SET_GLOBAL_FLAGS = all(
    hasattr(sys, attr) for attr in ("getdlopenflags", "setdlopenflags")
)


@contextlib.contextmanager
def dl_open_guard():
    """
    Context manager that temporarily ORs ``RTLD_GLOBAL`` into the dynamic
    linker flags while a shared library holding custom operators is opened,
    restoring the previous flags on exit.
    """
    if _SET_GLOBAL_FLAGS:
        saved_flags = sys.getdlopenflags()
        sys.setdlopenflags(saved_flags | ctypes.RTLD_GLOBAL)
        try:
            yield
        finally:
            # Always restore, even if the load inside the block raised.
            sys.setdlopenflags(saved_flags)
    else:
        # Platform cannot adjust dlopen flags; behave as a no-op guard.
        yield
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class OperatorBase:
    """
    Base class for OpOverload (which represents C++ ATen operators) and HigherOrderOperator
    (which represents Python-only operators that are unrepresentable in TorchScript).

    Holds the Python-dispatcher registration tables (``py_kernels``,
    ``python_key_table``, ``functorch_table``) and the per-operator
    dispatch cache that the C++ side consults.
    """

    def __init__(self):
        # The dispatch cache precomputes a mapping of dispatch key that the
        # dispatcher wants to dispatch to, to an actual implementation of the
        # dispatch key. Confusingly, the actual implementation could *also* be a
        # dispatch key, but in this case, this refers to the C++ kernel that
        # was registered to some dispatch key. Aliases are permitted in the
        # latter but not the former; for example, you might lookup the
        # entry for AutogradCPU, and this maps you to the Autograd key for
        # the generic autograd kernel that works for all devices. Since this
        # is the Python dispatcher, you can also put an arbitrary Python
        # callable to call instead. This handler gets precisely the
        # args/kwargs that the operator was __call__'ed with.
        # NB: This name is hard-coded in torch/csrc/autograd/python_variable.cpp
        # for use with OpOverload; cache lookup is done entirely from C++
        # for speed.
        # TODO: The cache is NOT currently used by HigherOrderOperator, but it should!
        self._dispatch_cache: Dict[
            DispatchKey, Union[DispatchKey, Callable[..., Any]]
        ] = {}

        # This table allows you to override the behavior of a particular
        # dispatch key to call a custom Python function, rather than the
        # ordinary C++ configured behavior. This is the raison d'etre of
        # Python dispatcher: to let you program the dispatcher from Python
        # in case you need something unusual, and don't want to clobber
        # the existing registrations using the Python operator registration
        # API.
        self.py_kernels: Dict[DispatchKey, Callable[..., Any]] = {}

        # This table allows you to override the behavior of a particular
        # operator for a particular TorchDispatchMode. In practice,
        # we are using this mostly for ProxyTensorMode. Modes can be
        # thought of as an open world extension of dispatch keys, so it
        # makes sense that you should be able to register them, the same
        # way you can register dispatch keys.
        self.python_key_table: Dict[
            Union[Type[TorchDispatchMode], Type[torch.Tensor]], Callable[..., Any]
        ] = {}

        # This table allows you to override the behavior of functorch
        # transformations. NB: this currently only does something for
        # HigherOrderOperator
        self.functorch_table = {}

    def __call__(self, *args, **kwargs):
        """Invoke the operator; subclasses must provide the implementation."""
        raise NotImplementedError

    def has_kernel_for_dispatch_key(self, k):
        """Return True if a Python kernel is registered exactly for key ``k``."""
        return k in self.py_kernels

    def has_kernel_for_any_dispatch_key(self, ks):
        """Return True if any *non-alias* key in the keyset ``ks`` has a
        registered Python kernel."""
        for k in self.py_kernels:
            if not torch._C._dispatch_is_alias_key(k) and ks.has(k):
                return True
        return False

    def py_impl(self, k):
        """Decorator factory: register ``fn`` as the Python implementation
        for ``k``, where ``k`` may be a TorchDispatchMode/Tensor subclass,
        a functorch ``TransformType``, or a ``DispatchKey``.

        Raises:
            RuntimeError: if a Python kernel is already registered for the
                given ``DispatchKey``.
        """

        def inner(fn):
            # Mode / tensor-subclass registration goes into python_key_table.
            if inspect.isclass(k) and (
                issubclass(k, TorchDispatchMode) or issubclass(k, torch.Tensor)
            ):
                assert k not in self.python_key_table
                # TODO(voz): Should we replace setting DispatchKey.Python entirely with setting mode keys?
                self.python_key_table[k] = fn
                self._dispatch_cache.clear()
                return fn

            # functorch transform registration goes into functorch_table.
            if isinstance(k, torch._C._functorch.TransformType):
                assert k not in self.functorch_table
                self.functorch_table[k] = fn
                return fn

            assert isinstance(k, DispatchKey)
            assert (
                k != DispatchKey.Python
            ), "Please register a mode for the torch._C.DispatchKey.Python key instead."

            if k in self.py_kernels:
                raise RuntimeError(
                    f"Trying to override a python impl for {k} on operator {self.name()}"
                )
            self.py_kernels[k] = fn
            # Invalidate the precomputed dispatch table — a new kernel may
            # change what any key resolves to.
            self._dispatch_cache.clear()
            return fn

        return inner

    # Registers an implementation to all **3** variants of functionalization that we have:
    # - DispatchKey.Functionalize
    # - functorch.TransformType.Functionalize
    # - FunctionalTensorMode
    # Example:
    #   @py_functionalize_impl
    #   def functionalize_rule(ctx, inner_f, *args):
    #       args_unwrapped = ctx.unwrap_tensors(args)
    #       with ctx.redispatch_to_next():
    #           out = ctx.functionalize(inner_f)(*args_unwrapped)
    #           return ctx.wrap_tensors(out)
    def py_functionalize_impl(self, fn):
        """Register ``fn`` under all three functionalization entry points,
        wrapping it with the matching functionalize-API context for each."""
        from torch._subclasses.functional_tensor import (
            CppFunctionalizeAPI as _CppFunctionalizeAPI,
            FunctorchFunctionalizeAPI as _FunctorchFunctionalizeAPI,
            PythonFunctionalizeAPI as _PythonFunctionalizeAPI,
        )

        # Construct our three flavors of functionalization,
        # each of which have slightly different wrap/unwrap/redispatch policies
        def functionalize_dk_fn(*args, **kwargs):
            return fn(_CppFunctionalizeAPI(), *args, **kwargs)

        def functionalize_dispatch_mode_fn(mode, *args, **kwargs):
            return fn(_PythonFunctionalizeAPI(mode), *args, **kwargs)

        def functionalize_functorch_fn(interpreter, *args, **kwargs):
            return fn(_FunctorchFunctionalizeAPI(interpreter), *args, **kwargs)

        self.py_impl(DispatchKey.Functionalize)(functionalize_dk_fn)
        self.py_impl(torch._subclasses.functional_tensor.FunctionalTensorMode)(
            functionalize_dispatch_mode_fn
        )
        self.py_impl(torch._C._functorch.TransformType.Functionalize)(
            functionalize_functorch_fn
        )

        return fn

    def name(self):
        """Return the operator's qualified name; subclasses must implement."""
        raise NotImplementedError
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
# Equivalent to computeDispatchTableEntryWithDebug
def resolve_key(op: OperatorBase, k: DispatchKey):  # type: ignore[valid-type]
    """Resolve dispatch key ``k`` for ``op`` to the entry that should run.

    Returns either a dispatch key for which ``op`` has a registered Python
    kernel, or ``k`` itself when only a backend fallback exists. The
    numbered steps mirror the precedence order of the C++ dispatcher's
    computeDispatchTableEntryWithDebug.

    Raises:
        RuntimeError: when a CompositeImplicitAutograd kernel would
            ambiguously shadow an AutogradOther backend kernel.
        NotImplementedError: when no kernel or backend fallback is found.
    """
    # 1. (Direct) operator registration
    if op.has_kernel_for_dispatch_key(k):
        return k
    # 2.1 Use CompositeExplicitAutogradNonFunctional kernel if available
    cand = DispatchKey.CompositeExplicitAutogradNonFunctional
    if (
        k == DispatchKey.Undefined or is_included_in_alias(k, cand)
    ) and op.has_kernel_for_dispatch_key(cand):
        return cand
    # 2.2 Use CompositeExplicitAutograd kernel if available
    cand = DispatchKey.CompositeExplicitAutograd
    if (
        k == DispatchKey.Undefined or is_included_in_alias(k, cand)
    ) and op.has_kernel_for_dispatch_key(cand):
        return cand
    # A backend kernel (for the backends reachable from autograd key ``k``,
    # or the CompositeExplicitAutograd alias) takes precedence over the
    # CompositeImplicitAutograd candidates checked below.
    has_backend_kernel = op.has_kernel_for_any_dispatch_key(
        torch._C._dispatch_get_backend_keyset_from_autograd(k)
    ) or op.has_kernel_for_dispatch_key(DispatchKey.CompositeExplicitAutograd)
    # 2.3. Use CompositeImplicitAutograd kernel if available
    cand = DispatchKey.CompositeImplicitAutogradNestedTensor
    if (
        (k != DispatchKey.Undefined and is_included_in_alias(k, cand))
        and op.has_kernel_for_dispatch_key(cand)
        and not has_backend_kernel
    ):
        return cand
    cand = DispatchKey.CompositeImplicitAutograd
    if (
        k == DispatchKey.Undefined or is_included_in_alias(k, cand)
    ) and op.has_kernel_for_dispatch_key(cand):
        if k == DispatchKey.AutogradOther and op.has_kernel_for_any_dispatch_key(
            torch._C._dispatch_autogradother_backends
        ):
            raise RuntimeError("ambiguous autogradother kernel")
        elif not has_backend_kernel:
            return cand
    # 2.4. For autograd backend keys, use kernel from DispatchKey::Autograd if available
    cand = DispatchKey.Autograd
    if is_included_in_alias(k, cand) and op.has_kernel_for_dispatch_key(cand):
        return cand
    # 2.5 Use kernel from DispatchKey::FuncTorchBatchedDecomposition if available
    cand = DispatchKey.FuncTorchBatchedDecomposition
    if is_included_in_alias(k, cand) and op.has_kernel_for_dispatch_key(cand):
        return cand
    # Backend fallback
    if torch._C._dispatch_has_backend_fallback(k):
        # The dispatch key itself will implicitly route to backend fallback.
        # This is probably not great for the pure Python implementation.
        return k
    raise NotImplementedError(f"could not find kernel for {op} at dispatch key {k}")
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
# Global registry of HigherOrderOperator instances keyed by their name;
# populated by HigherOrderOperator.__init__.
_higher_order_ops: Dict[str, "HigherOrderOperator"] = {}

# Dispatch keys that every new HigherOrderOperator treats as fallthrough by
# default: HigherOrderOperator.__init__ removes each of these from the
# operator's non_fallthrough_keys via fallthrough().
_HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS = [
    DispatchKey.PythonDispatcher,  # type: ignore[attr-defined]
    DispatchKey.PythonTLSSnapshot,  # type: ignore[attr-defined]
    DispatchKey.ADInplaceOrView,
    DispatchKey.BackendSelect,
    DispatchKey.AutocastCPU,  # type: ignore[attr-defined]
    DispatchKey.AutocastCUDA,  # type: ignore[attr-defined]
]
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
class HigherOrderOperator(OperatorBase, abc.ABC):
|
| 243 |
+
# The HigherOrderOperator will appear as torch.ops.higher_order.{name}
|
| 244 |
+
#
|
| 245 |
+
# If you're creating a new HigherOrderOperator, please do not change the
|
| 246 |
+
# default. Adding operators to the global torch.ops namespace is a bad
|
| 247 |
+
# practice due to name collisions.
|
| 248 |
+
def __init__(self, name):
|
| 249 |
+
super().__init__()
|
| 250 |
+
if type(self) is HigherOrderOperator:
|
| 251 |
+
raise RuntimeError(
|
| 252 |
+
"Direct instantiation of HigherOrderOperator is not allowed. Please subclass it."
|
| 253 |
+
)
|
| 254 |
+
self._name = name
|
| 255 |
+
|
| 256 |
+
# Make _OPNamespace not scream, this whole name based association needs a good hard look
|
| 257 |
+
self.__name__ = name
|
| 258 |
+
_higher_order_ops[name] = self
|
| 259 |
+
self._ns = "higher_order"
|
| 260 |
+
self.__module__ = "torch.ops.higher_order"
|
| 261 |
+
|
| 262 |
+
self.non_fallthrough_keys = torch._C._dispatch_keyset_full()
|
| 263 |
+
|
| 264 |
+
for dispatch_key in _HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS:
|
| 265 |
+
self.fallthrough(dispatch_key)
|
| 266 |
+
|
| 267 |
+
# [NOTE] We have to register pre-dispatch key implementation
|
| 268 |
+
# because sometimes HOP use aot-dispatch tracing to detect certaion
|
| 269 |
+
# mutations. This is problematic when we are functionalizing HOP
|
| 270 |
+
# during pre-dispatch because when the inner tracer starts, it will see
|
| 271 |
+
# that PreDispatch key is still active. In that case, we just redispatch
|
| 272 |
+
# it to next key. This is only safe to do when PreDispatch key stack has no
|
| 273 |
+
# active modes.
|
| 274 |
+
|
| 275 |
+
def py_impl(self, k):
|
| 276 |
+
if isinstance(k, DispatchKey) and not self.non_fallthrough_keys.has(k):
|
| 277 |
+
self.non_fallthrough_keys = self.non_fallthrough_keys.add(k)
|
| 278 |
+
return super().py_impl(k)
|
| 279 |
+
|
| 280 |
+
@property
|
| 281 |
+
def namespace(self):
|
| 282 |
+
return self._ns
|
| 283 |
+
|
| 284 |
+
def fallthrough(self, dispatch_key):
|
| 285 |
+
self.non_fallthrough_keys = self.non_fallthrough_keys.remove(dispatch_key)
|
| 286 |
+
|
| 287 |
+
# Use positional-only argument to avoid naming collide with custom ops arguments
|
| 288 |
+
# that are named "self".
|
| 289 |
+
def dispatch(self, /, dispatch_key, *args, **kwargs):
|
| 290 |
+
from torch.utils._python_dispatch import _get_current_dispatch_mode
|
| 291 |
+
|
| 292 |
+
if dispatch_key in self._dispatch_cache:
|
| 293 |
+
kernel = self._dispatch_cache[dispatch_key]
|
| 294 |
+
assert not isinstance(kernel, DispatchKey)
|
| 295 |
+
return kernel(*args, **kwargs)
|
| 296 |
+
|
| 297 |
+
if dispatch_key == DispatchKey.FuncTorchDynamicLayerFrontMode:
|
| 298 |
+
return dispatch_functorch(self, args, kwargs)
|
| 299 |
+
|
| 300 |
+
if dispatch_key == DispatchKey.Python:
|
| 301 |
+
# Keep the following 1:1 with handle_torch_function_no_python_arg_parser
|
| 302 |
+
# in torch/csrc/utils/python_arg_parser.cpp
|
| 303 |
+
|
| 304 |
+
overloaded_args_list = []
|
| 305 |
+
|
| 306 |
+
def has_python_key(tensor):
|
| 307 |
+
return torch._C._dispatch_keys(tensor).has("Python")
|
| 308 |
+
|
| 309 |
+
def check_overloaded(arg):
|
| 310 |
+
if isinstance(arg, torch.Tensor) and has_python_key(arg):
|
| 311 |
+
overloaded_args_list.append(arg)
|
| 312 |
+
|
| 313 |
+
for arg in (*args, *kwargs.values()):
|
| 314 |
+
check_overloaded(arg)
|
| 315 |
+
if isinstance(arg, (list, tuple)):
|
| 316 |
+
for a in arg:
|
| 317 |
+
check_overloaded(a)
|
| 318 |
+
|
| 319 |
+
overloaded_args = tuple(overloaded_args_list)
|
| 320 |
+
overloaded_types = tuple(type(arg) for arg in overloaded_args)
|
| 321 |
+
|
| 322 |
+
# Step 1: dispatch on any user TorchDispatchModes
|
| 323 |
+
from torch.utils._python_dispatch import _pop_mode_temporarily
|
| 324 |
+
|
| 325 |
+
curr_mode = _get_current_dispatch_mode()
|
| 326 |
+
if curr_mode is not None:
|
| 327 |
+
if type(curr_mode) in self.python_key_table:
|
| 328 |
+
handler = self.python_key_table[type(curr_mode)]
|
| 329 |
+
with _pop_mode_temporarily() as mode:
|
| 330 |
+
# "natural" calling convention: (mode, *args, **kwargs)
|
| 331 |
+
# TODO(rzou): we should support torch_dispatch calling convention too.
|
| 332 |
+
result = handler(mode, *args, **kwargs)
|
| 333 |
+
else:
|
| 334 |
+
raise NotImplementedError(
|
| 335 |
+
f"There was no rule registered for HOP {self._name} and mode {curr_mode}. "
|
| 336 |
+
f"We recommend filing an issue."
|
| 337 |
+
)
|
| 338 |
+
if result is not NotImplemented:
|
| 339 |
+
return result
|
| 340 |
+
|
| 341 |
+
# Step 2: dispatch on any subclasses
|
| 342 |
+
for arg in overloaded_args:
|
| 343 |
+
subclass_type = type(arg)
|
| 344 |
+
if (
|
| 345 |
+
subclass_type.__torch_dispatch__
|
| 346 |
+
== torch._C._disabled_torch_dispatch_impl
|
| 347 |
+
):
|
| 348 |
+
continue
|
| 349 |
+
if subclass_type in self.python_key_table:
|
| 350 |
+
handler = self.python_key_table[subclass_type]
|
| 351 |
+
# "natural" calling convention: (*args, **kwargs)
|
| 352 |
+
# TODO(rzou): we should support torch_dispatch calling convention too.
|
| 353 |
+
result = handler(*args, **kwargs)
|
| 354 |
+
else:
|
| 355 |
+
raise NotImplementedError(
|
| 356 |
+
f"There was no rule registered for HOP {self._name} and subclass {subclass_type}. "
|
| 357 |
+
f"We recommend filing an issue."
|
| 358 |
+
)
|
| 359 |
+
if result is not NotImplemented:
|
| 360 |
+
return result
|
| 361 |
+
|
| 362 |
+
# All handlers returned NotImplemented
|
| 363 |
+
raise TypeError(
|
| 364 |
+
f"Multiple dispatch failed for {self._name}. There was no registered that "
|
| 365 |
+
f"did not return NotImplemented. Use HOP.py_impl to register some. "
|
| 366 |
+
f"Tried mode: {curr_mode}) and subclasses: "
|
| 367 |
+
f"{[type(a) for a in overloaded_args]}"
|
| 368 |
+
)
|
| 369 |
+
|
| 370 |
+
functionality_key = torch._C._to_functionality_key(dispatch_key) # type: ignore[attr-defined]
|
| 371 |
+
if functionality_key == DispatchKey.PreDispatch:
|
| 372 |
+
from torch.utils._python_dispatch import _pop_mode_temporarily
|
| 373 |
+
|
| 374 |
+
# The check for Python in the exclude set is so we properly respect `with no_dispatch()`
|
| 375 |
+
# calls inside of a mode.
|
| 376 |
+
if (
|
| 377 |
+
_len_torch_dispatch_stack_pre_dispatch() > 0
|
| 378 |
+
) and not torch._C._dispatch_tls_is_dispatch_key_excluded(
|
| 379 |
+
DispatchKey.Python
|
| 380 |
+
):
|
| 381 |
+
curr_mode = _get_current_dispatch_mode_pre_dispatch()
|
| 382 |
+
assert (
|
| 383 |
+
curr_mode is not None
|
| 384 |
+
), "Illegal invocation of dispatch on torch._C.DispatchKey.PreDispatch without a mode."
|
| 385 |
+
assert (
|
| 386 |
+
type(curr_mode) in self.python_key_table
|
| 387 |
+
), f"Current active mode {curr_mode} not registered"
|
| 388 |
+
handler = self.python_key_table[type(curr_mode)]
|
| 389 |
+
with _pop_mode_temporarily(functionality_key) as mode:
|
| 390 |
+
return handler(mode, *args, **kwargs)
|
| 391 |
+
|
| 392 |
+
final_key = resolve_key(self, dispatch_key)
|
| 393 |
+
|
| 394 |
+
# This can current fail due to backend fallbacks. You just have to
|
| 395 |
+
# register them by hand for HigherOrderOperator.
|
| 396 |
+
if final_key not in self.py_kernels:
|
| 397 |
+
raise NotImplementedError(
|
| 398 |
+
f"could not find kernel for HigherOrderOperator {self._name} "
|
| 399 |
+
f"at dispatch key {final_key} (resolved from {dispatch_key})"
|
| 400 |
+
)
|
| 401 |
+
|
| 402 |
+
# [NOTE] We shouldn't cache PreDispatch kernel here because depending
|
| 403 |
+
# on what modes are active, predispatch behaviour is different.
|
| 404 |
+
# Also we do same thing for normal ops:
|
| 405 |
+
# See Note [Not Caching Per-Dispatch-Key Mode Handlers]
|
| 406 |
+
if dispatch_key != DispatchKey.PreDispatch:
|
| 407 |
+
self._dispatch_cache[dispatch_key] = self.py_kernels[final_key]
|
| 408 |
+
kernel = self.py_kernels[final_key]
|
| 409 |
+
# It's illegal to register DispatchKey to py_kernels, since there's no
|
| 410 |
+
# C++ kernel to call into
|
| 411 |
+
assert not isinstance(kernel, DispatchKey)
|
| 412 |
+
return kernel(*args, **kwargs)
|
| 413 |
+
|
| 414 |
+
@abc.abstractmethod
|
| 415 |
+
def __call__(self, /, *args, **kwargs):
|
| 416 |
+
# Dynamo already traces the body of HigherOrderOp beforehand when it
|
| 417 |
+
# so no need to trace into it.
|
| 418 |
+
from torch._dynamo import disable
|
| 419 |
+
|
| 420 |
+
@disable
|
| 421 |
+
def wrapper():
|
| 422 |
+
flat_args = _to_flat_tuple(args, kwargs)
|
| 423 |
+
if torch.overrides.has_torch_function(flat_args):
|
| 424 |
+
return torch.overrides.handle_torch_function(
|
| 425 |
+
self, flat_args, *args, **kwargs
|
| 426 |
+
)
|
| 427 |
+
|
| 428 |
+
dispatch_key_set = _compute_keyset(args, kwargs, self.non_fallthrough_keys)
|
| 429 |
+
return self.dispatch(
|
| 430 |
+
dispatch_key_set.highestPriorityTypeId(), *args, **kwargs
|
| 431 |
+
)
|
| 432 |
+
|
| 433 |
+
return wrapper()
|
| 434 |
+
|
| 435 |
+
def __str__(self):
|
| 436 |
+
return f"{self.name()}"
|
| 437 |
+
|
| 438 |
+
def name(self):
|
| 439 |
+
return self._name
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
def _to_flat_tuple(args, kwargs):
|
| 443 |
+
return pytree.arg_tree_leaves(*args, **kwargs)
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
def _compute_keyset(args, kwargs, non_fallthrough_keys):
|
| 447 |
+
tensors = _get_tensors(args, kwargs)
|
| 448 |
+
return key_extractor(tensors, non_fallthrough_keys)
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
def _get_tensors(args, kwargs):
|
| 452 |
+
flat_all = _to_flat_tuple(args, kwargs)
|
| 453 |
+
tensor_args = [t for t in flat_all if isinstance(t, torch.Tensor)]
|
| 454 |
+
return tuple(tensor_args)
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
# Note - this should maintain identical impl to the C++ dispatcher key extraction logic
|
| 458 |
+
# at ATen/core/dispatch/DispatchKeyExtractor.h
|
| 459 |
+
def key_extractor(tensors, key_mask):
|
| 460 |
+
key_set = torch._C._dispatch_tls_local_include_set()
|
| 461 |
+
for tensor in tensors:
|
| 462 |
+
key_set = key_set | torch._C._dispatch_keys(tensor)
|
| 463 |
+
key_set = key_set - torch._C._dispatch_tls_local_exclude_set()
|
| 464 |
+
key_set = key_set & key_mask
|
| 465 |
+
return key_set
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
# Mode stack for PreDispatchKey
|
| 469 |
+
# it should always have three keys with
|
| 470 |
+
# priority given to FunctionalTensorMode and
|
| 471 |
+
# then ProxyTorchDispatchMode. It means that
|
| 472 |
+
# slot 0 belongs to ProxyTorchDispatchMode and
|
| 473 |
+
# slot 1 belongs to FunctionalTensorMode.
|
| 474 |
+
#
|
| 475 |
+
# SchemaCheckMode is separate from the other 2,
|
| 476 |
+
# and is only valid when the stack is empty.
|
| 477 |
+
# SchemaCheckMode is for testing purposes, and
|
| 478 |
+
# is meant to run in eager mode on concrete inputs,
|
| 479 |
+
# checking for incorrect schemas in regards to
|
| 480 |
+
# aliasing or mutating ops.
|
| 481 |
+
class _ModeStackStateForPreDispatch:
|
| 482 |
+
def __init__(self):
|
| 483 |
+
self.__infra_modes = [None, None]
|
| 484 |
+
self._schema_check_mode = None
|
| 485 |
+
|
| 486 |
+
def set(self, index, mode):
|
| 487 |
+
assert index < len(self.__infra_modes)
|
| 488 |
+
self.__infra_modes[index] = mode
|
| 489 |
+
|
| 490 |
+
def get(self, index):
|
| 491 |
+
assert index < len(self.__infra_modes)
|
| 492 |
+
return self.__infra_modes[index]
|
| 493 |
+
|
| 494 |
+
def count(self):
|
| 495 |
+
return len([i for i in self.__infra_modes if i is not None]) + int(
|
| 496 |
+
self._schema_check_mode is not None
|
| 497 |
+
)
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
_mode_stack_state_for_pre_dispatch = _ModeStackStateForPreDispatch()
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
def unset_mode_pre_dispatch(mode_key, schema_check=False):
|
| 504 |
+
current_mode_stack_pre_dispatch = mode_stack_state_for_pre_dispatch()
|
| 505 |
+
assert mode_key is None or mode_key in (
|
| 506 |
+
torch._C._TorchDispatchModeKey.PROXY,
|
| 507 |
+
torch._C._TorchDispatchModeKey.FUNCTIONAL,
|
| 508 |
+
)
|
| 509 |
+
if schema_check:
|
| 510 |
+
assert mode_key is None
|
| 511 |
+
|
| 512 |
+
def _unset_mode():
|
| 513 |
+
if mode_key == torch._C._TorchDispatchModeKey.PROXY:
|
| 514 |
+
current_mode = current_mode_stack_pre_dispatch.get(0)
|
| 515 |
+
mode_stack_state_for_pre_dispatch().set(0, None)
|
| 516 |
+
return current_mode
|
| 517 |
+
elif mode_key == torch._C._TorchDispatchModeKey.FUNCTIONAL:
|
| 518 |
+
current_mode = current_mode_stack_pre_dispatch.get(1)
|
| 519 |
+
mode_stack_state_for_pre_dispatch().set(1, None)
|
| 520 |
+
return current_mode
|
| 521 |
+
else:
|
| 522 |
+
current_mode = mode_stack_state_for_pre_dispatch()._schema_check_mode
|
| 523 |
+
mode_stack_state_for_pre_dispatch()._schema_check_mode = None
|
| 524 |
+
return current_mode
|
| 525 |
+
|
| 526 |
+
current_mode = _unset_mode()
|
| 527 |
+
|
| 528 |
+
new_pre_dispatch_len = _len_torch_dispatch_stack_pre_dispatch()
|
| 529 |
+
# When we are unsetting a mode, we need to check if there is
|
| 530 |
+
# active mode left on the PreDispatch key. If there is nothing
|
| 531 |
+
# active, we need to remove PreDispatch key from local dispatch include
|
| 532 |
+
# set.
|
| 533 |
+
if new_pre_dispatch_len == 0:
|
| 534 |
+
torch._C._dispatch_tls_set_dispatch_key_included(DispatchKey.PreDispatch, False)
|
| 535 |
+
|
| 536 |
+
return current_mode
|
| 537 |
+
|
| 538 |
+
|
| 539 |
+
def _set_mode_pre_dispatch(mode):
|
| 540 |
+
from torch._subclasses.functional_tensor import FunctionalTensorMode
|
| 541 |
+
from torch._subclasses.schema_check_mode import SchemaCheckMode
|
| 542 |
+
from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode
|
| 543 |
+
|
| 544 |
+
assert isinstance(
|
| 545 |
+
mode,
|
| 546 |
+
(
|
| 547 |
+
FunctionalTensorMode,
|
| 548 |
+
ProxyTorchDispatchMode,
|
| 549 |
+
SchemaCheckMode,
|
| 550 |
+
),
|
| 551 |
+
)
|
| 552 |
+
|
| 553 |
+
previous_mode_stack_len = _len_torch_dispatch_stack_pre_dispatch()
|
| 554 |
+
if isinstance(mode, SchemaCheckMode):
|
| 555 |
+
current_mode = mode_stack_state_for_pre_dispatch()._schema_check_mode
|
| 556 |
+
if previous_mode_stack_len > 0:
|
| 557 |
+
raise AssertionError(
|
| 558 |
+
"SchemaCheckMode for pre-dispatch must be used exclusively, found other modes on the stack"
|
| 559 |
+
)
|
| 560 |
+
mode_stack_state_for_pre_dispatch()._schema_check_mode = mode
|
| 561 |
+
elif isinstance(mode, FunctionalTensorMode):
|
| 562 |
+
current_mode = mode_stack_state_for_pre_dispatch().get(1)
|
| 563 |
+
assert current_mode is None
|
| 564 |
+
mode_stack_state_for_pre_dispatch().set(1, mode)
|
| 565 |
+
else:
|
| 566 |
+
current_mode = mode_stack_state_for_pre_dispatch().get(0)
|
| 567 |
+
assert current_mode is None
|
| 568 |
+
mode_stack_state_for_pre_dispatch().set(0, mode)
|
| 569 |
+
|
| 570 |
+
# When we are setting a mode, we need to check if there is
|
| 571 |
+
# active mode left on the PreDispatch key. If there was nothing
|
| 572 |
+
# active before setting this mode, it means that PreDispatch key
|
| 573 |
+
# was turned off. So we need to turn it on again.
|
| 574 |
+
if previous_mode_stack_len == 0:
|
| 575 |
+
torch._C._dispatch_tls_set_dispatch_key_included(DispatchKey.PreDispatch, True)
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
def _pop_mode_from_pre_dispatch():
|
| 579 |
+
mode_stack = mode_stack_state_for_pre_dispatch()
|
| 580 |
+
pre_dispatch_len = _len_torch_dispatch_stack_pre_dispatch()
|
| 581 |
+
|
| 582 |
+
if pre_dispatch_len == 0:
|
| 583 |
+
raise AssertionError("Trying to pop empty mode stack")
|
| 584 |
+
|
| 585 |
+
if mode_stack._schema_check_mode is not None:
|
| 586 |
+
return unset_mode_pre_dispatch(None, schema_check=True)
|
| 587 |
+
if mode_stack.get(1) is not None:
|
| 588 |
+
return unset_mode_pre_dispatch(torch._C._TorchDispatchModeKey.FUNCTIONAL)
|
| 589 |
+
if mode_stack.get(0) is not None:
|
| 590 |
+
return unset_mode_pre_dispatch(torch._C._TorchDispatchModeKey.PROXY)
|
| 591 |
+
|
| 592 |
+
|
| 593 |
+
def _len_torch_dispatch_stack_pre_dispatch():
|
| 594 |
+
return mode_stack_state_for_pre_dispatch().count()
|
| 595 |
+
|
| 596 |
+
|
| 597 |
+
def _get_dispatch_mode_pre_dispatch(mode_key):
|
| 598 |
+
assert mode_key in (
|
| 599 |
+
torch._C._TorchDispatchModeKey.PROXY,
|
| 600 |
+
torch._C._TorchDispatchModeKey.FUNCTIONAL,
|
| 601 |
+
)
|
| 602 |
+
if mode_key == torch._C._TorchDispatchModeKey.PROXY:
|
| 603 |
+
return mode_stack_state_for_pre_dispatch().get(0)
|
| 604 |
+
else:
|
| 605 |
+
return mode_stack_state_for_pre_dispatch().get(1)
|
| 606 |
+
|
| 607 |
+
|
| 608 |
+
def _get_current_dispatch_mode_pre_dispatch():
|
| 609 |
+
if mode_stack_state_for_pre_dispatch()._schema_check_mode is not None:
|
| 610 |
+
return mode_stack_state_for_pre_dispatch()._schema_check_mode
|
| 611 |
+
else:
|
| 612 |
+
stack_len = mode_stack_state_for_pre_dispatch().count()
|
| 613 |
+
if stack_len == 2:
|
| 614 |
+
return mode_stack_state_for_pre_dispatch().get(1)
|
| 615 |
+
if stack_len == 1:
|
| 616 |
+
return (
|
| 617 |
+
mode_stack_state_for_pre_dispatch().get(1)
|
| 618 |
+
if mode_stack_state_for_pre_dispatch().get(1) is not None
|
| 619 |
+
else mode_stack_state_for_pre_dispatch().get(0)
|
| 620 |
+
)
|
| 621 |
+
return None
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
def mode_stack_state_for_pre_dispatch():
|
| 625 |
+
global _mode_stack_state_for_pre_dispatch
|
| 626 |
+
return _mode_stack_state_for_pre_dispatch
|
| 627 |
+
|
| 628 |
+
|
| 629 |
+
cached_ops: Set["OpOverload"] = set()
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
def add_cached_op(op_overload):
|
| 633 |
+
global cached_ops
|
| 634 |
+
cached_ops.add(op_overload)
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
def reset_cached_ops():
|
| 638 |
+
global cached_ops
|
| 639 |
+
cached_ops.clear()
|
| 640 |
+
|
| 641 |
+
|
| 642 |
+
def get_cached_ops():
|
| 643 |
+
global cached_ops
|
| 644 |
+
return cached_ops
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
# Each OpOverload object contains pointer to a a specific operator overload, a pointer to the parent `OpOverloadPacket` object.
|
| 648 |
+
# You can obtain an OpOverload object through attribute query on OpOverloadPacket.
|
| 649 |
+
class OpOverload(OperatorBase):
|
| 650 |
+
def __init__(self, overloadpacket, op, op_dk, schema, tags):
|
| 651 |
+
super().__init__()
|
| 652 |
+
self._op = op
|
| 653 |
+
self._op_dk = op_dk
|
| 654 |
+
self._schema = schema
|
| 655 |
+
self._overloadpacket = overloadpacket
|
| 656 |
+
self._tags = tags
|
| 657 |
+
self._overloadname = (
|
| 658 |
+
"default" if schema.overload_name == "" else schema.overload_name
|
| 659 |
+
)
|
| 660 |
+
self._name = self._schema.name
|
| 661 |
+
if schema.overload_name:
|
| 662 |
+
self._name += "." + schema.overload_name
|
| 663 |
+
self.__name__ = f"{self._schema.name.split('::')[1]}.{self._overloadname}"
|
| 664 |
+
self.__module__ = overloadpacket.__module__
|
| 665 |
+
op.__module__ = overloadpacket.__module__
|
| 666 |
+
self.__qualname__ = self._name
|
| 667 |
+
self.__annotations__ = {}
|
| 668 |
+
# Only compute the OperatorHandle when we need it. Not all OpOverloads have
|
| 669 |
+
# OperatorHandles (the TorchScript ones don't...)
|
| 670 |
+
self._lazy_handle = None
|
| 671 |
+
|
| 672 |
+
# If the OpOverload was constructed from a Library.def in Python.
|
| 673 |
+
self._defined_in_python = self.__qualname__ in torch.library._defs
|
| 674 |
+
|
| 675 |
+
# Logic replicated from aten/src/ATen/native/MathBitsFallback.h
|
| 676 |
+
is_write = None
|
| 677 |
+
for a in self._schema.arguments:
|
| 678 |
+
if a.alias_info is None:
|
| 679 |
+
continue
|
| 680 |
+
if is_write is None:
|
| 681 |
+
is_write = a.alias_info.is_write
|
| 682 |
+
else:
|
| 683 |
+
# We will conservatively call mixed mutable/non-mutable
|
| 684 |
+
# aliased inputs as NOT a view
|
| 685 |
+
is_write = a.alias_info.is_write or is_write
|
| 686 |
+
self.is_view = is_write is not None and not is_write
|
| 687 |
+
|
| 688 |
+
@property
|
| 689 |
+
def _namespace(self):
|
| 690 |
+
return self._schema.name.split("::")[0]
|
| 691 |
+
|
| 692 |
+
@property
|
| 693 |
+
def _opname(self):
|
| 694 |
+
return self._schema.name.split("::")[1]
|
| 695 |
+
|
| 696 |
+
@property
|
| 697 |
+
def _handle(self):
|
| 698 |
+
if self._lazy_handle is None:
|
| 699 |
+
self._lazy_handle = torch._C._dispatch_find_schema_or_throw(
|
| 700 |
+
self._schema.name, self._schema.overload_name
|
| 701 |
+
)
|
| 702 |
+
return self._lazy_handle
|
| 703 |
+
|
| 704 |
+
# it's a no-op since OpOverload object is immutable and must be unique for a given op overload.
|
| 705 |
+
def __deepcopy__(self, memo=None):
|
| 706 |
+
return self
|
| 707 |
+
|
| 708 |
+
def __repr__(self):
|
| 709 |
+
return "<OpOverload(op='{}.{}', overload='{}')>".format(
|
| 710 |
+
*self._schema.name.split("::"), self._overloadname
|
| 711 |
+
)
|
| 712 |
+
|
| 713 |
+
# Use positional-only argument to avoid naming collision with aten ops arguments
|
| 714 |
+
# that are named "self". This way, all the aten ops can be called by kwargs.
|
| 715 |
+
def __call__(self, /, *args, **kwargs):
|
| 716 |
+
return self._op(*args, **kwargs)
|
| 717 |
+
|
| 718 |
+
# Use positional-only argument to avoid naming collision with aten ops arguments
|
| 719 |
+
# that are named "self". This way, all the aten ops can be called by kwargs.
|
| 720 |
+
def redispatch(self, /, keyset, *args, **kwargs):
|
| 721 |
+
return self._handle.redispatch_boxed(keyset, *args, **kwargs)
|
| 722 |
+
|
| 723 |
+
def __hash__(self):
|
| 724 |
+
return hash(self._op)
|
| 725 |
+
|
| 726 |
+
# `my_namespace.my_op_name.overload_name`
|
| 727 |
+
def __str__(self):
|
| 728 |
+
return "{}.{}.{}".format(*self._schema.name.split("::"), self._overloadname)
|
| 729 |
+
|
| 730 |
+
def has_kernel_for_dispatch_key(self, k):
|
| 731 |
+
return super().has_kernel_for_dispatch_key(
|
| 732 |
+
k
|
| 733 |
+
) or torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), k)
|
| 734 |
+
|
| 735 |
+
def has_kernel_for_any_dispatch_key(self, ks):
|
| 736 |
+
return torch._C._dispatch_has_kernel_for_any_dispatch_key(
|
| 737 |
+
self.name(), ks
|
| 738 |
+
) or super().has_kernel_for_any_dispatch_key(ks)
|
| 739 |
+
|
| 740 |
+
@property
|
| 741 |
+
def namespace(self):
|
| 742 |
+
return self._schema.name.split("::")[0]
|
| 743 |
+
|
| 744 |
+
def _can_decompose(self):
|
| 745 |
+
dk = DispatchKey.CompositeImplicitAutograd
|
| 746 |
+
return dk in self.py_kernels or torch._C._dispatch_has_kernel_for_dispatch_key(
|
| 747 |
+
self.name(), dk
|
| 748 |
+
)
|
| 749 |
+
|
| 750 |
+
def decompose(self, *args, **kwargs):
|
| 751 |
+
dk = DispatchKey.CompositeImplicitAutograd
|
| 752 |
+
if dk in self.py_kernels:
|
| 753 |
+
# NB: This branch is not too necessary anymore, because we can
|
| 754 |
+
# apply Python CompositeImplicitAutograd *before* tracing
|
| 755 |
+
# using Python dispatcher (also taking advantage of the autograd
|
| 756 |
+
# formula). But it's included for completeness
|
| 757 |
+
return self.py_kernels[dk](*args, **kwargs)
|
| 758 |
+
elif torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), dk):
|
| 759 |
+
return self._op_dk(dk, *args, **kwargs)
|
| 760 |
+
else:
|
| 761 |
+
return NotImplemented
|
| 762 |
+
|
| 763 |
+
# Remove a dispatch key from the dispatch cache. This will force it to get
|
| 764 |
+
# recomputed the next time. Does nothing
|
| 765 |
+
# WARNING: if you register a dispatch key to py_kernels of an OpOverload,
|
| 766 |
+
# calling _del_dispatch on that key is NOT sufficient to apply your change,
|
| 767 |
+
# because a single registration may affect MULTIPLE dispatch keys (e.g.,
|
| 768 |
+
# registering Autograd affects AutogradCPU). del_dispatch is to be used
|
| 769 |
+
# only if you are specifically modifying how get_dispatch handles a
|
| 770 |
+
# particular input 'key'.
|
| 771 |
+
def _uncache_dispatch(self, key):
|
| 772 |
+
self._dispatch_cache.pop(key, None)
|
| 773 |
+
|
| 774 |
+
# This implements the pre-computation logic for the Python dispatcher.
|
| 775 |
+
def _get_dispatch(self, key):
|
| 776 |
+
# This is only called upon a cache miss
|
| 777 |
+
assert key not in self._dispatch_cache, f"{self} {key}"
|
| 778 |
+
|
| 779 |
+
if key == DispatchKey.Python:
|
| 780 |
+
if not isinstance(self, TorchBindOpOverload) and not self.python_key_table:
|
| 781 |
+
self._dispatch_cache[key] = key
|
| 782 |
+
add_cached_op(self)
|
| 783 |
+
return key
|
| 784 |
+
|
| 785 |
+
def handler(*args, **kwargs):
|
| 786 |
+
from torch.utils._python_dispatch import _get_current_dispatch_mode
|
| 787 |
+
|
| 788 |
+
# TODO: We also need to handle tensor subclasses here
|
| 789 |
+
# TODO(voz): We should walk all the nodes here / turn it into a list, topmode is ok for now.
|
| 790 |
+
curr_mode = type(_get_current_dispatch_mode())
|
| 791 |
+
assert (
|
| 792 |
+
curr_mode is not None
|
| 793 |
+
), "Illegal invocation of dispatch on torch._C.DispatchKey.Python without a mode."
|
| 794 |
+
|
| 795 |
+
if curr_mode not in self.python_key_table:
|
| 796 |
+
if isinstance(self, TorchBindOpOverload):
|
| 797 |
+
with torch.utils._python_dispatch._pop_mode_temporarily() as mode:
|
| 798 |
+
return torch._library.utils.handle_dispatch_mode(
|
| 799 |
+
mode, self, *args, **kwargs
|
| 800 |
+
)
|
| 801 |
+
else:
|
| 802 |
+
return self._op_dk(key, *args, **kwargs)
|
| 803 |
+
|
| 804 |
+
with torch.utils._python_dispatch._pop_mode_temporarily() as mode:
|
| 805 |
+
return self.python_key_table[curr_mode](mode, *args, **kwargs)
|
| 806 |
+
|
| 807 |
+
self._dispatch_cache[key] = handler
|
| 808 |
+
add_cached_op(self)
|
| 809 |
+
return handler
|
| 810 |
+
|
| 811 |
+
functionality_key = torch._C._to_functionality_key(key) # type: ignore[attr-defined]
|
| 812 |
+
if functionality_key == DispatchKey.PreDispatch:
|
| 813 |
+
curr_stack_len = _len_torch_dispatch_stack_pre_dispatch()
|
| 814 |
+
# The check for Python in the exclude set is so we properly respect `with no_dispatch()`
|
| 815 |
+
# calls inside of a mode.
|
| 816 |
+
if (
|
| 817 |
+
curr_stack_len > 0
|
| 818 |
+
and not torch._C._dispatch_tls_is_dispatch_key_excluded(
|
| 819 |
+
DispatchKey.Python
|
| 820 |
+
)
|
| 821 |
+
):
|
| 822 |
+
|
| 823 |
+
def handler(*args, **kwargs):
|
| 824 |
+
@contextlib.contextmanager
|
| 825 |
+
def _temporarily_pop_modes_from_pre_dispatch():
|
| 826 |
+
top_mode = _pop_mode_from_pre_dispatch()
|
| 827 |
+
try:
|
| 828 |
+
yield top_mode
|
| 829 |
+
finally:
|
| 830 |
+
_set_mode_pre_dispatch(top_mode)
|
| 831 |
+
|
| 832 |
+
with _temporarily_pop_modes_from_pre_dispatch() as curr_mode:
|
| 833 |
+
return torch._library.utils.handle_dispatch_mode(
|
| 834 |
+
curr_mode, self, *args, **kwargs
|
| 835 |
+
)
|
| 836 |
+
|
| 837 |
+
# Note [Not Caching Per-Dispatch-Key Mode Handlers]
|
| 838 |
+
# Note that we're not caching this handler. There isn't really a point, since the slow bit
|
| 839 |
+
# is the handler itself (in python).
|
| 840 |
+
# Also, not caching means that we don't have to reset the cache when any existing
|
| 841 |
+
# modes go out of scope (which in of itself takes time to loop through all operators).
|
| 842 |
+
return handler
|
| 843 |
+
|
| 844 |
+
final_key = resolve_key(self, key)
|
| 845 |
+
|
| 846 |
+
# See Note [Not Caching Per-Dispatch-Key Mode Handlers]
|
| 847 |
+
cache_result = key != DispatchKey.PreDispatch
|
| 848 |
+
|
| 849 |
+
# TODO: We could potentially have lots of debugging wrappers against
|
| 850 |
+
# dispatch keys; design some general registration mechanism instead of
|
| 851 |
+
# having if statement for each of them
|
| 852 |
+
if key == DispatchKey.Functionalize:
|
| 853 |
+
import torch._dispatch.python as pydispatch
|
| 854 |
+
|
| 855 |
+
if pydispatch.CROSSREF_FUNCTIONALIZE:
|
| 856 |
+
handler = pydispatch.make_crossref_functionalize(self, final_key)
|
| 857 |
+
if cache_result:
|
| 858 |
+
self._dispatch_cache[key] = handler
|
| 859 |
+
add_cached_op(self)
|
| 860 |
+
return handler
|
| 861 |
+
|
| 862 |
+
r = self.py_kernels.get(final_key, final_key)
|
| 863 |
+
if cache_result:
|
| 864 |
+
self._dispatch_cache[key] = r
|
| 865 |
+
add_cached_op(self)
|
| 866 |
+
return r
|
| 867 |
+
|
| 868 |
+
def name(self):
|
| 869 |
+
return self._name
|
| 870 |
+
|
| 871 |
+
@property
|
| 872 |
+
def overloadpacket(self):
|
| 873 |
+
return self._overloadpacket
|
| 874 |
+
|
| 875 |
+
@property
|
| 876 |
+
def op(self):
|
| 877 |
+
return self._op
|
| 878 |
+
|
| 879 |
+
@property
|
| 880 |
+
def tags(self):
|
| 881 |
+
return self._tags
|
| 882 |
+
|
| 883 |
+
# TODO: add more methods to expose information about input and output arguments
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
# TorchBindOpOverload are those custom ops which have at least one overload's
|
| 887 |
+
# schema consists of torch.ScriptObject (i.e. custom class) input.
|
| 888 |
+
# TorchBindOpOverload will skip C++ dispatcher and purely dispatched in python
|
| 889 |
+
# when its inputs contain FakeScriptObject in a similar way as higher order ops.
|
| 890 |
+
class TorchBindOpOverload(OpOverload):
|
| 891 |
+
def _fallthrough_keys(self) -> List[DispatchKey]:
|
| 892 |
+
# TODO: we should be calling the fallback for these, but a fallthrough is almost close
|
| 893 |
+
# enough to the fallback in most cases that we care about.
|
| 894 |
+
_DEFAULT_FALLTHROUGH_KEYS = [
|
| 895 |
+
DispatchKey.Autograd,
|
| 896 |
+
DispatchKey.AutogradCPU,
|
| 897 |
+
DispatchKey.AutogradCUDA,
|
| 898 |
+
DispatchKey.ADInplaceOrView,
|
| 899 |
+
DispatchKey.BackendSelect,
|
| 900 |
+
DispatchKey.PythonTLSSnapshot,
|
| 901 |
+
DispatchKey.PythonDispatcher,
|
| 902 |
+
]
|
| 903 |
+
|
| 904 |
+
def _may_use_fallthrough_instead_of_fallback(key: DispatchKey):
|
| 905 |
+
if torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), key):
|
| 906 |
+
return torch._C._dispatch_kernel_for_dispatch_key_is_fallthrough(
|
| 907 |
+
self.name(), key
|
| 908 |
+
)
|
| 909 |
+
|
| 910 |
+
return (
|
| 911 |
+
key not in self.py_kernels
|
| 912 |
+
or self.py_kernels[key] is torch.library.fallthrough_kernel
|
| 913 |
+
)
|
| 914 |
+
|
| 915 |
+
return [
|
| 916 |
+
key
|
| 917 |
+
for key in _DEFAULT_FALLTHROUGH_KEYS
|
| 918 |
+
if _may_use_fallthrough_instead_of_fallback(key)
|
| 919 |
+
]
|
| 920 |
+
|
| 921 |
+
@contextlib.contextmanager
|
| 922 |
+
def _register_as_effectful_op_temporarily(self):
|
| 923 |
+
from torch._higher_order_ops.effects import (
|
| 924 |
+
_EffectType,
|
| 925 |
+
_register_effectful_op,
|
| 926 |
+
SIDE_EFFECTS,
|
| 927 |
+
)
|
| 928 |
+
|
| 929 |
+
try:
|
| 930 |
+
if self not in SIDE_EFFECTS:
|
| 931 |
+
_register_effectful_op(self, _EffectType.ORDERED)
|
| 932 |
+
yield
|
| 933 |
+
finally:
|
| 934 |
+
if self in SIDE_EFFECTS:
|
| 935 |
+
del SIDE_EFFECTS[self]
|
| 936 |
+
|
| 937 |
+
# Use positional-only argument to avoid naming collision with aten ops arguments
|
| 938 |
+
# that are named "self". This way, all the aten ops can be called by kwargs.
|
| 939 |
+
def __call__(self, /, *args, **kwargs):
|
| 940 |
+
if _must_dispatch_in_python(args, kwargs):
|
| 941 |
+
# When any inputs are FakeScriptObject, we need to
|
| 942 |
+
# skip c++ dispatcher and dispatch in python through _get_dispatch of python_dispatcher
|
| 943 |
+
# because C++ dispatcher will check the schema and cannot recognize FakeScriptObject.
|
| 944 |
+
#
|
| 945 |
+
# Note:
|
| 946 |
+
# 1. We only register the torchbind op temporarily as effectful op because we only want
|
| 947 |
+
# the effect token functionalization logic to be applied during tracing. Otherwise, the behavior
|
| 948 |
+
# of the eagerly executing the op might change after tracing.
|
| 949 |
+
# 2. We don't want to register the op as effectful for all torchbind ops in ctor because this might
|
| 950 |
+
# cause unexpected behavior for some autograd.profiler ops e.g. profiler._record_function_exit._RecordFunction.
|
| 951 |
+
with self._register_as_effectful_op_temporarily():
|
| 952 |
+
return self._dispatch_in_python(args, kwargs, self._fallthrough_keys())
|
| 953 |
+
return self._op(*args, **kwargs)
|
| 954 |
+
|
| 955 |
+
def _dispatch_in_python(self, args, kwargs, fallthrough_keys):
|
| 956 |
+
non_fallthrough_keys = torch._C._dispatch_keyset_full()
|
| 957 |
+
for key in fallthrough_keys:
|
| 958 |
+
non_fallthrough_keys = non_fallthrough_keys.remove(key)
|
| 959 |
+
|
| 960 |
+
dispatch_key_set = _compute_keyset(args, kwargs, non_fallthrough_keys)
|
| 961 |
+
dispatch_key = dispatch_key_set.highestPriorityTypeId()
|
| 962 |
+
|
| 963 |
+
handler = (
|
| 964 |
+
self._get_dispatch(dispatch_key)
|
| 965 |
+
if dispatch_key not in self._dispatch_cache
|
| 966 |
+
else self._dispatch_cache[dispatch_key]
|
| 967 |
+
)
|
| 968 |
+
|
| 969 |
+
if isinstance(handler, DispatchKey):
|
| 970 |
+
# fallthrough keys can be registered at runtime via torch.library.impl
|
| 971 |
+
# so need to add it to fallthrough_keys and re-dispatch.
|
| 972 |
+
if torch._C._dispatch_kernel_for_dispatch_key_is_fallthrough(
|
| 973 |
+
self.name(), dispatch_key
|
| 974 |
+
):
|
| 975 |
+
return self._dispatch_in_python(
|
| 976 |
+
args, kwargs, fallthrough_keys + [dispatch_key]
|
| 977 |
+
)
|
| 978 |
+
|
| 979 |
+
raise RuntimeError(
|
| 980 |
+
f"Torchbind op {self} received a FakeScriptObject input when dispatching {handler}."
|
| 981 |
+
f" but no python implementation is found."
|
| 982 |
+
f" Please file an issue on this when you encounter this error."
|
| 983 |
+
f" This error can happen when you export or compile the model."
|
| 984 |
+
f" It can still happpen even if a C++ implementation for {dispatch_key}. "
|
| 985 |
+
f" has been registered. That's because FakeScriptObject purely lives in python and cannot work "
|
| 986 |
+
f" with a C++ implementation."
|
| 987 |
+
)
|
| 988 |
+
|
| 989 |
+
assert isinstance(handler, Callable) # type: ignore[arg-type]
|
| 990 |
+
return handler(*args, **kwargs)
|
| 991 |
+
|
| 992 |
+
|
| 993 |
+
def _must_dispatch_in_python(args, kwargs):
|
| 994 |
+
return pytree.tree_any(
|
| 995 |
+
lambda obj: isinstance(
|
| 996 |
+
obj, torch._library.fake_class_registry.FakeScriptObject
|
| 997 |
+
),
|
| 998 |
+
(args, kwargs),
|
| 999 |
+
)
|
| 1000 |
+
|
| 1001 |
+
|
| 1002 |
+
def _has_script_object_arg(schema: torch.FunctionSchema) -> bool:
|
| 1003 |
+
return any(isinstance(arg.type, torch.ClassType) for arg in schema.arguments)
|
| 1004 |
+
|
| 1005 |
+
|
| 1006 |
+
# OpOverloadPacket class contains pointer to a base unresolved operator that doesn't correspond to a specific operator
|
| 1007 |
+
# You can obtain an OpOverload object through attribute query.
|
| 1008 |
+
class OpOverloadPacket:
    """Represents an unresolved operator (e.g. ``torch.ops.aten.add``) that
    bundles all of its overloads.

    Individual overloads are resolved lazily via attribute access
    (``packet.default``, ``packet.out``, ...) in ``__getattr__`` and cached
    on the instance with ``setattr``. Calling the packet directly dispatches
    through the underlying C++ op, except for torchbind ops receiving
    FakeScriptObject inputs, which must be dispatched in Python.
    """

    def __init__(self, qualified_op_name, op_name, op, overload_names):
        # These attributes are accessible on the object through the properties
        # defined below but are immutable
        self._qualified_op_name = qualified_op_name
        self.__name__ = op_name
        self._op = op
        self._overload_names = overload_names
        self._dir = []
        # True when any overload schema takes a custom-class (torchbind)
        # argument; checked on every __call__ below.
        self._has_torchbind_op_overload = any(
            _has_script_object_arg(schema) for schema in self._schemas.values()
        )

    # it's a no-op since OpOverloadPacket object is immutable and must be unique for a given op.
    def __deepcopy__(self, memo=None):
        return self

    def __repr__(self):
        return "<OpOverloadPacket(op='{}.{}')>".format(
            *self._qualified_op_name.split("::")
        )

    def __hash__(self):
        return hash(self._op)

    def __str__(self):
        return "{}.{}".format(*self._qualified_op_name.split("::"))

    @property
    def op(self):
        return self._op

    @property
    def _schemas(self):
        # Maps each overload name to its FunctionSchema, queried from C++.
        return {
            overload_name: torch._C._get_schema(self._qualified_op_name, overload_name)
            for overload_name in self._overload_names
        }

    def __getattr__(self, key):
        """Resolve overload ``key`` lazily and cache it on the instance."""
        # It is not a valid op_name when __file__ is passed in
        if key == "__file__":
            return "torch.ops"

        # ensure that query for dunder attributes that does not exist on
        # opoverloadpacket but instead exists on the self._op object does not unnecessarily call
        # `_get_operation_overload` (which is an expensive operation).
        # This is done to prevent any potential slowdown. This list can be extended
        # if there exists other attributes like `__name__` that only exist on self._op and not on the
        # opoverloadpacket.
        # This is ok since we are guaranteed that an overload name for an aten op can't start with '__'
        try:
            if key.startswith("__"):
                return getattr(self._op, key)
        except AttributeError:
            # for consistency because it seems weird to
            # throw an attribute error with a message containing
            # an object name different from the one the attribute
            # query was performed on.
            raise AttributeError(
                f"'{str(self)}' can't have an overload name beginning with '__' and the "
                f"underlying op {str(self._op)} has no attribute {key} either."
            ) from None

        try:
            # This is ok since we are guaranteed that an overload name for an aten op can't be 'default'
            use_key = "" if key == "default" else key
            # TODO: disallow access to overloads registered by JIT
            op_dk_tags = torch._C._get_operation_overload(
                self._qualified_op_name, use_key
            )
            if op_dk_tags is None:
                raise AttributeError(
                    f"The underlying op of '{str(self)}' has no overload name '{key}'"
                )

            op_, op_dk_, tags = op_dk_tags
            schema = torch._C._get_schema(self._qualified_op_name, use_key)
            # Torchbind overloads need the Python-dispatch-capable wrapper.
            overload = (
                OpOverload(self, op_, op_dk_, schema, tags)
                if not _has_script_object_arg(schema)
                else TorchBindOpOverload(self, op_, op_dk_, schema, tags)
            )
            # cache the overload object
            setattr(self, key, overload)
            self._dir.append(key)
            return overload
        except RuntimeError:
            raise AttributeError(
                f"The underlying op of '{str(self)}' has no overload name '{key}'"
            ) from None

    def __iter__(self):
        return iter(self._dir)

    # Use positional-only argument to avoid naming collision with aten ops arguments
    # that are named "self". This way, all the aten ops can be called by kwargs.
    def __call__(self, /, *args, **kwargs):
        # overloading __call__ to ensure torch.ops.foo.bar()
        # is still callable from JIT
        # We save the function ptr as the `op` attribute on
        # OpOverloadPacket to access it here.

        # Directly calling OverloadPacket goes into C++, which will check
        # the schema and cause an error for torchbind op when inputs consist of FakeScriptObject so we
        # intercept it here and call TorchBindOpOverload instead.
        if self._has_torchbind_op_overload and _must_dispatch_in_python(args, kwargs):
            return _call_overload_packet_from_python(self, args, kwargs)
        return self._op(*args, **(kwargs or {}))

    # TODO: use this to make a __dir__
    def overloads(self):
        # The empty-string overload name maps to "default".
        return [n if n else "default" for n in self._overload_names]
|
| 1121 |
+
|
| 1122 |
+
|
| 1123 |
+
# Note - this mirrors the logic of the cpp_function defined in jit/python/init.cpp
|
| 1124 |
+
# _jit_get_operations, which calls _get_operation_for_overload_or_packet.
|
| 1125 |
+
def _call_overload_packet_from_python(op: OpOverloadPacket, args, kwargs):
    """Resolve and call an overload of ``op`` entirely in Python.

    First gives ``__torch_function__`` a chance to handle the call, then
    schema-matches the inputs against each overload (allowing FakeScriptObject
    where the schema expects a script object) and calls the first match.

    Raises:
        RuntimeError: if no overload's schema matches the arguments; the
            message aggregates the per-overload match failures.
    """
    # Re-use the torch function handling logic in cpp
    torch_function_called, ret = torch._C._maybe_call_torch_function_for_op_packet(
        op, *args, **kwargs
    )

    if torch_function_called:
        return ret

    # The following mirrors getOpWithStack.
    # In cpp, we do a schema matching for the arguments, and call ToIValue to
    # to check whether the arguments are valid. But need to do similar things here
    # and check the schema whether the FakeScriptObject is the corresponding fake class
    # of the actual class used in schema.
    exceptions = {}
    found_op = None
    for overload_name in op.overloads():
        op_overload = getattr(op, overload_name)
        try:
            _ = torch._C._check_schema_allow_fake_script_object(
                op_overload._schema, *args, **kwargs
            )
            found_op = op_overload
            break
        except RuntimeError as e:
            exceptions[overload_name] = e

    if found_op:
        return found_op(*args, **kwargs)

    err_msg = (
        f"Fail to match any TorchBindOverload of {op} with following exceptions:\n"
    )
    # Fix: the original used `enumerate` here but never used the index.
    for key, msg in exceptions.items():
        err_msg += f"Overload name {key}:\n {msg}\n"
    raise RuntimeError(err_msg)
|
| 1161 |
+
|
| 1162 |
+
|
| 1163 |
+
# Resolution of torch.fn is different from torch.ops.aten.fn
|
| 1164 |
+
# torch.fn uses the Python argparser, matches with the
|
| 1165 |
+
# appropriate schema, and calls into the unboxed version of the method
|
| 1166 |
+
# torch.ops.aten.fn resolution is done via the mechanism defined in JIT.
|
| 1167 |
+
# JIT creates a stack of all the overloads and then tries to match the
|
| 1168 |
+
# correct one at runtime and always calls into the boxed version of the method
|
| 1169 |
+
# Autograd codegen creates VariableType, TracerType,
|
| 1170 |
+
# inplace or view type and python bindings.
|
| 1171 |
+
# Aten codegen generates tensor methods for the tensor class.
|
| 1172 |
+
|
| 1173 |
+
# _OpNamespace is a subclass of ModuleType because the torch script
|
| 1174 |
+
# allows attribute lookups on modules only. Since we want torch.ops.foo.bar()
|
| 1175 |
+
# to work from script, we need to ensure ops and foo are modules
|
| 1176 |
+
|
| 1177 |
+
|
| 1178 |
+
class _OpNamespace(types.ModuleType):
    """
    An op namespace to dynamically bind Operators into Python.

    Say a user has created a custom Operator called "my_namespace::my_op". To
    call this op, the user will write torch.ops.my_namespace.my_op(...).
    At startup, this operation will not yet be bound into Python. Instead, the
    following sequence of magic tricks will occur:
    1. `torch.ops.my_namespace` will invoke the `__getattr__` magic method
       on the `torch.ops` object, which will create a new `_OpNamespace`
       object called `my_namespace` and set it as an attribute on the `ops`
       object.
    2. `torch.ops.my_namespace.my_op` will then invoke `__getattr__` on
       the `my_namespace` object, which will retrieve the operation via
       `torch.get_operation`, a function bound from C++, and then in a similar
       fashion bind this new object onto the `my_namespace` object.
    3. `torch.ops.my_namespace.my_op(...)` then calls this new operation
       and subsequent accesses will incur no further lookup (the namespace and
       operation will already exist).
    """

    def __init__(self, name):
        # Subclassing ModuleType so TorchScript, which only allows attribute
        # lookups on modules, can resolve names through this namespace.
        super().__init__("torch.ops." + name)
        self.name = name
        self._dir = []

    def __iter__(self):
        return iter(self._dir)

    def __getattr__(self, op_name):
        """Resolve operator ``op_name`` lazily and cache the resulting packet."""
        # It is not a valid op_name when __file__ is passed in
        if op_name == "__file__":
            return "torch.ops"
        elif op_name in ["__origin__", "__self__"]:
            raise AttributeError(
                f"Invalid attribute '{op_name}' for '_OpNamespace' '{self.name}'"
            )

        # Get the op `my_namespace::my_op` if available. This will also check
        # for overloads and raise an exception if there are more than one.
        namespace_name = self.name
        qualified_op_name = f"{namespace_name}::{op_name}"
        module_name = self.__module__ + "." + namespace_name

        try:
            op, overload_names = _get_packet(qualified_op_name, module_name)
            if op is None:
                raise AttributeError(
                    f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
                )
        except RuntimeError as e:
            # Turn this into AttributeError so getattr(obj, key, default)
            # works (this is called by TorchScript with __origin__)
            raise AttributeError(
                f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
            ) from e

        op.__module__ = module_name
        opoverloadpacket = OpOverloadPacket(
            qualified_op_name, op_name, op, overload_names
        )
        opoverloadpacket.__module__ = self.__module__ + "." + namespace_name
        # cache the opoverloadpacket to ensure that each op corresponds to
        # a unique OpOverloadPacket object
        setattr(self, op_name, opoverloadpacket)
        self._dir.append(op_name)
        return opoverloadpacket
|
| 1245 |
+
|
| 1246 |
+
|
| 1247 |
+
def _get_packet(qualname, op_module):
|
| 1248 |
+
op, overload_names = torch._C._jit_get_operation(qualname)
|
| 1249 |
+
if op is not None:
|
| 1250 |
+
# let the script frontend know that op is identical to the builtin op
|
| 1251 |
+
# with qualified_op_name
|
| 1252 |
+
torch.jit._builtins._register_builtin(op, qualname)
|
| 1253 |
+
op.__module__ = op_module
|
| 1254 |
+
return op, overload_names
|
| 1255 |
+
|
| 1256 |
+
|
| 1257 |
+
def _refresh_packet(packet):
    """Re-resolve ``packet``'s underlying JIT op and overload list in place."""
    qualname = packet._qualified_op_name
    module_name = packet._op.__module__
    op, overload_names = _get_packet(qualname, module_name)
    assert op is not None
    packet._op = op
    packet._overload_names = overload_names
|
| 1262 |
+
|
| 1263 |
+
|
| 1264 |
+
class _PyOpNamespace(_OpNamespace):
    """Namespace backed by a plain dict of Python-implemented ops."""

    def __init__(self, name, ops):
        super().__init__(name)
        self._ops = ops

    def __getattr__(self, name):
        # Following _OpNamespace.__getattr__, we cache the op on the
        # _PyOpNamespace object so later accesses bypass __getattr__.
        op = self._ops.get(name, None)
        if op is not None:
            setattr(self, name, op)
            return op
        raise AttributeError(
            f"'_PyOpNamespace' '{self.name}' object has no attribute '{name}'"
        )
|
| 1278 |
+
|
| 1279 |
+
|
| 1280 |
+
class _Ops(types.ModuleType):
    """The object behind ``torch.ops``: lazily materializes operator
    namespaces on attribute access and tracks loaded custom-op libraries.
    """

    __file__ = "_ops.py"

    def __init__(self):
        super().__init__("torch.ops")
        # Paths of shared libraries loaded via load_library().
        self.loaded_libraries = set()
        self._higher_order_op_namespace = _PyOpNamespace(
            "torch.ops.higher_order", _higher_order_ops
        )
        self._dir = []

    def __getattr__(self, name):
        """Return (creating and caching on first use) the namespace ``name``."""
        # Check if the name is a HigherOrderOperator
        if name == "higher_order":
            return self._higher_order_op_namespace

        # Here we are creating `torch.ops.my_namespace`
        namespace = _OpNamespace(name)
        setattr(self, name, namespace)
        self._dir.append(name)
        return namespace

    def __iter__(self):
        return iter(self._dir)

    def import_module(self, module):
        """
        Imports a Python module that has torch.library registrations.

        Generally, to extend PyTorch with custom operators, a user will
        create a Python module whose import triggers registration of
        the custom operators via a torch.ops.load_library call or a call
        to one or more torch.library.* APIs.

        It is unexpected for Python modules to have side effects, so some
        linters and formatters will complain. Use this API to import Python
        modules that contain these torch.library side effects.

        Args:
            module (str): The name of the Python module to import

        """
        importlib.import_module(module)

    def load_library(self, path):
        """
        Loads a shared library from the given path into the current process.

        The library being loaded may run global initialization code to register
        custom operators with the PyTorch JIT runtime. This allows dynamically
        loading custom operators. For this, you should compile your operator
        and the static registration code into a shared library object, and then
        call ``torch.ops.load_library('path/to/libcustom.so')`` to load the
        shared object.

        After the library is loaded, it is added to the
        ``torch.ops.loaded_libraries`` attribute, a set that may be inspected
        for the paths of all libraries loaded using this function.

        Args:
            path (str): A path to a shared library to load.
        """
        # torch::deploy interpreters cannot dlopen custom-op libraries.
        if torch._running_with_deploy():
            return

        path = _utils_internal.resolve_library_path(path)
        with dl_open_guard():
            # Import the shared library into the process, thus running its
            # static (global) initialization code in order to register custom
            # operators with the JIT.
            ctypes.CDLL(path)
        self.loaded_libraries.add(path)
|
| 1352 |
+
|
| 1353 |
+
|
| 1354 |
+
# The ops "namespace": the singleton instance backing `torch.ops`; operator
# namespaces and packets are bound onto it lazily via attribute access.
ops: _Ops = _Ops()
|
pllava/lib/python3.10/site-packages/torch/_python_dispatcher.py
ADDED
|
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import re
|
| 3 |
+
|
| 4 |
+
import torch._C as C
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
PythonDispatcher class is a thin python-binding to C++ dispatcher and it
|
| 9 |
+
is designed to show how dispatcher precompute works. In particular,
|
| 10 |
+
it shows for a certain op `foo`, what the computed dispatch table looks
|
| 11 |
+
like after users register their kernels to certain dispatch keys.
|
| 12 |
+
|
| 13 |
+
In the real C++ dispatcher we support many dispatch keys for different
|
| 14 |
+
functionalities. For simplicity PythonDispatcher only supports dispatch
|
| 15 |
+
keys for a single example of each use case. These use cases are listed below:
|
| 16 |
+
|
| 17 |
+
- CPU/AutogradCPU: represents in-tree backends which we usually have dedicated inference &
|
| 18 |
+
autograd kernel in pytorch core library.
|
| 19 |
+
E.g. CPU, CUDA
|
| 20 |
+
- FPGA/AutogradOther: represents in-tree backends which we usually have backend specific
|
| 21 |
+
inference kernels, but they share the same autograd kernel specified in AutogradOther.
|
| 22 |
+
E.g. FPGA, SparseCsrCPU
|
| 23 |
+
- XLA/AutogradXLA: represents out-of-tree backends which we don't have either inference or autograd
|
| 24 |
+
kernel defined in pytorch core library. Backend owner is responsible for registering both
|
| 25 |
+
inference & autograd kernels in their extensions(e.g. torch-xla) for the operators they support.
|
| 26 |
+
E.g. XLA, XPU, MPS
|
| 27 |
+
- CompositeExplicitAutograd: alias key mapped to inference kernels of all backends like CPU, CUDA, XLA etc.
|
| 28 |
+
Kernels registered to this key MUST work for inference for all backends.
|
| 29 |
+
- Autograd: alias key mapped to autograd of all backends like AutogradCPU, AutogradXLA, AutogradOther.
|
| 30 |
+
Kernels registered to this key MUST work for autograd for all backends.
|
| 31 |
+
- CompositeImplicitAutograd: alias key CompositeImplicitAutograd = CompositeExplicitAutograd + Autograd
|
| 32 |
+
Kernels registered to this key MUST work for both inference + autograd for all backends.
|
| 33 |
+
|
| 34 |
+
Note we only allow registrations to alias keys inside pytorch core library. E.g
|
| 35 |
+
you shouldn't register a CompositeImplicitAutograd or CompositeExplicitAutograd
|
| 36 |
+
kernel from torch-xla extension, instead you should upstream the kernel into
|
| 37 |
+
pytorch/pytorch repo so that it's available for all backends and continuously
|
| 38 |
+
tested even without the extension.
|
| 39 |
+
|
| 40 |
+
Usage:
|
| 41 |
+
dispatcher = PythonDispatcher()
|
| 42 |
+
dispatcher.register(["CPU", "XLA", "CompositeImplicitAutograd"])
|
| 43 |
+
print(dispatcher.dispatchTable()) # This tells you exactly which kernel is used for certain backend.
|
| 44 |
+
# For more debugging information
|
| 45 |
+
# print(dispatcher.keys())
|
| 46 |
+
# print(dispatcher.registrations())
|
| 47 |
+
# print(dispatcher.rawRegistrations())
|
| 48 |
+
# print(dispatcher.rawDispatchTable())
|
| 49 |
+
PythonDispatcher calls the C++ dispatcher under the hood to precompute the dispatch table.
|
| 50 |
+
This file only provides the simplified API for developers, relevant test code is located in
|
| 51 |
+
test/test_dispatch.py
|
| 52 |
+
"""
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class PythonDispatcher:
    """Simplified Python front-end to the C++ dispatcher, used to demonstrate
    how dispatch-table precomputation works (see the module docstring).

    Fixes over the previous revision: the "Overriden" misspelling in the
    duplicate-registration error message, and the free-floating string
    literals (which were no-op statements) are now real method docstrings.
    """

    namespace = "__test__"
    name = "foo"
    # fmt: off
    runtime_keys = [
        "CPU", "AutogradCPU",
        "FPGA", "AutogradOther",
        "XLA", "AutogradXLA",
        "Lazy", "AutogradLazy",
    ]
    # fmt: on
    alias_keys = [
        "CompositeExplicitAutograd",
        "Autograd",
        "CompositeImplicitAutograd",
    ]
    supported_keys = runtime_keys + alias_keys

    def __init__(self) -> None:
        C._dispatch_check_invariants(self.name)  # type: ignore[attr-defined]
        self.ref = C._dispatch_library("FRAGMENT", self.namespace, "")
        self.ref.def_("foo(Tensor x) -> Tensor")

    def keys(self):
        """Return the list of dispatch keys supported by PythonDispatcher.

        You can register kernels to these keys.
        """
        return self.supported_keys

    def register(self, dispatchKeys):
        """Register auto-generated kernels to the given dispatch keys.

        dispatchKeys (list[str]): dispatch keys to register a kernel to.
        You don't need to write the kernel yourself; e.g. for the CPU key a
        kernel named ``fn_CPU`` is generated and registered automatically.
        """
        # Overriding is not supported and triggers a warning in C++ dispatcher.
        if len(set(dispatchKeys)) != len(dispatchKeys):
            raise RuntimeError(
                f"Overriding is not allowed but found duplicates in {dispatchKeys}."
            )
        # We currently forbid this in codegen instead of C++ dispatcher.
        if (
            "CompositeImplicitAutograd" in dispatchKeys
            and "CompositeExplicitAutograd" in dispatchKeys
        ):
            raise RuntimeError(
                "Registration to both CompositeImplicitAutograd and CompositeExplicitAutograd is not allowed."
            )
        for key in dispatchKeys:
            if key not in self.supported_keys:
                raise RuntimeError(
                    f"{key} is not supported, please select a dispatch key in {self.supported_keys}."
                )
            self.ref.impl_t_t("foo", dispatch=key, debug="fn_" + key)

    def _format_line(self, key, kernel):
        """Format one (key, kernel) row of a table."""
        return f"{key:<15} {kernel}\n"

    def _format_header(self, header):
        """Format a table header followed by the column names and a rule."""
        s = f"""
{header}
"""
        s += self._format_line("key", "kernel")
        s += "---------------------------\n"
        return s

    def rawRegistrations(self):
        """Return raw output of all registration info, for debugging only.

        Use registrations() for a simplified version.
        """
        return C._dispatch_dump(f"{self.namespace}::{self.name}")  # type: ignore[attr-defined]

    def rawDispatchTable(self):
        """Return raw output of the computed dispatch table, for debugging only.

        Use dispatchTable() for a simplified version.
        """
        return C._dispatch_dump_table(f"{self.namespace}::{self.name}")  # type: ignore[attr-defined]

    def registrations(self):
        """Return a table (str) including all the registrations from users.

        Note this includes registrations to both runtime keys and alias keys.
        """
        output = self._format_header("Registered Kernels")
        state = self.rawRegistrations()
        state_entries = state.split("\n")
        for line in state_entries:
            first = line.split(":")[0]
            if any(first.startswith(k) for k in self.supported_keys):
                kernel = line.split("::")[0].split(" ")[1]
                output += self._format_line(first, kernel)
        return output

    def dispatchTable(self):
        """Return the computed dispatch table (str).

        Only runtime keys are listed; registrations to alias keys have been
        decoded to their mapped runtime keys.
        """
        output = self._format_header("Computed Dispatch Table")
        table = self.rawDispatchTable()
        table_entries = table.split("\n")
        regex = re.compile(r"registered at .*FallbackKernel\.cpp.*(\[)")
        for line in table_entries:
            k = line.split(":")[0]
            if k in self.runtime_keys:
                entry = regex.sub("[", line)
                output += self._format_line(k, entry.split(": ")[1])
        return output
|
pllava/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc
ADDED
|
Binary file (2.56 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_refs/_conversions.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
import torch._prims_common as utils
|
| 4 |
+
|
| 5 |
+
# Utilities should come BEFORE this import
|
| 6 |
+
from torch._decomp import register_decomposition
|
| 7 |
+
from torch._prims_common import TensorLikeType
|
| 8 |
+
from torch._prims_common.wrappers import out_wrapper
|
| 9 |
+
from torch._refs import _broadcast_shapes
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Data conversion references.
|
| 13 |
+
#
|
| 14 |
+
# Note: this module breaks the usual _refs to torch naming scheme where
|
| 15 |
+
# _refs.foo.bar is a ref for torch.foo.bar. The following definitions are not
|
| 16 |
+
# part of _refs/__init__.py to avoid name clashes with Python builtin types
|
| 17 |
+
# (like int).
|
| 18 |
+
|
| 19 |
+
# Public API of this module; several names intentionally shadow Python
# builtins (bool, float, int, ...) — that is why these refs live here rather
# than in _refs/__init__.py.
__all__ = [
    # dtypes
    "bfloat16",
    "bool",
    "byte",
    "cdouble",
    "cfloat",
    "chalf",
    "char",
    "double",
    "float",
    "half",
    "int",
    "long",
    "short",
    # misc
    "complex",
    "polar",
]
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _make_conversion_method(name: str, dtype: torch.dtype):
|
| 41 |
+
def fn(
|
| 42 |
+
self: TensorLikeType, memory_format: torch.memory_format = torch.preserve_format
|
| 43 |
+
) -> TensorLikeType:
|
| 44 |
+
return self.to(dtype, memory_format=memory_format) # type: ignore[call-overload]
|
| 45 |
+
|
| 46 |
+
fn.__name__ = name
|
| 47 |
+
return fn
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# Dtype-conversion refs, mirroring the corresponding Tensor methods
# (Tensor.bfloat16(), Tensor.bool(), ...). Note that bool/float/int/...
# deliberately shadow the Python builtins within this module.
bfloat16 = _make_conversion_method("bfloat16", torch.bfloat16)

bool = _make_conversion_method("bool", torch.bool)

byte = _make_conversion_method("byte", torch.uint8)

cdouble = _make_conversion_method("cdouble", torch.cdouble)

cfloat = _make_conversion_method("cfloat", torch.cfloat)

chalf = _make_conversion_method("chalf", torch.complex32)

char = _make_conversion_method("char", torch.int8)

double = _make_conversion_method("double", torch.double)

float = _make_conversion_method("float", torch.float)

half = _make_conversion_method("half", torch.half)

int = _make_conversion_method("int", torch.int)

long = _make_conversion_method("long", torch.long)

short = _make_conversion_method("short", torch.short)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
@register_decomposition(torch._ops.ops.aten.complex)
# Note: complex has type promotion tests disabled due to different semantics.
# exact_dtype is for compat with complex_check_dtype from core.
@out_wrapper(exact_dtype=True)
def complex(real: TensorLikeType, imag: TensorLikeType) -> TensorLikeType:
    """Reference implementation of ``torch.complex``.

    Validates that both inputs share one of the supported floating dtypes,
    allocates a complex tensor of the broadcast shape, and fills its real
    and imaginary parts in place.
    """
    supported = (torch.float32, torch.float64, torch.float16)
    both_supported = real.dtype in supported and imag.dtype in supported
    torch._check(
        both_supported,
        lambda: (
            f"Expected both inputs to be Half, Float or Double tensors but got "
            f"{real.dtype} and {imag.dtype}"
        ),
    )
    torch._check(
        real.dtype == imag.dtype,
        lambda: (
            f"Expected object of scalar type {real.dtype} but got "
            f"scalar type {imag.dtype} for second argument"
        ),
    )
    out_dtype = utils.corresponding_complex_dtype(real.dtype)  # type: ignore[arg-type]
    out_shape = _broadcast_shapes(real.shape, imag.shape)
    out = real.new_empty(
        out_shape,
        dtype=out_dtype,
        layout=real.layout,
        device=real.device,
        # pin_memory=real.is_pinned(),  # NYI
    )
    out.real = real
    out.imag = imag
    return out
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
@register_decomposition(torch._ops.ops.aten.polar)
# Note: polar has type promotion tests disabled due to different semantics.
# exact_dtype is for compat with complex_check_dtype from core.
@out_wrapper(exact_dtype=True)
def polar(abs: TensorLikeType, angle: TensorLikeType) -> TensorLikeType:
    """Reference implementation of ``torch.polar``.

    Produces a complex tensor with magnitude ``abs`` and phase ``angle``,
    i.e. ``abs * (cos(angle) + i * sin(angle))``.
    """
    # torch.complex performs the dtype/broadcast validation and allocates the
    # complex result; its components are then overwritten with the polar form.
    out = torch.complex(abs, angle)
    out.real = abs * torch.cos(angle)
    out.imag = abs * torch.sin(angle)
    return out
|
pllava/lib/python3.10/site-packages/torch/_refs/fft.py
ADDED
|
@@ -0,0 +1,590 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from typing import Iterable, List, Literal, NamedTuple, Optional, Sequence, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch._prims as prims
|
| 6 |
+
import torch._prims_common as utils
|
| 7 |
+
from torch._decomp import register_decomposition
|
| 8 |
+
from torch._prims_common import DimsType, ShapeType, TensorLikeType
|
| 9 |
+
from torch._prims_common.wrappers import _maybe_convert_to_dtype, out_wrapper
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
# Transforms
|
| 14 |
+
"fft",
|
| 15 |
+
"fft2",
|
| 16 |
+
"fftn",
|
| 17 |
+
"hfft",
|
| 18 |
+
"hfft2",
|
| 19 |
+
"hfftn",
|
| 20 |
+
"rfft",
|
| 21 |
+
"rfft2",
|
| 22 |
+
"rfftn",
|
| 23 |
+
"ifft",
|
| 24 |
+
"ifft2",
|
| 25 |
+
"ifftn",
|
| 26 |
+
"ihfft",
|
| 27 |
+
"ihfft2",
|
| 28 |
+
"ihfftn",
|
| 29 |
+
"irfft",
|
| 30 |
+
"irfft2",
|
| 31 |
+
"irfftn",
|
| 32 |
+
# Helpers
|
| 33 |
+
"fftshift",
|
| 34 |
+
"ifftshift",
|
| 35 |
+
]
|
| 36 |
+
|
| 37 |
+
NormType = Union[None, Literal["forward", "backward", "ortho"]]
|
| 38 |
+
_NORM_VALUES = {None, "forward", "backward", "ortho"}
|
| 39 |
+
aten = torch._ops.ops.aten
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _apply_norm(
|
| 43 |
+
x: TensorLikeType, norm: NormType, signal_numel: int, forward: bool
|
| 44 |
+
) -> TensorLikeType:
|
| 45 |
+
"""Apply normalization to the un-normalized FFT result"""
|
| 46 |
+
torch._check(norm in _NORM_VALUES, lambda: f"Invalid normalization mode: {norm}")
|
| 47 |
+
|
| 48 |
+
if norm == "ortho":
|
| 49 |
+
return x * (1 / math.sqrt(signal_numel))
|
| 50 |
+
|
| 51 |
+
normalize = (not forward and (norm is None or norm == "backward")) or (
|
| 52 |
+
forward and norm == "forward"
|
| 53 |
+
)
|
| 54 |
+
return x * (1 / signal_numel) if normalize else x
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _promote_type_fft(
|
| 58 |
+
dtype: torch.dtype, require_complex: bool, device: torch.device
|
| 59 |
+
) -> torch.dtype:
|
| 60 |
+
"""Helper to promote a dtype to one supported by the FFT primitives"""
|
| 61 |
+
if dtype.is_complex:
|
| 62 |
+
return dtype
|
| 63 |
+
|
| 64 |
+
# Promote integral to default float type
|
| 65 |
+
if not dtype.is_floating_point:
|
| 66 |
+
dtype = torch.get_default_dtype()
|
| 67 |
+
|
| 68 |
+
allowed_types = [torch.float32, torch.float64]
|
| 69 |
+
maybe_support_half = device.type in ["cuda", "meta"]
|
| 70 |
+
|
| 71 |
+
if maybe_support_half:
|
| 72 |
+
allowed_types.append(torch.float16)
|
| 73 |
+
torch._check(dtype in allowed_types, lambda: f"Unsupported dtype {dtype}")
|
| 74 |
+
|
| 75 |
+
if require_complex:
|
| 76 |
+
dtype = utils.corresponding_complex_dtype(dtype)
|
| 77 |
+
|
| 78 |
+
return dtype
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def _maybe_promote_tensor_fft(
|
| 82 |
+
t: TensorLikeType, require_complex: bool = False
|
| 83 |
+
) -> TensorLikeType:
|
| 84 |
+
"""Helper to promote a tensor to a dtype supported by the FFT primitives"""
|
| 85 |
+
cur_type = t.dtype
|
| 86 |
+
new_type = _promote_type_fft(cur_type, require_complex, t.device)
|
| 87 |
+
return _maybe_convert_to_dtype(t, new_type) # type: ignore[return-value]
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _resize_fft_input(
|
| 91 |
+
x: TensorLikeType, dims: Tuple[int, ...], sizes: Tuple[int, ...]
|
| 92 |
+
) -> TensorLikeType:
|
| 93 |
+
"""
|
| 94 |
+
Fixes the shape of x such that x.size(dims[i]) == sizes[i],
|
| 95 |
+
either by zero-padding, or by slicing x starting from 0.
|
| 96 |
+
"""
|
| 97 |
+
assert len(dims) == len(sizes)
|
| 98 |
+
must_copy = False
|
| 99 |
+
x_sizes = x.shape
|
| 100 |
+
pad_amount = [0] * len(x_sizes) * 2
|
| 101 |
+
for i in range(len(dims)):
|
| 102 |
+
if sizes[i] == -1:
|
| 103 |
+
continue
|
| 104 |
+
|
| 105 |
+
if x_sizes[dims[i]] < sizes[i]:
|
| 106 |
+
must_copy = True
|
| 107 |
+
pad_idx = len(pad_amount) - 2 * dims[i] - 1
|
| 108 |
+
pad_amount[pad_idx] = sizes[i] - x_sizes[dims[i]]
|
| 109 |
+
|
| 110 |
+
if x_sizes[dims[i]] > sizes[i]:
|
| 111 |
+
x = x.narrow(dims[i], 0, sizes[i])
|
| 112 |
+
|
| 113 |
+
return torch.constant_pad_nd(x, pad_amount) if must_copy else x
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def _fft_c2r(
    func_name: str,
    input: TensorLikeType,
    n: Optional[int],
    dim: int,
    norm: NormType,
    forward: bool,
) -> TensorLikeType:
    """Common code for performing any complex to real FFT (irfft or hfft)

    ``n`` is the real output length; when omitted it defaults to
    2 * (input.shape[dim] - 1), the inverse of the onesided r2c size.
    ``func_name`` is currently unused here (kept for signature parity
    with the other _fft_* helpers).
    """
    input = _maybe_promote_tensor_fft(input, require_complex=True)
    dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
    last_dim_size = n if n is not None else 2 * (input.shape[dim] - 1)
    torch._check(
        last_dim_size >= 1,
        lambda: f"Invalid number of data points ({last_dim_size}) specified",
    )

    # The complex input stores only the onesided half: floor(n/2) + 1 entries.
    if n is not None:
        input = _resize_fft_input(input, dims=dims, sizes=(last_dim_size // 2 + 1,))

    # A forward c2r (hfft) is implemented as conjugation + inverse c2r.
    if forward:
        input = torch.conj(input)

    output = prims.fft_c2r(input, dim=dims, last_dim_size=last_dim_size)
    return _apply_norm(output, norm=norm, signal_numel=last_dim_size, forward=forward)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def _fft_r2c(
    func_name: str,
    input: TensorLikeType,
    n: Optional[int],
    dim: int,
    norm: NormType,
    forward: bool,
    onesided: bool,
) -> TensorLikeType:
    """Common code for performing any real to complex FFT (rfft or ihfft)

    ``func_name`` is only used in error messages.  ``onesided`` selects
    whether the redundant conjugate-symmetric half of the spectrum is
    dropped.  ``n`` optionally pads/trims the transformed dimension first.
    """
    torch._check(
        not input.dtype.is_complex,
        lambda: f"{func_name} expects a floating point input tensor, but got {input.dtype}",
    )
    input = _maybe_promote_tensor_fft(input)
    dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
    dim_size = n if n is not None else input.shape[dim]
    torch._check(
        dim_size >= 1, lambda: f"Invalid number of data points ({dim_size}) specified"
    )

    if n is not None:
        input = _resize_fft_input(input, dims, (n,))

    ret = prims.fft_r2c(input, dim=dims, onesided=onesided)
    ret = _apply_norm(ret, norm, dim_size, forward)
    # The r2c primitive always computes a forward transform; the inverse
    # variant (ihfft) is obtained by conjugating the result.
    return ret if forward else torch.conj(ret)
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def _fft_c2c(
    func_name: str,
    input: TensorLikeType,
    n: Optional[int],
    dim: int,
    norm: NormType,
    forward: bool,
) -> TensorLikeType:
    """Common code for performing any complex to complex FFT (fft or ifft)

    ``func_name`` is only used in error messages.  ``n`` optionally
    pads/trims the transformed dimension before the transform.
    """
    torch._check(
        input.dtype.is_complex,
        lambda: f"{func_name} expects a complex input tensor, but got {input.dtype}",
    )
    dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
    dim_size = n if n is not None else input.shape[dim]
    torch._check(
        dim_size >= 1, lambda: f"Invalid number of data points ({dim_size}) specified"
    )

    if n is not None:
        input = _resize_fft_input(input, dims, (n,))

    ret = prims.fft_c2c(input, dim=dims, forward=forward)
    return _apply_norm(ret, norm, dim_size, forward)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
@register_decomposition(aten.fft_fft)
|
| 199 |
+
@out_wrapper()
|
| 200 |
+
def fft(
|
| 201 |
+
input: TensorLikeType,
|
| 202 |
+
n: Optional[int] = None,
|
| 203 |
+
dim: int = -1,
|
| 204 |
+
norm: NormType = None,
|
| 205 |
+
) -> TensorLikeType:
|
| 206 |
+
if input.dtype.is_complex:
|
| 207 |
+
return _fft_c2c("fft", input, n, dim, norm, forward=True)
|
| 208 |
+
else:
|
| 209 |
+
return _fft_r2c("fft", input, n, dim, norm, forward=True, onesided=False)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
@register_decomposition(aten.fft_ifft)
|
| 213 |
+
@out_wrapper()
|
| 214 |
+
def ifft(
|
| 215 |
+
input: TensorLikeType,
|
| 216 |
+
n: Optional[int] = None,
|
| 217 |
+
dim: int = -1,
|
| 218 |
+
norm: NormType = None,
|
| 219 |
+
) -> TensorLikeType:
|
| 220 |
+
if input.dtype.is_complex:
|
| 221 |
+
return _fft_c2c("ifft", input, n, dim, norm, forward=False)
|
| 222 |
+
else:
|
| 223 |
+
return _fft_r2c("ifft", input, n, dim, norm, forward=False, onesided=False)
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
@register_decomposition(aten.fft_rfft)
|
| 227 |
+
@out_wrapper()
|
| 228 |
+
def rfft(
|
| 229 |
+
input: TensorLikeType,
|
| 230 |
+
n: Optional[int] = None,
|
| 231 |
+
dim: int = -1,
|
| 232 |
+
norm: NormType = None,
|
| 233 |
+
) -> TensorLikeType:
|
| 234 |
+
return _fft_r2c("rfft", input, n, dim, norm, forward=True, onesided=True)
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
@register_decomposition(aten.fft_irfft)
|
| 238 |
+
@out_wrapper()
|
| 239 |
+
def irfft(
|
| 240 |
+
input: TensorLikeType,
|
| 241 |
+
n: Optional[int] = None,
|
| 242 |
+
dim: int = -1,
|
| 243 |
+
norm: NormType = None,
|
| 244 |
+
) -> TensorLikeType:
|
| 245 |
+
return _fft_c2r("irfft", input, n, dim, norm, forward=False)
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
@register_decomposition(aten.fft_hfft)
|
| 249 |
+
@out_wrapper()
|
| 250 |
+
def hfft(
|
| 251 |
+
input: TensorLikeType,
|
| 252 |
+
n: Optional[int] = None,
|
| 253 |
+
dim: int = -1,
|
| 254 |
+
norm: NormType = None,
|
| 255 |
+
) -> TensorLikeType:
|
| 256 |
+
return _fft_c2r("hfft", input, n, dim, norm, forward=True)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
@register_decomposition(aten.fft_ihfft)
|
| 260 |
+
@out_wrapper()
|
| 261 |
+
def ihfft(
|
| 262 |
+
input: TensorLikeType,
|
| 263 |
+
n: Optional[int] = None,
|
| 264 |
+
dim: int = -1,
|
| 265 |
+
norm: NormType = None,
|
| 266 |
+
) -> TensorLikeType:
|
| 267 |
+
return _fft_r2c("ihfft", input, n, dim, norm, forward=False, onesided=True)
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
class _ShapeAndDims(NamedTuple):
    # Canonical (shape, dims) pair describing an n-dimensional transform:
    # shape[i] is the signal length along dimension dims[i].
    shape: Tuple[int, ...]
    dims: Tuple[int, ...]
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def _canonicalize_fft_shape_and_dim_args(
    input: TensorLikeType, shape: Optional[ShapeType], dim: Optional[DimsType]
) -> _ShapeAndDims:
    """Convert the shape and dim arguments into a canonical form where neither are optional

    Defaults: with only ``shape``, the last len(shape) dimensions are used;
    with only ``dim``, each listed dimension keeps its current size; with
    neither, every dimension is transformed at its current size.  A -1 in
    ``shape`` means "current size of the matching dim".
    """
    input_dim = input.ndim
    input_sizes = input.shape

    if dim is not None:
        if not isinstance(dim, Sequence):
            dim = (dim,)
        ret_dims = utils.canonicalize_dims(input_dim, dim, wrap_scalar=False)

        # Check dims are unique
        torch._check(
            len(set(ret_dims)) == len(ret_dims), lambda: "FFT dims must be unique"
        )

    if shape is not None:
        if not isinstance(shape, Sequence):
            shape = (shape,)

        # Has shape, might have dim
        torch._check(
            dim is None or len(dim) == len(shape),
            lambda: "When given, dim and shape arguments must have the same length",
        )
        transform_ndim = len(shape)

        torch._check(
            transform_ndim <= input_dim,
            lambda: f"Got shape with {transform_ndim} values but input tensor "
            f"only has {input_dim} dimensions.",
        )

        # If shape is given, dims defaults to the last len(shape) dimensions
        if dim is None:
            ret_dims = tuple(range(input_dim - transform_ndim, input_dim))

        # Translate any -1 values in shape to the default length
        ret_shape = tuple(
            s if s != -1 else input_sizes[d] for (s, d) in zip(shape, ret_dims)  # type: ignore[possibly-undefined]
        )
    elif dim is None:
        # No shape, no dim
        ret_dims = tuple(range(input_dim))
        ret_shape = tuple(input_sizes)
    else:
        # No shape, has dim
        ret_shape = tuple(input_sizes[d] for d in ret_dims)  # type: ignore[possibly-undefined]

    # Every transformed axis must hold at least one data point.
    for n in ret_shape:
        torch._check(n > 0, lambda: f"Invalid number of data points ({n}) specified")

    return _ShapeAndDims(shape=ret_shape, dims=ret_dims)  # type: ignore[possibly-undefined]
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
def _prod(xs: Iterable[int]) -> int:
|
| 332 |
+
"""Compute product of a list"""
|
| 333 |
+
prod = 1
|
| 334 |
+
for x in xs:
|
| 335 |
+
prod *= x
|
| 336 |
+
return prod
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def _fftn_c2c(
    function_name: str,
    input: TensorLikeType,
    shape: Tuple[int, ...],
    dim: Tuple[int, ...],
    norm: NormType,
    forward: bool,
) -> TensorLikeType:
    """Common code for n-dimensional complex to complex FFTs (fftn or ifftn)

    ``shape``/``dim`` must already be canonicalized (see
    _canonicalize_fft_shape_and_dim_args); ``function_name`` is only used
    in error messages.
    """
    torch._check(
        input.dtype.is_complex,
        lambda: f"{function_name} expects a complex input tensor, "
        f"but got {input.dtype}",
    )
    # Pad/trim each transformed dimension to the requested length.
    x = _resize_fft_input(input, dim, shape)
    output = prims.fft_c2c(x, dim=dim, forward=forward)
    # Normalize over the total number of transformed samples.
    return _apply_norm(output, norm=norm, signal_numel=_prod(shape), forward=forward)
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
@register_decomposition(aten.fft_fftn)
|
| 359 |
+
@out_wrapper()
|
| 360 |
+
def fftn(
|
| 361 |
+
input: TensorLikeType,
|
| 362 |
+
s: Optional[ShapeType] = None,
|
| 363 |
+
dim: Optional[DimsType] = None,
|
| 364 |
+
norm: NormType = None,
|
| 365 |
+
) -> TensorLikeType:
|
| 366 |
+
(shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
|
| 367 |
+
x = _maybe_promote_tensor_fft(input, require_complex=True)
|
| 368 |
+
return _fftn_c2c("fftn", x, shape, dim, norm, forward=True)
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
@register_decomposition(aten.fft_ifftn)
|
| 372 |
+
@out_wrapper()
|
| 373 |
+
def ifftn(
|
| 374 |
+
input: TensorLikeType,
|
| 375 |
+
s: Optional[ShapeType] = None,
|
| 376 |
+
dim: Optional[DimsType] = None,
|
| 377 |
+
norm: NormType = None,
|
| 378 |
+
) -> TensorLikeType:
|
| 379 |
+
(shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
|
| 380 |
+
x = _maybe_promote_tensor_fft(input, require_complex=True)
|
| 381 |
+
return _fftn_c2c("ifftn", x, shape, dim, norm, forward=False)
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
@register_decomposition(aten.fft_rfftn)
@out_wrapper()
def rfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference implementation of torch.fft.rfftn: n-dimensional FFT of a
    real input, keeping only the onesided half of the last transformed dim."""
    torch._check(
        not input.dtype.is_complex,
        lambda: f"rfftn expects a real-valued input tensor, but got {input.dtype}",
    )
    shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    input = _maybe_promote_tensor_fft(input, require_complex=False)
    input = _resize_fft_input(input, dim, shape)
    out = prims.fft_r2c(input, dim=dim, onesided=True)
    # rfftn is always a forward transform over prod(shape) samples.
    return _apply_norm(out, norm=norm, signal_numel=_prod(shape), forward=True)
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
@register_decomposition(aten.fft_ihfftn)
@out_wrapper()
def ihfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference implementation of torch.fft.ihfftn (inverse FFT of a real
    signal; the output spectrum is Hermitian-symmetric and stored onesided)."""
    torch._check(
        not input.dtype.is_complex,
        lambda: f"ihfftn expects a real-valued input tensor, but got {input.dtype}",
    )
    shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    torch._check(len(shape) > 0, lambda: "ihfftn must transform at least one axis")
    input = _maybe_promote_tensor_fft(input, require_complex=False)
    input = _resize_fft_input(input, dim, shape)

    # Last axis: onesided r2c (the primitive computes a forward transform).
    tmp = prims.fft_r2c(input, dim=dim[-1:], onesided=True)

    if len(dim) == 1:
        # Single-axis case: normalize as an inverse transform, then
        # conjugate to turn the forward r2c into the inverse transform.
        tmp = _apply_norm(tmp, norm=norm, signal_numel=shape[0], forward=False)
        return prims.conj(tmp)

    # Multi-axis: conjugate (materialized, not a lazy conj bit), run an
    # inverse c2c over the remaining axes, then normalize over all samples.
    tmp = prims.conj_physical(tmp)
    tmp = prims.fft_c2c(tmp, dim=dim[:-1], forward=False)
    return _apply_norm(tmp, norm=norm, signal_numel=_prod(shape), forward=False)
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
class _CanonicalizeC2rReturn(NamedTuple):
    # Canonicalized arguments for n-dimensional c2r transforms:
    # shape/dim describe the (onesided, complex) input after resizing, and
    # last_dim_size is the real output length along dim[-1].
    shape: Tuple[int, ...]
    dim: Tuple[int, ...]
    last_dim_size: int
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
def _canonicalize_fft_c2r_shape_and_dim_args(
    fname: str,
    input: TensorLikeType,
    s: Optional[ShapeType],
    dim: Optional[DimsType],
) -> _CanonicalizeC2rReturn:
    """Canonicalize shape and dim arguments for n-dimensional c2r transforms,
    as well as calculating the last_dim_size which is shape[dim[-1]] for the output

    ``fname`` is only used in error messages.
    """
    (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    torch._check(len(shape) > 0, lambda: f"{fname} must transform at least one axis")

    # The real output length defaults to the inverse of the onesided size.
    if s is None or s[-1] == -1:
        last_dim_size = 2 * (input.shape[dim[-1]] - 1)
    else:
        last_dim_size = shape[-1]

    torch._check(
        last_dim_size >= 1,
        lambda: f"Invalid number of data points ({last_dim_size}) specified",
    )

    # The complex input stores only the onesided half along the last dim.
    shape_list = list(shape)
    shape_list[-1] = last_dim_size // 2 + 1
    return _CanonicalizeC2rReturn(
        shape=tuple(shape_list), dim=dim, last_dim_size=last_dim_size
    )
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
@register_decomposition(aten.fft_irfftn)
@out_wrapper()
def irfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference implementation of torch.fft.irfftn (inverse of rfftn; real output)."""
    shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
        "irfftn", input, s, dim
    )
    input = _maybe_promote_tensor_fft(input, require_complex=True)
    input = _resize_fft_input(input, dim, shape)
    out = prims.fft_c2r(input, dim=dim, last_dim_size=last_dim_size)
    # Normalize by the real output signal size (out.shape[dim[-1]] is
    # last_dim_size, not the onesided input length).
    return _apply_norm(out, norm, _prod(out.shape[d] for d in dim), forward=False)
|
| 480 |
+
|
| 481 |
+
|
| 482 |
+
@register_decomposition(aten.fft_hfftn)
@out_wrapper()
def hfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    """Reference implementation of torch.fft.hfftn (forward FFT of a signal
    with Hermitian-symmetric spectrum, producing a real output)."""
    shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
        "hfftn", input, s, dim
    )
    input = _maybe_promote_tensor_fft(input, require_complex=True)
    input = _resize_fft_input(input, dim, shape)

    # Forward c2c over the leading axes, then conjugate and finish with a
    # c2r over the last axis.  The normalization is split to match: the c2c
    # part is normalized by prod(shape[:-1]), the c2r part by last_dim_size.
    tmp = prims.fft_c2c(input, dim=dim[:-1], forward=True) if len(dim) > 1 else input
    tmp = _apply_norm(tmp, norm, _prod(shape[:-1]), forward=True)
    tmp = prims.conj_physical(tmp)
    out = prims.fft_c2r(tmp, dim=dim[-1:], last_dim_size=last_dim_size)
    return _apply_norm(out, norm, last_dim_size, forward=True)
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
@register_decomposition(aten.fft_fft2)
|
| 504 |
+
@out_wrapper()
|
| 505 |
+
def fft2(
|
| 506 |
+
input: TensorLikeType,
|
| 507 |
+
s: Optional[ShapeType] = None,
|
| 508 |
+
dim: Optional[DimsType] = (-2, -1),
|
| 509 |
+
norm: NormType = None,
|
| 510 |
+
) -> TensorLikeType:
|
| 511 |
+
return torch.fft.fftn(input, s=s, dim=dim, norm=norm)
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
@register_decomposition(aten.fft_ifft2)
|
| 515 |
+
@out_wrapper()
|
| 516 |
+
def ifft2(
|
| 517 |
+
input: TensorLikeType,
|
| 518 |
+
s: Optional[ShapeType] = None,
|
| 519 |
+
dim: Optional[DimsType] = (-2, -1),
|
| 520 |
+
norm: NormType = None,
|
| 521 |
+
) -> TensorLikeType:
|
| 522 |
+
return torch.fft.ifftn(input, s=s, dim=dim, norm=norm)
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
@register_decomposition(aten.fft_rfft2)
|
| 526 |
+
@out_wrapper()
|
| 527 |
+
def rfft2(
|
| 528 |
+
input: TensorLikeType,
|
| 529 |
+
s: Optional[ShapeType] = None,
|
| 530 |
+
dim: Optional[DimsType] = (-2, -1),
|
| 531 |
+
norm: NormType = None,
|
| 532 |
+
) -> TensorLikeType:
|
| 533 |
+
return torch.fft.rfftn(input, s=s, dim=dim, norm=norm)
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
@register_decomposition(aten.fft_irfft2)
|
| 537 |
+
@out_wrapper()
|
| 538 |
+
def irfft2(
|
| 539 |
+
input: TensorLikeType,
|
| 540 |
+
s: Optional[ShapeType] = None,
|
| 541 |
+
dim: Optional[DimsType] = (-2, -1),
|
| 542 |
+
norm: NormType = None,
|
| 543 |
+
) -> TensorLikeType:
|
| 544 |
+
return torch.fft.irfftn(input, s=s, dim=dim, norm=norm)
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
@register_decomposition(aten.fft_hfft2)
|
| 548 |
+
@out_wrapper()
|
| 549 |
+
def hfft2(
|
| 550 |
+
input: TensorLikeType,
|
| 551 |
+
s: Optional[ShapeType] = None,
|
| 552 |
+
dim: Optional[DimsType] = (-2, -1),
|
| 553 |
+
norm: NormType = None,
|
| 554 |
+
) -> TensorLikeType:
|
| 555 |
+
return torch.fft.hfftn(input, s=s, dim=dim, norm=norm)
|
| 556 |
+
|
| 557 |
+
|
| 558 |
+
@register_decomposition(aten.fft_ihfft2)
|
| 559 |
+
@out_wrapper()
|
| 560 |
+
def ihfft2(
|
| 561 |
+
input: TensorLikeType,
|
| 562 |
+
s: Optional[ShapeType] = None,
|
| 563 |
+
dim: Optional[DimsType] = (-2, -1),
|
| 564 |
+
norm: NormType = None,
|
| 565 |
+
) -> TensorLikeType:
|
| 566 |
+
return torch.fft.ihfftn(input, s=s, dim=dim, norm=norm)
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
def _default_alldims(dim: Optional[DimsType], x: TensorLikeType) -> List[int]:
|
| 570 |
+
"""Convert Optional[DimsType] to a simple list, defaulting to all dimensions"""
|
| 571 |
+
if dim is None:
|
| 572 |
+
return list(range(x.ndim))
|
| 573 |
+
elif not isinstance(dim, Sequence):
|
| 574 |
+
return [dim]
|
| 575 |
+
else:
|
| 576 |
+
return list(dim)
|
| 577 |
+
|
| 578 |
+
|
| 579 |
+
@register_decomposition(aten.fft_fftshift)
|
| 580 |
+
def fftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
|
| 581 |
+
dims = _default_alldims(dim, input)
|
| 582 |
+
shift = [input.shape[d] // 2 for d in dims]
|
| 583 |
+
return torch.roll(input, shift, dims)
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
@register_decomposition(aten.fft_ifftshift)
|
| 587 |
+
def ifftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
|
| 588 |
+
dims = _default_alldims(dim, input)
|
| 589 |
+
shift = [(input.shape[d] + 1) // 2 for d in dims]
|
| 590 |
+
return torch.roll(input, shift, dims)
|
pllava/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py
ADDED
|
@@ -0,0 +1,309 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from functools import partial
|
| 3 |
+
from typing import Optional, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch._prims as prims
|
| 7 |
+
import torch._prims_common as utils
|
| 8 |
+
import torch._refs as refs
|
| 9 |
+
import torch._refs.linalg as linalg
|
| 10 |
+
from torch import Tensor
|
| 11 |
+
from torch._prims_common import (
|
| 12 |
+
check_fp_or_complex,
|
| 13 |
+
check_is_matrix,
|
| 14 |
+
Dim,
|
| 15 |
+
DimsType,
|
| 16 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND,
|
| 17 |
+
IntLike,
|
| 18 |
+
TensorLikeType,
|
| 19 |
+
)
|
| 20 |
+
from torch._prims_common.wrappers import (
|
| 21 |
+
_maybe_convert_to_dtype,
|
| 22 |
+
elementwise_type_promotion_wrapper,
|
| 23 |
+
out_wrapper,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
__all__ = [
|
| 28 |
+
"diagonal",
|
| 29 |
+
"matrix_norm",
|
| 30 |
+
"norm",
|
| 31 |
+
"svd",
|
| 32 |
+
"svdvals",
|
| 33 |
+
"vector_norm",
|
| 34 |
+
"vecdot",
|
| 35 |
+
"cross",
|
| 36 |
+
]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _check_norm_dtype(dtype: Optional[torch.dtype], x_dtype: torch.dtype, fn_name: str):
|
| 40 |
+
"""
|
| 41 |
+
Checks related to the dtype kwarg in `linalg.*norm` functions
|
| 42 |
+
"""
|
| 43 |
+
if dtype is not None:
|
| 44 |
+
torch._check(
|
| 45 |
+
utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype),
|
| 46 |
+
lambda: f"{fn_name}: dtype should be floating point or complex. Got {dtype}",
|
| 47 |
+
)
|
| 48 |
+
torch._check(
|
| 49 |
+
utils.is_complex_dtype(dtype) == utils.is_complex_dtype(x_dtype),
|
| 50 |
+
lambda: "{fn_name}: dtype should be {d} for {d} inputs. Got {dtype}".format(
|
| 51 |
+
fn_name=fn_name,
|
| 52 |
+
d="complex" if utils.is_complex_dtype(x_dtype) else "real",
|
| 53 |
+
dtype=dtype,
|
| 54 |
+
),
|
| 55 |
+
)
|
| 56 |
+
torch._check(
|
| 57 |
+
utils.get_higher_dtype(dtype, x_dtype) == dtype,
|
| 58 |
+
lambda: f"{fn_name}: the dtype of the input ({x_dtype}) should be convertible "
|
| 59 |
+
"without narrowing to the specified dtype ({dtype})",
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
import operator
|
| 64 |
+
|
| 65 |
+
# Utilities should come BEFORE this import
|
| 66 |
+
from torch._decomp import register_decomposition
|
| 67 |
+
from torch._decomp.decompositions import pw_cast_for_opmath
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@register_decomposition(torch._ops.ops.aten.linalg_cross)
|
| 71 |
+
@out_wrapper()
|
| 72 |
+
@pw_cast_for_opmath
|
| 73 |
+
def cross(a: Tensor, b: Tensor, dim: int = -1):
|
| 74 |
+
torch._check(
|
| 75 |
+
a.ndim == b.ndim,
|
| 76 |
+
lambda: "linalg.cross: inputs must have the same number of dimensions.",
|
| 77 |
+
)
|
| 78 |
+
torch._check(
|
| 79 |
+
a.size(dim) == 3 and b.size(dim) == 3,
|
| 80 |
+
lambda: f"linalg.cross: inputs dim {dim} must have length 3, got {a.size(dim)} and {b.size(dim)}",
|
| 81 |
+
)
|
| 82 |
+
a, b = torch.broadcast_tensors(a, b)
|
| 83 |
+
dim = utils.canonicalize_dim(a.ndim, dim)
|
| 84 |
+
idx = torch.arange(3, device=a.device)
|
| 85 |
+
return a.index_select(dim, (idx + 1) % 3) * b.index_select(
|
| 86 |
+
dim, (idx + 2) % 3
|
| 87 |
+
) - a.index_select(dim, (idx + 2) % 3) * b.index_select(dim, (idx + 1) % 3)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def diagonal(
    input: TensorLikeType,
    *,
    offset: int = 0,
    dim1: int = -2,
    dim2: int = -1,
) -> TensorLikeType:
    """torch.diagonal with linalg's keyword-only calling convention and
    matrix-oriented defaults (last two dimensions)."""
    result = torch.diagonal(input, offset=offset, dim1=dim1, dim2=dim2)
    return result
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
@register_decomposition(torch._ops.ops.aten.linalg_vector_norm)
|
| 101 |
+
@out_wrapper(exact_dtype=True)
|
| 102 |
+
def vector_norm(
|
| 103 |
+
x: TensorLikeType,
|
| 104 |
+
ord: Union[float, int] = 2,
|
| 105 |
+
dim: Optional[DimsType] = None,
|
| 106 |
+
keepdim: bool = False,
|
| 107 |
+
*,
|
| 108 |
+
dtype: Optional[torch.dtype] = None,
|
| 109 |
+
) -> Tensor:
|
| 110 |
+
from torch.fx.experimental.symbolic_shapes import guard_size_oblivious
|
| 111 |
+
|
| 112 |
+
# Checks
|
| 113 |
+
check_fp_or_complex(x.dtype, "linalg.vector_norm")
|
| 114 |
+
|
| 115 |
+
if isinstance(dim, Dim):
|
| 116 |
+
dim = [dim] # type: ignore[assignment]
|
| 117 |
+
|
| 118 |
+
if guard_size_oblivious(x.numel() == 0) and (ord < 0.0 or ord == float("inf")):
|
| 119 |
+
torch._check(
|
| 120 |
+
dim is not None and len(dim) != 0,
|
| 121 |
+
lambda: f"linalg.vector_norm cannot compute the {ord} norm on an empty tensor "
|
| 122 |
+
"because the operation does not have an identity",
|
| 123 |
+
)
|
| 124 |
+
shape = x.shape
|
| 125 |
+
assert dim is not None # mypy does not seem to be able to see through check?
|
| 126 |
+
for d in dim:
|
| 127 |
+
torch._check(
|
| 128 |
+
shape[d] != 0,
|
| 129 |
+
lambda: f"linalg.vector_norm cannot compute the {ord} norm on the "
|
| 130 |
+
f"dimension {d} because this dimension is empty and the "
|
| 131 |
+
"operation does not have an identity",
|
| 132 |
+
)
|
| 133 |
+
_check_norm_dtype(dtype, x.dtype, "linalg.vector_norm")
|
| 134 |
+
|
| 135 |
+
computation_dtype, result_dtype = utils.reduction_dtypes(
|
| 136 |
+
x, utils.REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, dtype
|
| 137 |
+
)
|
| 138 |
+
|
| 139 |
+
to_result_dtype = partial(_maybe_convert_to_dtype, dtype=result_dtype)
|
| 140 |
+
|
| 141 |
+
# Implementation
|
| 142 |
+
if ord == 0.0:
|
| 143 |
+
return torch.sum(torch.ne(x, 0.0), dim=dim, keepdim=keepdim, dtype=result_dtype)
|
| 144 |
+
elif ord == float("inf"):
|
| 145 |
+
return to_result_dtype(torch.amax(torch.abs(x), dim=dim, keepdim=keepdim)) # type: ignore[return-value,arg-type]
|
| 146 |
+
elif ord == float("-inf"):
|
| 147 |
+
return to_result_dtype(torch.amin(torch.abs(x), dim=dim, keepdim=keepdim)) # type: ignore[return-value,arg-type]
|
| 148 |
+
else:
|
| 149 |
+
# From here on the computation dtype is important as the reduction is non-trivial
|
| 150 |
+
x = _maybe_convert_to_dtype(x, computation_dtype) # type: ignore[assignment]
|
| 151 |
+
reduce_sum = partial(torch.sum, dim=dim, keepdim=keepdim)
|
| 152 |
+
|
| 153 |
+
is_ord_even = ord % 2 == 0 if isinstance(ord, IntLike) else ord % 2.0 == 0.0
|
| 154 |
+
if not (is_ord_even and utils.is_float_dtype(x.dtype)):
|
| 155 |
+
x = torch.abs(x)
|
| 156 |
+
return to_result_dtype(torch.pow(reduce_sum(torch.pow(x, ord)), 1.0 / ord)) # type: ignore[return-value]
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def _backshift_permutation(dim0, dim1, ndim):
|
| 160 |
+
# Auxiliary function for matrix_norm
|
| 161 |
+
# Computes the permutation that moves the two given dimensions to the back
|
| 162 |
+
ret = [i for i in range(ndim) if i != dim0 and i != dim1]
|
| 163 |
+
ret.extend((dim0, dim1))
|
| 164 |
+
return ret
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def _inverse_permutation(perm):
|
| 168 |
+
# Given a permutation, returns its inverse. It's equivalent to argsort on an array
|
| 169 |
+
return [i for i, j in sorted(enumerate(perm), key=operator.itemgetter(1))]
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
# CompositeImplicitAutograd
|
| 173 |
+
@out_wrapper(exact_dtype=True)
|
| 174 |
+
def matrix_norm(
|
| 175 |
+
A: TensorLikeType,
|
| 176 |
+
ord: Union[float, str] = "fro",
|
| 177 |
+
dim: DimsType = (-2, -1),
|
| 178 |
+
keepdim: bool = False,
|
| 179 |
+
*,
|
| 180 |
+
dtype: Optional[torch.dtype] = None,
|
| 181 |
+
) -> TensorLikeType:
|
| 182 |
+
# shape
|
| 183 |
+
check_is_matrix(A, "linalg.matrix_norm")
|
| 184 |
+
# dim
|
| 185 |
+
dim = utils.canonicalize_dims(A.ndim, dim)
|
| 186 |
+
if isinstance(dim, Dim):
|
| 187 |
+
dim = (dim,) # type: ignore[assignment]
|
| 188 |
+
torch._check(
|
| 189 |
+
len(dim) == 2, lambda: "linalg.matrix_norm: dim must be a 2-tuple. Got {dim}"
|
| 190 |
+
)
|
| 191 |
+
torch._check(
|
| 192 |
+
dim[0] != dim[1],
|
| 193 |
+
lambda: "linalg.matrix_norm: dims must be different. Got ({dim[0]}, {dim[1]})",
|
| 194 |
+
)
|
| 195 |
+
# dtype arg
|
| 196 |
+
_check_norm_dtype(dtype, A.dtype, "linalg.matrix_norm")
|
| 197 |
+
|
| 198 |
+
if isinstance(ord, str):
|
| 199 |
+
# ord
|
| 200 |
+
torch._check(
|
| 201 |
+
ord in ("fro", "nuc"),
|
| 202 |
+
lambda: "linalg.matrix_norm: Order {ord} not supported.",
|
| 203 |
+
)
|
| 204 |
+
# dtype
|
| 205 |
+
check_fp_or_complex(
|
| 206 |
+
A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != "nuc"
|
| 207 |
+
)
|
| 208 |
+
|
| 209 |
+
if ord == "fro":
|
| 210 |
+
return vector_norm(A, 2, dim, keepdim, dtype=dtype)
|
| 211 |
+
else: # ord == "nuc"
|
| 212 |
+
if dtype is not None:
|
| 213 |
+
A = _maybe_convert_to_dtype(A, dtype) # type: ignore[assignment]
|
| 214 |
+
perm = _backshift_permutation(dim[0], dim[1], A.ndim)
|
| 215 |
+
result = torch.sum(svdvals(prims.transpose(A, perm)), -1, keepdim)
|
| 216 |
+
if keepdim:
|
| 217 |
+
inv_perm = _inverse_permutation(perm)
|
| 218 |
+
result = prims.transpose(torch.unsqueeze(result, -1), inv_perm)
|
| 219 |
+
return result
|
| 220 |
+
else:
|
| 221 |
+
# ord
|
| 222 |
+
abs_ord = abs(ord)
|
| 223 |
+
torch._check(
|
| 224 |
+
abs_ord in (2, 1, float("inf")),
|
| 225 |
+
lambda: "linalg.matrix_norm: Order {ord} not supported.",
|
| 226 |
+
)
|
| 227 |
+
# dtype
|
| 228 |
+
check_fp_or_complex(
|
| 229 |
+
A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != 2
|
| 230 |
+
)
|
| 231 |
+
|
| 232 |
+
max_min = partial(torch.amax if ord > 0.0 else torch.amin, keepdim=keepdim)
|
| 233 |
+
|
| 234 |
+
if abs_ord == 2.0:
|
| 235 |
+
if dtype is not None:
|
| 236 |
+
A = _maybe_convert_to_dtype(A, dtype) # type: ignore[assignment]
|
| 237 |
+
perm = _backshift_permutation(dim[0], dim[1], A.ndim)
|
| 238 |
+
result = max_min(svdvals(prims.transpose(A, perm)), dim=-1)
|
| 239 |
+
if keepdim:
|
| 240 |
+
inv_perm = _inverse_permutation(perm)
|
| 241 |
+
result = prims.transpose(torch.unsqueeze(result, -1), inv_perm)
|
| 242 |
+
return result
|
| 243 |
+
else: # 1, -1, inf, -inf
|
| 244 |
+
dim0, dim1 = dim
|
| 245 |
+
if abs_ord == float("inf"):
|
| 246 |
+
dim0, dim1 = dim1, dim0
|
| 247 |
+
if not keepdim and (dim0 < dim1):
|
| 248 |
+
dim1 -= 1
|
| 249 |
+
return max_min(
|
| 250 |
+
vector_norm(A, 1.0, dim=dim0, keepdim=keepdim, dtype=dtype), dim1
|
| 251 |
+
)
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
# CompositeImplicitAutograd
|
| 255 |
+
@out_wrapper(exact_dtype=True)
|
| 256 |
+
def norm(
|
| 257 |
+
A: TensorLikeType,
|
| 258 |
+
ord: Optional[Union[float, str]] = None,
|
| 259 |
+
dim: Optional[DimsType] = None,
|
| 260 |
+
keepdim: bool = False,
|
| 261 |
+
*,
|
| 262 |
+
dtype: Optional[torch.dtype] = None,
|
| 263 |
+
) -> TensorLikeType:
|
| 264 |
+
if dim is not None:
|
| 265 |
+
if isinstance(dim, Dim):
|
| 266 |
+
dim = (dim,) # type: ignore[assignment]
|
| 267 |
+
torch._check(
|
| 268 |
+
len(dim) in (1, 2),
|
| 269 |
+
lambda: "linalg.norm: If dim is specified, it must be of length 1 or 2. Got {dim}",
|
| 270 |
+
)
|
| 271 |
+
elif ord is not None:
|
| 272 |
+
torch._check(
|
| 273 |
+
A.ndim in (1, 2),
|
| 274 |
+
lambda: "linalg.norm: If dim is not specified but ord is, the input must be 1D or 2D. Got {A.ndim}D",
|
| 275 |
+
)
|
| 276 |
+
|
| 277 |
+
if ord is not None and (
|
| 278 |
+
(dim is not None and len(dim) == 2) or (dim is None and A.ndim == 2)
|
| 279 |
+
):
|
| 280 |
+
if dim is None:
|
| 281 |
+
dim = (0, 1)
|
| 282 |
+
return matrix_norm(A, ord, dim, keepdim, dtype=dtype)
|
| 283 |
+
else:
|
| 284 |
+
if ord is None:
|
| 285 |
+
ord = 2.0
|
| 286 |
+
return vector_norm(A, ord, dim, keepdim, dtype=dtype) # type: ignore[arg-type]
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
# CompositeImplicitAutograd
|
| 290 |
+
@out_wrapper("U", "S", "Vh", exact_dtype=True)
|
| 291 |
+
def svd(A: TensorLikeType, full_matrices: bool = True) -> Tuple[Tensor, Tensor, Tensor]:
|
| 292 |
+
return prims.svd(A, full_matrices=full_matrices)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
# CompositeImplicitAutograd
|
| 296 |
+
@out_wrapper(exact_dtype=True)
|
| 297 |
+
def svdvals(A: TensorLikeType) -> Tensor:
|
| 298 |
+
return svd(A, full_matrices=False)[1]
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
# CompositeImplicitAutograd
|
| 302 |
+
@out_wrapper()
|
| 303 |
+
@elementwise_type_promotion_wrapper(
|
| 304 |
+
type_promoting_args=("x", "y"),
|
| 305 |
+
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
|
| 306 |
+
)
|
| 307 |
+
def vecdot(x: Tensor, y: Tensor, dim: int = -1) -> Tensor:
|
| 308 |
+
check_fp_or_complex(x.dtype, "linalg.vecdot")
|
| 309 |
+
return (x.conj() * y).sum(dim=dim)
|
pllava/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (9.03 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py
ADDED
|
@@ -0,0 +1,1279 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
import math
|
| 4 |
+
from functools import wraps
|
| 5 |
+
from typing import Callable, Optional, Union
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch._prims as prims
|
| 9 |
+
import torch._prims_common as utils
|
| 10 |
+
import torch._refs as refs
|
| 11 |
+
from torch._decomp import register_decomposition
|
| 12 |
+
from torch._prims_common import (
|
| 13 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND,
|
| 14 |
+
NumberType,
|
| 15 |
+
ShapeType,
|
| 16 |
+
TensorLike,
|
| 17 |
+
TensorLikeType,
|
| 18 |
+
)
|
| 19 |
+
from torch._prims_common.wrappers import (
|
| 20 |
+
elementwise_type_promotion_wrapper,
|
| 21 |
+
elementwise_unary_scalar_wrapper,
|
| 22 |
+
out_wrapper,
|
| 23 |
+
)
|
| 24 |
+
from torch._refs import _make_inplace
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
__all__ = [
|
| 28 |
+
"alpha_dropout",
|
| 29 |
+
"celu",
|
| 30 |
+
"celu_",
|
| 31 |
+
"channel_shuffle",
|
| 32 |
+
"dropout",
|
| 33 |
+
"elu",
|
| 34 |
+
"elu_",
|
| 35 |
+
"gelu",
|
| 36 |
+
"glu",
|
| 37 |
+
"group_norm",
|
| 38 |
+
"hardshrink",
|
| 39 |
+
"hardtanh",
|
| 40 |
+
"hinge_embedding_loss",
|
| 41 |
+
"huber_loss",
|
| 42 |
+
"l1_loss",
|
| 43 |
+
"layer_norm",
|
| 44 |
+
"leaky_relu",
|
| 45 |
+
"log_softmax",
|
| 46 |
+
"margin_ranking_loss",
|
| 47 |
+
"mish",
|
| 48 |
+
"mish_",
|
| 49 |
+
"mse_loss",
|
| 50 |
+
"nll_loss",
|
| 51 |
+
"pairwise_distance",
|
| 52 |
+
"pdist",
|
| 53 |
+
"poisson_nll_loss",
|
| 54 |
+
"prelu",
|
| 55 |
+
"relu",
|
| 56 |
+
"relu6",
|
| 57 |
+
"selu",
|
| 58 |
+
"selu_",
|
| 59 |
+
"smooth_l1_loss",
|
| 60 |
+
"softmax",
|
| 61 |
+
"softmin",
|
| 62 |
+
"softplus",
|
| 63 |
+
"softshrink",
|
| 64 |
+
"tanhshrink",
|
| 65 |
+
"threshold",
|
| 66 |
+
"threshold_",
|
| 67 |
+
"triplet_margin_loss",
|
| 68 |
+
]
|
| 69 |
+
|
| 70 |
+
Tensor = torch.Tensor
|
| 71 |
+
aten = torch._ops.ops.aten
|
| 72 |
+
DispatchKey = torch._C.DispatchKey # type: ignore[attr-defined]
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def _dropout_helper(
|
| 76 |
+
self: TensorLikeType,
|
| 77 |
+
val: float,
|
| 78 |
+
) -> TensorLikeType:
|
| 79 |
+
"""
|
| 80 |
+
Helper function for all dropout-type operators. During training,
|
| 81 |
+
some of the elements of the input tensor are randomly masked.
|
| 82 |
+
|
| 83 |
+
Returns the masked tensor of the boolean values.
|
| 84 |
+
|
| 85 |
+
"""
|
| 86 |
+
|
| 87 |
+
return (
|
| 88 |
+
refs._uniform_helper(
|
| 89 |
+
self.shape, low=0.0, high=1.0, dtype=torch.float32, device=self.device
|
| 90 |
+
)
|
| 91 |
+
< val
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
@register_decomposition(aten.alpha_dropout)
|
| 96 |
+
def alpha_dropout(
|
| 97 |
+
self: TensorLikeType, p: float = 0.5, training: bool = False, inplace: bool = False
|
| 98 |
+
) -> TensorLikeType:
|
| 99 |
+
if inplace:
|
| 100 |
+
raise NotImplementedError
|
| 101 |
+
|
| 102 |
+
if not training:
|
| 103 |
+
return self
|
| 104 |
+
|
| 105 |
+
torch._check(
|
| 106 |
+
p <= 1 and p >= 0,
|
| 107 |
+
lambda: f"dropout probability has to be between 0 and 1, but got, {p}",
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
if p == 1:
|
| 111 |
+
return torch.zeros_like(self)
|
| 112 |
+
|
| 113 |
+
if p == 0:
|
| 114 |
+
return self
|
| 115 |
+
|
| 116 |
+
dropout_mask = _dropout_helper(self, 1 - p)
|
| 117 |
+
|
| 118 |
+
# From paper: Self-Normalizing Neural Networks (https://arxiv.org/pdf/1706.02515.pdf)
|
| 119 |
+
# alpha = - SELU.alpha * SELU.scale, here
|
| 120 |
+
# SELU.alpha = 1.6732632423543772848170429916717 and
|
| 121 |
+
# SELU.scale = 1.0507009873554804934193349852946
|
| 122 |
+
alpha = -1.7580993408473766
|
| 123 |
+
|
| 124 |
+
a = 1.0 / math.sqrt((alpha * alpha * p + 1) * (1 - p))
|
| 125 |
+
b = torch.logical_not(dropout_mask)
|
| 126 |
+
b = b * (alpha * a) + alpha * a * p
|
| 127 |
+
dropout_mask = a * dropout_mask
|
| 128 |
+
|
| 129 |
+
return self * dropout_mask + b
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def _inplace_wrapper(fn):
|
| 133 |
+
"""
|
| 134 |
+
Given a nn.functional non-linearity, implements its `inplace: bool` argument
|
| 135 |
+
"""
|
| 136 |
+
|
| 137 |
+
# nb. We use the name of the first argument used in the unary references
|
| 138 |
+
@wraps(fn)
|
| 139 |
+
def _fn(a, *args, inplace=False, **kwargs):
|
| 140 |
+
if inplace:
|
| 141 |
+
torch._check(
|
| 142 |
+
"out" not in kwargs,
|
| 143 |
+
lambda: "Cannot set inplace=True and pass out= at the same time",
|
| 144 |
+
)
|
| 145 |
+
return fn(a, *args, inplace=False, out=a, **kwargs)
|
| 146 |
+
else:
|
| 147 |
+
return fn(a, *args, inplace=False, **kwargs)
|
| 148 |
+
|
| 149 |
+
return _fn
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
# celu is implemented specially because it has an alpha argument
|
| 153 |
+
# celu is very similar to elu
|
| 154 |
+
@register_decomposition(aten.celu)
|
| 155 |
+
@_inplace_wrapper
|
| 156 |
+
@out_wrapper()
|
| 157 |
+
@elementwise_type_promotion_wrapper(
|
| 158 |
+
type_promoting_args=("a",),
|
| 159 |
+
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
|
| 160 |
+
)
|
| 161 |
+
def celu(
|
| 162 |
+
a: TensorLikeType, alpha: Optional[NumberType] = None, inplace: bool = False
|
| 163 |
+
) -> TensorLikeType:
|
| 164 |
+
"""
|
| 165 |
+
Reference implementation of torch.nn.functional.celu
|
| 166 |
+
"""
|
| 167 |
+
|
| 168 |
+
if inplace:
|
| 169 |
+
raise NotImplementedError
|
| 170 |
+
|
| 171 |
+
rhs: TensorLikeType
|
| 172 |
+
if alpha is not None:
|
| 173 |
+
python_type = utils.dtype_to_type(a.dtype)
|
| 174 |
+
if not utils.is_weakly_lesser_type(type(alpha), python_type):
|
| 175 |
+
msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!"
|
| 176 |
+
raise ValueError(msg)
|
| 177 |
+
rhs = alpha * torch.expm1(torch.true_divide(a, alpha)) # type: ignore[arg-type]
|
| 178 |
+
else:
|
| 179 |
+
rhs = torch.expm1(a)
|
| 180 |
+
|
| 181 |
+
return torch.where(a > 0, a, rhs)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
@_inplace_wrapper
|
| 185 |
+
@out_wrapper()
|
| 186 |
+
def dropout(
|
| 187 |
+
a: TensorLikeType, p: float = 0.5, training: bool = True, inplace: bool = False
|
| 188 |
+
) -> TensorLikeType:
|
| 189 |
+
if inplace:
|
| 190 |
+
raise NotImplementedError
|
| 191 |
+
|
| 192 |
+
if not training:
|
| 193 |
+
return a
|
| 194 |
+
|
| 195 |
+
torch._check(
|
| 196 |
+
p <= 1 and p >= 0,
|
| 197 |
+
lambda: f"dropout probability has to be between 0 and 1, but got, {p}",
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
if p == 1:
|
| 201 |
+
return torch.zeros_like(a)
|
| 202 |
+
|
| 203 |
+
if p == 0:
|
| 204 |
+
return a
|
| 205 |
+
|
| 206 |
+
scale = 1 / (1 - p)
|
| 207 |
+
dropout_mask = _dropout_helper(a, 1 - p)
|
| 208 |
+
|
| 209 |
+
return a * dropout_mask * scale
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
@register_decomposition(aten.elu)
|
| 213 |
+
@_inplace_wrapper
|
| 214 |
+
@out_wrapper()
|
| 215 |
+
@elementwise_type_promotion_wrapper(
|
| 216 |
+
type_promoting_args=("a",),
|
| 217 |
+
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
|
| 218 |
+
)
|
| 219 |
+
def elu(
|
| 220 |
+
a: TensorLikeType,
|
| 221 |
+
alpha: NumberType = 1.0,
|
| 222 |
+
scale: NumberType = 1.0,
|
| 223 |
+
input_scale: NumberType = 1.0,
|
| 224 |
+
inplace: bool = False,
|
| 225 |
+
) -> TensorLikeType:
|
| 226 |
+
"""
|
| 227 |
+
Reference implementation of torch.nn.functional.elu
|
| 228 |
+
"""
|
| 229 |
+
if inplace:
|
| 230 |
+
raise NotImplementedError
|
| 231 |
+
|
| 232 |
+
# nb. This should be factored out into a can_cast aux function
|
| 233 |
+
python_type = utils.dtype_to_type(a.dtype)
|
| 234 |
+
torch._check(
|
| 235 |
+
utils.is_weakly_lesser_type(type(input_scale), python_type),
|
| 236 |
+
lambda: f"input_scale argument of type {type(input_scale)} cannot be safely cast to type {python_type}!",
|
| 237 |
+
)
|
| 238 |
+
torch._check(
|
| 239 |
+
utils.is_weakly_lesser_type(type(scale), python_type),
|
| 240 |
+
lambda: f"scale argument of type {type(scale)} cannot be safely cast to type {python_type}!",
|
| 241 |
+
)
|
| 242 |
+
torch._check(
|
| 243 |
+
utils.is_weakly_lesser_type(type(alpha), python_type),
|
| 244 |
+
lambda: f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!",
|
| 245 |
+
)
|
| 246 |
+
|
| 247 |
+
return torch.where(a > 0, scale * a, (alpha * scale) * torch.expm1(a * input_scale))
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
@register_decomposition(aten.relu)
|
| 251 |
+
@_inplace_wrapper
|
| 252 |
+
@out_wrapper()
|
| 253 |
+
@elementwise_type_promotion_wrapper(
|
| 254 |
+
type_promoting_args=("a",),
|
| 255 |
+
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
|
| 256 |
+
)
|
| 257 |
+
def relu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
|
| 258 |
+
"""
|
| 259 |
+
Reference implementation of torch.nn.functional.relu
|
| 260 |
+
"""
|
| 261 |
+
|
| 262 |
+
if inplace:
|
| 263 |
+
raise NotImplementedError
|
| 264 |
+
|
| 265 |
+
return torch.where(torch.le(a, 0), 0, a)
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
@register_decomposition(aten.channel_shuffle)
|
| 269 |
+
@out_wrapper()
|
| 270 |
+
def channel_shuffle(input: TensorLikeType, groups: int) -> TensorLikeType:
|
| 271 |
+
"""
|
| 272 |
+
Reference implementation of :func:`torch.nn.functional.channel_shuffle`.
|
| 273 |
+
"""
|
| 274 |
+
from torch._meta_registrations import device_hint
|
| 275 |
+
|
| 276 |
+
torch._check(
|
| 277 |
+
input.dim() > 2,
|
| 278 |
+
lambda: f"channel_shuffle expects input with > 2 dims, but got input with sizes {list(input.size())}",
|
| 279 |
+
)
|
| 280 |
+
c = input.shape[1]
|
| 281 |
+
torch._check(
|
| 282 |
+
groups > 0,
|
| 283 |
+
lambda: f"Number of groups to divide channels in must be positive. Value of groups:{groups}",
|
| 284 |
+
)
|
| 285 |
+
torch._check(
|
| 286 |
+
(c % groups) == 0,
|
| 287 |
+
lambda: f"Number of channels must be divisible by groups. Got {c} channels and {groups} groups.",
|
| 288 |
+
)
|
| 289 |
+
n = input.shape[0]
|
| 290 |
+
cg = c // groups
|
| 291 |
+
dhw = input.shape[2:]
|
| 292 |
+
|
| 293 |
+
if input.numel() == 0 or (
|
| 294 |
+
device_hint(input) == "cuda" and (groups == 1 or groups == c)
|
| 295 |
+
):
|
| 296 |
+
return input.view(input.shape)
|
| 297 |
+
|
| 298 |
+
return (
|
| 299 |
+
input.reshape(n, groups, cg, *dhw)
|
| 300 |
+
.transpose(1, 2)
|
| 301 |
+
.reshape(input.shape)
|
| 302 |
+
.contiguous()
|
| 303 |
+
)
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
def group_norm(
|
| 307 |
+
input: Tensor,
|
| 308 |
+
num_groups: int,
|
| 309 |
+
weight: Optional[Tensor] = None,
|
| 310 |
+
bias: Optional[Tensor] = None,
|
| 311 |
+
eps: float = 1e-5,
|
| 312 |
+
) -> Tensor:
|
| 313 |
+
"""
|
| 314 |
+
Reference implementation of :func:`torch.nn.functional.group_norm`.
|
| 315 |
+
"""
|
| 316 |
+
torch._check(
|
| 317 |
+
input.ndim >= 2,
|
| 318 |
+
lambda: f"Expected at least 2 dimensions for input tensor but received {input.ndim}",
|
| 319 |
+
)
|
| 320 |
+
|
| 321 |
+
batch_size = input.shape[0]
|
| 322 |
+
num_channels = input.shape[1]
|
| 323 |
+
torch._check(
|
| 324 |
+
num_channels % num_groups == 0,
|
| 325 |
+
lambda: "Expected number of channels in input to be divisible by num_groups, "
|
| 326 |
+
+ f"but got input of shape {input.shape} and num_groups = {num_groups}",
|
| 327 |
+
)
|
| 328 |
+
|
| 329 |
+
# input shape is (N, C, *), so we flatten all inner dimensions except (N, C)
|
| 330 |
+
flattened_inner_size = 1
|
| 331 |
+
for dim_length in input.shape[2:]:
|
| 332 |
+
flattened_inner_size *= dim_length
|
| 333 |
+
|
| 334 |
+
return torch.native_group_norm(
|
| 335 |
+
input,
|
| 336 |
+
weight,
|
| 337 |
+
bias,
|
| 338 |
+
batch_size,
|
| 339 |
+
num_channels,
|
| 340 |
+
flattened_inner_size,
|
| 341 |
+
num_groups,
|
| 342 |
+
eps,
|
| 343 |
+
)[0]
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
def layer_norm(
|
| 347 |
+
input: Tensor,
|
| 348 |
+
normalized_shape: ShapeType,
|
| 349 |
+
weight: Optional[Tensor] = None,
|
| 350 |
+
bias: Optional[Tensor] = None,
|
| 351 |
+
eps: float = 1e-5,
|
| 352 |
+
) -> Tensor:
|
| 353 |
+
"""
|
| 354 |
+
Reference implementation of :func:`torch.nn.functional.layer_norm`.
|
| 355 |
+
"""
|
| 356 |
+
return torch.native_layer_norm(input, normalized_shape, weight, bias, eps)[0]
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
@register_decomposition(aten.leaky_relu)
|
| 360 |
+
@_inplace_wrapper
|
| 361 |
+
@out_wrapper()
|
| 362 |
+
@elementwise_type_promotion_wrapper(
|
| 363 |
+
type_promoting_args=("a",),
|
| 364 |
+
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
|
| 365 |
+
)
|
| 366 |
+
def leaky_relu(
|
| 367 |
+
a: TensorLikeType, negative_slope: float = 0.01, inplace: bool = False
|
| 368 |
+
) -> TensorLikeType:
|
| 369 |
+
"""
|
| 370 |
+
Reference implementation of torch.nn.functional.leaky_relu
|
| 371 |
+
"""
|
| 372 |
+
|
| 373 |
+
if inplace:
|
| 374 |
+
raise NotImplementedError
|
| 375 |
+
|
| 376 |
+
python_type = utils.dtype_to_type(a.dtype)
|
| 377 |
+
if not utils.is_weakly_lesser_type(type(negative_slope), python_type):
|
| 378 |
+
msg = f"negative_slope argument of type {type(negative_slope)} cannot be safely cast to type {python_type}!"
|
| 379 |
+
raise ValueError(msg)
|
| 380 |
+
return torch.where(torch.gt(a, 0), a, torch.mul(a, negative_slope))
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
@register_decomposition(aten.mish)
|
| 384 |
+
@_inplace_wrapper
|
| 385 |
+
@out_wrapper()
|
| 386 |
+
@elementwise_type_promotion_wrapper(
|
| 387 |
+
type_promoting_args=("a",),
|
| 388 |
+
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
|
| 389 |
+
)
|
| 390 |
+
def mish(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
|
| 391 |
+
"""
|
| 392 |
+
Reference implementation of torch.nn.functional.mish
|
| 393 |
+
"""
|
| 394 |
+
|
| 395 |
+
if inplace:
|
| 396 |
+
raise NotImplementedError
|
| 397 |
+
return a * torch.tanh(torch.nn.functional.softplus(a))
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
@register_decomposition(aten.selu)
|
| 401 |
+
@_inplace_wrapper
|
| 402 |
+
@out_wrapper()
|
| 403 |
+
@elementwise_type_promotion_wrapper(
|
| 404 |
+
type_promoting_args=("a",),
|
| 405 |
+
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
|
| 406 |
+
)
|
| 407 |
+
def selu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
|
| 408 |
+
"""
|
| 409 |
+
Reference implementation of torch.nn.functional.selu
|
| 410 |
+
"""
|
| 411 |
+
if inplace:
|
| 412 |
+
raise NotImplementedError
|
| 413 |
+
|
| 414 |
+
alpha = 1.6732632423543772848170429916717
|
| 415 |
+
scale = 1.0507009873554804934193349852946
|
| 416 |
+
|
| 417 |
+
rhs = alpha * torch.expm1(a)
|
| 418 |
+
|
| 419 |
+
return scale * torch.where(a > 0, a, rhs)
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
# Forwarding alias: the functional variant doesn't support the out kwarg
|
| 423 |
+
# CompositeImplicitAutograd - don't register decomp
|
| 424 |
+
def softmax(
|
| 425 |
+
a: TensorLikeType,
|
| 426 |
+
dim: Optional[int] = None,
|
| 427 |
+
_stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True)
|
| 428 |
+
dtype: Optional[torch.dtype] = None,
|
| 429 |
+
) -> TensorLikeType:
|
| 430 |
+
# The error is for compat with regular PyTorch, which has this behavior
|
| 431 |
+
# deprecated. For PrimTorch, it's fine to drop support for deprecated
|
| 432 |
+
# behavior because it requires explicit opt in. This error is to inform
|
| 433 |
+
# users how to update their calls.
|
| 434 |
+
torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X")
|
| 435 |
+
return torch.softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload]
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
# CompositeImplicitAutograd - don't register decomp
|
| 439 |
+
def softmin(
|
| 440 |
+
a: TensorLikeType,
|
| 441 |
+
dim: Optional[int] = None,
|
| 442 |
+
_stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True)
|
| 443 |
+
dtype: Optional[torch.dtype] = None,
|
| 444 |
+
) -> TensorLikeType:
|
| 445 |
+
# The error is for compat with regular PyTorch, which has this behavior
|
| 446 |
+
# deprecated. For PrimTorch, it's fine to drop support for deprecated
|
| 447 |
+
# behavior because it requires explicit opt in. This error is to inform
|
| 448 |
+
# users how to update their calls.
|
| 449 |
+
torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X")
|
| 450 |
+
return torch.softmax(a=-a, dim=dim, dtype=dtype) # type: ignore[call-overload]
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
# softplus is implemented specially because it has beta and threshold arguments
@register_decomposition(aten.softplus)
@_inplace_wrapper
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def softplus(
    a: TensorLikeType,
    beta: Optional[NumberType] = None,
    threshold: NumberType = 20,
    inplace: bool = False,
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.softplus

    Computes ``log1p(exp(a * beta)) / beta`` (just ``log1p(exp(a))`` when
    ``beta`` is None), but returns ``a`` unchanged wherever the (scaled)
    input exceeds ``threshold``, for numerical stability. The ``inplace``
    variant is not implemented for this reference.
    """
    if inplace:
        raise NotImplementedError

    rhs: TensorLikeType
    if beta is None:
        scaled_input = a
        rhs = torch.log1p(torch.exp(scaled_input))
    else:
        # beta must be safely castable to the Python type corresponding to
        # the tensor's dtype.
        python_type = utils.dtype_to_type(a.dtype)
        if not utils.is_weakly_lesser_type(type(beta), python_type):
            msg = f"beta argument of type {type(beta)} cannot be safely cast to type {python_type}!"
            raise ValueError(msg)
        scaled_input = a * beta
        rhs = torch.true_divide(torch.log1p(torch.exp(scaled_input)), beta)  # type: ignore[arg-type]

    return torch.where(scaled_input > threshold, a, rhs)
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
@aten.hardshrink.default.py_impl(DispatchKey.Autograd)
@register_decomposition(aten.hardshrink)
@out_wrapper()
def hardshrink(a: TensorLikeType, lambd: float = 0.5):
    """
    Reference implementation of torch.nn.functional.hardshrink.

    Keeps elements whose magnitude exceeds ``lambd`` and zeroes the rest:
        hardshrink(x) = x if |x| > lambd, else 0
    """
    small = torch.abs(a) <= lambd
    return torch.where(small, 0, a)
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
@aten.softshrink.default.py_impl(DispatchKey.Autograd)
@register_decomposition(aten.softshrink)
@out_wrapper()
def softshrink(a: TensorLikeType, lambd: float = 0.5):
    """
    Reference implementation of torch.nn.functional.softshrink.

    Shrinks each element toward zero by ``lambd``; elements whose magnitude
    is at most ``lambd`` become exactly zero:
        softshrink(x) = x - sign(x) * lambd if |x| > lambd, else 0
    """
    torch._check(
        lambd >= 0,
        lambda: f"lambda must be greater or equal to 0, but found to be {lambd}",
    )
    # A single torch.where generates better backward code; see
    # https://github.com/pytorch/pytorch/pull/107052#discussion_r1293748211
    shrunk = a - torch.sign(a) * lambd
    return torch.where(torch.abs(a) > lambd, shrunk, 0)
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
# Losses
|
| 519 |
+
def _reduction_int_to_str(reduction: int) -> str:
|
| 520 |
+
from torch._decomp.decompositions import Reduction
|
| 521 |
+
|
| 522 |
+
if reduction == Reduction.NONE.value:
|
| 523 |
+
return "none"
|
| 524 |
+
elif reduction == Reduction.MEAN.value:
|
| 525 |
+
return "mean"
|
| 526 |
+
elif reduction == Reduction.SUM.value:
|
| 527 |
+
return "sum"
|
| 528 |
+
else:
|
| 529 |
+
raise ValueError(f"{reduction} is not a valid value for reduction")
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
def _apply_loss_reduction(loss: TensorLikeType, reduction: str) -> TensorLikeType:
|
| 533 |
+
if reduction == "sum":
|
| 534 |
+
return torch.sum(loss)
|
| 535 |
+
elif reduction == "mean":
|
| 536 |
+
return torch.mean(loss)
|
| 537 |
+
else: # reduction == "none"
|
| 538 |
+
return loss
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
def _check_reduction_value(reduction: str):
|
| 542 |
+
if reduction not in ("mean", "sum", "none"):
|
| 543 |
+
raise ValueError(f"{reduction} is not a valid value for reduction")
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
# This helper function maps depreciated arguments, "size_average" and "reduce"
|
| 547 |
+
# to their corresponding "reduction" string argument
|
| 548 |
+
def _get_string_reduction_arg(
|
| 549 |
+
*, size_average: Optional[bool], reduce: Optional[bool]
|
| 550 |
+
) -> str:
|
| 551 |
+
if size_average is None:
|
| 552 |
+
size_average = True
|
| 553 |
+
if reduce is None:
|
| 554 |
+
reduce = True
|
| 555 |
+
if size_average and reduce:
|
| 556 |
+
ret = "mean"
|
| 557 |
+
elif reduce:
|
| 558 |
+
ret = "sum"
|
| 559 |
+
else:
|
| 560 |
+
ret = "none"
|
| 561 |
+
return ret
|
| 562 |
+
|
| 563 |
+
|
| 564 |
+
# CompositeImplicitAutograd - don't register decomp
@elementwise_type_promotion_wrapper(
    type_promoting_args=("input", "target"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
)
def l1_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    size_average: Optional[bool] = None,
    reduce: Optional[bool] = None,
    reduction: str = "mean",
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.l1_loss

    Mean/sum/none reduction of ``|input - target|``.
    """
    if size_average is not None or reduce is not None:
        # Deprecated flags override `reduction`, matching eager-mode handling.
        # TODO: Raise exception instead of converting value. This is only for
        # primTorch since it can drop support for deprecated arguments.
        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
    _check_reduction_value(reduction)
    return _apply_loss_reduction(torch.abs(input - target), reduction)
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
@elementwise_type_promotion_wrapper(
    type_promoting_args=("input", "target"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
)
def smooth_l1_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    size_average: Optional[bool] = None,
    reduce: Optional[bool] = None,
    reduction: str = "mean",
    beta: float = 1.0,
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.smooth_l1_loss

    Quadratic for absolute errors below ``beta`` and linear above;
    ``beta == 0`` degenerates to an l1 loss.
    """
    if size_average is not None or reduce is not None:
        # Deprecated flags override `reduction`, matching eager-mode handling.
        # TODO: Raise exception instead of converting value. This is only for
        # primTorch since it can drop support for deprecated arguments.
        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
    _check_reduction_value(reduction)

    if beta == 0.0:
        # Delegates to l1_loss; the deprecated flags are forwarded unchanged.
        return torch.nn.functional.l1_loss(
            input, target, size_average=size_average, reduce=reduce, reduction=reduction
        )

    abs_diff = torch.abs(input - target)
    loss = torch.where(abs_diff < beta, 0.5 * abs_diff**2 / beta, abs_diff - 0.5 * beta)
    return _apply_loss_reduction(loss, reduction)
|
| 619 |
+
|
| 620 |
+
|
| 621 |
+
# Forwarding alias: the functional variant doesn't support the out kwarg
|
| 622 |
+
# CompositeImplicitAutograd - don't register decomp
|
| 623 |
+
def log_softmax(
|
| 624 |
+
a: TensorLikeType,
|
| 625 |
+
dim: Optional[int] = None,
|
| 626 |
+
_stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True)
|
| 627 |
+
dtype: Optional[torch.dtype] = None,
|
| 628 |
+
) -> TensorLikeType:
|
| 629 |
+
# The error is for compat with regular PyTorch, which has this behavior
|
| 630 |
+
# deprecated. For PrimTorch, it's fine to drop support for deprecated
|
| 631 |
+
# behavior because it requires explicit opt in. This error is to inform
|
| 632 |
+
# users how to update their calls.
|
| 633 |
+
torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X")
|
| 634 |
+
return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload]
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
@register_decomposition(aten.margin_ranking_loss)
def margin_ranking_loss(
    input1: TensorLikeType,
    input2: TensorLikeType,
    target: TensorLikeType,
    margin: float = 0.0,
    reduction: str = "mean",
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.margin_ranking_loss.

    Per-element loss is ``max(0, -target * (input1 - input2) + margin)``,
    followed by the requested reduction.
    """
    if not (input1.ndim == input2.ndim == target.ndim):
        raise RuntimeError(
            "margin_ranking_loss : All input tensors should have same dimension but got sizes: "
            f"input1: {input1.shape}, input2: {input2.shape}, target: {target.shape} "
        )
    _check_reduction_value(reduction)
    raw = -target * (input1 - input2) + margin
    return _apply_loss_reduction(torch.clamp_min(raw, 0), reduction)
|
| 654 |
+
|
| 655 |
+
|
| 656 |
+
@elementwise_type_promotion_wrapper(
    type_promoting_args=("input", "target"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
)
def mse_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    size_average: Optional[bool] = None,
    reduce: Optional[bool] = None,
    reduction: str = "mean",
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.mse_loss

    Mean/sum/none reduction of ``(input - target) ** 2``.
    """
    if size_average is not None or reduce is not None:
        # Deprecated flags override `reduction`, matching eager-mode handling.
        # TODO: Raise exception instead of converting value. This is only for
        # primTorch since it can drop support for deprecated arguments.
        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
    _check_reduction_value(reduction)
    squared_error = torch.pow(input - target, 2)
    return _apply_loss_reduction(squared_error, reduction)
|
| 675 |
+
|
| 676 |
+
|
| 677 |
+
@register_decomposition(aten.hinge_embedding_loss)
def hinge_embedding_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    margin: float = 1.0,
    reduction: str = "mean",
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.hinge_embedding_loss.

    Per-element loss:
        input                    where target == 1
        max(0, margin - input)   where target == -1
    """
    _check_reduction_value(reduction)
    margin_clamp = torch.clamp_min(margin - input, 0)
    # Each branch is masked independently and the two are summed.
    output_margin = torch.where(target != 1, margin_clamp, 0)
    output_self = torch.where(target != -1, input, 0)
    return _apply_loss_reduction(output_margin + output_self, reduction)
|
| 692 |
+
|
| 693 |
+
|
| 694 |
+
def _nll_loss_nd(
    input: TensorLikeType,
    target: TensorLikeType,
    weight: Optional[TensorLikeType],
    reduction: str,
    ignore_index: int,
) -> TensorLikeType:
    """Core NLL-loss computation for 1-D, 2-D, and 3-D inputs.

    Gathers ``-input`` at the target class indices, applies per-class
    weights (with ``ignore_index`` targets weighted 0), then reduces.
    Inputs with ndim > 3 are flattened to the 3-D case by the caller
    (``nll_loss``) before reaching here.
    """
    torch._check(
        input.ndim > 0 and input.ndim <= 3,
        lambda: f"Expected input dimension to be either [1, 2, 3] but received {input.ndim}.",
    )

    torch._check(
        (input.ndim == 1) or (input.shape[0] == target.shape[0]),
        lambda: f"Expected input batch size {input.shape[0]} to match target batch size {target.shape[0]}.",
    )

    _check_reduction_value(reduction)

    # Flattened target is used both for the ignore mask and for weight lookup.
    flat_target = torch.flatten(target)
    ignore_classes_mask = torch.eq(flat_target, ignore_index)

    # TODO: Enable data-dependent checks with debug mode
    # TODO: This check does not work with FakeTensor inputs; See Issue #85834
    # Explicit cast for class_check to bool; See Issue #78071
    """
    from torch._subclasses.fake_tensor import FakeTensor
    num_classes = input.shape[1] if input.ndim > 1 else input.shape[0]
    valid_classes_mask = torch.logical_and(
        (flat_target >= 0), (flat_target < num_classes)
    )
    class_check = torch.all(torch.logical_or(ignore_classes_mask, valid_classes_mask))
    torch._check(
        isinstance(target, FakeTensor) or bool(class_check.item()),
        lambda: "A target class is out-of-bounds and not the ignore index.",
    )
    """

    # Per-element weight: 0 for ignored targets, otherwise the class weight
    # (or 1 when no weight tensor was supplied).
    ignore_class_weight = torch.scalar_tensor(0, dtype=input.dtype, device=input.device)
    class_weight = (
        torch.scalar_tensor(1, dtype=input.dtype, device=input.device)
        if weight is None
        else weight[flat_target]
    )
    current_weight = torch.where(
        ignore_classes_mask,
        ignore_class_weight,
        class_weight,
    )

    if input.ndim == 1:
        # implicit batch size = 1
        # input (1 batch size, C classes)
        loss = -input[target] * current_weight
    elif input.ndim == 2:
        # input (N batch size, C classes)
        batch_size = input.shape[0]
        loss = -input[torch.arange(batch_size), target] * current_weight
    else:
        # 3D case (N batch size, C classe, K dimensions)
        # input (N batch size, C classes, K)
        # Gather via flat (batch, class, position) index triples, then
        # restore the target's shape.
        batch_size = input.shape[0]
        extent = input.shape[2]
        numel = batch_size * extent
        indices = torch.arange(numel)
        bdx = indices // extent
        kdx = indices % extent
        loss = -input[bdx, flat_target, kdx] * current_weight
        loss = torch.reshape(loss, target.shape)

    if reduction == "none":
        return loss
    elif reduction == "sum":
        return torch.sum(loss)
    else:
        # calculate weighted mean of the loss function
        return torch.sum(loss) / torch.sum(current_weight)
|
| 771 |
+
|
| 772 |
+
|
| 773 |
+
@register_decomposition(aten.nll_loss)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("input",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def nll_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    weight: Optional[TensorLikeType] = None,
    size_average: Optional[bool] = None,
    ignore_index: int = -100,
    reduce: Optional[bool] = None,
    reduction: str = "mean",
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.nll_loss

    Validates shapes, handles the deprecated size_average/reduce flags and
    the empty-tensor corner case, then delegates to ``_nll_loss_nd``
    (flattening inputs with ndim > 3 down to the 3-D case first).
    """
    torch._check(
        input.ndim > 0,
        lambda: f"Expected input tensor to have 1 or more dimensions (got {input.ndim})",
    )

    # TODO: raise exception instead of converting value
    # msg = "size_average and reduce args are deprecated, please use reduction argument."
    # Convert these options for consistency with the eager mode
    if size_average is not None or reduce is not None:
        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)

    # The expected behavior when the target and input have zero elements:
    #   reduction = 'none' --- tensor([])
    #   reduction = 'sum'  --- tensor(0.)
    #   reduction = 'mean' --- tensor(nan)
    # Mean reduction on empty tensors produces NaN. See the discussion in
    # https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
    if input.numel() == 0 and target.numel() == 0:
        if reduction == "none":
            return torch.zeros_like(target)
        elif reduction == "sum":
            # NOTE(review): empty_like on an empty target yields an empty
            # tensor rather than the tensor(0.) described above — confirm
            # against eager behavior.
            return torch.empty_like(target)
        else:
            return torch.full_like(target, float("nan"))

    # The _nll_loss_nd helper function handles the most common cases.
    # ndim == 1 (Single Example)
    #   => Batch Size: 1, Input: (C), Target: ()
    # ndim == 2 (k = 1)
    #   => Batch Size: N, Input: (N, C), Target: (N)
    # ndim == 3 (k > 1)
    #   => Batch Size: N, Input: (N, C, K), Target: (N, K)
    if input.ndim <= 3:
        return _nll_loss_nd(input, target, weight, reduction, ignore_index)

    # For ndim > 3, we reshape the input and target to 3-D case.
    # Input (N batch-size, C classes, k-dimensions)
    # Target (N batch-size, k-dimensions)
    torch._check(
        input.ndim > 0 and target.ndim > 0 and target.shape[1:] == input.shape[2:],
        lambda: (
            "Expected input and target to both have ndim > 0 and "
            "target.shape[1:] == input.shape[2:], but got "
            f"target.shape {target.shape} and input.shape {input.shape}"
        ),
    )

    batch_size = input.shape[0]
    num_classes = input.shape[1]
    out_size = [batch_size] + list(target.shape[1:])

    # Collapse all trailing spatial dims into a single K dimension.
    input = torch.reshape(input, [batch_size, num_classes, -1])
    target = torch.reshape(target, [batch_size, -1])
    if reduction != "none":
        return _nll_loss_nd(input, target, weight, reduction, ignore_index)
    else:
        result = _nll_loss_nd(input, target, weight, reduction, ignore_index)
        # reshape flattened inner-dim to original k-dimensions
        return torch.reshape(result, out_size)
|
| 850 |
+
|
| 851 |
+
|
| 852 |
+
# TODO: This ref supports int reduction and out kwarg to be compatible with ATen:
# https://github.com/pytorch/pytorch/issues/83931
# TODO: Could be rewritten to support complex:
# https://github.com/pytorch/pytorch/pull/85041
@register_decomposition(aten.huber_loss)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("input", "target"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def huber_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    reduction: Union[str, int] = "mean",
    delta: float = 1.0,
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.huber_loss

    Quadratic for absolute errors below ``delta``, linear above it.
    ``reduction`` may be an ATen integer enum or a string.
    """
    # `type(...) is int` (rather than isinstance) is preserved from the
    # original; it excludes int subclasses such as bool.
    if type(reduction) is int:
        reduction = _reduction_int_to_str(reduction)
    _check_reduction_value(reduction)  # type: ignore[arg-type]
    torch._check(
        delta > 0,
        lambda: "huber_loss does not support non-positive values for delta.",
    )
    z = torch.abs(input - target)
    loss = torch.where(z < delta, 0.5 * z * z, delta * (z - 0.5 * delta))
    return _apply_loss_reduction(loss, reduction)  # type: ignore[arg-type]
|
| 881 |
+
|
| 882 |
+
|
| 883 |
+
# tanhshrink does not use _make_elementwise_unary_reference because it does not support out
@elementwise_unary_scalar_wrapper
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def tanhshrink(a: TensorLikeType) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.tanhshrink

    Computes ``a - tanh(a)`` elementwise.
    """
    if not isinstance(a, TensorLike):
        raise RuntimeError(
            "Expected a tensor input for an elementwise unary operation!"
        )
    return torch.sub(a, torch.tanh(a))
|
| 898 |
+
|
| 899 |
+
|
| 900 |
+
@register_decomposition(aten.threshold)
@_inplace_wrapper
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def threshold(
    a: TensorLikeType,
    threshold: NumberType,
    value: Union[bool, int, float],
    inplace: bool = False,
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.threshold

    Replaces every element that is <= ``threshold`` with ``value``. The
    ``inplace`` variant is not implemented for this reference.
    """
    if inplace:
        raise NotImplementedError

    below_or_equal = a <= threshold
    return torch.where(below_or_equal, value, a)
|
| 921 |
+
|
| 922 |
+
|
| 923 |
+
# CompositeImplicitAutograd - don't register decomp
# No elementwise type promotion - core op doesn't explicitly type promote
def triplet_margin_loss(
    anchor: TensorLikeType,
    positive: TensorLikeType,
    negative: TensorLikeType,
    margin: float = 1.0,
    p: float = 2,
    eps: float = 1e-6,
    swap: bool = False,
    size_average: Optional[bool] = None,
    reduce: Optional[bool] = None,
    reduction: str = "mean",
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.triplet_margin_loss.

    Thin wrapper that fixes the distance function to the p-norm pairwise
    distance and delegates to ``_triplet_margin_with_distance_loss``.
    """
    if size_average is not None or reduce is not None:
        # Deprecated flags override `reduction`, matching eager-mode handling.
        # TODO: Raise exception instead of converting value. This is only for
        # primTorch since it can drop support for deprecated arguments.
        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)

    if margin <= 0:
        raise ValueError(f"margin must be greater than 0, got {margin}")

    # torch.nn.functional.triplet_margin_with_distance_loss has no ref defined
    # since it's a pure Python implementation. Use this helper instead.
    return _triplet_margin_with_distance_loss(
        anchor=anchor,
        positive=positive,
        negative=negative,
        distance_function=lambda x, y: torch.pairwise_distance(x, y, p, eps),
        margin=margin,
        swap=swap,
        reduction=reduction,
    )
|
| 957 |
+
|
| 958 |
+
|
| 959 |
+
# Pure Python impl - don't register decomp and don't add a ref. Defined as a
# helper here since triplet_margin_loss can be nicely implemented with it.
def _triplet_margin_with_distance_loss(
    anchor: TensorLikeType,
    positive: TensorLikeType,
    negative: TensorLikeType,
    *,
    distance_function: Optional[
        Callable[[TensorLikeType, TensorLikeType], TensorLikeType]
    ] = None,
    margin: float = 1.0,
    swap: bool = False,
    reduction: str = "mean",
) -> TensorLikeType:
    """Triplet margin loss with a configurable distance function.

    Loss is ``max(0, margin + d(anchor, positive) - d(anchor, negative))``
    followed by the requested reduction; ``d`` defaults to
    ``torch.pairwise_distance``.
    """
    _check_reduction_value(reduction)

    a_dim = anchor.ndim
    p_dim = positive.ndim
    n_dim = negative.ndim
    torch._check(
        a_dim == p_dim and p_dim == n_dim,
        lambda: (
            f"The anchor, positive, and negative tensors are expected to have "
            f"the same number of dimensions, but got: anchor {a_dim}D, "
            f"positive {p_dim}D, and negative {n_dim}D inputs"
        ),
    )

    dist_fn = torch.pairwise_distance if distance_function is None else distance_function

    dist_pos = dist_fn(anchor, positive)
    dist_neg = dist_fn(anchor, negative)
    # The distance swap is described in the paper "Learning shallow
    # convolutional feature descriptors with triplet losses" by V. Balntas, E.
    # Riba et al. If True, and if the positive example is closer to the
    # negative example than the anchor is, swaps the positive example and the
    # anchor in the loss computation.
    if swap:
        dist_neg = torch.minimum(dist_neg, dist_fn(positive, negative))

    loss = torch.clamp_min(margin + dist_pos - dist_neg, 0)
    return _apply_loss_reduction(loss, reduction)
|
| 1002 |
+
|
| 1003 |
+
|
| 1004 |
+
@register_decomposition(aten.hardtanh)
@_inplace_wrapper
@out_wrapper()
@elementwise_unary_scalar_wrapper
@elementwise_type_promotion_wrapper(
    # Fix: ("a") is just the string "a", not a 1-tuple — it only behaved
    # because iterating the string happens to yield "a". Use a real tuple
    # for correctness and consistency with every other wrapper in this file.
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def hardtanh(
    a: TensorLikeType,
    min_val: NumberType = -1,
    max_val: NumberType = 1,
    inplace: bool = False,
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.hardtanh

    Clamps ``a`` to ``[min_val, max_val]``. Bool inputs are rejected, and
    for integer dtypes the bounds are truncated to ints so they do not
    trigger type promotion. The ``inplace`` variant is not implemented for
    this reference.

    Raises:
        RuntimeError: for bool inputs, or negative limits on uint8 inputs.
        ValueError: if ``min_val > max_val``.
    """
    if inplace:
        raise NotImplementedError
    if utils.is_boolean_dtype(a.dtype):
        raise RuntimeError("Bool inputs not supported for hardtanh")

    # preserve legacy behavior of boundaries not causing type promotion
    if utils.is_integer_dtype(a.dtype):
        min_val = int(min_val)  # type: ignore[arg-type]
        max_val = int(max_val)  # type: ignore[arg-type]
        if not (a.dtype != torch.uint8 or (min_val >= 0 and max_val >= 0)):
            raise RuntimeError(
                "Cannot do hardtanh on an unsigned type with negative limits"
            )

    if min_val > max_val:  # type: ignore[operator]
        raise ValueError("min_val cannot be greater than max_val")

    return torch.clamp(a, min_val, max_val)  # type: ignore[arg-type]
|
| 1039 |
+
|
| 1040 |
+
|
| 1041 |
+
@register_decomposition(aten.gelu)
@out_wrapper()
@elementwise_unary_scalar_wrapper
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def gelu(a: TensorLikeType, approximate: str = "none") -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.gelu

    ``approximate="none"`` uses the exact erf formulation;
    ``approximate="tanh"`` uses the tanh approximation.
    """
    if not isinstance(a, TensorLike):
        raise RuntimeError(
            "Expected a tensor input for an elementwise unary operation!"
        )
    M_SQRT2 = 1.41421356237309504880
    M_SQRT1_2 = 0.70710678118654752440
    M_2_SQRTPI = 1.12837916709551257390
    if approximate == "none":
        # 0.5 * a * (1 + erf(a / sqrt(2)))
        kAlpha = M_SQRT1_2
        return a * 0.5 * (1 + torch.erf(a * kAlpha))
    elif approximate == "tanh":
        # 0.5 * a * (1 + tanh(sqrt(2/pi) * (a + 0.044715 * a^3)))
        kBeta = M_SQRT2 * M_2_SQRTPI * 0.5
        kKappa = 0.044715
        a_cube = a * a * a
        inner = kBeta * (a + kKappa * a_cube)
        return 0.5 * a * (1 + torch.tanh(inner))
    else:
        raise RuntimeError("approximate argument must be either none or tanh.")
|
| 1070 |
+
|
| 1071 |
+
|
| 1072 |
+
# CompositeImplicitAutograd - don't register decomp
@elementwise_type_promotion_wrapper(
    type_promoting_args=("input", "target"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def poisson_nll_loss(
    input: TensorLikeType,
    target: TensorLikeType,
    log_input: bool = True,
    full: bool = False,
    size_average: Optional[bool] = None,
    eps: float = 1e-8,
    reduce: Optional[bool] = None,
    reduction: str = "mean",
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.poisson_nll_loss

    ``log_input`` selects whether ``input`` is interpreted in log space;
    ``eps`` only guards the log when it is not. ``full`` adds the Stirling
    approximation term for targets greater than 1.
    """
    if size_average is not None or reduce is not None:
        # Deprecated flags override `reduction`, matching eager-mode handling.
        # TODO: Raise exception instead of converting value. This is only for
        # primTorch since it can drop support for deprecated arguments.
        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
    _check_reduction_value(reduction)

    if log_input:
        loss = torch.exp(input) - target * input
    else:
        loss = input - target * torch.log(input + eps)

    if full:
        # Stirling approximation of log(target!), only added where target > 1.
        stirling_term = (
            target * torch.log(target) - target + 0.5 * torch.log(2 * torch.pi * target)
        )
        # avoid inplace add
        loss = loss + stirling_term.masked_fill(target <= 1, 0)
    return _apply_loss_reduction(loss, reduction)
|
| 1108 |
+
|
| 1109 |
+
|
| 1110 |
+
@register_decomposition(aten.prelu)
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a", "weight"),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def prelu(a: TensorLikeType, weight: TensorLikeType) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.prelu

    Returns ``a`` for positive elements and ``a * weight`` otherwise, with
    ``weight`` either a single scalar or one value per channel (dim 1 of
    ``a`` when ``a.ndim >= 2``, else treated as channel size 1).
    """
    torch._check(
        isinstance(a, TensorLike),
        lambda: f"prelu: Expected `a` to be tensor, but got: {type(a)}",
    )
    torch._check(
        isinstance(weight, TensorLike),
        lambda: f"prelu: Expected `weight` to be tensor, but got: {type(weight)}",
    )

    # A non-scalar weight must have exactly one entry per channel.
    if weight.numel() != 1:
        torch._check(a.ndim > 0, lambda: "Not allow zero-dim input tensor.")
        channel_size = a.shape[1] if a.ndim >= 2 else 1
        torch._check(
            weight.numel() == channel_size,
            lambda: f"Mismatch of parameter numbers and input channel size. Found parameter numbers ="
            f" {weight.numel()} and channel size = {channel_size}.",
        )

    torch._check(
        weight.ndim == 0 or weight.ndim == 1,
        lambda: f"prelu: Expected `weight` to be a scalar or 1D tensor, but got: "
        f"ndim = {weight.ndim}",
    )
    if a.ndim == 0:
        # 0-d input: use the lone weight element directly.
        weight = weight[0] if weight.ndim == 1 else weight
    else:
        # Broadcast the weight across `a`, mapping a 1-D weight onto the
        # channel dimension (dim 0 for 1-D inputs, dim 1 otherwise).
        weight = prims.broadcast_in_dim(
            weight, a.shape, () if weight.ndim == 0 else (0 if a.ndim == 1 else 1,)
        )

    return torch.where(a > 0, a, a * weight)
|
| 1150 |
+
|
| 1151 |
+
|
| 1152 |
+
@register_decomposition(aten.relu6)
|
| 1153 |
+
@_inplace_wrapper
|
| 1154 |
+
@out_wrapper()
|
| 1155 |
+
def relu6(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
|
| 1156 |
+
"""
|
| 1157 |
+
Reference implementation of torch.nn.functional.relu6
|
| 1158 |
+
"""
|
| 1159 |
+
if inplace:
|
| 1160 |
+
raise NotImplementedError
|
| 1161 |
+
|
| 1162 |
+
# See https://github.com/pytorch/pytorch/pull/81142#discussion_r918220126
|
| 1163 |
+
# It may be better to use clamp here, but we use hardtanh to replicate
|
| 1164 |
+
# the behavior of the existing implementation
|
| 1165 |
+
return torch.nn.functional.hardtanh(a, 0, 6)
|
| 1166 |
+
|
| 1167 |
+
|
| 1168 |
+
@register_decomposition(aten.glu)
|
| 1169 |
+
@out_wrapper()
|
| 1170 |
+
@elementwise_type_promotion_wrapper(
|
| 1171 |
+
type_promoting_args=("a",),
|
| 1172 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
|
| 1173 |
+
)
|
| 1174 |
+
def glu(a: TensorLikeType, dim: int = -1) -> TensorLikeType:
|
| 1175 |
+
dim = utils.canonicalize_dims(a.ndim, dim)
|
| 1176 |
+
torch._check(
|
| 1177 |
+
a.shape[dim] % 2 == 0,
|
| 1178 |
+
lambda: f"Halving dimension must be even, but dimension {dim} is size {a.shape[dim]}",
|
| 1179 |
+
)
|
| 1180 |
+
b, c = torch.tensor_split(a, 2, dim)
|
| 1181 |
+
|
| 1182 |
+
return b * torch.sigmoid(c)
|
| 1183 |
+
|
| 1184 |
+
|
| 1185 |
+
@register_decomposition(aten.pairwise_distance)
|
| 1186 |
+
@out_wrapper()
|
| 1187 |
+
def pairwise_distance(
|
| 1188 |
+
x1: TensorLikeType,
|
| 1189 |
+
x2: TensorLikeType,
|
| 1190 |
+
p: NumberType = 2.0,
|
| 1191 |
+
eps: NumberType = 1e-6,
|
| 1192 |
+
keepdim=False,
|
| 1193 |
+
) -> TensorLikeType:
|
| 1194 |
+
return torch.linalg.vector_norm(x1 - x2 + eps, ord=p, dim=-1, keepdim=keepdim)
|
| 1195 |
+
|
| 1196 |
+
|
| 1197 |
+
@register_decomposition(aten.pdist)
|
| 1198 |
+
@out_wrapper()
|
| 1199 |
+
@elementwise_type_promotion_wrapper(
|
| 1200 |
+
type_promoting_args=("a",),
|
| 1201 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
|
| 1202 |
+
)
|
| 1203 |
+
def pdist(a: TensorLikeType, p: float = 2) -> TensorLikeType:
|
| 1204 |
+
torch._check(a.ndim == 2, lambda: f"pdist only supports 2D tensors, got: {a.ndim}D")
|
| 1205 |
+
torch._check(p >= 0, lambda: "pdist only supports non-negative p values")
|
| 1206 |
+
# For p == 2 we can use an efficient implementation, but other values of p
|
| 1207 |
+
# require creating a much bigger tensor for an intermediate step
|
| 1208 |
+
if p == 2:
|
| 1209 |
+
aTa = torch.mm(a, a.T)
|
| 1210 |
+
aTa_diag = torch.diag(aTa)
|
| 1211 |
+
t = torch.sqrt(torch.clamp(aTa_diag + aTa_diag.unsqueeze(-1) - 2 * aTa, min=0))
|
| 1212 |
+
else:
|
| 1213 |
+
t = torch.linalg.vector_norm(a.unsqueeze(1) - a, ord=p, dim=2)
|
| 1214 |
+
i = torch.triu_indices(t.shape[0], t.shape[1], offset=1, device=a.device)
|
| 1215 |
+
return t.flatten().index_select(0, i[0] * t.shape[0] + i[1])
|
| 1216 |
+
|
| 1217 |
+
|
| 1218 |
+
@register_decomposition(aten.pixel_shuffle)
|
| 1219 |
+
@out_wrapper()
|
| 1220 |
+
def pixel_shuffle(self: Tensor, upscale_factor: int):
|
| 1221 |
+
torch._check(
|
| 1222 |
+
self.dim() >= 3,
|
| 1223 |
+
lambda: f"pixel_shuffle expects input to have at least 3 dimensions, but got input with {self.dim} dimension(s)",
|
| 1224 |
+
)
|
| 1225 |
+
batch = self.shape[:-3]
|
| 1226 |
+
C_out = self.shape[-3] // upscale_factor**2
|
| 1227 |
+
HW_out = (self.shape[-2] * upscale_factor, self.shape[-1] * upscale_factor)
|
| 1228 |
+
n = len(batch)
|
| 1229 |
+
B_dims = range(n)
|
| 1230 |
+
C_dim, r1_dim, r2_dim, H_dim, W_dim = range(n, n + 5)
|
| 1231 |
+
return (
|
| 1232 |
+
self.view(
|
| 1233 |
+
*batch,
|
| 1234 |
+
C_out,
|
| 1235 |
+
upscale_factor,
|
| 1236 |
+
upscale_factor,
|
| 1237 |
+
self.shape[-2],
|
| 1238 |
+
self.shape[-1],
|
| 1239 |
+
)
|
| 1240 |
+
.permute(*B_dims, C_dim, H_dim, r1_dim, W_dim, r2_dim)
|
| 1241 |
+
.reshape(*batch, C_out, *HW_out)
|
| 1242 |
+
.clone(memory_format=utils.suggest_memory_format(self))
|
| 1243 |
+
)
|
| 1244 |
+
|
| 1245 |
+
|
| 1246 |
+
@register_decomposition(aten.pixel_unshuffle)
|
| 1247 |
+
@out_wrapper()
|
| 1248 |
+
def pixel_unshuffle(self: Tensor, downscale_factor: int):
|
| 1249 |
+
torch._check(
|
| 1250 |
+
self.dim() >= 3,
|
| 1251 |
+
lambda: f"pixel_unshuffle expects input to have at least 3 dimensions, but got input with {self.dim} dimension(s)",
|
| 1252 |
+
)
|
| 1253 |
+
batch = self.shape[:-3]
|
| 1254 |
+
C_out = self.shape[-3] * downscale_factor**2
|
| 1255 |
+
HW_out = (self.shape[-2] // downscale_factor, self.shape[-1] // downscale_factor)
|
| 1256 |
+
n = len(batch)
|
| 1257 |
+
B_dims = range(n)
|
| 1258 |
+
C_dim, H_dim, r1_dim, W_dim, r2_dim = range(n, n + 5)
|
| 1259 |
+
return (
|
| 1260 |
+
self.view(
|
| 1261 |
+
*batch,
|
| 1262 |
+
self.shape[-3],
|
| 1263 |
+
HW_out[0],
|
| 1264 |
+
downscale_factor,
|
| 1265 |
+
HW_out[1],
|
| 1266 |
+
downscale_factor,
|
| 1267 |
+
)
|
| 1268 |
+
.permute(*B_dims, C_dim, r1_dim, r2_dim, H_dim, W_dim)
|
| 1269 |
+
.reshape(*batch, C_out, *HW_out)
|
| 1270 |
+
.clone(memory_format=utils.suggest_memory_format(self))
|
| 1271 |
+
)
|
| 1272 |
+
|
| 1273 |
+
|
| 1274 |
+
# Needed as aten.{celu_,elu_...} exist (even if they don't have the in-place kwarg)
|
| 1275 |
+
celu_ = _make_inplace(celu)
|
| 1276 |
+
elu_ = _make_inplace(elu)
|
| 1277 |
+
mish_ = _make_inplace(mish)
|
| 1278 |
+
selu_ = _make_inplace(selu)
|
| 1279 |
+
threshold_ = _make_inplace(threshold)
|
pllava/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (27.5 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_refs/special/__init__.py
ADDED
|
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import math
|
| 3 |
+
from typing import Optional, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch._prims as prims
|
| 7 |
+
import torch._prims_common as utils
|
| 8 |
+
import torch._refs as refs
|
| 9 |
+
from torch import Tensor
|
| 10 |
+
from torch._decomp import register_decomposition
|
| 11 |
+
from torch._prims_common import (
|
| 12 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND,
|
| 13 |
+
Number,
|
| 14 |
+
NumberType,
|
| 15 |
+
TensorLike,
|
| 16 |
+
TensorLikeType,
|
| 17 |
+
)
|
| 18 |
+
from torch._prims_common.wrappers import elementwise_type_promotion_wrapper, out_wrapper
|
| 19 |
+
from torch._refs import (
|
| 20 |
+
_make_alias,
|
| 21 |
+
_make_elementwise_binary_reference,
|
| 22 |
+
_make_elementwise_unary_reference,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
__all__ = [
|
| 27 |
+
"bessel_j0",
|
| 28 |
+
"bessel_j1",
|
| 29 |
+
"entr",
|
| 30 |
+
"erfcx",
|
| 31 |
+
"expit",
|
| 32 |
+
"i0e",
|
| 33 |
+
"i1",
|
| 34 |
+
"i1e",
|
| 35 |
+
"log_ndtr",
|
| 36 |
+
"logit",
|
| 37 |
+
"log_softmax",
|
| 38 |
+
"multigammaln",
|
| 39 |
+
"ndtr",
|
| 40 |
+
"ndtri",
|
| 41 |
+
"softmax",
|
| 42 |
+
"spherical_bessel_j0",
|
| 43 |
+
"xlog1py",
|
| 44 |
+
"zeta",
|
| 45 |
+
]
|
| 46 |
+
aten = torch._ops.ops.aten
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@_make_elementwise_unary_reference(
|
| 50 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 51 |
+
)
|
| 52 |
+
def bessel_j0(a: TensorLikeType) -> TensorLikeType:
|
| 53 |
+
return prims.bessel_j0(a)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@_make_elementwise_unary_reference(
|
| 57 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 58 |
+
)
|
| 59 |
+
def bessel_j1(a: TensorLikeType) -> TensorLikeType:
|
| 60 |
+
return prims.bessel_j1(a)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@register_decomposition(aten.special_entr)
|
| 64 |
+
@out_wrapper()
|
| 65 |
+
@elementwise_type_promotion_wrapper(
|
| 66 |
+
type_promoting_args=("a",),
|
| 67 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 68 |
+
)
|
| 69 |
+
def entr(a: TensorLikeType) -> TensorLikeType:
|
| 70 |
+
return torch.where(
|
| 71 |
+
torch.isnan(a),
|
| 72 |
+
a,
|
| 73 |
+
torch.where(a > 0, -a * torch.log(a), torch.where(a == 0, 0, -torch.inf)),
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
@register_decomposition(aten.special_erfcx)
|
| 78 |
+
@out_wrapper()
|
| 79 |
+
@elementwise_type_promotion_wrapper(
|
| 80 |
+
type_promoting_args=("a",),
|
| 81 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 82 |
+
)
|
| 83 |
+
def erfcx(a: TensorLikeType) -> TensorLikeType:
|
| 84 |
+
return prims.erfcx(a)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
# alias for sigmoid
|
| 88 |
+
expit = _make_alias(torch.sigmoid, "expit")
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@_make_elementwise_unary_reference(
|
| 92 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 93 |
+
)
|
| 94 |
+
def i0e(a: TensorLikeType) -> TensorLikeType:
|
| 95 |
+
return prims.bessel_i0e(a)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@_make_elementwise_unary_reference(
|
| 99 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 100 |
+
)
|
| 101 |
+
def i1(a: TensorLikeType) -> TensorLikeType:
|
| 102 |
+
return prims.bessel_i1(a)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
@_make_elementwise_unary_reference(
|
| 106 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 107 |
+
)
|
| 108 |
+
def i1e(a: TensorLikeType) -> TensorLikeType:
|
| 109 |
+
return prims.bessel_i1e(a)
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
@register_decomposition(aten.special_log_ndtr)
|
| 113 |
+
@out_wrapper()
|
| 114 |
+
@elementwise_type_promotion_wrapper(
|
| 115 |
+
type_promoting_args=("a",),
|
| 116 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 117 |
+
)
|
| 118 |
+
def log_ndtr(a: TensorLikeType) -> TensorLikeType:
|
| 119 |
+
# Note: M_SQRT1_2 is the value of 1 / sqrt(2)
|
| 120 |
+
M_SQRT1_2 = 0.707106781186547524400844362104849039
|
| 121 |
+
t = a * M_SQRT1_2
|
| 122 |
+
return torch.where(
|
| 123 |
+
a < 1.0,
|
| 124 |
+
torch.log(torch.special.erfcx(-t) / 2) - t * t,
|
| 125 |
+
torch.log1p(-torch.erfc(t) / 2),
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
@register_decomposition(aten.logit)
|
| 130 |
+
@out_wrapper()
|
| 131 |
+
@elementwise_type_promotion_wrapper(
|
| 132 |
+
type_promoting_args=("self",),
|
| 133 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 134 |
+
)
|
| 135 |
+
def logit(self: TensorLikeType, eps: Optional[float] = None) -> TensorLikeType:
|
| 136 |
+
if eps is None:
|
| 137 |
+
eps = -1.0
|
| 138 |
+
lo = eps
|
| 139 |
+
hi = 1 - eps
|
| 140 |
+
self = torch.clamp(self, lo, hi)
|
| 141 |
+
return torch.log(torch.true_divide(self, torch.sub(1, self)))
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
@register_decomposition(aten.special_xlog1py)
|
| 145 |
+
@out_wrapper()
|
| 146 |
+
@elementwise_type_promotion_wrapper(
|
| 147 |
+
type_promoting_args=("a", "b"),
|
| 148 |
+
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 149 |
+
)
|
| 150 |
+
def xlog1py(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]):
|
| 151 |
+
torch._check(
|
| 152 |
+
isinstance(a, TensorLike) or isinstance(b, TensorLike),
|
| 153 |
+
lambda: 'Expected either argument a or b to be a Tensor"',
|
| 154 |
+
)
|
| 155 |
+
|
| 156 |
+
# Operations like eq and log do not handle scalar values, so we convert them to scalar_tensors.
|
| 157 |
+
if isinstance(a, TensorLike) and isinstance(b, Number):
|
| 158 |
+
b = refs.scalar_tensor(b, dtype=a.dtype, device=a.device)
|
| 159 |
+
elif isinstance(b, TensorLike) and isinstance(a, Number):
|
| 160 |
+
a = refs.scalar_tensor(a, dtype=b.dtype, device=b.device)
|
| 161 |
+
|
| 162 |
+
# mypy: expected "Tensor"
|
| 163 |
+
assert isinstance(a, TensorLike)
|
| 164 |
+
assert isinstance(b, TensorLike)
|
| 165 |
+
rhs = torch.where(torch.eq(a, 0), 0, torch.mul(a, torch.log1p(b)))
|
| 166 |
+
return torch.where(torch.isnan(b), float("nan"), rhs)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
@register_decomposition(aten.mvlgamma)
|
| 170 |
+
@out_wrapper()
|
| 171 |
+
@elementwise_type_promotion_wrapper(
|
| 172 |
+
type_promoting_args=("a",),
|
| 173 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 174 |
+
)
|
| 175 |
+
def multigammaln(a: TensorLikeType, p: int) -> TensorLikeType:
|
| 176 |
+
c = 0.25 * p * (p - 1) * math.log(math.pi)
|
| 177 |
+
b = 0.5 * torch.arange(start=(1 - p), end=1, step=1, dtype=a.dtype, device=a.device)
|
| 178 |
+
return torch.sum(torch.lgamma(a.unsqueeze(-1) + b), dim=-1) + c
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
@register_decomposition(aten.special_ndtr)
|
| 182 |
+
@out_wrapper()
|
| 183 |
+
@elementwise_type_promotion_wrapper(
|
| 184 |
+
type_promoting_args=("a",),
|
| 185 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 186 |
+
)
|
| 187 |
+
def ndtr(a: TensorLikeType) -> TensorLikeType:
|
| 188 |
+
# Note: M_SQRT1_2 is the value of 1 / sqrt(2)
|
| 189 |
+
M_SQRT1_2 = 0.707106781186547524400844362104849039
|
| 190 |
+
a_sqrt_2 = a * M_SQRT1_2
|
| 191 |
+
return (1 + torch.erf(a_sqrt_2)) * 0.5
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
@register_decomposition(aten.special_ndtri)
|
| 195 |
+
@out_wrapper()
|
| 196 |
+
@elementwise_type_promotion_wrapper(
|
| 197 |
+
type_promoting_args=("a",),
|
| 198 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 199 |
+
)
|
| 200 |
+
def ndtri(a: TensorLikeType) -> TensorLikeType:
|
| 201 |
+
return prims.ndtri(a)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
# Forwarding alias: the special variant doesn't support the out kwarg
|
| 205 |
+
# CompositeImplicitAutograd - don't register decomp
|
| 206 |
+
def log_softmax(
|
| 207 |
+
a: TensorLikeType,
|
| 208 |
+
dim: int,
|
| 209 |
+
dtype: Optional[torch.dtype] = None,
|
| 210 |
+
) -> TensorLikeType:
|
| 211 |
+
return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload]
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
# Forwarding alias: the special variant doesn't support the out kwarg
|
| 215 |
+
# CompositeImplicitAutograd - don't register decomp
|
| 216 |
+
def softmax(
|
| 217 |
+
a: TensorLikeType,
|
| 218 |
+
dim: int,
|
| 219 |
+
dtype: Optional[torch.dtype] = None,
|
| 220 |
+
) -> TensorLikeType:
|
| 221 |
+
return torch.softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload]
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
@_make_elementwise_unary_reference(
|
| 225 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 226 |
+
)
|
| 227 |
+
def spherical_bessel_j0(a: TensorLikeType) -> TensorLikeType:
|
| 228 |
+
return prims.spherical_bessel_j0(a)
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
# TODO: add docstring
|
| 232 |
+
@_make_elementwise_binary_reference(
|
| 233 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
| 234 |
+
)
|
| 235 |
+
def zeta(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType:
|
| 236 |
+
return prims.zeta(a, b)
|
pllava/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (5.08 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/_size_docs.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
"""Adds docstrings to torch.Size functions"""
|
| 3 |
+
|
| 4 |
+
import torch._C
|
| 5 |
+
from torch._C import _add_docstr as add_docstr
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def add_docstr_all(method, docstr):
|
| 9 |
+
add_docstr(getattr(torch._C.Size, method), docstr)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
add_docstr_all(
|
| 13 |
+
"numel",
|
| 14 |
+
"""
|
| 15 |
+
numel() -> int
|
| 16 |
+
|
| 17 |
+
Returns the number of elements a :class:`torch.Tensor` with the given size would contain.
|
| 18 |
+
|
| 19 |
+
More formally, for a tensor ``x = tensor.ones(10, 10)`` with size ``s = torch.Size([10, 10])``,
|
| 20 |
+
``x.numel() == x.size().numel() == s.numel() == 100`` holds true.
|
| 21 |
+
|
| 22 |
+
Example::
|
| 23 |
+
>>> x=torch.ones(10, 10)
|
| 24 |
+
>>> s=x.size()
|
| 25 |
+
>>> s
|
| 26 |
+
torch.Size([10, 10])
|
| 27 |
+
>>> s.numel()
|
| 28 |
+
100
|
| 29 |
+
>>> x.numel() == s.numel()
|
| 30 |
+
True
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
.. warning::
|
| 34 |
+
|
| 35 |
+
This function does not return the number of dimensions described by :class:`torch.Size`, but instead the number
|
| 36 |
+
of elements a :class:`torch.Tensor` with that size would contain.
|
| 37 |
+
|
| 38 |
+
""",
|
| 39 |
+
)
|
pllava/lib/python3.10/site-packages/torch/_streambase.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from abc import ABC, abstractmethod
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class _StreamBase(ABC):
|
| 6 |
+
r"""Base stream class abstraction for multi backends Stream to herit from"""
|
| 7 |
+
|
| 8 |
+
@abstractmethod
|
| 9 |
+
def wait_event(self, event) -> None:
|
| 10 |
+
raise NotImplementedError
|
| 11 |
+
|
| 12 |
+
@abstractmethod
|
| 13 |
+
def wait_stream(self, stream) -> None:
|
| 14 |
+
raise NotImplementedError
|
| 15 |
+
|
| 16 |
+
@abstractmethod
|
| 17 |
+
def record_event(self, event=None) -> None:
|
| 18 |
+
raise NotImplementedError
|
| 19 |
+
|
| 20 |
+
@abstractmethod
|
| 21 |
+
def query(self) -> bool:
|
| 22 |
+
raise NotImplementedError
|
| 23 |
+
|
| 24 |
+
@abstractmethod
|
| 25 |
+
def synchronize(self) -> None:
|
| 26 |
+
raise NotImplementedError
|
| 27 |
+
|
| 28 |
+
@abstractmethod
|
| 29 |
+
def __eq__(self, stream) -> bool:
|
| 30 |
+
raise NotImplementedError
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class _EventBase(ABC):
|
| 34 |
+
r"""Base Event class abstraction for multi backends Event to herit from"""
|
| 35 |
+
|
| 36 |
+
@abstractmethod
|
| 37 |
+
def wait(self, stream=None) -> None:
|
| 38 |
+
raise NotImplementedError
|
| 39 |
+
|
| 40 |
+
@abstractmethod
|
| 41 |
+
def query(self) -> bool:
|
| 42 |
+
raise NotImplementedError
|
| 43 |
+
|
| 44 |
+
@abstractmethod
|
| 45 |
+
def synchronize(self) -> None:
|
| 46 |
+
raise NotImplementedError
|
pllava/lib/python3.10/site-packages/torch/_tensor_docs.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pllava/lib/python3.10/site-packages/torch/_torch_docs.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pllava/lib/python3.10/site-packages/torch/_vmap_internals.py
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import functools
|
| 3 |
+
from typing import Any, Callable, List, Optional, Tuple, Union
|
| 4 |
+
from typing_extensions import deprecated
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch import Tensor
|
| 8 |
+
from torch.utils._pytree import _broadcast_to_and_flatten, tree_flatten, tree_unflatten
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
in_dims_t = Union[int, Tuple]
|
| 12 |
+
out_dims_t = Union[int, Tuple[int, ...]]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# Checks that all args-to-be-batched have the same batch dim size
|
| 16 |
+
def _validate_and_get_batch_size(
|
| 17 |
+
flat_in_dims: List[Optional[int]],
|
| 18 |
+
flat_args: List,
|
| 19 |
+
) -> int:
|
| 20 |
+
batch_sizes = [
|
| 21 |
+
arg.size(in_dim)
|
| 22 |
+
for in_dim, arg in zip(flat_in_dims, flat_args)
|
| 23 |
+
if in_dim is not None
|
| 24 |
+
]
|
| 25 |
+
if batch_sizes and any(size != batch_sizes[0] for size in batch_sizes):
|
| 26 |
+
raise ValueError(
|
| 27 |
+
f"vmap: Expected all tensors to have the same size in the mapped "
|
| 28 |
+
f"dimension, got sizes {batch_sizes} for the mapped dimension"
|
| 29 |
+
)
|
| 30 |
+
return batch_sizes[0]
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _num_outputs(batched_outputs: Union[Tensor, Tuple[Tensor, ...]]) -> int:
|
| 34 |
+
if isinstance(batched_outputs, tuple):
|
| 35 |
+
return len(batched_outputs)
|
| 36 |
+
return 1
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# If value is a tuple, check it has length `num_elements`.
|
| 40 |
+
# If value is not a tuple, make a tuple with `value` repeated `num_elements` times
|
| 41 |
+
def _as_tuple(
|
| 42 |
+
value: Any,
|
| 43 |
+
num_elements: int,
|
| 44 |
+
error_message_lambda: Callable[[], str],
|
| 45 |
+
) -> Tuple:
|
| 46 |
+
if not isinstance(value, tuple):
|
| 47 |
+
return (value,) * num_elements
|
| 48 |
+
if len(value) != num_elements:
|
| 49 |
+
raise ValueError(error_message_lambda())
|
| 50 |
+
return value
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
# Creates BatchedTensors for every Tensor in arg that should be batched.
|
| 54 |
+
# Returns the (potentially) batched arguments and the batch_size.
|
| 55 |
+
def _create_batched_inputs(
|
| 56 |
+
in_dims: in_dims_t,
|
| 57 |
+
args: Tuple,
|
| 58 |
+
vmap_level: int,
|
| 59 |
+
func: Callable,
|
| 60 |
+
) -> Tuple[Tuple, int]:
|
| 61 |
+
if not isinstance(in_dims, int) and not isinstance(in_dims, tuple):
|
| 62 |
+
raise ValueError(
|
| 63 |
+
f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
|
| 64 |
+
f"expected `in_dims` to be int or a (potentially nested) tuple "
|
| 65 |
+
f"matching the structure of inputs, got: {type(in_dims)}."
|
| 66 |
+
)
|
| 67 |
+
if len(args) == 0:
|
| 68 |
+
raise ValueError(
|
| 69 |
+
f"vmap({_get_name(func)})(<inputs>): got no inputs. Maybe you forgot to add "
|
| 70 |
+
f"inputs, or you are trying to vmap over a function with no inputs. "
|
| 71 |
+
f"The latter is unsupported."
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
flat_args, args_spec = tree_flatten(args)
|
| 75 |
+
flat_in_dims = _broadcast_to_and_flatten(in_dims, args_spec)
|
| 76 |
+
if flat_in_dims is None:
|
| 77 |
+
raise ValueError(
|
| 78 |
+
f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
|
| 79 |
+
f"in_dims is not compatible with the structure of `inputs`. "
|
| 80 |
+
f"in_dims has structure {tree_flatten(in_dims)[1]} but inputs "
|
| 81 |
+
f"has structure {args_spec}."
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
for arg, in_dim in zip(flat_args, flat_in_dims):
|
| 85 |
+
if not isinstance(in_dim, int) and in_dim is not None:
|
| 86 |
+
raise ValueError(
|
| 87 |
+
f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
|
| 88 |
+
f"Got in_dim={in_dim} for an input but in_dim must be either "
|
| 89 |
+
f"an integer dimension or None."
|
| 90 |
+
)
|
| 91 |
+
if isinstance(in_dim, int) and not isinstance(arg, Tensor):
|
| 92 |
+
raise ValueError(
|
| 93 |
+
f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
|
| 94 |
+
f"Got in_dim={in_dim} for an input but the input is of type "
|
| 95 |
+
f"{type(arg)}. We cannot vmap over non-Tensor arguments, "
|
| 96 |
+
f"please use None as the respective in_dim"
|
| 97 |
+
)
|
| 98 |
+
if in_dim is not None and (in_dim < 0 or in_dim >= arg.dim()):
|
| 99 |
+
raise ValueError(
|
| 100 |
+
f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
|
| 101 |
+
f"Got in_dim={in_dim} for some input, but that input is a Tensor "
|
| 102 |
+
f"of dimensionality {arg.dim()} so expected in_dim to satisfy "
|
| 103 |
+
f"0 <= in_dim < {arg.dim()}."
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
batch_size = _validate_and_get_batch_size(flat_in_dims, flat_args)
|
| 107 |
+
# See NOTE [Ignored _remove_batch_dim, _add_batch_dim]
|
| 108 |
+
batched_inputs = [
|
| 109 |
+
arg if in_dim is None else torch._add_batch_dim(arg, in_dim, vmap_level)
|
| 110 |
+
for in_dim, arg in zip(flat_in_dims, flat_args)
|
| 111 |
+
]
|
| 112 |
+
return tree_unflatten(batched_inputs, args_spec), batch_size
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
# Undos the batching (and any batch dimensions) associated with the `vmap_level`.
|
| 116 |
+
def _unwrap_batched(
|
| 117 |
+
batched_outputs: Union[Tensor, Tuple[Tensor, ...]],
|
| 118 |
+
out_dims: out_dims_t,
|
| 119 |
+
vmap_level: int,
|
| 120 |
+
batch_size: int,
|
| 121 |
+
func: Callable,
|
| 122 |
+
allow_none_pass_through: bool = False,
|
| 123 |
+
) -> Tuple:
|
| 124 |
+
num_outputs = _num_outputs(batched_outputs)
|
| 125 |
+
out_dims_as_tuple = _as_tuple(
|
| 126 |
+
out_dims,
|
| 127 |
+
num_outputs,
|
| 128 |
+
lambda: f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must "
|
| 129 |
+
f"have one dim per output (got {num_outputs} outputs) of {_get_name(func)}.",
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
# NOTE [Ignored _remove_batch_dim, _add_batch_dim]
|
| 133 |
+
# There is something wrong with our type bindings for functions that begin
|
| 134 |
+
# with '_', see #40397.
|
| 135 |
+
if isinstance(batched_outputs, Tensor):
|
| 136 |
+
out_dim = out_dims_as_tuple[0]
|
| 137 |
+
return torch._remove_batch_dim(batched_outputs, vmap_level, batch_size, out_dim) # type: ignore[return-value]
|
| 138 |
+
if allow_none_pass_through:
|
| 139 |
+
return tuple(
|
| 140 |
+
(
|
| 141 |
+
torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)
|
| 142 |
+
if out is not None
|
| 143 |
+
else None
|
| 144 |
+
)
|
| 145 |
+
for out, out_dim in zip(batched_outputs, out_dims_as_tuple)
|
| 146 |
+
)
|
| 147 |
+
else:
|
| 148 |
+
return tuple(
|
| 149 |
+
torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)
|
| 150 |
+
for out, out_dim in zip(batched_outputs, out_dims_as_tuple)
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
# Checks that `fn` returned one or more Tensors and nothing else.
|
| 155 |
+
# NB: A python function that return multiple arguments returns a single tuple,
|
| 156 |
+
# so we are effectively checking that `outputs` is a single Tensor or a tuple of
|
| 157 |
+
# Tensors.
|
| 158 |
+
def _validate_outputs(outputs: Any, func: Callable) -> None:
|
| 159 |
+
if isinstance(outputs, Tensor):
|
| 160 |
+
return
|
| 161 |
+
if not isinstance(outputs, tuple):
|
| 162 |
+
raise ValueError(
|
| 163 |
+
f"vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return "
|
| 164 |
+
f"Tensors, got type {type(outputs)} as the return."
|
| 165 |
+
)
|
| 166 |
+
for idx, output in enumerate(outputs):
|
| 167 |
+
if isinstance(output, Tensor):
|
| 168 |
+
continue
|
| 169 |
+
raise ValueError(
|
| 170 |
+
f"vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return "
|
| 171 |
+
f"Tensors, got type {type(output)} for return {idx}."
|
| 172 |
+
)
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def _check_out_dims_is_int_or_int_tuple(out_dims: out_dims_t, func: Callable) -> None:
|
| 176 |
+
if isinstance(out_dims, int):
|
| 177 |
+
return
|
| 178 |
+
if not isinstance(out_dims, tuple) or not all(
|
| 179 |
+
isinstance(out_dim, int) for out_dim in out_dims
|
| 180 |
+
):
|
| 181 |
+
raise ValueError(
|
| 182 |
+
f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must be "
|
| 183 |
+
f"an int or a tuple of int representing where in the outputs the "
|
| 184 |
+
f"vmapped dimension should appear."
|
| 185 |
+
)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def _get_name(func: Callable):
|
| 189 |
+
if hasattr(func, "__name__"):
|
| 190 |
+
return func.__name__
|
| 191 |
+
|
| 192 |
+
# Not all callables have __name__, in fact, only static functions/methods do.
|
| 193 |
+
# A callable created via functools.partial or an nn.Module, to name some
|
| 194 |
+
# examples, don't have a __name__.
|
| 195 |
+
return repr(func)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
# vmap(func)(inputs) wraps all Tensor inputs to be batched in BatchedTensors,
|
| 199 |
+
# sends those into func, and then unwraps the output BatchedTensors. Operations
|
| 200 |
+
# on BatchedTensors perform the batched operations that the user is asking for.
|
| 201 |
+
@deprecated(
|
| 202 |
+
"Please use `torch.vmap` instead of `torch._vmap_internals.vmap`.",
|
| 203 |
+
category=FutureWarning,
|
| 204 |
+
)
|
| 205 |
+
def vmap(func: Callable, in_dims: in_dims_t = 0, out_dims: out_dims_t = 0) -> Callable:
|
| 206 |
+
"""
|
| 207 |
+
Please use torch.vmap instead of this API.
|
| 208 |
+
"""
|
| 209 |
+
return _vmap(func, in_dims, out_dims)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
# A version of vmap but without the initial "experimental prototype" warning
|
| 213 |
+
def _vmap(
    func: Callable,
    in_dims: in_dims_t = 0,
    out_dims: out_dims_t = 0,
    allow_none_pass_through: bool = False,
) -> Callable:
    """Vectorizing-map implementation without the public deprecation warning.

    Wraps all Tensor inputs in BatchedTensors, invokes ``func`` once on the
    batched inputs, then unwraps the batched outputs.

    The `allow_none_pass_through` argument is a temporary workaround and may
    be removed. Currently it enables us to wrap the call in `autograd.grad`
    to the autograd engine, which may return None if any of the inputs are
    unused. See the issue discussing this:
    https://github.com/facebookresearch/functorch/issues/159.
    """

    @functools.wraps(func)
    def wrapped(*args):
        _check_out_dims_is_int_or_int_tuple(out_dims, func)
        # Each nesting level gets its own vmap "layer" in the dispatcher.
        level = torch._C._vmapmode_increment_nesting()
        try:
            batched_args, batch_size = _create_batched_inputs(
                in_dims, args, level, func
            )
            raw_outputs = func(*batched_args)
            if not allow_none_pass_through:
                _validate_outputs(raw_outputs, func)
            return _unwrap_batched(
                raw_outputs,
                out_dims,
                level,
                batch_size,
                func,
                allow_none_pass_through=allow_none_pass_through,
            )
        finally:
            # Always unwind the nesting level, even if func raised.
            torch._C._vmapmode_decrement_nesting()

    return wrapped
|
pllava/lib/python3.10/site-packages/torch/_weights_only_unpickler.py
ADDED
|
@@ -0,0 +1,426 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
# Unpickler restricted to loading only state dicts
|
| 3 |
+
# Restrict constructing types to a list defined in _get_allowed_globals()
|
| 4 |
+
# Restrict BUILD operation to `Tensor`, `Parameter` and `OrderedDict` types only
|
| 5 |
+
# Restrict APPEND/APPENDS to `list`
|
| 6 |
+
# In `GLOBALS` operation do not do class lookup by name, but rather rely on dictionary
|
| 7 |
+
# defined by `_get_allowed_globals()` method, that contains:
|
| 8 |
+
# - torch types (Storage, dtypes, Tensor, `torch.Size`),
|
| 9 |
+
# - `torch._utils._rebuild` functions.
|
| 10 |
+
# - `torch.nn.Parameter`
|
| 11 |
+
# - `collections.Counter`
|
| 12 |
+
# - `collections.OrderedDict`
|
| 13 |
+
# Additionally, users can use an allowlist for adding classes they have deemed as safe using
|
| 14 |
+
# `_add_safe_globals()` (`torch.serialization.add_safe_globals`)
|
| 15 |
+
# `_clear_safe_globals()` (`torch.serialization.clear_safe_globals`)
|
| 16 |
+
# `_get_safe_globals()` (`torch.serialization.get_safe_globals`)
|
| 17 |
+
|
| 18 |
+
# Based on https://github.com/python/cpython/blob/main/Lib/pickle.py
|
| 19 |
+
# Expected to be useful for loading PyTorch model weights
|
| 20 |
+
# For example:
|
| 21 |
+
# data = urllib.request.urlopen('https://download.pytorch.org/models/resnet50-0676ba61.pth').read()
|
| 22 |
+
# buf = io.BytesIO(data)
|
| 23 |
+
# weights = torch.load(buf, weights_only = True)
|
| 24 |
+
|
| 25 |
+
import functools as _functools
|
| 26 |
+
import warnings
|
| 27 |
+
|
| 28 |
+
from _codecs import encode
|
| 29 |
+
from collections import Counter, OrderedDict
|
| 30 |
+
from pickle import (
|
| 31 |
+
APPEND,
|
| 32 |
+
APPENDS,
|
| 33 |
+
BINFLOAT,
|
| 34 |
+
BINGET,
|
| 35 |
+
BININT,
|
| 36 |
+
BININT1,
|
| 37 |
+
BININT2,
|
| 38 |
+
BINPERSID,
|
| 39 |
+
BINPUT,
|
| 40 |
+
BINUNICODE,
|
| 41 |
+
BUILD,
|
| 42 |
+
bytes_types,
|
| 43 |
+
decode_long,
|
| 44 |
+
EMPTY_DICT,
|
| 45 |
+
EMPTY_LIST,
|
| 46 |
+
EMPTY_SET,
|
| 47 |
+
EMPTY_TUPLE,
|
| 48 |
+
GLOBAL,
|
| 49 |
+
LONG1,
|
| 50 |
+
LONG_BINGET,
|
| 51 |
+
LONG_BINPUT,
|
| 52 |
+
MARK,
|
| 53 |
+
NEWFALSE,
|
| 54 |
+
NEWOBJ,
|
| 55 |
+
NEWTRUE,
|
| 56 |
+
NONE,
|
| 57 |
+
PROTO,
|
| 58 |
+
REDUCE,
|
| 59 |
+
SETITEM,
|
| 60 |
+
SETITEMS,
|
| 61 |
+
SHORT_BINSTRING,
|
| 62 |
+
STOP,
|
| 63 |
+
TUPLE,
|
| 64 |
+
TUPLE1,
|
| 65 |
+
TUPLE2,
|
| 66 |
+
TUPLE3,
|
| 67 |
+
UnpicklingError,
|
| 68 |
+
)
|
| 69 |
+
from struct import unpack
|
| 70 |
+
from sys import maxsize
|
| 71 |
+
from typing import Any, Dict, List
|
| 72 |
+
|
| 73 |
+
import torch
|
| 74 |
+
from torch._utils import IMPORT_MAPPING, NAME_MAPPING
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# modules in this list are never allowed, even if the user attempts to allowlist
|
| 78 |
+
# functions/classes from them
|
| 79 |
+
# Modules whose members may never be unpickled, even if the user explicitly
# attempts to allowlist functions/classes from them.
_blocklisted_modules = ["sys", "os", "posix", "nt"]

# Mutable registry of user-allowlisted globals; manipulated by the
# _add/_get/_clear/_remove_safe_globals helpers below.
_marked_safe_globals_list: List[Any] = []
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def _add_safe_globals(safe_globals: List[Any]):
    """Extend the module-level allowlist with *safe_globals* (in place)."""
    global _marked_safe_globals_list
    # += mutates the existing list so external references stay valid.
    _marked_safe_globals_list += safe_globals
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _get_safe_globals() -> List[Any]:
    """Return the current user allowlist (the live list, not a copy)."""
    return _marked_safe_globals_list
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def _clear_safe_globals():
    """Reset the user allowlist to empty (rebinds the module-level list)."""
    global _marked_safe_globals_list
    _marked_safe_globals_list = []
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _remove_safe_globals(globals_to_remove: List[Any]):
    """Drop *globals_to_remove* from the user allowlist.

    Rebinds the list to the set difference, so insertion order of the
    remaining entries is not preserved (matches original behavior).
    """
    global _marked_safe_globals_list
    remaining = set(_marked_safe_globals_list) - set(globals_to_remove)
    _marked_safe_globals_list = list(remaining)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class _safe_globals:
    """Context manager that temporarily allowlists *safe_globals*.

    On exit the same objects are removed again via ``_remove_safe_globals``.
    """

    def __init__(self, safe_globals: List[Any]):
        self.safe_globals = safe_globals

    def __enter__(self):
        _add_safe_globals(self.safe_globals)

    def __exit__(self, type, value, tb):
        _remove_safe_globals(self.safe_globals)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
# Separate from _get_allowed_globals because of the lru_cache on _get_allowed_globals
|
| 123 |
+
# For example if user had a script like
|
| 124 |
+
# torch.load(file_a)
|
| 125 |
+
# torch.serialization._add_safe_globals([torch.foo])
|
| 126 |
+
# torch.load(file_b)
|
| 127 |
+
# the dynamic additions to safe_globals would not be picked up by
|
| 128 |
+
# _get_allowed_globals due to the lru_cache
|
| 129 |
+
def _get_user_allowed_globals():
    """Map ``"module.name" -> object`` for every user-allowlisted global.

    Deliberately NOT lru_cached (unlike ``_get_allowed_globals``) so that
    additions made via ``add_safe_globals`` between ``torch.load`` calls are
    picked up.
    """
    return {f"{obj.__module__}.{obj.__name__}": obj for obj in _marked_safe_globals_list}
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def _tensor_rebuild_functions():
|
| 138 |
+
return {
|
| 139 |
+
torch._utils._rebuild_parameter,
|
| 140 |
+
torch._utils._rebuild_parameter_with_state,
|
| 141 |
+
torch._utils._rebuild_qtensor,
|
| 142 |
+
torch._utils._rebuild_tensor,
|
| 143 |
+
torch._utils._rebuild_tensor_v2,
|
| 144 |
+
torch._utils._rebuild_tensor_v3,
|
| 145 |
+
torch._utils._rebuild_sparse_tensor,
|
| 146 |
+
torch._utils._rebuild_meta_tensor_no_storage,
|
| 147 |
+
torch._utils._rebuild_nested_tensor,
|
| 148 |
+
torch._utils._rebuild_wrapper_subclass,
|
| 149 |
+
# Allowlisting this, but not allowlisting the numpy functions by default
|
| 150 |
+
# Reasoning is that we don't have control over the numpy functions, but
|
| 151 |
+
# this utility is provided by pytorch
|
| 152 |
+
torch._utils._rebuild_device_tensor_from_numpy,
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
# Unpickling machinery
|
| 157 |
+
@_functools.lru_cache(maxsize=1)
def _get_allowed_globals():
    """Build the static ``"module.name" -> object`` allowlist for Unpickler.

    Cached with lru_cache because its contents never change at runtime; the
    user-extensible portion lives in ``_get_user_allowed_globals`` instead.
    """
    rc: Dict[str, Any] = {
        "collections.OrderedDict": OrderedDict,
        "collections.Counter": Counter,
        "torch.nn.parameter.Parameter": torch.nn.Parameter,
        "torch.serialization._get_layout": torch.serialization._get_layout,
        "torch.Size": torch.Size,
        "torch.Tensor": torch.Tensor,
        "torch.device": torch.device,
        "_codecs.encode": encode,  # for bytes
        "builtins.bytearray": bytearray,  # for bytearray
    }
    # dtypes — both the legacy storage-typed ones and the newer additions.
    for dt in torch.storage._dtype_to_storage_type_map():
        rc[str(dt)] = dt
    for dt in torch.storage._new_dtypes():
        rc[str(dt)] = dt
    # Tensor classes.
    for klass in torch._tensor_classes:
        rc[f"{klass.__module__}.{klass.__name__}"] = klass
    # Storage classes: legacy typed storages are wrapped in a dummy
    # StorageType so no real legacy storage type is resolved.
    for storage_cls in torch._storage_classes:
        key = f"{storage_cls.__module__}.{storage_cls.__name__}"
        if storage_cls in (torch.storage.TypedStorage, torch.storage.UntypedStorage):
            rc[key] = storage_cls
        else:
            rc[key] = torch.serialization.StorageType(storage_cls.__name__)
    # Quantization schemes.
    for scheme in (
        torch.per_tensor_affine,
        torch.per_tensor_symmetric,
        torch.per_channel_affine,
        torch.per_channel_symmetric,
        torch.per_channel_affine_float_qparams,
    ):
        rc[str(scheme)] = scheme
    # torch._utils rebuild helpers.
    for fn in _tensor_rebuild_functions():
        rc[f"torch._utils.{fn.__name__}"] = fn

    # Handles Tensor subclasses and Tensors with attributes.
    # NOTE: it dispatches to the rebuild functions above for regular Tensors.
    rc["torch._tensor._rebuild_from_type_v2"] = torch._tensor._rebuild_from_type_v2
    return rc
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
class Unpickler:
|
| 207 |
+
def __init__(self, file, *, encoding: str = "bytes"):
|
| 208 |
+
self.encoding = encoding
|
| 209 |
+
self.readline = file.readline
|
| 210 |
+
self.read = file.read
|
| 211 |
+
self.memo: Dict[int, Any] = {}
|
| 212 |
+
self.proto: int = -1
|
| 213 |
+
|
| 214 |
+
def load(self):
|
| 215 |
+
"""Read a pickled object representation from the open file.
|
| 216 |
+
|
| 217 |
+
Return the reconstituted object hierarchy specified in the file.
|
| 218 |
+
"""
|
| 219 |
+
self.metastack = []
|
| 220 |
+
self.stack: List[Any] = []
|
| 221 |
+
self.append = self.stack.append
|
| 222 |
+
read = self.read
|
| 223 |
+
readline = self.readline
|
| 224 |
+
while True:
|
| 225 |
+
key = read(1)
|
| 226 |
+
if not key:
|
| 227 |
+
raise EOFError
|
| 228 |
+
assert isinstance(key, bytes_types)
|
| 229 |
+
# Risky operators
|
| 230 |
+
if key[0] == GLOBAL[0]:
|
| 231 |
+
module = readline()[:-1].decode("utf-8")
|
| 232 |
+
name = readline()[:-1].decode("utf-8")
|
| 233 |
+
# Patch since torch.save default protocol is 2
|
| 234 |
+
# users will be running this code in python > 3
|
| 235 |
+
if self.proto == 2:
|
| 236 |
+
if (module, name) in NAME_MAPPING:
|
| 237 |
+
module, name = NAME_MAPPING[(module, name)]
|
| 238 |
+
elif module in IMPORT_MAPPING:
|
| 239 |
+
module = IMPORT_MAPPING[module]
|
| 240 |
+
full_path = f"{module}.{name}"
|
| 241 |
+
if module in _blocklisted_modules:
|
| 242 |
+
raise UnpicklingError(
|
| 243 |
+
f"Trying to load unsupported GLOBAL {full_path} whose module {module} is blocked."
|
| 244 |
+
)
|
| 245 |
+
if full_path in _get_allowed_globals():
|
| 246 |
+
self.append(_get_allowed_globals()[full_path])
|
| 247 |
+
elif full_path in _get_user_allowed_globals():
|
| 248 |
+
self.append(_get_user_allowed_globals()[full_path])
|
| 249 |
+
else:
|
| 250 |
+
raise UnpicklingError(
|
| 251 |
+
f"Unsupported global: GLOBAL {full_path} was not an allowed global by default. "
|
| 252 |
+
f"Please use `torch.serialization.add_safe_globals([{name}])` to allowlist "
|
| 253 |
+
"this global if you trust this class/function."
|
| 254 |
+
)
|
| 255 |
+
elif key[0] == NEWOBJ[0]:
|
| 256 |
+
args = self.stack.pop()
|
| 257 |
+
cls = self.stack.pop()
|
| 258 |
+
if cls is torch.nn.Parameter:
|
| 259 |
+
self.append(torch.nn.Parameter(*args))
|
| 260 |
+
elif cls in _get_user_allowed_globals().values():
|
| 261 |
+
self.append(cls.__new__(cls, *args))
|
| 262 |
+
else:
|
| 263 |
+
raise UnpicklingError(
|
| 264 |
+
"Can only create new object for nn.Parameter or classes allowlisted "
|
| 265 |
+
f"via `add_safe_globals` but got {cls}"
|
| 266 |
+
)
|
| 267 |
+
elif key[0] == REDUCE[0]:
|
| 268 |
+
args = self.stack.pop()
|
| 269 |
+
func = self.stack[-1]
|
| 270 |
+
if (
|
| 271 |
+
func not in _get_allowed_globals().values()
|
| 272 |
+
and func not in _get_user_allowed_globals().values()
|
| 273 |
+
):
|
| 274 |
+
raise UnpicklingError(
|
| 275 |
+
f"Trying to call reduce for unrecognized function {func}"
|
| 276 |
+
)
|
| 277 |
+
self.stack[-1] = func(*args)
|
| 278 |
+
elif key[0] == BUILD[0]:
|
| 279 |
+
state = self.stack.pop()
|
| 280 |
+
inst = self.stack[-1]
|
| 281 |
+
if type(inst) is torch.Tensor:
|
| 282 |
+
# Legacy unpickling
|
| 283 |
+
inst.set_(*state)
|
| 284 |
+
elif type(inst) is torch.nn.Parameter:
|
| 285 |
+
inst.__setstate__(state)
|
| 286 |
+
elif type(inst) is OrderedDict:
|
| 287 |
+
inst.__dict__.update(state)
|
| 288 |
+
elif type(inst) in _get_user_allowed_globals().values():
|
| 289 |
+
if hasattr(inst, "__setstate__"):
|
| 290 |
+
inst.__setstate__(state)
|
| 291 |
+
else:
|
| 292 |
+
inst.__dict__.update(state)
|
| 293 |
+
else:
|
| 294 |
+
raise UnpicklingError(
|
| 295 |
+
"Can only build Tensor, Parameter, OrderedDict or types allowlisted "
|
| 296 |
+
f"via `add_safe_globals`, but got {type(inst)}"
|
| 297 |
+
)
|
| 298 |
+
# Stack manipulation
|
| 299 |
+
elif key[0] == APPEND[0]:
|
| 300 |
+
item = self.stack.pop()
|
| 301 |
+
list_obj = self.stack[-1]
|
| 302 |
+
if type(list_obj) is not list:
|
| 303 |
+
raise UnpicklingError(
|
| 304 |
+
f"Can only append to lists, but got {type(list_obj)}"
|
| 305 |
+
)
|
| 306 |
+
list_obj.append(item)
|
| 307 |
+
elif key[0] == APPENDS[0]:
|
| 308 |
+
items = self.pop_mark()
|
| 309 |
+
list_obj = self.stack[-1]
|
| 310 |
+
if type(list_obj) is not list:
|
| 311 |
+
raise UnpicklingError(
|
| 312 |
+
f"Can only extend lists, but got {type(list_obj)}"
|
| 313 |
+
)
|
| 314 |
+
list_obj.extend(items)
|
| 315 |
+
elif key[0] == SETITEM[0]:
|
| 316 |
+
(v, k) = (self.stack.pop(), self.stack.pop())
|
| 317 |
+
self.stack[-1][k] = v
|
| 318 |
+
elif key[0] == SETITEMS[0]:
|
| 319 |
+
items = self.pop_mark()
|
| 320 |
+
for i in range(0, len(items), 2):
|
| 321 |
+
self.stack[-1][items[i]] = items[i + 1]
|
| 322 |
+
elif key[0] == MARK[0]:
|
| 323 |
+
self.metastack.append(self.stack)
|
| 324 |
+
self.stack = []
|
| 325 |
+
self.append = self.stack.append
|
| 326 |
+
elif key[0] == TUPLE[0]:
|
| 327 |
+
items = self.pop_mark()
|
| 328 |
+
self.append(tuple(items))
|
| 329 |
+
elif key[0] == TUPLE1[0]:
|
| 330 |
+
self.stack[-1] = (self.stack[-1],)
|
| 331 |
+
elif key[0] == TUPLE2[0]:
|
| 332 |
+
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
|
| 333 |
+
elif key[0] == TUPLE3[0]:
|
| 334 |
+
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
|
| 335 |
+
# Basic types construction
|
| 336 |
+
elif key[0] == NONE[0]:
|
| 337 |
+
self.append(None)
|
| 338 |
+
elif key[0] == NEWFALSE[0]:
|
| 339 |
+
self.append(False)
|
| 340 |
+
elif key[0] == NEWTRUE[0]:
|
| 341 |
+
self.append(True)
|
| 342 |
+
elif key[0] == EMPTY_TUPLE[0]:
|
| 343 |
+
self.append(())
|
| 344 |
+
elif key[0] == EMPTY_LIST[0]:
|
| 345 |
+
self.append([])
|
| 346 |
+
elif key[0] == EMPTY_DICT[0]:
|
| 347 |
+
self.append({})
|
| 348 |
+
elif key[0] == EMPTY_SET[0]:
|
| 349 |
+
self.append(set())
|
| 350 |
+
elif key[0] == BININT[0]:
|
| 351 |
+
self.append(unpack("<i", read(4))[0])
|
| 352 |
+
elif key[0] == BININT1[0]:
|
| 353 |
+
self.append(self.read(1)[0])
|
| 354 |
+
elif key[0] == BININT2[0]:
|
| 355 |
+
self.append(unpack("<H", read(2))[0])
|
| 356 |
+
elif key[0] == BINFLOAT[0]:
|
| 357 |
+
self.append(unpack(">d", self.read(8))[0])
|
| 358 |
+
elif key[0] == BINUNICODE[0]:
|
| 359 |
+
strlen = unpack("<I", read(4))[0]
|
| 360 |
+
if strlen > maxsize:
|
| 361 |
+
raise UnpicklingError("String is too long")
|
| 362 |
+
strval = str(read(strlen), "utf-8", "surrogatepass")
|
| 363 |
+
self.append(strval)
|
| 364 |
+
elif key[0] == SHORT_BINSTRING[0]:
|
| 365 |
+
strlen = read(1)[0]
|
| 366 |
+
strdata = read(strlen)
|
| 367 |
+
if self.encoding != "bytes":
|
| 368 |
+
strdata = strdata.decode(self.encoding, "strict")
|
| 369 |
+
self.append(strdata)
|
| 370 |
+
elif key[0] == BINPERSID[0]:
|
| 371 |
+
pid = self.stack.pop()
|
| 372 |
+
# Only allow persistent load of storage
|
| 373 |
+
if type(pid) is not tuple and not type(pid) is not int:
|
| 374 |
+
raise UnpicklingError(
|
| 375 |
+
f"persistent_load id must be tuple or int, but got {type(pid)}"
|
| 376 |
+
)
|
| 377 |
+
if (
|
| 378 |
+
type(pid) is tuple
|
| 379 |
+
and len(pid) > 0
|
| 380 |
+
and torch.serialization._maybe_decode_ascii(pid[0]) != "storage"
|
| 381 |
+
):
|
| 382 |
+
raise UnpicklingError(
|
| 383 |
+
f"Only persistent_load of storage is allowed, but got {pid[0]}"
|
| 384 |
+
)
|
| 385 |
+
self.append(self.persistent_load(pid))
|
| 386 |
+
elif key[0] in [BINGET[0], LONG_BINGET[0]]:
|
| 387 |
+
idx = (read(1) if key[0] == BINGET[0] else unpack("<I", read(4)))[0]
|
| 388 |
+
self.append(self.memo[idx])
|
| 389 |
+
elif key[0] in [BINPUT[0], LONG_BINPUT[0]]:
|
| 390 |
+
i = (read(1) if key[0] == BINPUT[0] else unpack("<I", read(4)))[0]
|
| 391 |
+
if i < 0:
|
| 392 |
+
raise ValueError("negative argument")
|
| 393 |
+
self.memo[i] = self.stack[-1]
|
| 394 |
+
elif key[0] == LONG1[0]:
|
| 395 |
+
n = read(1)[0]
|
| 396 |
+
data = read(n)
|
| 397 |
+
self.append(decode_long(data))
|
| 398 |
+
# First and last deserializer ops
|
| 399 |
+
elif key[0] == PROTO[0]:
|
| 400 |
+
self.proto = read(1)[0]
|
| 401 |
+
if self.proto != 2:
|
| 402 |
+
warnings.warn(
|
| 403 |
+
f"Detected pickle protocol {self.proto} in the checkpoint, which was "
|
| 404 |
+
"not the default pickle protocol used by `torch.load` (2). The weights_only "
|
| 405 |
+
"Unpickler might not support all instructions implemented by this protocol, "
|
| 406 |
+
"please file an issue for adding support if you encounter this."
|
| 407 |
+
)
|
| 408 |
+
elif key[0] == STOP[0]:
|
| 409 |
+
rc = self.stack.pop()
|
| 410 |
+
return rc
|
| 411 |
+
else:
|
| 412 |
+
raise UnpicklingError(f"Unsupported operand {key[0]}")
|
| 413 |
+
|
| 414 |
+
# Return a list of items pushed in the stack after last MARK instruction.
|
| 415 |
+
def pop_mark(self):
|
| 416 |
+
items = self.stack
|
| 417 |
+
self.stack = self.metastack.pop()
|
| 418 |
+
self.append = self.stack.append
|
| 419 |
+
return items
|
| 420 |
+
|
| 421 |
+
def persistent_load(self, pid):
|
| 422 |
+
raise UnpicklingError("unsupported persistent id encountered")
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
def load(file, *, encoding: str = "ASCII"):
    """Module-level convenience wrapper mirroring ``pickle.load``."""
    return Unpickler(file, encoding=encoding).load()
|
pllava/lib/python3.10/site-packages/torch/autograd/__pycache__/function.cpython-310.pyc
ADDED
|
Binary file (32.2 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/autograd/__pycache__/gradcheck.cpython-310.pyc
ADDED
|
Binary file (61.1 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/autograd/__pycache__/graph.cpython-310.pyc
ADDED
|
Binary file (28.7 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_legacy.cpython-310.pyc
ADDED
|
Binary file (7.81 kB). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/autograd/__pycache__/variable.cpython-310.pyc
ADDED
|
Binary file (829 Bytes). View file
|
|
|
pllava/lib/python3.10/site-packages/torch/autograd/function.py
ADDED
|
@@ -0,0 +1,844 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import functools
|
| 3 |
+
import inspect
|
| 4 |
+
import itertools
|
| 5 |
+
import warnings
|
| 6 |
+
from collections import OrderedDict
|
| 7 |
+
from typing import Any, List, Optional, Tuple
|
| 8 |
+
from typing_extensions import deprecated
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
import torch._C as _C
|
| 12 |
+
import torch._functorch as _functorch
|
| 13 |
+
import torch.utils.hooks as hooks
|
| 14 |
+
from torch._C import _functions
|
| 15 |
+
from torch._functorch.autograd_function import custom_function_call
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
__all__ = [
|
| 19 |
+
"FunctionCtx",
|
| 20 |
+
"BackwardCFunction",
|
| 21 |
+
"FunctionMeta",
|
| 22 |
+
"Function",
|
| 23 |
+
"once_differentiable",
|
| 24 |
+
"InplaceFunction",
|
| 25 |
+
"NestedIOFunction",
|
| 26 |
+
]
|
| 27 |
+
|
| 28 |
+
# Unique id provider for each class inheriting from Function
|
| 29 |
+
# This is incremented in FunctionMeta during class definition
|
| 30 |
+
AUTOGRAD_FUNCTION_COUNTER = itertools.count()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# Formerly known as: _ContextMethodMixin
|
| 34 |
+
class FunctionCtx:
|
| 35 |
+
def save_for_backward(self, *tensors: torch.Tensor):
|
| 36 |
+
r"""Save given tensors for a future call to :func:`~Function.backward`.
|
| 37 |
+
|
| 38 |
+
``save_for_backward`` should be called at most once, in either the
|
| 39 |
+
:func:`setup_context` or :func:`forward` methods, and only with tensors.
|
| 40 |
+
|
| 41 |
+
All tensors intended to be used in the backward pass should be saved
|
| 42 |
+
with ``save_for_backward`` (as opposed to directly on ``ctx``) to prevent
|
| 43 |
+
incorrect gradients and memory leaks, and enable the application of saved
|
| 44 |
+
tensor hooks. See :class:`torch.autograd.graph.saved_tensors_hooks`.
|
| 45 |
+
|
| 46 |
+
Note that if intermediary tensors, tensors that are neither inputs
|
| 47 |
+
nor outputs of :func:`forward`, are saved for backward, your custom Function
|
| 48 |
+
may not support double backward.
|
| 49 |
+
Custom Functions that do not support double backward should decorate their
|
| 50 |
+
:func:`backward` method with ``@once_differentiable`` so that performing
|
| 51 |
+
double backward raises an error. If you'd like to support double backward,
|
| 52 |
+
you can either recompute intermediaries based on the inputs during backward
|
| 53 |
+
or return the intermediaries as the outputs of the custom Function. See the
|
| 54 |
+
`double backward tutorial <https://pytorch.org/tutorials/intermediate/custom_function_double_backward_tutorial.html>`_
|
| 55 |
+
for more details.
|
| 56 |
+
|
| 57 |
+
In :func:`backward`, saved tensors can be accessed through the :attr:`saved_tensors`
|
| 58 |
+
attribute. Before returning them to the user, a check is made to ensure
|
| 59 |
+
they weren't used in any in-place operation that modified their content.
|
| 60 |
+
|
| 61 |
+
Arguments can also be ``None``. This is a no-op.
|
| 62 |
+
|
| 63 |
+
See :ref:`extending-autograd` for more details on how to use this method.
|
| 64 |
+
|
| 65 |
+
Example::
|
| 66 |
+
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
|
| 67 |
+
>>> class Func(Function):
|
| 68 |
+
>>> @staticmethod
|
| 69 |
+
>>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
|
| 70 |
+
>>> w = x * z
|
| 71 |
+
>>> out = x * y + y * z + w * y
|
| 72 |
+
>>> ctx.save_for_backward(x, y, w, out)
|
| 73 |
+
>>> ctx.z = z # z is not a tensor
|
| 74 |
+
>>> return out
|
| 75 |
+
>>>
|
| 76 |
+
>>> @staticmethod
|
| 77 |
+
>>> @once_differentiable
|
| 78 |
+
>>> def backward(ctx, grad_out):
|
| 79 |
+
>>> x, y, w, out = ctx.saved_tensors
|
| 80 |
+
>>> z = ctx.z
|
| 81 |
+
>>> gx = grad_out * (y + y * z)
|
| 82 |
+
>>> gy = grad_out * (x + z + w)
|
| 83 |
+
>>> gz = None
|
| 84 |
+
>>> return gx, gy, gz
|
| 85 |
+
>>>
|
| 86 |
+
>>> a = torch.tensor(1., requires_grad=True, dtype=torch.double)
|
| 87 |
+
>>> b = torch.tensor(2., requires_grad=True, dtype=torch.double)
|
| 88 |
+
>>> c = 4
|
| 89 |
+
>>> d = Func.apply(a, b, c)
|
| 90 |
+
|
| 91 |
+
"""
|
| 92 |
+
self.to_save = tensors
|
| 93 |
+
|
| 94 |
+
def save_for_forward(self, *tensors: torch.Tensor):
|
| 95 |
+
r"""Save given tensors for a future call to :func:`~Function.jvp`.
|
| 96 |
+
|
| 97 |
+
``save_for_forward`` should be called at most once, in either the
|
| 98 |
+
:func:`setup_context` or :func:`forward` methods, and all arguments
|
| 99 |
+
should be tensors.
|
| 100 |
+
|
| 101 |
+
In :func:`jvp`, saved objects can be accessed through the :attr:`saved_tensors`
|
| 102 |
+
attribute.
|
| 103 |
+
|
| 104 |
+
Arguments can also be ``None``. This is a no-op.
|
| 105 |
+
|
| 106 |
+
See :ref:`extending-autograd` for more details on how to use this method.
|
| 107 |
+
|
| 108 |
+
Example::
|
| 109 |
+
>>> # xdoctest: +SKIP
|
| 110 |
+
>>> class Func(torch.autograd.Function):
|
| 111 |
+
>>> @staticmethod
|
| 112 |
+
>>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
|
| 113 |
+
>>> ctx.save_for_backward(x, y)
|
| 114 |
+
>>> ctx.save_for_forward(x, y)
|
| 115 |
+
>>> ctx.z = z
|
| 116 |
+
>>> return x * y * z
|
| 117 |
+
>>>
|
| 118 |
+
>>> @staticmethod
|
| 119 |
+
>>> def jvp(ctx, x_t, y_t, _):
|
| 120 |
+
>>> x, y = ctx.saved_tensors
|
| 121 |
+
>>> z = ctx.z
|
| 122 |
+
>>> return z * (y * x_t + x * y_t)
|
| 123 |
+
>>>
|
| 124 |
+
>>> @staticmethod
|
| 125 |
+
>>> def vjp(ctx, grad_out):
|
| 126 |
+
>>> x, y = ctx.saved_tensors
|
| 127 |
+
>>> z = ctx.z
|
| 128 |
+
>>> return z * grad_out * y, z * grad_out * x, None
|
| 129 |
+
>>>
|
| 130 |
+
>>> a = torch.tensor(1., requires_grad=True, dtype=torch.double)
|
| 131 |
+
>>> t = torch.tensor(1., dtype=torch.double)
|
| 132 |
+
>>> b = torch.tensor(2., requires_grad=True, dtype=torch.double)
|
| 133 |
+
>>> c = 4
|
| 134 |
+
>>>
|
| 135 |
+
>>> with fwAD.dual_level():
|
| 136 |
+
>>> a_dual = fwAD.make_dual(a, t)
|
| 137 |
+
>>> d = Func.apply(a_dual, b, c)
|
| 138 |
+
|
| 139 |
+
"""
|
| 140 |
+
for tensor in tensors:
|
| 141 |
+
assert isinstance(tensor, torch.Tensor) or tensor is None, (
|
| 142 |
+
"save_for_forward expects all arguments to be tensors; you should "
|
| 143 |
+
"save non-tensors as attributes on ctx."
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
self.saved_for_forward = tensors
|
| 147 |
+
|
| 148 |
+
def mark_dirty(self, *args: torch.Tensor):
    r"""Mark given tensors as modified in an in-place operation.

    Call this at most once, from either the :func:`setup_context` or
    :func:`forward` methods; every argument must be an input tensor.

    Each tensor that :func:`forward` modifies in-place must be handed to
    this function so that the autograd engine's correctness checks work.
    It does not matter whether the call happens before or after the
    in-place modification.

    Examples::
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
        >>> class Inplace(Function):
        >>>     @staticmethod
        >>>     def forward(ctx, x):
        >>>         x_npy = x.numpy() # x_npy shares storage with x
        >>>         x_npy += 1
        >>>         ctx.mark_dirty(x)
        >>>         return x
        >>>
        >>>     @staticmethod
        >>>     @once_differentiable
        >>>     def backward(ctx, grad_output):
        >>>         return grad_output
        >>>
        >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double).clone()
        >>> b = a * a
        >>> Inplace.apply(a)  # This would lead to wrong gradients!
        >>>                   # but the engine would not know unless we mark_dirty
        >>> # xdoctest: +SKIP
        >>> b.backward()  # RuntimeError: one of the variables needed for gradient
        >>>               # computation has been modified by an inplace operation

    """
    # Recorded for the autograd engine's in-place correctness checks.
    self.dirty_tensors = args
|
| 184 |
+
|
| 185 |
+
@deprecated(
    "`mark_shared_storage` is deprecated. "
    "Tensors with shared storages are automatically tracked. "
    "Note that calls to `set_()` are not tracked",
    category=FutureWarning,
)
def mark_shared_storage(self, *pairs):
    # Deliberate no-op kept only for backward compatibility: shared-storage
    # tracking is handled automatically by autograd now (per the deprecation
    # message above).
    pass
|
| 193 |
+
|
| 194 |
+
def mark_non_differentiable(self, *args: torch.Tensor):
    r"""Mark outputs as non-differentiable.

    Call this at most once, from either the :func:`setup_context` or
    :func:`forward` methods; every argument must be a tensor output.

    Marked outputs will not require gradients, which makes the backward
    computation more efficient. :meth:`~Function.backward` must still
    accept one gradient per output, but the gradient for a marked output
    is always a zero tensor with the same shape as that output.

    This is used e.g. for indices returned from a sort. See example::
        >>> class Func(Function):
        >>>     @staticmethod
        >>>     def forward(ctx, x):
        >>>         sorted, idx = x.sort()
        >>>         ctx.mark_non_differentiable(idx)
        >>>         ctx.save_for_backward(x, idx)
        >>>         return sorted, idx
        >>>
        >>>     @staticmethod
        >>>     @once_differentiable
        >>>     def backward(ctx, g1, g2):  # still need to accept g2
        >>>         x, idx = ctx.saved_tensors
        >>>         grad_input = torch.zeros_like(x)
        >>>         grad_input.index_add_(0, idx, g1)
        >>>         return grad_input

    """
    # Consumed by the autograd engine when wrapping forward's outputs.
    self.non_differentiable = args
|
| 225 |
+
|
| 226 |
+
def set_materialize_grads(self, value: bool):
    r"""Set whether to materialize grad tensors. Default is ``True``.

    Call this only from either the :func:`setup_context` or
    :func:`forward` methods.

    When ``True``, undefined grad tensors are expanded into tensors full
    of zeros before :func:`backward` and :func:`jvp` are invoked; when
    ``False``, those methods may receive ``None`` for undefined grads and
    must handle it themselves.

    Example::
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
        >>> class SimpleFunc(Function):
        >>>     @staticmethod
        >>>     def forward(ctx, x):
        >>>         return x.clone(), x.clone()
        >>>
        >>>     @staticmethod
        >>>     @once_differentiable
        >>>     def backward(ctx, g1, g2):
        >>>         return g1 + g2  # No check for None necessary
        >>>
        >>> # We modify SimpleFunc to handle non-materialized grad outputs
        >>> class Func(Function):
        >>>     @staticmethod
        >>>     def forward(ctx, x):
        >>>         ctx.set_materialize_grads(False)
        >>>         ctx.save_for_backward(x)
        >>>         return x.clone(), x.clone()
        >>>
        >>>     @staticmethod
        >>>     @once_differentiable
        >>>     def backward(ctx, g1, g2):
        >>>         x, = ctx.saved_tensors
        >>>         grad_input = torch.zeros_like(x)
        >>>         if g1 is not None:  # We must check for None now
        >>>             grad_input += g1
        >>>         if g2 is not None:
        >>>             grad_input += g2
        >>>         return grad_input
        >>>
        >>> a = torch.tensor(1., requires_grad=True)
        >>> b, _ = Func.apply(a)  # induces g2 to be undefined

    """
    # Read by the autograd engine before dispatching to backward/jvp.
    self.materialize_grads = value
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
# DO NOT USE: This is only defined to be able to load old serialized models
# (pickled objects refer to classes by their historical names, so the old
# alias must keep resolving to FunctionCtx).
_ContextMethodMixin = FunctionCtx
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
class _HookMixin:
    # Shared helper for registering backward hooks on autograd nodes.

    @staticmethod
    def _register_hook(backward_hooks, hook):
        # Lazily create the hook dict on first registration; an OrderedDict
        # preserves hook invocation order.
        if backward_hooks is None:
            backward_hooks = OrderedDict()
        # RemovableHandle holds a weak reference to the dict so removal works
        # without keeping the hooks alive.
        handle = hooks.RemovableHandle(backward_hooks)
        backward_hooks[handle.id] = hook
        return backward_hooks, handle
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
class BackwardCFunction(_C._FunctionBase, FunctionCtx, _HookMixin):
    r"""
    This class is used for internal autograd work. Do not use.
    """

    def apply(self, *args):
        r"""
        Apply method used when executing this Node during the backward
        """
        # _forward_cls is defined by derived class
        fwd_cls = self._forward_cls  # type: ignore[attr-defined]
        # The user should define either backward or vjp but never both.
        overrides_backward = fwd_cls.backward is not Function.backward
        overrides_vjp = fwd_cls.vjp is not Function.vjp
        if overrides_backward and overrides_vjp:
            raise RuntimeError(
                "Implementing both 'backward' and 'vjp' for a custom "
                "Function is not allowed. You should only implement one "
                "of them."
            )
        user_fn = fwd_cls.vjp if overrides_vjp else fwd_cls.backward
        return user_fn(self, *args)

    def apply_jvp(self, *args):
        r"""
        Apply method used when executing forward mode AD during the forward
        """
        # _forward_cls is defined by derived class
        return self._forward_cls.jvp(self, *args)  # type: ignore[attr-defined]

    def _compiled_autograd_key(self):
        # Delegate to the user-facing Function class so compiled autograd can
        # key graphs by the originating Function.
        return self._forward_cls._compiled_autograd_key(self)  # type: ignore[attr-defined]
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
class FunctionMeta(type):
    """Function metaclass.

    This metaclass sets up the following properties:
        _backward_cls: The Function class corresponding to the differentiated
            version of this function (which is generated on the fly by this
            metaclass).
    """

    def __init__(cls, name, bases, attrs):
        # Synthesize a companion "<Name>Backward" node class for this Function.
        derived_backward = type(
            name + "Backward", (BackwardCFunction,), {"_forward_cls": cls}
        )
        # Unique id lets compiled autograd distinguish Function classes.
        derived_backward._autograd_function_id = next(AUTOGRAD_FUNCTION_COUNTER)  # type: ignore[attr-defined]
        derived_backward._compiled_autograd_should_lift = attrs.get(  # type: ignore[attr-defined]
            "_compiled_autograd_should_lift", True
        )
        cls._backward_cls = derived_backward

        super().__init__(name, bases, attrs)
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class _SingleLevelFunction(
    _C._FunctionBase, FunctionCtx, _HookMixin, metaclass=FunctionMeta
):
    @staticmethod
    def forward(*args: Any, **kwargs: Any) -> Any:
        r"""Define the forward of the custom autograd Function.

        All subclasses must override this method. There are two supported
        ways to define it:

        Usage 1 (Combined forward and ctx)::

            @staticmethod
            def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
                pass

        - The context ``ctx`` is the first argument, followed by any number
          of further arguments (tensors or other types).
        - See :ref:`combining-forward-context` for more details

        Usage 2 (Separate forward and ctx)::

            @staticmethod
            def forward(*args: Any, **kwargs: Any) -> Any:
                pass

            @staticmethod
            def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
                pass

        - Here the forward takes no ctx argument. Instead, you must also
          override the :meth:`torch.autograd.Function.setup_context`
          staticmethod to populate the ``ctx`` object: ``output`` is the
          forward's result and ``inputs`` is a tuple of its arguments.
        - See :ref:`extending-autograd` for more details

        The context can hold arbitrary data needed during the backward pass.
        Tensors should not be stored directly on ``ctx`` (though this is not
        currently enforced for backward compatibility). Instead, save tensors
        with :func:`ctx.save_for_backward` when they are needed in
        ``backward`` (equivalently, ``vjp``), or with
        :func:`ctx.save_for_forward` when they are needed in ``jvp``.
        """
        raise NotImplementedError(
            "You must implement the forward function for custom autograd.Function."
        )

    @staticmethod
    def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> Any:
        r"""There are two ways to define the forward pass of an autograd.Function.

        Either:

        1. Override forward with the signature ``forward(ctx, *args, **kwargs)``
           and leave ``setup_context`` alone; setting up the ctx for backward
           then happens inside the ``forward``.
        2. Override forward with the signature ``forward(*args, **kwargs)`` and
           also override ``setup_context``; setting up the ctx for backward
           then happens here (as opposed to inside the ``forward``).

        See :meth:`torch.autograd.Function.forward` and :ref:`extending-autograd` for more details.
        """
        raise NotImplementedError("setup_context is not implemented.")

    @staticmethod
    def backward(ctx: Any, *grad_outputs: Any) -> Any:
        r"""Define a formula for differentiating the operation with backward mode automatic differentiation.

        All subclasses must override this method. (Defining it is equivalent
        to defining the ``vjp`` function.)

        The first argument is the context :attr:`ctx`; after it come as many
        gradients as :func:`forward` returned outputs (``None`` is passed for
        any non-tensor output of the forward function), each being the
        gradient w.r.t. the corresponding output. The method must return as
        many values as there were inputs to :func:`forward`, each being the
        gradient w.r.t. the corresponding input; for an input that is not a
        Tensor, or a Tensor not requiring grad, ``None`` may be returned.

        Tensors saved during the forward pass can be retrieved from
        :attr:`ctx`. It also carries :attr:`ctx.needs_input_grad`, a tuple of
        booleans — one per input — telling whether that input needs a
        gradient. E.g., ``ctx.needs_input_grad[0] = True`` when the first
        input to :func:`forward` needs the gradient computed w.r.t. the
        output.
        """
        raise NotImplementedError(
            "You must implement either the backward or vjp method for "
            "your custom autograd.Function to use it with backward "
            "mode AD."
        )

    # vjp and backward are alias of each other
    vjp = backward

    @staticmethod
    def jvp(ctx: Any, *grad_inputs: Any) -> Any:
        r"""Define a formula for differentiating the operation with forward mode automatic differentiation.

        All subclasses must override this method. The first argument is the
        context :attr:`ctx`; after it come as many tangents as
        :func:`forward` received inputs (``None`` is passed for non-tensor
        inputs of the forward function), each being the gradient w.r.t. the
        corresponding input. The method must return as many tensors as there
        were outputs of :func:`forward`, each being the gradient w.r.t. the
        corresponding output; when an output is not a Tensor, or the function
        is not differentiable with respect to it, ``None`` may be returned
        for that gradient.

        Any value can be passed from the forward into this function via the
        :attr:`ctx` object.
        """
        raise NotImplementedError(
            "You must implement the jvp function for custom "
            "autograd.Function to use it with forward mode AD."
        )
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
class Function(_SingleLevelFunction):
    r"""Base class to create custom `autograd.Function`.

    Subclass this class and implement the :meth:`forward` and
    :meth:`backward` static methods; then invoke your custom op in the
    forward pass through the class method ``apply`` — never by calling
    :meth:`forward` directly.

    To ensure correctness and best performance, make sure you are calling the
    correct methods on ``ctx`` and validating your backward function using
    :func:`torch.autograd.gradcheck`.

    See :ref:`extending-autograd` for more details on how to use this class.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
        >>> class Exp(Function):
        >>>     @staticmethod
        >>>     def forward(ctx, i):
        >>>         result = i.exp()
        >>>         ctx.save_for_backward(result)
        >>>         return result
        >>>
        >>>     @staticmethod
        >>>     def backward(ctx, grad_output):
        >>>         result, = ctx.saved_tensors
        >>>         return grad_output * result
        >>>
        >>> # Use it by calling the apply method:
        >>> # xdoctest: +SKIP
        >>> output = Exp.apply(input)
    """

    def __init__(self, *args, **kwargs):
        # Instantiating is legacy usage — everything on a Function is static.
        warnings.warn(
            f"{self.__class__} should not be instantiated. Methods on autograd functions"
            "are all static, so you should invoke them on the class itself. "
            "Instantiating an autograd function will raise an "
            "error in a future version of PyTorch.",
            DeprecationWarning,
            stacklevel=2,
        )

    def __call__(self, *args, **kwargs):
        raise RuntimeError(
            "Legacy autograd function with non-static forward method is deprecated. "
            "Please use new-style autograd function with static forward method. "
            "(Example: https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function)"
        )

    """
    Bool that specifies if PyTorch should attempt to autogenerate
    :func:`torch.vmap` support for this autograd.Function. You may set this to
    True only if this autograd.Function's forward, backward, and jvp (if they
    exist) are written using PyTorch operations; otherwise, please override
    :meth:`torch.autograd.Function.vmap` to add support for :func:`torch.vmap`.

    Please see :ref:`func-autograd-function` for more details.
    """
    generate_vmap_rule = False

    @staticmethod
    def vmap(info, in_dims, *args):
        r"""Define the behavior for this autograd.Function underneath :func:`torch.vmap`.

        For a :func:`torch.autograd.Function` to support :func:`torch.vmap`,
        either override this static method or set ``generate_vmap_rule`` to
        ``True`` (you may not do both).

        If you choose to override this staticmethod, it must accept:

        - an ``info`` object as the first argument: ``info.batch_size`` is
          the size of the dimension being vmapped over, and
          ``info.randomness`` is the randomness option passed to
          :func:`torch.vmap`.
        - an ``in_dims`` tuple as the second argument: one ``Optional[int]``
          per entry of ``args`` — ``None`` when that arg is not a Tensor or
          is not being vmapped over, otherwise the integer dimension of the
          Tensor being vmapped over.
        - ``*args``, which is the same as the args to :meth:`~Function.forward`.

        It must return a ``(output, out_dims)`` tuple. ``out_dims`` mirrors
        the structure of ``output`` and holds one ``out_dim`` per output,
        specifying whether that output carries the vmapped dimension and at
        which index.

        Please see :ref:`func-autograd-function` for more details.
        """
        raise NotImplementedError(
            "To use autograd.Function with vmap, you must either override the "
            "vmap staticmethod or set generate_vmap_rule=True."
        )

    @classmethod
    def apply(cls, *args, **kwargs):
        def _bind_with_defaults(func, *fn_args, **fn_kwargs):
            # Fold kwargs and defaults into a flat positional tuple.
            bound = inspect.signature(func).bind(*fn_args, **fn_kwargs)
            bound.apply_defaults()
            return bound.args

        setup_ctx_overridden = _is_setup_context_defined(cls.setup_context)
        if setup_ctx_overridden:
            args = _bind_with_defaults(cls.forward, *args, **kwargs)

        if not torch._C._are_functorch_transforms_active():
            # See NOTE: [functorch vjp and autograd interaction]
            args = _functorch.utils.unwrap_dead_wrappers(args)
            return super().apply(*args, **kwargs)  # type: ignore[misc]

        # functorch transforms are active: setup_context is mandatory.
        if not setup_ctx_overridden:
            raise RuntimeError(
                "In order to use an autograd.Function with functorch transforms "
                "(vmap, grad, jvp, jacrev, ...), it must override the setup_context "
                "staticmethod. For more details, please see "
                "https://pytorch.org/docs/main/notes/extending.func.html"
            )

        return custom_function_call(cls, *args, **kwargs)

    @staticmethod
    def _compiled_autograd_key(ctx):
        # Compiled autograd uses this tuple to key graphs by Function class.
        return (ctx._autograd_function_id,)
|
| 590 |
+
|
| 591 |
+
|
| 592 |
+
def _is_setup_context_defined(fn):
    # A Function subclass "defines" setup_context iff it overrides the base
    # implementation inherited from _SingleLevelFunction.
    return fn != _SingleLevelFunction.setup_context
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
def once_differentiable(fn):
    """Decorator for backward/vjp implementations that cannot be
    differentiated a second time: the wrapped function runs under
    ``torch.no_grad()``, and its outputs are rigged so that a second
    backward raises an error instead of silently producing wrong grads."""

    @functools.wraps(fn)
    def wrapper(ctx, *args):
        with torch.no_grad():
            outputs = fn(ctx, *args)

        # If any of the inputs have requires_grad=True, we force the outputs
        # to have requires_grad=True but point to a grad_fn which throws an
        # error message during (double) back-propagation.
        # XXX: this is only an approximation of requires_grad - there's no way
        # to figure out if fn didn't use ctx.saved_tensors and as a result
        # some Tensors might require grad, even if no args do.
        # Unfortunately, this leads to unexpected error messages ("no nodes
        # require computing gradients"), but I don't have a better idea.
        # These functions would raise an error in backward anyway.
        needs_error_node = torch.is_grad_enabled() and any(
            isinstance(a, torch.Tensor) and a.requires_grad for a in args
        )
        if not needs_error_node:
            return outputs

        if not isinstance(outputs, tuple):
            outputs = (outputs,)

        err_fn = _functions.DelayedError(
            b"trying to differentiate twice a function that was marked "
            b"with @once_differentiable",
            len(outputs),
        )

        # Create aliases of each output that has requires_grad=True. We need
        # at least one of the inputs to err_fn to require grad so that the
        # output will have a grad_fn.
        def alias_requiring_grad(var):
            if var is None:
                return var
            alias = var.detach()
            alias.requires_grad = True
            return alias

        return err_fn(*[alias_requiring_grad(out) for out in outputs])

    return wrapper
|
| 641 |
+
|
| 642 |
+
|
| 643 |
+
class InplaceFunction(Function):
    r"""
    This class is here only for backward compatibility reasons.
    Use :class:`Function` instead of this for any new use case.
    """

    def __init__(self, inplace=False):
        super().__init__()
        # Legacy flag indicating whether the op mutates its input in place.
        self.inplace = inplace
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
def _nested_map(condition, fn, condition_msg=None):
|
| 655 |
+
def _map(obj):
|
| 656 |
+
if condition(obj):
|
| 657 |
+
return fn(obj)
|
| 658 |
+
elif obj is None:
|
| 659 |
+
return None
|
| 660 |
+
elif isinstance(obj, (list, tuple)):
|
| 661 |
+
mapped = (_map(x) for x in obj)
|
| 662 |
+
if hasattr(obj, "_fields"):
|
| 663 |
+
# obj is namedtuple
|
| 664 |
+
return type(obj)(*mapped)
|
| 665 |
+
return type(obj)(mapped)
|
| 666 |
+
elif isinstance(obj, dict):
|
| 667 |
+
return {x: _map(obj[x]) for x in obj}
|
| 668 |
+
else:
|
| 669 |
+
raise ValueError(
|
| 670 |
+
"Auto nesting doesn't know how to process "
|
| 671 |
+
"an input object of type "
|
| 672 |
+
+ torch.typename(obj)
|
| 673 |
+
+ (
|
| 674 |
+
". Accepted types: " + condition_msg + ", or lists/tuples of them"
|
| 675 |
+
if condition_msg
|
| 676 |
+
else ""
|
| 677 |
+
)
|
| 678 |
+
)
|
| 679 |
+
|
| 680 |
+
return _map
|
| 681 |
+
|
| 682 |
+
|
| 683 |
+
def _jit_unwrap_structured(obj):
|
| 684 |
+
if hasattr(obj, "_jit_unwrap"):
|
| 685 |
+
return obj._jit_unwrap()
|
| 686 |
+
return obj
|
| 687 |
+
|
| 688 |
+
|
| 689 |
+
def _iter_filter(condition, allow_unknown=False, condition_msg=None, conversion=None):
|
| 690 |
+
def _iter(obj):
|
| 691 |
+
if conversion is not None:
|
| 692 |
+
obj = conversion(obj)
|
| 693 |
+
if condition(obj):
|
| 694 |
+
yield obj
|
| 695 |
+
elif obj is None:
|
| 696 |
+
return
|
| 697 |
+
elif isinstance(obj, (list, tuple)):
|
| 698 |
+
for o in obj:
|
| 699 |
+
yield from _iter(o)
|
| 700 |
+
elif isinstance(obj, dict):
|
| 701 |
+
# We only accept primitive key types, so we needn't inspect them
|
| 702 |
+
for o in obj.values():
|
| 703 |
+
yield from _iter(o)
|
| 704 |
+
elif allow_unknown:
|
| 705 |
+
yield obj
|
| 706 |
+
else:
|
| 707 |
+
raise ValueError(
|
| 708 |
+
"Auto nesting doesn't know how to process "
|
| 709 |
+
"an input object of type "
|
| 710 |
+
+ torch.typename(obj)
|
| 711 |
+
+ (
|
| 712 |
+
". Accepted types: " + condition_msg + ", or lists/tuples of them"
|
| 713 |
+
if condition_msg
|
| 714 |
+
else ""
|
| 715 |
+
)
|
| 716 |
+
)
|
| 717 |
+
|
| 718 |
+
return _iter
|
| 719 |
+
|
| 720 |
+
|
| 721 |
+
def _unflatten(input, proto):
|
| 722 |
+
# unflatten a list or tuple input into a nested list/tuple structure
|
| 723 |
+
# specified by proto
|
| 724 |
+
def unflatten_helper(input, proto):
|
| 725 |
+
res: List[Optional[torch.Tensor]] = []
|
| 726 |
+
if hasattr(proto, "_jit_wrap"):
|
| 727 |
+
return proto._jit_wrap(input)
|
| 728 |
+
if not isinstance(proto, (list, tuple)):
|
| 729 |
+
return input[0], input[1:]
|
| 730 |
+
for e in proto:
|
| 731 |
+
if e is None:
|
| 732 |
+
res.append(e)
|
| 733 |
+
else:
|
| 734 |
+
res_e, input = unflatten_helper(input, e)
|
| 735 |
+
res.append(res_e)
|
| 736 |
+
return type(proto)(res), input
|
| 737 |
+
|
| 738 |
+
return unflatten_helper(input, proto)[0]
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
# Prebuilt walkers over arbitrarily nested list/tuple/dict structures.

# Yields every JIT Value (or None leaf) in a nested structure.
_iter_jit_values = _iter_filter(
    lambda o: o is None or isinstance(o, torch._C.Value),
    condition_msg="jit's Values or None",
)
# Yields every Tensor in a nested structure, unwrapping objects that
# provide _jit_unwrap first.
_iter_tensors = _iter_filter(
    lambda x: isinstance(x, torch.Tensor),
    condition_msg="Tensors",
    conversion=_jit_unwrap_structured,
)
# Like _iter_tensors, but yields unknown leaf objects through instead of
# raising on them.
_iter_tensors_permissive = _iter_filter(
    lambda x: isinstance(x, torch.Tensor),
    allow_unknown=True,
    condition_msg="Tensors (permissive)",
)
# Yields every Tensor or None leaf in a nested structure.
_iter_None_tensors = _iter_filter(
    lambda o: o is None or isinstance(o, torch.Tensor), condition_msg="Tensors or None"
)
# Maps every Tensor leaf in a nested structure to its `.data`.
_map_tensor_data = _nested_map(
    lambda x: isinstance(x, torch.Tensor), lambda o: o.data, condition_msg="Tensors"
)
|
| 761 |
+
|
| 762 |
+
|
| 763 |
+
class NestedIOFunction(Function):
    r"""
    This class is here only for backward compatibility reasons.
    Use :class:`Function` instead of this for any new use case.

    It adapts the flat-tensor Function machinery to forwards/backwards that
    accept and return arbitrarily nested structures of tensors: inputs and
    outputs are flattened before hitting the base class and re-nested on the
    way out.
    """

    # The 'type: ignore' statements are needed here because these functions are declared as '@staticmethod' in the
    # superclass (Function) but are instance methods here, which mypy reports as incompatible.

    def _do_forward(self, *input):
        # Flatten the (possibly nested) inputs, run the flat forward, then
        # restore the caller's nesting structure on the outputs.
        self._nested_input = input
        flat_input = tuple(_iter_tensors(input))
        flat_output = super()._do_forward(*flat_input)  # type: ignore[misc]
        # Fix: the original bound self._nested_output to an unused local
        # (`nested_output`); use the attribute directly as the prototype.
        return _unflatten(flat_output, self._nested_output)

    def _do_backward(self, gradients, retain_variables):
        # Run the flat backward; drop nesting bookkeeping unless the graph
        # must be kept for another backward pass.
        self.retain_variables = retain_variables
        result = super()._do_backward(gradients, retain_variables)  # type: ignore[misc]
        if not retain_variables:
            del self._nested_output
            del self._to_save_nested
        return result

    def backward(self, *gradients: Any) -> Any:  # type: ignore[override]
        r"""
        Shared backward utility.

        Re-nests the incoming flat gradients to match the forward's output
        structure, calls the user-defined :meth:`backward_extended`, and
        flattens the resulting gradients (Tensors or None) back out.
        """
        nested_gradients = _unflatten(gradients, self._nested_output)
        result = self.backward_extended(*nested_gradients)  # type: ignore[func-returns-value]
        return tuple(_iter_None_tensors(result))

    __call__ = _do_forward

    def forward(self, *args: Any) -> Any:  # type: ignore[override]
        r"""
        Shared forward utility.

        Calls the user-defined :meth:`forward_extended` on the nested inputs
        (via their ``.data``), records the nested output structure for the
        backward pass, and returns the flattened tensor outputs.
        """
        nested_tensors = _map_tensor_data(self._nested_input)
        result = self.forward_extended(*nested_tensors)  # type: ignore[func-returns-value]
        del self._nested_input
        self._nested_output = result
        return tuple(_iter_tensors(result))

    def save_for_backward(self, *args: Any) -> None:
        r"""
        See :meth:`Function.save_for_backward`.
        """
        # Flat tensors go to the C++ machinery; the nested structure is kept
        # so saved_tensors can re-nest them.
        self.to_save = tuple(_iter_tensors(args))
        self._to_save_nested = args

    @property
    def saved_tensors(self):
        r"""
        See :meth:`Function.saved_tensors`.
        """
        flat_tensors = super().saved_tensors  # type: ignore[misc]
        return _unflatten(flat_tensors, self._to_save_nested)

    def mark_dirty(self, *args: Any, **kwargs: Any) -> None:
        r"""
        See :meth:`Function.mark_dirty`.
        """
        self.dirty_tensors = tuple(_iter_tensors((args, kwargs)))

    def mark_non_differentiable(self, *args: Any, **kwargs: Any) -> None:
        r"""
        See :meth:`Function.mark_non_differentiable`.
        """
        self.non_differentiable = tuple(_iter_tensors((args, kwargs)))

    def forward_extended(self, *input: Any) -> None:
        r"""
        User defined forward.
        """
        raise NotImplementedError

    def backward_extended(self, *grad_output: Any) -> None:
        r"""
        User defined backward.
        """
        raise NotImplementedError
|