Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/INSTALLER +1 -0
- lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/LICENSE +21 -0
- lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/METADATA +111 -0
- lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/RECORD +42 -0
- lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/REQUESTED +0 -0
- lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/WHEEL +5 -0
- lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/top_level.txt +1 -0
- lib/python3.10/site-packages/optax/__init__.py +349 -0
- lib/python3.10/site-packages/optax/_src/alias.py +883 -0
- lib/python3.10/site-packages/optax/_src/alias_test.py +186 -0
- lib/python3.10/site-packages/optax/_src/base.py +233 -0
- lib/python3.10/site-packages/optax/_src/combine_test.py +152 -0
- lib/python3.10/site-packages/optax/_src/constrain.py +97 -0
- lib/python3.10/site-packages/optax/_src/constrain_test.py +115 -0
- lib/python3.10/site-packages/optax/_src/control_variates_test.py +595 -0
- lib/python3.10/site-packages/optax/_src/factorized.py +199 -0
- lib/python3.10/site-packages/optax/_src/float64_test.py +94 -0
- lib/python3.10/site-packages/optax/_src/linear_algebra.py +201 -0
- lib/python3.10/site-packages/optax/_src/lookahead.py +192 -0
- lib/python3.10/site-packages/optax/_src/lookahead_test.py +140 -0
- lib/python3.10/site-packages/optax/_src/loss.py +521 -0
- lib/python3.10/site-packages/optax/_src/loss_test.py +500 -0
- lib/python3.10/site-packages/optax/_src/numerics.py +118 -0
- lib/python3.10/site-packages/optax/_src/privacy_test.py +112 -0
- lib/python3.10/site-packages/optax/_src/schedule.py +620 -0
- lib/python3.10/site-packages/optax/_src/schedule_test.py +649 -0
- lib/python3.10/site-packages/optax/_src/second_order.py +111 -0
- lib/python3.10/site-packages/optax/_src/stochastic_gradient_estimators.py +317 -0
- lib/python3.10/site-packages/optax/_src/stochastic_gradient_estimators_test.py +371 -0
- lib/python3.10/site-packages/optax/_src/test_utils.py +42 -0
- lib/python3.10/site-packages/optax/_src/transform.py +1143 -0
- lib/python3.10/site-packages/optax/_src/transform_test.py +305 -0
- lib/python3.10/site-packages/optax/_src/utils.py +152 -0
- lib/python3.10/site-packages/optax/_src/utils_test.py +65 -0
- lib/python3.10/site-packages/optax/_src/wrappers.py +547 -0
- lib/python3.10/site-packages/optax/experimental/__init__.py +23 -0
- lib/python3.10/site-packages/optax/optax_test.py +29 -0
- lib/python3.10/site-packages/pasta/__init__.py +30 -0
- lib/python3.10/site-packages/pasta/augment/__init__.py +0 -0
- lib/python3.10/site-packages/pasta/augment/errors.py +23 -0
- lib/python3.10/site-packages/pasta/augment/import_utils.py +217 -0
- lib/python3.10/site-packages/pasta/augment/import_utils_test.py +428 -0
- lib/python3.10/site-packages/pasta/augment/inline.py +65 -0
- lib/python3.10/site-packages/pasta/augment/inline_test.py +97 -0
- lib/python3.10/site-packages/pasta/augment/rename.py +154 -0
- lib/python3.10/site-packages/pasta/augment/rename_test.py +119 -0
- lib/python3.10/site-packages/pasta/base/__init__.py +0 -0
- lib/python3.10/site-packages/pasta/base/annotate.py +1543 -0
- lib/python3.10/site-packages/pasta/base/annotate_test.py +477 -0
- lib/python3.10/site-packages/pasta/base/ast_constants.py +38 -0
lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
uv
|
lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2019 Kota Yamaguchi
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.2
|
| 2 |
+
Name: faiss-cpu
|
| 3 |
+
Version: 1.10.0
|
| 4 |
+
Summary: A library for efficient similarity search and clustering of dense vectors.
|
| 5 |
+
Author-email: Kota Yamaguchi <yamaguchi_kota@cyberagent.co.jp>
|
| 6 |
+
License: MIT License
|
| 7 |
+
Project-URL: Repository, https://github.com/kyamagu/faiss-wheels
|
| 8 |
+
Keywords: faiss,similarity search,clustering,machine learning
|
| 9 |
+
Classifier: Development Status :: 4 - Beta
|
| 10 |
+
Classifier: Intended Audience :: Developers
|
| 11 |
+
Classifier: Intended Audience :: Science/Research
|
| 12 |
+
Classifier: License :: OSI Approved :: MIT License
|
| 13 |
+
Classifier: Operating System :: MacOS :: MacOS X
|
| 14 |
+
Classifier: Operating System :: Microsoft :: Windows
|
| 15 |
+
Classifier: Operating System :: POSIX
|
| 16 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 17 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 18 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.13
|
| 21 |
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
| 22 |
+
Requires-Python: >=3.9
|
| 23 |
+
Description-Content-Type: text/markdown
|
| 24 |
+
License-File: LICENSE
|
| 25 |
+
Requires-Dist: numpy<3.0,>=1.25.0
|
| 26 |
+
Requires-Dist: packaging
|
| 27 |
+
|
| 28 |
+
# faiss-wheels
|
| 29 |
+
|
| 30 |
+
[](https://github.com/kyamagu/faiss-wheels/actions/workflows/build.yml)
|
| 31 |
+
[](https://pypi.org/project/faiss-cpu/)
|
| 32 |
+
|
| 33 |
+
faiss python wheel packages.
|
| 34 |
+
|
| 35 |
+
- [faiss](https://github.com/facebookresearch/faiss)
|
| 36 |
+
|
| 37 |
+
## Overview
|
| 38 |
+
|
| 39 |
+
This repository provides scripts to build wheel packages for the
|
| 40 |
+
[faiss](https://github.com/facebookresearch/faiss) library.
|
| 41 |
+
|
| 42 |
+
- Builds CPU-only version with [cibuildwheel](https://github.com/pypa/cibuildwheel/).
|
| 43 |
+
- Bundles OpenBLAS in Linux/Windows
|
| 44 |
+
- Uses Accelerate framework in macOS
|
| 45 |
+
|
| 46 |
+
There is also a source package to customize the build process.
|
| 47 |
+
|
| 48 |
+
> **Note**
|
| 49 |
+
> GPU binary package is discontinued as of 1.7.3 release. Build a source package to support GPU features.
|
| 50 |
+
|
| 51 |
+
### Install
|
| 52 |
+
|
| 53 |
+
Install the CPU-only binary package by:
|
| 54 |
+
|
| 55 |
+
```bash
|
| 56 |
+
pip install faiss-cpu
|
| 57 |
+
```
|
| 58 |
+
|
| 59 |
+
Note that the package name is `faiss-cpu`.
|
| 60 |
+
|
| 61 |
+
## Supporting GPU or customized build configuration
|
| 62 |
+
|
| 63 |
+
The PyPI binary package does not support GPU.
|
| 64 |
+
To support GPU methods or use faiss with different build configuration, build a source package.
|
| 65 |
+
For building the source package, swig 3.0.12 or later needs to be available.
|
| 66 |
+
Also, there should be all the required prerequisites for building faiss itself, such as `nvcc` and CUDA toolkit.
|
| 67 |
+
|
| 68 |
+
## Building faiss
|
| 69 |
+
|
| 70 |
+
The source package assumes faiss is already built and installed in the system.
|
| 71 |
+
If not done so elsewhere, build and install the faiss library first.
|
| 72 |
+
The following example builds and installs faiss with GPU support and avx512 instruction set.
|
| 73 |
+
|
| 74 |
+
```bash
|
| 75 |
+
git clone https://github.com/facebookresearch/faiss.git
|
| 76 |
+
cd faiss
|
| 77 |
+
cmake . -B build -DFAISS_ENABLE_GPU=ON -DFAISS_ENABLE_PYTHON=OFF -DFAISS_OPT_LEVEL=avx512
|
| 78 |
+
cmake --build build --config Release -j
|
| 79 |
+
cmake --install build install
|
| 80 |
+
cd ..
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
See the official
|
| 84 |
+
[faiss installation instruction](https://github.com/facebookresearch/faiss/blob/master/INSTALL.md)
|
| 85 |
+
for more on how to build and install faiss.
|
| 86 |
+
|
| 87 |
+
### Building a source package
|
| 88 |
+
|
| 89 |
+
Once faiss is built and installed, build the source package.
|
| 90 |
+
The following builds and installs the faiss-cpu source package with GPU and AVX512.
|
| 91 |
+
|
| 92 |
+
```bash
|
| 93 |
+
export FAISS_ENABLE_GPU=ON FAISS_OPT_LEVEL=avx512
|
| 94 |
+
pip install --no-binary :all: faiss-cpu
|
| 95 |
+
```
|
| 96 |
+
|
| 97 |
+
There are a few environment variables that specifies build-time options.
|
| 98 |
+
- `FAISS_INSTALL_PREFIX`: Specifies the install location of faiss library, default to `/usr/local`.
|
| 99 |
+
- `FAISS_OPT_LEVEL`: Faiss SIMD optimization, one of `generic`, `avx2`, `avx512`. Note that AVX option is only available in x86_64 arch.
|
| 100 |
+
- `FAISS_ENABLE_GPU`: Setting this variable to `ON` builds GPU wrappers. Set this variable if faiss is built with GPU support.
|
| 101 |
+
- `CUDA_HOME`: Specifies CUDA install location for building GPU wrappers, default to `/usr/local/cuda`.
|
| 102 |
+
|
| 103 |
+
## Development
|
| 104 |
+
|
| 105 |
+
This repository is intended to support PyPI distribution for the official [faiss](https://github.com/facebookresearch/faiss) library.
|
| 106 |
+
The repository contains the CI workflow based on [cibuildwheel](https://github.com/pypa/cibuildwheel/).
|
| 107 |
+
Feel free to make a pull request to fix packaging problems.
|
| 108 |
+
|
| 109 |
+
Other relevant resources:
|
| 110 |
+
|
| 111 |
+
- [Packaging projects with GPU code](https://pypackaging-native.github.io/key-issues/gpus/)
|
lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
faiss/__init__.py,sha256=tA8_xJ3pt4LyKMXEgiwwuiqc9c2q4F9ZGBOlOTyj9J8,12352
|
| 2 |
+
faiss/_swigfaiss.cpython-310-x86_64-linux-gnu.so,sha256=hLiUl7bZ6Ppd_g6Xe55pQ19es3e42AAOSr4qTGu8haw,41262577
|
| 3 |
+
faiss/_swigfaiss_avx2.cpython-310-x86_64-linux-gnu.so,sha256=mbyaghOqhxRQEOTx6REQRmDaBH-Vf989ZYWpVAQs9Cg,41041497
|
| 4 |
+
faiss/_swigfaiss_avx512.cpython-310-x86_64-linux-gnu.so,sha256=Gl4QVU_KNxLoxaV4wPy7POz8bnaAyoKcTeh69lRH-Yk,42048937
|
| 5 |
+
faiss/array_conversions.py,sha256=D4nCV39t03NR46z_DutDUnzO3JZi7_4w0D5F6HKVFhA,5103
|
| 6 |
+
faiss/class_wrappers.py,sha256=ReYh4z4HoAVlRW4rZ1cQy0Hi2z0Tz4aDU_DwIKvOfCg,48267
|
| 7 |
+
faiss/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 8 |
+
faiss/contrib/big_batch_search.py,sha256=L24CEMtqbls_4nnFYxmdoTG9Odt26N4z6yRw7QjdrZA,17640
|
| 9 |
+
faiss/contrib/client_server.py,sha256=vryOc223DtDbIl-5dI9AS2_zLe9PSyyvS8HUkIiJlIs,2774
|
| 10 |
+
faiss/contrib/clustering.py,sha256=yqnMrpmOk2Ds_3AVt4Ka54sfb5On0azsO0qcdx0pXms,12699
|
| 11 |
+
faiss/contrib/datasets.py,sha256=W-Y_T8JBXSaTqKwaNl_DvtVepd71OS34Dp4UpVIyaFQ,12273
|
| 12 |
+
faiss/contrib/evaluation.py,sha256=YCK_WdDOmuk3YTywCyVk_7bwMjrOwz5LS7o3XCO064U,14995
|
| 13 |
+
faiss/contrib/exhaustive_search.py,sha256=Hw7S_DOjYp2BQ2RGbG5TcJVK6MCbFKpwCgq3aAK3yd8,12380
|
| 14 |
+
faiss/contrib/factory_tools.py,sha256=KChfcCgd5n8VOT669x1LKB035o0u3yMwwMlyf3FgnFU,5085
|
| 15 |
+
faiss/contrib/inspect_tools.py,sha256=adznxU6EFhVrBXuyyol5M-j5baUVlhJCOoz7cFDFVdQ,3749
|
| 16 |
+
faiss/contrib/ivf_tools.py,sha256=yZVz2UFivUUkur27SeeHfAfwM422tJpwVd6uT55w2yk,4874
|
| 17 |
+
faiss/contrib/ondisk.py,sha256=o75LX2UDSVb0WaKWHKVwfVoXXmIGt-qblIMaVP3VRYQ,2069
|
| 18 |
+
faiss/contrib/rpc.py,sha256=AFSNIhDWU6M4PErmYShL9U2DVVTypkJR5C3VpcSZS98,7305
|
| 19 |
+
faiss/contrib/torch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 20 |
+
faiss/contrib/torch/clustering.py,sha256=fuHiTKrpgH3iXoyaqvuij4j2SD7hKS_VGPITbprIGj8,1622
|
| 21 |
+
faiss/contrib/torch/quantization.py,sha256=co4f6SYN2OcDwIv_QlhbTUosmKT77CbDfP3qH7jCDGA,2796
|
| 22 |
+
faiss/contrib/torch_utils.py,sha256=xiJneS8gCQ-Dgoh4wotaYzWMMe8fWwUPOTVrOjPoCaw,26826
|
| 23 |
+
faiss/contrib/vecs_io.py,sha256=mBCzkcL0g1P438hur9OrTriQTuFl2CDsidFU5LPHIkU,1383
|
| 24 |
+
faiss/extra_wrappers.py,sha256=B_C7O8HC0rfYvIeecWBEZxjIGueIjWgyGt0k5v6JFoE,20492
|
| 25 |
+
faiss/gpu_wrappers.py,sha256=r2XcE_WO_URnFVq01zPgHbyKhfRHFvlw7nlX6-eObc8,9196
|
| 26 |
+
faiss/loader.py,sha256=Ih3mYPcDahHzFjJerQeEadUSwe4r0u2D3H7WKnUtrjY,5715
|
| 27 |
+
faiss/python_callbacks.h,sha256=Di3GvEZb1aJxQxJagsmUS_xNFNfIm4BtbVVaSqB8rdw,1771
|
| 28 |
+
faiss/setup.py,sha256=hyyzmNocXW8-zRziXlGSLS3QoZDwkCNHeBfii_WZEn4,4823
|
| 29 |
+
faiss/swigfaiss.i,sha256=pmiBVlt0CZByBV9iO3gssdxiSOvCIPbvk5XkiECKTJU,34916
|
| 30 |
+
faiss/swigfaiss.py,sha256=Vthkry0pdtS5XxgFWRFqW08f1xC-AjKWL8XH3OsiGLg,522419
|
| 31 |
+
faiss/swigfaiss_avx2.py,sha256=Yc9XDqj1x4Lhfpl-gO_dOvxZlD0EAHDwEMdZ7Fu5OTU,543874
|
| 32 |
+
faiss/swigfaiss_avx512.py,sha256=XbDzP8_tDnKW4gwZE4GQ5bSLb9UzXUsPqpg4DszojIc,552456
|
| 33 |
+
faiss_cpu-1.10.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
|
| 34 |
+
faiss_cpu-1.10.0.dist-info/LICENSE,sha256=WIeseFb8XlKsip07StlCDHeYTCME9qlvgxSryQpfc-4,1071
|
| 35 |
+
faiss_cpu-1.10.0.dist-info/METADATA,sha256=k8Oqa9qO1d_KljrW_e7B5oiV073ICJ5CwOci41vp1dk,4433
|
| 36 |
+
faiss_cpu-1.10.0.dist-info/RECORD,,
|
| 37 |
+
faiss_cpu-1.10.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 38 |
+
faiss_cpu-1.10.0.dist-info/WHEEL,sha256=0LtizVCPjmS43VfmdyNZBTcke7HWiGSz2g6gpFQfJJc,113
|
| 39 |
+
faiss_cpu-1.10.0.dist-info/top_level.txt,sha256=nr2S-1YAhqAuAlFJukntDvRGXfI1KmwnUNs_iFlNiig,6
|
| 40 |
+
faiss_cpu.libs/libgfortran-93980b03.so.5.0.0,sha256=VRPmUjgQd_BY9MZtJxiNd9ToNjdBD8kmV1-G0G2SSKU,2714697
|
| 41 |
+
faiss_cpu.libs/libgomp-24e2ab19.so.1.0.0,sha256=7DVDy2-hHzQlj-PVkILkvZdAoYNJ4Jf8Qqe8aSohvyw,253289
|
| 42 |
+
faiss_cpu.libs/libquadmath-776d53b6.so.0.0.0,sha256=3FY9LfNiF_4qtQ_ZZncl3g5v7__d2mmyG-o2kSRS-8Q,272193
|
lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/REQUESTED
ADDED
|
File without changes
|
lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: setuptools (75.8.0)
|
| 3 |
+
Root-Is-Purelib: false
|
| 4 |
+
Tag: cp310-cp310-manylinux_2_28_x86_64
|
| 5 |
+
|
lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
faiss
|
lib/python3.10/site-packages/optax/__init__.py
ADDED
|
@@ -0,0 +1,349 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Optax: composable gradient processing and optimization, in JAX."""
|
| 16 |
+
|
| 17 |
+
from optax import experimental
|
| 18 |
+
from optax._src.alias import adabelief
|
| 19 |
+
from optax._src.alias import adafactor
|
| 20 |
+
from optax._src.alias import adagrad
|
| 21 |
+
from optax._src.alias import adam
|
| 22 |
+
from optax._src.alias import adamax
|
| 23 |
+
from optax._src.alias import adamaxw
|
| 24 |
+
from optax._src.alias import adamw
|
| 25 |
+
from optax._src.alias import amsgrad
|
| 26 |
+
from optax._src.alias import dpsgd
|
| 27 |
+
from optax._src.alias import fromage
|
| 28 |
+
from optax._src.alias import lamb
|
| 29 |
+
from optax._src.alias import lars
|
| 30 |
+
from optax._src.alias import MaskOrFn
|
| 31 |
+
from optax._src.alias import noisy_sgd
|
| 32 |
+
from optax._src.alias import novograd
|
| 33 |
+
from optax._src.alias import optimistic_gradient_descent
|
| 34 |
+
from optax._src.alias import radam
|
| 35 |
+
from optax._src.alias import rmsprop
|
| 36 |
+
from optax._src.alias import ScalarOrSchedule
|
| 37 |
+
from optax._src.alias import sgd
|
| 38 |
+
from optax._src.alias import sm3
|
| 39 |
+
from optax._src.alias import yogi
|
| 40 |
+
from optax._src.base import EmptyState
|
| 41 |
+
from optax._src.base import GradientTransformation
|
| 42 |
+
from optax._src.base import identity
|
| 43 |
+
from optax._src.base import OptState
|
| 44 |
+
from optax._src.base import Params
|
| 45 |
+
from optax._src.base import Schedule
|
| 46 |
+
from optax._src.base import set_to_zero
|
| 47 |
+
from optax._src.base import stateless
|
| 48 |
+
from optax._src.base import stateless_with_tree_map
|
| 49 |
+
from optax._src.base import TransformInitFn
|
| 50 |
+
from optax._src.base import TransformUpdateFn
|
| 51 |
+
from optax._src.base import Updates
|
| 52 |
+
from optax._src.clipping import adaptive_grad_clip
|
| 53 |
+
from optax._src.clipping import AdaptiveGradClipState
|
| 54 |
+
from optax._src.clipping import clip
|
| 55 |
+
from optax._src.clipping import clip_by_block_rms
|
| 56 |
+
from optax._src.clipping import clip_by_global_norm
|
| 57 |
+
from optax._src.clipping import ClipByGlobalNormState
|
| 58 |
+
from optax._src.clipping import ClipState
|
| 59 |
+
from optax._src.clipping import per_example_global_norm_clip
|
| 60 |
+
from optax._src.combine import chain
|
| 61 |
+
from optax._src.combine import multi_transform
|
| 62 |
+
from optax._src.combine import MultiTransformState
|
| 63 |
+
from optax._src.constrain import keep_params_nonnegative
|
| 64 |
+
from optax._src.constrain import NonNegativeParamsState
|
| 65 |
+
from optax._src.constrain import zero_nans
|
| 66 |
+
from optax._src.constrain import ZeroNansState
|
| 67 |
+
from optax._src.control_variates import control_delta_method
|
| 68 |
+
from optax._src.control_variates import control_variates_jacobians
|
| 69 |
+
from optax._src.control_variates import moving_avg_baseline
|
| 70 |
+
from optax._src.factorized import FactoredState
|
| 71 |
+
from optax._src.factorized import scale_by_factored_rms
|
| 72 |
+
from optax._src.linear_algebra import global_norm
|
| 73 |
+
from optax._src.linear_algebra import matrix_inverse_pth_root
|
| 74 |
+
from optax._src.linear_algebra import power_iteration
|
| 75 |
+
from optax._src.lookahead import lookahead
|
| 76 |
+
from optax._src.lookahead import LookaheadParams
|
| 77 |
+
from optax._src.lookahead import LookaheadState
|
| 78 |
+
from optax._src.loss import cosine_distance
|
| 79 |
+
from optax._src.loss import cosine_similarity
|
| 80 |
+
from optax._src.loss import ctc_loss
|
| 81 |
+
from optax._src.loss import ctc_loss_with_forward_probs
|
| 82 |
+
from optax._src.loss import hinge_loss
|
| 83 |
+
from optax._src.loss import huber_loss
|
| 84 |
+
from optax._src.loss import l2_loss
|
| 85 |
+
from optax._src.loss import log_cosh
|
| 86 |
+
from optax._src.loss import sigmoid_binary_cross_entropy
|
| 87 |
+
from optax._src.loss import smooth_labels
|
| 88 |
+
from optax._src.loss import softmax_cross_entropy
|
| 89 |
+
from optax._src.loss import softmax_cross_entropy_with_integer_labels
|
| 90 |
+
from optax._src.numerics import safe_int32_increment
|
| 91 |
+
from optax._src.numerics import safe_norm
|
| 92 |
+
from optax._src.numerics import safe_root_mean_squares
|
| 93 |
+
from optax._src.privacy import differentially_private_aggregate
|
| 94 |
+
from optax._src.privacy import DifferentiallyPrivateAggregateState
|
| 95 |
+
from optax._src.schedule import constant_schedule
|
| 96 |
+
from optax._src.schedule import cosine_decay_schedule
|
| 97 |
+
from optax._src.schedule import cosine_onecycle_schedule
|
| 98 |
+
from optax._src.schedule import exponential_decay
|
| 99 |
+
from optax._src.schedule import inject_hyperparams
|
| 100 |
+
from optax._src.schedule import InjectHyperparamsState
|
| 101 |
+
from optax._src.schedule import join_schedules
|
| 102 |
+
from optax._src.schedule import linear_onecycle_schedule
|
| 103 |
+
from optax._src.schedule import linear_schedule
|
| 104 |
+
from optax._src.schedule import piecewise_constant_schedule
|
| 105 |
+
from optax._src.schedule import piecewise_interpolate_schedule
|
| 106 |
+
from optax._src.schedule import polynomial_schedule
|
| 107 |
+
from optax._src.schedule import sgdr_schedule
|
| 108 |
+
from optax._src.schedule import warmup_cosine_decay_schedule
|
| 109 |
+
from optax._src.schedule import warmup_exponential_decay_schedule
|
| 110 |
+
from optax._src.second_order import fisher_diag
|
| 111 |
+
from optax._src.second_order import hessian_diag
|
| 112 |
+
from optax._src.second_order import hvp
|
| 113 |
+
from optax._src.stochastic_gradient_estimators import measure_valued_jacobians
|
| 114 |
+
from optax._src.stochastic_gradient_estimators import pathwise_jacobians
|
| 115 |
+
from optax._src.stochastic_gradient_estimators import score_function_jacobians
|
| 116 |
+
from optax._src.transform import add_decayed_weights
|
| 117 |
+
from optax._src.transform import add_noise
|
| 118 |
+
from optax._src.transform import AddDecayedWeightsState
|
| 119 |
+
from optax._src.transform import additive_weight_decay
|
| 120 |
+
from optax._src.transform import AdditiveWeightDecayState
|
| 121 |
+
from optax._src.transform import AddNoiseState
|
| 122 |
+
from optax._src.transform import apply_every
|
| 123 |
+
from optax._src.transform import ApplyEvery
|
| 124 |
+
from optax._src.transform import bias_correction
|
| 125 |
+
from optax._src.transform import centralize
|
| 126 |
+
from optax._src.transform import ema
|
| 127 |
+
from optax._src.transform import EmaState
|
| 128 |
+
from optax._src.transform import scale
|
| 129 |
+
from optax._src.transform import scale_by_adam
|
| 130 |
+
from optax._src.transform import scale_by_adamax
|
| 131 |
+
from optax._src.transform import scale_by_amsgrad
|
| 132 |
+
from optax._src.transform import scale_by_belief
|
| 133 |
+
from optax._src.transform import scale_by_novograd
|
| 134 |
+
from optax._src.transform import scale_by_optimistic_gradient
|
| 135 |
+
from optax._src.transform import scale_by_param_block_norm
|
| 136 |
+
from optax._src.transform import scale_by_param_block_rms
|
| 137 |
+
from optax._src.transform import scale_by_radam
|
| 138 |
+
from optax._src.transform import scale_by_rms
|
| 139 |
+
from optax._src.transform import scale_by_rss
|
| 140 |
+
from optax._src.transform import scale_by_schedule
|
| 141 |
+
from optax._src.transform import scale_by_sm3
|
| 142 |
+
from optax._src.transform import scale_by_stddev
|
| 143 |
+
from optax._src.transform import scale_by_trust_ratio
|
| 144 |
+
from optax._src.transform import scale_by_yogi
|
| 145 |
+
from optax._src.transform import ScaleByAdamState
|
| 146 |
+
from optax._src.transform import ScaleByAmsgradState
|
| 147 |
+
from optax._src.transform import ScaleByBeliefState
|
| 148 |
+
from optax._src.transform import ScaleByFromageState
|
| 149 |
+
from optax._src.transform import ScaleByNovogradState
|
| 150 |
+
from optax._src.transform import ScaleByRmsState
|
| 151 |
+
from optax._src.transform import ScaleByRssState
|
| 152 |
+
from optax._src.transform import ScaleByRStdDevState
|
| 153 |
+
from optax._src.transform import ScaleByScheduleState
|
| 154 |
+
from optax._src.transform import ScaleBySM3State
|
| 155 |
+
from optax._src.transform import ScaleByTrustRatioState
|
| 156 |
+
from optax._src.transform import ScaleState
|
| 157 |
+
from optax._src.transform import trace
|
| 158 |
+
from optax._src.transform import TraceState
|
| 159 |
+
from optax._src.transform import update_infinity_moment
|
| 160 |
+
from optax._src.transform import update_moment
|
| 161 |
+
from optax._src.transform import update_moment_per_elem_norm
|
| 162 |
+
from optax._src.update import apply_updates
|
| 163 |
+
from optax._src.update import incremental_update
|
| 164 |
+
from optax._src.update import periodic_update
|
| 165 |
+
from optax._src.utils import multi_normal
|
| 166 |
+
from optax._src.utils import scale_gradient
|
| 167 |
+
from optax._src.wrappers import apply_if_finite
|
| 168 |
+
from optax._src.wrappers import ApplyIfFiniteState
|
| 169 |
+
from optax._src.wrappers import flatten
|
| 170 |
+
from optax._src.wrappers import masked
|
| 171 |
+
from optax._src.wrappers import MaskedNode
|
| 172 |
+
from optax._src.wrappers import MaskedState
|
| 173 |
+
from optax._src.wrappers import maybe_update
|
| 174 |
+
from optax._src.wrappers import MaybeUpdateState
|
| 175 |
+
from optax._src.wrappers import MultiSteps
|
| 176 |
+
from optax._src.wrappers import MultiStepsState
|
| 177 |
+
from optax._src.wrappers import ShouldSkipUpdateFunction
|
| 178 |
+
from optax._src.wrappers import skip_large_updates
|
| 179 |
+
from optax._src.wrappers import skip_not_finite
|
| 180 |
+
|
| 181 |
+
__version__ = "0.1.4"
|
| 182 |
+
|
| 183 |
+
__all__ = (
|
| 184 |
+
"adabelief",
|
| 185 |
+
"adafactor",
|
| 186 |
+
"adagrad",
|
| 187 |
+
"adam",
|
| 188 |
+
"adamax",
|
| 189 |
+
"adamaxw",
|
| 190 |
+
"adamw",
|
| 191 |
+
"adaptive_grad_clip",
|
| 192 |
+
"AdaptiveGradClipState",
|
| 193 |
+
"add_decayed_weights",
|
| 194 |
+
"add_noise",
|
| 195 |
+
"AddDecayedWeightsState",
|
| 196 |
+
"additive_weight_decay",
|
| 197 |
+
"AdditiveWeightDecayState",
|
| 198 |
+
"AddNoiseState",
|
| 199 |
+
"amsgrad",
|
| 200 |
+
"apply_every",
|
| 201 |
+
"apply_if_finite",
|
| 202 |
+
"apply_updates",
|
| 203 |
+
"ApplyEvery",
|
| 204 |
+
"ApplyIfFiniteState",
|
| 205 |
+
"centralize",
|
| 206 |
+
"chain",
|
| 207 |
+
"clip_by_block_rms",
|
| 208 |
+
"clip_by_global_norm",
|
| 209 |
+
"clip",
|
| 210 |
+
"ClipByGlobalNormState",
|
| 211 |
+
"ClipState",
|
| 212 |
+
"constant_schedule",
|
| 213 |
+
"ctc_loss",
|
| 214 |
+
"ctc_loss_with_forward_probs",
|
| 215 |
+
"control_delta_method",
|
| 216 |
+
"control_variates_jacobians",
|
| 217 |
+
"cosine_decay_schedule",
|
| 218 |
+
"cosine_distance",
|
| 219 |
+
"cosine_onecycle_schedule",
|
| 220 |
+
"cosine_similarity",
|
| 221 |
+
"differentially_private_aggregate",
|
| 222 |
+
"DifferentiallyPrivateAggregateState",
|
| 223 |
+
"dpsgd",
|
| 224 |
+
"ema",
|
| 225 |
+
"EmaState",
|
| 226 |
+
"EmptyState",
|
| 227 |
+
"exponential_decay",
|
| 228 |
+
"FactoredState",
|
| 229 |
+
"fisher_diag",
|
| 230 |
+
"flatten",
|
| 231 |
+
"fromage",
|
| 232 |
+
"global_norm",
|
| 233 |
+
"GradientTransformation",
|
| 234 |
+
"hinge_loss",
|
| 235 |
+
"hessian_diag",
|
| 236 |
+
"huber_loss",
|
| 237 |
+
"hvp",
|
| 238 |
+
"identity",
|
| 239 |
+
"incremental_update",
|
| 240 |
+
"inject_hyperparams",
|
| 241 |
+
"InjectHyperparamsState",
|
| 242 |
+
"join_schedules",
|
| 243 |
+
"keep_params_nonnegative",
|
| 244 |
+
"l2_loss",
|
| 245 |
+
"lamb",
|
| 246 |
+
"lars",
|
| 247 |
+
"linear_onecycle_schedule",
|
| 248 |
+
"linear_schedule",
|
| 249 |
+
"log_cosh",
|
| 250 |
+
"lookahead",
|
| 251 |
+
"LookaheadParams",
|
| 252 |
+
"LookaheadState",
|
| 253 |
+
"masked",
|
| 254 |
+
"MaskOrFn",
|
| 255 |
+
"MaskedState",
|
| 256 |
+
"matrix_inverse_pth_root",
|
| 257 |
+
"maybe_update",
|
| 258 |
+
"MaybeUpdateState",
|
| 259 |
+
"measure_valued_jacobians",
|
| 260 |
+
"moving_avg_baseline",
|
| 261 |
+
"multi_normal",
|
| 262 |
+
"multi_transform",
|
| 263 |
+
"MultiSteps",
|
| 264 |
+
"MultiStepsState",
|
| 265 |
+
"MultiTransformState",
|
| 266 |
+
"noisy_sgd",
|
| 267 |
+
"novograd",
|
| 268 |
+
"NonNegativeParamsState",
|
| 269 |
+
"OptState",
|
| 270 |
+
"Params",
|
| 271 |
+
"pathwise_jacobians",
|
| 272 |
+
"periodic_update",
|
| 273 |
+
"per_example_global_norm_clip",
|
| 274 |
+
"piecewise_constant_schedule",
|
| 275 |
+
"piecewise_interpolate_schedule",
|
| 276 |
+
"polynomial_schedule",
|
| 277 |
+
"power_iteration",
|
| 278 |
+
"radam",
|
| 279 |
+
"rmsprop",
|
| 280 |
+
"safe_int32_increment",
|
| 281 |
+
"safe_norm",
|
| 282 |
+
"safe_root_mean_squares",
|
| 283 |
+
"ScalarOrSchedule",
|
| 284 |
+
"scale_by_adam",
|
| 285 |
+
"scale_by_adamax",
|
| 286 |
+
"scale_by_amsgrad",
|
| 287 |
+
"scale_by_belief",
|
| 288 |
+
"scale_by_factored_rms",
|
| 289 |
+
"scale_by_novograd",
|
| 290 |
+
"scale_by_param_block_norm",
|
| 291 |
+
"scale_by_param_block_rms",
|
| 292 |
+
"scale_by_radam",
|
| 293 |
+
"scale_by_rms",
|
| 294 |
+
"scale_by_rss",
|
| 295 |
+
"scale_by_schedule",
|
| 296 |
+
"scale_by_sm3",
|
| 297 |
+
"scale_by_stddev",
|
| 298 |
+
"scale_by_trust_ratio",
|
| 299 |
+
"scale_by_yogi",
|
| 300 |
+
"scale_gradient",
|
| 301 |
+
"scale",
|
| 302 |
+
"ScaleByAdamState",
|
| 303 |
+
"ScaleByAmsgradState",
|
| 304 |
+
"ScaleByBeliefState",
|
| 305 |
+
"ScaleByFromageState",
|
| 306 |
+
"ScaleByNovogradState",
|
| 307 |
+
"ScaleByRmsState",
|
| 308 |
+
"ScaleByRssState",
|
| 309 |
+
"ScaleByRStdDevState",
|
| 310 |
+
"ScaleByScheduleState",
|
| 311 |
+
"ScaleBySM3State",
|
| 312 |
+
"ScaleByTrustRatioState",
|
| 313 |
+
"ScaleState",
|
| 314 |
+
"Schedule",
|
| 315 |
+
"score_function_jacobians",
|
| 316 |
+
"set_to_zero",
|
| 317 |
+
"sgd",
|
| 318 |
+
"sgdr_schedule",
|
| 319 |
+
"ShouldSkipUpdateFunction",
|
| 320 |
+
"sigmoid_binary_cross_entropy",
|
| 321 |
+
"skip_large_updates",
|
| 322 |
+
"skip_not_finite",
|
| 323 |
+
"sm3",
|
| 324 |
+
"smooth_labels",
|
| 325 |
+
"softmax_cross_entropy",
|
| 326 |
+
"stateless",
|
| 327 |
+
"stateless_with_tree_map",
|
| 328 |
+
"trace",
|
| 329 |
+
"TraceState",
|
| 330 |
+
"TransformInitFn",
|
| 331 |
+
"TransformUpdateFn",
|
| 332 |
+
"Updates",
|
| 333 |
+
"warmup_cosine_decay_schedule",
|
| 334 |
+
"warmup_exponential_decay_schedule",
|
| 335 |
+
"yogi",
|
| 336 |
+
"zero_nans",
|
| 337 |
+
"ZeroNansState",
|
| 338 |
+
)
|
| 339 |
+
|
| 340 |
+
# _________________________________________
|
| 341 |
+
# / Please don't use symbols in `_src` they \
|
| 342 |
+
# \ are not part of the Optax public API. /
|
| 343 |
+
# -----------------------------------------
|
| 344 |
+
# \ ^__^
|
| 345 |
+
# \ (oo)\_______
|
| 346 |
+
# (__)\ )\/\
|
| 347 |
+
# ||----w |
|
| 348 |
+
# || ||
|
| 349 |
+
#
|
lib/python3.10/site-packages/optax/_src/alias.py
ADDED
|
@@ -0,0 +1,883 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Aliases for popular optimizers."""
|
| 16 |
+
|
| 17 |
+
from typing import Any, Callable, Optional, Union
|
| 18 |
+
|
| 19 |
+
import jax.numpy as jnp
|
| 20 |
+
|
| 21 |
+
from optax._src import base
|
| 22 |
+
from optax._src import clipping
|
| 23 |
+
from optax._src import combine
|
| 24 |
+
from optax._src import factorized
|
| 25 |
+
from optax._src import privacy
|
| 26 |
+
from optax._src import transform
|
| 27 |
+
from optax._src import wrappers
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
ScalarOrSchedule = Union[float, base.Schedule]
|
| 31 |
+
MaskOrFn = Optional[Union[Any, Callable[[base.Params], Any]]]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _scale_by_learning_rate(learning_rate: ScalarOrSchedule, flip_sign=True):
  """Return a transformation scaling updates by (minus) the learning rate.

  The sign is flipped by default so that chained optimizers perform
  gradient *descent* rather than ascent.
  """
  sign = -1 if flip_sign else 1
  if callable(learning_rate):
    # A schedule: defer the lookup so the current step's rate is used.
    return transform.scale_by_schedule(lambda count: sign * learning_rate(count))
  return transform.scale(sign * learning_rate)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def adabelief(
    learning_rate: ScalarOrSchedule,
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-16,
    eps_root: float = 1e-16) -> base.GradientTransformation:
  """The AdaBelief optimizer.

  AdaBelief is an Adam-like adaptive method aimed at fast convergence,
  good generalization and training stability. Instead of the raw second
  moment it tracks the variance of the prediction error — its "belief" in
  the current gradient direction — and scales the step accordingly. It
  keeps the same number of hyper-parameters as Adam.

  References:
    Zhuang et al, 2020: https://arxiv.org/abs/2010.07468

  Args:
    learning_rate: A fixed global scaling factor.
    b1: Exponential decay rate to track the first moment of past gradients.
    b2: Exponential decay rate to track the second moment of past gradients.
    eps: Term added to the denominator to improve numerical stability.
    eps_root: Term added to the second moment of the prediction error to
      improve numerical stability. If backpropagating gradients through the
      gradient transformation (e.g. for meta-learning), this must be non-zero.

  Returns:
    The corresponding `GradientTransformation`.
  """
  belief_scaling = transform.scale_by_belief(
      b1=b1, b2=b2, eps=eps, eps_root=eps_root)
  return combine.chain(belief_scaling, _scale_by_learning_rate(learning_rate))
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def adafactor(
    learning_rate: Optional[ScalarOrSchedule] = None,
    min_dim_size_to_factor: int = 128,
    decay_rate: float = 0.8,
    decay_offset: int = 0,
    multiply_by_parameter_scale: bool = True,
    clipping_threshold: Optional[float] = 1.0,
    momentum: Optional[float] = None,
    dtype_momentum: Any = jnp.float32,
    weight_decay_rate: Optional[float] = None,
    eps: float = 1e-30,
    factored: bool = True,
    weight_decay_mask: MaskOrFn = None,
    ) -> base.GradientTransformation:
  """The Adafactor optimizer.

  Adafactor is an adaptive learning rate optimizer that focuses on fast
  training of large scale neural networks. It saves memory by using a factored
  estimate of the second order moments used to scale gradients.

  References:
    Shazeer and Stern, 2018: https://arxiv.org/abs/1804.04235

  Args:
    learning_rate: A fixed global scaling factor. Note: the natural scale for
      Adafactor's LR is markedly different from Adam, one doesn't use the
      1/sqrt(hidden) correction for this optim with attention-based models.
    min_dim_size_to_factor: Only factor the statistics if two array dimensions
      have at least this size.
    decay_rate: Controls second-moment exponential decay schedule.
    decay_offset: For fine-tuning, one may set this to the starting step
      number of the fine-tuning phase.
    multiply_by_parameter_scale: If True, then scale learning_rate by
      parameter norm. If False, provided learning_rate is absolute step size.
    clipping_threshold: Optional clipping threshold. Must be >= 1. If None,
      clipping is disabled.
    momentum: Optional value between 0 and 1, enables momentum and uses extra
      memory if non-None! None by default.
    dtype_momentum: Data type of momentum buffers.
    weight_decay_rate: Optional rate at which to decay weights.
    eps: Regularization constant for root mean squared gradient.
    factored: Whether to use factored second-moment estimates.
    weight_decay_mask: A tree with same structure as (or a prefix of)
      the params PyTree, or a Callable that returns such a pytree given
      the params/updates. The leaves should be booleans, `True`
      for leaves/subtrees you want to apply the transformation to,
      and `False` for those you want to skip.

  Returns:
    The corresponding `GradientTransformation`.
  """
  # The core of the algorithm is a procedure for rescaling gradients
  # by a factored estimate of the root mean squared gradients.
  # This reduces memory compared to algorithms such as Adam or RmsProp,
  # by not having to hold a separate estimate for each weight.
  tx = [
      factorized.scale_by_factored_rms(
          factored, decay_rate, decay_offset, min_dim_size_to_factor, eps)]
  # This basic rescaling is typically combined with one or more of the
  # following transformations (all can be disabled via the constructor args).
  if clipping_threshold is not None:
    tx.append(clipping.clip_by_block_rms(clipping_threshold))
  if learning_rate is not None:
    # `flip_sign=False`: the final `scale(-1)` below performs the negation.
    tx.append(_scale_by_learning_rate(learning_rate, flip_sign=False))
  if multiply_by_parameter_scale:
    tx.append(transform.scale_by_param_block_rms())
  if momentum is not None:
    tx.append(
        transform.ema(momentum, debias=False, accumulator_dtype=dtype_momentum))
  if weight_decay_rate is not None:
    tx.append(transform.add_decayed_weights(
        weight_decay_rate, mask=weight_decay_mask))
  # In gradient "descent" we follow the negative gradient.
  tx.append(transform.scale(-1))
  return combine.chain(*tx)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def adagrad(
    learning_rate: ScalarOrSchedule,
    initial_accumulator_value: float = 0.1,
    eps: float = 1e-7
) -> base.GradientTransformation:
  """The Adagrad optimizer.

  Adagrad anneals a per-parameter learning rate over the course of training
  by dividing through the root of the accumulated squared gradients.

  WARNING: because every accumulated term is non-negative, the denominator
  grows monotonically during training, so the effective learning rate
  eventually becomes vanishingly small.

  References:
    Duchi et al, 2011: https://jmlr.org/papers/v12/duchi11a.html

  Args:
    learning_rate: A fixed global scaling factor.
    initial_accumulator_value: Initial value for the accumulator.
    eps: A small constant applied to denominator inside of the square root
      (as in RMSProp) to avoid dividing by zero when rescaling.

  Returns:
    The corresponding `GradientTransformation`.
  """
  accumulator_scaling = transform.scale_by_rss(
      initial_accumulator_value=initial_accumulator_value, eps=eps)
  return combine.chain(
      accumulator_scaling,
      _scale_by_learning_rate(learning_rate),
  )
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def adam(
    learning_rate: ScalarOrSchedule,
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
    eps_root: float = 0.0,
    mu_dtype: Optional[Any] = None,
) -> base.GradientTransformation:
  r"""The classic Adam optimizer.

  Adam is an SGD variant with gradient scaling adaptation. The scaling
  used for each parameter is computed from estimates of first and second-order
  moments of the gradients (using suitable exponential moving averages).

  With learning rate :math:`\alpha_t` and hyper-parameters
  :math:`\beta_1, \beta_2, \varepsilon, \bar{\varepsilon}` (the arguments
  ``b1``, ``b2``, ``eps`` and ``eps_root``), ``init`` creates the state
  :math:`S_0 := (m_0, v_0) = (0, 0)` (pytrees of zeros shaped like the
  updates), and at step :math:`t > 0` the ``update`` function maps incoming
  gradients :math:`g_t` and state :math:`S_t` to updates :math:`u_t` and a
  new state via

  .. math::
    \begin{align*}
      m_t &\leftarrow \beta_1 \cdot m_{t-1} + (1-\beta_1) \cdot g_t \\
      v_t &\leftarrow \beta_2 \cdot v_{t-1} + (1-\beta_2) \cdot {g_t}^2 \\
      \hat{m}_t &\leftarrow m_t / {(1-\beta_1^t)} \\
      \hat{v}_t &\leftarrow v_t / {(1-\beta_2^t)} \\
      u_t &\leftarrow \alpha_t \cdot \hat{m}_t / \left({\sqrt{\hat{v}_t +
          \bar{\varepsilon}} + \varepsilon} \right)\\
      S_t &\leftarrow (m_t, v_t).
    \end{align*}

  References:
    Kingma et al, 2014: https://arxiv.org/abs/1412.6980

  Args:
    learning_rate: A fixed global scaling factor.
    b1: Exponential decay rate to track the first moment of past gradients.
    b2: Exponential decay rate to track the second moment of past gradients.
    eps: A small constant applied to denominator outside of the square root
      (as in the Adam paper) to avoid dividing by zero when rescaling.
    eps_root: A small constant applied to denominator inside the square root
      (as in RMSProp), to avoid dividing by zero when rescaling. This is
      needed for example when computing (meta-)gradients through Adam.
    mu_dtype: Optional `dtype` to be used for the first order accumulator; if
      `None` then the `dtype` is inferred from `params` and `updates`.

  Returns:
    The corresponding `GradientTransformation`.
  """
  adam_scaling = transform.scale_by_adam(
      b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype)
  return combine.chain(adam_scaling, _scale_by_learning_rate(learning_rate))
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def adamw(
    learning_rate: ScalarOrSchedule,
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
    eps_root: float = 0.0,
    mu_dtype: Optional[Any] = None,
    weight_decay: float = 1e-4,
    mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None,
) -> base.GradientTransformation:
  """Adam with weight decay regularization.

  AdamW regularizes learning towards small weights via decoupled weight
  decay, which tends to improve generalization. With plain SGD the same
  effect can be obtained through an additive L2 loss term, but for adaptive
  gradient methods such as Adam, L2 regularization does not behave as
  intended — hence the decoupled formulation.

  References:
    Loshchilov et al, 2019: https://arxiv.org/abs/1711.05101

  Args:
    learning_rate: A fixed global scaling factor.
    b1: Exponential decay rate to track the first moment of past gradients.
    b2: Exponential decay rate to track the second moment of past gradients.
    eps: A small constant applied to denominator outside of the square root
      (as in the Adam paper) to avoid dividing by zero when rescaling.
    eps_root: A small constant applied to denominator inside the square root
      (as in RMSProp), to avoid dividing by zero when rescaling. This is
      needed for instance when computing (meta-)gradients through Adam.
    mu_dtype: Optional `dtype` to be used for the first order accumulator; if
      `None` then the `dtype` is inferred from `params` and `updates`.
    weight_decay: Strength of the weight decay regularization. Note that this
      weight decay is multiplied with the learning rate. This is consistent
      with other frameworks such as PyTorch, but different from
      (Loshchilov et al, 2019) where the weight decay is only multiplied with
      the "schedule multiplier", but not the base learning rate.
    mask: A tree with same structure as (or a prefix of) the params PyTree,
      or a Callable that returns such a pytree given the params/updates.
      The leaves should be booleans, `True` for leaves/subtrees you want to
      apply the weight decay to, and `False` for those you want to skip. Note
      that the Adam gradient transformations are applied to all parameters.

  Returns:
    The corresponding `GradientTransformation`.
  """
  # Decay is added after the Adam rescaling but before the learning-rate
  # scaling, so the decay term is multiplied by the learning rate too.
  adam_scaling = transform.scale_by_adam(
      b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype)
  return combine.chain(
      adam_scaling,
      transform.add_decayed_weights(weight_decay, mask),
      _scale_by_learning_rate(learning_rate),
  )
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def amsgrad(
    learning_rate: ScalarOrSchedule,
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
    eps_root: float = 0.0,
    mu_dtype: Optional[Any] = None,
) -> base.GradientTransformation:
  """The AMSGrad optimiser.

  Plain Adam can fail to converge to the optimal solution in some settings;
  AMSGrad restores the convergence guarantee by keeping a long-term maximum
  of past second-moment estimates.

  References:
    Reddi et al, 2018: https://openreview.net/forum?id=ryQu7f-RZ

  Args:
    learning_rate: A fixed global scaling factor.
    b1: Exponential decay rate to track the first moment of past gradients.
    b2: Exponential decay rate to track the second moment of past gradients.
    eps: A small constant applied to denominator outside of the square root
      (as in the Adam paper) to avoid dividing by zero when rescaling.
    eps_root: A small constant applied to denominator inside the square root
      (as in RMSProp), to avoid dividing by zero when rescaling. This is
      needed for instance when computing (meta-)gradients through Adam.
    mu_dtype: Optional `dtype` to be used for the first order accumulator; if
      `None` then the `dtype` is inferred from `params` and `updates`.

  Returns:
    The corresponding `GradientTransformation`.
  """
  amsgrad_scaling = transform.scale_by_amsgrad(
      b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype)
  return combine.chain(amsgrad_scaling, _scale_by_learning_rate(learning_rate))
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def fromage(
    learning_rate: float,
    min_norm: float = 1e-6
) -> base.GradientTransformation:
  """The Frobenius matched gradient descent (Fromage) optimizer.

  Fromage is a learning algorithm that does not require learning rate tuning.
  It is derived from modeling neural network gradients via deep relative
  trust (a distance function on deep networks). Fromage is related to the
  LARS optimizer and works across a range of standard benchmarks, including
  natural language Transformers and generative adversarial networks.

  References:
    Bernstein et al, 2020: https://arxiv.org/abs/2002.03432

  Args:
    learning_rate: A fixed global scaling factor.
    min_norm: A minimum value that the norm of the gradient updates and the
      norm of the layer parameters can be clipped to, to avoid dividing by
      zero when computing the trust ratio (as in the LARS paper).

  Returns:
    The corresponding `GradientTransformation`.
  """
  # Pre-factor 1 / sqrt(1 + lr^2) applied to both the step and the decay.
  step_factor = 1 / jnp.sqrt(1 + learning_rate ** 2)
  return combine.chain(
      transform.scale_by_trust_ratio(min_norm),
      _scale_by_learning_rate(learning_rate * step_factor),
      transform.add_decayed_weights(step_factor - 1),
  )
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def lars(
    learning_rate: ScalarOrSchedule,
    weight_decay: float = 0.,
    weight_decay_mask: MaskOrFn = True,
    trust_coefficient: float = 0.001,
    eps: float = 0.,
    trust_ratio_mask: MaskOrFn = True,
    momentum: float = 0.9,
    nesterov: bool = False,
) -> base.GradientTransformation:
  """The LARS optimizer.

  LARS is a layer-wise adaptive optimizer introduced to help scale SGD to
  larger batch sizes. LARS later inspired the LAMB optimizer.

  References:
    You et al, 2017: https://arxiv.org/abs/1708.03888

  Args:
    learning_rate: A fixed global scaling factor.
    weight_decay: Strength of the weight decay regularization.
    weight_decay_mask: A tree with same structure as (or a prefix of) the
      params PyTree, or a Callable that returns such a pytree given the
      params/updates. The leaves should be booleans, `True` for
      leaves/subtrees you want to apply the transformation to, and `False`
      for those you want to skip.
    trust_coefficient: A multiplier for the trust ratio.
    eps: Optional additive constant in the trust ratio denominator.
    trust_ratio_mask: A tree with same structure as (or a prefix of) the
      params PyTree, or a Callable that returns such a pytree given the
      params/updates. The leaves should be booleans, `True` for
      leaves/subtrees you want to apply the transformation to, and `False`
      for those you want to skip.
    momentum: Decay rate for momentum.
    nesterov: Whether to use Nesterov momentum.

  Returns:
    The corresponding `GradientTransformation`.
  """
  # Trust-ratio scaling is applied only where `trust_ratio_mask` allows it.
  masked_trust_ratio = wrappers.masked(
      inner=transform.scale_by_trust_ratio(
          trust_coefficient=trust_coefficient, eps=eps),
      mask=trust_ratio_mask)
  stages = [
      transform.add_decayed_weights(weight_decay, mask=weight_decay_mask),
      masked_trust_ratio,
      _scale_by_learning_rate(learning_rate),
      transform.trace(decay=momentum, nesterov=nesterov),
  ]
  return combine.chain(*stages)
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
def lamb(
    learning_rate: ScalarOrSchedule,
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-6,
    eps_root: float = 0.0,
    weight_decay: float = 0.,
    mask: MaskOrFn = None,
) -> base.GradientTransformation:
  """The LAMB optimizer.

  LAMB is a general purpose layer-wise adaptive large batch optimizer
  designed to train consistently across a wide range of tasks, including
  attention-based models (such as Transformers) and ResNet-50, and to work
  with both small and large batch sizes. It was inspired by the LARS
  learning algorithm.

  References:
    You et al, 2019: https://arxiv.org/abs/1904.00962

  Args:
    learning_rate: A fixed global scaling factor.
    b1: Exponential decay rate to track the first moment of past gradients.
    b2: Exponential decay rate to track the second moment of past gradients.
    eps: A small constant applied to denominator outside of the square root
      (as in the Adam paper) to avoid dividing by zero when rescaling.
    eps_root: A small constant applied to denominator inside the square root
      (as in RMSProp), to avoid dividing by zero when rescaling. This is
      needed for instance when computing (meta-)gradients through Adam.
    weight_decay: Strength of the weight decay regularization.
    mask: A tree with same structure as (or a prefix of) the params PyTree,
      or a Callable that returns such a pytree given the params/updates.
      The leaves should be booleans, `True` for leaves/subtrees you want to
      apply the transformation to, and `False` for those you want to skip.

  Returns:
    The corresponding `GradientTransformation`.
  """
  stages = [
      transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),
      transform.add_decayed_weights(weight_decay=weight_decay, mask=mask),
      transform.scale_by_trust_ratio(),
      _scale_by_learning_rate(learning_rate),
  ]
  return combine.chain(*stages)
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
def noisy_sgd(
    learning_rate: ScalarOrSchedule,
    eta: float = 0.01,
    gamma: float = 0.55,
    seed: int = 0
) -> base.GradientTransformation:
  r"""A variant of SGD with added noise.

  Adding noise to the gradients has been observed to improve both the
  training error and the generalization error in very deep networks.

  References:
    Neelakantan et al, 2014: https://arxiv.org/abs/1511.06807

  Args:
    learning_rate: A fixed global scaling factor.
    eta: Initial variance for the Gaussian noise added to gradients.
    gamma: A parameter controlling the annealing of noise over time, the
      variance decays according to `(1+t)^-\gamma`.
    seed: Seed for the pseudo-random generation process.

  Returns:
    The corresponding `GradientTransformation`.
  """
  noise_injection = transform.add_noise(eta, gamma, seed)
  return combine.chain(noise_injection, _scale_by_learning_rate(learning_rate))
|
| 497 |
+
|
| 498 |
+
|
| 499 |
+
def novograd(
    learning_rate: ScalarOrSchedule,
    b1: float = 0.9,
    b2: float = 0.25,
    eps: float = 1e-6,
    eps_root: float = 0.0,
    weight_decay: float = 0.,
) -> base.GradientTransformation:
  """NovoGrad optimizer.

  NovoGrad is more robust to the initial learning rate and weight
  initialization than other methods; for instance it works well without LR
  warm-up where other methods require it. It performs exceptionally well for
  large batch training (e.g. it outperforms other methods on ResNet-50 for
  all batch sizes up to 32K) and needs half the memory of Adam. It was
  introduced together with the Jasper ASR model.

  References:
    Ginsburg et al, 2019: https://arxiv.org/abs/1905.11286
    Li et al, 2019: https://arxiv.org/abs/1904.03288

  Args:
    learning_rate: A fixed global scaling factor.
    b1: An exponential decay rate to track the first moment of past gradients.
    b2: An exponential decay rate to track the second moment of past
      gradients.
    eps: A small constant applied to denominator outside of the square root
      (as in the Adam paper) to avoid dividing by zero when rescaling.
    eps_root: A small constant applied to denominator inside the square root
      (as in RMSProp), to avoid dividing by zero when rescaling. This is
      needed for instance when computing (meta-)gradients through Adam.
    weight_decay: Strength of the weight decay regularization.

  Returns:
    The corresponding `GradientTransformation`.
  """
  novograd_scaling = transform.scale_by_novograd(
      b1=b1, b2=b2, eps=eps, eps_root=eps_root, weight_decay=weight_decay)
  return combine.chain(novograd_scaling, _scale_by_learning_rate(learning_rate))
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
def optimistic_gradient_descent(
    learning_rate: ScalarOrSchedule,
    alpha: ScalarOrSchedule = 1.0,
    beta: ScalarOrSchedule = 1.0
) -> base.GradientTransformation:
  """An Optimistic Gradient Descent optimizer.

  Optimistic gradient descent approximates extra-gradient methods, which
  need multiple gradient evaluations to compute each update. It comes with
  strong formal guarantees of last-iterate convergence in min-max games, a
  setting where plain gradient descent may oscillate or even diverge.

  References:
    [Mokhtari et al, 2019](https://arxiv.org/abs/1901.08511v2)

  Args:
    learning_rate: A fixed global scaling factor.
    alpha: Coefficient for generalized OGD.
    beta: Coefficient for generalized OGD negative momentum.

  Returns:
    A `GradientTransformation`.
  """
  # The optimistic correction is applied first; the (possibly scheduled)
  # learning rate then rescales the corrected update.
  optimistic_step = transform.scale_by_optimistic_gradient(
      alpha=alpha, beta=beta)
  return combine.chain(
      optimistic_step,
      _scale_by_learning_rate(learning_rate),
  )
|
| 569 |
+
|
| 570 |
+
|
| 571 |
+
def radam(
    learning_rate: ScalarOrSchedule,
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
    eps_root: float = 0.0,
    threshold: float = 5.0
) -> base.GradientTransformation:
  """The Rectified Adam optimizer.

  In the early stages of training, Adam's adaptive learning rate has an
  undesirably large variance because only a few samples have been seen when
  the optimizer statistics are estimated. Rectified Adam analytically
  corrects for this large variance.

  References:
    Kingma et al, 2014: https://arxiv.org/abs/1412.6980

  Args:
    learning_rate: A fixed global scaling factor.
    b1: Exponential decay rate to track the first moment of past gradients.
    b2: Exponential decay rate to track the second moment of past gradients.
    eps: A small constant applied to denominator outside of the square root
      (as in the Adam paper) to avoid dividing by zero when rescaling.
    eps_root: A small constant applied to denominator inside the square root (as
      in RMSProp), to avoid dividing by zero when rescaling. This is needed for
      instance when computing (meta-)gradients through Adam.
    threshold: Threshold for variance tractability.

  Returns:
    The corresponding `GradientTransformation`.
  """
  rectified_scaling = transform.scale_by_radam(
      b1=b1, b2=b2, eps=eps, eps_root=eps_root, threshold=threshold)
  return combine.chain(
      rectified_scaling,
      _scale_by_learning_rate(learning_rate),
  )
|
| 608 |
+
|
| 609 |
+
|
| 610 |
+
def rmsprop(
    learning_rate: ScalarOrSchedule,
    decay: float = 0.9,
    eps: float = 1e-8,
    initial_scale: float = 0.,
    centered: bool = False,
    momentum: Optional[float] = None,
    nesterov: bool = False
) -> base.GradientTransformation:
  # pylint: disable=line-too-long
  """A flexible RMSProp optimizer.

  RMSProp is an SGD variant with learning rate adaptation: the
  `learning_rate` applied to each weight is scaled by an estimate of the
  magnitude of the gradients on previous steps. The literature contains
  several RMSProp variants; this alias makes it easy to configure and switch
  between a number of them.

  References:
    Tieleman and Hinton, 2012: http://www.cs.toronto.edu/~hinton/coursera/lecture6/lec6.pdf
    Graves, 2013: https://arxiv.org/abs/1308.0850

  Args:
    learning_rate: A fixed global scaling factor.
    decay: Decay used to track the magnitude of previous gradients.
    eps: A small numerical constant to avoid dividing by zero when rescaling.
    initial_scale: Initial value of accumulators tracking the magnitude of
      previous updates. PyTorch uses `0`, TF1 uses `1`. When reproducing results
      from a paper, verify the value used by the authors.
    centered: Whether the second moment or the variance of the past gradients is
      used to rescale the latest gradients.
    momentum: Decay rate used by the momentum term, when it is set to `None`,
      then momentum is not used at all.
    nesterov: Whether Nesterov momentum is used.

  Returns:
    The corresponding `GradientTransformation`.
  """
  # pylint: enable=line-too-long
  # The centered variant normalizes by an estimate of the gradient variance,
  # the plain variant by an estimate of the raw second moment.
  if centered:
    scaling = transform.scale_by_stddev(
        decay=decay, eps=eps, initial_scale=initial_scale)
  else:
    scaling = transform.scale_by_rms(
        decay=decay, eps=eps, initial_scale=initial_scale)
  if momentum is None:
    momentum_step = base.identity()
  else:
    momentum_step = transform.trace(decay=momentum, nesterov=nesterov)
  return combine.chain(
      scaling,
      _scale_by_learning_rate(learning_rate),
      momentum_step,
  )
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
def sgd(
    learning_rate: ScalarOrSchedule,
    momentum: Optional[float] = None,
    nesterov: bool = False,
    accumulator_dtype: Optional[Any] = None,
) -> base.GradientTransformation:
  """A canonical Stochastic Gradient Descent optimizer.

  Implements stochastic gradient descent, with optional support for momentum
  and Nesterov acceleration, both standard practice when training deep neural
  networks with stochastic gradient descent.

  References:
    Sutskever et al, 2013: http://proceedings.mlr.press/v28/sutskever13.pdf

  Args:
    learning_rate: A fixed global scaling factor.
    momentum: Decay rate used by the momentum term, when it is set to `None`,
      then momentum is not used at all.
    nesterov: Whether Nesterov momentum is used.
    accumulator_dtype: Optional `dtype` to be used for the accumulator; if
      `None` then the `dtype` is inferred from `params` and `updates`.

  Returns:
    A `GradientTransformation`.
  """
  # With no momentum configured, the chain reduces to plain lr scaling.
  if momentum is None:
    momentum_step = base.identity()
  else:
    momentum_step = transform.trace(
        decay=momentum, nesterov=nesterov, accumulator_dtype=accumulator_dtype)
  return combine.chain(
      momentum_step,
      _scale_by_learning_rate(learning_rate),
  )
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
def sm3(
    learning_rate: float,
    momentum: float = 0.9
) -> base.GradientTransformation:
  """The SM3 optimizer.

  SM3 (Square-root of Minima of Sums of Maxima of Squared-gradients Method) is
  an adaptive optimizer designed to cut memory overhead when training very
  large models, such as the Transformer for machine translation, BERT for
  language modeling, and AmoebaNet-D for image classification. SM3: 1) applies
  to tensors of arbitrary dimensions and any predefined cover of the
  parameters; 2) adapts the learning rates in an adaptive and data-driven
  manner (like Adagrad and unlike Adafactor); and 3) comes with rigorous
  convergence guarantees in stochastic convex optimization settings.

  References:
    Anil et al, 2019: https://arxiv.org/abs/1901.11150

  Args:
    learning_rate: A fixed global scaling factor.
    momentum: Decay rate used by the momentum term (when it is not set to
      `None`, then momentum is not used at all).

  Returns:
    The corresponding `GradientTransformation`.
  """
  # Note: learning_rate is a plain float here, so the update is scaled by
  # its negation directly rather than via _scale_by_learning_rate.
  sm3_scaling = transform.scale_by_sm3(momentum)
  return combine.chain(sm3_scaling, transform.scale(-learning_rate))
|
| 730 |
+
|
| 731 |
+
|
| 732 |
+
def yogi(
    learning_rate: ScalarOrSchedule,
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-3,
) -> base.GradientTransformation:
  """The Yogi optimizer.

  Yogi is an adaptive optimizer that provides control in tuning the effective
  learning rate to prevent it from increasing. It thereby targets the
  convergence and generalization issues of exponential-moving-average-based
  adaptive methods (such as Adam and RMSprop). Yogi is a modification of Adam
  and uses the same parameters.

  References:
    Zaheer et al, 2020: http://www.sanjivk.com/yogi_nips2018.pdf

  Args:
    learning_rate: A fixed global scaling factor.
    b1: Exponential decay rate to track the first moment of past gradients.
    b2: Exponential decay rate to track the second moment of past gradients.
    eps: A small constant applied to denominator outside of the square root
      (as in the Adam paper) to avoid dividing by zero when rescaling.

  Returns:
    The corresponding `GradientTransformation`.
  """
  yogi_scaling = transform.scale_by_yogi(b1=b1, b2=b2, eps=eps)
  return combine.chain(
      yogi_scaling,
      _scale_by_learning_rate(learning_rate),
  )
|
| 763 |
+
|
| 764 |
+
|
| 765 |
+
def dpsgd(
    learning_rate: ScalarOrSchedule,
    l2_norm_clip: float,
    noise_multiplier: float,
    seed: int,
    momentum: Optional[float] = None,
    nesterov: bool = False
) -> base.GradientTransformation:
  """The DPSGD optimizer.

  Differential privacy is a standard for privacy guarantees of algorithms
  learning from aggregate databases including potentially sensitive information.
  DPSGD offers protection against a strong adversary with full knowledge of the
  training mechanism and access to the model’s parameters.

  WARNING: This `GradientTransformation` expects input updates to have a batch
  dimension on the 0th axis. That is, this function expects per-example
  gradients as input (which are easy to obtain in JAX using `jax.vmap`).

  References:
    Abadi et al, 2016: https://arxiv.org/abs/1607.00133

  Args:
    learning_rate: A fixed global scaling factor.
    l2_norm_clip: Maximum L2 norm of the per-example gradients.
    noise_multiplier: Ratio of standard deviation to the clipping norm.
    seed: Initial seed used for the jax.random.PRNGKey
    momentum: Decay rate used by the momentum term, when it is set to `None`,
      then momentum is not used at all.
    nesterov: Whether Nesterov momentum is used.

  Returns:
    A `GradientTransformation`.
  """
  # Per-example gradients are clipped, noised and aggregated before the
  # (optional) momentum and learning-rate scaling are applied.
  private_aggregation = privacy.differentially_private_aggregate(
      l2_norm_clip=l2_norm_clip,
      noise_multiplier=noise_multiplier,
      seed=seed)
  if momentum is None:
    momentum_step = base.identity()
  else:
    momentum_step = transform.trace(decay=momentum, nesterov=nesterov)
  return combine.chain(
      private_aggregation,
      momentum_step,
      _scale_by_learning_rate(learning_rate),
  )
|
| 808 |
+
|
| 809 |
+
|
| 810 |
+
def adamax(
    learning_rate: ScalarOrSchedule,
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
) -> base.GradientTransformation:
  """A variant of the Adam optimizer that uses the infinity norm.

  References:
    Kingma et al, 2014: https://arxiv.org/abs/1412.6980

  Args:
    learning_rate: A fixed global scaling factor.
    b1: Exponential decay rate to track the first moment of past gradients.
    b2: Exponential decay rate to track the maximum of past gradients.
    eps: A small constant applied to denominator to avoid dividing by zero when
      rescaling.

  Returns:
    The corresponding `GradientTransformation`.
  """
  adamax_scaling = transform.scale_by_adamax(b1=b1, b2=b2, eps=eps)
  return combine.chain(
      adamax_scaling,
      _scale_by_learning_rate(learning_rate),
  )
|
| 835 |
+
|
| 836 |
+
|
| 837 |
+
def adamaxw(
    learning_rate: ScalarOrSchedule,
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
    weight_decay: float = 1e-4,
    mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None,
) -> base.GradientTransformation:
  """Adamax with weight decay regularization.

  AdamaxW regularizes learning towards small weights via weight decay, which
  tends to improve generalization. With SGD the same effect can be obtained
  through an additive L2 loss term, but L2 regularization does not behave as
  intended for adaptive gradient algorithms such as Adam.

  WARNING: Sometimes you may want to skip weight decay for BatchNorm scale or
  for the bias parameters. You can use `optax.masked` to make your own AdamaxW
  variant where `additive_weight_decay` is applied only to a subset of `params`.

  References:
    Loshchilov et al, 2019: https://arxiv.org/abs/1711.05101

  Args:
    learning_rate: A fixed global scaling factor.
    b1: Exponential decay rate to track the first moment of past gradients.
    b2: Exponential decay rate to track the maximum of past gradients.
    eps: A small constant applied to denominator to avoid dividing by zero when
      rescaling.
    weight_decay: Strength of the weight decay regularization. Note that this
      weight decay is multiplied with the learning rate. This is consistent
      with other frameworks such as PyTorch, but different from
      (Loshchilov et al, 2019) where the weight decay is only multiplied with
      the "schedule multiplier", but not the base learning rate.
    mask: A tree with same structure as (or a prefix of) the params PyTree,
      or a Callable that returns such a pytree given the params/updates.
      The leaves should be booleans, `True` for leaves/subtrees you want to
      apply the weight decay to, and `False` for those you want to skip. Note
      that the Adamax gradient transformations are applied to all parameters.

  Returns:
    The corresponding `GradientTransformation`.
  """
  # Weight decay is added after the Adamax scaling but before the learning
  # rate is applied, so the decay is multiplied by the learning rate.
  adamax_scaling = transform.scale_by_adamax(b1=b1, b2=b2, eps=eps)
  decay_step = transform.add_decayed_weights(weight_decay, mask)
  return combine.chain(
      adamax_scaling,
      decay_step,
      _scale_by_learning_rate(learning_rate),
  )
|
lib/python3.10/site-packages/optax/_src/alias_test.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `alias.py`."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
|
| 24 |
+
from optax._src import alias
|
| 25 |
+
from optax._src import numerics
|
| 26 |
+
from optax._src import schedule
|
| 27 |
+
from optax._src import update
|
| 28 |
+
|
| 29 |
+
# Alias name + constructor kwargs for every optimizer exercised by the tests
# below. Learning rates are tuned per optimizer so 10k steps converge on the
# small test problems.
_OPTIMIZERS_UNDER_TEST = (
    dict(opt_name='sgd', opt_kwargs=dict(learning_rate=1e-3, momentum=0.9)),
    dict(opt_name='adafactor', opt_kwargs=dict(learning_rate=5e-3)),
    dict(opt_name='adagrad', opt_kwargs=dict(learning_rate=1.0)),
    dict(opt_name='adam', opt_kwargs=dict(learning_rate=1e-1)),
    dict(opt_name='adamw', opt_kwargs=dict(learning_rate=1e-1)),
    dict(opt_name='adamax', opt_kwargs=dict(learning_rate=1e-1)),
    dict(opt_name='adamaxw', opt_kwargs=dict(learning_rate=1e-1)),
    dict(opt_name='amsgrad', opt_kwargs=dict(learning_rate=1e-1)),
    dict(opt_name='lars', opt_kwargs=dict(learning_rate=1.0)),
    dict(opt_name='lamb', opt_kwargs=dict(learning_rate=1e-3)),
    dict(opt_name='noisy_sgd', opt_kwargs=dict(learning_rate=1e-3, eta=1e-4)),
    dict(opt_name='novograd', opt_kwargs=dict(learning_rate=1e-3)),
    dict(
        opt_name='optimistic_gradient_descent',
        opt_kwargs=dict(learning_rate=2e-3, alpha=0.7, beta=0.1)),
    dict(opt_name='rmsprop', opt_kwargs=dict(learning_rate=5e-3)),
    dict(opt_name='rmsprop', opt_kwargs=dict(learning_rate=5e-3, momentum=0.9)),
    dict(opt_name='fromage', opt_kwargs=dict(learning_rate=5e-3)),
    dict(opt_name='adabelief', opt_kwargs=dict(learning_rate=1e-2)),
    dict(opt_name='radam', opt_kwargs=dict(learning_rate=5e-3)),
    dict(opt_name='sm3', opt_kwargs=dict(learning_rate=1.0)),
    dict(opt_name='yogi', opt_kwargs=dict(learning_rate=1e-1)),
    dict(
        opt_name='dpsgd',
        opt_kwargs=dict(
            learning_rate=1e-3,
            l2_norm_clip=10.,
            noise_multiplier=1e-3,
            seed=0,
            momentum=0.2)),
)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _setup_parabola(dtype):
  """Quadratic function as an optimization target."""
  start_params = jnp.array([-1.0, 10.0, 1.0], dtype=dtype)
  target_params = jnp.array([1.0, -1.0, 1.0], dtype=dtype)

  # For complex dtypes, move the target off the real axis.
  if jnp.iscomplexobj(dtype):
    target_params = target_params * (1 + 1j)

  @jax.grad
  def grad_fn(params):
    # Squared distance to the target (abs_sq handles complex values).
    return jnp.sum(numerics.abs_sq(params - target_params))

  return start_params, target_params, grad_fn
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def _setup_rosenbrock(dtype):
  """Rosenbrock function as an optimization target."""
  a, b = 1.0, 100.0

  # For complex dtypes, rotate the minimum off the real axis.
  if jnp.iscomplexobj(dtype):
    a = a * (1 + 1j)

  start_params = jnp.array([0.0, 0.0], dtype=dtype)
  # The Rosenbrock minimum sits at (a, a^2).
  target_params = jnp.array([a, a**2], dtype=dtype)

  @jax.grad
  def grad_fn(params):
    first_term = numerics.abs_sq(a - params[0])
    second_term = b * numerics.abs_sq(params[1] - params[0]**2)
    return first_term + second_term

  return start_params, target_params, grad_fn
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class AliasTest(chex.TestCase):
  """End-to-end checks for the optimizer aliases in `alias.py`."""

  # Runs every optimizer in _OPTIMIZERS_UNDER_TEST on two small problems
  # (parabola and Rosenbrock) in both float32 and complex64.
  @parameterized.product(
      _OPTIMIZERS_UNDER_TEST,
      target=(_setup_parabola, _setup_rosenbrock),
      dtype=(jnp.float32, jnp.complex64),
  )
  def test_optimization(self, opt_name, opt_kwargs, target, dtype):
    """Checks each optimizer converges close to the problem's minimum."""
    # These optimizers are known not to handle complex parameters.
    if (opt_name
        in ('fromage', 'noisy_sgd', 'sm3', 'optimistic_gradient_descent') and
        jnp.iscomplexobj(dtype)):
      raise absltest.SkipTest(
          f'{opt_name} does not support complex parameters.')

    opt = getattr(alias, opt_name)(**opt_kwargs)
    initial_params, final_params, get_updates = target(dtype)

    @jax.jit
    def step(params, state):
      updates = get_updates(params)
      if opt_name == 'dpsgd':
        # dpsgd expects per-example gradients, i.e. a leading batch axis.
        updates = updates[None]
      # Complex gradients need to be conjugated before being added to parameters
      # https://gist.github.com/wdphy16/118aef6fb5f82c49790d7678cf87da29
      updates = jax.tree_util.tree_map(lambda x: x.conj(), updates)
      updates, state = opt.update(updates, state, params)
      params = update.apply_updates(params, updates)
      return params, state

    params = initial_params
    state = opt.init(params)
    for _ in range(10000):
      params, state = step(params, state)

    # Loose tolerances: we only require approximate convergence.
    chex.assert_trees_all_close(params, final_params, rtol=3e-2, atol=3e-2)

  @chex.all_variants
  @parameterized.product(_OPTIMIZERS_UNDER_TEST)
  def test_optimizers_can_be_wrapped_in_inject_hyperparams(
      self, opt_name, opt_kwargs):
    """Checks that optimizers can be wrapped in inject_hyperparams."""
    # See also https://github.com/deepmind/optax/issues/412.
    opt_factory = getattr(alias, opt_name)
    opt = opt_factory(**opt_kwargs)
    if opt_name == 'adafactor':
      # Adafactor wrapped in inject_hyperparams currently needs a static
      # argument to be specified in order to be jittable. See issue
      # https://github.com/deepmind/optax/issues/412.
      opt_inject = schedule.inject_hyperparams(
          opt_factory, static_args=('min_dim_size_to_factor',))(**opt_kwargs)
    else:
      opt_inject = schedule.inject_hyperparams(opt_factory)(**opt_kwargs)

    params = [-jnp.ones((2, 3)), jnp.ones((2, 5, 2))]
    grads = [jnp.ones((2, 3)), -jnp.ones((2, 5, 2))]

    # One update with the plain optimizer and one with the injected wrapper;
    # both should produce equivalent updates and (inner) states.
    state = self.variant(opt.init)(params)
    updates, new_state = self.variant(opt.update)(grads, state, params)

    state_inject = self.variant(opt_inject.init)(params)
    updates_inject, new_state_inject = self.variant(opt_inject.update)(
        grads, state_inject, params)

    with self.subTest('Equality of updates.'):
      chex.assert_trees_all_close(updates_inject, updates, rtol=1e-4)
    with self.subTest('Equality of new optimizer states.'):
      chex.assert_trees_all_close(
          new_state_inject.inner_state, new_state, rtol=1e-4)

  @parameterized.named_parameters([
      ('float32', 'float32'),
      ('bfloat16', 'bfloat16'),
      ('complex64', 'complex64'),
      ('None', None),
  ])
  def test_explicit_dtype(self, dtype):
    """Checks accumulator dtypes can be set explicitly on sgd/adam/adamw."""
    expected_dtype = jax.dtypes.canonicalize_dtype(dtype)  # None -> float32
    tx = alias.sgd(0.1, momentum=0.9, accumulator_dtype=dtype)
    trace_state, _ = tx.init(jnp.array([0.0, 0.0]))
    self.assertEqual(expected_dtype, trace_state.trace.dtype)
    tx = alias.adam(0.1, mu_dtype=dtype)
    adam_state, _ = tx.init(jnp.array([0.0, 0.0]))
    self.assertEqual(expected_dtype, adam_state.mu.dtype)
    tx = alias.adamw(0.1, mu_dtype=dtype)
    adam_state, _, _ = tx.init(jnp.array([0.0, 0.0]))
    self.assertEqual(expected_dtype, adam_state.mu.dtype)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
|
lib/python3.10/site-packages/optax/_src/base.py
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Base interfaces and datatypes."""
|
| 16 |
+
|
| 17 |
+
from typing import Any, Callable, NamedTuple, Optional, Sequence, Tuple
|
| 18 |
+
|
| 19 |
+
import chex
|
| 20 |
+
import jax
|
| 21 |
+
import jax.numpy as jnp
|
| 22 |
+
import typing_extensions
|
| 23 |
+
|
| 24 |
+
NO_PARAMS_MSG = (
|
| 25 |
+
'You are using a transformation that requires the current value of '
|
| 26 |
+
'parameters, but you are not passing `params` when calling `update`.')
|
| 27 |
+
|
| 28 |
+
PyTree = Any
|
| 29 |
+
Shape = Sequence[int]
|
| 30 |
+
|
| 31 |
+
OptState = chex.ArrayTree # States are arbitrary nests of `jnp.ndarrays`.
|
| 32 |
+
Params = chex.ArrayTree # Parameters are arbitrary nests of `jnp.ndarrays`.
|
| 33 |
+
Updates = Params # Gradient updates are of the same type as parameters.
|
| 34 |
+
|
| 35 |
+
Schedule = Callable[[chex.Numeric], chex.Numeric]
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class TransformInitFn(typing_extensions.Protocol):
  """Callable signature for the `init` half of a `GradientTransformation`.

  Given a tree of `params`, `init` constructs an arbitrarily structured
  initial `state` for the gradient transformation, which may hold statistics
  of past updates or any other non-static information.
  """

  def __call__(self, params: Params) -> OptState:
    """Builds the initial optimizer state.

    Args:
      params: The initial value of the parameters.

    Returns:
      The initial state of the gradient transformation.
    """
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class TransformUpdateFn(typing_extensions.Protocol):
  """Callable signature for the `update` half of a `GradientTransformation`.

  `update` receives a tree of candidate parameter `updates` (e.g. gradients
  of some loss), an arbitrarily structured `state`, and the current model
  `params`. The `params` argument is optional, but must be supplied when
  using transformations that need access to the current parameter values.
  """

  def __call__(
      self,
      updates: Updates,
      state: OptState,
      params: Optional[Params] = None
  ) -> Tuple[Updates, OptState]:
    """Transforms the updates and advances the optimizer state.

    Args:
      updates: A tree of candidate updates.
      state: The state of the gradient transformation.
      params: (Optionally) the current value of the parameters.

    Returns:
      The transformed updates, and the updated state.
    """
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class GradientTransformation(NamedTuple):
  """A pair of pure functions implementing a gradient transformation.

  Every optax optimizer is expressed as a _gradient transformation_: a pair
  of pure functions bundled in a `NamedTuple` so each can be referred to by
  name.

  Gradient transformations hold no internal state. Anything stateful — step
  counts for schedules, momentum buffers, and so on — travels through the
  optimizer _state_ pytree instead: each application of the transformation
  computes and returns a fresh state, which is then fed into the next call.

  Because the functions are pure and idempotent, the only way to alter a
  transformation's behaviour between steps is to modify the values stored in
  the optimizer state. The meta-learning example in the optax documentation
  shows how mutating the state can be used to control a transformation.

  Attributes:
    init: A pure function which, when called with an example instance of the
      parameters whose gradients will be transformed, returns a pytree
      containing the initial value for the optimizer state.
    update: A pure function which takes as input a pytree of updates (with the
      same tree structure as the original params pytree passed to init), the
      previous optimizer state (which may have been initialized using the init
      function), and optionally the current params. The update function then
      returns the computed gradient updates, and a new optimizer state.
  """
  init: TransformInitFn
  update: TransformUpdateFn
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class EmptyState(NamedTuple):
  """Holds no fields; the state used by transformations that carry no state."""
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def identity() -> GradientTransformation:
  """Builds a transformation that returns gradient updates unmodified.

  The resulting update function is a pure pass-through: the *gradient
  updates* come out exactly as they went in.

  Note: this should not be confused with `set_to_zero`, which maps the input
  updates to zero — that is the transform required for the *model parameters*
  to be left unchanged when the updates are applied to them.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(unused_params):
    del unused_params
    return EmptyState()

  def update_fn(updates, state, params=None):
    del params  # The identity transform never looks at the parameters.
    return updates, state

  return GradientTransformation(init_fn, update_fn)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def set_to_zero() -> GradientTransformation:
  """Stateless transformation that maps input gradients to zero.

  The update function built here returns, for every leaf of the input
  updates, an array of zeros with matching shape and dtype. Applying such
  updates to the model parameters therefore leaves the parameters unchanged.

  Combined with `multi_transform` or `masked`, this makes it possible to
  freeze (i.e. keep fixed) selected parts of the tree of model parameters
  while gradient updates continue to be applied to the other parts.

  When the zeroing happens inside the same jit-compiled function as the
  gradient computation, the other optax transformations, and the application
  of updates to parameters, the computations that became unnecessary will in
  general be dropped.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    del params  # Producing zeros requires no state.
    return EmptyState()

  def update_fn(updates, state, params=None):
    del params  # Unused by the zero transform.
    zeros = jax.tree_util.tree_map(lambda g: jnp.zeros_like(g), updates)
    return zeros, state

  return GradientTransformation(init_fn, update_fn)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def stateless(
    f: Callable[[Updates, Optional[Params]], Updates],
) -> GradientTransformation:
  """Creates a stateless transformation from an update-like function.

  This wrapper removes the boilerplate needed to build a transformation that
  keeps no saved state between iterations.

  Args:
    f: Update function taking updates (e.g. gradients) and parameters, and
      returning new updates. The parameters may be `None`.

  Returns:
    An `optax.GradientTransformation`.
  """

  def init_fn(unused_params):
    del unused_params
    return EmptyState()

  def update_fn(updates, state, params=None):
    del state  # Stateless: nothing is carried between iterations.
    return f(updates, params), EmptyState()

  return GradientTransformation(init_fn, update_fn)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def stateless_with_tree_map(
    f: Callable[[chex.Array, Optional[chex.Array]], chex.Array],
) -> GradientTransformation:
  """Creates a stateless transformation from a per-array update function.

  Like `optax.stateless`, this removes the boilerplate of building a
  transformation that needs no saved state between iterations. In addition,
  the given function operates on individual arrays and is mapped over the
  update/parameter trees for you via `jax.tree_util.tree_map`.

  Args:
    f: Update function taking an update array (e.g. a gradient) and the
      matching parameter array, returning a new update array. The parameter
      array may be `None`.

  Returns:
    An `optax.GradientTransformation`.
  """

  def init_fn(unused_params):
    del unused_params
    return EmptyState()

  def update_fn(updates, state, params=None):
    del state  # Stateless: nothing carries over between calls.
    if params is None:
      # No parameters supplied: call `f` with `None` for every leaf.
      new_updates = jax.tree_util.tree_map(lambda u: f(u, None), updates)
    else:
      new_updates = jax.tree_util.tree_map(f, updates, params)
    return new_updates, EmptyState()

  return GradientTransformation(init_fn, update_fn)
|
lib/python3.10/site-packages/optax/_src/combine_test.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `combine.py`."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
|
| 24 |
+
from optax._src import alias
|
| 25 |
+
from optax._src import combine
|
| 26 |
+
from optax._src import transform
|
| 27 |
+
from optax._src import update
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
STEPS = 50
|
| 31 |
+
LR = 1e-2
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class ComposeTest(chex.TestCase):
  """Checks that `combine.chain` equals applying transforms in sequence."""

  def setUp(self):
    super().setUp()
    # A small two-leaf parameter tree and fixed per-step gradients.
    self.init_params = (jnp.array([1., 2.]), jnp.array([3., 4.]))
    self.per_step_updates = (jnp.array([500., 5.]), jnp.array([300., 3.]))

  @chex.all_variants
  def test_chain(self):
    # Adam scaling, plain momentum trace, then a negative learning-rate scale.
    transformations = [
        transform.scale_by_adam(),
        transform.trace(decay=0, nesterov=False),
        transform.scale(-LR)]

    # Apply updates with chain.
    chain_params = self.init_params
    chained_transforms = combine.chain(*transformations)
    state = chained_transforms.init(chain_params)
    self.assertIsInstance(state, tuple)

    @self.variant
    def update_fn(updates, state):
      return chained_transforms.update(updates, state)

    for _ in range(STEPS):
      updates, state = update_fn(self.per_step_updates, state)
      self.assertIsInstance(state, tuple)
      chain_params = update.apply_updates(chain_params, updates)

    # Manually apply sequence of transformations.
    manual_params = self.init_params
    states = [t.init(manual_params) for t in transformations]
    for _ in range(STEPS):
      updates = self.per_step_updates
      new_states = []
      # Thread the updates through each transform, collecting new states.
      for t, s in zip(transformations, states):
        updates, state = t.update(updates, s)
        new_states.append(state)
      manual_params = update.apply_updates(manual_params, updates)
      states = new_states

    # Check equivalence.
    chex.assert_tree_all_close(manual_params, chain_params, rtol=1e-4)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def _map_keys_fn(fn):
|
| 80 |
+
def map_fn(nested_dict):
|
| 81 |
+
return {k: (map_fn(v) if isinstance(v, dict) else fn(k, v))
|
| 82 |
+
for k, v in nested_dict.items()}
|
| 83 |
+
return map_fn
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class MultiTransformTest(chex.TestCase):
  """Tests for the multi_transform wrapper."""

  @chex.all_variants
  @parameterized.parameters(True, False)
  def test_multi_transform(self, use_fn):
    # Leaves are routed by the first character of their key ('a'/'b'/'c').
    params = {'a1': 1., 'b1': 2., 'z1': {'a2': 3., 'z2': {'c1': 4.}}}
    params = jax.tree_util.tree_map(jnp.asarray, params)
    input_updates = jax.tree_util.tree_map(lambda x: x / 10.0, params)
    tx_dict = {'a': transform.scale(-1.0),
               'b': transform.ema(0.0),  # stateful
               'c': transform.scale(2.0)}
    param_labels = _map_keys_fn(lambda k, _: k[0])
    if not use_fn:
      # Exercise passing a concrete label pytree rather than a label fn.
      param_labels = param_labels(params)
    tx = combine.multi_transform(tx_dict, param_labels)
    update_fn = self.variant(tx.update)
    state = self.variant(tx.init)(params)

    # Expected behaviour per label: 'a' negates, 'b' passes, 'c' doubles.
    correct_update_fn = _map_keys_fn(
        lambda k, v: {'a': -v, 'b': v, 'c': 2.0*v}[k[0]])

    updates, state = update_fn(input_updates, state, params)
    correct_updates = correct_update_fn(input_updates)
    chex.assert_tree_all_close(updates, correct_updates)

    # Check repeated application, this time with no params.
    correct_updates = correct_update_fn(correct_updates)
    updates, state = update_fn(updates, state)
    chex.assert_tree_all_close(updates, correct_updates)

  @parameterized.parameters(list, tuple, dict)
  def test_empty(self, container):
    # An empty pytree should round-trip through multi_transform unchanged.
    init_fn, update_fn = combine.multi_transform(
        {0: alias.sgd(1.)}, lambda _: 0)
    updates, _ = update_fn(container(), init_fn(container()))
    self.assertEqual(updates, container())

  @chex.all_variants
  @parameterized.parameters(
      (False, False), (False, True), (True, False), (True, True))
  def test_labels_mismatch(self, use_extra_label, use_fn):
    # The labels from label_fn must be a subset of the keys for the tx.
    params = {'a': 1., 'b': [2., 3.], 'c': {'d': 4., 'e': (5., 6.)}}
    params = jax.tree_util.tree_map(jnp.asarray, params)
    label_tree = {'a': 0, 'b': [1, 0], 'c': 1}  # prefix of params

    if use_extra_label:
      # Label 3 has no entry in `transforms`; init should reject it.
      label_tree['a'] = 3

    transforms = {0: alias.sgd(1.),
                  1: alias.adam(1., b1=0., b2=0.),
                  2: transform.trace(1.0)}
    init_fn, update_fn = combine.multi_transform(
        transforms, (lambda _: label_tree) if use_fn else label_tree)

    if use_extra_label:
      with self.assertRaises(ValueError):
        self.variant(init_fn)(params)
    else:
      state = self.variant(init_fn)(params)
      updates = jax.tree_util.tree_map(lambda x: x / 10.0, params)
      self.variant(update_fn)(updates, state)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
# Allow running this test file directly, e.g. `python combine_test.py`.
if __name__ == '__main__':
  absltest.main()
|
lib/python3.10/site-packages/optax/_src/constrain.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Gradient transformations used to enforce specific constraints."""
|
| 16 |
+
|
| 17 |
+
from typing import Any, NamedTuple
|
| 18 |
+
|
| 19 |
+
import jax
|
| 20 |
+
import jax.numpy as jnp
|
| 21 |
+
|
| 22 |
+
from optax._src import base
|
| 23 |
+
|
| 24 |
+
# pylint:disable=no-value-for-parameter
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# The non-negativity constraint is stateless; reuse the shared empty state.
NonNegativeParamsState = base.EmptyState
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def keep_params_nonnegative() -> base.GradientTransformation:
  """Modifies the updates to keep parameters non-negative, i.e. >= 0.

  After the transformed updates are applied, every parameter is guaranteed
  to be greater than or equal to zero. In a chain of transformations this
  should therefore be the last one.

  WARNING: the transformation expects the incoming params to be
  non-negative. When a param is negative, the transformed update moves it
  exactly to 0.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    del params  # The constraint needs no state.
    return NonNegativeParamsState()

  def update_fn(updates, state, params):
    if params is None:
      raise ValueError(base.NO_PARAMS_MSG)

    def _project(p, u):
      # If p + u would go below zero, replace the update with -p so the new
      # parameter value lands exactly on zero.
      return jnp.where((p + u) < 0., -p, u)

    return jax.tree_util.tree_map(_project, params, updates), state

  return base.GradientTransformation(init_fn, update_fn)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class ZeroNansState(NamedTuple):
  """State recording where NaNs were detected.

  `found_nan` mirrors the tree structure of the parameters. Each leaf is a
  single boolean which is True iff a NaN was detected in the corresponding
  parameter array at the last call to `update`.
  """
  found_nan: Any
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def zero_nans() -> base.GradientTransformation:
  """A transformation which replaces NaNs with 0.

  Zeroing values in gradients is guaranteed to produce a direction of
  non-increasing loss.

  The transformation's state mirrors the tree structure of the parameters;
  each leaf is a single boolean which is True iff a NaN was detected in the
  corresponding parameter array at the last call to `update`. The state is
  never read by the transformation itself — it only lets users observe when
  NaNs have been zeroed out.

  Returns:
    A `GradientTransformation`.
  """

  def init_fn(params):
    # Start with a False flag for every leaf: nothing detected yet.
    flags = jax.tree_util.tree_map(
        lambda p: jnp.array(False, dtype=jnp.bool_), params)
    return ZeroNansState(flags)

  def update_fn(updates, opt_state, params=None):
    del params
    # Record, per leaf, whether any NaN is present in the incoming updates.
    new_state = ZeroNansState(jax.tree_util.tree_map(
        lambda g: jnp.any(jnp.isnan(g)), updates))
    # Replace NaN entries by zeros, leaving all other values untouched.
    cleaned = jax.tree_util.tree_map(
        lambda g: jnp.where(jnp.isnan(g), jnp.zeros_like(g), g), updates)
    return cleaned, new_state

  return base.GradientTransformation(init=init_fn, update=update_fn)
|
lib/python3.10/site-packages/optax/_src/constrain_test.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax._src.constrain."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
|
| 19 |
+
import chex
|
| 20 |
+
import jax.numpy as jnp
|
| 21 |
+
|
| 22 |
+
from optax._src import combine
|
| 23 |
+
from optax._src import constrain
|
| 24 |
+
from optax._src import transform
|
| 25 |
+
from optax._src import update
|
| 26 |
+
|
| 27 |
+
STEPS = 50
|
| 28 |
+
LR = 1e-2
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class ConstraintsTest(chex.TestCase):
  """Tests for the constraint transformations in `constrain.py`."""

  def test_keep_params_nonnegative(self):
    grads = (jnp.array([500., -500., 0.]),
             jnp.array([500., -500., 0.]),
             jnp.array([500., -500., 0.]))

    params = (jnp.array([-1., -1., -1.]),
              jnp.array([1., 1., 1.]),
              jnp.array([0., 0., 0.]))

    # vanilla sgd
    opt = combine.chain(
        transform.trace(decay=0, nesterov=False), transform.scale(-LR))
    opt_state = opt.init(params)

    updates, _ = opt.update(grads, opt_state, params)
    new_params = update.apply_updates(params, updates)

    # Without the constraint, several parameters go negative.
    chex.assert_tree_all_close(new_params, (jnp.array([-6., 4., -1.]),
                                            jnp.array([-4., 6., 1.]),
                                            jnp.array([-5., 5., 0.])))

    # sgd with keeping parameters non-negative
    opt = combine.chain(
        transform.trace(decay=0, nesterov=False), transform.scale(-LR),
        constrain.keep_params_nonnegative())
    opt_state = opt.init(params)

    updates, _ = opt.update(grads, opt_state, params)
    new_params = update.apply_updates(params, updates)

    # Negative results from above are clipped to exactly zero.
    chex.assert_tree_all_close(new_params, (jnp.array([0., 4., 0.]),
                                            jnp.array([0., 6., 1.]),
                                            jnp.array([0., 5., 0.])))

  @chex.all_variants
  def test_zero_nans(self):
    params = (jnp.zeros([3]), jnp.zeros([3]), jnp.zeros([3]))

    opt = constrain.zero_nans()
    opt_state = self.variant(opt.init)(params)
    update_fn = self.variant(opt.update)

    chex.assert_tree_all_close(opt_state,
                               constrain.ZeroNansState((jnp.array(False),) * 3))

    # Check an update with nans
    grads_with_nans = (jnp.ones([3]),
                       jnp.array([1., float('nan'), float('nan')]),
                       jnp.array([float('nan'), 1., 1.]))
    updates, opt_state = update_fn(grads_with_nans, opt_state)
    chex.assert_tree_all_close(
        opt_state,
        constrain.ZeroNansState(
            (jnp.array(False), jnp.array(True), jnp.array(True))))
    chex.assert_tree_all_close(
        updates,
        (jnp.ones([3]), jnp.array([1., 0., 0.]), jnp.array([0., 1., 1.])))

    # Check an update with nans and infs; infs must pass through untouched.
    grads_with_nans_infs = (jnp.ones([3]),
                            jnp.array([1., float('nan'),
                                       float('nan')]),
                            jnp.array([float('inf'), 1., 1.]))
    updates, opt_state = update_fn(grads_with_nans_infs, opt_state)
    chex.assert_tree_all_close(
        opt_state,
        constrain.ZeroNansState(
            (jnp.array(False), jnp.array(True), jnp.array(False))))
    chex.assert_tree_all_close(updates, (jnp.ones([3]), jnp.array(
        [1., 0., 0.]), jnp.array([float('inf'), 1., 1.])))

    # Check an update with only good values
    grads = (jnp.ones([3]), jnp.ones([3]), jnp.ones([3]))
    updates, opt_state = update_fn(grads, opt_state)
    chex.assert_tree_all_close(
        opt_state,
        constrain.ZeroNansState(
            (jnp.array(False), jnp.array(False), jnp.array(False))))
    chex.assert_tree_all_close(updates, grads)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# Allow running this test file directly, e.g. `python constrain_test.py`.
if __name__ == '__main__':
  absltest.main()
|
lib/python3.10/site-packages/optax/_src/control_variates_test.py
ADDED
|
@@ -0,0 +1,595 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `control_variates.py`."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
import numpy as np
|
| 24 |
+
|
| 25 |
+
from optax._src import control_variates
|
| 26 |
+
from optax._src import stochastic_gradient_estimators as sge
|
| 27 |
+
from optax._src import utils
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Set seed for deterministic sampling.
np.random.seed(42)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _assert_equal(actual, expected, rtol=1e-2, atol=1e-2):
  """Asserts that arrays are equal."""
  # Note: assert_allclose does not check shapes
  chex.assert_equal_shape((actual, expected))

  # Scalar.
  if not actual.shape:
    np.testing.assert_allclose(
        np.asarray(actual), np.asarray(expected), rtol, atol)
    return

  # We get around the bug https://github.com/numpy/numpy/issues/13801
  # by comparing zero-expected positions against the absolute tolerance only.
  zero_indices = np.argwhere(expected == 0)
  if not np.all(np.abs(actual[zero_indices]) <= atol):
    raise AssertionError(f'Larger than {atol} diff in {actual[zero_indices]}')

  # Remaining (non-zero) positions use the normal relative comparison.
  non_zero_indices = np.argwhere(expected != 0)
  np.testing.assert_allclose(
      np.asarray(actual)[non_zero_indices],
      expected[non_zero_indices], rtol, atol)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _map(cv, params, samples, state=None):
|
| 57 |
+
return jax.vmap(lambda x: cv(params, x, state))(samples)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def _map_variant(variant):
  """Wraps `_map` in a chex variant; `cv` (arg 0) must stay static for jit."""
  return variant(_map, static_argnums=0)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def _cv_jac_variant(variant):
  """Wraps `control_variates_jacobians` in a chex variant.

  The listed argnums are non-array arguments (functions and flags) that must
  be treated as static under jit.
  """
  return variant(
      control_variates.control_variates_jacobians,
      static_argnums=(0, 1, 2, 4, 6, 7, 8))
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class DeltaControlVariateTest(chex.TestCase):
  """Tests the second-order delta-method control variate on sample functions."""

  @chex.all_variants
  @parameterized.parameters([(1.0, 0.5)])
  def testQuadraticFunction(self, effective_mean, effective_log_scale):
    data_dims = 20
    num_samples = 10**6
    rng = jax.random.PRNGKey(1)

    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]

    dist = utils.multi_normal(*params)
    dist_samples = dist.sample((num_samples,), rng)
    function = lambda x: jnp.sum(x**2)

    # For a quadratic, the second-order expansion is exact.
    cv, expected_cv, _ = control_variates.control_delta_method(function)
    avg_cv = jnp.mean(_map_variant(self.variant)(cv, params, dist_samples))
    expected_cv_value = jnp.sum(dist_samples**2) / num_samples

    # This should be an analytical computation, the result needs to be
    # accurate.
    _assert_equal(avg_cv, expected_cv_value, rtol=1e-1, atol=1e-3)
    _assert_equal(expected_cv(params, None), expected_cv_value, rtol=0.02)

  @chex.all_variants
  @parameterized.parameters([(1.0, 1.0)])
  def testPolinomialFunction(self, effective_mean, effective_log_scale):
    data_dims = 10
    num_samples = 10**3

    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]

    dist = utils.multi_normal(*params)
    rng = jax.random.PRNGKey(1)
    dist_samples = dist.sample((num_samples,), rng)
    function = lambda x: jnp.sum(x**5)

    cv, expected_cv, _ = control_variates.control_delta_method(function)
    avg_cv = jnp.mean(_map_variant(self.variant)(cv, params, dist_samples))

    # Check that the average value of the control variate is close to the
    # expected value.
    _assert_equal(avg_cv, expected_cv(params, None), rtol=1e-1, atol=1e-3)

  @chex.all_variants
  def testNonPolynomialFunction(self):
    data_dims = 10
    num_samples = 10**3

    mean = jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = jnp.ones(shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]

    rng = jax.random.PRNGKey(1)
    dist = utils.multi_normal(*params)
    dist_samples = dist.sample((num_samples,), rng)
    function = lambda x: jnp.sum(jnp.log(x**2))

    cv, expected_cv, _ = control_variates.control_delta_method(function)
    avg_cv = jnp.mean(_map_variant(self.variant)(cv, params, dist_samples))

    # Check that the average value of the control variate is close to the
    # expected value.
    _assert_equal(avg_cv, expected_cv(params, None), rtol=1e-1, atol=1e-3)

    # Second order expansion is log(\mu**2) + 1/2 * \sigma**2 (-2 / \mu**2)
    expected_cv_val = - np.exp(1.) ** 2 * data_dims
    _assert_equal(
        expected_cv(params, None), expected_cv_val, rtol=1e-1, atol=1e-3)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class MovingAverageBaselineTest(chex.TestCase):
|
| 148 |
+
|
| 149 |
+
@chex.all_variants
|
| 150 |
+
@parameterized.parameters(
|
| 151 |
+
[(1.0, 0.5, 0.9),
|
| 152 |
+
(1.0, 0.5, 0.99)])
|
| 153 |
+
def testLinearFunction(
|
| 154 |
+
self, effective_mean, effective_log_scale, decay):
|
| 155 |
+
weights = jnp.array([1., 2., 3.], dtype=jnp.float32)
|
| 156 |
+
num_samples = 10**4
|
| 157 |
+
data_dims = len(weights)
|
| 158 |
+
|
| 159 |
+
mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
|
| 160 |
+
log_scale = effective_log_scale * jnp.ones(
|
| 161 |
+
shape=(data_dims), dtype=jnp.float32)
|
| 162 |
+
|
| 163 |
+
params = [mean, log_scale]
|
| 164 |
+
function = lambda x: jnp.sum(weights * x)
|
| 165 |
+
|
| 166 |
+
rng = jax.random.PRNGKey(1)
|
| 167 |
+
dist = utils.multi_normal(*params)
|
| 168 |
+
dist_samples = dist.sample((num_samples,), rng)
|
| 169 |
+
|
| 170 |
+
cv, expected_cv, update_state = control_variates.moving_avg_baseline(
|
| 171 |
+
function, decay=decay, zero_debias=False,
|
| 172 |
+
use_decay_early_training_heuristic=False)
|
| 173 |
+
|
| 174 |
+
state_1 = jnp.array(1.)
|
| 175 |
+
avg_cv = jnp.mean(_map_variant(self.variant)(
|
| 176 |
+
cv, params, dist_samples, (state_1, 0)))
|
| 177 |
+
_assert_equal(avg_cv, state_1)
|
| 178 |
+
_assert_equal(expected_cv(params, (state_1, 0)), state_1)
|
| 179 |
+
|
| 180 |
+
state_2 = jnp.array(2.)
|
| 181 |
+
avg_cv = jnp.mean(
|
| 182 |
+
_map_variant(self.variant)(cv, params, dist_samples, (state_2, 0)))
|
| 183 |
+
_assert_equal(avg_cv, state_2)
|
| 184 |
+
_assert_equal(expected_cv(params, (state_2, 0)), state_2)
|
| 185 |
+
|
| 186 |
+
update_state_1 = update_state(params, dist_samples, (state_1, 0))[0]
|
| 187 |
+
_assert_equal(
|
| 188 |
+
update_state_1,
|
| 189 |
+
decay * state_1 + (1 - decay) * function(mean))
|
| 190 |
+
|
| 191 |
+
update_state_2 = update_state(params, dist_samples, (state_2, 0))[0]
|
| 192 |
+
_assert_equal(
|
| 193 |
+
update_state_2,
|
| 194 |
+
decay * state_2 + (1 - decay) * function(mean))
|
| 195 |
+
|
| 196 |
+
@chex.all_variants
|
| 197 |
+
@parameterized.parameters(
|
| 198 |
+
[(1.0, 0.5, 0.9),
|
| 199 |
+
(1.0, 0.5, 0.99)])
|
| 200 |
+
def testLinearFunctionWithHeuristic(
|
| 201 |
+
self, effective_mean, effective_log_scale, decay):
|
| 202 |
+
weights = jnp.array([1., 2., 3.], dtype=jnp.float32)
|
| 203 |
+
num_samples = 10**5
|
| 204 |
+
data_dims = len(weights)
|
| 205 |
+
|
| 206 |
+
mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
|
| 207 |
+
log_scale = effective_log_scale * jnp.ones(
|
| 208 |
+
shape=(data_dims), dtype=jnp.float32)
|
| 209 |
+
|
| 210 |
+
params = [mean, log_scale]
|
| 211 |
+
function = lambda x: jnp.sum(weights * x)
|
| 212 |
+
|
| 213 |
+
rng = jax.random.PRNGKey(1)
|
| 214 |
+
dist = utils.multi_normal(*params)
|
| 215 |
+
dist_samples = dist.sample((num_samples,), rng)
|
| 216 |
+
|
| 217 |
+
cv, expected_cv, update_state = control_variates.moving_avg_baseline(
|
| 218 |
+
function, decay=decay, zero_debias=False,
|
| 219 |
+
use_decay_early_training_heuristic=True)
|
| 220 |
+
|
| 221 |
+
state_1 = jnp.array(1.)
|
| 222 |
+
avg_cv = jnp.mean(_map_variant(self.variant)(
|
| 223 |
+
cv, params, dist_samples, (state_1, 0)))
|
| 224 |
+
_assert_equal(avg_cv, state_1)
|
| 225 |
+
_assert_equal(expected_cv(params, (state_1, 0)), state_1)
|
| 226 |
+
|
| 227 |
+
state_2 = jnp.array(2.)
|
| 228 |
+
avg_cv = jnp.mean(
|
| 229 |
+
_map_variant(self.variant)(cv, params, dist_samples, (state_2, 0)))
|
| 230 |
+
_assert_equal(avg_cv, state_2)
|
| 231 |
+
_assert_equal(expected_cv(params, (state_2, 0)), state_2)
|
| 232 |
+
|
| 233 |
+
first_step_decay = 0.1
|
| 234 |
+
update_state_1 = update_state(params, dist_samples, (state_1, 0))[0]
|
| 235 |
+
_assert_equal(
|
| 236 |
+
update_state_1,
|
| 237 |
+
first_step_decay * state_1 + (1 - first_step_decay) * function(mean))
|
| 238 |
+
|
| 239 |
+
second_step_decay = 2. / 11
|
| 240 |
+
update_state_2 = update_state(params, dist_samples, (state_2, 1))[0]
|
| 241 |
+
_assert_equal(
|
| 242 |
+
update_state_2,
|
| 243 |
+
second_step_decay * state_2 + (1 - second_step_decay) * function(mean))
|
| 244 |
+
|
| 245 |
+
@parameterized.parameters(
|
| 246 |
+
[(1.0, 0.5, 0.9),
|
| 247 |
+
(1.0, 0.5, 0.99)])
|
| 248 |
+
def testLinearFunctionZeroDebias(
|
| 249 |
+
self, effective_mean, effective_log_scale, decay):
|
| 250 |
+
weights = jnp.array([1., 2., 3.], dtype=jnp.float32)
|
| 251 |
+
num_samples = 10**5
|
| 252 |
+
data_dims = len(weights)
|
| 253 |
+
|
| 254 |
+
mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
|
| 255 |
+
log_scale = effective_log_scale * jnp.ones(
|
| 256 |
+
shape=(data_dims), dtype=jnp.float32)
|
| 257 |
+
|
| 258 |
+
params = [mean, log_scale]
|
| 259 |
+
function = lambda x: jnp.sum(weights * x)
|
| 260 |
+
|
| 261 |
+
rng = jax.random.PRNGKey(1)
|
| 262 |
+
dist = utils.multi_normal(*params)
|
| 263 |
+
dist_samples = dist.sample((num_samples,), rng)
|
| 264 |
+
|
| 265 |
+
update_state = control_variates.moving_avg_baseline(
|
| 266 |
+
function, decay=decay, zero_debias=False,
|
| 267 |
+
use_decay_early_training_heuristic=False)[-1]
|
| 268 |
+
|
| 269 |
+
update_state_zero_debias = control_variates.moving_avg_baseline(
|
| 270 |
+
function, decay=decay, zero_debias=True,
|
| 271 |
+
use_decay_early_training_heuristic=False)[-1]
|
| 272 |
+
|
| 273 |
+
updated_state = update_state(params, dist_samples, (jnp.array(0.), 0))[0]
|
| 274 |
+
_assert_equal(updated_state, (1 - decay) * function(mean))
|
| 275 |
+
|
| 276 |
+
updated_state_zero_debias = update_state_zero_debias(
|
| 277 |
+
params, dist_samples, (jnp.array(0.), 0))[0]
|
| 278 |
+
_assert_equal(
|
| 279 |
+
updated_state_zero_debias, function(mean))
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
class DeltaMethodAnalyticalExpectedGrads(chex.TestCase):
|
| 283 |
+
"""Tests for grads approximations."""
|
| 284 |
+
|
| 285 |
+
@chex.all_variants
|
| 286 |
+
@parameterized.named_parameters(
|
| 287 |
+
chex.params_product([
|
| 288 |
+
('_score_function_jacobians', 1.0, 1.0, sge.score_function_jacobians),
|
| 289 |
+
('_pathwise_jacobians', 1.0, 1.0, sge.pathwise_jacobians),
|
| 290 |
+
('_measure_valued_jacobians', 1.0, 1.0, sge.measure_valued_jacobians),
|
| 291 |
+
], [
|
| 292 |
+
('estimate_cv_coeffs', True),
|
| 293 |
+
('no_estimate_cv_coeffs', False),
|
| 294 |
+
],
|
| 295 |
+
named=True))
|
| 296 |
+
def testQuadraticFunction(self, effective_mean, effective_log_scale,
|
| 297 |
+
grad_estimator, estimate_cv_coeffs):
|
| 298 |
+
data_dims = 3
|
| 299 |
+
num_samples = 10**3
|
| 300 |
+
|
| 301 |
+
mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
|
| 302 |
+
log_scale = effective_log_scale * jnp.ones(
|
| 303 |
+
shape=(data_dims), dtype=jnp.float32)
|
| 304 |
+
|
| 305 |
+
params = [mean, log_scale]
|
| 306 |
+
function = lambda x: jnp.sum(x**2)
|
| 307 |
+
rng = jax.random.PRNGKey(1)
|
| 308 |
+
|
| 309 |
+
jacobians = _cv_jac_variant(self.variant)(
|
| 310 |
+
function,
|
| 311 |
+
control_variates.control_delta_method,
|
| 312 |
+
grad_estimator,
|
| 313 |
+
params,
|
| 314 |
+
utils.multi_normal, # dist_builder
|
| 315 |
+
rng,
|
| 316 |
+
num_samples,
|
| 317 |
+
None, # No cv state.
|
| 318 |
+
estimate_cv_coeffs)[0]
|
| 319 |
+
|
| 320 |
+
expected_mean_grads = 2 * effective_mean * np.ones(
|
| 321 |
+
data_dims, dtype=np.float32)
|
| 322 |
+
expected_log_scale_grads = 2 * np.exp(2 * effective_log_scale) * np.ones(
|
| 323 |
+
data_dims, dtype=np.float32)
|
| 324 |
+
|
| 325 |
+
mean_jacobians = jacobians[0]
|
| 326 |
+
chex.assert_shape(mean_jacobians, (num_samples, data_dims))
|
| 327 |
+
mean_grads_from_jacobian = jnp.mean(mean_jacobians, axis=0)
|
| 328 |
+
|
| 329 |
+
log_scale_jacobians = jacobians[1]
|
| 330 |
+
chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
|
| 331 |
+
log_scale_grads_from_jacobian = jnp.mean(log_scale_jacobians, axis=0)
|
| 332 |
+
|
| 333 |
+
_assert_equal(mean_grads_from_jacobian, expected_mean_grads,
|
| 334 |
+
rtol=1e-1, atol=1e-3)
|
| 335 |
+
_assert_equal(log_scale_grads_from_jacobian, expected_log_scale_grads,
|
| 336 |
+
rtol=1e-1, atol=1e-3)
|
| 337 |
+
|
| 338 |
+
@chex.all_variants
|
| 339 |
+
@parameterized.named_parameters(
|
| 340 |
+
chex.params_product([
|
| 341 |
+
('_score_function_jacobians', 1.0, 1.0, sge.score_function_jacobians),
|
| 342 |
+
('_pathwise_jacobians', 1.0, 1.0, sge.pathwise_jacobians),
|
| 343 |
+
('_measure_valued_jacobians', 1.0, 1.0, sge.measure_valued_jacobians),
|
| 344 |
+
], [
|
| 345 |
+
('estimate_cv_coeffs', True),
|
| 346 |
+
('no_estimate_cv_coeffs', False),
|
| 347 |
+
],
|
| 348 |
+
named=True))
|
| 349 |
+
def testCubicFunction(
|
| 350 |
+
self, effective_mean, effective_log_scale, grad_estimator,
|
| 351 |
+
estimate_cv_coeffs):
|
| 352 |
+
data_dims = 1
|
| 353 |
+
num_samples = 10**5
|
| 354 |
+
|
| 355 |
+
mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
|
| 356 |
+
log_scale = effective_log_scale * jnp.ones(
|
| 357 |
+
shape=(data_dims), dtype=jnp.float32)
|
| 358 |
+
|
| 359 |
+
params = [mean, log_scale]
|
| 360 |
+
function = lambda x: jnp.sum(x**3)
|
| 361 |
+
rng = jax.random.PRNGKey(1)
|
| 362 |
+
|
| 363 |
+
jacobians = _cv_jac_variant(self.variant)(
|
| 364 |
+
function,
|
| 365 |
+
control_variates.control_delta_method,
|
| 366 |
+
grad_estimator,
|
| 367 |
+
params,
|
| 368 |
+
utils.multi_normal,
|
| 369 |
+
rng,
|
| 370 |
+
num_samples,
|
| 371 |
+
None, # No cv state.
|
| 372 |
+
estimate_cv_coeffs)[0]
|
| 373 |
+
|
| 374 |
+
# The third order uncentered moment of the Gaussian distribution is
|
| 375 |
+
# mu**3 + 2 mu * sigma **2. We use that to compute the expected value
|
| 376 |
+
# of the gradients. Note: for the log scale we need use the chain rule.
|
| 377 |
+
expected_mean_grads = (
|
| 378 |
+
3 * effective_mean**2 + 3 * np.exp(effective_log_scale)**2)
|
| 379 |
+
expected_mean_grads *= np.ones(data_dims, dtype=np.float32)
|
| 380 |
+
expected_log_scale_grads = (
|
| 381 |
+
6 * effective_mean * np.exp(effective_log_scale) ** 2)
|
| 382 |
+
expected_log_scale_grads *= np.ones(data_dims, dtype=np.float32)
|
| 383 |
+
|
| 384 |
+
mean_jacobians = jacobians[0]
|
| 385 |
+
chex.assert_shape(mean_jacobians, (num_samples, data_dims))
|
| 386 |
+
mean_grads_from_jacobian = jnp.mean(mean_jacobians, axis=0)
|
| 387 |
+
|
| 388 |
+
log_scale_jacobians = jacobians[1]
|
| 389 |
+
chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
|
| 390 |
+
log_scale_grads_from_jacobian = jnp.mean(log_scale_jacobians, axis=0)
|
| 391 |
+
|
| 392 |
+
_assert_equal(mean_grads_from_jacobian, expected_mean_grads,
|
| 393 |
+
rtol=1e-1, atol=1e-3)
|
| 394 |
+
|
| 395 |
+
_assert_equal(log_scale_grads_from_jacobian, expected_log_scale_grads,
|
| 396 |
+
rtol=1e-1, atol=1e-3)
|
| 397 |
+
|
| 398 |
+
@chex.all_variants
|
| 399 |
+
@parameterized.named_parameters(
|
| 400 |
+
chex.params_product([
|
| 401 |
+
('_score_function_jacobians', 1.0, 1.0, sge.score_function_jacobians),
|
| 402 |
+
('_pathwise_jacobians', 1.0, 1.0, sge.pathwise_jacobians),
|
| 403 |
+
('_measure_valued_jacobians', 1.0, 1.0, sge.measure_valued_jacobians),
|
| 404 |
+
], [
|
| 405 |
+
('estimate_cv_coeffs', True),
|
| 406 |
+
('no_estimate_cv_coeffs', False),
|
| 407 |
+
],
|
| 408 |
+
named=True))
|
| 409 |
+
def testForthPowerFunction(
|
| 410 |
+
self, effective_mean, effective_log_scale, grad_estimator,
|
| 411 |
+
estimate_cv_coeffs):
|
| 412 |
+
data_dims = 1
|
| 413 |
+
num_samples = 10**5
|
| 414 |
+
|
| 415 |
+
mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
|
| 416 |
+
log_scale = effective_log_scale * jnp.ones(
|
| 417 |
+
shape=(data_dims), dtype=jnp.float32)
|
| 418 |
+
|
| 419 |
+
params = [mean, log_scale]
|
| 420 |
+
function = lambda x: jnp.sum(x**4)
|
| 421 |
+
rng = jax.random.PRNGKey(1)
|
| 422 |
+
|
| 423 |
+
jacobians = _cv_jac_variant(self.variant)(
|
| 424 |
+
function,
|
| 425 |
+
control_variates.control_delta_method,
|
| 426 |
+
grad_estimator,
|
| 427 |
+
params,
|
| 428 |
+
utils.multi_normal,
|
| 429 |
+
rng,
|
| 430 |
+
num_samples,
|
| 431 |
+
None, # No cv state
|
| 432 |
+
estimate_cv_coeffs)[0]
|
| 433 |
+
# The third order uncentered moment of the Gaussian distribution is
|
| 434 |
+
# mu**4 + 6 mu **2 sigma **2 + 3 sigma**4. We use that to compute the
|
| 435 |
+
# expected value of the gradients.
|
| 436 |
+
# Note: for the log scale we need use the chain rule.
|
| 437 |
+
expected_mean_grads = (
|
| 438 |
+
3 * effective_mean**3
|
| 439 |
+
+ 12 * effective_mean * np.exp(effective_log_scale)**2)
|
| 440 |
+
expected_mean_grads *= np.ones(data_dims, dtype=np.float32)
|
| 441 |
+
expected_log_scale_grads = 12 * (
|
| 442 |
+
effective_mean**2 * np.exp(effective_log_scale) +
|
| 443 |
+
np.exp(effective_log_scale) ** 3) * np.exp(effective_log_scale)
|
| 444 |
+
expected_log_scale_grads *= np.ones(data_dims, dtype=np.float32)
|
| 445 |
+
|
| 446 |
+
mean_jacobians = jacobians[0]
|
| 447 |
+
chex.assert_shape(mean_jacobians, (num_samples, data_dims))
|
| 448 |
+
mean_grads_from_jacobian = jnp.mean(mean_jacobians, axis=0)
|
| 449 |
+
|
| 450 |
+
log_scale_jacobians = jacobians[1]
|
| 451 |
+
chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
|
| 452 |
+
log_scale_grads_from_jacobian = jnp.mean(log_scale_jacobians, axis=0)
|
| 453 |
+
|
| 454 |
+
_assert_equal(mean_grads_from_jacobian, expected_mean_grads,
|
| 455 |
+
rtol=1e-1, atol=1e-3)
|
| 456 |
+
|
| 457 |
+
_assert_equal(log_scale_grads_from_jacobian, expected_log_scale_grads,
|
| 458 |
+
rtol=1e-1, atol=1e-3)
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
class ConsistencyWithStandardEstimators(chex.TestCase):
|
| 462 |
+
"""Tests for consistency between estimators."""
|
| 463 |
+
|
| 464 |
+
@chex.all_variants
|
| 465 |
+
@parameterized.named_parameters(
|
| 466 |
+
chex.params_product([
|
| 467 |
+
('_score_function_jacobians', 1, 1, sge.score_function_jacobians,
|
| 468 |
+
10**6),
|
| 469 |
+
('_pathwise_jacobians', 1, 1, sge.pathwise_jacobians, 10**5),
|
| 470 |
+
('_measure_valued_jacobians', 1, 1, sge.measure_valued_jacobians,
|
| 471 |
+
10**5),
|
| 472 |
+
], [
|
| 473 |
+
('control_delta_method', control_variates.control_delta_method),
|
| 474 |
+
('moving_avg_baseline', control_variates.moving_avg_baseline),
|
| 475 |
+
],
|
| 476 |
+
named=True))
|
| 477 |
+
def testWeightedLinearFunction(self, effective_mean, effective_log_scale,
|
| 478 |
+
grad_estimator, num_samples,
|
| 479 |
+
control_variate_from_function):
|
| 480 |
+
"""Check that the gradients are consistent between estimators."""
|
| 481 |
+
weights = jnp.array([1., 2., 3.], dtype=jnp.float32)
|
| 482 |
+
data_dims = len(weights)
|
| 483 |
+
|
| 484 |
+
mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
|
| 485 |
+
log_scale = effective_log_scale * jnp.ones(
|
| 486 |
+
shape=(data_dims), dtype=jnp.float32)
|
| 487 |
+
|
| 488 |
+
params = [mean, log_scale]
|
| 489 |
+
function = lambda x: jnp.sum(weights * x)
|
| 490 |
+
rng = jax.random.PRNGKey(1)
|
| 491 |
+
cv_rng, ge_rng = jax.random.split(rng)
|
| 492 |
+
|
| 493 |
+
jacobians = _cv_jac_variant(self.variant)(
|
| 494 |
+
function,
|
| 495 |
+
control_variate_from_function,
|
| 496 |
+
grad_estimator,
|
| 497 |
+
params,
|
| 498 |
+
utils.multi_normal, # dist_builder
|
| 499 |
+
cv_rng, # rng
|
| 500 |
+
num_samples,
|
| 501 |
+
(0., 0), # control_variate_state
|
| 502 |
+
False)[0]
|
| 503 |
+
|
| 504 |
+
mean_jacobians = jacobians[0]
|
| 505 |
+
chex.assert_shape(mean_jacobians, (num_samples, data_dims))
|
| 506 |
+
mean_grads = jnp.mean(mean_jacobians, axis=0)
|
| 507 |
+
|
| 508 |
+
log_scale_jacobians = jacobians[1]
|
| 509 |
+
chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
|
| 510 |
+
log_scale_grads = jnp.mean(log_scale_jacobians, axis=0)
|
| 511 |
+
|
| 512 |
+
# We use a different random number generator for the gradient estimator
|
| 513 |
+
# without the control variate.
|
| 514 |
+
no_cv_jacobians = grad_estimator(
|
| 515 |
+
function, [mean, log_scale],
|
| 516 |
+
utils.multi_normal, ge_rng, num_samples=num_samples)
|
| 517 |
+
|
| 518 |
+
no_cv_mean_jacobians = no_cv_jacobians[0]
|
| 519 |
+
chex.assert_shape(no_cv_mean_jacobians, (num_samples, data_dims))
|
| 520 |
+
no_cv_mean_grads = jnp.mean(no_cv_mean_jacobians, axis=0)
|
| 521 |
+
|
| 522 |
+
no_cv_log_scale_jacobians = no_cv_jacobians[1]
|
| 523 |
+
chex.assert_shape(no_cv_log_scale_jacobians, (num_samples, data_dims))
|
| 524 |
+
no_cv_log_scale_grads = jnp.mean(no_cv_log_scale_jacobians, axis=0)
|
| 525 |
+
|
| 526 |
+
_assert_equal(mean_grads, no_cv_mean_grads, rtol=1e-1, atol=5e-2)
|
| 527 |
+
_assert_equal(log_scale_grads, no_cv_log_scale_grads, rtol=1, atol=5e-2)
|
| 528 |
+
|
| 529 |
+
@chex.all_variants
|
| 530 |
+
@parameterized.named_parameters(
|
| 531 |
+
chex.params_product([
|
| 532 |
+
('_score_function_jacobians', 1, 1, sge.score_function_jacobians,
|
| 533 |
+
10**5),
|
| 534 |
+
('_pathwise_jacobians', 1, 1, sge.pathwise_jacobians, 10**5),
|
| 535 |
+
('_measure_valued_jacobians', 1, 1, sge.measure_valued_jacobians,
|
| 536 |
+
10**5),
|
| 537 |
+
], [
|
| 538 |
+
('control_delta_method', control_variates.control_delta_method),
|
| 539 |
+
('moving_avg_baseline', control_variates.moving_avg_baseline),
|
| 540 |
+
],
|
| 541 |
+
named=True))
|
| 542 |
+
def testNonPolynomialFunction(
|
| 543 |
+
self, effective_mean, effective_log_scale,
|
| 544 |
+
grad_estimator, num_samples, control_variate_from_function):
|
| 545 |
+
"""Check that the gradients are consistent between estimators."""
|
| 546 |
+
data_dims = 3
|
| 547 |
+
|
| 548 |
+
mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
|
| 549 |
+
log_scale = effective_log_scale * jnp.ones(
|
| 550 |
+
shape=(data_dims), dtype=jnp.float32)
|
| 551 |
+
|
| 552 |
+
params = [mean, log_scale]
|
| 553 |
+
function = lambda x: jnp.log(jnp.sum(x**2))
|
| 554 |
+
rng = jax.random.PRNGKey(1)
|
| 555 |
+
cv_rng, ge_rng = jax.random.split(rng)
|
| 556 |
+
|
| 557 |
+
jacobians = _cv_jac_variant(self.variant)(
|
| 558 |
+
function,
|
| 559 |
+
control_variate_from_function,
|
| 560 |
+
grad_estimator,
|
| 561 |
+
params,
|
| 562 |
+
utils.multi_normal,
|
| 563 |
+
cv_rng,
|
| 564 |
+
num_samples,
|
| 565 |
+
(0., 0), # control_variate_state
|
| 566 |
+
False)[0]
|
| 567 |
+
|
| 568 |
+
mean_jacobians = jacobians[0]
|
| 569 |
+
chex.assert_shape(mean_jacobians, (num_samples, data_dims))
|
| 570 |
+
mean_grads = jnp.mean(mean_jacobians, axis=0)
|
| 571 |
+
|
| 572 |
+
log_scale_jacobians = jacobians[1]
|
| 573 |
+
chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
|
| 574 |
+
log_scale_grads = jnp.mean(log_scale_jacobians, axis=0)
|
| 575 |
+
|
| 576 |
+
# We use a different random number generator for the gradient estimator
|
| 577 |
+
# without the control variate.
|
| 578 |
+
no_cv_jacobians = grad_estimator(
|
| 579 |
+
function, [mean, log_scale],
|
| 580 |
+
utils.multi_normal, ge_rng, num_samples=num_samples)
|
| 581 |
+
|
| 582 |
+
no_cv_mean_jacobians = no_cv_jacobians[0]
|
| 583 |
+
chex.assert_shape(no_cv_mean_jacobians, (num_samples, data_dims))
|
| 584 |
+
no_cv_mean_grads = jnp.mean(no_cv_mean_jacobians, axis=0)
|
| 585 |
+
|
| 586 |
+
no_cv_log_scale_jacobians = no_cv_jacobians[1]
|
| 587 |
+
chex.assert_shape(no_cv_log_scale_jacobians, (num_samples, data_dims))
|
| 588 |
+
no_cv_log_scale_grads = jnp.mean(no_cv_log_scale_jacobians, axis=0)
|
| 589 |
+
|
| 590 |
+
_assert_equal(mean_grads, no_cv_mean_grads, rtol=1e-1, atol=5e-2)
|
| 591 |
+
_assert_equal(log_scale_grads, no_cv_log_scale_grads, rtol=1e-1, atol=5e-2)
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
if __name__ == '__main__':
|
| 595 |
+
absltest.main()
|
lib/python3.10/site-packages/optax/_src/factorized.py
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Factorized optimizers."""
|
| 16 |
+
|
| 17 |
+
import dataclasses
|
| 18 |
+
from typing import NamedTuple, Optional, Tuple, Callable
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
import numpy as np
|
| 24 |
+
|
| 25 |
+
from optax._src import base
|
| 26 |
+
from optax._src import numerics
|
| 27 |
+
from optax._src import utils
|
| 28 |
+
|
| 29 |
+
# pylint:disable=no-value-for-parameter
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _decay_rate_pow(i: int, exponent: float = 0.8) -> float:
|
| 33 |
+
"""Second-order moment decay schedule."""
|
| 34 |
+
t = jnp.array(i, jnp.float32) + 1.0
|
| 35 |
+
return 1.0 - t**(-exponent)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def _factored_dims(
|
| 39 |
+
shape: base.Shape,
|
| 40 |
+
factored: bool,
|
| 41 |
+
min_dim_size_to_factor: int
|
| 42 |
+
) -> Optional[Tuple[int, int]]:
|
| 43 |
+
"""Whether to use a factored second moment estimator.
|
| 44 |
+
|
| 45 |
+
This function returns a tuple with the two largest axes to reduce over.
|
| 46 |
+
If no two dimensions have size >= min_dim_size_to_factor, return None.
|
| 47 |
+
|
| 48 |
+
Args:
|
| 49 |
+
shape: an input shape
|
| 50 |
+
factored: whether to use factored second-moment estimator for 2d vars.
|
| 51 |
+
min_dim_size_to_factor: only factor accumulator if two array dimensions
|
| 52 |
+
have at least this size.
|
| 53 |
+
|
| 54 |
+
Returns:
|
| 55 |
+
None or a tuple of ints
|
| 56 |
+
"""
|
| 57 |
+
if not factored or len(shape) < 2:
|
| 58 |
+
return None
|
| 59 |
+
sorted_dims = np.argsort(shape)
|
| 60 |
+
if shape[sorted_dims[-2]] < min_dim_size_to_factor:
|
| 61 |
+
return None
|
| 62 |
+
return int(sorted_dims[-2]), int(sorted_dims[-1])
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
@dataclasses.dataclass
|
| 66 |
+
class _UpdateResult:
|
| 67 |
+
"""Opaque containter that is not traversed by jax.tree_util.tree_map."""
|
| 68 |
+
update: chex.Array # the update to apply to params
|
| 69 |
+
v_row: chex.Array # used for factored params.
|
| 70 |
+
v_col: chex.Array # used for factored params.
|
| 71 |
+
v: chex.Array # used for params where factoring is skipped.
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class FactoredState(NamedTuple):
|
| 75 |
+
"""Overall state of the gradient transformation."""
|
| 76 |
+
count: chex.Array # number of update steps.
|
| 77 |
+
v_row: chex.ArrayTree # Tree of factored params.
|
| 78 |
+
v_col: chex.ArrayTree # Tree of factored params.
|
| 79 |
+
v: chex.ArrayTree # Tree for params where factoring is skipped.
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def scale_by_factored_rms(
|
| 83 |
+
factored: bool = True,
|
| 84 |
+
decay_rate: float = 0.8,
|
| 85 |
+
step_offset: int = 0,
|
| 86 |
+
min_dim_size_to_factor: int = 128,
|
| 87 |
+
epsilon: float = 1e-30,
|
| 88 |
+
decay_rate_fn: Callable[[int, float], chex.Array] = _decay_rate_pow):
|
| 89 |
+
"""Scaling by a factored estimate of the gradient rms (as in Adafactor).
|
| 90 |
+
|
| 91 |
+
This is a so-called "1+epsilon" scaling algorithms, that is extremely memory
|
| 92 |
+
efficient compared to RMSProp/Adam, and has had wide success when applied to
|
| 93 |
+
large-scale training of attention-based models.
|
| 94 |
+
|
| 95 |
+
References:
|
| 96 |
+
[Shazeer et al, 2018](https://arxiv.org/abs/1804.04235)
|
| 97 |
+
|
| 98 |
+
Args:
|
| 99 |
+
factored: boolean: whether to use factored second-moment estimates..
|
| 100 |
+
decay_rate: float: controls second-moment exponential decay schedule.
|
| 101 |
+
step_offset: for finetuning, one may set this to the starting step-number
|
| 102 |
+
of the fine tuning phase.
|
| 103 |
+
min_dim_size_to_factor: only factor accumulator if two array dimensions
|
| 104 |
+
are at least this size.
|
| 105 |
+
epsilon: Regularization constant for squared gradient.
|
| 106 |
+
decay_rate_fn: A function that accepts the current step, the decay rate
|
| 107 |
+
parameter and controls the schedule for the second momentum. Defaults to
|
| 108 |
+
the original adafactor's power decay schedule. One potential shortcoming
|
| 109 |
+
of the orignal schedule is the fact that second momentum converges to 1,
|
| 110 |
+
which effectively freezes the second momentum. To prevent this the user
|
| 111 |
+
can opt for a custom schedule that sets an upper bound for the second
|
| 112 |
+
momentum, like in [Zhai et al., 2021](https://arxiv.org/abs/2106.04560).
|
| 113 |
+
|
| 114 |
+
Returns:
|
| 115 |
+
the corresponding `GradientTransformation`.
|
| 116 |
+
"""
|
| 117 |
+
|
| 118 |
+
def _to_state(count: chex.Array, result_tree):
|
| 119 |
+
"""Maps from a tree of (factored) values to separate trees of values."""
|
| 120 |
+
return FactoredState(
|
| 121 |
+
count=count,
|
| 122 |
+
v_row=jax.tree_util.tree_map(lambda o: o.v_row, result_tree),
|
| 123 |
+
v_col=jax.tree_util.tree_map(lambda o: o.v_col, result_tree),
|
| 124 |
+
v=jax.tree_util.tree_map(lambda o: o.v, result_tree))
|
| 125 |
+
|
| 126 |
+
def init_fn(params):
|
| 127 |
+
"""Initialise the optimiser's state."""
|
| 128 |
+
|
| 129 |
+
def _init(param):
|
| 130 |
+
shape = param.shape
|
| 131 |
+
factored_dims = _factored_dims(shape, factored, min_dim_size_to_factor)
|
| 132 |
+
if factored_dims is not None:
|
| 133 |
+
d1, d0 = factored_dims
|
| 134 |
+
vr_shape = np.delete(shape, d0)
|
| 135 |
+
vc_shape = np.delete(shape, d1)
|
| 136 |
+
return _UpdateResult(
|
| 137 |
+
update=jnp.zeros((1,)),
|
| 138 |
+
v_row=jnp.zeros(vr_shape),
|
| 139 |
+
v_col=jnp.zeros(vc_shape),
|
| 140 |
+
v=jnp.zeros((1,)))
|
| 141 |
+
else:
|
| 142 |
+
return _UpdateResult(
|
| 143 |
+
update=jnp.zeros((1,)),
|
| 144 |
+
v_row=jnp.zeros((1,)),
|
| 145 |
+
v_col=jnp.zeros((1,)),
|
| 146 |
+
v=jnp.zeros(param.shape))
|
| 147 |
+
|
| 148 |
+
return _to_state(
|
| 149 |
+
jnp.zeros([], jnp.int32), jax.tree_util.tree_map(_init, params))
|
| 150 |
+
|
| 151 |
+
def update_fn(grads, state, params):
|
| 152 |
+
"""Apply gradient transformation."""
|
| 153 |
+
if params is None:
|
| 154 |
+
raise ValueError(base.NO_PARAMS_MSG)
|
| 155 |
+
|
| 156 |
+
def _update(grad, v_row, v_col, v, param, step):
|
| 157 |
+
shape = param.shape
|
| 158 |
+
decay_rate_t = decay_rate_fn(step - step_offset, decay_rate)
|
| 159 |
+
|
| 160 |
+
# Scaled by factorized second moment statistics.
|
| 161 |
+
new_v_row = jnp.zeros((1,))
|
| 162 |
+
new_v_col = jnp.zeros((1,))
|
| 163 |
+
new_v = jnp.zeros((1,))
|
| 164 |
+
|
| 165 |
+
factored_dims = _factored_dims(shape, factored, min_dim_size_to_factor)
|
| 166 |
+
if factored_dims is not None:
|
| 167 |
+
d1, d0 = factored_dims
|
| 168 |
+
grad_sqr = numerics.abs_sq(grad) + epsilon
|
| 169 |
+
new_v_row = (
|
| 170 |
+
decay_rate_t * v_row +
|
| 171 |
+
(1. - decay_rate_t) * jnp.mean(grad_sqr, axis=d0))
|
| 172 |
+
new_v_col = (
|
| 173 |
+
decay_rate_t * v_col +
|
| 174 |
+
(1. - decay_rate_t) * jnp.mean(grad_sqr, axis=d1))
|
| 175 |
+
reduced_d1 = d1-1 if d1 > d0 else d1
|
| 176 |
+
row_col_mean = jnp.mean(new_v_row, axis=reduced_d1, keepdims=True)
|
| 177 |
+
row_factor = (new_v_row / row_col_mean) ** -0.5
|
| 178 |
+
col_factor = (new_v_col) ** -0.5
|
| 179 |
+
update = (
|
| 180 |
+
grad *
|
| 181 |
+
jnp.expand_dims(row_factor, axis=d0) *
|
| 182 |
+
jnp.expand_dims(col_factor, axis=d1))
|
| 183 |
+
else:
|
| 184 |
+
grad_sqr = numerics.abs_sq(grad) + epsilon
|
| 185 |
+
new_v = decay_rate_t * v + (1. - decay_rate_t) * grad_sqr
|
| 186 |
+
update = grad * (new_v)**-0.5
|
| 187 |
+
|
| 188 |
+
return _UpdateResult(update, new_v_row, new_v_col, new_v)
|
| 189 |
+
|
| 190 |
+
# Transform grad and compute new per-parameter stats.
|
| 191 |
+
output = jax.tree_util.tree_map(
|
| 192 |
+
lambda *args: _update(*args, state.count),
|
| 193 |
+
grads, state.v_row, state.v_col, state.v, params)
|
| 194 |
+
|
| 195 |
+
# Unpack updates / stats and return.
|
| 196 |
+
updates = jax.tree_util.tree_map(lambda o: o.update, output)
|
| 197 |
+
return updates, _to_state(utils.safe_int32_increment(state.count), output)
|
| 198 |
+
|
| 199 |
+
return base.GradientTransformation(init_fn, update_fn)
|
lib/python3.10/site-packages/optax/_src/float64_test.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests that types are preserved by the `update` calls when jax_enbable_x64."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
from jax.config import config
|
| 23 |
+
import jax.numpy as jnp
|
| 24 |
+
|
| 25 |
+
from optax._src import alias
|
| 26 |
+
from optax._src import base
|
| 27 |
+
from optax._src import clipping
|
| 28 |
+
from optax._src import transform
|
| 29 |
+
from optax._src import update
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
# (test_name, constructor, constructor_kwargs) triples covering the optax
# gradient transformations whose dtype-preservation behaviour is tested below.
# NOTE: `parameterized.named_parameters` requires unique test names; the
# `transform.scale_by_adam` entry was previously also named 'adam', which
# clashed with the `alias.adam` entry, so it is renamed 'scale_by_adam'
# (consistent with the other `scale_by_*` entries).
ALL_MODULES = [
    ('identity', base.identity, {}),
    ('clip', clipping.clip, dict(max_delta=1.0)),
    ('clip_by_global_norm', clipping.clip_by_global_norm, dict(max_norm=1.0)),
    ('trace', transform.trace, dict(decay=0.5, nesterov=False)),
    ('trace_with_nesterov', transform.trace, dict(decay=0.5, nesterov=True)),
    ('scale_by_rss', transform.scale_by_rss, {}),
    ('scale_by_rms', transform.scale_by_rms, {}),
    ('scale_by_stddev', transform.scale_by_stddev, {}),
    ('scale_by_adam', transform.scale_by_adam, {}),
    ('scale', transform.scale, dict(step_size=3.0)),
    ('additive_weight_decay', transform.additive_weight_decay,
     dict(weight_decay=0.1)),
    ('scale_by_schedule', transform.scale_by_schedule,
     dict(step_size_fn=lambda x: x * 0.1)),
    ('scale_by_trust_ratio', transform.scale_by_trust_ratio, {}),
    ('add_noise', transform.add_noise, dict(eta=1.0, gamma=0.1, seed=42)),
    ('apply_every_k', transform.apply_every, {}),
    ('adagrad', alias.adagrad, dict(learning_rate=0.1)),
    ('adam', alias.adam, dict(learning_rate=0.1)),
    ('adamw', alias.adamw, dict(learning_rate=0.1)),
    ('fromage', alias.fromage, dict(learning_rate=0.1)),
    ('lamb', alias.lamb, dict(learning_rate=0.1)),
    ('noisy_sgd', alias.noisy_sgd, dict(learning_rate=0.1)),
    ('rmsprop', alias.rmsprop, dict(learning_rate=0.1)),
    ('sgd', alias.sgd, dict(learning_rate=0.1)),
    ('dpsgd', alias.dpsgd,
     dict(learning_rate=0.1, l2_norm_clip=0.9, noise_multiplier=1.1, seed=42)),
]
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class Float64Test(parameterized.TestCase):
  """Checks that optax transformations preserve leaf dtypes."""

  def _assert_dtype_equals(self, tree1, tree2):
    """Asserts that the two trees carry identical per-leaf dtypes."""
    dtypes1 = jax.tree_util.tree_map(lambda leaf: leaf.dtype, tree1)
    dtypes2 = jax.tree_util.tree_map(lambda leaf: leaf.dtype, tree2)
    self.assertEqual(dtypes1, dtypes2)

  @chex.all_variants
  @parameterized.named_parameters(ALL_MODULES)
  def test_mixed_dtype_input_outputs(self, transform_constr, transform_kwargs):
    """Runs one init/update cycle on mixed float32/float64 inputs."""
    # One float32 and one float64 leaf, so the test exercises both dtypes
    # through the same transformation at once.
    initial_params = (
        jnp.array([1., 2.], dtype=jnp.float32),
        jnp.array([3., 4.], dtype=jnp.float64))
    updates = (
        jnp.array([10., 21.], dtype=jnp.float32),
        jnp.array([33., 42.], dtype=jnp.float64))
    scaler = transform_constr(**transform_kwargs)
    init_fn = self.variant(scaler.init)
    update_fn = self.variant(scaler.update)

    initial_state = init_fn(initial_params)
    updates, new_state = update_fn(
        updates, initial_state, params=initial_params)
    new_params = update.apply_updates(initial_params, updates)

    # Neither the optimizer state nor the parameters may change dtype.
    self._assert_dtype_equals(initial_state, new_state)
    self._assert_dtype_equals(initial_params, new_params)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
if __name__ == '__main__':
  # Enable double precision before running any test; presumably required so
  # that the float64 test inputs are not truncated to float32 — the tests in
  # this file construct jnp.float64 arrays.
  config.update('jax_enable_x64', True)
  absltest.main()
|
lib/python3.10/site-packages/optax/_src/linear_algebra.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Linear algebra utilities used in optimisation."""
|
| 16 |
+
|
| 17 |
+
import chex
|
| 18 |
+
import jax
|
| 19 |
+
from jax import lax
|
| 20 |
+
import jax.numpy as jnp
|
| 21 |
+
import numpy as np
|
| 22 |
+
|
| 23 |
+
from optax._src import base
|
| 24 |
+
from optax._src import numerics
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def global_norm(updates: base.Updates) -> chex.Array:
  """Compute the global norm across a nested structure of tensors.

  Args:
    updates: a nested structure (pytree) of tensors.

  Returns:
    A scalar array: the square root of the sum of squared magnitudes of all
    leaves. (The previous `-> base.Updates` annotation was incorrect — the
    whole tree is reduced to a single scalar.)
  """
  return jnp.sqrt(sum(
      jnp.sum(numerics.abs_sq(x)) for x in jax.tree_util.tree_leaves(updates)))
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def power_iteration(matrix: chex.Array,
                    num_iters: int = 100,
                    error_tolerance: float = 1e-6,
                    precision: lax.Precision = lax.Precision.HIGHEST):
  r"""Power iteration algorithm.

  Given a symmetric PSD matrix `A`, estimates the eigenvalue `\lambda` of
  greatest absolute value and a corresponding eigenvector `v` by repeatedly
  applying `A` to a (randomly initialised) vector and renormalising.

  References:
    [Wikipedia, 2021](https://en.wikipedia.org/wiki/Power_iteration)

  Args:
    matrix: the symmetric PSD matrix.
    num_iters: Number of iterations.
    error_tolerance: Iterative exit condition.
    precision: precision XLA related flag, the available options are:
      a) lax.Precision.DEFAULT (better step time, but not precise);
      b) lax.Precision.HIGH (increased precision, slower);
      c) lax.Precision.HIGHEST (best possible precision, slowest).

  Returns:
    eigen vector, eigen value
  """
  dim = matrix.shape[-1]

  def _keep_iterating(state):
    step, _, _, _, still_improving = state
    return jnp.logical_and(step < num_iters, still_improving)

  def _step(state):
    """One step of power iteration: apply the matrix and renormalise."""
    step, vec, eig_estimate, _, _ = state
    vec = vec / jnp.linalg.norm(vec)
    mat_vec = jnp.einsum('ij,j->i', matrix, vec, precision=precision)
    new_estimate = jnp.einsum('i,i->', vec, mat_vec, precision=precision)
    # Keep iterating while the Rayleigh-quotient estimate still moves by more
    # than the tolerance.
    return (step + 1, mat_vec, new_estimate, mat_vec,
            jnp.greater(jnp.abs(new_estimate - eig_estimate), error_tolerance))

  # Figure out how to use step as seed for random.
  v_0 = np.random.uniform(-1.0, 1.0, dim).astype(matrix.dtype)

  init_state = tuple([0, v_0, jnp.zeros([], dtype=matrix.dtype), v_0, True])
  _, v_out, s_out, _, _ = lax.while_loop(_keep_iterating, _step, init_state)
  v_out = v_out / jnp.linalg.norm(v_out)
  return v_out, s_out
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def matrix_inverse_pth_root(matrix: chex.Array,
                            p: int,
                            num_iters: int = 100,
                            ridge_epsilon: float = 1e-6,
                            error_tolerance: float = 1e-6,
                            precision: lax.Precision = lax.Precision.HIGHEST):
  """Computes `matrix^(-1/p)`, where `p` is a positive integer.

  This function uses the Coupled newton iterations algorithm for
  the computation of a matrix's inverse pth root.

  References:
    [Functions of Matrices, Theory and Computation,
     Nicholas J Higham, Pg 184, Eq 7.18](
     https://epubs.siam.org/doi/book/10.1137/1.9780898717778)

  Args:
    matrix: the symmetric PSD matrix whose power it to be computed
    p: exponent, for p a positive integer.
    num_iters: Maximum number of iterations.
    ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
    error_tolerance: Error indicator, useful for early termination.
    precision: precision XLA related flag, the available options are:
      a) lax.Precision.DEFAULT (better step time, but not precise);
      b) lax.Precision.HIGH (increased precision, slower);
      c) lax.Precision.HIGHEST (best possible precision, slowest).

  Returns:
    A pair `(matrix^(-1/p), error)` where `error` is the final residual
    `max |mat_m - I|` of the coupled iteration.
  """

  # We use float32 for the matrix inverse pth root.
  # Switch to f64 if you have hardware that supports it.
  matrix_size = matrix.shape[0]
  alpha = jnp.asarray(-1.0 / p, jnp.float32)
  identity = jnp.eye(matrix_size, dtype=jnp.float32)
  # Largest eigenvalue sets the scale for the ridge term below.
  _, max_ev = power_iteration(
      matrix=matrix, num_iters=100,
      error_tolerance=1e-6, precision=precision)
  ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, 1e-16)

  def _unrolled_mat_pow_1(mat_m):
    """Computes mat_m^1."""
    return mat_m

  def _unrolled_mat_pow_2(mat_m):
    """Computes mat_m^2."""
    return jnp.matmul(mat_m, mat_m, precision=precision)

  def _unrolled_mat_pow_4(mat_m):
    """Computes mat_m^4."""
    mat_pow_2 = _unrolled_mat_pow_2(mat_m)
    return jnp.matmul(
        mat_pow_2, mat_pow_2, precision=precision)

  def _unrolled_mat_pow_8(mat_m):
    """Computes mat_m^8."""
    # (Docstring fixed: it previously said mat_m^4.)
    mat_pow_4 = _unrolled_mat_pow_4(mat_m)
    return jnp.matmul(
        mat_pow_4, mat_pow_4, precision=precision)

  def mat_power(mat_m, p):
    """Computes mat_m^p, for p == 1, 2, 4 or 8.

    Args:
      mat_m: a square matrix
      p: a positive integer

    Returns:
      mat_m^p
    """
    # We unrolled the loop for performance reasons.
    exponent = jnp.round(jnp.log2(p))
    return lax.switch(
        jnp.asarray(exponent, jnp.int32), [
            _unrolled_mat_pow_1,
            _unrolled_mat_pow_2,
            _unrolled_mat_pow_4,
            _unrolled_mat_pow_8,
        ], (mat_m))

  def _iter_condition(state):
    (i, unused_mat_m, unused_mat_h, unused_old_mat_h, error,
     run_step) = state
    error_above_threshold = jnp.logical_and(
        error > error_tolerance, run_step)
    return jnp.logical_and(i < num_iters, error_above_threshold)

  def _iter_body(state):
    (i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step) = state
    mat_m_i = (1 - alpha) * identity + alpha * mat_m
    new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision)
    new_mat_h = jnp.matmul(mat_h, mat_m_i, precision=precision)
    new_error = jnp.max(jnp.abs(new_mat_m - identity))
    # sometimes error increases after an iteration before decreasing and
    # converging. 1.2 factor is used to bound the maximal allowed increase.
    return (i + 1, new_mat_m, new_mat_h, mat_h, new_error,
            new_error < error * 1.2)

  if matrix_size == 1:
    # Scalar case: the pth root is computed in closed form.
    resultant_mat_h = (matrix + ridge_epsilon)**alpha
    error = 0
  else:
    damped_matrix = matrix + ridge_epsilon * identity

    z = (1 + p) / (2 * jnp.linalg.norm(damped_matrix))
    new_mat_m_0 = damped_matrix * z
    new_error = jnp.max(jnp.abs(new_mat_m_0 - identity))
    new_mat_h_0 = identity * jnp.power(z, 1.0 / p)
    init_state = tuple(
        [0, new_mat_m_0, new_mat_h_0, new_mat_h_0, new_error, True])
    _, mat_m, mat_h, old_mat_h, error, convergence = lax.while_loop(
        _iter_condition, _iter_body, init_state)
    error = jnp.max(jnp.abs(mat_m - identity))
    # Fall back to the previous iterate if the last step diverged.
    is_converged = jnp.asarray(convergence, old_mat_h.dtype)
    resultant_mat_h = is_converged * mat_h + (1 - is_converged) * old_mat_h
    resultant_mat_h = jnp.asarray(resultant_mat_h, matrix.dtype)
  return resultant_mat_h, error
|
lib/python3.10/site-packages/optax/_src/lookahead.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""A lookahead optimization wrapper."""
|
| 16 |
+
|
| 17 |
+
from typing import NamedTuple, Tuple
|
| 18 |
+
|
| 19 |
+
from absl import logging
|
| 20 |
+
import jax
|
| 21 |
+
import jax.numpy as jnp
|
| 22 |
+
|
| 23 |
+
from optax._src import base
|
| 24 |
+
|
| 25 |
+
# pylint:disable=no-value-for-parameter
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class LookaheadState(NamedTuple):
  """State of the `GradientTransformation` returned by `lookahead`.

  Attributes:
    fast_state: Optimizer state of the wrapped (fast) optimizer.
    steps_since_sync: Number of fast optimizer steps taken since the slow and
      fast parameters were last synchronized.
  """
  fast_state: base.OptState
  steps_since_sync: jnp.ndarray
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class LookaheadParams(NamedTuple):
  """Holds a pair of slow and fast parameters for the lookahead optimizer.

  Gradients should always be calculated with the fast parameters. The slow
  parameters should be used for testing and inference as they generalize
  better. See the reference for a detailed discussion.

  References:
    [Zhang et al, 2019](https://arxiv.org/pdf/1907.08610v1.pdf)

  Attributes:
    fast: Fast parameters.
    slow: Slow parameters.
  """
  fast: base.Params
  slow: base.Params

  @classmethod
  def init_synced(cls, params: base.Params) -> 'LookaheadParams':
    """Initialize a pair of synchronized lookahead parameters."""
    # Both copies start identical; they diverge as the fast optimizer steps.
    return cls(slow=params, fast=params)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def lookahead(
    fast_optimizer: base.GradientTransformation,
    sync_period: int,
    slow_step_size: float,
    reset_state: bool = False
) -> base.GradientTransformation:
  """Lookahead optimizer.

  Performs steps with a fast optimizer and periodically updates a set of slow
  parameters. Optionally resets the fast optimizer state after synchronization
  by calling the init function of the fast optimizer.

  Updates returned by the lookahead optimizer should not be modified before
  they are applied, otherwise fast and slow parameters are not synchronized
  correctly.

  References:
    [Zhang et al, 2019](https://arxiv.org/pdf/1907.08610v1.pdf)

  Args:
    fast_optimizer: The optimizer to use in the inner loop of lookahead.
    sync_period: Number of fast optimizer steps to take before synchronizing
      parameters. Must be >= 1.
    slow_step_size: Step size of the slow parameter updates.
    reset_state: Whether to reset the optimizer state of the fast optimizer
      after each synchronization.

  Returns:
    A `GradientTransformation` with init and update functions. The updates
    passed to the update function should be calculated using the fast
    lookahead parameters only.
  """
  if sync_period < 1:
    raise ValueError('Synchronization period must be >= 1.')

  def init_fn(params: base.Params) -> LookaheadState:
    # Accept either a LookaheadParams pair or bare fast parameters (EAFP).
    try:
      fast_params = params.fast
    except AttributeError:
      # Allowing init_fn to be called with fast parameters reduces the
      # modifications necessary to adapt code to use lookahead in some cases.
      logging.warning(
          '`params` has no attribute `fast`. Continuing by assuming that '
          'only fast parameters were passed to lookahead init.')
      fast_params = params

    return LookaheadState(
        fast_state=fast_optimizer.init(fast_params),
        steps_since_sync=jnp.zeros(shape=(), dtype=jnp.int32))

  def update_fn(
      updates: base.Updates, state: LookaheadState,
      params: LookaheadParams) -> Tuple[LookaheadParams, LookaheadState]:
    # Inner (fast) optimizer step first.
    updates, fast_state = fast_optimizer.update(
        updates, state.fast_state, params.fast)

    sync_next = (state.steps_since_sync == sync_period - 1)
    updates = _lookahead_update(updates, sync_next, params, slow_step_size)
    if reset_state:
      # Jittable way of resetting the fast optimizer state if parameters will
      # be synchronized after this update step: blend the current state with a
      # freshly initialized one, weighted by the (0/1) sync indicator.
      initial_state = fast_optimizer.init(params.fast)
      fast_state = jax.tree_util.tree_map(
          lambda current, init: (1 - sync_next) * current + sync_next * init,
          fast_state, initial_state)

    steps_since_sync = (state.steps_since_sync + 1) % sync_period
    return updates, LookaheadState(fast_state, steps_since_sync)

  return base.GradientTransformation(init_fn, update_fn)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def _lookahead_update(
    updates: base.Updates, sync_next: bool, params: LookaheadParams,
    slow_step_size: float) -> LookaheadParams:
  """Returns the updates corresponding to one lookahead step.

  References:
    [Zhang et al, 2019](https://arxiv.org/pdf/1907.08610v1.pdf)

  Args:
    updates: Updates returned by the fast optimizer.
    sync_next: Whether fast and slow parameters should be synchronized after
      the fast optimizer step.
    params: Current fast and slow parameters as `LookaheadParams` object.
    slow_step_size: Step size of the slow optimizer.

  Returns:
    The updates for the lookahead parameters.
  """
  # The paper presents lookahead as two nested loops; to express it as an
  # optax wrapper those loops are flattened into successive updates with two
  # cases:
  #
  # * Non-synchronization step (sync_next == False): fast parameters receive
  #   the fast optimizer's updates unchanged; slow updates are zero.
  # * Synchronization step (sync_next == True): the fast optimizer takes its
  #   last step (last_fast_params = fast_params + updates), then
  #     new_slow_params = slow_params
  #                       + slow_step_size * (last_fast_params - slow_params)
  #     new_fast_params = new_slow_params.
  #
  # Defining delta = fast_params + updates - slow_params and using sync_next
  # as a 0/1 multiplier merges both cases into jittable equations:
  #   slow_updates = slow_step_size * sync_next * delta
  #   fast_updates = updates - (1 - slow_step_size) * sync_next * delta
  delta = jax.tree_util.tree_map(
      lambda f, u, s: f + u - s, params.fast, updates, params.slow)
  slow_updates = jax.tree_util.tree_map(
      lambda d: slow_step_size * sync_next * d, delta)
  fast_updates = jax.tree_util.tree_map(
      lambda u, d: u - sync_next * (1 - slow_step_size) * d, updates,
      delta)

  return LookaheadParams(fast=fast_updates, slow=slow_updates)
|
| 192 |
+
|
lib/python3.10/site-packages/optax/_src/lookahead_test.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `lookahead.py`."""
|
| 16 |
+
|
| 17 |
+
from typing import NamedTuple
|
| 18 |
+
|
| 19 |
+
from absl.testing import absltest
|
| 20 |
+
from absl.testing import parameterized
|
| 21 |
+
import chex
|
| 22 |
+
import jax
|
| 23 |
+
import jax.numpy as jnp
|
| 24 |
+
import numpy as np
|
| 25 |
+
from optax._src import alias
|
| 26 |
+
from optax._src import base
|
| 27 |
+
from optax._src import lookahead
|
| 28 |
+
from optax._src import update
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _build_sgd():
  """Returns a plain SGD optimizer with unit learning rate."""
  return alias.sgd(1.)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class TestOptimizerState(NamedTuple):
  """Fast optimizer state for the lookahead tests."""
  aggregate_grads: base.Params
  # `is_reset` starts non-default-false after an update so the tests can tell
  # a freshly initialized state apart from an updated one (checks that
  # lookahead's `reset_state` really re-initializes the fast state).
  is_reset: bool = True
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def _test_optimizer(step_size: float) -> base.GradientTransformation:
  """Fast optimizer for the lookahead tests."""

  # An SGD-like rule, but with non-trivial optimizer state (the running sum of
  # gradients) so that lookahead's state-resetting behaviour is observable.
  def init_fn(params):
    zeros = jax.tree_util.tree_map(jnp.zeros_like, params)
    return TestOptimizerState(zeros, is_reset=True)

  def update_fn(updates, state, params):
    # The optimizer does not use the parameter values, but their structure
    # must match the updates — this checks they were passed through correctly.
    chex.assert_trees_all_equal_shapes(updates, params)
    aggregate_grads = update.apply_updates(state.aggregate_grads, updates)
    scaled_updates = jax.tree_util.tree_map(lambda u: step_size * u, updates)
    return scaled_updates, TestOptimizerState(aggregate_grads, is_reset=False)

  return base.GradientTransformation(init_fn, update_fn)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class LookaheadTest(chex.TestCase):
  """Tests for the lookahead optimizer."""

  def setUp(self):
    super().setUp()
    self.grads = {'x': np.array(2.), 'y': np.array(-2.)}
    self.initial_params = {'x': np.array(3.), 'y': np.array(-3.)}
    self.synced_initial_params = lookahead.LookaheadParams.init_synced(
        self.initial_params)

  def loop(self, optimizer, num_steps, params):
    """Performs a given number of optimizer steps."""
    init_fn, update_fn = optimizer
    # Use the chex variant to check various function versions (jit, pmap, etc).
    step = self.variant(update_fn)
    opt_state = self.variant(init_fn)(params)
    for _ in range(num_steps):
      updates, opt_state = step(self.grads, opt_state, params)
      params = update.apply_updates(params, updates)
    return params, opt_state

  @chex.all_variants
  def test_lookahead(self):
    """Tests the lookahead optimizer in an analytically tractable setting."""
    sync_period = 3
    optimizer = lookahead.lookahead(
        _test_optimizer(-0.5), sync_period=sync_period, slow_step_size=1 / 3)

    final_params, _ = self.loop(optimizer, 2 * sync_period,
                                self.synced_initial_params)
    # x steps must be: 3 -> 2 -> 1 -> 2 (sync) -> 1 -> 0 -> 1 (sync).
    # Similarly for y (with sign flipped).
    correct_final_params = {'x': 1, 'y': -1}
    chex.assert_tree_all_close(final_params.slow, correct_final_params)

  @chex.all_variants
  @parameterized.parameters([False], [True])
  def test_lookahead_state_reset(self, reset_state):
    """Checks that lookahead resets the fast optimizer state correctly."""
    num_steps = sync_period = 3
    fast_optimizer = _test_optimizer(-0.5)
    optimizer = lookahead.lookahead(
        fast_optimizer,
        sync_period=sync_period,
        slow_step_size=0.5,
        reset_state=reset_state)

    _, opt_state = self.loop(optimizer, num_steps, self.synced_initial_params)
    fast_state = opt_state.fast_state
    # With reset_state the fast state must look freshly initialized after the
    # sync; without it, it must match running the fast optimizer alone.
    if reset_state:
      correct_state = fast_optimizer.init(self.initial_params)
    else:
      _, correct_state = self.loop(fast_optimizer, num_steps,
                                   self.initial_params)
    chex.assert_tree_all_close(fast_state, correct_state)

  @chex.all_variants
  @parameterized.parameters(
      [1, 0.5, {'x': np.array(1.), 'y': np.array(-1.)}],
      [1, 0, {'x': np.array(3.), 'y': np.array(-3.)}],
      [1, 1, {'x': np.array(-1.), 'y': np.array(1.)}],
      [2, 1, {'x': np.array(-1.), 'y': np.array(1.)}])  # pyformat: disable
  def test_lookahead_edge_cases(self, sync_period, slow_step_size,
                                correct_result):
    """Checks special cases of the lookahead optimizer parameters."""
    # These edge cases are important to check since users might use them as
    # simple ways of disabling lookahead in experiments.
    optimizer = lookahead.lookahead(
        _test_optimizer(-1), sync_period, slow_step_size)
    final_params, _ = self.loop(
        optimizer, num_steps=2, params=self.synced_initial_params)
    chex.assert_tree_all_close(final_params.slow, correct_result)
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
if __name__ == '__main__':
  # Standard absl test entry point.
  absltest.main()
|
lib/python3.10/site-packages/optax/_src/loss.py
ADDED
|
@@ -0,0 +1,521 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Standard losses used in optimisation.
|
| 16 |
+
|
| 17 |
+
We provide implementations of the most canonical losses used in deep
|
| 18 |
+
learning. These operate transparently on batches, and do not perform any
|
| 19 |
+
reduction over the batch dimensions, leaving it to the user to, for instance,
|
| 20 |
+
mean or sum losses across batch dimensions.
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
from typing import Optional, Tuple
|
| 24 |
+
|
| 25 |
+
import chex
|
| 26 |
+
import jax
|
| 27 |
+
import jax.numpy as jnp
|
| 28 |
+
|
| 29 |
+
from optax._src import utils
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def l2_loss(
    predictions: chex.Array,
    targets: Optional[chex.Array] = None,
) -> chex.Array:
  """Calculates the L2 loss for a set of predictions.

  Note: the 0.5 term is standard in "Pattern Recognition and Machine Learning"
  by Bishop, but not "The Elements of Statistical Learning" by Tibshirani.

  References:
    [Chris Bishop, 2006](https://bit.ly/3eeP0ga)

  Args:
    predictions: a vector of arbitrary shape `[...]`.
    targets: a vector with shape broadcastable to that of `predictions`;
      if not provided then it is assumed to be a vector of zeros.

  Returns:
    elementwise squared differences, with same shape as `predictions`.
  """
  chex.assert_type([predictions], float)
  if targets is not None:
    # Require identical shapes rather than relying on broadcasting of "-",
    # which could silently expand dimensions.
    chex.assert_equal_shape((predictions, targets))
    errors = predictions - targets
  else:
    # No targets: measure the predictions against zero.
    errors = predictions
  return 0.5 * errors ** 2
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def huber_loss(
    predictions: chex.Array,
    targets: Optional[chex.Array] = None,
    delta: float = 1.) -> chex.Array:
  """Huber loss: quadratic near zero, linear for large errors.

  Applying gradient descent to the Huber loss is equivalent to clipping the
  gradients of an `l2_loss` to `[-delta, delta]` in the backward pass.

  References:
    [Huber, 1964](www.projecteuclid.org/download/pdf_1/euclid.aoms/1177703732)

  Args:
    predictions: a vector of arbitrary shape `[...]`.
    targets: a vector with shape broadcastable to that of `predictions`;
      if not provided then it is assumed to be a vector of zeros.
    delta: the bounds for the huber loss transformation, defaults at 1.

  Returns:
    elementwise huber losses, with the same shape of `predictions`.
  """
  chex.assert_type([predictions], float)
  if targets is None:
    residuals = predictions
  else:
    residuals = predictions - targets
  magnitudes = jnp.abs(residuals)
  # Piecewise definition:
  #   0.5 * err^2                     if |err| <= delta
  #   0.5 * delta^2 + delta*(|err|-delta)  otherwise
  clipped = jnp.minimum(magnitudes, delta)
  # `magnitudes - clipped` equals max(|err| - delta, 0) but avoids a second
  # branch that could double the gradient contribution.
  overshoot = magnitudes - clipped
  return 0.5 * clipped ** 2 + delta * overshoot
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def smooth_labels(
    labels: chex.Array,
    alpha: float,
) -> jnp.ndarray:
  """Applies label smoothing to one-hot labels.

  Label smoothing is often combined with a cross-entropy loss. Smoothed labels
  favour small logit gaps, which has been shown to improve model calibration
  by discouraging overconfident predictions.

  References:
    [Müller et al, 2019](https://arxiv.org/pdf/1906.02629.pdf)

  Args:
    labels: one hot labels to be smoothed.
    alpha: the smoothing factor; the greedy category is assigned probability
      `(1-alpha) + alpha / num_categories`.

  Returns:
    a smoothed version of the one hot input labels.
  """
  chex.assert_type([labels], float)
  # Spread a total mass of `alpha` uniformly over the last axis.
  uniform_mass = alpha / labels.shape[-1]
  return labels * (1.0 - alpha) + uniform_mass
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def sigmoid_binary_cross_entropy(logits, labels):
  """Computes element-wise sigmoid cross entropy given logits and labels.

  Measures the error in discrete classification tasks where each class is an
  independent binary prediction and classes are not mutually exclusive, e.g.
  multilabel image classification where an image may contain both a cat and a
  dog.

  References:
    [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)

  Args:
    logits: Each element is the unnormalized log probability of a binary
      prediction.
    labels: The target probabilities, must have a shape broadcastable to that
      of `logits`.

  Returns:
    cross entropy for each binary prediction, same shape as `logits`.
  """
  chex.assert_type([logits], float)
  log_prob = jax.nn.log_sigmoid(logits)
  # log(1 - sigmoid(x)) == log_sigmoid(-x); the latter is numerically stable.
  log_one_minus_prob = jax.nn.log_sigmoid(-logits)
  return -(labels * log_prob) - (1. - labels) * log_one_minus_prob
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def softmax_cross_entropy(
    logits: chex.Array,
    labels: chex.Array,
) -> chex.Array:
  """Computes the softmax cross entropy between sets of logits and labels.

  Measures the probability error in discrete classification tasks where the
  classes are mutually exclusive (each entry belongs to exactly one class).
  For example, a CIFAR-10 image has one and only one label: it can be a dog
  or a truck, but not both.

  References:
    [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)

  Args:
    logits: Unnormalized log probabilities, with shape `[..., num_classes]`.
    labels: Valid probability distributions (non-negative, sum to 1), e.g a
      one hot encoding specifying the correct class for each input;
      must have a shape broadcastable to `[..., num_classes]``

  Returns:
    cross entropy between each prediction and the corresponding target
    distributions, with shape `[...]`.
  """
  chex.assert_type([logits], float)
  log_probs = jax.nn.log_softmax(logits, axis=-1)
  return -jnp.sum(labels * log_probs, axis=-1)
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def softmax_cross_entropy_with_integer_labels(
    logits: chex.Array,
    labels: chex.Array,
) -> chex.Array:
  """Computes softmax cross entropy between sets of logits and integer labels.

  Measures the probability error in discrete classification tasks where the
  classes are mutually exclusive (each entry belongs to exactly one class).
  For example, a CIFAR-10 image has one and only one label: it can be a dog
  or a truck, but not both.

  References:
    [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)

  Args:
    logits: Unnormalized log probabilities, with shape `[..., num_classes]`.
    labels: Integers specifying the correct class for each input, with shape
      `[...]`.

  Returns:
    Cross entropy between each prediction and the corresponding target
    distributions, with shape `[...]`.
  """
  chex.assert_type([logits], float)
  chex.assert_type([labels], int)
  # Equivalent to jnp.take_along_axis(jax.nn.log_softmax(...), ...), except
  # the normalizer is subtracted only from the label logits rather than from
  # every class. The (gradient-stopped) max shift keeps exp() stable.
  shifted = logits - jax.lax.stop_gradient(
      jnp.max(logits, axis=-1, keepdims=True))
  label_logits = jnp.take_along_axis(
      shifted, labels[..., None], axis=-1)[..., 0]
  log_normalizers = jnp.log(jnp.sum(jnp.exp(shifted), axis=-1))
  return log_normalizers - label_logits
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def cosine_similarity(
    predictions: chex.Array,
    targets: chex.Array,
    epsilon: float = 0.,
) -> chex.Array:
  r"""Computes the cosine similarity between targets and predictions.

  The cosine **similarity** measures how similar two vectors are via the
  cosine of the angle between them, i.e. the inner product of the vectors
  after normalising each to unit norm.

  References:
    [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity)

  Args:
    predictions: The predicted vectors, with shape `[..., dim]`.
    targets: Ground truth target vectors, with shape `[..., dim]`.
    epsilon: minimum norm for terms in the denominator of the cosine similarity.

  Returns:
    cosine similarity measures, with shape `[...]`.
  """
  chex.assert_type([predictions, targets], float)
  # Vectorize the norm so every leading dimension is treated as a batch
  # dimension; `excluded={1}` keeps epsilon scalar.
  norm_fn = jnp.vectorize(utils.safe_norm, signature='(k)->()', excluded={1})
  # Normalise the last dimension of both inputs to unit norm.
  unit_targets = targets / jnp.expand_dims(norm_fn(targets, epsilon), axis=-1)
  unit_predictions = predictions / jnp.expand_dims(
      norm_fn(predictions, epsilon), axis=-1)
  return jnp.sum(unit_predictions * unit_targets, axis=-1)
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def cosine_distance(
    predictions: chex.Array,
    targets: chex.Array,
    epsilon: float = 0.,
) -> chex.Array:
  r"""Computes the cosine distance between targets and predictions.

  The cosine **distance** measures the **dissimilarity** of two vectors as
  the complement of cosine **similarity**: `1 - cos(\theta)`.

  References:
    [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity)

  Args:
    predictions: The predicted vectors, with shape `[..., dim]`.
    targets: Ground truth target vectors, with shape `[..., dim]`.
    epsilon: minimum norm for terms in the denominator of the cosine similarity.

  Returns:
    cosine distances, with shape `[...]`.
  """
  chex.assert_type([predictions, targets], float)
  # Distance is defined as one minus the similarity.
  similarity = cosine_similarity(predictions, targets, epsilon)
  return 1. - similarity
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def log_cosh(
    predictions: chex.Array,
    targets: Optional[chex.Array] = None,
) -> chex.Array:
  """Calculates the log-cosh loss for a set of predictions.

  log(cosh(x)) behaves like `(x**2) / 2` for small x and `abs(x) - log(2)`
  for large x, making it a twice-differentiable alternative to Huber loss.

  References:
    [Chen et al, 2019](https://openreview.net/pdf?id=rkglvsC9Ym)

  Args:
    predictions: a vector of arbitrary shape `[...]`.
    targets: a vector with shape broadcastable to that of `predictions`;
      if not provided then it is assumed to be a vector of zeros.

  Returns:
    the log-cosh loss, with same shape as `predictions`.
  """
  chex.assert_type([predictions], float)
  if targets is None:
    errors = predictions
  else:
    errors = predictions - targets
  # log(cosh(x)) = log((e^x + e^-x)/2) = logaddexp(x, -x) - log(2); the
  # logaddexp form is numerically stable for large |x|.
  return jnp.logaddexp(errors, -errors) - jnp.log(2.0).astype(errors.dtype)
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
def ctc_loss_with_forward_probs(
    logits: chex.Array,
    logit_paddings: chex.Array,
    labels: chex.Array,
    label_paddings: chex.Array,
    blank_id: int = 0,
    log_epsilon: float = -1e5) -> Tuple[chex.Array, chex.Array, chex.Array]:
  r"""Computes CTC loss and CTC forward-probabilities.

  The CTC loss is a loss function based on log-likelihoods of the model that
  introduces a special blank symbol :math:`\phi` to represent variable-length
  output sequences.

  Forward probabilities returned by this function, as auxiliary results, are
  grouped into two part: blank alpha-probability and non-blank alpha
  probability. Those are defined as follows:

  .. math::
    \alpha_{\mathrm{BLANK}}(t, n) =
    \sum_{\pi_{1:t-1}} p(\pi_t = \phi | \pi_{1:t-1}, y_{1:n-1}, \cdots), \\
    \alpha_{\mathrm{LABEL}}(t, n) =
    \sum_{\pi_{1:t-1}} p(\pi_t = y_n | \pi_{1:t-1}, y_{1:n-1}, \cdots).

  Here, :math:`\pi` denotes the alignment sequence in the reference
  [Graves et al, 2006] that is blank-inserted representations of ``labels``.
  The return values are the logarithms of the above probabilities.

  References:
    [Graves et al, 2006](https://dl.acm.org/doi/abs/10.1145/1143844.1143891)

  Args:
    logits: (B, T, K)-array containing logits of each class where B denotes
      the batch size, T denotes the max time frames in ``logits``, and K
      denotes the number of classes including a class for blanks.
    logit_paddings: (B, T)-array. Padding indicators for ``logits``. Each
      element must be either 1.0 or 0.0, and ``logitpaddings[b, t] == 1.0``
      denotes that ``logits[b, t, :]`` are padded values.
    labels: (B, N)-array containing reference integer labels where N denotes
      the max time frames in the label sequence.
    label_paddings: (B, N)-array. Padding indicators for ``labels``. Each
      element must be either 1.0 or 0.0, and ``labelpaddings[b, n] == 1.0``
      denotes that ``labels[b, n]`` is a padded label. In the current
      implementation, ``labels`` must be right-padded, i.e. each row
      ``labelpaddings[b, :]`` must be repetition of zeroes, followed by
      repetition of ones.
    blank_id: Id for blank token. ``logits[b, :, blank_id]`` are used as
      probabilities of blank symbols.
    log_epsilon: Numerically-stable approximation of log(+0).

  Returns:
    A tuple ``(loss_value, logalpha_blank, logalpha_nonblank)``. Here,
    ``loss_value`` is a (B,)-array containing the loss values for each sequence
    in the batch, ``logalpha_blank`` and ``logalpha_nonblank`` are
    (T, B, N+1)-arrays where the (t, b, n)-th element denotes
    \log \alpha_B(t, n) and \log \alpha_L(t, n), respectively, for ``b``-th
    sequence in the batch.
  """

  chex.assert_rank(logits, 3)
  chex.assert_rank(labels, 2)
  batchsize, unused_maxinputlen, num_classes = logits.shape
  batchsize_of_labels, maxlabellen = labels.shape
  chex.assert_equal(batchsize, batchsize_of_labels)
  chex.assert_equal(labels.shape, label_paddings.shape)
  chex.assert_equal(logits.shape[:2], logit_paddings.shape)

  logprobs = jax.nn.log_softmax(logits)
  # Effective (unpadded) length of each label sequence.
  labellens = maxlabellen - jnp.sum(label_paddings, axis=1).astype(jnp.int32)

  # repeat[b, n] == 1.0 when label[b, n] == label[b, n+1].
  # Repeated labels forbid the direct emit-to-emit transition in CTC.
  repeat = (labels[:, :-1] == labels[:, 1:]).astype(jnp.float32)
  repeat = jnp.pad(repeat, ((0, 0), (0, 1)))

  logprobs_phi = logprobs[:, :, blank_id:blank_id + 1]  # [B, T, 1]
  logprobs_phi = jnp.transpose(logprobs_phi, (1, 0, 2))  # [T, B, 1]

  # Per-frame log-probability of emitting each reference label.
  one_hot = jax.nn.one_hot(labels, num_classes=num_classes)  # [B, N, K]
  logprobs_emit = jnp.einsum('btk,bnk->btn', logprobs, one_hot)
  logprobs_emit = jnp.transpose(logprobs_emit, (1, 0, 2))  # [T, B, N]

  # Initial forward probabilities: all mass on position 0 of the blank row.
  logalpha_phi_init = jnp.ones(
      (batchsize, maxlabellen + 1)) * log_epsilon  # [B, N]
  logalpha_phi_init = logalpha_phi_init.at[:, 0].set(0.0)
  logalpha_emit_init = jnp.ones((batchsize, maxlabellen)) * log_epsilon

  def update_phi_score(phi, added_score):
    # Update `phi[:, 1:]`` with adding `added_score` in log space.
    return jnp.concatenate(
        [phi[:, :1], jnp.logaddexp(phi[:, 1:], added_score)], axis=-1)

  def loop_body(prev, x):
    # One time step of the CTC forward dynamic program.
    prev_phi, prev_emit = prev
    # emit-to-phi epsilon transition, except if the next label is repetition
    prev_phi_orig = prev_phi
    prev_phi = update_phi_score(prev_phi, prev_emit + log_epsilon * repeat)

    logprob_emit, logprob_phi, pad = x

    # phi-to-emit transition
    next_emit = jnp.logaddexp(prev_phi[:, :-1] + logprob_emit,
                              prev_emit + logprob_emit)
    # self-loop transition
    next_phi = prev_phi + logprob_phi
    # emit-to-phi blank transition only when the next label is repetition
    next_phi = update_phi_score(
        next_phi, prev_emit + logprob_phi + log_epsilon * (1.0 - repeat))

    # On padded frames, carry the previous state through unchanged.
    pad = pad.reshape((batchsize, 1))
    next_emit = pad * prev_emit + (1.0 - pad) * next_emit
    next_phi = pad * prev_phi_orig + (1.0 - pad) * next_phi

    return (next_phi, next_emit), (next_phi, next_emit)

  xs = (logprobs_emit, logprobs_phi, logit_paddings.transpose((1, 0)))
  _, (logalpha_phi,
      logalpha_emit) = jax.lax.scan(loop_body,
                                    (logalpha_phi_init, logalpha_emit_init), xs)

  # last row needs to be updated with the last epsilon transition
  logalpha_phi_last = update_phi_score(logalpha_phi[-1], logalpha_emit[-1])
  logalpha_phi = logalpha_phi.at[-1].set(logalpha_phi_last)

  # extract per_seq_loss
  # One-hot over label lengths selects the terminal alpha for each sequence.
  one_hot = jax.nn.one_hot(labellens, num_classes=maxlabellen + 1)  # [B, N+1]
  per_seq_loss = -jnp.einsum('bn,bn->b', logalpha_phi_last, one_hot)

  return per_seq_loss, logalpha_phi, logalpha_emit
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
def ctc_loss(logits: chex.Array,
             logit_paddings: chex.Array,
             labels: chex.Array,
             label_paddings: chex.Array,
             blank_id: int = 0,
             log_epsilon: float = -1e5) -> chex.Array:
  """Computes CTC loss.

  See docstring for ``ctc_loss_with_forward_probs`` for details.

  Args:
    logits: (B, T, K)-array containing logits of each class where B denotes
      the batch size, T denotes the max time frames in ``logits``, and K
      denotes the number of classes including a class for blanks.
    logit_paddings: (B, T)-array. Padding indicators for ``logits``. Each
      element must be either 1.0 or 0.0, and ``logitpaddings[b, t] == 1.0``
      denotes that ``logits[b, t, :]`` are padded values.
    labels: (B, N)-array containing reference integer labels where N denotes
      the max time frames in the label sequence.
    label_paddings: (B, N)-array. Padding indicators for ``labels``. Each
      element must be either 1.0 or 0.0, and ``labelpaddings[b, n] == 1.0``
      denotes that ``labels[b, n]`` is a padded label. In the current
      implementation, ``labels`` must be right-padded, i.e. each row
      ``labelpaddings[b, :]`` must be repetition of zeroes, followed by
      repetition of ones.
    blank_id: Id for blank token. ``logits[b, :, blank_id]`` are used as
      probabilities of blank symbols.
    log_epsilon: Numerically-stable approximation of log(+0).

  Returns:
    (B,)-array containing loss values for each sequence in the batch.
  """
  # Delegate to the full forward-probability computation and discard the
  # auxiliary alpha arrays.
  loss_value, _, _ = ctc_loss_with_forward_probs(
      logits=logits,
      logit_paddings=logit_paddings,
      labels=labels,
      label_paddings=label_paddings,
      blank_id=blank_id,
      log_epsilon=log_epsilon)
  return loss_value
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
def kl_divergence(log_predictions: chex.Array,
                  targets: chex.Array) -> chex.Array:
  """Computes the Kullback-Leibler divergence (relative entropy) loss.

  Measures the information gain achieved if target probability distribution
  would be used instead of predicted probability distribution.

  References:
    [Kullback, Leibler, 1951](https://www.jstor.org/stable/2236703)

  Args:
    log_predictions: Probabilities of predicted distribution with shape
      [..., dim]. Expected to be in the log-space to avoid underflow.
    targets: Probabilities of target distribution with shape [..., dim].
      Expected to be non-negative; entries that are exactly zero contribute
      zero to the divergence (the limit p*log(p) -> 0 as p -> 0).

  Returns:
    Kullback-Leibler divergence of predicted distribution from target
    distribution with shape [...].
  """
  chex.assert_type([log_predictions, targets], float)
  # Mask zero targets before taking the log: jnp.log(0) is -inf and
  # 0 * -inf is NaN, whereas the correct limit of p*log(p) at p=0 is 0.
  # For strictly positive targets this is identical to the unmasked form.
  safe_log_targets = jnp.where(targets == 0, 0.0, jnp.log(targets))
  loss = targets * (safe_log_targets - log_predictions)
  return jnp.sum(loss, axis=-1)
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
def kl_divergence_with_log_targets(log_predictions: chex.Array,
                                   log_targets: chex.Array) -> chex.Array:
  """Computes the Kullback-Leibler divergence (relative entropy) loss.

  Version of kl_div_loss where targets are given in log-space.

  Args:
    log_predictions: Probabilities of predicted distribution with shape
      [..., dim]. Expected to be in the log-space to avoid underflow.
    log_targets: Probabilities of target distribution with shape [..., dim].
      Expected to be in the log-space.

  Returns:
    Kullback-Leibler divergence of predicted distribution from target
    distribution with shape [...].
  """
  chex.assert_type([log_predictions, log_targets], float)
  # Recover target probabilities from log-space for the weighting term.
  target_probs = jnp.exp(log_targets)
  pointwise = target_probs * (log_targets - log_predictions)
  return jnp.sum(pointwise, axis=-1)
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
def hinge_loss(predictor_outputs: chex.Array,
|
| 511 |
+
targets: chex.Array) -> chex.Array:
|
| 512 |
+
"""Computes the hinge loss for binary classification.
|
| 513 |
+
|
| 514 |
+
Args:
|
| 515 |
+
predictor_outputs: Outputs of the decision function.
|
| 516 |
+
targets: Target values. Target values should be strictly in the set {-1, 1}.
|
| 517 |
+
|
| 518 |
+
Returns:
|
| 519 |
+
Binary Hinge Loss.
|
| 520 |
+
"""
|
| 521 |
+
return jnp.maximum(0, 1 - predictor_outputs * targets)
|
lib/python3.10/site-packages/optax/_src/loss_test.py
ADDED
|
@@ -0,0 +1,500 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax._src.loss."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
import numpy as np
|
| 24 |
+
|
| 25 |
+
from optax._src import loss
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class L2LossTest(parameterized.TestCase):
  """Unit tests for `loss.l2_loss`."""

  def setUp(self):
    super().setUp()
    self.ys = jnp.array([-2., -1., 0.5, 1.])
    self.ts = jnp.array([-1.5, 0., -1, 1.])
    # Reference values: 0.5 * (target - prediction)^2, computed in numpy.
    self.exp = 0.5 * (self.ts - self.ys) ** 2

  @chex.all_variants
  def test_scalar(self):
    actual = self.variant(loss.l2_loss)(self.ys[0], self.ts[0])
    np.testing.assert_allclose(actual, self.exp[0])

  @chex.all_variants
  def test_batched(self):
    actual = self.variant(loss.l2_loss)(self.ys, self.ts)
    np.testing.assert_allclose(actual, self.exp)

  @chex.all_variants
  def test_shape_mismatch(self):
    # Mismatched ranks must be rejected rather than silently broadcast.
    with self.assertRaises(AssertionError):
      _ = self.variant(loss.l2_loss)(
          self.ys, jnp.expand_dims(self.ts, axis=-1))
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class HuberLossTest(parameterized.TestCase):
  """Unit tests for `loss.huber_loss`."""

  def setUp(self):
    super().setUp()
    self.ys = np.array([-2.0, 0.5, 0., 0.5, 2.0, 4.0, 132.])
    self.ts = np.array([0.0, -0.5, 0., 1., 1.0, 2.0, 0.3])
    # Reference values computed manually for delta=1.0.
    self.exp = np.array([1.5, 0.5, 0., 0.125, 0.5, 1.5, 131.2])

  @chex.all_variants
  def test_scalar(self):
    actual = self.variant(loss.huber_loss)(self.ys[0], self.ts[0], delta=1.0)
    np.testing.assert_allclose(actual, self.exp[0])

  @chex.all_variants
  def test_batched(self):
    actual = self.variant(loss.huber_loss)(self.ys, self.ts, delta=1.0)
    np.testing.assert_allclose(actual, self.exp)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class SmoothLabelsTest(parameterized.TestCase):
  """Unit tests for `loss.smooth_labels`."""

  def setUp(self):
    super().setUp()
    self.ts = np.array([[0., 1., 0.], [1., 0., 0.]], dtype=np.float32)
    num_classes = self.ts.shape[-1]
    # Reference outputs (numpy) for alpha in {0, 0.1, 1}.
    self.exp_alpha_zero = self.ts
    self.exp_alpha_zero_point_one = 0.9 * self.ts + 0.1 / num_classes
    self.exp_alpha_one = jnp.ones_like(self.ts) / num_classes

  @chex.all_variants
  def test_scalar(self):
    """Tests a single (unbatched) row of labels."""
    smooth = self.variant(loss.smooth_labels)
    np.testing.assert_allclose(
        smooth(self.ts[0], 0.), self.exp_alpha_zero[0], atol=1e-4)
    np.testing.assert_allclose(
        smooth(self.ts[0], 0.1), self.exp_alpha_zero_point_one[0], atol=1e-4)
    np.testing.assert_allclose(
        smooth(self.ts[0], 1.), self.exp_alpha_one[0], atol=1e-4)

  @chex.all_variants
  def test_batched(self):
    """Tests a full batch of labels."""
    smooth = self.variant(loss.smooth_labels)
    np.testing.assert_allclose(
        smooth(self.ts, 0.), self.exp_alpha_zero, atol=1e-4)
    np.testing.assert_allclose(
        smooth(self.ts, 0.1), self.exp_alpha_zero_point_one, atol=1e-4)
    np.testing.assert_allclose(
        smooth(self.ts, 1.), self.exp_alpha_one, atol=1e-4)
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
class SoftmaxCrossEntropyTest(parameterized.TestCase):
  """Unit tests for `loss.softmax_cross_entropy`."""

  def setUp(self):
    super().setUp()
    self.ys = np.array([[10., 1., -2.], [1., 4., 0.2]], dtype=np.float32)
    self.ts = np.array([[0., 1., 0.], [1., 0., 0.]], dtype=np.float32)
    # Reference outputs taken from rlax.
    self.exp = np.array([9.00013, 3.0696733], dtype=np.float32)

  @chex.all_variants
  def test_scalar(self):
    """Tests a single (unbatched) example."""
    actual = self.variant(loss.softmax_cross_entropy)(self.ys[0], self.ts[0])
    np.testing.assert_allclose(actual, self.exp[0], atol=1e-4)

  @chex.all_variants
  def test_batched(self):
    """Tests a full batch."""
    actual = self.variant(loss.softmax_cross_entropy)(self.ys, self.ts)
    np.testing.assert_allclose(actual, self.exp, atol=1e-4)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
class SoftmaxCrossEntropyWithIntegerLabelsTest(parameterized.TestCase):
  """Unit tests for `loss.softmax_cross_entropy_with_integer_labels`."""

  def setUp(self):
    super().setUp()
    self.ys = np.array([[10., 1., -2.], [1., 4., 0.2]], dtype=np.float32)
    self.ts = np.array([1, 0], dtype=np.int32)

  @chex.all_variants
  def test_consistent_with_softmax_cross_entropy_scalar(self):
    """Integer-label loss must match the one-hot loss for one example."""
    expected = loss.softmax_cross_entropy(
        self.ys[0], jax.nn.one_hot(self.ts[0], 3))
    actual = self.variant(loss.softmax_cross_entropy_with_integer_labels)(
        self.ys[0], self.ts[0])
    np.testing.assert_allclose(actual, expected, rtol=1e-6)

  @chex.all_variants
  def test_consistent_with_softmax_cross_entropy_batched(self):
    """Integer-label loss must match the one-hot loss for a full batch."""
    expected = loss.softmax_cross_entropy(self.ys, jax.nn.one_hot(self.ts, 3))
    actual = self.variant(loss.softmax_cross_entropy_with_integer_labels)(
        self.ys, self.ts)
    np.testing.assert_allclose(actual, expected, rtol=1e-6)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class SigmoidCrossEntropyTest(parameterized.TestCase):
  """Unit tests for `loss.sigmoid_binary_cross_entropy`.

  Each case asserts the mean loss over a two-element example. Extreme logits
  (+/-1e9) are included to check the implementation does not overflow.
  """

  @parameterized.parameters(
      dict(preds=np.array([-1e+09, -1e-09]),
           labels=np.array([1., 0.]),
           expected=5e+08),
      dict(preds=np.array([-1e+09, -1e-09]),
           labels=np.array([0., 1.]),
           expected=0.3465736),
      dict(preds=np.array([1e+09, 1e-09]),
           labels=np.array([1., 0.]),
           expected=0.3465736),
      dict(preds=np.array([1e+09, 1e-09]),
           labels=np.array([0., 1.]),
           expected=5e+08),
      dict(preds=np.array([-1e+09, 1e-09]),
           labels=np.array([1., 0.]),
           expected=5e+08),
      dict(preds=np.array([-1e+09, 1e-09]),
           labels=np.array([0., 1.]),
           expected=0.3465736),
      dict(preds=np.array([1e+09, -1e-09]),
           labels=np.array([1., 0.]),
           expected=0.3465736),
      dict(preds=np.array([1e+09, -1e-09]),
           labels=np.array([0., 1.]),
           expected=5e+08),
      # Zero logits: loss is log(2) ~= 0.6931472 regardless of labels.
      dict(preds=np.array([0., 0.]),
           labels=np.array([1., 0.]),
           expected=0.6931472),
      dict(preds=np.array([0., 0.]),
           labels=np.array([0., 1.]),
           expected=0.6931472),
  )
  def testSigmoidCrossEntropy(self, preds, labels, expected):
    tested = jnp.mean(loss.sigmoid_binary_cross_entropy(preds, labels))
    np.testing.assert_allclose(tested, expected, rtol=1e-6, atol=1e-6)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
class CosineDistanceTest(parameterized.TestCase):
  """Unit tests for `loss.cosine_distance` and `loss.cosine_similarity`."""

  def setUp(self):
    super().setUp()
    self.ys = np.array([[10., 1., -2.], [1., 4., 0.2]], dtype=np.float32)
    self.ts = np.array([[0., 1.2, 0.2], [1., -0.3, 0.]], dtype=np.float32)
    # Reference distances computed with `scipy 1.20`.
    self.exp = np.array([0.9358251989, 1.0464068465], dtype=np.float32)

  @chex.all_variants
  def test_scalar_distance(self):
    actual = self.variant(loss.cosine_distance)(self.ys[0], self.ts[0])
    np.testing.assert_allclose(actual, self.exp[0], atol=1e-4)

  @chex.all_variants
  def test_scalar_similarity(self):
    # Similarity is checked as one minus the reference distance.
    actual = self.variant(loss.cosine_similarity)(self.ys[0], self.ts[0])
    np.testing.assert_allclose(actual, 1. - self.exp[0], atol=1e-4)

  @chex.all_variants
  def test_batched_distance(self):
    actual = self.variant(loss.cosine_distance)(self.ys, self.ts)
    np.testing.assert_allclose(actual, self.exp, atol=1e-4)

  @chex.all_variants
  def test_batched_similarity(self):
    actual = self.variant(loss.cosine_similarity)(self.ys, self.ts)
    np.testing.assert_allclose(actual, 1. - self.exp, atol=1e-4)
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
# TODO(b/188419459): add test for grad and second order grad.
|
| 240 |
+
class LogCoshTest(parameterized.TestCase):
  """Unit tests for `loss.log_cosh`."""

  def setUp(self):
    super().setUp()
    # Large magnitudes included to exercise overflow safety.
    self.ys = jnp.array([500, -2., -1., 0.5, 1.])
    self.ts = jnp.array([-200, -1.5, 0., -1, 1.])
    # Reference values from tensorflow.keras.losses.log_cosh v2.4.1.
    self.exp = jnp.array([699.3068, 0.12011445, 0.4337809, 0.85544014, 0.])
    # Reference values when only predictions are passed (no targets).
    self.exp_ys_only = jnp.array(
        [499.30685, 1.3250027, 0.4337809, 0.12011451, 0.43378082])

  @chex.all_variants
  def test_scalar(self):
    actual = self.variant(loss.log_cosh)(self.ys[0], self.ts[0])
    np.testing.assert_allclose(actual, self.exp[0], atol=1e-5)

  @chex.all_variants
  def test_batched(self):
    actual = self.variant(loss.log_cosh)(self.ys, self.ts)
    np.testing.assert_allclose(actual, self.exp, atol=1e-5)

  @chex.all_variants
  def test_scalar_predictions_only(self):
    actual = self.variant(loss.log_cosh)(self.ys[0])
    np.testing.assert_allclose(actual, self.exp_ys_only[0], atol=1e-5)

  @chex.all_variants
  def test_batched_predictions_only(self):
    actual = self.variant(loss.log_cosh)(self.ys)
    np.testing.assert_allclose(actual, self.exp_ys_only, atol=1e-5)
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def _lengths_to_paddings(lengths: chex.Array, maxlength: int) -> chex.Array:
  """Converts sequence lengths to a 0/1 padding mask.

  Args:
    lengths: integer array of valid sequence lengths, of any shape `[...]`.
    maxlength: size of the trailing (time) dimension of the returned mask.

  Returns:
    A float32 array of shape `[..., maxlength]` that is 0.0 at valid
    positions (index < length) and 1.0 at padded positions.
  """
  indices = jnp.arange(maxlength).reshape((1,) * lengths.ndim + (maxlength,))
  lengths = jnp.expand_dims(lengths, axis=-1)
  elem_valid = indices < lengths
  # Fix: use jnp (not np) for the final ops so the helper stays consistent
  # with its jnp inputs and remains safe under jax tracing; the original
  # mixed `np.logical_not`/`np.float32` onto a jnp array.
  return jnp.logical_not(elem_valid).astype(jnp.float32)
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
def _average_ctc_loss(logprobs: chex.Array, logprob_paddings: chex.Array,
                      labels: chex.Array,
                      label_paddings: chex.Array) -> chex.Array:
  """Scalar mean of the per-sequence CTC losses (handy for `jax.grad`)."""
  per_seq_losses = loss.ctc_loss(
      logprobs, logprob_paddings, labels, label_paddings)
  return jnp.average(per_seq_losses)
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
class CTCTest(parameterized.TestCase):
  """Tests for `loss.ctc_loss` and `loss.ctc_loss_with_forward_probs`."""

  def setUp(self):
    super().setUp()
    np.random.seed(1234)
    # Looser tolerance off-CPU; presumably accounts for lower-precision
    # accumulation on accelerator backends.
    self._rtol = 5e-3 if jax.default_backend() != 'cpu' else 1e-6

  @chex.all_variants
  def test_with_one_to_one_alignment(self):
    # when inputsteps and outputsteps are equal, no blank will be allowed.
    batchsize = 8
    steps = 50
    nclasses = 40
    logits = np.random.randn(batchsize, steps, nclasses)
    # Labels drawn from [1, nclasses): class 0 is reserved for blank.
    labels = np.random.uniform(
        1, nclasses, size=(batchsize, steps)).astype(np.int32)

    # This function only covers the cases without same-label repetition.
    # `test_repeat_with_one_to_one_alignment` below complements those cases.
    # So, redraw the samples for satisfying the non-repetition constraint.
    for n in range(labels.shape[0]):
      for t in range(1, labels.shape[1]):
        while labels[n, t] == labels[n, t - 1]:
          labels[n, t] = np.random.uniform(1, nclasses)

    results = self.variant(loss.ctc_loss_with_forward_probs)(
        logits, np.zeros(logits.shape[:2]),
        labels, np.zeros(labels.shape))
    (per_seq_loss, logalpha_blank, logalpha_emit) = results

    # With a forced one-to-one alignment the loss is just the sum of the
    # per-frame log-probabilities of the target labels, negated.
    logprobs = jax.nn.log_softmax(logits)
    for b in range(batchsize):
      p = 0.0
      for t in range(steps):
        p += logprobs[b, t, labels[b, t]]
      np.testing.assert_allclose(
          np.array(-p), per_seq_loss[b], rtol=self._rtol)

      # Check forward-probabilities.
      # 1. All-phi path: logalpha_blank[-1, b, 0] must be a probability of
      # the path that outputs blank symbols for all the frames.
      np.testing.assert_allclose(logalpha_blank[-1, b, 0],
                                 np.sum(logprobs[b, :, 0]),
                                 rtol=self._rtol)

      # 2. After emitting all the labels
      # the negated loss must be identical with the forward probability of
      # paths after consuming all the labels (because one-to-one alignment
      # doesn't allow extra blank symbols)
      np.testing.assert_allclose(logalpha_emit[-1, b, steps - 1],
                                 -per_seq_loss[b],
                                 rtol=self._rtol)
      # and, this forward probability must be copied to the blank forward
      # probability of the next step.
      np.testing.assert_allclose(logalpha_blank[-1, b, steps],
                                 -per_seq_loss[b],
                                 rtol=self._rtol)

  @chex.all_variants
  def test_with_one_to_one_alignment_and_paddings(self):
    """Checks loss and gradients with padded logits/labels."""
    batch_size = 5
    nclasses = 13
    steps = 7
    logits = np.random.normal(size=[batch_size, steps, nclasses])
    logprobs = jax.nn.log_softmax(logits)

    # Build per-example label rows with no repetition (shuffled distinct
    # classes from [1, nclasses)).
    labels = []
    for n in range(batch_size):
      row = list(range(1, nclasses))
      np.random.shuffle(row)
      labels.append(row[:steps])
    labels = np.array(labels)

    lengths = np.random.randint(3, 6, size=(batch_size,))
    paddings = _lengths_to_paddings(lengths, steps)

    actual_loss = self.variant(loss.ctc_loss)(logits, paddings, labels,
                                              paddings)

    value_and_grad = self.variant(jax.value_and_grad(_average_ctc_loss))
    unused_avg_loss, actual_gradients = value_and_grad(logits, paddings, labels,
                                                       paddings)

    for n in range(batch_size):
      # Expected loss: negated sum of target log-probs over the valid frames.
      expected_loss = -sum(logprobs[n, t, k]
                           for t, k in enumerate(labels[n, :lengths[n]]))
      np.testing.assert_allclose(expected_loss, actual_loss[n], rtol=self._rtol)

      # Expected gradient of softmax-CE: softmax(logits) - one_hot(labels),
      # zeroed on padded frames and divided by the batch size (the loss is
      # averaged over the batch).
      expected_gradients = np.array(jax.nn.softmax(logits[n]))
      expected_gradients[lengths[n]:] = 0.0
      for t, k in enumerate(labels[n, :lengths[n]]):
        expected_gradients[t, k] -= 1.0
      expected_gradients /= batch_size
      np.testing.assert_allclose(
          expected_gradients, actual_gradients[n], rtol=self._rtol)

  @chex.all_variants
  def test_repeat_with_one_to_one_alignment(self):
    # test if it can correctly handle the same-label repetition.
    nclasses = 5
    labels = np.array([
        [1, 2, 2, 3],
        [2, 3, 4, 4],
        [1, 1, 1, 1],
        [1, 1, 2, 3],
        [1, 1, 1, 2],
    ])
    expected_alignment = [  # expected minimal alignment
        [1, 2, 0, 2, 3],
        [2, 3, 4, 0, 4],
        [1, 0, 1, 0, 1, 0, 1],
        [1, 0, 1, 2, 3],
        [1, 0, 1, 0, 1, 2],
    ]
    batch_size = len(labels)
    label_lens = np.array([4] * batch_size)
    label_steps = 6
    # Designed to have two padding elements on the right.
    labels = np.pad(labels, [(0, 0), (0, label_steps - labels.shape[1])])
    label_paddings = _lengths_to_paddings(label_lens, label_steps)

    # Logit lengths equal the minimal alignment lengths, so each sequence
    # admits exactly one valid CTC path.
    logit_lengths = np.array([len(seq) for seq in expected_alignment])
    logit_steps = max(logit_lengths)
    logits = np.random.randn(batch_size, logit_steps, nclasses)
    logit_paddings = _lengths_to_paddings(logit_lengths, logit_steps)

    per_seq_loss = self.variant(loss.ctc_loss)(logits, logit_paddings, labels,
                                               label_paddings)

    logprobs = jax.nn.log_softmax(logits)
    for n in range(batch_size):
      expected_loss = -sum(logprobs[n, t, k]
                           for t, k in enumerate(expected_alignment[n]))
      np.testing.assert_allclose(
          jnp.array(expected_loss), per_seq_loss[n], rtol=self._rtol)
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
class KLDivergenceTest(parameterized.TestCase):
  """Unit tests for `loss.kl_divergence`."""

  def setUp(self):
    super().setUp()
    self.log_ps = np.array(
        [[-2.9957, -3.5066, -3.9120, -1.2040, -0.6931, -2.3026],
         [-1.6094, -1.6094, -1.6094, -2.3026, -1.8971, -1.8971]])
    self.qs = np.array([[0.2, 0.2, 0.2, 0.1, 0.15, 0.15],
                        [0.05, 0.03, 0.02, 0.3, 0.5, 0.1]])
    # Reference Kullback-Leibler divergence of P from Q.
    self.exp = np.array([0.8875625, 0.7187435584901326])

  @chex.all_variants
  def test_scalar(self):
    actual = self.variant(loss.kl_divergence)(self.log_ps[0], self.qs[0])
    np.testing.assert_allclose(actual, self.exp[0], atol=1e-4)

  @chex.all_variants
  def test_batched(self):
    actual = self.variant(loss.kl_divergence)(self.log_ps, self.qs)
    np.testing.assert_allclose(actual, self.exp, atol=1e-4)
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
class KLDivergenceWithLogTargetsTest(parameterized.TestCase):
  """Unit tests for `loss.kl_divergence_with_log_targets`."""

  def setUp(self):
    super().setUp()
    self.log_ps = np.array(
        [[-2.9957, -3.5066, -3.9120, -1.2040, -0.6931, -2.3026],
         [-1.6094, -1.6094, -1.6094, -2.3026, -1.8971, -1.8971]])
    # Targets are given in log-space here, unlike `KLDivergenceTest`.
    self.qs = np.array([[-1.6094, -1.6094, -1.6094, -2.3026, -1.8971, -1.8971],
                        [-2.9957, -3.5066, -3.9120, -1.2040, -0.6931, -2.3026]])
    # Reference Kullback-Leibler divergence of P from Q.
    self.exp = np.array([0.8875625, 0.7187435584901326])

  @chex.all_variants
  def test_scalar(self):
    actual = self.variant(loss.kl_divergence_with_log_targets)(
        self.log_ps[0], self.qs[0])
    np.testing.assert_allclose(actual, self.exp[0], atol=1e-4)

  @chex.all_variants
  def test_batched(self):
    actual = self.variant(loss.kl_divergence_with_log_targets)(
        self.log_ps, self.qs)
    np.testing.assert_allclose(actual, self.exp, atol=1e-4)
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
class HingeLossTest(parameterized.TestCase):
  """Unit tests for `loss.hinge_loss`."""

  def setUp(self):
    super().setUp()
    self.ys = np.array([
        -0.97740268, -1.01812625, -0.81675726, -0.73605974, 2.08235648,
        1.84101354, -1.0581002
    ])
    # Binary targets encoded as -1/+1.
    self.ts = np.array([-1, -1, -1, -1, 1, 1, -1])
    # Reference outputs computed offline.
    self.correct_result = np.array(
        [0.02259731, 0., 0.18324274, 0.26394027, 0., 0., 0.])

  @chex.all_variants
  def test_batched(self):
    actual = self.variant(loss.hinge_loss)(self.ys, self.ts)
    np.testing.assert_allclose(actual, self.correct_result, atol=1e-4)
|
| 498 |
+
|
| 499 |
+
# Run the test suite via absl when this module is executed as a script.
if __name__ == '__main__':
  absltest.main()
|
lib/python3.10/site-packages/optax/_src/numerics.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Utilities to ensure the implementation is safe wrt numerical issues.
|
| 16 |
+
|
| 17 |
+
Note that complex numbers are also supported, see
|
| 18 |
+
https://gist.github.com/wdphy16/118aef6fb5f82c49790d7678cf87da29
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
from typing import Optional, Tuple, Union
|
| 22 |
+
|
| 23 |
+
import chex
|
| 24 |
+
import jax.numpy as jnp
|
| 25 |
+
import numpy as np
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# TODO(jscholz) Promote these functions to jax core lib?
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def abs_sq(x: chex.Array) -> chex.Array:
  """Returns the elementwise squared magnitude of a (maybe complex) array.

  For real `x`, JAX generates the same HLO from this, `jnp.square(x)`,
  `x * x`, or `x**2`; for complex `x` it returns the real array
  `(x.conj() * x).real`.

  Args:
    x: a (maybe complex) numpy or jax ndarray.

  Returns:
    The squared norm of `x`.

  Raises:
    ValueError: if `x` is not an ndarray.
  """
  if isinstance(x, (np.ndarray, jnp.ndarray)):
    return (x.conj() * x).real
  raise ValueError(f"`abs_sq` accepts only NDarrays, got: {x}.")
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def safe_norm(x: chex.Array,
              min_norm: chex.Numeric,
              ord: Optional[Union[int, float, str]] = None,  # pylint: disable=redefined-builtin
              axis: Union[None, Tuple[int, ...], int] = None,
              keepdims: bool = False) -> chex.Array:
  """Returns jnp.maximum(jnp.linalg.norm(x), min_norm) with correct gradients.

  The gradients of `jnp.maximum(jnp.linalg.norm(x), min_norm)` at 0.0 is `NaN`,
  because jax will evaluate both branches of the `jnp.maximum`. This function
  will instead return the correct gradient of 0.0 also in such setting.

  Args:
    x: jax array.
    min_norm: lower bound for the returned norm.
    ord: {non-zero int, inf, -inf, 'fro', 'nuc'}, optional. Order of the norm.
      inf means numpy's inf object. The default is None.
    axis: {None, int, 2-tuple of ints}, optional. If axis is an integer, it
      specifies the axis of x along which to compute the vector norms. If axis
      is a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix
      norms of these matrices are computed. If axis is None then either a vector
      norm (when x is 1-D) or a matrix norm (when x is 2-D) is returned. The
      default is None.
    keepdims: bool, optional. If this is set to True, the axes which are normed
      over are left in the result as dimensions with size one. With this option
      the result will broadcast correctly against the original x.

  Returns:
    The safe norm of the input vector, accounting for correct gradient.
  """
  # Compute the true norm with kept dims so it broadcasts against `x` below.
  norm = jnp.linalg.norm(x, ord=ord, axis=axis, keepdims=True)
  # Where the norm is at/below the floor, replace `x` with ones so the second
  # norm evaluation below happens away from the non-differentiable point 0.
  x = jnp.where(norm <= min_norm, jnp.ones_like(x), x)
  # Restore the caller-requested output shape for the selector.
  norm = jnp.squeeze(norm, axis=axis) if not keepdims else norm
  masked_norm = jnp.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
  # Both branches now have well-defined gradients: the clamped branch is the
  # constant `min_norm`, the other is the norm of the masked `x`.
  return jnp.where(norm <= min_norm, min_norm, masked_norm)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def safe_root_mean_squares(x: chex.Array, min_rms: chex.Numeric) -> chex.Array:
  """Returns `maximum(sqrt(mean(abs_sq(x))), min_norm)` with correct grads.

  The gradients of `maximum(sqrt(mean(abs_sq(x))), min_norm)` at 0.0
  is `NaN`, because jax will evaluate both branches of the `jnp.maximum`. This
  function will instead return the correct gradient of 0.0 also in such setting.

  Args:
    x: jax array.
    min_rms: lower bound for the returned norm.

  Returns:
    The safe RMS of the input vector, accounting for correct gradient.
  """
  rms = jnp.sqrt(jnp.mean(abs_sq(x)))
  # Where the RMS is at/below the floor, replace `x` with ones so the RMS is
  # re-evaluated away from the non-differentiable point 0 (same masking trick
  # as `safe_norm` above).
  x = jnp.where(rms <= min_rms, jnp.ones_like(x), x)
  return jnp.where(rms <= min_rms, min_rms, jnp.sqrt(jnp.mean(abs_sq(x))))
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def safe_int32_increment(count: chex.Numeric) -> chex.Numeric:
  """Increments an int32 counter by one, saturating at the int32 maximum.

  Normally `max_int + 1` would overflow to `min_int`. This function ensures
  that once `max_int` is reached the counter stays at `max_int`.

  Args:
    count: a counter to be incremented.

  Returns:
    A counter incremented by 1, or max_int if the maximum precision is reached.
  """
  chex.assert_type(count, jnp.int32)
  max_value = jnp.iinfo(jnp.int32).max
  increment = jnp.array(1, dtype=jnp.int32)
  # Saturate instead of wrapping around on overflow.
  return jnp.where(count < max_value, count + increment, max_value)
|
lib/python3.10/site-packages/optax/_src/privacy_test.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `privacy.py`."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
|
| 24 |
+
from optax._src import privacy
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class DifferentiallyPrivateAggregateTest(parameterized.TestCase):
|
| 28 |
+
|
| 29 |
+
def setUp(self):
|
| 30 |
+
super().setUp()
|
| 31 |
+
self.batch_size = 8
|
| 32 |
+
self.params = {'key_a': (jnp.zeros((2, 3, 4)), jnp.zeros([])),
|
| 33 |
+
'key_b': jnp.zeros((6, 7))}
|
| 34 |
+
# Example `i`'s grads are full of `i`s. Important to include 0 to ensure
|
| 35 |
+
# there are no divisons by 0 (e.g. in norm clipping)
|
| 36 |
+
a = jnp.arange(self.batch_size)
|
| 37 |
+
self.per_eg_grads = jax.tree_util.tree_map(
|
| 38 |
+
lambda p: jnp.moveaxis(a * jnp.ones(p.shape+(self.batch_size,)), -1, 0),
|
| 39 |
+
self.params)
|
| 40 |
+
|
| 41 |
+
@chex.all_variants
|
| 42 |
+
def test_no_privacy(self):
|
| 43 |
+
"""l2_norm_clip=MAX_FLOAT32 and noise_multiplier=0 should recover SGD."""
|
| 44 |
+
dp_agg = privacy.differentially_private_aggregate(
|
| 45 |
+
l2_norm_clip=jnp.finfo(jnp.float32).max,
|
| 46 |
+
noise_multiplier=0.,
|
| 47 |
+
seed=0)
|
| 48 |
+
state = dp_agg.init(self.params)
|
| 49 |
+
update_fn = self.variant(dp_agg.update)
|
| 50 |
+
mean_grads = jax.tree_util.tree_map(lambda g: g.mean(0), self.per_eg_grads)
|
| 51 |
+
|
| 52 |
+
for _ in range(3):
|
| 53 |
+
updates, state = update_fn(self.per_eg_grads, state)
|
| 54 |
+
chex.assert_tree_all_close(updates, mean_grads)
|
| 55 |
+
|
| 56 |
+
@chex.all_variants
|
| 57 |
+
@parameterized.parameters(0.5, 10.0, 20.0, 40.0, 80.0)
|
| 58 |
+
def test_clipping_norm(self, l2_norm_clip):
|
| 59 |
+
dp_agg = privacy.differentially_private_aggregate(
|
| 60 |
+
l2_norm_clip=l2_norm_clip,
|
| 61 |
+
noise_multiplier=0.,
|
| 62 |
+
seed=42)
|
| 63 |
+
state = dp_agg.init(self.params)
|
| 64 |
+
update_fn = self.variant(dp_agg.update)
|
| 65 |
+
|
| 66 |
+
# Shape of the three arrays below is (self.batch_size, )
|
| 67 |
+
norms = [jnp.linalg.norm(g.reshape(self.batch_size, -1), axis=1)
|
| 68 |
+
for g in jax.tree_util.tree_leaves(self.per_eg_grads)]
|
| 69 |
+
global_norms = jnp.linalg.norm(jnp.stack(norms), axis=0)
|
| 70 |
+
divisors = jnp.maximum(global_norms / l2_norm_clip, 1.)
|
| 71 |
+
# Since the values of all the parameters are the same within each example,
|
| 72 |
+
# we can easily compute what the values should be:
|
| 73 |
+
expected_val = jnp.mean(jnp.arange(self.batch_size) / divisors)
|
| 74 |
+
expected_tree = jax.tree_util.tree_map(
|
| 75 |
+
lambda p: jnp.broadcast_to(expected_val, p.shape), self.params)
|
| 76 |
+
|
| 77 |
+
for _ in range(3):
|
| 78 |
+
updates, state = update_fn(self.per_eg_grads, state, self.params)
|
| 79 |
+
chex.assert_tree_all_close(updates, expected_tree, rtol=2e-7)
|
| 80 |
+
|
| 81 |
+
@chex.all_variants
|
| 82 |
+
@parameterized.parameters((3.0, 2.0), (1.0, 5.0), (100.0, 4.0), (1.0, 90.0))
|
| 83 |
+
def test_noise_multiplier(self, l2_norm_clip, noise_multiplier):
|
| 84 |
+
"""Standard dev. of noise should be l2_norm_clip * noise_multiplier."""
|
| 85 |
+
dp_agg = privacy.differentially_private_aggregate(
|
| 86 |
+
l2_norm_clip=l2_norm_clip,
|
| 87 |
+
noise_multiplier=noise_multiplier,
|
| 88 |
+
seed=1337)
|
| 89 |
+
state = dp_agg.init(None)
|
| 90 |
+
update_fn = self.variant(dp_agg.update)
|
| 91 |
+
expected_std = l2_norm_clip * noise_multiplier
|
| 92 |
+
|
| 93 |
+
grads = [jnp.ones((1, 100, 100))] # batch size 1
|
| 94 |
+
for _ in range(3):
|
| 95 |
+
updates, state = update_fn(grads, state)
|
| 96 |
+
chex.assert_tree_all_close(expected_std,
|
| 97 |
+
jnp.std(updates[0]),
|
| 98 |
+
atol=0.1 * expected_std)
|
| 99 |
+
|
| 100 |
+
  def test_aggregated_updates_as_input_fails(self):
    """Expect per-example gradients as input to this transform."""
    dp_agg = privacy.differentially_private_aggregate(l2_norm_clip=0.1,
                                                      noise_multiplier=1.1,
                                                      seed=2021)
    state = dp_agg.init(self.params)
    # Averaging over axis 0 removes the per-example leading dimension that
    # the transform requires, so the update below must raise.
    mean_grads = jax.tree_util.tree_map(lambda g: g.mean(0), self.per_eg_grads)
    with self.assertRaises(ValueError):
      dp_agg.update(mean_grads, state, self.params)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
if __name__ == '__main__':
|
| 112 |
+
absltest.main()
|
lib/python3.10/site-packages/optax/_src/schedule.py
ADDED
|
@@ -0,0 +1,620 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""JAX Schedules.
|
| 16 |
+
|
| 17 |
+
Schedules may be used to anneal the value of a hyper-parameter over time; for
|
| 18 |
+
instance, they may be used to anneal the learning rate used to update an agent's
|
| 19 |
+
parameters or the exploration factor used to select actions.
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
import functools
|
| 23 |
+
import inspect
|
| 24 |
+
from typing import Callable, Dict, Union, NamedTuple, Optional, Iterable, Sequence
|
| 25 |
+
|
| 26 |
+
from absl import logging
|
| 27 |
+
import chex
|
| 28 |
+
import jax
|
| 29 |
+
import jax.numpy as jnp
|
| 30 |
+
|
| 31 |
+
from optax._src import base
|
| 32 |
+
from optax._src import numerics
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def constant_schedule(
    value: Union[float, int]
) -> base.Schedule:
  """Constructs a schedule that returns the same value at every step.

  Args:
    value: value to be held constant throughout.

  Returns:
    schedule: A function that maps step counts to values.
  """
  def schedule(count):
    del count  # The value does not depend on the step count.
    return value
  return schedule
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def polynomial_schedule(
    init_value: chex.Scalar,
    end_value: chex.Scalar,
    power: chex.Scalar,
    transition_steps: int,
    transition_begin: int = 0
) -> base.Schedule:
  """Constructs a schedule with polynomial transition from init to end value.

  Args:
    init_value: initial value for the scalar to be annealed.
    end_value: end value of the scalar to be annealed.
    power: the power of the polynomial used to transition from init to end.
    transition_steps: number of steps over which annealing takes place,
      the scalar starts changing at `transition_begin` steps and completes
      the transition by `transition_begin + transition_steps` steps.
      If `transition_steps <= 0`, then the entire annealing process is disabled
      and the value is held fixed at `init_value`.
    transition_begin: must be positive. After how many steps to start annealing
      (before this many steps the scalar value is held fixed at `init_value`).

  Returns:
    schedule: A function that maps step counts to values.
  """
  if transition_steps <= 0:
    logging.info(
        'A polynomial schedule was set with a non-positive `transition_steps` '
        'value; this results in a constant schedule with value `init_value`.')
    return lambda count: init_value

  if transition_begin < 0:
    # Bug fix: this message previously said 'An exponential schedule', which
    # was copied from `exponential_decay` and misidentified this schedule.
    logging.info(
        'A polynomial schedule was set with a negative `transition_begin` '
        'value; this will result in `transition_begin` falling back to `0`.')
    transition_begin = 0

  def schedule(count):
    # Progress through the transition, clipped to [0, transition_steps] so the
    # value is held at `init_value` before the transition begins and at
    # `end_value` once it completes.
    count = jnp.clip(count - transition_begin, 0, transition_steps)
    frac = 1 - count / transition_steps
    return (init_value - end_value) * (frac**power) + end_value
  return schedule
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
# Alias polynomial schedule to linear schedule for convenience.
|
| 93 |
+
def linear_schedule(
    init_value: chex.Scalar,
    end_value: chex.Scalar,
    transition_steps: int,
    transition_begin: int = 0
) -> base.Schedule:
  """Alias for a degree-1 `polynomial_schedule` (linear annealing)."""
  return polynomial_schedule(
      init_value=init_value,
      end_value=end_value,
      power=1,
      transition_steps=transition_steps,
      transition_begin=transition_begin)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def piecewise_constant_schedule(
    init_value: float,
    boundaries_and_scales: Optional[Dict[int, float]] = None
) -> base.Schedule:
  """Returns a function which implements a piecewise constant schedule.

  Args:
    init_value: An initial value `init_v`.
    boundaries_and_scales: A map from boundaries `b_i` to non-negative scaling
      factors `f_i`. For any step count `s`, the schedule returns `init_v`
      scaled by the product of all factors `f_i` such that `b_i` <= `s`
      (the indicator below is 0 when `count == threshold`, so each factor
      takes effect at its boundary step, not one step later).

  Returns:
    schedule: A function that maps step counts to values.

  Raises:
    ValueError: if any scale factor is negative.
  """
  if boundaries_and_scales is not None:
    all_positive = all(scale >= 0. for scale in boundaries_and_scales.values())
    if not all_positive:
      raise ValueError(
          '`piecewise_constant_schedule` expects non-negative scale factors')

  def schedule(count):
    v = init_value
    if boundaries_and_scales is not None:
      # Visit boundaries in increasing order so that each crossed boundary
      # contributes its scale factor exactly once to the running product.
      for threshold, scale in sorted(boundaries_and_scales.items()):
        # indicator == 1 while count < threshold, 0 once count >= threshold.
        indicator = jnp.maximum(0., jnp.sign(threshold - count))
        v = v * indicator + (1 - indicator) * scale * v
    return v

  return schedule
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def exponential_decay(
    init_value: float,
    transition_steps: int,
    decay_rate: float,
    transition_begin: int = 0,
    staircase: bool = False,
    end_value: Optional[float] = None
) -> base.Schedule:
  """Constructs a schedule with either continuous or discrete exponential decay.

  The returned schedule computes::

    decayed_value = init_value * decay_rate ** (count / transition_steps)

  If `staircase` is `True`, `count / transition_steps` is an integer division
  and the decayed value follows a staircase function.

  Args:
    init_value: the initial learning rate.
    transition_steps: must be positive. See the decay computation above.
    decay_rate: must not be zero. The decay rate.
    transition_begin: must be positive. After how many steps to start annealing
      (before this many steps the scalar value is held fixed at `init_value`).
    staircase: if `True`, decay the values at discrete intervals.
    end_value: the value at which the exponential decay stops. When
      `decay_rate` < 1, `end_value` is treated as a lower bound, otherwise as
      an upper bound. Has no effect when `decay_rate` = 0.

  Returns:
    schedule: A function that maps step counts to values.
  """
  # Degenerate configurations fall back to a constant schedule.
  if transition_steps <= 0:
    logging.info(
        'An exponential schedule was set with a non-positive `transition_steps`'
        ' value; this will result in a constant schedule with value '
        '`init_value`.')
    return lambda count: init_value

  if decay_rate == 0:
    logging.info(
        'An exponential schedule was set with a zero `decay_rate` value; '
        'this will result in a constant schedule with value `init_value`.')
    return lambda count: init_value

  if transition_begin < 0:
    logging.info(
        'An exponential schedule was set with a negative `transition_begin` '
        'value; this will result in `transition_begin` falling back to `0`.')
    transition_begin = 0

  if end_value is not None:
    # decay_rate < 1 means the value decreases, so `end_value` is a floor;
    # otherwise the value grows and `end_value` acts as a ceiling.
    clip_fn = jnp.maximum if decay_rate < 1.0 else jnp.minimum

  def schedule(count):
    steps_since_begin = count - transition_begin
    exponent = steps_since_begin / transition_steps
    if staircase:
      exponent = jnp.floor(exponent)
    # Hold the value fixed at `init_value` until annealing begins.
    decayed_value = jnp.where(
        steps_since_begin <= 0,
        init_value,
        init_value * jnp.power(decay_rate, exponent))
    if end_value is not None:
      decayed_value = clip_fn(decayed_value, end_value)
    return decayed_value

  return schedule
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def cosine_decay_schedule(
    init_value: float,
    decay_steps: int,
    alpha: float = 0.0
) -> base.Schedule:
  """Returns a function which implements cosine learning rate decay.

  The schedule does not restart when ``decay_steps`` has been reached. Instead,
  the learning rate remains constant afterwards. For a cosine schedule with
  restarts, :func:`optax.join_schedules` can be used to join several cosine
  decay schedules.

  For more details see: https://arxiv.org/abs/1608.03983.

  Args:
    init_value: An initial value `init_v`.
    decay_steps: Positive integer - the number of steps for which to apply
      the decay for.
    alpha: Float. The minimum value of the multiplier used to adjust the
      learning rate.

  Returns:
    schedule: A function that maps step counts to values.

  Raises:
    ValueError: if `decay_steps` is not positive.
  """
  if decay_steps <= 0:
    raise ValueError('The cosine_decay_schedule requires positive decay_steps!')

  def schedule(count):
    # Clamp the count so the schedule stays constant past `decay_steps`.
    clamped = jnp.minimum(count, decay_steps)
    cosine_factor = 0.5 * (1 + jnp.cos(jnp.pi * clamped / decay_steps))
    # Interpolates between 1 (at step 0) and `alpha` (at `decay_steps`).
    multiplier = (1 - alpha) * cosine_factor + alpha
    return init_value * multiplier

  return schedule
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def _linear_interpolate(start: float, end: float, pct: float):
|
| 244 |
+
return (end-start) * pct + start
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def _cosine_interpolate(start: float, end: float, pct: float):
|
| 248 |
+
return end + (start-end) / 2.0 * (jnp.cos(jnp.pi * pct) + 1)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def piecewise_interpolate_schedule(
    interpolate_type: str,
    init_value: float,
    boundaries_and_scales: Optional[Dict[int, float]] = None
) -> base.Schedule:
  """Returns a function which implements a piecewise interpolated schedule.

  Args:
    interpolate_type: 'linear' or 'cosine', specifying the interpolation
      strategy.
    init_value: An initial value `init_v`.
    boundaries_and_scales: A map from boundaries `b_i` to non-negative scaling
      factors `f_i`. At boundary step `b_i`, the schedule returns `init_v`
      scaled by the product of all factors `f_j` such that `b_j` <= `b_i`. The
      values in between each boundary will be interpolated as per `type`.

  Returns:
    schedule: A function that maps step counts to values.

  Raises:
    ValueError: if `interpolate_type` is unknown or a scale factor is negative.
  """
  if interpolate_type == 'linear':
    interpolate_fn = _linear_interpolate
  elif interpolate_type == 'cosine':
    interpolate_fn = _cosine_interpolate
  else:
    # Bug fix: the message previously suggested 'cos', which is not an
    # accepted value; only 'cosine' and 'linear' are handled above.
    raise ValueError(
        '`interpolate_type` must be either \'cosine\' or \'linear\'')

  if boundaries_and_scales:
    boundaries, scales = zip(*sorted(boundaries_and_scales.items()))
    if not all(scale >= 0. for scale in scales):
      raise ValueError(
          '`piecewise_interpolate_schedule` expects non-negative scale factors')
  else:
    boundaries, scales = (), ()

  # `values[k]` is the cumulative product of scale factors up to boundary k
  # (starting with `init_value`); interval k spans [bounds[k], bounds[k+1]).
  bounds = jnp.stack((0,) + boundaries)
  values = jnp.cumprod(jnp.stack((init_value,) + scales))
  interval_sizes = bounds[1:] - bounds[:-1]

  def schedule(count):
    # One-hot over the interval containing `count` (zero past the last bound).
    indicator = (bounds[:-1] <= count) & (count < bounds[1:])
    pct = (count - bounds[:-1]) / interval_sizes
    interp_vals = interpolate_fn(values[:-1], values[1:], pct)
    # Past the final boundary the schedule stays at the last value.
    return indicator.dot(interp_vals) + (bounds[-1] <= count) * values[-1]

  return schedule
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
def linear_onecycle_schedule(
    transition_steps: int,
    peak_value: float,
    pct_start: float = 0.3,
    pct_final: float = 0.85,
    div_factor: float = 25.0,
    final_div_factor: float = 1e4
) -> base.Schedule:
  """Returns a function which implements the onecycle learning rate schedule.

  This function uses a linear annealing strategy.
  For more details see: https://arxiv.org/abs/1708.07120

  Args:
    transition_steps: Number of steps over which annealing takes place.
    peak_value: Maximum value attained by schedule at pct_start percent
      of the cycle (in number of steps).
    pct_start: The percentage of the cycle (in number of steps) spent
      increasing the learning rate.
    pct_final: The percentage of the cycle (in number of steps) spent
      increasing to peak_value then decreasing back to init_value.
    div_factor: Determines the initial value via init_value =
      peak_value / div_factor
    final_div_factor: Determines the final value via final_value =
      init_value / final_div_factor

  Returns:
    schedule: A function that maps step counts to values.

  Raises:
    ValueError: if `transition_steps` is not positive.
  """
  if transition_steps <= 0:
    raise ValueError(
        'A linear onecycle schedule was set with a non-positive '
        '`transition_steps`')

  init_value = peak_value / div_factor
  # Rise to `peak_value`, fall back to `init_value`, then anneal to the
  # final value `init_value / final_div_factor`.
  boundaries_and_scales = {
      int(pct_start * transition_steps): div_factor,
      int(pct_final * transition_steps): 1. / div_factor,
      transition_steps: 1. / final_div_factor,
  }
  return piecewise_interpolate_schedule(
      'linear', init_value, boundaries_and_scales)
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
def cosine_onecycle_schedule(
    transition_steps: int,
    peak_value: float,
    pct_start: float = 0.3,
    div_factor: float = 25.0,
    final_div_factor: float = 1e4
) -> base.Schedule:
  """Returns a function which implements the onecycle learning rate schedule.

  This function uses a cosine annealing strategy.
  For more details see: https://arxiv.org/abs/1708.07120

  Args:
    transition_steps: Number of steps over which annealing takes place.
    peak_value: Maximum value attained by schedule at pct_start percent
      of the cycle (in number of steps).
    pct_start: The percentage of the cycle (in number of steps) spent
      increasing the learning rate.
    div_factor: Determines the initial value via init_value =
      peak_value / div_factor
    final_div_factor: Determines the final value via final_value =
      init_value / final_div_factor

  Returns:
    schedule: A function that maps step counts to values.

  Raises:
    ValueError: if `transition_steps` is not positive.
  """
  if transition_steps <= 0:
    # Bug fix: the message previously said 'A linear onecycle schedule',
    # copied from `linear_onecycle_schedule`; this is the cosine variant.
    raise ValueError(
        'A cosine onecycle schedule was set with a non-positive '
        '`transition_steps`')

  return piecewise_interpolate_schedule(
      'cosine',
      peak_value / div_factor,
      {int(pct_start * transition_steps): div_factor,
       int(transition_steps): 1. / (div_factor * final_div_factor)})
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
def join_schedules(schedules: Sequence[base.Schedule],
                   boundaries: Sequence[int]) -> base.Schedule:
  """Sequentially apply multiple schedules.

  Args:
    schedules: A list of callables (expected to be optax schedules). Each
      schedule will receive a step count indicating the number of steps since
      the previous boundary transition.
    boundaries: A list of integers (of length one less than schedules) that
      indicate when to transition between schedules.
  Returns:
    schedule: A function that maps step counts to values.
  """
  # Bug fix: the step annotation used `jnp.DeviceArray`, which has been
  # removed from recent JAX releases and raised a NameError when this inner
  # function was defined; `chex.Numeric` covers both scalars and arrays.
  def schedule(step: chex.Numeric) -> chex.Numeric:
    output = schedules[0](step)
    # Each later schedule sees a step count relative to its own start; the
    # loop variable is named `sched` so it does not shadow this function.
    for boundary, sched in zip(boundaries, schedules[1:]):
      output = jnp.where(step < boundary, output, sched(step - boundary))
    return output
  return schedule
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def warmup_cosine_decay_schedule(
    init_value: float,
    peak_value: float,
    warmup_steps: int,
    decay_steps: int,
    end_value: float = 0.0
) -> base.Schedule:
  """Linear warmup followed by cosine decay.

  Args:
    init_value: Initial value for the scalar to be annealed.
    peak_value: Peak value for scalar to be annealed at end of warmup.
    warmup_steps: Positive integer, the length of the linear warmup.
    decay_steps: Positive integer, the total length of the schedule. Note that
      this includes the warmup time, so the number of steps during which cosine
      annealing is applied is `decay_steps - warmup_steps`.
    end_value: End value of the scalar to be annealed.
  Returns:
    schedule: A function that maps step counts to values.
  """
  warmup_fn = linear_schedule(
      init_value=init_value,
      end_value=peak_value,
      transition_steps=warmup_steps)
  # `alpha` is the final multiplier relative to `peak_value`, so the decay
  # bottoms out at `end_value`.
  decay_fn = cosine_decay_schedule(
      init_value=peak_value,
      decay_steps=decay_steps - warmup_steps,
      alpha=end_value / peak_value)
  return join_schedules([warmup_fn, decay_fn], [warmup_steps])
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
def warmup_exponential_decay_schedule(
    init_value: float,
    peak_value: float,
    warmup_steps: int,
    transition_steps: int,
    decay_rate: float,
    transition_begin: int = 0,
    staircase: bool = False,
    end_value: Optional[float] = None
) -> base.Schedule:
  """Linear warmup followed by exponential decay.

  Args:
    init_value: Initial value for the scalar to be annealed.
    peak_value: Peak value for scalar to be annealed at end of warmup.
    warmup_steps: Positive integer, the length of the linear warmup.
    transition_steps: must be positive. See `exponential_decay` for more
      details.
    decay_rate: must not be zero. The decay rate.
    transition_begin: must be positive. After how many steps to start annealing
      (before this many steps the scalar value is held fixed at `peak_value`).
    staircase: if `True`, decay the values at discrete intervals.
    end_value: the value at which the exponential decay stops. When
      `decay_rate` < 1, `end_value` is treated as a lower bound, otherwise as
      an upper bound. Has no effect when `decay_rate` = 0.
  Returns:
    schedule: A function that maps step counts to values.
  """
  warmup_fn = linear_schedule(
      init_value=init_value,
      end_value=peak_value,
      transition_steps=warmup_steps)
  decay_fn = exponential_decay(
      init_value=peak_value,
      transition_steps=transition_steps,
      decay_rate=decay_rate,
      transition_begin=transition_begin,
      staircase=staircase,
      end_value=end_value)
  # The decay schedule receives steps counted from the end of the warmup.
  return join_schedules([warmup_fn, decay_fn], [warmup_steps])
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
def sgdr_schedule(cosine_kwargs: Iterable[Dict[str, chex.Numeric]]
                  ) -> base.Schedule:
  """SGD with warm restarts, from Loschilov & Hutter (arXiv:1608.03983).

  This learning rate schedule applies multiple joined cosine decay cycles.
  For more details see: https://arxiv.org/abs/1608.03983

  Args:
    cosine_kwargs: An Iterable of dicts, where each element specifies the
      arguments to pass to each cosine decay cycle. The `decay_steps` kwarg
      will specify how long each cycle lasts for, and therefore when to
      transition to the next cycle.
  Returns:
    schedule: A function that maps step counts to values.
  """
  schedules = []
  boundaries = []
  total_steps = 0
  for kwargs in cosine_kwargs:
    schedules.append(warmup_cosine_decay_schedule(**kwargs))
    total_steps += kwargs['decay_steps']
    boundaries.append(total_steps)
  # The last boundary marks the end of the final cycle; join_schedules only
  # needs the transitions *between* cycles, hence the [:-1].
  return join_schedules(schedules, boundaries[:-1])
|
| 497 |
+
|
| 498 |
+
|
| 499 |
+
def _convert_floats(x, dtype):
|
| 500 |
+
"""Convert float-like inputs to dtype, rest pass through."""
|
| 501 |
+
if jax.dtypes.scalar_type_of(x) == float:
|
| 502 |
+
return jnp.asarray(x, dtype=dtype)
|
| 503 |
+
return x
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
class InjectHyperparamsState(NamedTuple):
  """Maintains inner transform state, hyperparameters, and step count."""
  count: jnp.ndarray  # shape=(), dtype=jnp.int32; steps taken so far
  hyperparams: Dict[str, chex.Numeric]  # current numeric hyperparameter values
  inner_state: base.OptState  # state of the wrapped GradientTransformation
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
def inject_hyperparams(
    inner_factory: Callable[..., base.GradientTransformation],
    static_args: Union[str, Iterable[str]] = (),
    hyperparam_dtype: Optional[jnp.dtype] = None,
) -> Callable[..., base.GradientTransformation]:
  """Wrapper that injects hyperparameters into the inner GradientTransformation.

  This wrapper allows you to pass schedules (i.e. a function that returns a
  numeric value given a step count) instead of constants for
  hyperparameters. You may only schedule numeric hyperparameters (i.e. boolean
  flags cannot be scheduled).

  For example, to use ``scale_by_adam`` with a piecewise linear
  schedule for beta_1 and constant for beta_2::

    scheduled_adam = optax.inject_hyperparams(optax.scale_by_adam)(
        b1=optax.piecewise_linear_schedule(...),
        b2=0.99)

  You may manually change numeric hyperparameters that were not scheduled
  through the ``hyperparams`` dict in the ``InjectHyperparamState``::

    state = scheduled_adam.init(params)
    updates, state = scheduled_adam.update(grads, state)
    state.hyperparams['b2'] = 0.95
    updates, state = scheduled_adam.update(updates, state)  # uses b2 = 0.95

  Manually overriding scheduled hyperparameters will have no effect (e.g.
  in the code sample above, you cannot manually adjust ``b1``).

  Args:
    inner_factory: a function that returns the inner
      ``optax.GradientTransformation`` given the hyperparameters.
    static_args: a string or iterable of strings specifying which
      callable parameters are not schedules. inject_hyperparams treats all
      callables as schedules by default, so if a hyperparameter is a
      non-schedule callable, you must specify that using this argument.
    hyperparam_dtype: Optional datatype override. If specified, all float
      hyperparameters will be cast to this type.

  Returns:
    A callable that returns a ``optax.GradientTransformation``. This callable
    accepts the same arguments as ``inner_factory``, except you may provide
    schedules in place of the constant arguments.

  Raises:
    ValueError: if `static_args` names a parameter `inner_factory` lacks.
  """
  # Normalize to a set so a single string is not treated as its characters.
  static_args = ({static_args} if isinstance(static_args, str) else
                 set(static_args))
  inner_signature = inspect.signature(inner_factory)

  if not static_args.issubset(inner_signature.parameters):
    raise ValueError(
        '`static_args` must specify a subset of `inner_factory`\'s parameters. '
        f'Given `static_args`: {static_args}. `inner_factory` parameters: '
        f'{set(inner_signature.parameters.keys())}')

  @functools.wraps(inner_factory)
  def wrapped_transform(*args, **kwargs) -> base.GradientTransformation:
    # Bind arguments against the inner factory's signature so defaults are
    # visible and every hyperparameter has a name.
    bound_arguments = inner_signature.bind(*args, **kwargs)
    bound_arguments.apply_defaults()

    # Partition arguments: schedules (callables), injectable numerics, and
    # everything else (static args, booleans, non-numeric values) which is
    # passed straight through to the factory on every call.
    sched_hps, numeric_hps, other_hps = {}, {}, {}
    for name, value in bound_arguments.arguments.items():
      if name in static_args or isinstance(value, bool):
        other_hps[name] = value
      elif callable(value):
        sched_hps[name] = value
      elif isinstance(value, (int, float, chex.Array)):
        numeric_hps[name] = value
      else:
        other_hps[name] = value

    def schedule_fn(count, dtype):
      # Evaluate every schedule at `count`, casting float results to `dtype`.
      return {k: _convert_floats(f(count), dtype) for k, f in sched_hps.items()}

    def init_fn(params):
      count = jnp.zeros([], jnp.int32)
      if hyperparam_dtype is None:
        # Default to the dtype of the first parameter leaf (None if empty).
        dtype = getattr(next(iter(
            jax.tree_util.tree_leaves(params)), None), 'dtype', None)
      else:
        dtype = hyperparam_dtype
      hparams = {
          k: jnp.asarray(_convert_floats(v, dtype))
          for k, v in numeric_hps.items()}
      # Scheduled values overwrite any numeric value of the same name.
      hparams.update(schedule_fn(count, dtype))
      return InjectHyperparamsState(  # pylint:disable=too-many-function-args
          count, hparams, inner_factory(**other_hps, **hparams).init(params))

    def update_fn(updates, state, params=None):
      if hyperparam_dtype is None:
        # Default to the dtype of the first update leaf (None if empty).
        dtype = getattr(next(iter(
            jax.tree_util.tree_leaves(updates)), None), 'dtype', None)
      else:
        dtype = hyperparam_dtype
      # Start from the (possibly user-edited) stored hyperparameters, then
      # re-evaluate schedules so scheduled values cannot be overridden.
      hparams = {k: _convert_floats(v, dtype)
                 for k, v in state.hyperparams.items()}
      hparams.update(schedule_fn(state.count, dtype))
      updates, inner_state = inner_factory(**other_hps, **hparams).update(
          updates, state.inner_state, params)
      # Saturating increment avoids int32 overflow on very long runs.
      count_inc = numerics.safe_int32_increment(state.count)

      # pylint:disable=too-many-function-args
      return updates, InjectHyperparamsState(count_inc, hparams, inner_state)
      # pylint:enable=too-many-function-args

    return base.GradientTransformation(init_fn, update_fn)

  return wrapped_transform
|
lib/python3.10/site-packages/optax/_src/schedule_test.py
ADDED
|
@@ -0,0 +1,649 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `schedule.py`."""
|
| 16 |
+
|
| 17 |
+
import functools
|
| 18 |
+
|
| 19 |
+
from absl.testing import absltest
|
| 20 |
+
from absl.testing import parameterized
|
| 21 |
+
|
| 22 |
+
import chex
|
| 23 |
+
import jax
|
| 24 |
+
import jax.numpy as jnp
|
| 25 |
+
import numpy as np
|
| 26 |
+
|
| 27 |
+
from optax._src import clipping
|
| 28 |
+
from optax._src import schedule
|
| 29 |
+
from optax._src import transform
|
| 30 |
+
from optax._src import wrappers
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class ConstantTest(chex.TestCase):
  """Tests for `constant_schedule`."""

  @chex.all_variants
  def test_constant(self):
    """The schedule must return the same value at every step."""
    value = 10
    steps = 15
    sched = self.variant(schedule.constant_schedule(value))
    # Evaluate the schedule over the whole horizon in one pass.
    actual = np.array([sched(step) for step in range(steps)])
    expected = np.array([value] * steps, dtype=np.float32)
    np.testing.assert_allclose(expected, actual, atol=1e-3)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class PolynomialTest(chex.TestCase):
  """Tests for `polynomial_schedule`."""

  @chex.all_variants
  def test_linear(self):
    """A power-1 polynomial decays linearly, then holds the end value."""
    sched = self.variant(
        schedule.polynomial_schedule(
            init_value=10., end_value=20., power=1, transition_steps=10))
    actual = np.array([sched(step) for step in range(15)])
    # Linear ramp from 10 to 20 over ten steps, then flat.
    expected = np.array(list(range(10, 20)) + [20] * 5, dtype=np.float32)
    np.testing.assert_allclose(expected, actual, atol=1e-3)

  @chex.all_variants
  def test_zero_steps_schedule(self):
    """Non-positive transition_steps yields a constant schedule."""
    init_value = 10.
    end_value = 20.

    for steps in [-1, 0]:
      sched = self.variant(
          schedule.polynomial_schedule(
              init_value=init_value, end_value=end_value,
              power=1, transition_steps=steps))
      for step in range(15):
        np.testing.assert_allclose(sched(step), init_value)

  @chex.all_variants
  def test_nonlinear(self):
    """A power-2 polynomial decays quadratically towards end_value."""
    sched = self.variant(
        schedule.polynomial_schedule(
            init_value=25., end_value=10., power=2, transition_steps=10))
    actual = np.array([sched(step) for step in range(15)])
    expected = np.array(
        [10. + 15. * (1. - n / 10)**2 for n in range(10)] + [10] * 5,
        dtype=np.float32)
    np.testing.assert_allclose(expected, actual, atol=1e-3)

  @chex.all_variants
  def test_with_decay_begin(self):
    """The schedule stays at init_value until transition_begin is reached."""
    sched = self.variant(
        schedule.polynomial_schedule(
            init_value=30., end_value=10., power=2,
            transition_steps=10, transition_begin=4))
    actual = np.array([sched(step) for step in range(20)])
    # Four flat steps, a quadratic decay, then flat at the end value.
    expected = np.array(
        [30.] * 4 + [10. + 20. * (1. - n / 10)**2 for n in range(10)] +
        [10] * 6,
        dtype=np.float32)
    np.testing.assert_allclose(expected, actual, atol=1e-3)
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
class PiecewiseConstantTest(chex.TestCase):
  """Tests for `piecewise_constant_schedule`."""

  @chex.all_variants
  def test_positive(self):
    """Boundaries multiply a positive base value by the given factors."""
    sched = self.variant(
        schedule.piecewise_constant_schedule(0.1, {3: 2., 6: 0.5}))
    actual = np.array([sched(step) for step in range(10)])
    expected = np.array([0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
    np.testing.assert_allclose(expected, actual, atol=1e-3)

  @chex.all_variants
  def test_negative(self):
    """The same multiplicative boundaries apply to a negative base value."""
    sched = self.variant(
        schedule.piecewise_constant_schedule(-0.1, {3: 2., 6: 0.5}))
    actual = np.array([sched(step) for step in range(10)])
    expected = -1 * np.array(
        [0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
    np.testing.assert_allclose(expected, actual, atol=1e-3)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
class ExponentialTest(chex.TestCase):
  """Tests for `exponential_decay`."""

  @chex.all_variants
  @parameterized.parameters(False, True)
  def test_constant_schedule(self, staircase):
    """A decay rate of 1 keeps the schedule constant."""
    steps = 15
    init_value = 1.
    sched = self.variant(
        schedule.exponential_decay(
            init_value=init_value, transition_steps=steps,
            decay_rate=1., staircase=staircase))
    actual = np.array([sched(step) for step in range(steps)])
    expected = np.array([init_value] * steps, dtype=np.float32)
    np.testing.assert_allclose(expected, actual, atol=1e-3)

  @chex.all_variants
  @parameterized.parameters(False, True)
  def test_nonvalid_transition_steps(self, staircase):
    """Non-positive transition_steps results in a constant schedule."""
    init_value = 1.
    for steps in [-1, 0]:
      sched = self.variant(
          schedule.exponential_decay(
              init_value=init_value, transition_steps=steps,
              decay_rate=1., staircase=staircase))
      for step in range(15):
        np.testing.assert_allclose(sched(step), init_value)

  @chex.all_variants
  @parameterized.parameters(False, True)
  def test_nonvalid_decay_rate(self, staircase):
    """A zero decay rate results in a constant schedule."""
    init_value = 1.
    sched = self.variant(
        schedule.exponential_decay(
            init_value=init_value, transition_steps=2,
            decay_rate=0., staircase=staircase))
    for step in range(15):
      np.testing.assert_allclose(sched(step), init_value)

  @chex.all_variants
  @parameterized.parameters((False, 0), (True, 0), (False, 5), (True, 5))
  def test_exponential(self, staircase, transition_begin):
    """Checks the exponential decay against a NumPy reference."""
    init_value = 1.
    steps = 15
    transition_steps = 2
    decay_rate = 2.
    sched = self.variant(
        schedule.exponential_decay(
            init_value=init_value, transition_steps=transition_steps,
            decay_rate=decay_rate, transition_begin=transition_begin,
            staircase=staircase))

    def progress(step):
      # With staircase the exponent only advances at whole transitions.
      frac = step / transition_steps
      return np.floor(frac) if staircase else frac

    actual = np.array(
        [sched(step) for step in range(steps + transition_begin)])
    expected = np.array(
        [init_value] * transition_begin + [
            init_value * np.power(decay_rate, progress(step))
            for step in range(steps)
        ],
        dtype=np.float32)
    np.testing.assert_allclose(expected, actual, atol=1e-3)

  @chex.all_variants
  @parameterized.parameters(
      (0.2, 0.1, False), (1.0, 0.1, False), (2.0, 3.0, False),
      (0.2, 0.1, True), (1.0, 0.1, True), (2.0, 3.0, True))
  def test_end_value_with_staircase(self, decay_rate, end_value, staircase):
    """The schedule saturates at end_value in the direction of decay."""
    init_value = 1.
    steps = 11
    transition_steps = 2
    transition_begin = 3
    sched = self.variant(
        schedule.exponential_decay(
            init_value=init_value, transition_steps=transition_steps,
            decay_rate=decay_rate, transition_begin=transition_begin,
            staircase=staircase, end_value=end_value))

    def progress(step):
      frac = step / transition_steps
      return np.floor(frac) if staircase else frac

    actual = np.array(
        [sched(step) for step in range(steps + transition_begin)])
    expected = np.array(
        [init_value] * transition_begin + [
            init_value * np.power(decay_rate, progress(step))
            for step in range(steps)
        ],
        dtype=np.float32)

    # A decreasing schedule is clipped from below, an increasing one
    # from above.
    if decay_rate < 1.0:
      expected = np.maximum(expected, end_value)
    else:
      expected = np.minimum(expected, end_value)

    np.testing.assert_allclose(expected, actual, atol=1e-3)

  @chex.all_variants
  def test_immutable_count(self):
    """The schedule accepts a read-only jax array as the step count."""
    steps = 5
    init_value = 32.
    sched = self.variant(
        schedule.exponential_decay(
            init_value=init_value, transition_steps=1, decay_rate=0.5))
    # Jax arrays are read-only in ChexVariantType.WITHOUT_DEVICE.
    actual = np.array([
        sched(jnp.array(step, dtype=jnp.float32)) for step in range(steps)])
    expected = np.array([32, 16, 8, 4, 2], dtype=np.float32)
    np.testing.assert_allclose(expected, actual, atol=1e-3)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
class CosineDecayTest(chex.TestCase):
  """Tests for `cosine_decay_schedule`."""

  @chex.all_variants
  def test_decay_count_smaller_count(self):
    """The cosine decay spans the full training horizon."""
    init_value = 0.1
    sched = self.variant(
        schedule.cosine_decay_schedule(init_value, 10, 0.0))
    actual = np.array([sched(step) for step in range(10)])
    multipliers = 0.5 + 0.5 * np.cos(
        np.pi * np.array(
            [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]))
    np.testing.assert_allclose(init_value * multipliers, actual, atol=1e-3)

  @chex.all_variants
  def test_decay_count_greater_count(self):
    """Past decay_steps the schedule holds its final value."""
    init_value = 0.1
    sched = self.variant(
        schedule.cosine_decay_schedule(init_value, 5, 0.0))
    actual = np.array([sched(step) for step in range(12)])
    multipliers = 0.5 + 0.5 * np.cos(
        np.pi * np.array(
            [0.0, 0.2, 0.4, 0.6, 0.8, 1., 1., 1., 1., 1., 1., 1.]))
    np.testing.assert_allclose(init_value * multipliers, actual, atol=1e-3)

  @chex.all_variants
  def test_decay_count_greater_count_with_alpha(self):
    """A non-zero alpha lower-bounds the decay multiplier."""
    init_value = 0.1
    sched = self.variant(
        schedule.cosine_decay_schedule(init_value, 5, 0.1))
    actual = np.array([sched(step) for step in range(12)])
    multipliers = 0.5 + 0.5 * np.cos(
        np.pi * np.array(
            [0.0, 0.2, 0.4, 0.6, 0.8, 1., 1., 1., 1., 1., 1., 1.]))
    # alpha=0.1 rescales the raw multiplier into [0.1, 1.0].
    multipliers = 0.9 * multipliers + 0.1
    np.testing.assert_allclose(init_value * multipliers, actual, atol=1e-3)
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
class WarmupCosineDecayTest(chex.TestCase):
  """Tests for `warmup_cosine_decay_schedule`."""

  @chex.all_variants
  @parameterized.named_parameters(
      ('with end value', 10, 0.5, 1e-4),
      ('without end value', 5, 3, 0.),)
  def test_limits(self, init_value, peak_value, end_value):
    """The schedule hits init, peak and end values at the key steps."""
    sched = self.variant(schedule.warmup_cosine_decay_schedule(
        init_value=init_value,
        peak_value=peak_value,
        warmup_steps=100,
        decay_steps=1000,
        end_value=end_value,
    ))
    np.testing.assert_allclose(init_value, sched(0))
    np.testing.assert_allclose(peak_value, sched(100))
    np.testing.assert_allclose(end_value, sched(1000), rtol=1e-3)
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
class SGDRTest(chex.TestCase):
  """Tests for `sgdr_schedule` (SGD with warm restarts)."""

  @chex.all_variants
  @parameterized.named_parameters(
      ('with step decay', 1.6, 0.8, 0.4),
      ('without step_decay', 1.6, 1.6, 1.6),)
  def test_limits(self, lr0, lr1, lr2):
    """Each restart cycle peaks at its configured learning rate."""
    cycle_kwargs = [
        dict(decay_steps=int(step), peak_value=lr,
             init_value=0, end_value=0.0, warmup_steps=500)
        for step, lr in zip([2e3, 3e3, 5e3], [lr0, lr1, lr2])
    ]
    sched = self.variant(schedule.sgdr_schedule(cycle_kwargs))
    # Each peak occurs warmup_steps after the start of its cycle.
    np.testing.assert_allclose(lr0, sched(500))
    np.testing.assert_allclose(lr1, sched(2500))
    np.testing.assert_allclose(lr2, sched(5500))
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
class PiecewiseInterpolateTest(chex.TestCase):
  """Tests for `piecewise_interpolate_schedule`."""

  @chex.all_variants
  def test_linear_piecewise(self):
    """Linear interpolation between the boundary scale factors."""
    sched = self.variant(schedule.piecewise_interpolate_schedule(
        'linear', 200., {5: 1.5, 10: 0.25}))
    actual = [sched(step) for step in range(13)]
    expected = [200., 220., 240., 260., 280., 300., 255., 210., 165.,
                120., 75., 75., 75.]
    np.testing.assert_allclose(actual, expected, atol=1e-3)

  @chex.all_variants
  def test_cos_piecewise(self):
    """Cosine interpolation; boundary keys need not be sorted."""
    sched = self.variant(schedule.piecewise_interpolate_schedule(
        'cosine', 400., {5: 1.2, 3: 0.6, 7: 1.}))
    actual = [sched(step) for step in range(9)]
    expected = [400., 360., 280., 240., 264., 288., 288., 288., 288.]
    np.testing.assert_allclose(actual, expected, atol=1e-3)

  @chex.all_variants
  def test_empty_dict(self):
    """No boundaries means a constant schedule."""
    sched = self.variant(schedule.piecewise_interpolate_schedule(
        'linear', 13., {}))
    actual = [sched(step) for step in range(5)]
    np.testing.assert_allclose(actual, [13., 13., 13., 13., 13.], atol=1e-3)

  @chex.all_variants
  def test_no_dict(self):
    """Omitting the boundaries dict also yields a constant schedule."""
    sched = self.variant(schedule.piecewise_interpolate_schedule(
        'cosine', 17.))
    actual = [sched(step) for step in range(3)]
    np.testing.assert_allclose(actual, [17., 17., 17.], atol=1e-3)

  def test_invalid_type(self):
    """An unknown interpolation type must be rejected."""
    # pytype: disable=wrong-arg-types
    with self.assertRaises(ValueError):
      schedule.piecewise_interpolate_schedule('linar', 13.)
    with self.assertRaises(ValueError):
      schedule.piecewise_interpolate_schedule('', 13., {5: 3.})
    with self.assertRaises(ValueError):
      schedule.piecewise_interpolate_schedule(None, 13., {})
    # pytype: enable=wrong-arg-types

  def test_invalid_scale(self):
    """Negative scale factors must be rejected."""
    with self.assertRaises(ValueError):
      schedule.piecewise_interpolate_schedule('linear', 13., {5: -3})
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
class OneCycleTest(chex.TestCase):
  """Tests for the one-cycle schedules."""

  @chex.all_variants
  def test_linear(self):
    """Linear one-cycle: ramp up, ramp down, final annihilation phase."""
    sched = self.variant(schedule.linear_onecycle_schedule(
        transition_steps=10,
        peak_value=1000,
        pct_start=0.3,
        pct_final=0.7,
        div_factor=10.,
        final_div_factor=100.))

    actual = [sched(step) for step in range(12)]
    expected = [100., 400., 700., 1000., 775., 550., 325., 100., 67.,
                34., 1., 1.]
    np.testing.assert_allclose(actual, expected, atol=1e-3)

  @chex.all_variants
  def test_cosine(self):
    """Cosine one-cycle: warmup then cosine anneal to the floor."""
    sched = self.variant(schedule.cosine_onecycle_schedule(
        transition_steps=5,
        peak_value=1000.,
        pct_start=0.4,
        div_factor=10.,
        final_div_factor=100.))

    actual = [sched(step) for step in range(7)]
    expected = [100., 550., 1000., 750.25, 250.75, 1., 1.]
    np.testing.assert_allclose(actual, expected, atol=1e-3)

  def test_nonpositive_transition_steps(self):
    """Zero transition_steps must be rejected by both variants."""
    with self.assertRaises(ValueError):
      schedule.cosine_onecycle_schedule(transition_steps=0, peak_value=5.)
    with self.assertRaises(ValueError):
      schedule.linear_onecycle_schedule(transition_steps=0, peak_value=5.)
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
class InjectHyperparamsTest(chex.TestCase):
|
| 499 |
+
"""Tests for the inject_hyperparams wrapper."""
|
| 500 |
+
|
| 501 |
+
@chex.all_variants
|
| 502 |
+
def test_updates(self):
|
| 503 |
+
optim = schedule.inject_hyperparams(transform.scale)( # stateless
|
| 504 |
+
step_size=schedule.piecewise_constant_schedule(
|
| 505 |
+
3.0, {1: 5, 7: 2, 12: 1.5}))
|
| 506 |
+
|
| 507 |
+
params = [jnp.zeros([], dtype=jnp.float32)]
|
| 508 |
+
state = self.variant(optim.init)(params)
|
| 509 |
+
update_fn = self.variant(optim.update)
|
| 510 |
+
expected_step_size = [3.0]*2 + [15.0]*6 + [30.0]*5 + [45.0]*3
|
| 511 |
+
|
| 512 |
+
grads = [jnp.ones([], dtype=jnp.float32)]
|
| 513 |
+
for i in range(15):
|
| 514 |
+
updates, state = update_fn(grads, state, params=params)
|
| 515 |
+
np.testing.assert_almost_equal(updates[0], expected_step_size[i+1])
|
| 516 |
+
|
| 517 |
+
@chex.all_variants
|
| 518 |
+
def test_hyperparams_state(self):
|
| 519 |
+
optim = schedule.inject_hyperparams(transform.trace)( # stateful
|
| 520 |
+
decay=schedule.piecewise_constant_schedule(
|
| 521 |
+
0.8, {3: 0.5, 9: 1.25}),
|
| 522 |
+
nesterov=True)
|
| 523 |
+
|
| 524 |
+
params = [jnp.zeros([2, 3]) for _ in range(3)]
|
| 525 |
+
state = self.variant(optim.init)(params)
|
| 526 |
+
update_fn = self.variant(optim.update)
|
| 527 |
+
|
| 528 |
+
expected_mom = [0.8]*4 + [0.4]*6 + [0.5]*2
|
| 529 |
+
grads = jax.tree_util.tree_map(jnp.ones_like, params)
|
| 530 |
+
for i in range(12):
|
| 531 |
+
np.testing.assert_almost_equal(state.hyperparams['decay'],
|
| 532 |
+
expected_mom[i])
|
| 533 |
+
_, state = update_fn(grads, state)
|
| 534 |
+
|
| 535 |
+
np.testing.assert_almost_equal(state.hyperparams['decay'],
|
| 536 |
+
expected_mom[-1])
|
| 537 |
+
|
| 538 |
+
@chex.all_variants
|
| 539 |
+
def test_constant_hyperparams(self):
|
| 540 |
+
optim = schedule.inject_hyperparams(transform.scale_by_adam)(b1=0., b2=0.)
|
| 541 |
+
|
| 542 |
+
params = [jnp.zeros([2, 3]) for _ in range(3)]
|
| 543 |
+
state = self.variant(optim.init)(params)
|
| 544 |
+
update_fn = self.variant(optim.update)
|
| 545 |
+
|
| 546 |
+
grads = jax.tree_util.tree_map(jnp.ones_like, params)
|
| 547 |
+
for _ in range(5):
|
| 548 |
+
updates, state = update_fn(grads, state, params)
|
| 549 |
+
np.testing.assert_almost_equal(state.hyperparams['b1'], 0.0)
|
| 550 |
+
np.testing.assert_almost_equal(state.hyperparams['b2'], 0.0)
|
| 551 |
+
np.testing.assert_almost_equal(state.hyperparams['eps'], 1e-8)
|
| 552 |
+
np.testing.assert_almost_equal(state.hyperparams['eps_root'], 0.0)
|
| 553 |
+
assert 'eps' in state.hyperparams
|
| 554 |
+
chex.assert_tree_all_close(updates, grads)
|
| 555 |
+
|
| 556 |
+
@chex.all_variants
|
| 557 |
+
def test_overriding_hyperparam(self):
|
| 558 |
+
optim = schedule.inject_hyperparams(clipping.clip_by_global_norm)(0.1)
|
| 559 |
+
params = jnp.zeros((3, 5, 7))
|
| 560 |
+
state = self.variant(optim.init)(params)
|
| 561 |
+
update_fn = self.variant(optim.update)
|
| 562 |
+
|
| 563 |
+
grads = jnp.ones_like(params)
|
| 564 |
+
for i in range(5):
|
| 565 |
+
state.hyperparams['max_norm'] = i
|
| 566 |
+
updates, state = update_fn(grads, state)
|
| 567 |
+
assert np.isclose(jnp.linalg.norm(updates.ravel()), i)
|
| 568 |
+
|
| 569 |
+
@chex.all_variants
|
| 570 |
+
@parameterized.named_parameters(('string', 'mask'), ('list', ['mask']))
|
| 571 |
+
def test_static_args(self, static_args):
|
| 572 |
+
@functools.partial(schedule.inject_hyperparams, static_args=static_args)
|
| 573 |
+
def custom_optim(learning_rate, mask):
|
| 574 |
+
return wrappers.masked(transform.scale(-learning_rate), mask)
|
| 575 |
+
|
| 576 |
+
optim = custom_optim(
|
| 577 |
+
0.1, functools.partial(jax.tree_util.tree_map, lambda x: x.ndim > 1))
|
| 578 |
+
params = [jnp.ones((1, 2)), jnp.ones(2), jnp.ones((1, 1, 1))]
|
| 579 |
+
grads = params
|
| 580 |
+
state = self.variant(optim.init)(params)
|
| 581 |
+
updates, state = self.variant(optim.update)(grads, state)
|
| 582 |
+
expected_updates = jax.tree_util.tree_map(
|
| 583 |
+
lambda x: -0.1 * x if x.ndim > 1 else x, grads)
|
| 584 |
+
|
| 585 |
+
assert set(state.hyperparams.keys()) == {'learning_rate'}, state.hyperparams
|
| 586 |
+
chex.assert_tree_all_close(updates, expected_updates)
|
| 587 |
+
|
| 588 |
+
@chex.all_variants
|
| 589 |
+
@parameterized.named_parameters(('one_arg', 'b1'), ('two_arg', ['b1', 'b2']))
|
| 590 |
+
def test_numeric_static_args(self, static_args):
|
| 591 |
+
optim = schedule.inject_hyperparams(
|
| 592 |
+
transform.scale_by_adam, static_args=static_args)(b1=0.9, b2=0.95)
|
| 593 |
+
|
| 594 |
+
params = [jnp.ones((1, 2)), jnp.ones(2), jnp.ones((1, 1, 1))]
|
| 595 |
+
grads = params
|
| 596 |
+
state = self.variant(optim.init)(params)
|
| 597 |
+
_, state = self.variant(optim.update)(grads, state)
|
| 598 |
+
|
| 599 |
+
assert not set(state.hyperparams.keys()).intersection(set(static_args))
|
| 600 |
+
|
| 601 |
+
@chex.all_variants
|
| 602 |
+
@parameterized.named_parameters(
|
| 603 |
+
('bf16hyp f32param bf16grad', jnp.bfloat16, jnp.float32, jnp.bfloat16),
|
| 604 |
+
('bf16hyp f32param f32_grads', jnp.bfloat16, jnp.float32, jnp.float32),
|
| 605 |
+
('f32hyp bf16param bf16grad', jnp.float32, jnp.bfloat16, jnp.bfloat16),
|
| 606 |
+
('f32hyp f32param bf16grad', jnp.float32, jnp.float32, jnp.bfloat16),
|
| 607 |
+
('f32hyp bf16param f32grad', jnp.float32, jnp.bfloat16, jnp.float32),
|
| 608 |
+
)
|
| 609 |
+
def test_hyperparam_dtypes(self,
|
| 610 |
+
hyperparam_dtype,
|
| 611 |
+
param_dtype,
|
| 612 |
+
grad_dtype):
|
| 613 |
+
"""Tests that hyperparam dtype override works as desired."""
|
| 614 |
+
optim = schedule.inject_hyperparams(
|
| 615 |
+
transform.scale_by_adam,
|
| 616 |
+
hyperparam_dtype=hyperparam_dtype)(b1=0.9, b2=0.95)
|
| 617 |
+
|
| 618 |
+
params = [jnp.ones((1, 2), dtype=param_dtype),
|
| 619 |
+
jnp.ones(2, dtype=param_dtype),
|
| 620 |
+
jnp.ones((1, 1, 1), dtype=param_dtype)]
|
| 621 |
+
grads = jax.tree_map(lambda x: x.astype(grad_dtype), params)
|
| 622 |
+
state = self.variant(optim.init)(params)
|
| 623 |
+
# Check that the hyperparams are overriden
|
| 624 |
+
self.assertEqual(state.hyperparams['b1'].dtype, hyperparam_dtype)
|
| 625 |
+
self.assertEqual(state.hyperparams['b2'].dtype, hyperparam_dtype)
|
| 626 |
+
|
| 627 |
+
_, state = self.variant(optim.update)(grads, state)
|
| 628 |
+
|
| 629 |
+
self.assertEqual(state.hyperparams['b1'].dtype, hyperparam_dtype)
|
| 630 |
+
self.assertEqual(state.hyperparams['b2'].dtype, hyperparam_dtype)
|
| 631 |
+
|
| 632 |
+
@parameterized.named_parameters(('string', 'lr'), ('list', ['lr']))
def test_static_args_error(self, static_args):
  """Checks that invalid `static_args` values raise a ValueError."""
  with self.assertRaises(ValueError):
    schedule.inject_hyperparams(transform.scale, static_args=static_args)
|
| 636 |
+
|
| 637 |
+
@chex.all_variants
def test_inject_hyperparams_starts_with_step_count_zero(self):
  """Checks that inject_hyperparams uses step count 0 in the first update."""
  # See also: https://github.com/deepmind/optax/issues/415.
  optim = schedule.inject_hyperparams(transform.scale)(lambda count: count)
  initial_state = optim.init(jnp.zeros(3))
  grads = jnp.array([-1, 0, 1])
  # A step count of 0 makes the injected schedule scale all updates to zero.
  updates, _ = self.variant(optim.update)(grads, initial_state)
  np.testing.assert_array_equal(updates, np.zeros(3))
|
| 646 |
+
|
| 647 |
+
|
| 648 |
+
# Standard absltest entry point: discovers and runs the test cases above.
if __name__ == '__main__':
  absltest.main()
|
lib/python3.10/site-packages/optax/_src/second_order.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Functions for computing diagonals of Hessians & Fisher info of parameters.
|
| 16 |
+
|
| 17 |
+
Computing the Hessian or Fisher information matrices for neural networks is
|
| 18 |
+
typically intractable due to the quadratic memory requirements. Solving for the
|
| 19 |
+
diagonals of these matrices is often a better solution.
|
| 20 |
+
|
| 21 |
+
This module provides two functions for computing these diagonals, `hessian_diag`
|
| 22 |
+
and `fisher_diag`, each with sub-quadratic memory requirements.
|
| 23 |
+
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
from typing import Any, Callable
|
| 27 |
+
|
| 28 |
+
import jax
|
| 29 |
+
from jax.flatten_util import ravel_pytree
|
| 30 |
+
import jax.numpy as jnp
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# This covers both Jax and Numpy arrays.
|
| 34 |
+
# TODO(b/160876114): use the pytypes defined in Chex.
|
| 35 |
+
Array = jnp.ndarray
|
| 36 |
+
# LossFun of type f(params, inputs, targets).
|
| 37 |
+
LossFun = Callable[[Any, Array, Array], Array]
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def ravel(p: Any) -> jnp.ndarray:
  """Flattens an arbitrary pytree `p` into a single rank-1 array."""
  return ravel_pytree(p)[0]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def hvp(
    loss: Callable[[Any, jnp.ndarray, jnp.ndarray], jnp.ndarray],
    v: jnp.ndarray,
    params: Any,
    inputs: jnp.ndarray,
    targets: jnp.ndarray,
) -> jnp.ndarray:
  """Performs an efficient vector-Hessian (of `loss`) product.

  The Hessian is never materialized: the product is computed as the
  forward-mode derivative (`jax.jvp`) of the gradient function, so memory
  scales with the number of parameters rather than its square.

  Args:
    loss: the loss function, called as `loss(params, inputs, targets)`.
    v: a flat vector of size `ravel_pytree(params)[0].size`.
    params: model parameters (an arbitrary pytree).
    inputs: inputs at which `loss` is evaluated.
    targets: targets at which `loss` is evaluated.

  Returns:
    The product of the Hessian of `loss` (evaluated at
    `(params, inputs, targets)`) with `v`, structured as a pytree matching
    `params` (not a flat array).
  """
  _, unravel_fn = ravel_pytree(params)
  loss_fn = lambda p: loss(p, inputs, targets)
  # jvp of the gradient function == Hessian-vector product
  # (forward-over-reverse differentiation).
  return jax.jvp(jax.grad(loss_fn), [params], [unravel_fn(v)])[1]
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def hessian_diag(
    loss: Callable[[Any, jnp.ndarray, jnp.ndarray], jnp.ndarray],
    params: Any,
    inputs: jnp.ndarray,
    targets: jnp.ndarray,
) -> jnp.ndarray:
  """Computes the diagonal of the Hessian of `loss` at (`inputs`, `targets`).

  Args:
    loss: the loss function, called as `loss(params, inputs, targets)`.
    params: model parameters (an arbitrary pytree).
    inputs: inputs at which `loss` is evaluated.
    targets: targets at which `loss` is evaluated.

  Returns:
    A rank-1 array of size `ravel(params).size` whose i-th entry is the
    i-th diagonal entry of the Hessian of `loss` evaluated at
    `(params, inputs, targets)`.
  """
  # One standard basis vector per parameter: diag_i = <e_i, H e_i>.  Note
  # that `jnp.eye` materializes an n x n matrix, so memory here is still
  # quadratic in the parameter count, even though H itself is never formed.
  vs = jnp.eye(ravel(params).size)
  comp = lambda v: jnp.vdot(v, ravel(hvp(loss, v, params, inputs, targets)))
  return jax.vmap(comp)(vs)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def fisher_diag(
    negative_log_likelihood: Callable[[Any, jnp.ndarray, jnp.ndarray],
                                      jnp.ndarray],
    params: Any,
    inputs: jnp.ndarray,
    targets: jnp.ndarray,
) -> jnp.ndarray:
  """Computes the diagonal of the (observed) Fisher information matrix.

  The observed Fisher diagonal is estimated as the elementwise square of
  the gradient of the negative log likelihood.

  Args:
    negative_log_likelihood: the negative log likelihood function, called
      as `negative_log_likelihood(params, inputs, targets)`.
    params: model parameters (an arbitrary pytree).
    inputs: inputs at which `negative_log_likelihood` is evaluated.
    targets: targets at which `negative_log_likelihood` is evaluated.

  Returns:
    A rank-1 array of size `ravel(params).size` holding the estimated
    diagonal of the observed Fisher information matrix of
    `negative_log_likelihood` evaluated at `(params, inputs, targets)`.
  """
  return jnp.square(
      ravel(jax.grad(negative_log_likelihood)(params, inputs, targets)))
|
lib/python3.10/site-packages/optax/_src/stochastic_gradient_estimators.py
ADDED
|
@@ -0,0 +1,317 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
r"""Stochastic Monte Carlo gradient estimators.
|
| 16 |
+
|
| 17 |
+
Utility functions to approximate gradients of the form using Monte Carlo
|
| 18 |
+
estimation:
|
| 19 |
+
\nabla_{\theta} E_{p(x; \theta)} f(x)
|
| 20 |
+
|
| 21 |
+
Here f is assumed to have no dependence on the parameters theta - if f has
|
| 22 |
+
dependence on theta, the functions below need to be called with `stop_grad(f)`
|
| 23 |
+
and the chain rule needs to be applied outside these functions in order
|
| 24 |
+
to obtain unbiased gradient.
|
| 25 |
+
|
| 26 |
+
For more details, see:
|
| 27 |
+
S. Mohamed, M. Rosca, M. Figurnov, A Mnih.
|
| 28 |
+
Monte Carlo Gradient Estimation in Machine Learning. JMLR, 2020.
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
import math
|
| 32 |
+
from typing import Any, Callable, Sequence
|
| 33 |
+
|
| 34 |
+
import chex
|
| 35 |
+
import jax
|
| 36 |
+
import jax.numpy as jnp
|
| 37 |
+
import numpy as np
|
| 38 |
+
from optax._src import base
|
| 39 |
+
from optax._src import utils
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def score_function_jacobians(
    function: Callable[[chex.Array], float],
    params: base.Params,
    dist_builder: Callable[..., Any],
    rng: chex.PRNGKey,
    num_samples: int) -> Sequence[chex.Array]:
  r"""Score function gradient estimation.

  Approximates:
    \nabla_{\theta} E_{p(x; \theta)} f(x)
  With:
    E_{p(x; \theta)} f(x) \nabla_{\theta} \log p(x; \theta)

  Requires the density p to be differentiable wrt theta. Applicable to both
  continuous and discrete random variables; places no requirements on f.

  Args:
    function: Function f(x) for which to estimate grads_{params} E_dist f(x).
      It takes one argument (a sample from the distribution) and returns a
      floating point value.
    params: A tuple of jnp arrays parametrizing the distribution.
    dist_builder: A constructor such that `dist_builder(*params)` returns a
      valid distribution.
    rng: A PRNGKey key.
    num_samples: Int, the number of samples used to compute the grads.

  Returns:
    A tuple of size `params`; each element is a `num_samples x param.shape`
    jacobian holding per-sample gradient estimates. The mean over the leading
    axis is the gradient usable for learning; the full jacobian can be used
    to assess estimator variance.
  """
  def surrogate(theta):
    dist = dist_builder(*theta)
    # Sampling carries no gradient: only the log-prob term below does.
    draws = jax.lax.stop_gradient(dist.sample((num_samples,), seed=rng))

    def per_sample(x):
      return function(x) * dist.log_prob(x)

    # vmap over samples so `function` itself need not be vectorized.
    return jax.vmap(per_sample)(draws)

  return jax.jacfwd(surrogate)(params)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def pathwise_jacobians(
    function: Callable[[chex.Array], float],
    params: base.Params,
    dist_builder: Callable[..., Any],
    rng: chex.PRNGKey,
    num_samples: int) -> Sequence[chex.Array]:
  r"""Pathwise gradient estimation.

  Approximates:
    \nabla_{\theta} E_{p(x; \theta)} f(x)
  With:
    E_{p(\epsilon)} \nabla_{\theta} f(g(\epsilon, \theta))
  where x = g(\epsilon, \theta) and g depends on the distribution p.

  Requires p to be reparametrizable with the reparametrization implemented
  in tensorflow_probability. Applicable to continuous random variables;
  f needs to be differentiable.

  Args:
    function: Function f(x) for which to estimate grads_{params} E_dist f(x).
      It takes one argument (a sample from the distribution) and returns a
      floating point value.
    params: A tuple of jnp arrays parametrizing the distribution.
    dist_builder: A constructor such that `dist_builder(*params)` returns a
      valid distribution.
    rng: A PRNGKey key.
    num_samples: Int, the number of samples used to compute the grads.

  Returns:
    A tuple of size `params`; each element is a `num_samples x param.shape`
    jacobian holding per-sample gradient estimates. The mean over the leading
    axis is the gradient usable for learning; the full jacobian can be used
    to assess estimator variance.
  """
  def surrogate(theta):
    dist = dist_builder(*theta)
    draws = dist.sample((num_samples,), seed=rng)
    # vmap over samples so `function` itself need not be vectorized.
    return jax.vmap(function)(draws)

  return jax.jacfwd(surrogate)(params)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def measure_valued_jacobians(
    function: Callable[[chex.Array], float],
    params: base.Params,
    dist_builder: Callable[..., Any],
    rng: chex.PRNGKey,
    num_samples: int,
    coupling: bool = True) -> Sequence[chex.Array]:
  r"""Measure valued gradient estimation.

  Approximates:
    \nabla_{\theta} E_{p(x; \theta)} f(x)
  With:
    1./ c (E_{p1(x; \theta)} f(x) - E_{p2(x; \theta)} f(x)) where p1 and p2
  are measures which depend on p.

  Currently only supports computing gradients of expectations of Gaussian RVs.

  Args:
    function: Function f(x) for which to estimate grads_{params} E_dist f(x).
      It takes one argument (a sample from the distribution) and returns a
      floating point value.
    params: A tuple of jnp arrays parametrizing the distribution.
    dist_builder: A constructor such that `dist_builder(*params)` returns a
      valid distribution. Must be `utils.multi_normal`.
    rng: A PRNGKey key.
    num_samples: Int, the number of samples used to compute the grads.
    coupling: A boolean. Whether or not to use coupling for the positive and
      negative samples. Recommended: True, as this reduces variance.

  Returns:
    A tuple of size `params`; each element is a `num_samples x param.shape`
    jacobian holding per-sample gradient estimates. The mean over the leading
    axis is the gradient usable for learning; the full jacobian can be used
    to assess estimator variance.
  """
  if dist_builder is not utils.multi_normal:
    raise ValueError(
        'Unsupported distribution builder for measure_valued_jacobians!')
  dist = dist_builder(*params)
  mean_jacobians = measure_valued_estimation_mean(
      function, dist, rng, num_samples, coupling=coupling)
  scale_jacobians = measure_valued_estimation_std(
      function, dist, rng, num_samples, coupling=coupling)
  # The distribution is parametrized by log scale, so apply the chain rule
  # to turn the scale gradient into a log-scale gradient.
  return [mean_jacobians, jnp.exp(dist.log_scale) * scale_jacobians]
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def measure_valued_estimation_mean(
    function: Callable[[chex.Array], float],
    dist: Any,
    rng: chex.PRNGKey,
    num_samples: int,
    coupling: bool = True) -> chex.Array:
  """Measure valued grads of a Gaussian expectation of `function` wrt the mean.

  Args:
    function: Function f(x) for which to estimate grads_{mean} E_dist f(x).
      It takes one argument (a sample from the distribution) and returns a
      floating point value.
    dist: A distribution on which we can call `sample`.
    rng: A PRNGKey key.
    num_samples: Int, the number of samples used to compute the grads.
    coupling: A boolean. Whether or not to use coupling for the positive and
      negative samples. Recommended: True, as this reduces variance.

  Returns:
    A `num_samples x D` vector of per-sample gradient estimates. Its mean can
    be used to update the mean parameter; the whole vector can be used to
    assess estimator variance.
  """
  mean, log_std = dist.params
  std = jnp.exp(log_std)

  dist_samples = dist.sample((num_samples,), seed=rng)

  pos_rng, neg_rng = jax.random.split(rng)
  draw_weibull = lambda key: jax.random.weibull_min(
      key, scale=math.sqrt(2.), concentration=2., shape=dist_samples.shape)
  pos_sample = draw_weibull(pos_rng)
  # Coupling reuses the positive draw for the negative measure, which lowers
  # the variance of the difference computed below.
  neg_sample = pos_sample if coupling else draw_weibull(neg_rng)

  # N x D perturbed diagonals for the positive and negative measures.
  positive_diag = mean + std * pos_sample
  negative_diag = mean - std * neg_sample

  # NOTE: you can sample base samples here if you use the same rng
  # Duplicate the D dimension - N x D x D - and overwrite each diagonal.
  base_dist_samples = utils.tile_second_to_last_dim(dist_samples)
  positive = utils.set_diags(base_dist_samples, positive_diag)
  negative = utils.set_diags(base_dist_samples, negative_diag)

  # Per-dimension normalizing constant of the estimator.
  c = np.sqrt(2 * np.pi) * std  # D
  # Apply `function` to each element of the N x D x D tensors: the inner vmap
  # runs over the second axis, the outer over the batch, giving an N x D
  # result (batch by dimension); the division by c broadcasts over D.
  vmaped_function = jax.vmap(jax.vmap(function, 1, 0))
  grads = (vmaped_function(positive) - vmaped_function(negative)) / c

  chex.assert_shape(grads, (num_samples,) + std.shape)
  return grads
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def measure_valued_estimation_std(
    function: Callable[[chex.Array], float],
    dist: Any,
    rng: chex.PRNGKey,
    num_samples: int,
    coupling: bool = True) -> chex.Array:
  """Measure valued grads of a Gaussian expectation of `function` wrt the std.

  Args:
    function: Function f(x) for which to estimate grads_{std} E_dist f(x).
      It takes one argument (a sample from the distribution) and returns a
      floating point value.
    dist: A distribution on which we can call `sample`.
    rng: A PRNGKey key.
    num_samples: Int, the number of samples used to compute the grads.
    coupling: A boolean. Whether or not to use coupling for the positive and
      negative samples. Recommended: True, as this reduces variance.

  Returns:
    A `num_samples x D` vector of per-sample gradient estimates. Its mean can
    be used to update the scale parameter; the whole vector can be used to
    assess estimator variance.
  """
  mean, log_std = dist.params
  std = jnp.exp(log_std)

  dist_samples = dist.sample((num_samples,), seed=rng)

  pos_rng, neg_rng = jax.random.split(rng)

  # Compared with the mean estimator, only what gets sampled differs.
  pos_sample = jax.random.double_sided_maxwell(
      pos_rng, loc=0.0, scale=1.0, shape=dist_samples.shape)
  if coupling:
    # Coupled negative sample: the Maxwell draw scaled by an independent
    # uniform.
    neg_sample = jax.random.uniform(neg_rng, dist_samples.shape) * pos_sample
  else:
    neg_sample = jax.random.normal(neg_rng, dist_samples.shape)

  # Both perturbations are added, since the scale has to be positive.
  # N x D
  positive_diag = mean + std * pos_sample
  # N x D
  negative_diag = mean + std * neg_sample

  # NOTE: you can sample base samples here if you use the same rng
  # Duplicate the D dimension - N x D x D - and overwrite each diagonal.
  base_dist_samples = utils.tile_second_to_last_dim(dist_samples)
  positive = utils.set_diags(base_dist_samples, positive_diag)
  negative = utils.set_diags(base_dist_samples, negative_diag)

  # The normalizing constant differs from the one used for the mean.
  c = std  # D
  # Apply `function` to each element of the N x D x D tensors: the inner vmap
  # runs over the second axis, the outer over the batch, giving an N x D
  # result (batch by dimension); the division by c broadcasts over D.
  vmaped_function = jax.vmap(jax.vmap(function, 1, 0))
  grads = (vmaped_function(positive) - vmaped_function(negative)) / c

  chex.assert_shape(grads, (num_samples,) + std.shape)
  return grads
|
| 317 |
+
|
lib/python3.10/site-packages/optax/_src/stochastic_gradient_estimators_test.py
ADDED
|
@@ -0,0 +1,371 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `stochastic_gradient_estimators.py`."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
import numpy as np
|
| 24 |
+
|
| 25 |
+
from optax._src import stochastic_gradient_estimators as sge
|
| 26 |
+
from optax._src import utils
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Set seed for deterministic sampling.
# NOTE(review): this seeds numpy's global RNG only; the jax sampling in the
# tests below uses explicit `jax.random.PRNGKey` values.
np.random.seed(42)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# Number of Monte Carlo samples each estimator needs for the test tolerances
# (the estimators have different variances, hence different counts).
_estimator_to_num_samples = {
    sge.score_function_jacobians: 5 * 10**5,
    sge.measure_valued_jacobians: 10**5,
    sge.pathwise_jacobians: 5 * 10**4,
}

# Larger sample counts used by the weighted-function variants of the tests.
_weighted_estimator_to_num_samples = {
    sge.score_function_jacobians: 5 * 10**6,
    sge.measure_valued_jacobians: 5 * 10**5,
    sge.pathwise_jacobians: 5 * 10**4,
}
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _ones(dims):
|
| 47 |
+
return jnp.ones(shape=(dims), dtype=jnp.float32)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _assert_equal(actual, expected, rtol=1e-2, atol=1e-2):
  """Asserts that arrays are equal."""
  # Note: assert_allclose does not check shapes
  chex.assert_equal_shape((actual, expected))

  # We get around the bug https://github.com/numpy/numpy/issues/13801 by
  # checking the positions where `expected` is zero separately, with an
  # absolute-tolerance-only test.
  zero_indices = np.argwhere(expected == 0)
  if not np.all(np.abs(actual[zero_indices]) <= atol):
    raise AssertionError(f'Larger than {atol} diff in {actual[zero_indices]}')

  nonzero_idx = np.argwhere(expected != 0)
  np.testing.assert_allclose(
      np.asarray(actual)[nonzero_idx], expected[nonzero_idx], rtol, atol)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def _estimator_variant(variant, estimator):
  # Wraps `estimator` in a chex variant; argument positions 0, 2 and 4
  # (function, dist_builder and num_samples in the estimator signatures) are
  # marked static.
  return variant(estimator, static_argnums=(0, 2, 4))
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def _measure_valued_variant(variant):
  # Like `_estimator_variant`, but for `measure_valued_jacobians`, whose
  # trailing `coupling` argument (position 5) must also be static.
  return variant(
      sge.measure_valued_jacobians,
      static_argnums=(0, 2, 4, 5))
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class GradientEstimatorsTest(chex.TestCase):
|
| 77 |
+
|
| 78 |
+
@chex.all_variants
@parameterized.named_parameters(
    chex.params_product([
        ('_score_function_jacobians', sge.score_function_jacobians),
        ('_pathwise_jacobians', sge.pathwise_jacobians),
        ('_measure_valued_jacobians', sge.measure_valued_jacobians),
    ], [
        ('0.1', 0.1),
        ('0.5', 0.5),
        ('0.9', 0.9),
    ],
    named=True))
def testConstantFunction(self, estimator, constant):
  """Gradients of a constant function should vanish for all estimators."""
  data_dims = 3
  num_samples = _estimator_to_num_samples[estimator]
  rng = jax.random.PRNGKey(1)

  mean = 1.5 * _ones(data_dims)
  log_scale = 0.0 * _ones(data_dims)

  jacobians = _estimator_variant(self.variant, estimator)(
      lambda x: jnp.array(constant), [mean, log_scale],
      utils.multi_normal, rng, num_samples)
  mean_jacobians, log_scale_jacobians = jacobians

  chex.assert_shape(mean_jacobians, (num_samples, data_dims))
  chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))

  # Average over the number of samples; both gradients should be ~zero.
  mean_grads = np.mean(mean_jacobians, axis=0)
  log_scale_grads = np.mean(log_scale_jacobians, axis=0)
  zeros = np.zeros(data_dims, dtype=np.float32)

  _assert_equal(mean_grads, zeros, atol=5e-3)
  _assert_equal(log_scale_grads, zeros, atol=5e-3)
|
| 118 |
+
|
| 119 |
+
@chex.all_variants
@parameterized.named_parameters(
    chex.params_product([
        ('_score_function_jacobians', sge.score_function_jacobians),
        ('_pathwise_jacobians', sge.pathwise_jacobians),
        ('_measure_valued_jacobians', sge.measure_valued_jacobians),
    ], [
        ('0.5_-1.', 0.5, -1.),
        ('0.7_0.0)', 0.7, 0.0),
        ('0.8_0.1', 0.8, 0.1),
    ],
    named=True))
def testLinearFunction(self, estimator, effective_mean, effective_log_scale):
  """For f(x) = sum(x): mean grads are 1, log-scale grads are 0."""
  data_dims = 3
  num_samples = _estimator_to_num_samples[estimator]
  rng = jax.random.PRNGKey(1)

  mean = effective_mean * _ones(data_dims)
  log_scale = effective_log_scale * _ones(data_dims)

  jacobians = _estimator_variant(self.variant, estimator)(
      np.sum, [mean, log_scale],
      utils.multi_normal, rng, num_samples)
  mean_jacobians, log_scale_jacobians = jacobians

  chex.assert_shape(mean_jacobians, (num_samples, data_dims))
  chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))

  mean_grads = np.mean(mean_jacobians, axis=0)
  log_scale_grads = np.mean(log_scale_jacobians, axis=0)

  _assert_equal(mean_grads, np.ones(data_dims, dtype=np.float32))
  _assert_equal(log_scale_grads, np.zeros(data_dims, dtype=np.float32))
|
| 155 |
+
|
| 156 |
+
@chex.all_variants
|
| 157 |
+
@parameterized.named_parameters(
|
| 158 |
+
chex.params_product([
|
| 159 |
+
('_score_function_jacobians', sge.score_function_jacobians),
|
| 160 |
+
('_pathwise_jacobians', sge.pathwise_jacobians),
|
| 161 |
+
('_measure_valued_jacobians', sge.measure_valued_jacobians),
|
| 162 |
+
], [
|
| 163 |
+
('1.0_0.3', 1.0, 0.3),
|
| 164 |
+
],
|
| 165 |
+
named=True))
|
| 166 |
+
def testQuadraticFunction(
|
| 167 |
+
self, estimator, effective_mean, effective_log_scale):
|
| 168 |
+
data_dims = 3
|
| 169 |
+
num_samples = _estimator_to_num_samples[estimator]
|
| 170 |
+
rng = jax.random.PRNGKey(1)
|
| 171 |
+
|
| 172 |
+
mean = effective_mean * _ones(data_dims)
|
| 173 |
+
log_scale = effective_log_scale * _ones(data_dims)
|
| 174 |
+
|
| 175 |
+
jacobians = _estimator_variant(self.variant, estimator)(
|
| 176 |
+
lambda x: np.sum(x**2) / 2, [mean, log_scale],
|
| 177 |
+
utils.multi_normal, rng, num_samples)
|
| 178 |
+
|
| 179 |
+
mean_jacobians = jacobians[0]
|
| 180 |
+
chex.assert_shape(mean_jacobians, (num_samples, data_dims))
|
| 181 |
+
mean_grads = np.mean(mean_jacobians, axis=0)
|
| 182 |
+
expected_mean_grads = effective_mean * np.ones(
|
| 183 |
+
data_dims, dtype=np.float32)
|
| 184 |
+
|
| 185 |
+
log_scale_jacobians = jacobians[1]
|
| 186 |
+
chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
|
| 187 |
+
log_scale_grads = np.mean(log_scale_jacobians, axis=0)
|
| 188 |
+
expected_log_scale_grads = np.exp(2 * effective_log_scale) * np.ones(
|
| 189 |
+
data_dims, dtype=np.float32)
|
| 190 |
+
|
| 191 |
+
_assert_equal(mean_grads, expected_mean_grads, atol=5e-2)
|
| 192 |
+
_assert_equal(log_scale_grads, expected_log_scale_grads, atol=5e-2)
|
| 193 |
+
|
| 194 |
+
@chex.all_variants
|
| 195 |
+
@parameterized.named_parameters(
|
| 196 |
+
chex.params_product([
|
| 197 |
+
('_score_function_jacobians', sge.score_function_jacobians),
|
| 198 |
+
('_pathwise_jacobians', sge.pathwise_jacobians),
|
| 199 |
+
('_measure_valued_jacobians', sge.measure_valued_jacobians),
|
| 200 |
+
], [
|
| 201 |
+
('case_1', [1.0, 2.0, 3.], [-1., 0.3, -2.], [1., 1., 1.]),
|
| 202 |
+
('case_2', [1.0, 2.0, 3.], [-1., 0.3, -2.], [4., 2., 3.]),
|
| 203 |
+
('case_3', [1.0, 2.0, 3.], [0.1, 0.2, 0.1], [10., 5., 1.]),
|
| 204 |
+
],
|
| 205 |
+
named=True))
|
| 206 |
+
def testWeightedLinear(
|
| 207 |
+
self, estimator, effective_mean, effective_log_scale, weights):
|
| 208 |
+
num_samples = _weighted_estimator_to_num_samples[estimator]
|
| 209 |
+
rng = jax.random.PRNGKey(1)
|
| 210 |
+
|
| 211 |
+
mean = jnp.array(effective_mean)
|
| 212 |
+
log_scale = jnp.array(effective_log_scale)
|
| 213 |
+
weights = jnp.array(weights)
|
| 214 |
+
|
| 215 |
+
data_dims = len(effective_mean)
|
| 216 |
+
|
| 217 |
+
function = lambda x: jnp.sum(x * weights)
|
| 218 |
+
jacobians = _estimator_variant(self.variant, estimator)(
|
| 219 |
+
function, [mean, log_scale],
|
| 220 |
+
utils.multi_normal, rng, num_samples)
|
| 221 |
+
|
| 222 |
+
mean_jacobians = jacobians[0]
|
| 223 |
+
chex.assert_shape(mean_jacobians, (num_samples, data_dims))
|
| 224 |
+
mean_grads = np.mean(mean_jacobians, axis=0)
|
| 225 |
+
|
| 226 |
+
log_scale_jacobians = jacobians[1]
|
| 227 |
+
chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
|
| 228 |
+
log_scale_grads = np.mean(log_scale_jacobians, axis=0)
|
| 229 |
+
|
| 230 |
+
expected_mean_grads = weights
|
| 231 |
+
expected_log_scale_grads = np.zeros(data_dims, dtype=np.float32)
|
| 232 |
+
|
| 233 |
+
_assert_equal(mean_grads, expected_mean_grads, atol=5e-2)
|
| 234 |
+
_assert_equal(log_scale_grads, expected_log_scale_grads, atol=5e-2)
|
| 235 |
+
|
| 236 |
+
@chex.all_variants
|
| 237 |
+
@parameterized.named_parameters(
|
| 238 |
+
chex.params_product([
|
| 239 |
+
('_score_function_jacobians', sge.score_function_jacobians),
|
| 240 |
+
('_pathwise_jacobians', sge.pathwise_jacobians),
|
| 241 |
+
('_measure_valued_jacobians', sge.measure_valued_jacobians),
|
| 242 |
+
], [
|
| 243 |
+
('case_1', [1.0, 2.0, 3.], [-1., 0.3, -2.], [1., 1., 1.]),
|
| 244 |
+
('case_2', [1.0, 2.0, 3.], [-1., 0.3, -2.], [4., 2., 3.]),
|
| 245 |
+
('case_3', [1.0, 2.0, 3.], [0.1, 0.2, 0.1], [3., 5., 1.]),
|
| 246 |
+
],
|
| 247 |
+
named=True))
|
| 248 |
+
def testWeightedQuadratic(
|
| 249 |
+
self, estimator, effective_mean, effective_log_scale, weights):
|
| 250 |
+
num_samples = _weighted_estimator_to_num_samples[estimator]
|
| 251 |
+
rng = jax.random.PRNGKey(1)
|
| 252 |
+
|
| 253 |
+
mean = jnp.array(effective_mean, dtype=jnp.float32)
|
| 254 |
+
log_scale = jnp.array(effective_log_scale, dtype=jnp.float32)
|
| 255 |
+
weights = jnp.array(weights, dtype=jnp.float32)
|
| 256 |
+
|
| 257 |
+
data_dims = len(effective_mean)
|
| 258 |
+
|
| 259 |
+
function = lambda x: jnp.sum(x * weights) ** 2
|
| 260 |
+
jacobians = _estimator_variant(self.variant, estimator)(
|
| 261 |
+
function, [mean, log_scale],
|
| 262 |
+
utils.multi_normal, rng, num_samples)
|
| 263 |
+
|
| 264 |
+
mean_jacobians = jacobians[0]
|
| 265 |
+
chex.assert_shape(mean_jacobians, (num_samples, data_dims))
|
| 266 |
+
mean_grads = np.mean(mean_jacobians, axis=0)
|
| 267 |
+
|
| 268 |
+
log_scale_jacobians = jacobians[1]
|
| 269 |
+
chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
|
| 270 |
+
log_scale_grads = np.mean(log_scale_jacobians, axis=0)
|
| 271 |
+
|
| 272 |
+
expected_mean_grads = 2 * weights * np.sum(weights * mean)
|
| 273 |
+
effective_scale = np.exp(log_scale)
|
| 274 |
+
expected_scale_grads = 2 * weights ** 2 * effective_scale
|
| 275 |
+
expected_log_scale_grads = expected_scale_grads * effective_scale
|
| 276 |
+
|
| 277 |
+
_assert_equal(mean_grads, expected_mean_grads, atol=1e-1, rtol=1e-1)
|
| 278 |
+
_assert_equal(
|
| 279 |
+
log_scale_grads, expected_log_scale_grads, atol=1e-1, rtol=1e-1)
|
| 280 |
+
|
| 281 |
+
@chex.all_variants
|
| 282 |
+
@parameterized.named_parameters(
|
| 283 |
+
chex.params_product(
|
| 284 |
+
[
|
| 285 |
+
('_sum_cos_x', [1.0], [1.0], lambda x: jnp.sum(jnp.cos(x))),
|
| 286 |
+
# Need to ensure that the mean is not too close to 0.
|
| 287 |
+
('_sum_log_x', [10.0], [0.0], lambda x: jnp.sum(jnp.log(x))),
|
| 288 |
+
('_sum_cos_2x', [1.0, 2.0], [1.0, -2
|
| 289 |
+
], lambda x: jnp.sum(jnp.cos(2 * x))),
|
| 290 |
+
('_cos_sum_2x', [1.0, 2.0], [1.0, -2
|
| 291 |
+
], lambda x: jnp.cos(jnp.sum(2 * x))),
|
| 292 |
+
],
|
| 293 |
+
[
|
| 294 |
+
('coupling', True),
|
| 295 |
+
('nocoupling', False),
|
| 296 |
+
],
|
| 297 |
+
named=True))
|
| 298 |
+
def testNonPolynomialFunctionConsistencyWithPathwise(self, effective_mean,
|
| 299 |
+
effective_log_scale,
|
| 300 |
+
function, coupling):
|
| 301 |
+
num_samples = 10**5
|
| 302 |
+
rng = jax.random.PRNGKey(1)
|
| 303 |
+
measure_rng, pathwise_rng = jax.random.split(rng)
|
| 304 |
+
|
| 305 |
+
mean = jnp.array(effective_mean, dtype=jnp.float32)
|
| 306 |
+
log_scale = jnp.array(effective_log_scale, dtype=jnp.float32)
|
| 307 |
+
data_dims = len(effective_mean)
|
| 308 |
+
|
| 309 |
+
measure_valued_jacobians = _measure_valued_variant(self.variant)(
|
| 310 |
+
function, [mean, log_scale],
|
| 311 |
+
utils.multi_normal, measure_rng, num_samples, coupling)
|
| 312 |
+
|
| 313 |
+
measure_valued_mean_jacobians = measure_valued_jacobians[0]
|
| 314 |
+
chex.assert_shape(measure_valued_mean_jacobians, (num_samples, data_dims))
|
| 315 |
+
measure_valued_mean_grads = np.mean(measure_valued_mean_jacobians, axis=0)
|
| 316 |
+
|
| 317 |
+
measure_valued_log_scale_jacobians = measure_valued_jacobians[1]
|
| 318 |
+
chex.assert_shape(
|
| 319 |
+
measure_valued_log_scale_jacobians, (num_samples, data_dims))
|
| 320 |
+
measure_valued_log_scale_grads = np.mean(
|
| 321 |
+
measure_valued_log_scale_jacobians, axis=0)
|
| 322 |
+
|
| 323 |
+
pathwise_jacobians = _estimator_variant(
|
| 324 |
+
self.variant, sge.pathwise_jacobians)(function, [mean, log_scale],
|
| 325 |
+
utils.multi_normal, pathwise_rng,
|
| 326 |
+
num_samples)
|
| 327 |
+
|
| 328 |
+
pathwise_mean_jacobians = pathwise_jacobians[0]
|
| 329 |
+
chex.assert_shape(pathwise_mean_jacobians, (num_samples, data_dims))
|
| 330 |
+
pathwise_mean_grads = np.mean(pathwise_mean_jacobians, axis=0)
|
| 331 |
+
|
| 332 |
+
pathwise_log_scale_jacobians = pathwise_jacobians[1]
|
| 333 |
+
chex.assert_shape(pathwise_log_scale_jacobians, (num_samples, data_dims))
|
| 334 |
+
pathwise_log_scale_grads = np.mean(pathwise_log_scale_jacobians, axis=0)
|
| 335 |
+
|
| 336 |
+
_assert_equal(
|
| 337 |
+
pathwise_mean_grads, measure_valued_mean_grads, rtol=5e-1, atol=1e-1)
|
| 338 |
+
_assert_equal(
|
| 339 |
+
pathwise_log_scale_grads, measure_valued_log_scale_grads,
|
| 340 |
+
rtol=5e-1, atol=1e-1)
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
class MeasuredValuedEstimatorsTest(chex.TestCase):
  """Input validation for the measure-valued estimator."""

  @chex.all_variants
  @parameterized.parameters([True, False])
  def testRaisesErrorForNonGaussian(self, coupling):
    """A non-Gaussian distribution must be rejected with a ValueError."""
    rng = jax.random.PRNGKey(1)
    num_samples = 10**5

    function = lambda x: jnp.sum(x) ** 2

    mean = jnp.array(0, dtype=jnp.float32)
    log_scale = jnp.array(0., dtype=jnp.float32)

    class FakeDistribution():
      """Minimal non-Gaussian stand-in distribution."""

      def __init__(self, params):
        self._params = params

      def sample(self, n):
        return np.zeros(n)

    with self.assertRaises(ValueError):
      _measure_valued_variant(self.variant)(
          function, [mean, log_scale],
          FakeDistribution, rng, num_samples, coupling)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
|
lib/python3.10/site-packages/optax/_src/test_utils.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Testing utilities for Optax."""
|
| 16 |
+
|
| 17 |
+
import inspect
|
| 18 |
+
import types
|
| 19 |
+
from typing import Sequence, Tuple
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def find_internal_python_modules(
    root_module: types.ModuleType,
) -> Sequence[Tuple[str, types.ModuleType]]:
  """Returns `(name, module)` for all Optax submodules under `root_module`."""
  found = {(root_module.__name__, root_module)}
  seen = set()
  pending = [root_module]

  # Depth-first walk over module attributes, restricted to public optax
  # modules; the private `_src` tree is deliberately excluded.
  while pending:
    current = pending.pop()
    seen.add(current)
    for attr_name in dir(current):
      candidate = getattr(current, attr_name)
      if not inspect.ismodule(candidate) or candidate in seen:
        continue
      if candidate.__name__.startswith('optax') and (
          '_src' not in candidate.__name__):
        pending.append(candidate)
        found.add((candidate.__name__, candidate))

  return sorted(found)
|
lib/python3.10/site-packages/optax/_src/transform.py
ADDED
|
@@ -0,0 +1,1143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Gradient transformations."""
|
| 16 |
+
|
| 17 |
+
import functools
|
| 18 |
+
from typing import Any, Callable, NamedTuple, Optional, Union
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
|
| 24 |
+
from optax._src import base
|
| 25 |
+
from optax._src import clipping
|
| 26 |
+
from optax._src import numerics
|
| 27 |
+
from optax._src import utils
|
| 28 |
+
from optax._src import wrappers
|
| 29 |
+
|
| 30 |
+
# pylint:disable=no-value-for-parameter
|
| 31 |
+
|
| 32 |
+
_abs_sq = numerics.abs_sq
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class TraceState(NamedTuple):
  """Holds an aggregation of past updates."""
  trace: base.Params  # Tree matching the params, accumulating decayed updates.
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def trace(
    decay: float,
    nesterov: bool = False,
    accumulator_dtype: Optional[Any] = None,
) -> base.GradientTransformation:
  """Compute a trace of past updates.

  Note: `trace` and `ema` have very similar but distinct updates;
  `trace = decay * trace + t`, while `ema = decay * ema + (1-decay) * t`.
  Both are frequently found in the optimization literature.

  Args:
    decay: Decay rate for the trace of past updates.
    nesterov: Whether to use Nesterov momentum.
    accumulator_dtype: Optional `dtype` to be used for the accumulator; if
      `None` then the `dtype` is inferred from `params` and `updates`.

  Returns:
    A `GradientTransformation` object.
  """

  accumulator_dtype = utils.canonicalize_dtype(accumulator_dtype)

  def init_fn(params):
    zeros_like = lambda t: jnp.zeros_like(t, dtype=accumulator_dtype)
    return TraceState(trace=jax.tree_util.tree_map(zeros_like, params))

  def update_fn(updates, state, params=None):
    del params

    def combine(g, t):
      return g + decay * t

    new_trace = jax.tree_util.tree_map(combine, updates, state.trace)
    if nesterov:
      # Nesterov momentum applies the decayed trace to the updates once more.
      updates = jax.tree_util.tree_map(combine, updates, new_trace)
    else:
      updates = new_trace
    # Cast only the stored state; the emitted updates keep full precision.
    new_trace = utils.cast_tree(new_trace, accumulator_dtype)
    return updates, TraceState(trace=new_trace)

  return base.GradientTransformation(init_fn, update_fn)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def update_moment(updates, moments, decay, order):
  """Compute the exponential moving average of the `order`-th moment."""
  def ema_leaf(g, t):
    return (1 - decay) * (g ** order) + decay * t
  return jax.tree_util.tree_map(ema_leaf, updates, moments)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def update_infinity_moment(updates, moments, decay, eps):
  """Compute the exponential moving average of the infinity norm."""
  def leaf(g, t):
    # Keep whichever is larger: the eps-padded new magnitude or the decayed
    # running maximum (the Adamax update rule).
    return jnp.maximum(jnp.abs(g) + eps, decay * t)
  return jax.tree_util.tree_map(leaf, updates, moments)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def update_moment_per_elem_norm(updates, moments, decay, order):
  """Compute the EMA of the `order`-th moment of the element-wise norm."""

  def orderth_norm(g):
    if jnp.isrealobj(g):
      return g ** order
    # For complex leaves use |g|^order = (|g|^2)^(order/2).
    half_order = order / 2
    # JAX generates different HLO for int and float `order`.
    if half_order.is_integer():
      half_order = int(half_order)
    return _abs_sq(g) ** half_order

  def ema_leaf(g, t):
    return (1 - decay) * orderth_norm(g) + decay * t

  return jax.tree_util.tree_map(ema_leaf, updates, moments)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
@functools.partial(jax.jit, inline=True)
def bias_correction(moment, decay, count):
  """Performs bias correction. It becomes a no-op as count goes to infinity."""
  # The conversion to the data type of the moment ensures that bfloat16 remains
  # bfloat16 in the optimizer state. This conversion has to be done after
  # the denominator is calculated, as computing `decay**count` in low precision
  # can round to 1 and subsequently trigger a "division by zero" error.
  denominator = 1 - decay**count

  def correct(t):
    # Perform division in the original precision.
    return t / denominator.astype(t.dtype)

  return jax.tree_util.tree_map(correct, moment)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def _reject_complex(params):
|
| 126 |
+
if any(jnp.iscomplexobj(x) for x in jax.tree_util.tree_leaves(params)):
|
| 127 |
+
raise ValueError('This transformation does not support complex parameters.')
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class EmaState(NamedTuple):
  """Holds an exponential moving average of past updates."""
  count: chex.Array  # shape=(), dtype=jnp.int32.
  ema: base.Params  # Tree of EMAs matching the params structure.
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def ema(
    decay: float,
    debias: bool = True,
    accumulator_dtype: Optional[Any] = None
) -> base.GradientTransformation:
  """Compute an exponential moving average of past updates.

  Note: `trace` and `ema` have very similar but distinct updates;
  `ema = decay * ema + (1-decay) * t`, while `trace = decay * trace + t`.
  Both are frequently found in the optimization literature.

  Args:
    decay: Decay rate for the exponential moving average.
    debias: Whether to debias the transformed gradient.
    accumulator_dtype: Optional `dtype` to used for the accumulator; if `None`
      then the `dtype` is inferred from `params` and `updates`.

  Returns:
    A `GradientTransformation` object.
  """

  accumulator_dtype = utils.canonicalize_dtype(accumulator_dtype)

  def init_fn(params):
    return EmaState(
        count=jnp.zeros([], jnp.int32),
        ema=jax.tree_util.tree_map(
            lambda t: jnp.zeros_like(t, dtype=accumulator_dtype), params))

  def update_fn(updates, state, params=None):
    del params
    updates = new_ema = update_moment(updates, state.ema, decay, order=1)
    # Use `numerics.safe_int32_increment` (consistent with `scale_by_adam` in
    # this file) so the step counter saturates instead of overflowing.
    count_inc = numerics.safe_int32_increment(state.count)
    if debias:
      updates = bias_correction(new_ema, decay, count_inc)
    # Only the stored state is cast; debiased updates keep full precision.
    state_ema = utils.cast_tree(new_ema, accumulator_dtype)
    return updates, EmaState(count=count_inc, ema=state_ema)

  return base.GradientTransformation(init_fn, update_fn)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
class ScaleByRssState(NamedTuple):
  """State holding the sum of gradient squares to date."""
  sum_of_squares: base.Updates  # Running per-element sum of squared gradients.
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
def scale_by_rss(
    initial_accumulator_value: float = 0.1,
    eps: float = 1e-7
) -> base.GradientTransformation:
  """Rescale updates by the root of the sum of all squared gradients to date.

  References:
    [Duchi et al, 2011](https://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
    [McMahan et al., 2010](https://arxiv.org/abs/1002.4908)

  Args:
    initial_accumulator_value: Starting value for accumulators, must be >= 0.
    eps: A small floating point value to avoid zero denominator.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    return ScaleByRssState(
        sum_of_squares=jax.tree_util.tree_map(
            lambda t: jnp.full_like(t, initial_accumulator_value), params))

  def update_fn(updates, state, params=None):
    del params
    new_sum_of_squares = jax.tree_util.tree_map(
        lambda g, t: _abs_sq(g) + t, updates, state.sum_of_squares)
    # Where the accumulator is non-positive the scale is zero, avoiding
    # NaN/inf from rsqrt.
    scaling = jax.tree_util.tree_map(
        lambda t: jnp.where(t > 0, jax.lax.rsqrt(t + eps), 0.0),
        new_sum_of_squares)
    scaled_updates = jax.tree_util.tree_map(
        lambda scale, g: scale * g, scaling, updates)
    return scaled_updates, ScaleByRssState(sum_of_squares=new_sum_of_squares)

  return base.GradientTransformation(init_fn, update_fn)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
class ScaleByRmsState(NamedTuple):
  """State for exponential root mean-squared (RMS)-normalized updates."""
  nu: base.Updates  # EMA of the squared gradients (second moment).
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def scale_by_rms(
    decay: float = 0.9,
    eps: float = 1e-8,
    initial_scale: float = 0.
) -> base.GradientTransformation:
  """Rescale updates by the root of the exp. moving avg of the square.

  References:
    [Hinton](www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)

  Args:
    decay: Decay rate for the exponentially weighted average of squared grads.
    eps: Term added to the denominator to improve numerical stability.
    initial_scale: Initial value for second moment.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    # Second-moment accumulator, initialised to `initial_scale`.
    second_moment = jax.tree_util.tree_map(
        lambda n: jnp.full_like(n, initial_scale), params)
    return ScaleByRmsState(nu=second_moment)

  def update_fn(updates, state, params=None):
    del params
    nu = update_moment_per_elem_norm(updates, state.nu, decay, 2)

    def normalise(g, n):
      return g * jax.lax.rsqrt(n + eps)

    scaled = jax.tree_util.tree_map(normalise, updates, nu)
    return scaled, ScaleByRmsState(nu=nu)

  return base.GradientTransformation(init_fn, update_fn)
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
class ScaleByRStdDevState(NamedTuple):
  """State for centered exponential moving average of squares of updates."""
  mu: base.Updates  # EMA of the gradients (first moment).
  nu: base.Updates  # EMA of the squared gradients (second moment).
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def scale_by_stddev(
    decay: float = 0.9,
    eps: float = 1e-8,
    initial_scale: float = 0.
) -> base.GradientTransformation:
  """Rescale updates by the root of the centered exp. moving average of squares.

  References:
    [Hinton](www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)

  Args:
    decay: Decay rate for the exponentially weighted average of squared grads.
    eps: Term added to the denominator to improve numerical stability.
    initial_scale: Initial value for second moment.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    first_moment = jax.tree_util.tree_map(jnp.zeros_like, params)
    second_moment = jax.tree_util.tree_map(
        lambda n: jnp.full_like(n, initial_scale), params)
    return ScaleByRStdDevState(mu=first_moment, nu=second_moment)

  def update_fn(updates, state, params=None):
    del params
    mu = update_moment(updates, state.mu, decay, 1)
    nu = update_moment_per_elem_norm(updates, state.nu, decay, 2)

    def normalise(g, m, n):
      # Variance estimate is E[g^2] - E[g]^2; eps keeps the rsqrt finite.
      return g * jax.lax.rsqrt(n - _abs_sq(m) + eps)

    scaled = jax.tree_util.tree_map(normalise, updates, mu, nu)
    return scaled, ScaleByRStdDevState(mu=mu, nu=nu)

  return base.GradientTransformation(init_fn, update_fn)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
class ScaleByAdamState(NamedTuple):
  """State for the Adam algorithm."""
  count: chex.Array  # shape=(), dtype=jnp.int32.
  mu: base.Updates  # EMA of the gradients (first moment).
  nu: base.Updates  # EMA of the squared gradients (second moment).
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
def scale_by_adam(
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
    eps_root: float = 0.0,
    mu_dtype: Optional[Any] = None,
) -> base.GradientTransformation:
  """Rescale updates according to the Adam algorithm.

  References:
    [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)

  Args:
    b1: Decay rate for the exponentially weighted average of grads.
    b2: Decay rate for the exponentially weighted average of squared grads.
    eps: Term added to the denominator to improve numerical stability.
    eps_root: Term added to the denominator inside the square-root to improve
      numerical stability when backpropagating gradients through the rescaling.
    mu_dtype: Optional `dtype` to be used for the first order accumulator; if
      `None` then the `dtype` is inferred from `params` and `updates`.

  Returns:
    A `GradientTransformation` object.
  """

  mu_dtype = utils.canonicalize_dtype(mu_dtype)

  def init_fn(params):
    mu = jax.tree_util.tree_map(  # First moment
        lambda t: jnp.zeros_like(t, dtype=mu_dtype), params)
    nu = jax.tree_util.tree_map(jnp.zeros_like, params)  # Second moment
    return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)

  def update_fn(updates, state, params=None):
    del params
    mu = update_moment(updates, state.mu, b1, 1)
    nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
    # safe_int32_increment avoids int32 wrap-around of the step counter.
    count_inc = numerics.safe_int32_increment(state.count)
    mu_hat = bias_correction(mu, b1, count_inc)
    nu_hat = bias_correction(nu, b2, count_inc)
    updates = jax.tree_util.tree_map(
        lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
    # Only the stored first moment is cast; the emitted updates are not.
    mu = utils.cast_tree(mu, mu_dtype)
    return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)

  return base.GradientTransformation(init_fn, update_fn)
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
class ScaleByAmsgradState(NamedTuple):
  """State for the AMSGrad algorithm."""
  count: chex.Array  # shape=(), dtype=jnp.int32.
  mu: base.Updates  # First moment: EMA of the updates.
  nu: base.Updates  # Second moment: EMA of the squared updates.
  nu_max: base.Updates  # Running elementwise max of bias-corrected nu.
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def scale_by_amsgrad(
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
    eps_root: float = 0.0,
    mu_dtype: Optional[Any] = None,
) -> base.GradientTransformation:
  """Rescale updates according to the AMSGrad algorithm.

  References:
    [Reddi et al, 2018](https://openreview.net/forum?id=ryQu7f-RZ)

  Args:
    b1: Decay rate for the exponentially weighted average of grads.
    b2: Decay rate for the exponentially weighted average of squared grads.
    eps: Term added to the denominator to improve numerical stability.
    eps_root: Term added to the denominator inside the square-root to improve
      numerical stability when backpropagating gradients through the rescaling.
    mu_dtype: Optional `dtype` to be used for the first order accumulator; if
      `None` then the `dtype` is inferred from `params` and `updates`.

  Returns:
    A `GradientTransformation` object.
  """

  mu_dtype = utils.canonicalize_dtype(mu_dtype)

  def init_fn(params):
    mu = jax.tree_util.tree_map(  # First moment
        lambda t: jnp.zeros_like(t, dtype=mu_dtype), params)
    nu = jax.tree_util.tree_map(jnp.zeros_like, params)  # Second moment
    nu_max = jax.tree_util.tree_map(jnp.zeros_like, params)
    return ScaleByAmsgradState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu,
                               nu_max=nu_max)

  def update_fn(updates, state, params=None):
    del params
    mu = update_moment(updates, state.mu, b1, 1)
    nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
    count_inc = numerics.safe_int32_increment(state.count)
    mu_hat = bias_correction(mu, b1, count_inc)
    nu_hat = bias_correction(nu, b2, count_inc)
    # AMSGrad: precondition with the running max of nu_hat, never letting the
    # effective learning rate grow between steps.
    nu_max = jax.tree_util.tree_map(jnp.maximum, state.nu_max, nu_hat)
    updates = jax.tree_util.tree_map(
        lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_max)
    mu = utils.cast_tree(mu, mu_dtype)
    return updates, ScaleByAmsgradState(count=count_inc, mu=mu, nu=nu,
                                        nu_max=nu_max)

  return base.GradientTransformation(init_fn, update_fn)
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def scale_by_adamax(
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8
) -> base.GradientTransformation:
  """Rescale updates according to the Adamax algorithm.

  References:
    [Kingma et al, 2014](https://arxiv.org/abs/1412.6980)

  Args:
    b1: Decay rate for the exponentially weighted average of grads.
    b2: Decay rate for the exponentially weighted maximum of grads.
    eps: Term added to the denominator to improve numerical stability.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    mu = jax.tree_util.tree_map(jnp.zeros_like, params)  # First moment
    nu = jax.tree_util.tree_map(jnp.zeros_like, params)  # Infinite moment
    # Reuses ScaleByAdamState; here `nu` holds the infinity-norm estimate.
    return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)

  def update_fn(updates, state, params=None):
    del params
    count_inc = numerics.safe_int32_increment(state.count)
    mu = update_moment(updates, state.mu, b1, 1)
    nu = update_infinity_moment(updates, state.nu, b2, eps)
    # Bias correction for mean. No bias correction needed for infinity moment.
    mu_hat = bias_correction(mu, b1, count_inc)
    updates = jax.tree_util.tree_map(lambda m, v: m / v, mu_hat, nu)
    return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)

  return base.GradientTransformation(init_fn, update_fn)
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
# `scale` is stateless; alias kept for a descriptive state-type name.
ScaleState = base.EmptyState
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
def scale(
    step_size: float
) -> base.GradientTransformation:
  """Scale updates by some fixed scalar `step_size`.

  Args:
    step_size: A scalar corresponding to a fixed scaling factor for updates.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    # Stateless transformation: nothing to initialize.
    del params
    return ScaleState()

  def update_fn(updates, state, params=None):
    del params
    # Multiply every leaf of the update pytree by the fixed factor.
    scaled_updates = jax.tree_util.tree_map(
        lambda leaf: leaf * step_size, updates)
    return scaled_updates, state

  return base.GradientTransformation(init_fn, update_fn)
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
def scale_by_param_block_norm(
    min_scale: float = 1e-3
) -> base.GradientTransformation:
  """Scale updates for each param block by the norm of that block's parameters.

  A `block` is here a weight vector (e.g. in a Linear layer) or a weight matrix
  (e.g. in a convolutional layer) appearing as a leaf in the grads/param pytree.

  Args:
    min_scale: Minimum scaling factor.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    del params
    return base.EmptyState()

  def update_fn(updates, state, params):
    # Params are required here (unlike most transforms) to compute the norms.
    if params is None:
      raise ValueError(base.NO_PARAMS_MSG)
    updates = jax.tree_util.tree_map(
        lambda u, p: u * numerics.safe_norm(p, min_scale),
        updates, params)
    return updates, state

  return base.GradientTransformation(init_fn, update_fn)
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
def scale_by_param_block_rms(
    min_scale: float = 1e-3
) -> base.GradientTransformation:
  """Scale updates by rms of the gradient for each param vector or matrix.

  A `block` is here a weight vector (e.g. in a Linear layer) or a weight matrix
  (e.g. in a convolutional layer) appearing as a leaf in the grads/param pytree.

  Args:
    min_scale: Minimum scaling factor.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    del params
    return base.EmptyState()

  def update_fn(updates, state, params):
    # Params are required here (unlike most transforms) to compute the RMS.
    if params is None:
      raise ValueError(base.NO_PARAMS_MSG)
    updates = jax.tree_util.tree_map(
        lambda u, p: u * numerics.safe_root_mean_squares(p, min_scale),
        updates, params)
    return updates, state

  return base.GradientTransformation(init_fn, update_fn)
|
| 537 |
+
|
| 538 |
+
|
| 539 |
+
class ScaleByBeliefState(NamedTuple):
  """State for the rescaling by AdaBelief algorithm."""
  count: chex.Array  # shape=(), dtype=jnp.int32.
  mu: base.Updates  # First moment: EMA of the updates.
  nu: base.Updates  # EMA of the squared prediction error (g - mu).
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
def scale_by_belief(
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-16,
    eps_root: float = 1e-16
) -> base.GradientTransformation:
  """Rescale updates according to the AdaBelief algorithm.

  References:
    [Zhuang et al, 2020](https://arxiv.org/abs/2010.07468)

  Args:
    b1: Decay rate for the exponentially weighted average of grads.
    b2: Decay rate for the exponentially weighted average of variance of grads.
    eps: Term added to the denominator to improve numerical stability.
    eps_root: Term added to the second moment of the prediction error to
      improve numerical stability. If backpropagating gradients through the
      gradient transformation (e.g. for meta-learning), this must be non-zero.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    mu = jax.tree_util.tree_map(jnp.zeros_like, params)  # First moment
    s = jax.tree_util.tree_map(jnp.zeros_like, params)  # Second Central moment
    return ScaleByBeliefState(count=jnp.zeros([], jnp.int32), mu=mu, nu=s)

  def update_fn(updates, state, params=None):
    del params
    mu = update_moment(updates, state.mu, b1, 1)
    # Deviation of the gradient from the *previous* first-moment estimate
    # (state.mu, not the freshly-updated mu).
    prediction_error = jax.tree_util.tree_map(
        lambda g, m: g-m, updates, state.mu)
    nu = update_moment_per_elem_norm(prediction_error, state.nu, b2, 2)
    # eps_root is added before bias correction, unlike eps in the divisor.
    nu = jax.tree_util.tree_map(lambda v: v + eps_root, nu)
    count_inc = numerics.safe_int32_increment(state.count)
    mu_hat = bias_correction(mu, b1, count_inc)
    nu_hat = bias_correction(nu, b2, count_inc)
    updates = jax.tree_util.tree_map(
        lambda m, v: m / (jnp.sqrt(v) + eps), mu_hat, nu_hat)
    return updates, ScaleByBeliefState(count=count_inc, mu=mu, nu=nu)

  return base.GradientTransformation(init_fn, update_fn)
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
def scale_by_yogi(
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-3,
    eps_root: float = 0.0,
    initial_accumulator_value: float = 1e-6
) -> base.GradientTransformation:
  """Rescale updates according to the Yogi algorithm.

  Supports complex numbers, see
  https://gist.github.com/wdphy16/118aef6fb5f82c49790d7678cf87da29

  References:
    [Zaheer et al, 2018](https://papers.nips.cc/paper/2018/hash/90365351ccc7437a1309dc64e4db32a3-Abstract.html) #pylint:disable=line-too-long

  Args:
    b1: Decay rate for the exponentially weighted average of grads.
    b2: Decay rate for the exponentially weighted average of variance of grads.
    eps: Term added to the denominator to improve numerical stability.
    eps_root: Term added to the denominator inside the square-root to improve
      numerical stability when backpropagating gradients through the rescaling.
    initial_accumulator_value: The starting value for accumulators.
      Only positive values are allowed.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    value_like = lambda p: jnp.full_like(p, initial_accumulator_value)
    mu = jax.tree_util.tree_map(value_like, params)  # First moment
    nu = jax.tree_util.tree_map(value_like, params)  # Second Central moment
    return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)

  def update_fn(updates, state, params=None):
    del params
    mu = update_moment(updates, state.mu, b1, 1)
    # Yogi's additive second-moment update: step towards |g|^2 by a fixed
    # signed amount, rather than Adam's multiplicative EMA.
    nu = jax.tree_util.tree_map(
        lambda g, v: v - (1 - b2) * jnp.sign(v - _abs_sq(g)) * _abs_sq(g),
        updates, state.nu)
    count_inc = numerics.safe_int32_increment(state.count)
    mu_hat = bias_correction(mu, b1, count_inc)
    nu_hat = bias_correction(nu, b2, count_inc)
    updates = jax.tree_util.tree_map(
        lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
    return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)

  return base.GradientTransformation(init_fn, update_fn)
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
def scale_by_radam(
    b1: float = 0.9,
    b2: float = 0.999,
    eps: float = 1e-8,
    eps_root: float = 0.0,
    threshold: float = 5.0
) -> base.GradientTransformation:
  """Rescale updates according to the Rectified Adam algorithm.

  References:
    [Liu et al, 2020](https://arxiv.org/abs/1908.03265)

  Args:
    b1: Decay rate for the exponentially weighted average of grads.
    b2: Decay rate for the exponentially weighted average of squared grads.
    eps: Term added to the denominator to improve numerical stability.
    eps_root: Term added to the denominator inside the square-root to improve
      numerical stability when backpropagating gradients through the rescaling.
    threshold: Threshold for variance tractability.

  Returns:
    A `GradientTransformation` object.
  """

  # Maximum length of the approximated SMA (rho_infinity in the paper).
  ro_inf = 2./(1 - b2) - 1
  def _radam_update(params):
    # `params` here is the (ro, mu_hat, nu_hat) operand tuple of lax.cond,
    # not model parameters.
    ro = params[0]
    mu_hat = params[1]
    nu_hat = params[2]
    # Variance rectification term r_t from the RAdam paper.
    r = jnp.sqrt((ro - 4)*(ro - 2)*ro_inf/((ro_inf - 4)*(ro_inf - 2)*ro))
    updates = jax.tree_util.tree_map(
        lambda m, v: r*m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat)
    return updates

  def init_fn(params):
    mu = jax.tree_util.tree_map(jnp.zeros_like, params)  # First moment
    nu = jax.tree_util.tree_map(jnp.zeros_like, params)  # Second moment
    return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)

  def update_fn(updates, state, params=None):
    del params
    mu = update_moment(updates, state.mu, b1, 1)
    nu = update_moment_per_elem_norm(updates, state.nu, b2, 2)
    count_inc = numerics.safe_int32_increment(state.count)
    b2t = b2**count_inc
    ro = ro_inf - 2 * count_inc * b2t / (1 - b2t)
    mu_hat = bias_correction(mu, b1, count_inc)
    nu_hat = bias_correction(nu, b2, count_inc)
    # When the variance estimate is not yet tractable (ro < threshold),
    # fall back to un-rectified bias-corrected momentum.
    updates = jax.lax.cond(
        ro >= threshold, _radam_update, lambda _: mu_hat,
        (ro, mu_hat, nu_hat))
    return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu)

  return base.GradientTransformation(init_fn, update_fn)
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
# `add_decayed_weights` is stateless; alias kept for a descriptive state name.
AddDecayedWeightsState = base.EmptyState
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
def add_decayed_weights(
    weight_decay: float = 0.0,
    mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None
) -> base.GradientTransformation:
  """Add parameter scaled by `weight_decay`.

  Args:
    weight_decay: A scalar weight decay rate.
    mask: A tree with same structure as (or a prefix of) the params PyTree,
      or a Callable that returns such a pytree given the params/updates.
      The leaves should be booleans, `True` for leaves/subtrees you want to
      apply the transformation to, and `False` for those you want to skip.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    del params
    return AddDecayedWeightsState()

  def update_fn(updates, state, params):
    # Decay is computed from the params, so they must be provided.
    if params is None:
      raise ValueError(base.NO_PARAMS_MSG)
    updates = jax.tree_util.tree_map(
        lambda g, p: g + weight_decay * p, updates, params)
    return updates, state

  # If mask is not `None`, apply mask to the gradient transformation.
  # E.g. it is common to skip weight decay on bias units and batch stats.
  if mask is not None:
    return wrappers.masked(
        base.GradientTransformation(init_fn, update_fn), mask)
  return base.GradientTransformation(init_fn, update_fn)
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
class ScaleByScheduleState(NamedTuple):
  """Maintains count for scale scheduling."""
  count: chex.Array  # shape=(), dtype=jnp.int32
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
def scale_by_schedule(
    step_size_fn: base.Schedule
) -> base.GradientTransformation:
  """Scale updates using a custom schedule for the `step_size`.

  Args:
    step_size_fn: A function that takes an update count as input and proposes
      the step_size to multiply the updates by.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    del params
    return ScaleByScheduleState(count=jnp.zeros([], jnp.int32))

  def update_fn(updates, state, params=None):
    del params
    step_size = step_size_fn(state.count)
    # Cast the scalar to each leaf's dtype so the updates' dtypes are kept.
    updates = jax.tree_util.tree_map(
        lambda g: jnp.array(step_size, dtype=g.dtype) * g, updates)
    return updates, ScaleByScheduleState(
        count=numerics.safe_int32_increment(state.count))

  return base.GradientTransformation(init_fn, update_fn)
|
| 767 |
+
|
| 768 |
+
|
| 769 |
+
class ScaleByFromageState(NamedTuple):
  """Maintains count for step-size scheduling."""
  # NOTE(review): no transformation in this file section constructs this
  # state — presumably consumed by a fromage optimizer elsewhere; confirm.
  count: chex.Array  # shape=(), dtype=jnp.int32
|
| 772 |
+
|
| 773 |
+
|
| 774 |
+
class ScaleByTrustRatioState(NamedTuple):
  """The scale and decay trust ratio transformation is stateless."""
|
| 776 |
+
|
| 777 |
+
|
| 778 |
+
def scale_by_trust_ratio(
    min_norm: float = 0.0,
    trust_coefficient: float = 1.,
    eps: float = 0.,
) -> base.GradientTransformation:
  """Scale updates by trust ratio.

  References:
    [You et. al 2020](https://arxiv.org/abs/1904.00962)

  Args:
    min_norm: Minimum norm for params and gradient norms; by default is zero.
    trust_coefficient: A multiplier for the trust ratio.
    eps: Additive constant added to the denominator for numerical stability.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    del params
    return ScaleByTrustRatioState()

  def update_fn(updates, state, params):
    if params is None:
      raise ValueError(base.NO_PARAMS_MSG)

    def _scale_update(update, param):

      # Clip norms to minimum value, by default no clipping.
      param_norm = numerics.safe_norm(param, min_norm)
      update_norm = numerics.safe_norm(update, min_norm)
      trust_ratio = trust_coefficient * param_norm / (update_norm + eps)

      # If no minimum norm clipping is used
      # Set trust_ratio to 1 in case where parameters would never be updated.
      zero_norm = jnp.logical_or(param_norm == 0., update_norm == 0.)
      safe_trust_ratio = jnp.where(
          zero_norm, jnp.array(1.0, dtype=param.dtype), trust_ratio)

      return update * safe_trust_ratio

    updates = jax.tree_util.tree_map(_scale_update, updates, params)
    return updates, state

  return base.GradientTransformation(init_fn, update_fn)
|
| 824 |
+
|
| 825 |
+
|
| 826 |
+
class AddNoiseState(NamedTuple):
  """State for adding gradient noise. Contains a count for annealing."""
  count: chex.Array  # Step counter driving the variance annealing.
  rng_key: chex.PRNGKey  # Carried PRNG key; split at every update.
|
| 830 |
+
|
| 831 |
+
|
| 832 |
+
def add_noise(
    eta: float,
    gamma: float,
    seed: int
) -> base.GradientTransformation:
  """Add gradient noise.

  References:
    [Neelakantan et al, 2014](https://arxiv.org/abs/1511.06807)

  Args:
    eta: Base variance of the gaussian noise added to the gradient.
    gamma: Decay exponent for annealing of the variance.
    seed: Seed for random number generation.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    del params
    return AddNoiseState(
        count=jnp.zeros([], jnp.int32), rng_key=jax.random.PRNGKey(seed))

  def update_fn(updates, state, params=None):  # pylint: disable=missing-docstring
    del params
    num_vars = len(jax.tree_util.tree_leaves(updates))
    treedef = jax.tree_util.tree_structure(updates)
    count_inc = numerics.safe_int32_increment(state.count)
    # Annealed variance: eta / t^gamma.
    variance = eta / count_inc**gamma
    standard_deviation = jnp.sqrt(variance)
    # One fresh key per leaf, plus one carried forward in the state.
    all_keys = jax.random.split(state.rng_key, num=num_vars + 1)
    noise = jax.tree_util.tree_map(
        lambda g, k: jax.random.normal(k, shape=g.shape, dtype=g.dtype),
        updates, jax.tree_util.tree_unflatten(treedef, all_keys[1:]))
    updates = jax.tree_util.tree_map(
        lambda g, n: g + standard_deviation.astype(g.dtype) * n,
        updates, noise)
    return updates, AddNoiseState(count=count_inc, rng_key=all_keys[0])

  return base.GradientTransformation(init_fn, update_fn)
|
| 873 |
+
|
| 874 |
+
|
| 875 |
+
class ApplyEvery(NamedTuple):
  """Contains a counter and a gradient accumulator."""
  count: chex.Array  # Position within the current k-step cycle.
  grad_acc: base.Updates  # Sum of gradients accumulated this cycle.
|
| 879 |
+
|
| 880 |
+
|
| 881 |
+
def apply_every(
    k: int = 1
) -> base.GradientTransformation:
  """Accumulate gradients and apply them every k steps.

  Note that if this transformation is part of a chain, the states of the other
  transformations will still be updated at every step. In particular, using
  `apply_every` with a batch size of N/2 and k=2 is not necessarily equivalent
  to not using `apply_every` with a batch size of N. If this equivalence is
  important for you, consider using the `optax.MultiSteps`.

  Args:
    k: Emit non-zero gradients every k steps, otherwise accumulate them.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    grad_acc = jax.tree_util.tree_map(jnp.zeros_like, params)
    return ApplyEvery(count=jnp.zeros([], jnp.int32), grad_acc=grad_acc)

  def update_fn(updates, state, params=None):
    del params
    c = state.count % k
    # `acc` is 0 at the start of a cycle (resets the accumulator) and 1
    # otherwise; boolean-as-multiplier keeps the computation branch-free.
    acc = c != 0
    grad_acc = jax.tree_util.tree_map(
        lambda g, ga: acc * ga + g, updates, state.grad_acc)
    # Emit the accumulated gradient only on the last step of each cycle;
    # all other steps emit zeros.
    emit = c == (k - 1)
    updates = jax.tree_util.tree_map(lambda ga: emit * ga, grad_acc)
    count_inc = numerics.safe_int32_increment(state.count)
    return updates, ApplyEvery(count=count_inc % k, grad_acc=grad_acc)

  return base.GradientTransformation(init_fn, update_fn)
|
| 915 |
+
|
| 916 |
+
|
| 917 |
+
def _subtract_mean(g):
  """Remove the mean over all non-leading axes from each slice of `g`.

  One-dimensional (and scalar) arrays are returned unchanged.
  """
  if g.ndim <= 1:
    # Nothing to centralize for vectors/scalars.
    return g
  reduce_axes = tuple(range(1, g.ndim))
  return g - g.mean(reduce_axes, keepdims=True)
|
| 922 |
+
|
| 923 |
+
|
| 924 |
+
# `centralize` is stateless; alias kept for a descriptive state-type name.
CentralState = base.EmptyState
|
| 925 |
+
|
| 926 |
+
|
| 927 |
+
def centralize() -> base.GradientTransformation:
  """Centralize gradients.

  References:
    [Yong et al, 2020](https://arxiv.org/abs/2004.01461)

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    del params
    return CentralState()

  def update_fn(updates, state, params=None):
    del params
    # Subtract the per-slice mean from every >=2-D leaf (see _subtract_mean).
    updates = jax.tree_util.tree_map(_subtract_mean, updates)
    return updates, state

  return base.GradientTransformation(init_fn, update_fn)
|
| 947 |
+
|
| 948 |
+
|
| 949 |
+
class ScaleBySM3State(NamedTuple):
  """State for the SM3 algorithm."""
  # NOTE: naming differs from other states here — in scale_by_sm3 `mu` holds
  # the per-dimension max accumulators and `nu` the momentum of the
  # preconditioned update.
  mu: base.Updates
  nu: base.Updates
|
| 953 |
+
|
| 954 |
+
|
| 955 |
+
def scale_by_sm3(
    b1: float = 0.9,
    b2: float = 1.0,
    eps: float = 1e-8
) -> base.GradientTransformation:
  """Scale updates by `sm3`.

  References:
    [Anil et. al 2019](https://arxiv.org/abs/1901.11150)

  Args:
    b1: Decay rate for the exponentially weighted average of grads.
    b2: Decay rate for the exponentially weighted average of squared grads.
    eps: Term added to the denominator to improve numerical stability.

  Returns:
    A `GradientTransformation` object.
  """

  def zeros_for_dim(p):
    # One 1-D accumulator per tensor dimension — the SM3 memory saving.
    return [jnp.zeros([s]) for s in p.shape]

  def init_fn(params):
    _reject_complex(params)
    mu = jax.tree_util.tree_map(zeros_for_dim, params)
    nu = jax.tree_util.tree_map(jnp.zeros_like, params)
    return ScaleBySM3State(mu, nu)

  def _expanded_shape(shape, axis):
    # Replaces a `shape` of [M, N, K] with 1 in all dimensions except for i.
    # For eg: i = 1 returns [1, N, 1].
    rank = len(shape)
    return [1] * axis + [shape[axis]] + [1] * (rank - axis - 1)

  def _new_accum(g, v):
    # With the default b2 == 1.0 this is a plain running sum of squares.
    coeffs = ((1.0 - b2) if b2 != 1.0 else 1.0, b2)
    if g.ndim < 2:
      return coeffs[0]*g**2 + coeffs[1]*v[0]
    else:
      # The elementwise min over the broadcast per-dim accumulators is the
      # SM3 upper bound on the full second-moment accumulator.
      return coeffs[0]*g**2 + coeffs[1]*functools.reduce(jnp.minimum, v)

  def _new_mu(g, i):
    if g.ndim < 2:
      return g
    else:
      return jnp.max(g, axis=other_axes(i, g.ndim))

  def other_axes(idx, ndim):
    return list(range(idx)) + list(range(idx+1, ndim))

  def update_fn(updates, state, params=None):
    del params
    # Broadcast each 1-D accumulator back to the full tensor rank.
    mu = jax.tree_util.tree_map(
        lambda g, v:  # pylint:disable=g-long-lambda
        [jnp.reshape(v[i], _expanded_shape(g.shape, i)) for i in range(g.ndim)],
        updates, state.mu)
    accum = jax.tree_util.tree_map(_new_accum, updates, mu)
    accum_inv_sqrt = jax.tree_util.tree_map(
        lambda t: jnp.where(t > 0, jax.lax.rsqrt(t + eps), 0.0), accum)
    up = jax.tree_util.tree_map(lambda g, a: g*a, updates, accum_inv_sqrt)
    # `nu` is the momentum of the preconditioned update (see state docstring).
    nu = update_moment(up, state.nu, b1, 1)
    # `mu` becomes the new per-dimension maxima of the accumulator.
    mu = jax.tree_util.tree_map(
        lambda g: [_new_mu(g, i) for i in range(g.ndim)], accum)

    return nu, ScaleBySM3State(mu=mu, nu=nu)

  return base.GradientTransformation(init_fn, update_fn)
|
| 1022 |
+
|
| 1023 |
+
|
| 1024 |
+
class ScaleByNovogradState(NamedTuple):
  """State for Novograd."""
  count: chex.Array  # Step counter; step 1 uses a special initialization.
  mu: base.Updates  # First moment (per-leaf, same shape as params).
  nu: base.Updates  # Second moment: one scalar per leaf (layer-wise norm).
|
| 1029 |
+
|
| 1030 |
+
|
| 1031 |
+
def scale_by_novograd(
    b1: float = 0.9,
    b2: float = 0.25,
    eps: float = 1e-8,
    eps_root: float = 0.0,
    weight_decay: float = 0.0,
    mu_dtype: Optional[Any] = None,
) -> base.GradientTransformation:
  """Computes NovoGrad updates.

  References:
    [Ginsburg et al, 2019](https://arxiv.org/abs/1905.11286)

  Args:
    b1: A decay rate for the exponentially weighted average of grads.
    b2: A decay rate for the exponentially weighted average of squared grads.
    eps: A term added to the denominator to improve numerical stability.
    eps_root: A term added to the denominator inside the square-root to improve
      numerical stability when backpropagating gradients through the rescaling.
    weight_decay: A scalar weight decay rate.
    mu_dtype: An optional `dtype` to be used for the first order accumulator; if
      `None` then the `dtype` is inferred from `params` and `updates`.

  Returns:
    The corresponding `GradientTransformation`.
  """

  mu_dtype = utils.canonicalize_dtype(mu_dtype)

  def init_fn(params):
    mu = jax.tree_util.tree_map(  # First moment
        lambda t: jnp.zeros_like(t, dtype=mu_dtype), params)
    # Second moment: a scalar per leaf (layer-wise squared gradient norm).
    nu = jax.tree_util.tree_map(lambda _: 0.0, params)
    return ScaleByNovogradState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)

  def nu_addition(grads):
    # Layer-wise squared L2 norm of the gradients.
    return jnp.linalg.norm(grads)**2

  def mu_addition(grads, params, nu):
    # Gradient normalized by the layer norm, plus decoupled weight decay.
    return grads / (jnp.sqrt(nu + eps_root) + eps) + weight_decay * params

  def init_nu(grads, nu):
    # First step: seed nu directly with the current squared norms.
    del nu
    return jax.tree_util.tree_map(nu_addition, grads)

  def update_nu(grads, nu):
    updates = jax.tree_util.tree_map(nu_addition, grads)
    return update_moment(updates, nu, b2, 1)

  def init_mu(grads, params, mu, nu):
    # First step: momentum starts at the normalized gradient (no b1 scaling).
    del mu
    return jax.tree_util.tree_map(mu_addition, grads, params, nu)

  def update_mu(grads, params, mu, nu):
    updates = jax.tree_util.tree_map(mu_addition, grads, params, nu)
    return jax.tree_util.tree_map(lambda m, u: b1 * m + u, mu, updates)

  # Second moment
  def update_fn(updates, state, params):
    count_inc = numerics.safe_int32_increment(state.count)

    nu = jax.lax.cond(count_inc == 1, init_nu, update_nu, updates, state.nu)

    mu = jax.lax.cond(count_inc == 1, init_mu, update_mu, updates, params,
                      state.mu, nu)

    mu = utils.cast_tree(mu, mu_dtype)
    # NovoGrad emits the momentum itself as the update.
    updates = mu
    return updates, ScaleByNovogradState(count=count_inc, mu=mu, nu=nu)

  return base.GradientTransformation(init_fn, update_fn)
|
| 1102 |
+
|
| 1103 |
+
|
| 1104 |
+
def scale_by_optimistic_gradient(alpha: float = 1.0,
                                 beta: float = 1.0
                                 ) -> base.GradientTransformation:
  """Compute generalized optimistic gradients.

  The emitted update is `(alpha + beta) * g_t - beta * g_{t-1}`, i.e. the
  current gradient plus a negative-momentum correction using the gradient
  from the previous step (stored in the state's trace).

  References:
    [Mokhtari et al, 2019](https://arxiv.org/abs/1901.08511v2)

  Args:
    alpha: Coefficient for generalized optimistic gradient descent.
    beta: Coefficient for negative momentum.

  Returns:
    A `GradientTransformation` object.
  """

  def _optimistic_combine(grad_t, grad_tm1):
    # Extrapolate using the previous gradient as a negative-momentum term.
    return (alpha + beta) * grad_t - beta * grad_tm1

  def init_fn(params):
    # The trace holds the previous step's gradients; start from zeros.
    zero_grads = jax.tree_util.tree_map(jnp.zeros_like, params)
    return TraceState(trace=zero_grads)

  def update_fn(updates, state, params=None):
    del params
    optimistic_updates = jax.tree_util.tree_map(
        _optimistic_combine, updates, state.trace)
    # Remember the raw (uncombined) gradients for the next step.
    return optimistic_updates, TraceState(trace=updates)

  return base.GradientTransformation(init_fn, update_fn)
|
| 1133 |
+
|
| 1134 |
+
|
| 1135 |
+
# TODO(b/183800387): remove legacy aliases.
# These legacy aliases are here for checkpoint compatibility
# To be removed once checkpoints have updated.
# Numeric helpers that moved to `numerics`.
_safe_int32_increment = numerics.safe_int32_increment
safe_int32_increment = numerics.safe_int32_increment
# Weight-decay transform renamed to `add_decayed_weights`.
AdditiveWeightDecayState = AddDecayedWeightsState
additive_weight_decay = add_decayed_weights
# Clipping states that moved to the `clipping` module.
ClipState = clipping.ClipState
ClipByGlobalNormState = clipping.ClipByGlobalNormState
|
lib/python3.10/site-packages/optax/_src/transform_test.py
ADDED
|
@@ -0,0 +1,305 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
"""Tests for `transform.py`."""
|
| 18 |
+
|
| 19 |
+
from absl.testing import absltest
|
| 20 |
+
from absl.testing import parameterized
|
| 21 |
+
|
| 22 |
+
import chex
|
| 23 |
+
import jax
|
| 24 |
+
import jax.numpy as jnp
|
| 25 |
+
import numpy as np
|
| 26 |
+
|
| 27 |
+
from optax._src import alias
|
| 28 |
+
from optax._src import combine
|
| 29 |
+
from optax._src import transform
|
| 30 |
+
from optax._src import update
|
| 31 |
+
|
| 32 |
+
STEPS = 50
|
| 33 |
+
LR = 1e-2
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class TransformTest(parameterized.TestCase):
  """Behavioral tests for the gradient transformations in `transform.py`."""

  def setUp(self):
    super().setUp()
    # Simple two-leaf pytrees used as parameters and updates by most tests.
    self.init_params = (jnp.array([1., 2.]), jnp.array([3., 4.]))
    self.per_step_updates = (jnp.array([500., 5.]), jnp.array([300., 3.]))

  @chex.all_variants
  @parameterized.named_parameters([
      ('adam', transform.scale_by_adam),
      ('adamax', transform.scale_by_adamax),
      ('rmsprop', transform.scale_by_rms),
      ('stddev', transform.scale_by_stddev),
      ('trust_ratio', transform.scale_by_trust_ratio),
      ('param_block_norm', transform.scale_by_param_block_norm),
      ('param_block_rms', transform.scale_by_param_block_rms),
  ])
  def test_scalers(self, scaler_constr):
    """Scaling transforms must produce finite, shape-preserving updates."""
    params = self.init_params

    scaler = scaler_constr()
    init_fn = self.variant(scaler.init)
    transform_fn = self.variant(scaler.update)

    state = init_fn(params)
    chex.assert_tree_all_finite(state)

    updates, state = transform_fn(self.per_step_updates, state, params)
    chex.assert_tree_all_finite((params, updates, state))
    jax.tree_util.tree_map(
        lambda *args: chex.assert_equal_shape(args), params, updates)

  @chex.all_variants
  def test_add_decayed_weights(self):
    """Masked weight decay touches only the leaves selected by the mask."""
    # Define a transform that add decayed weights.
    # We can define a mask either as a pytree, or as a function that
    # returns the pytree. Below we define the pytree directly.
    mask = (True, dict(a=True, b=False))
    tx = transform.add_decayed_weights(0.1, mask=mask)
    # Define input updates and weights.
    updates = (
        jnp.zeros((2,), dtype=jnp.float32),
        dict(
            a=jnp.zeros((2,), dtype=jnp.float32),
            b=jnp.zeros((2,), dtype=jnp.float32),))
    weights = (
        jnp.ones((2,), dtype=jnp.float32),
        dict(
            a=jnp.ones((2,), dtype=jnp.float32),
            b=jnp.ones((2,), dtype=jnp.float32),))
    # This mask means that we will add decayed weights to the first two
    # terms in the input updates, but not to the last element.
    expected_tx_updates = (
        0.1*jnp.ones((2,), dtype=jnp.float32),
        dict(
            a=0.1*jnp.ones((2,), dtype=jnp.float32),
            b=jnp.zeros((2,), dtype=jnp.float32),))
    # Apply transform
    state = tx.init(weights)
    transform_fn = self.variant(tx.update)
    new_updates, _ = transform_fn(updates, state, weights)
    # Assert output as expected.
    chex.assert_tree_all_close(new_updates, expected_tx_updates)

  @chex.all_variants
  def test_ema(self):
    """Non-debiased EMA matches the closed-form recurrence for two steps."""
    values = jnp.array([5.0, 7.0])
    decay = 0.9
    d = decay

    ema = transform.ema(decay=decay, debias=False)
    state = ema.init(values[0])  # init to zeroes

    transform_fn = self.variant(ema.update)
    mean, state = transform_fn(values[0], state)
    np.testing.assert_allclose(mean, (1-d) * values[0], atol=1e-4)

    mean, state = transform_fn(values[1], state)
    np.testing.assert_allclose(
        mean,
        (1 - d) * (values[1] + d * values[0]), atol=1e-2)

  @chex.all_variants
  def test_ema_debias(self):
    """Debiased EMA output is corrected but the stored state is not."""
    values = jnp.array([5.0, 7.0])
    decay = 0.9
    d = decay

    ema = transform.ema(decay=decay)
    state = ema.init(values[0])

    transform_fn = self.variant(ema.update)
    mean, state = transform_fn(values[0], state)
    np.testing.assert_allclose(mean, values[0], atol=1e-4)

    mean, state = transform_fn(values[1], state)
    np.testing.assert_allclose(
        mean,
        ((1 - d) * values[1] + d * (1 - d) * values[0]) / (1 - d**2),
        atol=1e-2)
    # The state must not be debiased.
    np.testing.assert_allclose(
        state.ema,
        (1 - d) * values[1] + d * (1 - d) * values[0],
        atol=1e-2)

  @chex.all_variants
  def test_update_infinity_moment(self):
    """Infinity-moment update is an elementwise max of |g| and decayed nu."""
    values = jnp.array([5.0, 7.0])
    decay = 0.9
    d = decay

    transform_fn = self.variant(transform.update_infinity_moment)

    # identity if updating with itself (and positive decay)
    np.testing.assert_allclose(
        transform_fn(values, values, decay=d, eps=0.),
        values,
        atol=1e-4
    )
    # return (decayed) max when updating with zeros
    np.testing.assert_allclose(
        transform_fn(jnp.zeros_like(values), values, decay=d, eps=0.),
        d * values,
        atol=1e-4
    )
    # infinity norm takes absolute values
    np.testing.assert_allclose(
        transform_fn(-values, jnp.zeros_like(values), decay=d, eps=0.),
        values,
        atol=1e-4
    )
    # return at least `eps`
    np.testing.assert_allclose(
        transform_fn(jnp.zeros_like(values), jnp.zeros_like(values),
                     decay=d, eps=1e-2),
        jnp.ones_like(values) * 1e-2,
        atol=1e-4
    )

  @chex.all_variants
  def test_apply_every(self):
    """`apply_every(k)` emits zero updates except every k-th step."""
    # The frequency of the application of sgd
    k = 4
    zero_update = (jnp.array([0., 0.]), jnp.array([0., 0.]))

    # optax sgd
    optax_sgd_params = self.init_params
    sgd = alias.sgd(LR, 0.0)
    state_sgd = sgd.init(optax_sgd_params)

    # optax sgd plus apply every
    optax_sgd_apply_every_params = self.init_params
    sgd_apply_every = combine.chain(
        transform.apply_every(k=k),
        transform.trace(decay=0, nesterov=False),
        transform.scale(-LR))
    state_sgd_apply_every = sgd_apply_every.init(optax_sgd_apply_every_params)
    transform_fn = self.variant(sgd_apply_every.update)

    for i in range(STEPS):
      # Apply a step of sgd
      updates_sgd, state_sgd = sgd.update(self.per_step_updates, state_sgd)
      optax_sgd_params = update.apply_updates(optax_sgd_params, updates_sgd)

      # Apply a step of sgd_apply_every
      updates_sgd_apply_every, state_sgd_apply_every = transform_fn(
          self.per_step_updates, state_sgd_apply_every)
      optax_sgd_apply_every_params = update.apply_updates(
          optax_sgd_apply_every_params, updates_sgd_apply_every)

      # Every k steps, check equivalence.
      if i % k == k-1:
        chex.assert_tree_all_close(
            optax_sgd_apply_every_params, optax_sgd_params,
            atol=1e-6, rtol=1e-5)
      # Otherwise, check update is zero.
      else:
        chex.assert_tree_all_close(
            updates_sgd_apply_every, zero_update, atol=0.0, rtol=0.0)

  def test_scale(self):
    """`scale(f)` multiplies every leaf of the updates by f."""
    updates = self.per_step_updates
    for i in range(1, STEPS + 1):
      factor = 0.1 ** i
      rescaler = transform.scale(factor)
      # Apply rescaling.
      scaled_updates, _ = rescaler.update(updates, None)
      # Manually scale updates.
      def rescale(t):
        return t * factor  # pylint:disable=cell-var-from-loop
      manual_updates = jax.tree_util.tree_map(rescale, updates)
      # Check the rescaled updates match.
      chex.assert_tree_all_close(scaled_updates, manual_updates)

  @parameterized.named_parameters([
      ('1d', [1.0, 2.0], [1.0, 2.0]),
      ('2d', [[1.0, 2.0], [3.0, 4.0]], [[-0.5, 0.5], [-0.5, 0.5]]),
      ('3d', [[[1., 2.], [3., 4.]],
              [[5., 6.], [7., 8.]]], [[[-1.5, -0.5], [0.5, 1.5]],
                                      [[-1.5, -0.5], [0.5, 1.5]]]),
  ])
  def test_centralize(self, inputs, outputs):
    """`centralize` subtracts the per-row mean for rank >= 2 inputs."""
    inputs = jnp.asarray(inputs)
    outputs = jnp.asarray(outputs)
    centralizer = transform.centralize()
    centralized_inputs, _ = centralizer.update(inputs, None)
    chex.assert_tree_all_close(centralized_inputs, outputs)

  @chex.all_variants
  def test_add_noise_has_correct_variance_scaling(self):
    """Noise magnitude must follow the eta / i**gamma annealing schedule."""
    # Prepare to compare noise with a rescaled unit-variance substitute.
    eta = 0.3
    gamma = 0.55
    seed = 314
    noise = transform.add_noise(eta, gamma, seed)
    noise_unit = transform.add_noise(1.0, 0.0, seed)

    params = self.init_params
    state = noise.init(params)
    state_unit = noise_unit.init(params)

    # Check the noise itself by adding it to zeros.
    updates = jax.tree_util.tree_map(jnp.zeros_like, params)

    for i in range(1, STEPS + 1):
      updates_i, state = self.variant(noise.update)(updates, state)
      updates_i_unit, state_unit = noise_unit.update(updates, state_unit)

      scale = jnp.sqrt(eta / i**gamma)

      updates_i_rescaled = jax.tree_util.tree_map(
          lambda g, s=scale: g * s, updates_i_unit)

      chex.assert_tree_all_close(updates_i, updates_i_rescaled, rtol=1e-4)

  def test_scale_by_optimistic_gradient(self):
    """Output must equal 2 * g_t - g_{t-1} with default coefficients."""

    def f(params: jnp.ndarray) -> jnp.ndarray:
      return params['x'] ** 2

    initial_params = {
        'x': jnp.array(2.0)
    }

    og = transform.scale_by_optimistic_gradient()
    og_state = og.init(initial_params)
    # Provide some arbitrary previous gradient.
    og_state.trace['x'] = 1.5

    g = jax.grad(f)(initial_params)
    og_true = 2 * g['x'] - og_state.trace['x']
    og, og_state = og.update(g, og_state)

    # Compare transformation output with manually computed optimistic gradient.
    chex.assert_tree_all_close(og_true, og['x'])

  @chex.all_variants
  def test_bias_correction_bf16(self):
    """Bias correction must stay finite in bfloat16 over a wide value range."""
    bias_correction_fn = self.variant(transform.bias_correction)
    m = jnp.logspace(-10, 10, num=21, dtype=jnp.bfloat16)  # 1e-10 ... 1e10
    for decay in (0.9, 0.99, 0.999, 0.9995):
      for count in (1, 10, 100, 1000):
        chex.assert_tree_all_finite(
            bias_correction_fn(m, decay, count),
            custom_message=f'failed with decay={decay}, count={count}')
| 302 |
+
|
| 303 |
+
|
| 304 |
+
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
  absltest.main()
|
lib/python3.10/site-packages/optax/_src/utils.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Utility functions for testing."""
|
| 16 |
+
|
| 17 |
+
from typing import Optional, Tuple, Sequence
|
| 18 |
+
|
| 19 |
+
import chex
|
| 20 |
+
import jax
|
| 21 |
+
import jax.numpy as jnp
|
| 22 |
+
import jax.scipy.stats.norm as multivariate_normal
|
| 23 |
+
|
| 24 |
+
from optax._src import linear_algebra
|
| 25 |
+
from optax._src import numerics
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def tile_second_to_last_dim(a: chex.Array) -> chex.Array:
  """Replicates `a` along a new second-to-last axis of size `a.shape[-1]`.

  An input of shape [..., d] becomes [..., d, d], where every row of the new
  axis is a copy of the original last axis.
  """
  as_column = jnp.expand_dims(a, axis=-1)
  broadcaster = jnp.expand_dims(jnp.ones_like(a), axis=-2)
  return broadcaster * as_column
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def canonicalize_dtype(
    dtype: Optional[chex.ArrayDType]) -> Optional[chex.ArrayDType]:
  """Canonicalise a dtype, skip if None."""
  if dtype is None:
    # Nothing to canonicalize; propagate the "use default" sentinel.
    return None
  return jax.dtypes.canonicalize_dtype(dtype)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def cast_tree(tree: chex.ArrayTree,
              dtype: Optional[chex.ArrayDType]) -> chex.ArrayTree:
  """Cast tree to given dtype, skip if None."""
  if dtype is None:
    # No target dtype requested: return the tree untouched.
    return tree
  return jax.tree_util.tree_map(lambda leaf: leaf.astype(dtype), tree)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def set_diags(a: chex.Array, new_diags: chex.Array) -> chex.Array:
  """Set the diagonals of every DxD matrix in an input of shape NxDxD.

  Args:
    a: rank 3, tensor NxDxD.
    new_diags: NxD matrix, the new diagonals of each DxD matrix.

  Returns:
    NxDxD tensor, with the same contents as `a` but with the diagonal
      changed to `new_diags`.
  """
  n, d, d1 = a.shape
  assert d == d1

  # Fancy-index every (batch, i, i) diagonal position in one shot.
  batch_index = jnp.repeat(jnp.arange(n), d)
  diag_index = jnp.tile(jnp.arange(d), n)

  # Functional in-place update via JAX's .at[] syntax.
  return a.at[batch_index, diag_index, diag_index].set(new_diags.flatten())
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class MultiNormalDiagFromLogScale():
  """MultiNormalDiag which directly exposes its input parameters."""

  def __init__(self, loc: chex.Array, log_scale: chex.Array):
    # Keep both the raw log-scale (for gradients w.r.t. it) and its exp.
    self._log_scale = log_scale
    self._scale = jnp.exp(log_scale)
    self._mean = loc
    # Event shape implied by broadcasting mean against scale.
    self._param_shape = jax.lax.broadcast_shapes(
        self._mean.shape, self._scale.shape)

  def sample(self, shape: Sequence[int],
             seed: chex.PRNGKey) -> chex.Array:
    """Draws samples of shape `tuple(shape) + event_shape` via reparameterization."""
    sample_shape = tuple(shape) + self._param_shape
    return jax.random.normal(
        seed, shape=sample_shape) * self._scale + self._mean

  def log_prob(self, x: chex.Array) -> chex.Array:
    """Log density of `x`, summed over the event (parameter) axes."""
    log_prob = multivariate_normal.logpdf(x, loc=self._mean, scale=self._scale)
    # Sum over parameter axes.
    sum_axis = [-(i + 1) for i in range(len(self._param_shape))]
    return jnp.sum(log_prob, axis=sum_axis)

  @property
  def log_scale(self) -> chex.Array:
    # Raw log-scale parameter, as passed to the constructor.
    return self._log_scale

  @property
  def params(self) -> Sequence[chex.Array]:
    # Differentiable parameters in a fixed order: [mean, log_scale].
    return [self._mean, self._log_scale]
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def multi_normal(loc: chex.Array,
                 log_scale: chex.Array) -> MultiNormalDiagFromLogScale:
  """Factory for a diagonal normal parameterized by mean and log-scale."""
  distribution = MultiNormalDiagFromLogScale(loc=loc, log_scale=log_scale)
  return distribution
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
@jax.custom_vjp
def _scale_gradient(inputs: chex.ArrayTree, scale: float) -> chex.ArrayTree:
  """Internal gradient scaling implementation."""
  del scale  # Only used for the backward pass defined in _scale_gradient_bwd.
  return inputs


def _scale_gradient_fwd(inputs: chex.ArrayTree,
                        scale: float) -> Tuple[chex.ArrayTree, float]:
  # Forward pass: identity on `inputs`; save `scale` as the VJP residual.
  return _scale_gradient(inputs, scale), scale


def _scale_gradient_bwd(scale: float,
                        g: chex.ArrayTree) -> Tuple[chex.ArrayTree, None]:
  # Backward pass: scale every cotangent leaf; no gradient w.r.t. `scale`.
  return (jax.tree_util.tree_map(lambda g_: g_ * scale, g), None)


# Register the custom forward/backward rules; must follow their definitions.
_scale_gradient.defvjp(_scale_gradient_fwd, _scale_gradient_bwd)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def scale_gradient(inputs: chex.ArrayTree, scale: float) -> chex.ArrayTree:
  """Scales gradients for the backwards pass.

  Args:
    inputs: A nested array.
    scale: The scale factor for the gradient on the backwards pass.

  Returns:
    An array of the same structure as `inputs`, with scaled backward gradient.
  """
  # Fast paths: a zero scale is exactly a stop_gradient, and a unit scale
  # is the identity — neither needs the custom VJP machinery.
  if scale == 0.:
    return jax.lax.stop_gradient(inputs)
  if scale == 1.:
    return inputs
  return _scale_gradient(inputs, scale)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
# TODO(b/183800387): remove legacy aliases.
# Re-exports kept for backwards compatibility with callers that imported
# these helpers from `utils` before they moved to their own modules.
safe_norm = numerics.safe_norm
safe_int32_increment = numerics.safe_int32_increment
global_norm = linear_algebra.global_norm
|
lib/python3.10/site-packages/optax/_src/utils_test.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `utils.py`."""
|
| 16 |
+
|
| 17 |
+
from unittest import mock
|
| 18 |
+
|
| 19 |
+
from absl.testing import absltest
|
| 20 |
+
from absl.testing import parameterized
|
| 21 |
+
|
| 22 |
+
import jax
|
| 23 |
+
|
| 24 |
+
from optax._src import utils
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class ScaleGradientTest(parameterized.TestCase):
  """Tests for `utils.scale_gradient` on scalars and pytrees."""

  @parameterized.product(inputs=[-1., 0., 1.], scale=[-0.5, 0., 0.5, 1., 2.])
  @mock.patch.object(jax.lax, 'stop_gradient', wraps=jax.lax.stop_gradient)
  def test_scale_gradient(self, mock_sg, inputs, scale):
    """Gradient is scaled; stop_gradient is used only for scale == 0."""

    def fn(inputs):
      outputs = utils.scale_gradient(inputs, scale)
      return outputs ** 2

    grad = jax.grad(fn)
    # d/dx (scale_gradient(x, s))^2 == 2 * x * s by the custom VJP.
    self.assertEqual(grad(inputs), 2 * inputs * scale)
    if scale == 0.:
      mock_sg.assert_called_once_with(inputs)
    else:
      self.assertFalse(mock_sg.called)
    # Forward pass must always be the identity.
    self.assertEqual(fn(inputs), inputs ** 2)

  @parameterized.product(scale=[-0.5, 0., 0.5, 1., 2.])
  def test_scale_gradient_pytree(self, scale):
    """Same contract holds leaf-wise for nested pytree inputs."""

    def fn(inputs):
      outputs = utils.scale_gradient(inputs, scale)
      outputs = jax.tree_util.tree_map(lambda x: x ** 2, outputs)
      return sum(jax.tree_util.tree_leaves(outputs))

    inputs = dict(a=-1., b=dict(c=(2.,), d=0.))

    grad = jax.grad(fn)
    grads = grad(inputs)
    jax.tree_util.tree_map(
        lambda i, g: self.assertEqual(g, 2 * i * scale), inputs, grads)
    self.assertEqual(
        fn(inputs),
        sum(jax.tree_util.tree_leaves(
            jax.tree_util.tree_map(lambda x: x**2, inputs))))
|
| 63 |
+
|
| 64 |
+
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
  absltest.main()
|
lib/python3.10/site-packages/optax/_src/wrappers.py
ADDED
|
@@ -0,0 +1,547 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Transformation wrappers."""
|
| 16 |
+
|
| 17 |
+
import functools
|
| 18 |
+
from typing import Any, Callable, NamedTuple, Optional, Tuple, Union
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
from jax import lax
|
| 23 |
+
import jax.numpy as jnp
|
| 24 |
+
from jax.tree_util import tree_flatten
|
| 25 |
+
from jax.tree_util import tree_map
|
| 26 |
+
from jax.tree_util import tree_unflatten
|
| 27 |
+
import numpy as np
|
| 28 |
+
from optax._src import base
|
| 29 |
+
from optax._src import numerics
|
| 30 |
+
import typing_extensions
|
| 31 |
+
|
| 32 |
+
Array = jnp.ndarray
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def flatten(
    inner: base.GradientTransformation
) -> base.GradientTransformation:
  """Flattens parameters and gradients for init and update of inner transform.

  This can reduce the overhead of performing many calculations on lots of small
  variables, at the cost of slightly increased memory usage.

  Args:
    inner: Inner transformation to flatten inputs for.

  Returns:
    New GradientTransformation.
  """

  def _flatten(tree):
    """Concatenates every leaf of `tree` into a single 1-D vector."""
    leaves, _ = tree_flatten(tree)
    return jnp.concatenate([jnp.reshape(leaf, [-1]) for leaf in leaves])

  def _unflatten(template, flat):
    """Splits `flat` back into the structure and shapes of `template`."""
    leaves, treedef = tree_flatten(template)
    sizes = [np.prod(leaf.shape) for leaf in leaves]
    # Split points are the running totals of leaf sizes; the final total is
    # dropped since `jnp.split` only needs the interior boundaries.
    split_points = list(np.cumsum(sizes))[:-1]
    chunks = jnp.split(flat, split_points)
    shaped = [
        jnp.reshape(chunk, leaf.shape)
        for chunk, leaf in zip(chunks, leaves)
    ]
    return tree_unflatten(treedef, shaped)

  def init_fn(params):
    return inner.init(_flatten(params))

  def update_fn(updates, state, params=None):
    flat_params = None if params is None else _flatten(params)
    flat_updates, state = inner.update(_flatten(updates), state, flat_params)
    # Restore the original tree structure of the updates.
    return _unflatten(updates, flat_updates), state

  return base.GradientTransformation(init_fn, update_fn)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class ApplyIfFiniteState(NamedTuple):
  """State of the `GradientTransformation` returned by `apply_if_finite`.

  Fields:
    notfinite_count: Number of consecutive gradient updates containing an Inf or
      a NaN. This number is reset to 0 whenever a gradient update without an Inf
      or a NaN is done.
    last_finite: Whether or not the last gradient update contained an Inf or a
      NaN.
    total_notfinite: Total number of gradient updates containing an Inf or
      a NaN since this optimizer was initialised. This number is never reset.
    inner_state: The state of the inner `GradientTransformation`.
  """
  notfinite_count: jnp.array
  last_finite: jnp.array
  total_notfinite: jnp.array
  inner_state: Any
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def apply_if_finite(
    inner: base.GradientTransformation,
    max_consecutive_errors: int
) -> base.GradientTransformation:
  """A function that wraps an optimizer to make it robust to a few NaNs or Infs.

  The purpose of this function is to prevent any optimization to happen if the
  gradients contain NaNs or Infs. That is, when a NaN or Inf is detected in the
  gradients, the wrapped optimizer ignores that gradient update. If the NaNs or
  Infs persist after a given number of updates, the wrapped optimizer gives up
  and accepts the update.

  Args:
    inner: Inner transformation to be wrapped.
    max_consecutive_errors: Maximum number of consecutive gradient updates
      containing NaNs or Infs that the wrapped optimizer will ignore. After
      that many ignored updates, the optimizer will give up and accept.

  Returns:
    New GradientTransformation.
  """

  def init(params):
    return ApplyIfFiniteState(
        notfinite_count=jnp.zeros([], jnp.int32),
        last_finite=jnp.array(True, jnp.bool_),
        total_notfinite=jnp.zeros([], jnp.int32),
        inner_state=inner.init(params))

  def update(updates, state, params=None):
    inner_state = state.inner_state
    # The whole update is usable only if every leaf is entirely finite.
    leaves = tree_flatten(updates)[0]
    isfinite = jnp.all(
        jnp.array([jnp.all(jnp.isfinite(leaf)) for leaf in leaves]))
    # Reset the consecutive-error counter on a finite update, otherwise bump it
    # (with saturation to avoid int32 overflow).
    notfinite_count = jnp.where(
        isfinite, jnp.zeros([], jnp.int32),
        numerics.safe_int32_increment(state.notfinite_count))

    def accept(_):
      return inner.update(updates, inner_state, params)

    def reject(_):
      # Drop the update entirely: zero updates, untouched inner state.
      return tree_map(jnp.zeros_like, updates), inner_state

    # Apply the update when it is finite, or when we have already ignored too
    # many consecutive non-finite updates and give up.
    give_up = notfinite_count > max_consecutive_errors
    updates, new_inner_state = lax.cond(
        jnp.logical_or(isfinite, give_up), accept, reject, operand=None)

    total_notfinite = jnp.where(
        isfinite, state.total_notfinite,
        numerics.safe_int32_increment(state.total_notfinite))
    new_state = ApplyIfFiniteState(
        notfinite_count=notfinite_count,
        last_finite=isfinite,
        total_notfinite=total_notfinite,
        inner_state=new_inner_state)
    return updates, new_state

  return base.GradientTransformation(init=init, update=update)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def _zeros_tree_like(inp_tree):
|
| 165 |
+
return jax.tree_util.tree_map(jnp.zeros_like, inp_tree)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
class MultiStepsState(NamedTuple):
  """State of the `GradientTransformation` returned by `MultiSteps`.

  Fields:
    mini_step: current mini-step counter. At an update, this either increases by
      1 or is reset to 0.
    gradient_step: gradient step counter. This only increases after enough
      mini-steps have been accumulated.
    inner_opt_state: the state of the wrapped optimizer.
    acc_grads: accumulated gradients over multiple mini-steps.
    skip_state: an arbitrarily nested tree of arrays. This is only
      relevant when passing a `should_skip_update_fn` to `MultiSteps`. This
      structure will then contain values for debugging and or monitoring. The
      actual structure will vary depending on the choice of
      `ShouldSkipUpdateFunction`.
  """
  mini_step: Array
  gradient_step: Array
  inner_opt_state: Any
  acc_grads: Any
  skip_state: chex.ArrayTree = ()
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
class ShouldSkipUpdateFunction(typing_extensions.Protocol):
  """Protocol for functions deciding whether a `MultiSteps` update is skipped."""

  def __call__(self, updates: base.Updates, gradient_step: Array,
               params: Optional[base.Params]) -> Tuple[Array, chex.ArrayTree]:
    """Returns true to indicate that updates should be skipped in a multi-step.

    Args:
      updates: The updates that the gradient transformation has proposed
        to apply
      gradient_step: The current gradient step (see
        `MultiStepsState.gradient_step`). This can be used for example to reject
        large gradients with an annealed maximum allowed gradient norm.
      params: If known, the current parameter tree of the function being
        transformed.
    Returns:
      A tuple:
      * First element is an array with a single bool indicating whether or not
        the updates should be applied.
      * Second element is an arbitrarily nested structure of arrays that will be
        stored in `MultiStepsState.skip_state`. The structure will vary from
        function to function. Debugging info, or values to monitor, can be put
        in this structure.
    """
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def skip_not_finite(
    updates: base.Updates, gradient_step: Array,
    params: Optional[base.Params]) -> Tuple[Array, chex.ArrayTree]:
  """Returns True iff any of the `updates` contains an inf or a NaN.

  Args:
    updates: see `ShouldSkipUpdateFunction`.
    gradient_step: see `ShouldSkipUpdateFunction`.
    params: see `ShouldSkipUpdateFunction`.

  Returns:
    A tuple:
    * First element is a scalar array of type bool.
    * Second element is a dictionary with keys:
      - `should_skip`: True iff `updates` contains an inf or a NaN.
      - `num_not_finite`: total number of inf and NaN found in `updates`.
  """
  del gradient_step, params
  # Count the non-finite entries in each leaf, then total them.
  per_leaf_counts = [
      jnp.sum(~jnp.isfinite(leaf))
      for leaf in jax.tree_util.tree_leaves(updates)
  ]
  num_not_finite = jnp.sum(jnp.array(per_leaf_counts))
  should_skip = num_not_finite > 0
  return should_skip, dict(should_skip=should_skip,
                           num_not_finite=num_not_finite)
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def skip_large_updates(updates: base.Updates,
                       gradient_step: Array,
                       params: Optional[base.Params],
                       max_squared_norm: float) -> Tuple[Array, chex.ArrayTree]:
  """Returns True if the global norm square of `updates` is small enough.

  Args:
    updates: see `ShouldSkipUpdateFunction`.
    gradient_step: see `ShouldSkipUpdateFunction`.
    params: see `ShouldSkipUpdateFunction`.
    max_squared_norm: only updates with a norm square strictly less than this
      value will be accepted.

  Returns:
    A tuple:
    * First element is a scalar array of type bool.
    * Second element is a dictionary with keys:
      - `should_skip`: True iff square norm of `updates` is larger or equal than
        `max_squared_norm`.
      - `norm_squared`: overall norm square of the `updates`.
  """
  del gradient_step, params
  leaf_sq_sums = [
      jnp.sum(jnp.square(leaf))
      for leaf in jax.tree_util.tree_leaves(updates)
  ]
  norm_sq = jnp.sum(jnp.array(leaf_sq_sums))
  # `norm_sq < max_squared_norm` is False when `norm_sq` is NaN, so negating it
  # also skips NaN updates.
  should_skip = jnp.logical_not(norm_sq < max_squared_norm)
  return should_skip, dict(should_skip=should_skip, norm_squared=norm_sq)
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
class MultiSteps:
  """An optimizer wrapper to accumulate gradients over multiple steps.

  This wrapper collects together the updates passed to its `update` function
  over consecutive steps until a given number of scheduled steps is reached.
  In each of these intermediate steps, the returned value from the optimizer is
  a tree of zeros of the same shape of the updates passed as input.

  Once the scheduled number of intermediate 'mini-steps' has been reached, the
  gradients accumulated to the current time will be passed to the wrapped
  optimizer's update function, (with the inner optimizer's state being updated
  appropriately) and then returned to the caller. The wrapper's accumulated
  gradients are then set back to zero and the process starts again.

  The number of mini-steps per gradient update is controlled by a function, and
  it can vary over training. This offers a means of varying batch size over
  training.
  """

  def __init__(
      self,
      opt: base.GradientTransformation,
      every_k_schedule: Union[int, Callable[[Array], Array]],
      use_grad_mean: bool = True,
      should_skip_update_fn: Optional[ShouldSkipUpdateFunction] = None):
    """Initialiser.

    Args:
      opt: the wrapped optimizer.
      every_k_schedule: an int or a function.
        * As a function, it returns how many mini-steps should be accumulated
          in a single gradient step. Its only argument is the current
          gradient step count. By varying the returned value, users can vary
          the overall training batch size.
        * If an `int`, this is the constant number of mini-steps per gradient
          update.
      use_grad_mean: if `True` (the default), gradients accumulated over
        multiple mini-steps are averaged. Otherwise, they are summed.
      should_skip_update_fn: if provided, this function is used to decide when
        to accept or reject the updates from a mini-step. When a mini-step is
        rejected, the inner state of `MultiSteps` is not updated. In other
        words, it is as if this mini-step never happened. For example:
        * to ignore updates containing inf or NaN, do
          `should_skip_update_fn=skip_not_finite`;
        * to ignore updates with a norm square larger then 42, do
          `should_skip_update_fn=functools.partial(skip_large_updates,
          max_squared_norm=42.)`.
        Note that the optimizer's state `MultiStepsState` contains a field
        `skip_state` in which debugging and monitoring information returned
        by `should_skip_update_fn` is written.
    """
    self._opt = opt
    if isinstance(every_k_schedule, int):
      # Wrap the constant in a schedule so both cases expose the same API.
      self._every_k_schedule = lambda step: every_k_schedule
    else:
      self._every_k_schedule = every_k_schedule
    self._use_grad_mean = use_grad_mean

    if self._use_grad_mean:
      # Use Welford algorithm for numerically stable aggregation of mean.
      self._acc_update = (
          lambda grad, acc, *, n_acc: acc + (grad - acc) / (n_acc + 1))
    else:
      self._acc_update = lambda grad, acc, *, n_acc: grad + acc

    if should_skip_update_fn is None:

      def should_skip_update_fn(*unused_args, **unused_kwargs):
        # Default: never skip, and keep an empty skip_state.
        return jnp.array(False, dtype=jnp.bool_), ()

    self._should_skip_update_fn = should_skip_update_fn

  @property
  def inner_opt(self):
    """The wrapped optimizer."""
    return self._opt

  def init(self, params: Any) -> MultiStepsState:
    """Builds and returns initial `MultiStepsState`."""
    updates = _zeros_tree_like(params)
    gradient_step = jnp.zeros([], dtype=jnp.int32)
    # Call the skip function once so that `skip_state` has the right structure
    # from the very first state.
    _, skip_state = self._should_skip_update_fn(updates, gradient_step, params)
    init_state = MultiStepsState(
        mini_step=jnp.zeros([], dtype=jnp.int32),
        gradient_step=gradient_step,
        inner_opt_state=self._opt.init(params),
        acc_grads=updates,
        skip_state=skip_state)
    return init_state

  def update(self,
             updates: base.Updates,
             state: MultiStepsState,
             params: Optional[base.Params] = None
             ) -> Tuple[base.Updates, MultiStepsState]:
    """Accumulates gradients and proposes non-zero updates every `k_steps`."""
    k_steps = self._every_k_schedule(state.gradient_step)
    acc_grads = jax.tree_util.tree_map(
        functools.partial(self._acc_update, n_acc=state.mini_step),
        updates, state.acc_grads)

    should_skip_update, skip_state = self._should_skip_update_fn(
        updates, state.gradient_step, params)

    def final_step(args):
      """Applies the accumulated gradients and resets the mini-step counter."""
      del args
      final_updates, new_inner_state = self._opt.update(
          acc_grads, state.inner_opt_state, params=params)
      new_state = MultiStepsState(
          mini_step=jnp.zeros([], dtype=jnp.int32),
          gradient_step=numerics.safe_int32_increment(state.gradient_step),
          inner_opt_state=new_inner_state,
          acc_grads=_zeros_tree_like(acc_grads),
          skip_state=skip_state)
      return final_updates, new_state

    def mid_step(args):
      """Keeps accumulating and emits zero updates of the right shape/dtype."""
      del args
      # `eval_shape` gives the output shapes/dtypes without running the inner
      # update, so the zero updates match what `final_step` would produce.
      updates_shape_dtype, _ = jax.eval_shape(
          self._opt.update, acc_grads, state.inner_opt_state, params=params)
      mid_updates = jax.tree_util.tree_map(
          lambda sd: jnp.zeros(sd.shape, sd.dtype), updates_shape_dtype)
      new_state = MultiStepsState(
          mini_step=numerics.safe_int32_increment(state.mini_step),
          gradient_step=state.gradient_step,
          inner_opt_state=state.inner_opt_state,
          acc_grads=acc_grads,
          skip_state=skip_state)
      return mid_updates, new_state

    # NOTE: uses the modern `lax.cond(pred, true_fun, false_fun, *operands)`
    # signature; the legacy 5-argument form has been removed from JAX.
    new_updates, new_state = jax.lax.cond(
        state.mini_step < k_steps - 1, mid_step, final_step, None)

    if (should_skip_update.dtype, should_skip_update.shape) != (jnp.bool_, ()):
      raise ValueError(
          'The `should_skip_update_fn` function should return a boolean scalar '
          f'array, but it returned an array of dtype {should_skip_update.dtype}'
          f' and shape {should_skip_update.shape}')

    # State to keep when the mini-step is rejected: everything unchanged except
    # the skip diagnostics.
    multi_state_when_skip = MultiStepsState(
        mini_step=state.mini_step,
        gradient_step=state.gradient_step,
        inner_opt_state=state.inner_opt_state,
        acc_grads=state.acc_grads,
        skip_state=skip_state)
    zero_updates = jax.tree_util.tree_map(jnp.zeros_like, updates)
    # Bind the accepted results to fresh names so the lambdas below close over
    # stable values.
    accepted_updates, accepted_state = new_updates, new_state
    new_updates, new_state = jax.lax.cond(
        should_skip_update,
        lambda _: (zero_updates, multi_state_when_skip),
        lambda _: (accepted_updates, accepted_state),
        None)

    return new_updates, new_state

  def has_updated(self, state: MultiStepsState) -> Array:
    """True iff the last call to `update` applied the accumulated gradients."""
    return jnp.logical_and(state.mini_step == 0, state.gradient_step > 0)

  def gradient_transformation(self) -> base.GradientTransformation:
    """Exposes this wrapper as a plain `GradientTransformation` pair."""
    return base.GradientTransformation(init=self.init, update=self.update)
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
class MaskedState(NamedTuple):
  """Maintains inner transform state for masked transformations."""
  # State of the wrapped (inner) transformation, computed over the masked
  # subset of the parameters only.
  inner_state: Any
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
class MaskedNode(NamedTuple):
  """A node used to mask out unspecified parts of a tree.

  This node is ignored when mapping functions across the tree e.g. using
  `jax.tree_util.tree_map` since it is a container without children. It can
  therefore be used to mask out parts of a tree.
  """
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
def masked(
    inner: base.GradientTransformation,
    mask: Union[base.PyTree, Callable[[base.Params], base.PyTree]]
) -> base.GradientTransformation:
  """Mask updates so only some are transformed, the rest are passed through.

  For example, it is common to skip weight decay for BatchNorm scale and all
  bias parameters. In many networks, these are the only parameters with only
  one dimension. So, you may create a mask function to mask these out as
  follows::

    mask_fn = lambda p: jax.tree_util.tree_map(lambda x: x.ndim != 1, p)
    weight_decay = optax.masked(optax.add_decayed_weights(0.001), mask_fn)

  You may alternatively create the mask pytree upfront::

    mask = jax.tree_util.tree_map(lambda x: x.ndim != 1, params)
    weight_decay = optax.masked(optax.add_decayed_weights(0.001), mask)

  For the ``inner`` transform, state will only be stored for the parameters that
  have a mask value of ``True``.

  Args:
    inner: Inner transformation to mask.
    mask: a PyTree with same structure as (or a prefix of) the params PyTree, or
      a Callable that returns such a pytree given the params/updates. The leaves
      should be booleans, ``True`` for leaves/subtrees you want to apply the
      transformation to, and ``False`` for those you want to skip. The mask must
      be static for the gradient transformation to be jit-compilable.

  Returns:
    New GradientTransformation wrapping ``inner``.
  """

  def _resolve_mask(tree):
    """Evaluates a callable mask against `tree`, or returns the static mask."""
    return mask(tree) if callable(mask) else mask

  def _apply_mask(mask_tree, tree):
    """Replaces masked-out leaves/subtrees with empty `MaskedNode` containers."""
    return tree_map(lambda m, t: t if m else MaskedNode(), mask_tree, tree)

  def init_fn(params):
    mask_tree = _resolve_mask(params)
    return MaskedState(inner_state=inner.init(_apply_mask(mask_tree, params)))

  def update_fn(updates, state, params=None):
    mask_tree = _resolve_mask(updates)
    masked_updates = _apply_mask(mask_tree, updates)
    masked_params = None if params is None else _apply_mask(mask_tree, params)

    new_masked_updates, new_inner_state = inner.update(
        masked_updates, state.inner_state, masked_params)

    # Stitch the transformed (masked-in) updates back together with the
    # untouched (masked-out) originals.
    new_updates = tree_map(
        lambda m, new_u, old_u: new_u if m else old_u,
        mask_tree, new_masked_updates, updates)
    return new_updates, MaskedState(inner_state=new_inner_state)

  return base.GradientTransformation(init_fn, update_fn)
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
class MaybeUpdateState(NamedTuple):
  """Maintains inner transform state and adds a step counter."""
  # State of the wrapped (inner) transformation.
  inner_state: Any
  # Number of times `update` has been called (scalar int32 counter).
  step: Array
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
def maybe_update(
    inner: base.GradientTransformation,
    should_update_fn: Callable[[Array], Array]
) -> base.GradientTransformation:
  """Calls the inner update function only at certain steps.

  Creates a transformation wrapper which counts the number of times the `update`
  function has been called. This counter is passed to the `should_update_fn` to
  decide when to call the inner update function.

  When not calling the inner update function, the `updates` and the inner state
  are left untouched and just passed through. The step counter is increased
  regardless.

  Args:
    inner: the inner transformation.
    should_update_fn: this function takes in a step counter (array of shape []
      and dtype int32), and returns a boolean array of shape [].

  Returns:
    An `optax.GradientTransformation`.
  """

  def init_fn(params):
    return MaybeUpdateState(
        inner_state=inner.init(params), step=jnp.zeros([], dtype=jnp.int32))

  def update_fn(updates, state, params=None):

    def apply_inner(_):
      return inner.update(updates, state.inner_state, params)

    def pass_through(_):
      # Leave both the updates and the inner state untouched.
      return updates, state.inner_state

    new_updates, new_inner_state = lax.cond(
        should_update_fn(state.step), apply_inner, pass_through, operand=None)
    # The step counter advances on every call, applied or not (saturating to
    # avoid int32 overflow).
    next_step = numerics.safe_int32_increment(state.step)
    return new_updates, MaybeUpdateState(new_inner_state, next_step)

  return base.GradientTransformation(init_fn, update_fn)
|
lib/python3.10/site-packages/optax/experimental/__init__.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Experimental features in Optax.
|
| 16 |
+
|
| 17 |
+
Features may be removed or modified at any time.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
from optax._src.experimental.complex_valued import split_real_and_imaginary
|
| 21 |
+
from optax._src.experimental.complex_valued import SplitRealAndImaginaryState
|
| 22 |
+
from optax._src.experimental.extra_args import GradientTransformationWithExtraArgs
|
| 23 |
+
from optax._src.experimental.extra_args import named_chain
|
lib/python3.10/site-packages/optax/optax_test.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
import optax
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class OptaxTest(absltest.TestCase):
  """Test optax can be imported correctly."""

  def test_import(self):
    # Sanity-check that the package exposes its core public symbol; this
    # catches broken re-exports in optax/__init__.py.
    self.assertTrue(hasattr(optax, 'GradientTransformation'))


if __name__ == '__main__':
  absltest.main()
|
lib/python3.10/site-packages/pasta/__init__.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
"""Pasta enables AST-based transformations on python source code."""
|
| 3 |
+
# Copyright 2017 Google LLC
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# https://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
from pasta.base import annotate
|
| 18 |
+
from pasta.base import ast_utils
|
| 19 |
+
from pasta.base import codegen
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def parse(src):
  """Parses python source into an AST annotated with formatting information."""
  tree = ast_utils.parse(src)
  # Attach formatting metadata (whitespace, comments, ...) to each node so the
  # source can be regenerated faithfully by `dump`.
  annotate.AstAnnotator(src).visit(tree)
  return tree
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def dump(tree):
  """Regenerates python source code from an annotated AST."""
  return codegen.to_str(tree)
|
lib/python3.10/site-packages/pasta/augment/__init__.py
ADDED
|
File without changes
|
lib/python3.10/site-packages/pasta/augment/errors.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
"""Errors that can occur during augmentation."""
|
| 3 |
+
# Copyright 2017 Google LLC
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# https://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
from __future__ import absolute_import
|
| 18 |
+
from __future__ import division
|
| 19 |
+
from __future__ import print_function
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class InvalidAstError(Exception):
  """Occurs when the syntax tree does not meet some expected condition.

  Raised by augmentation routines that require the AST to be in a particular
  shape before they can safely transform it.
  """
|
lib/python3.10/site-packages/pasta/augment/import_utils.py
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
"""Functions for dealing with import statements."""
|
| 3 |
+
# Copyright 2017 Google LLC
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# https://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
from __future__ import absolute_import
|
| 18 |
+
from __future__ import division
|
| 19 |
+
from __future__ import print_function
|
| 20 |
+
|
| 21 |
+
import ast
|
| 22 |
+
import copy
|
| 23 |
+
import logging
|
| 24 |
+
|
| 25 |
+
from pasta.augment import errors
|
| 26 |
+
from pasta.base import ast_utils
|
| 27 |
+
from pasta.base import scope
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def add_import(tree, name_to_import, asname=None, from_import=True, merge_from_imports=True):
  """Adds an import to the module.

  This function will try to ensure not to create duplicate imports. If name_to_import is
  already imported, it will return the existing import. This is true even if asname is set
  (asname will be ignored, and the existing name will be returned).

  If the import would create a name that already exists in the scope given by tree, this
  function will "import as", and append "_x" to the asname where x is the smallest positive
  integer generating a unique name.

  Arguments:
    tree: (ast.Module) Module AST to modify.
    name_to_import: (string) The absolute name to import.
    asname: (string) The alias for the import ("import name_to_import as asname")
    from_import: (boolean) If True, import the name using an ImportFrom node.
    merge_from_imports: (boolean) If True, merge a newly inserted ImportFrom
      node into an existing ImportFrom node, if applicable.

  Returns:
    The name (as a string) that can be used to reference the imported name. This
    can be the fully-qualified name, the basename, or an alias name.
  """
  sc = scope.analyze(tree)

  # Don't add anything if it's already imported; reuse whichever existing
  # reference actually has a Name node bound to it.
  if name_to_import in sc.external_references:
    existing_ref = next((ref for ref in sc.external_references[name_to_import]
                         if ref.name_ref is not None), None)
    if existing_ref:
      return existing_ref.name_ref.id

  import_node = None

  def make_safe_alias_node(alias_name, asname):
    # Try to avoid name conflicts: keep appending "_<n>" until the bound name
    # is not already present in the module scope.
    # NOTE(review): when alias_name is dotted (plain `import a.b.c` with no
    # asname), a conflict produces asname 'a.b.c_1', which is not a valid
    # identifier — presumably conflicts on dotted names never occur; confirm.
    new_alias = ast.alias(name=alias_name, asname=asname)
    imported_name = asname or alias_name
    counter = 0
    while imported_name in sc.names:
      counter += 1
      imported_name = new_alias.asname = '%s_%d' % (asname or alias_name,
                                                    counter)
    return new_alias

  # Add an ImportFrom node if requested and possible (a bare name like 'foo'
  # cannot be expressed as a from-import, so it falls through to ast.Import).
  if from_import and '.' in name_to_import:
    from_module, alias_name = name_to_import.rsplit('.', 1)

    new_alias = make_safe_alias_node(alias_name, asname)

    if merge_from_imports:
      # Try to add to an existing ImportFrom from the same module
      existing_from_import = next(
          (node for node in tree.body if isinstance(node, ast.ImportFrom)
           and node.module == from_module and node.level == 0), None)
      if existing_from_import:
        existing_from_import.names.append(new_alias)
        return new_alias.asname or new_alias.name

    # Create a new node for this import
    import_node = ast.ImportFrom(module=from_module, names=[new_alias], level=0)

  # If not already created as an ImportFrom, create a normal Import node
  if not import_node:
    new_alias = make_safe_alias_node(alias_name=name_to_import, asname=asname)
    import_node = ast.Import(
        names=[new_alias])

  # Insert the node at the top of the module (after the docstring, if any)
  # and return the name the import binds in scope.
  tree.body.insert(1 if ast_utils.has_docstring(tree) else 0, import_node)
  return new_alias.asname or new_alias.name
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def split_import(sc, node, alias_to_remove):
  """Split an import node by moving the given imported alias into a new import.

  Arguments:
    sc: (scope.Scope) Scope computed on whole tree of the code being modified.
    node: (ast.Import|ast.ImportFrom) An import node to split.
    alias_to_remove: (ast.alias) The import alias node to remove. This must be a
      child of the given `node` argument.

  Returns:
    The newly created import node containing only `alias_to_remove`.

  Raises:
    errors.InvalidAstError: if `node` is not appropriately contained in the tree
      represented by the scope `sc`.
  """
  parent = sc.parent(node)
  # Locate which statement list of the parent actually holds this import.
  containing_list = next(
      (getattr(parent, attr) for attr in ('body', 'orelse', 'finalbody')
       if node in getattr(parent, attr, ())),
      None)
  if containing_list is None:
    raise errors.InvalidAstError('Unable to find list containing import %r on '
                                 'parent node %r' % (node, parent))

  # Duplicate the import, keep only the removed alias on the copy, and drop
  # that alias from the original so it appears exactly once overall.
  extracted = copy.deepcopy(node)
  extracted.names = [alias_to_remove]
  node.names.remove(alias_to_remove)

  # Insert the new import immediately after the original statement.
  containing_list.insert(containing_list.index(node) + 1, extracted)
  return extracted
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def get_unused_import_aliases(tree, sc=None):
  """Get the import aliases that aren't used.

  Arguments:
    tree: (ast.AST) An ast to find imports in.
    sc: A scope.Scope representing tree (generated from scratch if not
      provided).

  Returns:
    A set of ast.alias representing imported aliases that aren't referenced in
    the given tree.
  """
  if sc is None:
    sc = scope.analyze(tree)

  unused = set()
  for alias in (n for n in ast.walk(tree) if isinstance(n, ast.alias)):
    # The name the import binds is the asname when present, else the name.
    bound_name = alias.name if alias.asname is None else alias.asname
    scope_entry = sc.names.get(bound_name)
    if scope_entry is None:
      # This happens because of https://github.com/google/pasta/issues/32
      logging.warning('Imported name %s not found in scope (perhaps it\'s '
                      'imported dynamically)', bound_name)
    elif not scope_entry.reads:
      unused.add(alias)
  return unused
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def remove_import_alias_node(sc, node):
  """Remove an alias and if applicable remove their entire import.

  Arguments:
    sc: (scope.Scope) Scope computed on whole tree of the code being modified.
    node: (ast.Import|ast.ImportFrom|ast.alias) The node to remove.
  """
  owning_import = sc.parent(node)
  if len(owning_import.names) > 1:
    # Other aliases remain on the statement; drop just this one.
    ast_utils.remove_child(owning_import, node)
  else:
    # This alias is the entire import; remove the import statement itself
    # from whatever node contains it.
    ast_utils.remove_child(sc.parent(owning_import), owning_import)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
def remove_duplicates(tree, sc=None):
  """Remove duplicate imports, where it is safe to do so.

  This does NOT remove imports that create new aliases.

  Arguments:
    tree: (ast.AST) An ast to modify imports in.
    sc: A scope.Scope representing tree (generated from scratch if not
      provided).

  Returns:
    Whether any changes were made.
  """
  if sc is None:
    sc = scope.analyze(tree)

  def _alias_key(alias):
    # Build a key identifying both what is imported and the name it binds,
    # so `import a` and `import a as ax` are NOT considered duplicates.
    owner = sc.parent(alias)
    if isinstance(owner, ast.Import):
      target = alias.name
    else:
      prefix = '.' * owner.level
      if owner.module:
        target = '%s%s.%s' % (prefix, owner.module, alias.name)
      else:
        target = '%s%s' % (prefix, alias.name)
    return '%s:%s' % (target, alias.asname or alias.name)

  modified = False
  seen = set()
  for stmt in tree.body:
    if not isinstance(stmt, (ast.Import, ast.ImportFrom)):
      continue
    # Iterate a copy since removing an alias mutates stmt.names.
    for alias in list(stmt.names):
      key = _alias_key(alias)
      if key in seen:
        remove_import_alias_node(sc, alias)
        modified = True
      else:
        seen.add(key)
  return modified
|
lib/python3.10/site-packages/pasta/augment/import_utils_test.py
ADDED
|
@@ -0,0 +1,428 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
"""Tests for import_utils."""
|
| 3 |
+
# Copyright 2017 Google LLC
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# https://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
from __future__ import absolute_import
|
| 18 |
+
from __future__ import division
|
| 19 |
+
from __future__ import print_function
|
| 20 |
+
|
| 21 |
+
import ast
|
| 22 |
+
import traceback
|
| 23 |
+
import unittest
|
| 24 |
+
|
| 25 |
+
import pasta
|
| 26 |
+
from pasta.augment import import_utils
|
| 27 |
+
from pasta.base import ast_utils
|
| 28 |
+
from pasta.base import test_utils
|
| 29 |
+
from pasta.base import scope
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class SplitImportTest(test_utils.TestCase):
  """Tests for import_utils.split_import."""

  def test_split_normal_import(self):
    src = 'import aaa, bbb, ccc\n'
    t = ast.parse(src)
    import_node = t.body[0]
    sc = scope.analyze(t)
    import_utils.split_import(sc, import_node, import_node.names[1])

    self.assertEqual(2, len(t.body))
    self.assertEqual(ast.Import, type(t.body[1]))
    self.assertEqual([alias.name for alias in t.body[0].names], ['aaa', 'ccc'])
    self.assertEqual([alias.name for alias in t.body[1].names], ['bbb'])

  def test_split_from_import(self):
    src = 'from aaa import bbb, ccc, ddd\n'
    t = ast.parse(src)
    import_node = t.body[0]
    sc = scope.analyze(t)
    import_utils.split_import(sc, import_node, import_node.names[1])

    self.assertEqual(2, len(t.body))
    self.assertEqual(ast.ImportFrom, type(t.body[1]))
    self.assertEqual(t.body[0].module, 'aaa')
    self.assertEqual(t.body[1].module, 'aaa')
    self.assertEqual([alias.name for alias in t.body[0].names], ['bbb', 'ddd'])
    # Also verify the split-off import holds exactly the removed alias.
    self.assertEqual([alias.name for alias in t.body[1].names], ['ccc'])

  def test_split_imports_with_alias(self):
    src = 'import aaa as a, bbb as b, ccc as c\n'
    t = ast.parse(src)
    import_node = t.body[0]
    sc = scope.analyze(t)
    import_utils.split_import(sc, import_node, import_node.names[1])

    self.assertEqual(2, len(t.body))
    self.assertEqual([alias.name for alias in t.body[0].names], ['aaa', 'ccc'])
    self.assertEqual([alias.name for alias in t.body[1].names], ['bbb'])
    self.assertEqual(t.body[1].names[0].asname, 'b')

  def test_split_imports_multiple(self):
    src = 'import aaa, bbb, ccc\n'
    t = ast.parse(src)
    import_node = t.body[0]
    alias_bbb = import_node.names[1]
    alias_ccc = import_node.names[2]
    sc = scope.analyze(t)
    import_utils.split_import(sc, import_node, alias_bbb)
    import_utils.split_import(sc, import_node, alias_ccc)

    self.assertEqual(3, len(t.body))
    self.assertEqual([alias.name for alias in t.body[0].names], ['aaa'])
    self.assertEqual([alias.name for alias in t.body[1].names], ['ccc'])
    self.assertEqual([alias.name for alias in t.body[2].names], ['bbb'])

  def test_split_nested_imports(self):
    # Exercise split_import when the import lives in every kind of statement
    # list (body, orelse, finalbody) of every compound statement type.
    test_cases = (
        'def foo():\n {import_stmt}\n',
        'class Foo(object):\n {import_stmt}\n',
        'if foo:\n {import_stmt}\nelse:\n pass\n',
        'if foo:\n pass\nelse:\n {import_stmt}\n',
        'if foo:\n pass\nelif bar:\n {import_stmt}\n',
        'try:\n {import_stmt}\nexcept:\n pass\n',
        'try:\n pass\nexcept:\n {import_stmt}\n',
        'try:\n pass\nfinally:\n {import_stmt}\n',
        'for i in foo:\n {import_stmt}\n',
        'for i in foo:\n pass\nelse:\n {import_stmt}\n',
        'while foo:\n {import_stmt}\n',
    )

    for template in test_cases:
      # Format outside the try so `src` is always bound when building the
      # failure message below (previously a NameError risk).
      src = template.format(import_stmt='import aaa, bbb, ccc')
      try:
        t = ast.parse(src)
        sc = scope.analyze(t)
        import_node = ast_utils.find_nodes_by_type(t, ast.Import)[0]
        import_utils.split_import(sc, import_node, import_node.names[1])

        split_import_nodes = ast_utils.find_nodes_by_type(t, ast.Import)
        self.assertEqual(1, len(t.body))
        self.assertEqual(2, len(split_import_nodes))
        self.assertEqual([alias.name for alias in split_import_nodes[0].names],
                         ['aaa', 'ccc'])
        self.assertEqual([alias.name for alias in split_import_nodes[1].names],
                         ['bbb'])
      except Exception:
        # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed into a test failure message.
        self.fail('Failed while executing case:\n%s\nCaused by:\n%s' %
                  (src, traceback.format_exc()))
|
| 119 |
+
|
| 120 |
+
class GetUnusedImportsTest(test_utils.TestCase):
  """Tests for import_utils.get_unused_import_aliases."""
  # NOTE(review): assertItemsEqual is the Py2-era unittest API; presumably
  # test_utils.TestCase shims it on Python 3 (assertCountEqual) — confirm.

  def test_normal_imports(self):
    # `b` is imported but never referenced, so only its alias is unused.
    src = """\
import a
import b
a.foo()
"""
    tree = ast.parse(src)
    self.assertItemsEqual(import_utils.get_unused_import_aliases(tree),
                          [tree.body[1].names[0]])

  def test_import_from(self):
    # Only `a` (first from-import) goes unread.
    src = """\
from my_module import a
import b
from my_module import c
b.foo()
c.bar()
"""
    tree = ast.parse(src)
    self.assertItemsEqual(import_utils.get_unused_import_aliases(tree),
                          [tree.body[0].names[0]])

  def test_import_from_alias(self):
    # Unused alias inside a multi-name from-import.
    src = """\
from my_module import a, b
b.foo()
"""
    tree = ast.parse(src)
    self.assertItemsEqual(import_utils.get_unused_import_aliases(tree),
                          [tree.body[0].names[0]])

  def test_import_asname(self):
    # Usage is tracked by the bound asname, not the imported name.
    src = """\
from my_module import a as a_mod, b as unused_b_mod
import c as c_mod, d as unused_d_mod
a_mod.foo()
c_mod.foo()
"""
    tree = ast.parse(src)
    self.assertItemsEqual(import_utils.get_unused_import_aliases(tree),
                          [tree.body[0].names[1],
                           tree.body[1].names[1]])

  def test_dynamic_import(self):
    # For now we just don't want to error out on these, longer
    # term we want to do the right thing (see
    # https://github.com/google/pasta/issues/32)
    src = """\
def foo():
  import bar
"""
    tree = ast.parse(src)
    self.assertItemsEqual(import_utils.get_unused_import_aliases(tree),
                          [])
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
class RemoveImportTest(test_utils.TestCase):
  """Tests for import_utils.remove_import_alias_node."""
  # Note that we don't test any 'asname' examples but as far as remove_import_alias_node
  # is concerned its not a different case because its still just an alias type
  # and we don't care about the internals of the alias we're trying to remove.
  def test_remove_just_alias(self):
    # Removing one alias of a multi-alias import keeps the statement.
    src = "import a, b"
    tree = ast.parse(src)
    sc = scope.analyze(tree)

    unused_b_node = tree.body[0].names[1]

    import_utils.remove_import_alias_node(sc, unused_b_node)

    self.assertEqual(len(tree.body), 1)
    self.assertEqual(type(tree.body[0]), ast.Import)
    self.assertEqual(len(tree.body[0].names), 1)
    self.assertEqual(tree.body[0].names[0].name, 'a')

  def test_remove_just_alias_import_from(self):
    # Same as above for an ImportFrom statement.
    src = "from m import a, b"
    tree = ast.parse(src)
    sc = scope.analyze(tree)

    unused_b_node = tree.body[0].names[1]

    import_utils.remove_import_alias_node(sc, unused_b_node)

    self.assertEqual(len(tree.body), 1)
    self.assertEqual(type(tree.body[0]), ast.ImportFrom)
    self.assertEqual(len(tree.body[0].names), 1)
    self.assertEqual(tree.body[0].names[0].name, 'a')

  def test_remove_full_import(self):
    # Removing the only alias removes the whole Import statement.
    src = "import a"
    tree = ast.parse(src)
    sc = scope.analyze(tree)

    a_node = tree.body[0].names[0]

    import_utils.remove_import_alias_node(sc, a_node)

    self.assertEqual(len(tree.body), 0)

  def test_remove_full_importfrom(self):
    # Removing the only alias removes the whole ImportFrom statement.
    src = "from m import a"
    tree = ast.parse(src)
    sc = scope.analyze(tree)

    a_node = tree.body[0].names[0]

    import_utils.remove_import_alias_node(sc, a_node)

    self.assertEqual(len(tree.body), 0)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
class AddImportTest(test_utils.TestCase):
  """Tests for import_utils.add_import."""

  def test_add_normal_import(self):
    tree = ast.parse('')
    self.assertEqual('a.b.c',
                     import_utils.add_import(tree, 'a.b.c', from_import=False))
    self.assertEqual('import a.b.c\n', pasta.dump(tree))

  def test_add_normal_import_with_asname(self):
    tree = ast.parse('')
    self.assertEqual(
        'd',
        import_utils.add_import(tree, 'a.b.c', asname='d', from_import=False)
    )
    self.assertEqual('import a.b.c as d\n', pasta.dump(tree))

  def test_add_from_import(self):
    tree = ast.parse('')
    self.assertEqual('c',
                     import_utils.add_import(tree, 'a.b.c', from_import=True))
    self.assertEqual('from a.b import c\n', pasta.dump(tree))

  def test_add_from_import_with_asname(self):
    tree = ast.parse('')
    self.assertEqual(
        'd',
        import_utils.add_import(tree, 'a.b.c', asname='d', from_import=True)
    )
    self.assertEqual('from a.b import c as d\n', pasta.dump(tree))

  def test_add_single_name_from_import(self):
    # A dotless name cannot be a from-import; falls back to plain import.
    tree = ast.parse('')
    self.assertEqual('foo',
                     import_utils.add_import(tree, 'foo', from_import=True))
    self.assertEqual('import foo\n', pasta.dump(tree))

  def test_add_single_name_from_import_with_asname(self):
    tree = ast.parse('')
    self.assertEqual(
        'bar',
        import_utils.add_import(tree, 'foo', asname='bar', from_import=True)
    )
    self.assertEqual('import foo as bar\n', pasta.dump(tree))

  def test_add_existing_import(self):
    # Already imported: nothing is added, the existing name is returned.
    tree = ast.parse('from a.b import c')
    self.assertEqual('c', import_utils.add_import(tree, 'a.b.c'))
    self.assertEqual('from a.b import c\n', pasta.dump(tree))

  def test_add_existing_import_aliased(self):
    tree = ast.parse('from a.b import c as d')
    self.assertEqual('d', import_utils.add_import(tree, 'a.b.c'))
    self.assertEqual('from a.b import c as d\n', pasta.dump(tree))

  def test_add_existing_import_aliased_with_asname(self):
    # The requested asname is ignored when the name is already imported.
    tree = ast.parse('from a.b import c as d')
    self.assertEqual('d', import_utils.add_import(tree, 'a.b.c', asname='e'))
    self.assertEqual('from a.b import c as d\n', pasta.dump(tree))

  def test_add_existing_import_normal_import(self):
    tree = ast.parse('import a.b.c')
    self.assertEqual('a.b',
                     import_utils.add_import(tree, 'a.b', from_import=False))
    self.assertEqual('import a.b.c\n', pasta.dump(tree))

  def test_add_existing_import_normal_import_aliased(self):
    tree = ast.parse('import a.b.c as d')
    self.assertEqual('a.b',
                     import_utils.add_import(tree, 'a.b', from_import=False))
    self.assertEqual('d',
                     import_utils.add_import(tree, 'a.b.c', from_import=False))
    self.assertEqual('import a.b\nimport a.b.c as d\n', pasta.dump(tree))

  def test_add_import_with_conflict(self):
    # A scope conflict forces an auto-generated "_1" suffixed alias.
    tree = ast.parse('def c(): pass\n')
    self.assertEqual('c_1',
                     import_utils.add_import(tree, 'a.b.c', from_import=True))
    self.assertEqual(
        'from a.b import c as c_1\ndef c():\n  pass\n', pasta.dump(tree))

  def test_add_import_with_asname_with_conflict(self):
    tree = ast.parse('def c(): pass\n')
    self.assertEqual('c_1',
                     import_utils.add_import(tree, 'a.b', asname='c', from_import=True))
    self.assertEqual(
        'from a import b as c_1\ndef c():\n  pass\n', pasta.dump(tree))

  def test_merge_from_import(self):
    tree = ast.parse('from a.b import c')

    # x is explicitly not merged
    self.assertEqual('x', import_utils.add_import(tree, 'a.b.x',
                                                  merge_from_imports=False))
    self.assertEqual('from a.b import x\nfrom a.b import c\n',
                     pasta.dump(tree))

    # y is allowed to be merged and is grouped into the first matching import
    self.assertEqual('y', import_utils.add_import(tree, 'a.b.y',
                                                  merge_from_imports=True))
    self.assertEqual('from a.b import x, y\nfrom a.b import c\n',
                     pasta.dump(tree))

  def test_add_import_after_docstring(self):
    # New imports must be inserted below a module docstring, not above it.
    tree = ast.parse('\'Docstring.\'')
    self.assertEqual('a', import_utils.add_import(tree, 'a'))
    self.assertEqual('\'Docstring.\'\nimport a\n', pasta.dump(tree))
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class RemoveDuplicatesTest(test_utils.TestCase):
  """Tests for import_utils.remove_duplicates."""
  def test_remove_duplicates(self):
    # The second `import b` is an exact duplicate and is dropped.
    src = """
import a
import b
import c
import b
import d
"""
    tree = ast.parse(src)
    self.assertTrue(import_utils.remove_duplicates(tree))

    self.assertEqual(len(tree.body), 4)
    self.assertEqual(tree.body[0].names[0].name, 'a')
    self.assertEqual(tree.body[1].names[0].name, 'b')
    self.assertEqual(tree.body[2].names[0].name, 'c')
    self.assertEqual(tree.body[3].names[0].name, 'd')

  def test_remove_duplicates_multiple(self):
    # Duplicate aliases are removed from within multi-alias statements.
    src = """
import a, b
import b, c
import d, a, e, f
"""
    tree = ast.parse(src)
    self.assertTrue(import_utils.remove_duplicates(tree))

    self.assertEqual(len(tree.body), 3)
    self.assertEqual(len(tree.body[0].names), 2)
    self.assertEqual(tree.body[0].names[0].name, 'a')
    self.assertEqual(tree.body[0].names[1].name, 'b')
    self.assertEqual(len(tree.body[1].names), 1)
    self.assertEqual(tree.body[1].names[0].name, 'c')
    self.assertEqual(len(tree.body[2].names), 3)
    self.assertEqual(tree.body[2].names[0].name, 'd')
    self.assertEqual(tree.body[2].names[1].name, 'e')
    self.assertEqual(tree.body[2].names[2].name, 'f')

  def test_remove_duplicates_empty_node(self):
    # When every alias of a statement is a duplicate, the whole statement goes.
    src = """
import a, b, c
import b, c
"""
    tree = ast.parse(src)
    self.assertTrue(import_utils.remove_duplicates(tree))

    self.assertEqual(len(tree.body), 1)
    self.assertEqual(len(tree.body[0].names), 3)
    self.assertEqual(tree.body[0].names[0].name, 'a')
    self.assertEqual(tree.body[0].names[1].name, 'b')
    self.assertEqual(tree.body[0].names[2].name, 'c')

  def test_remove_duplicates_normal_and_from(self):
    # `import a.b` and `from a import b` bind different names; not duplicates.
    src = """
import a.b
from a import b
"""
    tree = ast.parse(src)
    self.assertFalse(import_utils.remove_duplicates(tree))
    self.assertEqual(len(tree.body), 2)

  def test_remove_duplicates_aliases(self):
    # Imports that bind distinct asnames are kept; only the exact
    # (name, asname) duplicate is removed.
    src = """
import a
import a as ax
import a as ax2
import a as ax
"""
    tree = ast.parse(src)
    self.assertTrue(import_utils.remove_duplicates(tree))
    self.assertEqual(len(tree.body), 3)
    self.assertEqual(tree.body[0].names[0].asname, None)
    self.assertEqual(tree.body[1].names[0].asname, 'ax')
    self.assertEqual(tree.body[2].names[0].asname, 'ax2')
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
def suite():
  """Build a TestSuite aggregating every test case in this module.

  Returns:
    A unittest.TestSuite containing all tests from all test case classes.
  """
  # unittest.makeSuite is deprecated since 3.11 and removed in 3.13;
  # TestLoader.loadTestsFromTestCase is the supported equivalent.
  loader = unittest.TestLoader()
  result = unittest.TestSuite()
  for case in (SplitImportTest, GetUnusedImportsTest, RemoveImportTest,
               AddImportTest, RemoveDuplicatesTest):
    result.addTests(loader.loadTestsFromTestCase(case))
  return result


if __name__ == '__main__':
  unittest.main()
|
lib/python3.10/site-packages/pasta/augment/inline.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
"""Inline constants in a python module."""
|
| 3 |
+
# Copyright 2017 Google LLC
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# https://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
from __future__ import absolute_import
|
| 18 |
+
from __future__ import division
|
| 19 |
+
from __future__ import print_function
|
| 20 |
+
|
| 21 |
+
import ast
|
| 22 |
+
import copy
|
| 23 |
+
|
| 24 |
+
from pasta.base import ast_utils
|
| 25 |
+
from pasta.base import scope
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class InlineError(Exception):
  """Raised when a name cannot be inlined as a module-level constant."""
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def inline_name(t, name):
  """Inline a constant name into a module.

  Every read of `name` in the module is replaced by a deep copy of the value
  it was assigned, and the defining assignment is removed (or just the
  matching target is dropped when the assignment has several targets).

  Arguments:
    t: (ast.Module) Module AST to modify in place.
    name: (string) The top-level constant name to inline.

  Raises:
    InlineError: if `name` is not a constant assigned at module level.
  """
  sc = scope.analyze(t)
  entry = sc.names[name]
  definition = entry.definition

  # Only a plain Name binding can be inlined (not a FunctionDef, etc.)
  if not isinstance(definition, ast.Name):
    raise InlineError('%r is not a constant; it has type %r' % (
        name, type(definition)))

  assignment = sc.parent(definition)
  if not isinstance(assignment, ast.Assign):
    raise InlineError('%r is not declared in an assignment' % name)

  inlined_value = assignment.value
  if not isinstance(sc.parent(assignment), ast.Module):
    raise InlineError('%r is not a top-level name' % name)

  # If the name is written anywhere else in this module, it is not constant.
  if any(isinstance(getattr(ref, 'ctx', None), ast.Store)
         for ref in entry.reads):
    raise InlineError('%r is not a constant' % name)

  # Substitute a fresh copy of the value for every read of the name.
  for ref in entry.reads:
    ast_utils.replace_child(sc.parent(ref), ref, copy.deepcopy(inlined_value))

  # Finally remove the defining assignment: the whole statement when it had a
  # single target, otherwise only the target matching `name`.
  if len(assignment.targets) == 1:
    ast_utils.remove_child(sc.parent(assignment), assignment)
  else:
    assignment.targets = [
        tgt for tgt in assignment.targets
        if not (isinstance(tgt, ast.Name) and tgt.id == name)]
|
lib/python3.10/site-packages/pasta/augment/inline_test.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
"""Tests for augment.inline."""
|
| 3 |
+
# Copyright 2017 Google LLC
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# https://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
from __future__ import absolute_import
|
| 18 |
+
from __future__ import division
|
| 19 |
+
from __future__ import print_function
|
| 20 |
+
|
| 21 |
+
import ast
|
| 22 |
+
import textwrap
|
| 23 |
+
import unittest
|
| 24 |
+
|
| 25 |
+
from pasta.augment import inline
|
| 26 |
+
from pasta.base import test_utils
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class InlineTest(test_utils.TestCase):
  """Tests for inline.inline_name."""
  # NOTE: assertRaisesRegexp is a deprecated alias that was removed in
  # Python 3.12; this class uses assertRaisesRegex instead.

  def test_inline_simple(self):
    """A top-level constant is substituted and its assignment removed."""
    src = 'x = 1\na = x\n'
    t = ast.parse(src)
    inline.inline_name(t, 'x')
    self.checkAstsEqual(t, ast.parse('a = 1\n'))

  def test_inline_multiple_targets(self):
    """Only the inlined target is dropped from a multi-target assignment."""
    src = 'x = y = z = 1\na = x + y\n'
    t = ast.parse(src)
    inline.inline_name(t, 'y')
    self.checkAstsEqual(t, ast.parse('x = z = 1\na = x + 1\n'))

  def test_inline_multiple_reads(self):
    """Every read site of the constant is replaced."""
    src = textwrap.dedent('''\
        CONSTANT = "foo"
        def a(b=CONSTANT):
          return b == CONSTANT
        ''')
    expected = textwrap.dedent('''\
        def a(b="foo"):
          return b == "foo"
        ''')
    t = ast.parse(src)
    inline.inline_name(t, 'CONSTANT')
    self.checkAstsEqual(t, ast.parse(expected))

  def test_inline_non_constant_fails(self):
    """A name that is written more than once cannot be inlined."""
    src = textwrap.dedent('''\
        NOT_A_CONSTANT = "foo"
        NOT_A_CONSTANT += "bar"
        ''')
    t = ast.parse(src)
    with self.assertRaisesRegex(inline.InlineError,
                                '\'NOT_A_CONSTANT\' is not a constant'):
      inline.inline_name(t, 'NOT_A_CONSTANT')

  def test_inline_function_fails(self):
    """Function definitions are not inlinable constants."""
    src = 'def func(): pass\nfunc()\n'
    t = ast.parse(src)

    with self.assertRaisesRegex(
        inline.InlineError,
        '\'func\' is not a constant; it has type %r' % ast.FunctionDef):
      inline.inline_name(t, 'func')

  def test_inline_conditional_fails(self):
    """A name assigned inside a conditional is not a top-level name."""
    src = 'if define:\n  x = 1\na = x\n'
    t = ast.parse(src)
    with self.assertRaisesRegex(inline.InlineError,
                                '\'x\' is not a top-level name'):
      inline.inline_name(t, 'x')

  def test_inline_non_assign_fails(self):
    """Tuple-unpacking targets are not plain assignments."""
    src = 'CONSTANT1, CONSTANT2 = values'
    t = ast.parse(src)
    with self.assertRaisesRegex(
        inline.InlineError, '\'CONSTANT1\' is not declared in an assignment'):
      inline.inline_name(t, 'CONSTANT1')
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def suite():
  """Build the test suite for this module."""
  result = unittest.TestSuite()
  # unittest.makeSuite was deprecated in Python 3.11 and removed in 3.13;
  # use a TestLoader explicitly instead.
  result.addTests(
      unittest.TestLoader().loadTestsFromTestCase(InlineTest))
  return result


if __name__ == '__main__':
  unittest.main()
|
lib/python3.10/site-packages/pasta/augment/rename.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
"""Rename names in a python module."""
|
| 3 |
+
# Copyright 2017 Google LLC
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# https://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
from __future__ import absolute_import
|
| 18 |
+
from __future__ import division
|
| 19 |
+
from __future__ import print_function
|
| 20 |
+
|
| 21 |
+
import ast
|
| 22 |
+
import six
|
| 23 |
+
|
| 24 |
+
from pasta.augment import import_utils
|
| 25 |
+
from pasta.base import ast_utils
|
| 26 |
+
from pasta.base import scope
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def rename_external(t, old_name, new_name):
  """Rename an imported name in a module.

  This will rewrite all import statements in `tree` that reference the old
  module as well as any names in `tree` which reference the imported name. This
  may introduce new import statements, but only if necessary.

  For example, to move and rename the module `foo.bar.utils` to `foo.bar_utils`:
  > rename_external(tree, 'foo.bar.utils', 'foo.bar_utils')

  - import foo.bar.utils
  + import foo.bar_utils

  - from foo.bar import utils
  + from foo import bar_utils

  - from foo.bar import logic, utils
  + from foo.bar import logic
  + from foo import bar_utils

  Arguments:
    t: (ast.Module) Module syntax tree to perform the rename in. This will be
      updated as a result of this function call with all affected nodes changed
      and potentially new Import/ImportFrom nodes added.
    old_name: (string) Fully-qualified path of the name to replace.
    new_name: (string) Fully-qualified path of the name to update to.

  Returns:
    True if any changes were made, False otherwise.
  """
  sc = scope.analyze(t)

  if old_name not in sc.external_references:
    return False

  has_changed = False
  # Maps local (as-seen-in-code) old names to their replacements, so that
  # read sites can be rewritten after the imports are updated.
  renames = {}
  # ImportFrom nodes already rewritten; an ImportFrom may be reached both
  # directly and through one of its alias children, and must only be
  # rewritten once.
  already_changed = []
  for ref in sc.external_references[old_name]:
    if isinstance(ref.node, ast.alias):
      parent = sc.parent(ref.node)
      # An alias may be the most specific reference to an imported name, but
      # if it is a child of an ImportFrom, the ImportFrom node's module may
      # also need to be updated.
      if isinstance(parent, ast.ImportFrom) and parent not in already_changed:
        assert _rename_name_in_importfrom(sc, parent, old_name, new_name)
        renames[old_name.rsplit('.', 1)[-1]] = new_name.rsplit('.', 1)[-1]
        already_changed.append(parent)
      else:
        # Plain `import a.b.c` alias: rewrite the matching prefix of the
        # dotted name in place.
        ref.node.name = new_name + ref.node.name[len(old_name):]
        if not ref.node.asname:
          # No `as` alias, so reads use the dotted name itself.
          renames[old_name] = new_name
      has_changed = True
    elif isinstance(ref.node, ast.ImportFrom):
      if ref.node not in already_changed:
        assert _rename_name_in_importfrom(sc, ref.node, old_name, new_name)
        renames[old_name.rsplit('.', 1)[-1]] = new_name.rsplit('.', 1)[-1]
        already_changed.append(ref.node)
      has_changed = True

  # Rewrite all read sites that referred to the renamed names.
  for rename_old, rename_new in six.iteritems(renames):
    _rename_reads(sc, t, rename_old, rename_new)
  return has_changed
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _rename_name_in_importfrom(sc, node, old_name, new_name):
|
| 95 |
+
if old_name == new_name:
|
| 96 |
+
return False
|
| 97 |
+
|
| 98 |
+
module_parts = node.module.split('.')
|
| 99 |
+
old_parts = old_name.split('.')
|
| 100 |
+
new_parts = new_name.split('.')
|
| 101 |
+
|
| 102 |
+
# If just the module is changing, rename it
|
| 103 |
+
if module_parts[:len(old_parts)] == old_parts:
|
| 104 |
+
node.module = '.'.join(new_parts + module_parts[len(old_parts):])
|
| 105 |
+
return True
|
| 106 |
+
|
| 107 |
+
# Find the alias node to be changed
|
| 108 |
+
for alias_to_change in node.names:
|
| 109 |
+
if alias_to_change.name == old_parts[-1]:
|
| 110 |
+
break
|
| 111 |
+
else:
|
| 112 |
+
return False
|
| 113 |
+
|
| 114 |
+
alias_to_change.name = new_parts[-1]
|
| 115 |
+
|
| 116 |
+
# Split the import if the package has changed
|
| 117 |
+
if module_parts != new_parts[:-1]:
|
| 118 |
+
if len(node.names) > 1:
|
| 119 |
+
new_import = import_utils.split_import(sc, node, alias_to_change)
|
| 120 |
+
new_import.module = '.'.join(new_parts[:-1])
|
| 121 |
+
else:
|
| 122 |
+
node.module = '.'.join(new_parts[:-1])
|
| 123 |
+
|
| 124 |
+
return True
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def _rename_reads(sc, t, old_name, new_name):
  """Updates all locations in the module where the given name is read.

  Arguments:
    sc: (scope.Scope) Scope to work in. This should be the scope of `t`.
    t: (ast.AST) The AST to perform updates in.
    old_name: (string) Dotted name to update.
    new_name: (string) Dotted name to replace it with.

  Returns:
    True if any changes were made, False otherwise.
  """
  # Walk the dotted path through the scope's name table; a missing segment
  # means the name is never referenced in this module.
  parts = old_name.split('.')
  try:
    resolved = sc.names[parts[0]]
    for segment in parts[1:]:
      resolved = resolved.attrs[segment]
  except KeyError:
    return False

  changed = False
  for read in resolved.reads:
    if not isinstance(read, (ast.Name, ast.Attribute)):
      continue
    # Parse the replacement dotted name into a fresh expression node and
    # splice it in where the old reference was.
    replacement = ast.parse(new_name).body[0].value
    ast_utils.replace_child(sc.parent(read), read, replacement)
    changed = True

  return changed
|
lib/python3.10/site-packages/pasta/augment/rename_test.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
"""Tests for augment.rename."""
|
| 3 |
+
# Copyright 2017 Google LLC
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# https://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
from __future__ import absolute_import
|
| 18 |
+
from __future__ import division
|
| 19 |
+
from __future__ import print_function
|
| 20 |
+
|
| 21 |
+
import ast
|
| 22 |
+
import unittest
|
| 23 |
+
|
| 24 |
+
from pasta.augment import rename
|
| 25 |
+
from pasta.base import scope
|
| 26 |
+
from pasta.base import test_utils
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class RenameTest(test_utils.TestCase):
  """Tests for rename.rename_external and rename._rename_reads."""

  def test_rename_external_in_import(self):
    """Renaming a prefix, a full dotted import, and a non-match."""
    src = 'import aaa.bbb.ccc\naaa.bbb.ccc.foo()'
    t = ast.parse(src)
    self.assertTrue(rename.rename_external(t, 'aaa.bbb', 'xxx.yyy'))
    self.checkAstsEqual(t, ast.parse('import xxx.yyy.ccc\nxxx.yyy.ccc.foo()'))

    t = ast.parse(src)
    self.assertTrue(rename.rename_external(t, 'aaa.bbb.ccc', 'xxx.yyy'))
    self.checkAstsEqual(t, ast.parse('import xxx.yyy\nxxx.yyy.foo()'))

    # 'bbb' alone is not an external reference in this module.
    t = ast.parse(src)
    self.assertFalse(rename.rename_external(t, 'bbb', 'xxx.yyy'))
    self.checkAstsEqual(t, ast.parse(src))

  def test_rename_external_in_import_with_asname(self):
    """An `as` alias keeps its local name; only the import path changes."""
    src = 'import aaa.bbb.ccc as ddd\nddd.foo()'
    t = ast.parse(src)
    self.assertTrue(rename.rename_external(t, 'aaa.bbb', 'xxx.yyy'))
    self.checkAstsEqual(t, ast.parse('import xxx.yyy.ccc as ddd\nddd.foo()'))

  def test_rename_external_in_import_multiple_aliases(self):
    """All matching aliases in one import statement are rewritten."""
    src = 'import aaa, aaa.bbb, aaa.bbb.ccc'
    t = ast.parse(src)
    self.assertTrue(rename.rename_external(t, 'aaa.bbb', 'xxx.yyy'))
    self.checkAstsEqual(t, ast.parse('import aaa, xxx.yyy, xxx.yyy.ccc'))

  def test_rename_external_in_importfrom(self):
    """Renaming the module of a from-import, and a non-match."""
    src = 'from aaa.bbb.ccc import ddd\nddd.foo()'
    t = ast.parse(src)
    self.assertTrue(rename.rename_external(t, 'aaa.bbb', 'xxx.yyy'))
    self.checkAstsEqual(t, ast.parse('from xxx.yyy.ccc import ddd\nddd.foo()'))

    t = ast.parse(src)
    self.assertTrue(rename.rename_external(t, 'aaa.bbb.ccc', 'xxx.yyy'))
    self.checkAstsEqual(t, ast.parse('from xxx.yyy import ddd\nddd.foo()'))

    t = ast.parse(src)
    self.assertFalse(rename.rename_external(t, 'bbb', 'xxx.yyy'))
    self.checkAstsEqual(t, ast.parse(src))

  def test_rename_external_in_importfrom_alias(self):
    """Renaming the imported alias itself also updates read sites."""
    src = 'from aaa.bbb import ccc\nccc.foo()'
    t = ast.parse(src)
    self.assertTrue(rename.rename_external(t, 'aaa.bbb.ccc', 'xxx.yyy'))
    self.checkAstsEqual(t, ast.parse('from xxx import yyy\nyyy.foo()'))

  def test_rename_external_in_importfrom_alias_with_asname(self):
    """An `as` alias on a from-import keeps its local name."""
    src = 'from aaa.bbb import ccc as abc\nabc.foo()'
    t = ast.parse(src)
    self.assertTrue(rename.rename_external(t, 'aaa.bbb.ccc', 'xxx.yyy'))
    self.checkAstsEqual(t, ast.parse('from xxx import yyy as abc\nabc.foo()'))

  def test_rename_reads_name(self):
    """A bare Name read is replaced."""
    src = 'aaa.bbb()'
    t = ast.parse(src)
    sc = scope.analyze(t)
    self.assertTrue(rename._rename_reads(sc, t, 'aaa', 'xxx'))
    self.checkAstsEqual(t, ast.parse('xxx.bbb()'))

  def test_rename_reads_name_as_attribute(self):
    """A Name can be replaced by a dotted Attribute expression."""
    src = 'aaa.bbb()'
    t = ast.parse(src)
    sc = scope.analyze(t)
    rename._rename_reads(sc, t, 'aaa', 'xxx.yyy')
    self.checkAstsEqual(t, ast.parse('xxx.yyy.bbb()'))

  def test_rename_reads_attribute(self):
    """A dotted Attribute read is replaced."""
    src = 'aaa.bbb.ccc()'
    t = ast.parse(src)
    sc = scope.analyze(t)
    rename._rename_reads(sc, t, 'aaa.bbb', 'xxx.yyy')
    self.checkAstsEqual(t, ast.parse('xxx.yyy.ccc()'))

  def test_rename_reads_noop(self):
    """Names that do not resolve in the module leave the tree unchanged."""
    src = 'aaa.bbb.ccc()'
    t = ast.parse(src)
    sc = scope.analyze(t)
    rename._rename_reads(sc, t, 'aaa.bbb.ccc.ddd', 'xxx.yyy')
    rename._rename_reads(sc, t, 'bbb.aaa', 'xxx.yyy')
    self.checkAstsEqual(t, ast.parse(src))
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def suite():
  """Build the test suite for this module."""
  result = unittest.TestSuite()
  # unittest.makeSuite was deprecated in Python 3.11 and removed in 3.13;
  # use a TestLoader explicitly instead.
  result.addTests(
      unittest.TestLoader().loadTestsFromTestCase(RenameTest))
  return result


if __name__ == '__main__':
  unittest.main()
|
lib/python3.10/site-packages/pasta/base/__init__.py
ADDED
|
File without changes
|
lib/python3.10/site-packages/pasta/base/annotate.py
ADDED
|
@@ -0,0 +1,1543 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
"""Annotate python syntax trees with formatting from the source file."""
|
| 3 |
+
# Copyright 2017 Google LLC
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# https://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
from __future__ import absolute_import
|
| 18 |
+
from __future__ import division
|
| 19 |
+
from __future__ import print_function
|
| 20 |
+
|
| 21 |
+
import abc
|
| 22 |
+
import ast
|
| 23 |
+
import contextlib
|
| 24 |
+
import functools
|
| 25 |
+
import itertools
|
| 26 |
+
import six
|
| 27 |
+
from six.moves import zip
|
| 28 |
+
import sys
|
| 29 |
+
|
| 30 |
+
from pasta.base import ast_constants
|
| 31 |
+
from pasta.base import ast_utils
|
| 32 |
+
from pasta.base import formatting as fmt
|
| 33 |
+
from pasta.base import token_generator
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# ==============================================================================
|
| 37 |
+
# == Helper functions for decorating nodes with prefix + suffix ==
|
| 38 |
+
# ==============================================================================
|
| 39 |
+
|
| 40 |
+
def _gen_wrapper(f, scope=True, prefix=True, suffix=True, max_suffix_lines=None,
|
| 41 |
+
semicolon=False, comment=False, statement=False):
|
| 42 |
+
@contextlib.wraps(f)
|
| 43 |
+
def wrapped(self, node, *args, **kwargs):
|
| 44 |
+
with (self.scope(node, trailing_comma=False) if scope else _noop_context()):
|
| 45 |
+
if prefix:
|
| 46 |
+
self.prefix(node, default=self._indent if statement else '')
|
| 47 |
+
f(self, node, *args, **kwargs)
|
| 48 |
+
if suffix:
|
| 49 |
+
self.suffix(node, max_lines=max_suffix_lines, semicolon=semicolon,
|
| 50 |
+
comment=comment, default='\n' if statement else '')
|
| 51 |
+
return wrapped
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@contextlib.contextmanager
def _noop_context():
  """A do-nothing context manager, used when no scope should be entered."""
  yield
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def expression(f):
  """Decorates a function where the node is an expression.

  An expression's trailing whitespace may not span any newline
  (max_suffix_lines=0).
  """
  return _gen_wrapper(f, max_suffix_lines=0)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def fstring_expression(f):
  """Decorates a function where the node is a FormattedValue in an fstring.

  No scope is entered; only the prefix and suffix whitespace are tracked.
  """
  return _gen_wrapper(f, scope=False)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def space_around(f):
  """Decorates a function where the node has whitespace prefix and suffix."""
  return _gen_wrapper(f, scope=False)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def space_left(f):
  """Decorates a function where the node has whitespace prefix only."""
  return _gen_wrapper(f, scope=False, suffix=False)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def statement(f):
  """Decorates a function where the node is a statement.

  A statement's suffix spans at most one line and may include a trailing
  semicolon and comment; the default prefix is the current indentation and
  the default suffix is a newline.
  """
  return _gen_wrapper(f, scope=False, max_suffix_lines=1, semicolon=True,
                      comment=True, statement=True)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def module(f):
  """Special decorator for the module node.

  No scope is entered; trailing comments are included in the suffix.
  """
  return _gen_wrapper(f, scope=False, comment=True)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def block_statement(f):
  """Decorates a function where the node is a statement with children.

  Accounts for the statement's prefix, then — if the visitor supports
  `block_suffix` — the trailing formatting of the whole indented block,
  otherwise a plain suffix with a possible trailing comment.
  """
  # Bug fix: the original used `@contextlib.wraps(f)`, but contextlib has no
  # `wraps` attribute; the correct decorator is functools.wraps.
  @functools.wraps(f)
  def wrapped(self, node, *args, **kwargs):
    self.prefix(node, default=self._indent)
    f(self, node, *args, **kwargs)
    if hasattr(self, 'block_suffix'):
      last_child = ast_utils.get_last_child(node)
      # Workaround for ast.Module which does not have a lineno
      if last_child and last_child.lineno != getattr(node, 'lineno', 0):
        indent = (fmt.get(last_child, 'prefix') or '\n').splitlines()[-1]
        self.block_suffix(node, indent)
    else:
      self.suffix(node, comment=True)
  return wrapped
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
# ==============================================================================
|
| 108 |
+
# == NodeVisitors for annotating an AST ==
|
| 109 |
+
# ==============================================================================
|
| 110 |
+
|
| 111 |
+
class BaseVisitor(ast.NodeVisitor):
|
| 112 |
+
"""Walks a syntax tree in the order it appears in code.
|
| 113 |
+
|
| 114 |
+
This class has a dual-purpose. It is implemented (in this file) for annotating
|
| 115 |
+
an AST with formatting information needed to reconstruct the source code, but
|
| 116 |
+
it also is implemented in pasta.base.codegen to reconstruct the source code.
|
| 117 |
+
|
| 118 |
+
Each visit method in this class specifies the order in which both child nodes
|
| 119 |
+
and syntax tokens appear, plus where to account for whitespace, commas,
|
| 120 |
+
parentheses, etc.
|
| 121 |
+
"""
|
| 122 |
+
|
| 123 |
+
__metaclass__ = abc.ABCMeta
|
| 124 |
+
|
| 125 |
+
  def __init__(self):
    # Stack of nodes currently being visited, outermost first.
    self._stack = []
    # Current absolute indentation string for the block being visited.
    self._indent = ''
    # Indent increment for the current block level.
    self._indent_diff = ''
    # Increment used when a block has no recorded indent_diff.
    self._default_indent_diff = ' '
|
| 130 |
+
|
| 131 |
+
def visit(self, node):
|
| 132 |
+
self._stack.append(node)
|
| 133 |
+
super(BaseVisitor, self).visit(node)
|
| 134 |
+
assert node is self._stack.pop()
|
| 135 |
+
|
| 136 |
+
def prefix(self, node, default=''):
|
| 137 |
+
"""Account for some amount of whitespace as the prefix to a node."""
|
| 138 |
+
self.attr(node, 'prefix', [lambda: self.ws(comment=True)], default=default)
|
| 139 |
+
|
| 140 |
+
def suffix(self, node, max_lines=None, semicolon=False, comment=False,
|
| 141 |
+
default=''):
|
| 142 |
+
"""Account for some amount of whitespace as the suffix to a node."""
|
| 143 |
+
def _ws():
|
| 144 |
+
return self.ws(max_lines=max_lines, semicolon=semicolon, comment=comment)
|
| 145 |
+
self.attr(node, 'suffix', [_ws], default=default)
|
| 146 |
+
|
| 147 |
+
  def indented(self, node, children_attr):
    """Generator that yields the children of `node` within an indented block.

    Saves the current indentation state, applies the block's recorded indent
    increment (or the default when none is recorded on the first child), and
    restores the previous state after all children have been yielded.
    """
    children = getattr(node, children_attr)
    prev_indent = self._indent
    prev_indent_diff = self._indent_diff
    new_diff = fmt.get(children[0], 'indent_diff')
    if new_diff is None:
      new_diff = self._default_indent_diff
    self._indent_diff = new_diff
    self._indent = prev_indent + self._indent_diff
    for child in children:
      yield child
    # Account for any trailing content belonging to the block itself.
    self.attr(node, 'block_suffix_%s' % children_attr, [])
    self._indent = prev_indent
    self._indent_diff = prev_indent_diff
|
| 161 |
+
|
| 162 |
+
def set_default_indent_diff(self, indent):
|
| 163 |
+
self._default_indent_diff = indent
|
| 164 |
+
|
| 165 |
+
  @contextlib.contextmanager
  def scope(self, node, attr=None, trailing_comma=False, default_parens=False):
    """Context manager to handle a parenthesized scope.

    Arguments:
      node: (ast.AST) Node to store the scope prefix and suffix on.
      attr: (string, optional) Attribute of the node contained in the scope, if
        any. For example, as `None`, the scope would wrap the entire node, but
        as 'bases', the scope might wrap only the bases of a class.
      trailing_comma: (boolean) If True, allow a trailing comma at the end.
      default_parens: (boolean) If True and no formatting information is
        present, the scope would be assumed to be parenthesized.
    """
    # NOTE(review): trailing_comma is accepted but unused in this base
    # implementation; subclasses presumably consume it — confirm.
    if attr:
      self.attr(node, attr + '_prefix', [],
                default='(' if default_parens else '')
    yield
    if attr:
      self.attr(node, attr + '_suffix', [],
                default=')' if default_parens else '')
|
| 185 |
+
|
| 186 |
+
  def token(self, token_val):
    """Account for a specific token."""
    # No-op in the base class; subclasses implement token handling.
|
| 188 |
+
|
| 189 |
+
  def attr(self, node, attr_name, attr_vals, deps=None, default=None):
    """Handles an attribute on the given node."""
    # No-op in the base class; subclasses implement attribute handling.
|
| 191 |
+
|
| 192 |
+
  def ws(self, max_lines=None, semicolon=False, comment=True):
    """Account for some amount of whitespace.

    Arguments:
      max_lines: (int) Maximum number of newlines to consider.
      semicolon: (boolean) If True, parse up to the next semicolon (if present).
      comment: (boolean) If True, look for a trailing comment even when not in
        a parenthesized scope.
    """
    # Base implementation consumes nothing and contributes the empty string.
    return ''
|
| 202 |
+
|
| 203 |
+
def dots(self, num_dots):
|
| 204 |
+
"""Account for a number of dots."""
|
| 205 |
+
return '.' * num_dots
|
| 206 |
+
|
| 207 |
+
def ws_oneline(self):
|
| 208 |
+
"""Account for up to one line of whitespace."""
|
| 209 |
+
return self.ws(max_lines=1)
|
| 210 |
+
|
| 211 |
+
  def optional_token(self, node, attr_name, token_val, default=False):
    """Account for a suffix that may or may not occur."""
    # No-op in the base class; subclasses implement optional-token handling.
|
| 213 |
+
|
| 214 |
+
  def one_of_symbols(self, *symbols):
    """Account for one of the given symbols."""
    # Base implementation assumes the first symbol.
    return symbols[0]
|
| 217 |
+
|
| 218 |
+
# ============================================================================
|
| 219 |
+
# == BLOCK STATEMENTS: Statements that contain a list of statements ==
|
| 220 |
+
# ============================================================================
|
| 221 |
+
|
| 222 |
+
# Keeps the entire suffix, so @block_statement is not useful here.
|
| 223 |
+
  @module
  def visit_Module(self, node):
    """Annotates the top-level Module node by visiting all children."""
    self.generic_visit(node)
|
| 226 |
+
|
| 227 |
+
  @block_statement
  def visit_If(self, node):
    """Annotates an `if`/`elif` statement, its body, and any else block."""
    tok = 'elif' if fmt.get(node, 'is_elif') else 'if'
    self.attr(node, 'open_if', [tok, self.ws], default=tok + ' ')
    self.visit(node.test)
    self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
              default=':\n')

    for stmt in self.indented(node, 'body'):
      self.visit(stmt)

    if node.orelse:
      # A lone If in orelse may actually be an `elif` continuation of this
      # statement; mark it so it renders as `elif` rather than `else: if`.
      if (len(node.orelse) == 1 and isinstance(node.orelse[0], ast.If) and
          self.check_is_elif(node.orelse[0])):
        fmt.set(node.orelse[0], 'is_elif', True)
        self.visit(node.orelse[0])
      else:
        self.attr(node, 'elseprefix', [self.ws])
        self.token('else')
        self.attr(node, 'open_else', [self.ws, ':', self.ws_oneline],
                  default=':\n')
        for stmt in self.indented(node, 'orelse'):
          self.visit(stmt)
|
| 250 |
+
|
| 251 |
+
  @abc.abstractmethod
  def check_is_elif(self, node):
    """Return True if the node continues a previous `if` statement as `elif`.

    In python 2.x, `elif` statements get parsed as If nodes. E.g, the following
    two syntax forms are indistinguishable in the ast in python 2.

    if a:
      do_something()
    elif b:
      do_something_else()

    if a:
      do_something()
    else:
      if b:
        do_something_else()

    This method should return True for the 'if b' node if it has the first form.
    """
|
| 271 |
+
|
| 272 |
+
  @block_statement
  def visit_While(self, node):
    """Annotates a `while` loop, its body, and an optional else block."""
    self.attr(node, 'while_keyword', ['while', self.ws], default='while ')
    self.visit(node.test)
    self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
              default=':\n')
    for stmt in self.indented(node, 'body'):
      self.visit(stmt)

    if node.orelse:
      self.attr(node, 'else', [self.ws, 'else', self.ws, ':', self.ws_oneline],
                default=self._indent + 'else:\n')
      for stmt in self.indented(node, 'orelse'):
        self.visit(stmt)
|
| 286 |
+
|
| 287 |
+
  @block_statement
  def visit_For(self, node):
    """Annotates a `for` (or `async for`) loop and its body/else blocks."""
    # ast.AsyncFor only exists on Python versions that support async syntax.
    if hasattr(ast, 'AsyncFor') and isinstance(node, ast.AsyncFor):
      self.attr(node, 'for_keyword', ['async', self.ws, 'for', self.ws],
                default='async for ')
    else:
      self.attr(node, 'for_keyword', ['for', self.ws], default='for ')
    self.visit(node.target)
    self.attr(node, 'for_in', [self.ws, 'in', self.ws], default=' in ')
    self.visit(node.iter)
    self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
              default=':\n')
    for stmt in self.indented(node, 'body'):
      self.visit(stmt)

    if node.orelse:
      self.attr(node, 'else', [self.ws, 'else', self.ws, ':', self.ws_oneline],
                default=self._indent + 'else:\n')

      for stmt in self.indented(node, 'orelse'):
        self.visit(stmt)
|
| 308 |
+
|
| 309 |
+
  def visit_AsyncFor(self, node):
    """Annotates an `async for` loop; visit_For handles the async keyword."""
    return self.visit_For(node)
|
| 311 |
+
|
| 312 |
+
  @block_statement
  def visit_With(self, node):
    """Annotates a `with` statement (python 2 form; delegates for python 3)."""
    # Python 3 With nodes have an `items` list; handle them separately.
    if hasattr(node, 'items'):
      return self.visit_With_3(node)
    if not getattr(node, 'is_continued', False):
      self.attr(node, 'with', ['with', self.ws], default='with ')
    self.visit(node.context_expr)
    if node.optional_vars:
      self.attr(node, 'with_as', [self.ws, 'as', self.ws], default=' as ')
      self.visit(node.optional_vars)

    # In python 2, `with a, b:` parses as nested With nodes; render the nested
    # node as a comma continuation instead of a new block.
    if len(node.body) == 1 and self.check_is_continued_with(node.body[0]):
      node.body[0].is_continued = True
      self.attr(node, 'with_comma', [self.ws, ',', self.ws], default=', ')
    else:
      self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
                default=':\n')
    for stmt in self.indented(node, 'body'):
      self.visit(stmt)
|
| 331 |
+
|
| 332 |
+
  def visit_AsyncWith(self, node):
    """Annotates an `async with`; visit_With/visit_With_3 handle the keyword."""
    return self.visit_With(node)
|
| 334 |
+
|
| 335 |
+
  @abc.abstractmethod
  def check_is_continued_try(self, node):
    """Return True if the node continues a previous `try` statement.

    Python 2 parses try/except/finally as a TryFinally whose body is a single
    TryExcept (see visit_TryFinally).
    """
    pass
|
| 338 |
+
|
| 339 |
+
  @abc.abstractmethod
  def check_is_continued_with(self, node):
    """Return True if the node continues a previous `with` statement.

    In python 2.x, `with` statements with many context expressions get parsed
    as a tree of With nodes. E.g, the following two syntax forms are
    indistinguishable in the ast in python 2.

    with a, b, c:
      do_something()

    with a:
      with b:
        with c:
          do_something()

    This method should return True for the `with b` and `with c` nodes.
    """
|
| 357 |
+
|
| 358 |
+
  def visit_With_3(self, node):
    """Annotates a python 3 `with` statement, which stores withitems."""
    if hasattr(ast, 'AsyncWith') and isinstance(node, ast.AsyncWith):
      self.attr(node, 'with', ['async', self.ws, 'with', self.ws],
                default='async with ')
    else:
      self.attr(node, 'with', ['with', self.ws], default='with ')

    for i, withitem in enumerate(node.items):
      self.visit(withitem)
      if i != len(node.items) - 1:
        self.token(',')

    self.attr(node, 'with_body_open', [':', self.ws_oneline], default=':\n')
    for stmt in self.indented(node, 'body'):
      self.visit(stmt)
|
| 373 |
+
|
| 374 |
+
  @space_around
  def visit_withitem(self, node):
    """Annotates one `expr [as target]` item of a python 3 with statement."""
    self.visit(node.context_expr)
    if node.optional_vars:
      self.attr(node, 'as', [self.ws, 'as', self.ws], default=' as ')
      self.visit(node.optional_vars)
|
| 380 |
+
|
| 381 |
+
  @block_statement
  def visit_ClassDef(self, node):
    """Annotates a class definition: decorators, bases, keywords, and body."""
    for i, decorator in enumerate(node.decorator_list):
      self.attr(node, 'decorator_prefix_%d' % i, [self.ws, '@'], default='@')
      self.visit(decorator)
      self.attr(node, 'decorator_suffix_%d' % i, [self.ws],
                default='\n' + self._indent)
    self.attr(node, 'class_def', ['class', self.ws, node.name, self.ws],
              default='class %s' % node.name, deps=('name',))
    # `keywords` (e.g. metaclass=...) only exists on python 3 ASTs.
    class_args = getattr(node, 'bases', []) + getattr(node, 'keywords', [])
    with self.scope(node, 'bases', trailing_comma=bool(class_args),
                    default_parens=True):
      for i, base in enumerate(node.bases):
        self.visit(base)
        self.attr(node, 'base_suffix_%d' % i, [self.ws])
        if base != class_args[-1]:
          self.attr(node, 'base_sep_%d' % i, [',', self.ws], default=', ')
      if hasattr(node, 'keywords'):
        for i, keyword in enumerate(node.keywords):
          self.visit(keyword)
          self.attr(node, 'keyword_suffix_%d' % i, [self.ws])
          if keyword != node.keywords[-1]:
            self.attr(node, 'keyword_sep_%d' % i, [',', self.ws], default=', ')
    self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
              default=':\n')
    for stmt in self.indented(node, 'body'):
      self.visit(stmt)
|
| 408 |
+
|
| 409 |
+
  @block_statement
  def visit_FunctionDef(self, node):
    """Annotates a (possibly async) function def: decorators, args, body."""
    for i, decorator in enumerate(node.decorator_list):
      self.attr(node, 'decorator_symbol_%d' % i, [self.ws, '@', self.ws],
                default='@')
      self.visit(decorator)
      self.attr(node, 'decorator_suffix_%d' % i, [self.ws_oneline],
                default='\n' + self._indent)
    if (hasattr(ast, 'AsyncFunctionDef') and
        isinstance(node, ast.AsyncFunctionDef)):
      self.attr(node, 'function_def',
                [self.ws, 'async', self.ws, 'def', self.ws, node.name, self.ws],
                deps=('name',), default='async def %s' % node.name)
    else:
      self.attr(node, 'function_def',
                [self.ws, 'def', self.ws, node.name, self.ws],
                deps=('name',), default='def %s' % node.name)
    # In Python 3, there can be extra args in kwonlyargs
    kwonlyargs = getattr(node.args, 'kwonlyargs', [])
    args_count = sum((len(node.args.args + kwonlyargs),
                      1 if node.args.vararg else 0,
                      1 if node.args.kwarg else 0))
    with self.scope(node, 'args', trailing_comma=args_count > 0,
                    default_parens=True):
      self.visit(node.args)

    # Return annotation (`-> type`) is python 3 only.
    if getattr(node, 'returns', None):
      self.attr(node, 'returns_prefix', [self.ws, '->', self.ws],
                deps=('returns',), default=' -> ')
      self.visit(node.returns)

    self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
              default=':\n')
    for stmt in self.indented(node, 'body'):
      self.visit(stmt)
|
| 444 |
+
|
| 445 |
+
  def visit_AsyncFunctionDef(self, node):
    """Annotates `async def`; visit_FunctionDef handles the async keyword."""
    return self.visit_FunctionDef(node)
|
| 447 |
+
|
| 448 |
+
  @block_statement
  def visit_TryFinally(self, node):
    """Annotates a python 2 try/finally statement."""
    # Try with except and finally is a TryFinally with the first statement as a
    # TryExcept in Python2
    self.attr(node, 'open_try', ['try', self.ws, ':', self.ws_oneline],
              default='try:\n')
    # TODO(soupytwist): Find a cleaner solution for differentiating this.
    if len(node.body) == 1 and self.check_is_continued_try(node.body[0]):
      node.body[0].is_continued = True
      self.visit(node.body[0])
    else:
      for stmt in self.indented(node, 'body'):
        self.visit(stmt)
    self.attr(node, 'open_finally',
              [self.ws, 'finally', self.ws, ':', self.ws_oneline],
              default='finally:\n')
    for stmt in self.indented(node, 'finalbody'):
      self.visit(stmt)
|
| 466 |
+
|
| 467 |
+
  @block_statement
  def visit_TryExcept(self, node):
    """Annotates a python 2 try/except statement and its handlers."""
    # When this node continues a TryFinally, the `try:` was already emitted.
    if not getattr(node, 'is_continued', False):
      self.attr(node, 'open_try', ['try', self.ws, ':', self.ws_oneline],
                default='try:\n')
    for stmt in self.indented(node, 'body'):
      self.visit(stmt)
    for handler in node.handlers:
      self.visit(handler)
    if node.orelse:
      self.attr(node, 'open_else',
                [self.ws, 'else', self.ws, ':', self.ws_oneline],
                default='else:\n')
      for stmt in self.indented(node, 'orelse'):
        self.visit(stmt)
|
| 482 |
+
|
| 483 |
+
  @block_statement
  def visit_Try(self, node):
    """Annotates a python 3 try statement: body, handlers, else, finally."""
    # Python 3
    self.attr(node, 'open_try', [self.ws, 'try', self.ws, ':', self.ws_oneline],
              default='try:\n')
    for stmt in self.indented(node, 'body'):
      self.visit(stmt)
    for handler in node.handlers:
      self.visit(handler)
    if node.orelse:
      self.attr(node, 'open_else',
                [self.ws, 'else', self.ws, ':', self.ws_oneline],
                default='else:\n')
      for stmt in self.indented(node, 'orelse'):
        self.visit(stmt)
    if node.finalbody:
      self.attr(node, 'open_finally',
                [self.ws, 'finally', self.ws, ':', self.ws_oneline],
                default='finally:\n')
      for stmt in self.indented(node, 'finalbody'):
        self.visit(stmt)
|
| 504 |
+
|
| 505 |
+
  @block_statement
  def visit_ExceptHandler(self, node):
    """Annotates one `except [type [as name]]:` clause and its body."""
    self.token('except')
    if node.type:
      self.visit(node.type)
    if node.type and node.name:
      # Python 2 also allowed `except E, name`; accept either separator.
      self.attr(node, 'as', [self.ws, self.one_of_symbols("as", ","), self.ws],
                default=' as ')
    if node.name:
      # In python 2 the name is an AST node; in python 3 it is a plain string.
      if isinstance(node.name, ast.AST):
        self.visit(node.name)
      else:
        self.token(node.name)
    self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline],
              default=':\n')
    for stmt in self.indented(node, 'body'):
      self.visit(stmt)
|
| 522 |
+
|
| 523 |
+
  @statement
  def visit_Raise(self, node):
    """Annotates a raise statement (python 2 form; delegates for python 3)."""
    # Python 3 Raise nodes have `exc`/`cause` instead of type/inst/tback.
    if hasattr(node, 'cause'):
      return self.visit_Raise_3(node)

    self.token('raise')
    if node.type:
      self.attr(node, 'type_prefix', [self.ws], default=' ')
      self.visit(node.type)
    if node.inst:
      self.attr(node, 'inst_prefix', [self.ws, ',', self.ws], default=', ')
      self.visit(node.inst)
    if node.tback:
      self.attr(node, 'tback_prefix', [self.ws, ',', self.ws], default=', ')
      self.visit(node.tback)
|
| 538 |
+
|
| 539 |
+
  def visit_Raise_3(self, node):
    """Annotates a python 3 raise statement: `raise [exc [from cause]]`."""
    if node.exc:
      self.attr(node, 'open_raise', ['raise', self.ws], default='raise ')
      self.visit(node.exc)
      if node.cause:
        self.attr(node, 'cause_prefix', [self.ws, 'from', self.ws],
                  default=' from ')
        self.visit(node.cause)
    else:
      self.token('raise')
|
| 549 |
+
|
| 550 |
+
# ============================================================================
|
| 551 |
+
# == STATEMENTS: Instructions without a return value ==
|
| 552 |
+
# ============================================================================
|
| 553 |
+
|
| 554 |
+
  @statement
  def visit_Assert(self, node):
    """Annotates an assert statement: `assert test[, msg]`."""
    self.attr(node, 'assert_open', ['assert', self.ws], default='assert ')
    self.visit(node.test)
    if node.msg:
      self.attr(node, 'msg_prefix', [',', self.ws], default=', ')
      self.visit(node.msg)
|
| 561 |
+
|
| 562 |
+
  @statement
  def visit_Assign(self, node):
    """Annotates an assignment, including chained `a = b = value` targets."""
    for i, target in enumerate(node.targets):
      self.visit(target)
      self.attr(node, 'equal_%d' % i, [self.ws, '=', self.ws], default=' = ')
    self.visit(node.value)
|
| 568 |
+
|
| 569 |
+
  @statement
  def visit_AugAssign(self, node):
    """Annotates an augmented assignment, e.g. `x += value`."""
    self.visit(node.target)
    # Build the operator token (e.g. '+=') from the op node's base symbol.
    op_token = '%s=' % ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0]
    self.attr(node, 'operator', [self.ws, op_token, self.ws],
              default=' %s ' % op_token)
    self.visit(node.value)
|
| 576 |
+
|
| 577 |
+
  @statement
  def visit_AnnAssign(self, node):
    """Annotates an annotated assignment, e.g. `x: int = value`."""
    # TODO: Check default formatting for different values of "simple"
    self.visit(node.target)
    self.attr(node, 'colon', [self.ws, ':', self.ws], default=': ')
    self.visit(node.annotation)
    if node.value:
      self.attr(node, 'equal', [self.ws, '=', self.ws], default=' = ')
      self.visit(node.value)
|
| 586 |
+
|
| 587 |
+
  @expression
  def visit_Await(self, node):
    """Annotates an `await value` expression."""
    self.attr(node, 'await', ['await', self.ws], default='await ')
    self.visit(node.value)
|
| 591 |
+
|
| 592 |
+
  @statement
  def visit_Break(self, node):
    """Annotates a `break` statement."""
    self.token('break')
|
| 595 |
+
|
| 596 |
+
  @statement
  def visit_Continue(self, node):
    """Annotates a `continue` statement."""
    self.token('continue')
|
| 599 |
+
|
| 600 |
+
  @statement
  def visit_Delete(self, node):
    """Annotates a `del` statement with comma-separated targets."""
    self.attr(node, 'del', ['del', self.ws], default='del ')
    for i, target in enumerate(node.targets):
      self.visit(target)
      if target is not node.targets[-1]:
        self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
|
| 607 |
+
|
| 608 |
+
  @statement
  def visit_Exec(self, node):
    """Annotates a python 2 `exec` statement."""
    # If no formatting info is present, will use parenthesized style
    self.attr(node, 'exec', ['exec', self.ws], default='exec')
    with self.scope(node, 'body', trailing_comma=False, default_parens=True):
      self.visit(node.body)
      if node.globals:
        # Both `exec x in g` and `exec(x, g)` are accepted separators.
        self.attr(node, 'in_globals',
                  [self.ws, self.one_of_symbols('in', ','), self.ws],
                  default=', ')
        self.visit(node.globals)
        if node.locals:
          self.attr(node, 'in_locals', [self.ws, ',', self.ws], default=', ')
          self.visit(node.locals)
|
| 622 |
+
|
| 623 |
+
  @statement
  def visit_Expr(self, node):
    """Annotates an expression statement (an expression used on its own)."""
    self.visit(node.value)
|
| 626 |
+
|
| 627 |
+
@statement
|
| 628 |
+
def visit_Global(self, node):
|
| 629 |
+
self.token('global')
|
| 630 |
+
identifiers = []
|
| 631 |
+
for ident in node.names:
|
| 632 |
+
if ident != node.names[0]:
|
| 633 |
+
identifiers.extend([self.ws, ','])
|
| 634 |
+
identifiers.extend([self.ws, ident])
|
| 635 |
+
self.attr(node, 'names', identifiers)
|
| 636 |
+
|
| 637 |
+
@statement
|
| 638 |
+
def visit_Import(self, node):
|
| 639 |
+
self.token('import')
|
| 640 |
+
for i, alias in enumerate(node.names):
|
| 641 |
+
self.attr(node, 'alias_prefix_%d' % i, [self.ws], default=' ')
|
| 642 |
+
self.visit(alias)
|
| 643 |
+
if alias != node.names[-1]:
|
| 644 |
+
self.attr(node, 'alias_sep_%d' % i, [self.ws, ','], default=',')
|
| 645 |
+
|
| 646 |
+
  @statement
  def visit_ImportFrom(self, node):
    """Annotates a `from [.]module import names` statement."""
    self.token('from')
    self.attr(node, 'module_prefix', [self.ws], default=' ')

    # Build the module reference: leading relative-import dots, then the
    # dotted module path (if any).
    module_pattern = []
    if node.level > 0:
      module_pattern.extend([self.dots(node.level), self.ws])
    if node.module:
      parts = node.module.split('.')
      for part in parts[:-1]:
        module_pattern += [self.ws, part, self.ws, '.']
      module_pattern += [self.ws, parts[-1]]

    self.attr(node, 'module', module_pattern,
              deps=('level', 'module'),
              default='.' * node.level + (node.module or ''))
    self.attr(node, 'module_suffix', [self.ws], default=' ')

    self.token('import')
    with self.scope(node, 'names', trailing_comma=True):
      for i, alias in enumerate(node.names):
        self.attr(node, 'alias_prefix_%d' % i, [self.ws], default=' ')
        self.visit(alias)
        if alias is not node.names[-1]:
          self.attr(node, 'alias_sep_%d' % i, [self.ws, ','], default=',')
|
| 672 |
+
|
| 673 |
+
@expression
|
| 674 |
+
def visit_NamedExpr(self, node):
|
| 675 |
+
self.visit(target)
|
| 676 |
+
self.attr(node, 'equal' % i, [self.ws, ':=', self.ws], default=' := ')
|
| 677 |
+
self.visit(node.value)
|
| 678 |
+
|
| 679 |
+
@statement
|
| 680 |
+
def visit_Nonlocal(self, node):
|
| 681 |
+
self.token('nonlocal')
|
| 682 |
+
identifiers = []
|
| 683 |
+
for ident in node.names:
|
| 684 |
+
if ident != node.names[0]:
|
| 685 |
+
identifiers.extend([self.ws, ','])
|
| 686 |
+
identifiers.extend([self.ws, ident])
|
| 687 |
+
self.attr(node, 'names', identifiers)
|
| 688 |
+
|
| 689 |
+
  @statement
  def visit_Pass(self, node):
    """Annotates a `pass` statement."""
    self.token('pass')
|
| 692 |
+
|
| 693 |
+
  @statement
  def visit_Print(self, node):
    """Annotates a python 2 `print` statement, including `>>dest` redirects."""
    self.attr(node, 'print_open', ['print', self.ws], default='print ')
    if node.dest:
      self.attr(node, 'redirection', ['>>', self.ws], default='>>')
      self.visit(node.dest)
      if node.values:
        self.attr(node, 'values_prefix', [self.ws, ',', self.ws], default=', ')
      elif not node.nl:
        # `print >>dest,` with no values and no trailing newline.
        self.attr(node, 'trailing_comma', [self.ws, ','], default=',')

    for i, value in enumerate(node.values):
      self.visit(value)
      if value is not node.values[-1]:
        self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
      elif not node.nl:
        # A trailing comma suppresses the newline in python 2 print.
        self.attr(node, 'trailing_comma', [self.ws, ','], default=',')
|
| 710 |
+
|
| 711 |
+
  @statement
  def visit_Return(self, node):
    """Annotates a `return [value]` statement."""
    self.token('return')
    if node.value:
      self.attr(node, 'return_value_prefix', [self.ws], default=' ')
      self.visit(node.value)
|
| 717 |
+
|
| 718 |
+
  @expression
  def visit_Yield(self, node):
    """Annotates a `yield [value]` expression."""
    self.token('yield')
    if node.value:
      self.attr(node, 'yield_value_prefix', [self.ws], default=' ')
      self.visit(node.value)
|
| 724 |
+
|
| 725 |
+
  @expression
  def visit_YieldFrom(self, node):
    """Annotates a `yield from value` expression."""
    self.attr(node, 'yield_from', ['yield', self.ws, 'from', self.ws],
              default='yield from ')
    self.visit(node.value)
|
| 730 |
+
|
| 731 |
+
# ============================================================================
|
| 732 |
+
# == EXPRESSIONS: Anything that evaluates and can be in parens ==
|
| 733 |
+
# ============================================================================
|
| 734 |
+
|
| 735 |
+
  @expression
  def visit_Attribute(self, node):
    """Annotates an attribute access, e.g. `value.attr`."""
    self.visit(node.value)
    self.attr(node, 'dot', [self.ws, '.', self.ws], default='.')
    self.token(node.attr)
|
| 740 |
+
|
| 741 |
+
  @expression
  def visit_BinOp(self, node):
    """Annotates a binary operation, e.g. `left + right`."""
    # Look up the operator's source token from its AST node type.
    op_symbol = ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0]
    self.visit(node.left)
    self.attr(node, 'op', [self.ws, op_symbol, self.ws],
              default=' %s ' % op_symbol, deps=('op',))
    self.visit(node.right)
|
| 748 |
+
|
| 749 |
+
  @expression
  def visit_BoolOp(self, node):
    """Annotates a boolean operation, e.g. `a and b and c`."""
    op_symbol = ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0]
    for i, value in enumerate(node.values):
      self.visit(value)
      if value is not node.values[-1]:
        self.attr(node, 'op_%d' % i, [self.ws, op_symbol, self.ws],
                  default=' %s ' % op_symbol, deps=('op',))
|
| 757 |
+
|
| 758 |
+
  @expression
  def visit_Call(self, node):
    """Annotates a call expression: the callee and its argument list."""
    self.visit(node.func)

    with self.scope(node, 'arguments', default_parens=True):
      # python <3.5: starargs and kwargs are in separate fields
      # python 3.5+: starargs args included as a Starred nodes in the arguments
      # and kwargs are included as keywords with no argument name.
      if sys.version_info[:2] >= (3, 5):
        any_args = self.visit_Call_arguments35(node)
      else:
        any_args = self.visit_Call_arguments(node)
      if any_args:
        self.optional_token(node, 'trailing_comma', ',')
|
| 772 |
+
|
| 773 |
+
  def visit_Call_arguments(self, node):
    """Annotates call arguments on a pre-3.5 AST; returns True if any exist.

    Interleaves positional args, keywords, `*starargs` and `**kwargs` in their
    original source order (sorted by lineno/col_offset).
    """
    def arg_location(tup):
      arg = tup[1]
      if isinstance(arg, ast.keyword):
        arg = arg.value
      return (getattr(arg, "lineno", 0), getattr(arg, "col_offset", 0))

    if node.starargs:
      sorted_keywords = sorted(
          [(None, kw) for kw in node.keywords] + [('*', node.starargs)],
          key=arg_location)
    else:
      sorted_keywords = [(None, kw) for kw in node.keywords]
    all_args = [(None, n) for n in node.args] + sorted_keywords
    if node.kwargs:
      all_args.append(('**', node.kwargs))

    for i, (prefix, arg) in enumerate(all_args):
      # prefix is '*' or '**' for starargs/kwargs, None otherwise.
      if prefix is not None:
        self.attr(node, '%s_prefix' % prefix, [self.ws, prefix], default=prefix)
      self.visit(arg)
      if arg is not all_args[-1][1]:
        self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
    return bool(all_args)
|
| 797 |
+
|
| 798 |
+
  def visit_Call_arguments35(self, node):
    """Annotates call arguments on a 3.5+ AST; returns True if any exist."""
    def arg_compare(a1, a2):
      """Old-style comparator for sorting args."""
      def is_arg(a):
        return not isinstance(a, (ast.keyword, ast.Starred))

      # No kwarg can come before a regular arg (but Starred can be wherever)
      if is_arg(a1) and isinstance(a2, ast.keyword):
        return -1
      elif is_arg(a2) and isinstance(a1, ast.keyword):
        return 1

      # If no lineno or col_offset on one of the args, they compare as equal
      # (since sorting is stable, this should leave them mostly where they
      # were in the initial list).
      def get_pos(a):
        if isinstance(a, ast.keyword):
          a = a.value
        return (getattr(a, 'lineno', None), getattr(a, 'col_offset', None))

      pos1 = get_pos(a1)
      pos2 = get_pos(a2)

      if None in pos1 or None in pos2:
        return 0

      # If both have lineno/col_offset set, use that to sort them
      return -1 if pos1 < pos2 else 0 if pos1 == pos2 else 1

    # Note that this always sorts keywords identically to just sorting by
    # lineno/col_offset, except in cases where that ordering would have been
    # a syntax error (named arg before unnamed arg).
    all_args = sorted(node.args + node.keywords,
                      key=functools.cmp_to_key(arg_compare))

    for i, arg in enumerate(all_args):
      self.visit(arg)
      if arg is not all_args[-1]:
        self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
    return bool(all_args)
|
| 838 |
+
|
| 839 |
+
  def visit_Starred(self, node):
    """Annotates a starred expression, e.g. `*value`."""
    self.attr(node, 'star', ['*', self.ws], default='*')
    self.visit(node.value)
|
| 842 |
+
|
| 843 |
+
  @expression
  def visit_Compare(self, node):
    """Annotates a (possibly chained) comparison, e.g. `a < b <= c`."""
    self.visit(node.left)
    for i, (op, comparator) in enumerate(zip(node.ops, node.comparators)):
      self.attr(node, 'op_prefix_%d' % i, [self.ws], default=' ')
      self.visit(op)
      self.attr(node, 'op_suffix_%d' % i, [self.ws], default=' ')
      self.visit(comparator)
|
| 851 |
+
|
| 852 |
+
  @expression
  def visit_Dict(self, node):
    """Annotates a dict literal, including `**`-unpacked entries."""
    self.token('{')
    for i, key, value in zip(range(len(node.keys)), node.keys, node.values):
      if key is None:
        # Handle Python 3.5+ dict unpacking syntax (PEP-448)
        self.attr(node, 'starstar_%d' % i, [self.ws, '**'], default='**')
      else:
        self.visit(key)
        self.attr(node, 'key_val_sep_%d' % i, [self.ws, ':', self.ws],
                  default=': ')
      self.visit(value)
      if value is not node.values[-1]:
        self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
    # A trailing comma before the closing brace is optional.
    self.optional_token(node, 'extracomma', ',', allow_whitespace_prefix=True)
    self.attr(node, 'close_prefix', [self.ws, '}'], default='}')
|
| 868 |
+
|
| 869 |
+
@expression
|
| 870 |
+
def visit_DictComp(self, node):
|
| 871 |
+
self.attr(node, 'open_dict', ['{', self.ws], default='{')
|
| 872 |
+
self.visit(node.key)
|
| 873 |
+
self.attr(node, 'key_val_sep', [self.ws, ':', self.ws], default=': ')
|
| 874 |
+
self.visit(node.value)
|
| 875 |
+
for comp in node.generators:
|
| 876 |
+
self.visit(comp)
|
| 877 |
+
self.attr(node, 'close_dict', [self.ws, '}'], default='}')
|
| 878 |
+
|
| 879 |
+
@expression
|
| 880 |
+
def visit_GeneratorExp(self, node):
|
| 881 |
+
self._comp_exp(node)
|
| 882 |
+
|
| 883 |
+
@expression
|
| 884 |
+
def visit_IfExp(self, node):
|
| 885 |
+
self.visit(node.body)
|
| 886 |
+
self.attr(node, 'if', [self.ws, 'if', self.ws], default=' if ')
|
| 887 |
+
self.visit(node.test)
|
| 888 |
+
self.attr(node, 'else', [self.ws, 'else', self.ws], default=' else ')
|
| 889 |
+
self.visit(node.orelse)
|
| 890 |
+
|
| 891 |
+
@expression
|
| 892 |
+
def visit_Lambda(self, node):
|
| 893 |
+
self.attr(node, 'lambda_def', ['lambda', self.ws], default='lambda ')
|
| 894 |
+
self.visit(node.args)
|
| 895 |
+
self.attr(node, 'open_lambda', [self.ws, ':', self.ws], default=': ')
|
| 896 |
+
self.visit(node.body)
|
| 897 |
+
|
| 898 |
+
@expression
|
| 899 |
+
def visit_List(self, node):
|
| 900 |
+
self.attr(node, 'list_open', ['[', self.ws], default='[')
|
| 901 |
+
|
| 902 |
+
for i, elt in enumerate(node.elts):
|
| 903 |
+
self.visit(elt)
|
| 904 |
+
if elt is not node.elts[-1]:
|
| 905 |
+
self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
|
| 906 |
+
if node.elts:
|
| 907 |
+
self.optional_token(node, 'extracomma', ',', allow_whitespace_prefix=True)
|
| 908 |
+
|
| 909 |
+
self.attr(node, 'list_close', [self.ws, ']'], default=']')
|
| 910 |
+
|
| 911 |
+
@expression
|
| 912 |
+
def visit_ListComp(self, node):
|
| 913 |
+
self._comp_exp(node, open_brace='[', close_brace=']')
|
| 914 |
+
|
| 915 |
+
def _comp_exp(self, node, open_brace=None, close_brace=None):
|
| 916 |
+
if open_brace:
|
| 917 |
+
self.attr(node, 'compexp_open', [open_brace, self.ws], default=open_brace)
|
| 918 |
+
self.visit(node.elt)
|
| 919 |
+
for i, comp in enumerate(node.generators):
|
| 920 |
+
self.visit(comp)
|
| 921 |
+
if close_brace:
|
| 922 |
+
self.attr(node, 'compexp_close', [self.ws, close_brace],
|
| 923 |
+
default=close_brace)
|
| 924 |
+
|
| 925 |
+
@expression
|
| 926 |
+
def visit_Name(self, node):
|
| 927 |
+
self.token(node.id)
|
| 928 |
+
|
| 929 |
+
@expression
|
| 930 |
+
def visit_NameConstant(self, node):
|
| 931 |
+
self.token(str(node.value))
|
| 932 |
+
|
| 933 |
+
@expression
|
| 934 |
+
def visit_Repr(self, node):
|
| 935 |
+
self.attr(node, 'repr_open', ['`', self.ws], default='`')
|
| 936 |
+
self.visit(node.value)
|
| 937 |
+
self.attr(node, 'repr_close', [self.ws, '`'], default='`')
|
| 938 |
+
|
| 939 |
+
@expression
|
| 940 |
+
def visit_Set(self, node):
|
| 941 |
+
self.attr(node, 'set_open', ['{', self.ws], default='{')
|
| 942 |
+
|
| 943 |
+
for i, elt in enumerate(node.elts):
|
| 944 |
+
self.visit(elt)
|
| 945 |
+
if elt is not node.elts[-1]:
|
| 946 |
+
self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ')
|
| 947 |
+
else:
|
| 948 |
+
self.optional_token(node, 'extracomma', ',',
|
| 949 |
+
allow_whitespace_prefix=True)
|
| 950 |
+
|
| 951 |
+
self.attr(node, 'set_close', [self.ws, '}'], default='}')
|
| 952 |
+
|
| 953 |
+
@expression
|
| 954 |
+
def visit_SetComp(self, node):
|
| 955 |
+
self._comp_exp(node, open_brace='{', close_brace='}')
|
| 956 |
+
|
| 957 |
+
@expression
|
| 958 |
+
def visit_Subscript(self, node):
|
| 959 |
+
self.visit(node.value)
|
| 960 |
+
self.attr(node, 'slice_open', [self.ws, '[', self.ws], default='[')
|
| 961 |
+
self.visit(node.slice)
|
| 962 |
+
self.attr(node, 'slice_close', [self.ws, ']'], default=']')
|
| 963 |
+
|
| 964 |
+
@expression
|
| 965 |
+
def visit_Tuple(self, node):
|
| 966 |
+
with self.scope(node, 'elts', default_parens=True):
|
| 967 |
+
for i, elt in enumerate(node.elts):
|
| 968 |
+
self.visit(elt)
|
| 969 |
+
if elt is not node.elts[-1]:
|
| 970 |
+
self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws],
|
| 971 |
+
default=', ')
|
| 972 |
+
else:
|
| 973 |
+
self.optional_token(node, 'extracomma', ',',
|
| 974 |
+
allow_whitespace_prefix=True,
|
| 975 |
+
default=len(node.elts) == 1)
|
| 976 |
+
|
| 977 |
+
@expression
|
| 978 |
+
def visit_UnaryOp(self, node):
|
| 979 |
+
op_symbol = ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0]
|
| 980 |
+
self.attr(node, 'op', [op_symbol, self.ws], default=op_symbol, deps=('op',))
|
| 981 |
+
self.visit(node.operand)
|
| 982 |
+
|
| 983 |
+
# ============================================================================
|
| 984 |
+
# == OPERATORS AND TOKENS: Anything that's just whitespace and tokens ==
|
| 985 |
+
# ============================================================================
|
| 986 |
+
|
| 987 |
+
@space_around
|
| 988 |
+
def visit_Ellipsis(self, node):
|
| 989 |
+
self.token('...')
|
| 990 |
+
|
| 991 |
+
def visit_Add(self, node):
|
| 992 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 993 |
+
|
| 994 |
+
def visit_Sub(self, node):
|
| 995 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 996 |
+
|
| 997 |
+
def visit_Mult(self, node):
|
| 998 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 999 |
+
|
| 1000 |
+
def visit_Div(self, node):
|
| 1001 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1002 |
+
|
| 1003 |
+
def visit_Mod(self, node):
|
| 1004 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1005 |
+
|
| 1006 |
+
def visit_Pow(self, node):
|
| 1007 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1008 |
+
|
| 1009 |
+
def visit_LShift(self, node):
|
| 1010 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1011 |
+
|
| 1012 |
+
def visit_RShift(self, node):
|
| 1013 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1014 |
+
|
| 1015 |
+
def visit_BitAnd(self, node):
|
| 1016 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1017 |
+
|
| 1018 |
+
def visit_BitOr(self, node):
|
| 1019 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1020 |
+
|
| 1021 |
+
def visit_BitXor(self, node):
|
| 1022 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1023 |
+
|
| 1024 |
+
def visit_FloorDiv(self, node):
|
| 1025 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1026 |
+
|
| 1027 |
+
def visit_Invert(self, node):
|
| 1028 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1029 |
+
|
| 1030 |
+
def visit_Not(self, node):
|
| 1031 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1032 |
+
|
| 1033 |
+
def visit_UAdd(self, node):
|
| 1034 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1035 |
+
|
| 1036 |
+
def visit_USub(self, node):
|
| 1037 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1038 |
+
|
| 1039 |
+
def visit_Eq(self, node):
|
| 1040 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1041 |
+
|
| 1042 |
+
def visit_NotEq(self, node):
|
| 1043 |
+
self.attr(node, 'operator', [self.one_of_symbols('!=', '<>')])
|
| 1044 |
+
|
| 1045 |
+
def visit_Lt(self, node):
|
| 1046 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1047 |
+
|
| 1048 |
+
def visit_LtE(self, node):
|
| 1049 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1050 |
+
|
| 1051 |
+
def visit_Gt(self, node):
|
| 1052 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1053 |
+
|
| 1054 |
+
def visit_GtE(self, node):
|
| 1055 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1056 |
+
|
| 1057 |
+
def visit_Is(self, node):
|
| 1058 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1059 |
+
|
| 1060 |
+
def visit_IsNot(self, node):
|
| 1061 |
+
self.attr(node, 'content', ['is', self.ws, 'not'], default='is not')
|
| 1062 |
+
|
| 1063 |
+
def visit_In(self, node):
|
| 1064 |
+
self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0])
|
| 1065 |
+
|
| 1066 |
+
def visit_NotIn(self, node):
|
| 1067 |
+
self.attr(node, 'content', ['not', self.ws, 'in'], default='not in')
|
| 1068 |
+
|
| 1069 |
+
# ============================================================================
|
| 1070 |
+
# == MISC NODES: Nodes which are neither statements nor expressions ==
|
| 1071 |
+
# ============================================================================
|
| 1072 |
+
|
| 1073 |
+
def visit_alias(self, node):
|
| 1074 |
+
name_pattern = []
|
| 1075 |
+
parts = node.name.split('.')
|
| 1076 |
+
for part in parts[:-1]:
|
| 1077 |
+
name_pattern += [self.ws, part, self.ws, '.']
|
| 1078 |
+
name_pattern += [self.ws, parts[-1]]
|
| 1079 |
+
self.attr(node, 'name', name_pattern,
|
| 1080 |
+
deps=('name',),
|
| 1081 |
+
default=node.name)
|
| 1082 |
+
if node.asname is not None:
|
| 1083 |
+
self.attr(node, 'asname', [self.ws, 'as', self.ws], default=' as ')
|
| 1084 |
+
self.token(node.asname)
|
| 1085 |
+
|
| 1086 |
+
@space_around
|
| 1087 |
+
def visit_arg(self, node):
|
| 1088 |
+
self.token(node.arg)
|
| 1089 |
+
if node.annotation is not None:
|
| 1090 |
+
self.attr(node, 'annotation_prefix', [self.ws, ':', self.ws],
|
| 1091 |
+
default=': ')
|
| 1092 |
+
self.visit(node.annotation)
|
| 1093 |
+
|
| 1094 |
+
@space_around
|
| 1095 |
+
def visit_arguments(self, node):
|
| 1096 |
+
# In Python 3, args appearing after *args must be kwargs
|
| 1097 |
+
kwonlyargs = getattr(node, 'kwonlyargs', [])
|
| 1098 |
+
kw_defaults = getattr(node, 'kw_defaults', [])
|
| 1099 |
+
assert len(kwonlyargs) == len(kw_defaults)
|
| 1100 |
+
|
| 1101 |
+
total_args = sum((len(node.args + kwonlyargs),
|
| 1102 |
+
len(getattr(node, 'posonlyargs', [])),
|
| 1103 |
+
1 if node.vararg else 0,
|
| 1104 |
+
1 if node.kwarg else 0))
|
| 1105 |
+
arg_i = 0
|
| 1106 |
+
|
| 1107 |
+
pos_args = getattr(node, 'posonlyargs', []) + node.args
|
| 1108 |
+
positional = pos_args[:-len(node.defaults)] if node.defaults else pos_args
|
| 1109 |
+
keyword = node.args[-len(node.defaults):] if node.defaults else node.args
|
| 1110 |
+
|
| 1111 |
+
for arg in positional:
|
| 1112 |
+
self.visit(arg)
|
| 1113 |
+
arg_i += 1
|
| 1114 |
+
if arg_i < total_args:
|
| 1115 |
+
self.attr(node, 'comma_%d' % arg_i, [self.ws, ',', self.ws],
|
| 1116 |
+
default=', ')
|
| 1117 |
+
if arg_i == len(getattr(node, 'posonlyargs', [])):
|
| 1118 |
+
self.attr(node, 'posonly_sep', [self.ws, '/', self.ws, ',', self.ws],
|
| 1119 |
+
default='/, ')
|
| 1120 |
+
|
| 1121 |
+
for i, (arg, default) in enumerate(zip(keyword, node.defaults)):
|
| 1122 |
+
self.visit(arg)
|
| 1123 |
+
self.attr(node, 'default_%d' % i, [self.ws, '=', self.ws],
|
| 1124 |
+
default='=')
|
| 1125 |
+
self.visit(default)
|
| 1126 |
+
arg_i += 1
|
| 1127 |
+
if arg_i < total_args:
|
| 1128 |
+
self.attr(node, 'comma_%d' % arg_i, [self.ws, ',', self.ws],
|
| 1129 |
+
default=', ')
|
| 1130 |
+
|
| 1131 |
+
if node.vararg:
|
| 1132 |
+
self.attr(node, 'vararg_prefix', [self.ws, '*', self.ws], default='*')
|
| 1133 |
+
if isinstance(node.vararg, ast.AST):
|
| 1134 |
+
self.visit(node.vararg)
|
| 1135 |
+
else:
|
| 1136 |
+
self.token(node.vararg)
|
| 1137 |
+
self.attr(node, 'vararg_suffix', [self.ws])
|
| 1138 |
+
arg_i += 1
|
| 1139 |
+
if arg_i < total_args:
|
| 1140 |
+
self.token(',')
|
| 1141 |
+
elif kwonlyargs:
|
| 1142 |
+
# If no vararg, but we have kwonlyargs, insert a naked *, which will
|
| 1143 |
+
# definitely not be the last arg.
|
| 1144 |
+
self.attr(node, 'kwonly_sep', [self.ws, '*', self.ws, ',', self.ws]);
|
| 1145 |
+
|
| 1146 |
+
for i, (arg, default) in enumerate(zip(kwonlyargs, kw_defaults)):
|
| 1147 |
+
self.visit(arg)
|
| 1148 |
+
if default is not None:
|
| 1149 |
+
self.attr(node, 'kw_default_%d' % i, [self.ws, '=', self.ws],
|
| 1150 |
+
default='=')
|
| 1151 |
+
self.visit(default)
|
| 1152 |
+
arg_i += 1
|
| 1153 |
+
if arg_i < total_args:
|
| 1154 |
+
self.attr(node, 'comma_%d' % arg_i, [self.ws, ',', self.ws],
|
| 1155 |
+
default=', ')
|
| 1156 |
+
|
| 1157 |
+
if node.kwarg:
|
| 1158 |
+
self.attr(node, 'kwarg_prefix', [self.ws, '**', self.ws], default='**')
|
| 1159 |
+
if isinstance(node.kwarg, ast.AST):
|
| 1160 |
+
self.visit(node.kwarg)
|
| 1161 |
+
else:
|
| 1162 |
+
self.token(node.kwarg)
|
| 1163 |
+
self.attr(node, 'kwarg_suffix', [self.ws])
|
| 1164 |
+
|
| 1165 |
+
@space_around
|
| 1166 |
+
def visit_comprehension(self, node):
|
| 1167 |
+
if getattr(node, 'is_async', False):
|
| 1168 |
+
self.attr(node, 'for', [self.ws, 'async', self.ws, 'for', self.ws],
|
| 1169 |
+
default=' async for ')
|
| 1170 |
+
else:
|
| 1171 |
+
self.attr(node, 'for', [self.ws, 'for', self.ws], default=' for ')
|
| 1172 |
+
self.visit(node.target)
|
| 1173 |
+
self.attr(node, 'in', [self.ws, 'in', self.ws], default=' in ')
|
| 1174 |
+
self.visit(node.iter)
|
| 1175 |
+
for i, if_expr in enumerate(node.ifs):
|
| 1176 |
+
self.attr(node, 'if_%d' % i, [self.ws, 'if', self.ws], default=' if ')
|
| 1177 |
+
self.visit(if_expr)
|
| 1178 |
+
|
| 1179 |
+
@space_around
|
| 1180 |
+
def visit_keyword(self, node):
|
| 1181 |
+
if node.arg is None:
|
| 1182 |
+
self.attr(node, 'stars', ['**', self.ws], default='**')
|
| 1183 |
+
else:
|
| 1184 |
+
self.token(node.arg)
|
| 1185 |
+
self.attr(node, 'eq', [self.ws, '='], default='=')
|
| 1186 |
+
self.visit(node.value)
|
| 1187 |
+
|
| 1188 |
+
@space_left
|
| 1189 |
+
def visit_Index(self, node):
|
| 1190 |
+
self.visit(node.value)
|
| 1191 |
+
|
| 1192 |
+
@space_left
|
| 1193 |
+
def visit_ExtSlice(self, node):
|
| 1194 |
+
for i, dim in enumerate(node.dims):
|
| 1195 |
+
self.visit(dim)
|
| 1196 |
+
if dim is not node.dims[-1]:
|
| 1197 |
+
self.attr(node, 'dim_sep_%d' % i, [self.ws, ',', self.ws], default=', ')
|
| 1198 |
+
self.optional_token(node, 'trailing_comma', ',', default=False)
|
| 1199 |
+
|
| 1200 |
+
  @space_left
  def visit_Slice(self, node):
    """Account for a slice expression, e.g. `a[1:2:3]`, `a[::x]`, `a[:]`.

    Token order matters here: the first colon is always present, the second
    colon and the step are both optional.
    """
    if node.lower:
      self.visit(node.lower)
    # The first colon is recorded even when there is no lower bound.
    self.attr(node, 'lowerspace', [self.ws, ':', self.ws], default=':')
    if node.upper:
      self.visit(node.upper)

    self.attr(node, 'stepspace1', [self.ws])
    # The second colon is optional (`a[1:2]` vs `a[1:2:]`).
    self.optional_token(node, 'step_colon', ':')
    self.attr(node, 'stepspace2', [self.ws])
    if node.step and self.check_slice_includes_step(node):
      self.optional_token(node, 'step_colon_2', ':', default=True)
      # Mark the step as explicit so check_slice_includes_step can later
      # distinguish it from the 2.7 parser's implicit Name('None') step.
      node.step.is_explicit_step = True
      self.visit(node.step)
|
| 1215 |
+
|
| 1216 |
+
def check_slice_includes_step(self, node):
|
| 1217 |
+
"""Helper function for Slice node to determine whether to visit its step."""
|
| 1218 |
+
# This is needed because of a bug in the 2.7 parser which treats
|
| 1219 |
+
# a[::] as Slice(lower=None, upper=None, step=Name(id='None'))
|
| 1220 |
+
# but also treats a[::None] exactly the same.
|
| 1221 |
+
if not node.step:
|
| 1222 |
+
return False
|
| 1223 |
+
if getattr(node.step, 'is_explicit_step', False):
|
| 1224 |
+
return True
|
| 1225 |
+
return not (isinstance(node.step, ast.Name) and node.step.id == 'None')
|
| 1226 |
+
|
| 1227 |
+
@fstring_expression
|
| 1228 |
+
def visit_FormattedValue(self, node):
|
| 1229 |
+
self.visit(node.value)
|
| 1230 |
+
if node.conversion != -1:
|
| 1231 |
+
self.attr(node, 'conversion',
|
| 1232 |
+
[self.ws, '!', chr(node.conversion)], deps=('conversion',),
|
| 1233 |
+
default='!%c' % node.conversion)
|
| 1234 |
+
if node.format_spec:
|
| 1235 |
+
self.attr(node, 'format_spec_prefix', [self.ws, ':', self.ws],
|
| 1236 |
+
default=':')
|
| 1237 |
+
self.visit(node.format_spec)
|
| 1238 |
+
|
| 1239 |
+
|
| 1240 |
+
class AnnotationError(Exception):
|
| 1241 |
+
"""An exception for when we failed to annotate the tree."""
|
| 1242 |
+
|
| 1243 |
+
|
| 1244 |
+
class AstAnnotator(BaseVisitor):
|
| 1245 |
+
|
| 1246 |
+
def __init__(self, source):
|
| 1247 |
+
super(AstAnnotator, self).__init__()
|
| 1248 |
+
self.tokens = token_generator.TokenGenerator(source)
|
| 1249 |
+
|
| 1250 |
+
  def visit(self, node):
    """Record the current indentation on `node`, then dispatch to it.

    Wraps the low-level errors raised by token parsing in AnnotationError so
    callers only need to handle a single failure type.
    """
    try:
      fmt.set(node, 'indent', self._indent)
      fmt.set(node, 'indent_diff', self._indent_diff)
      super(AstAnnotator, self).visit(node)
    except (TypeError, ValueError, IndexError, KeyError) as e:
      raise AnnotationError(e)
|
| 1257 |
+
|
| 1258 |
+
  def indented(self, node, children_attr):
    """Generator which annotates child nodes with their indentation level.

    Yields each child of `node.<children_attr>` while `self._indent` /
    `self._indent_diff` are temporarily set to the children's indentation,
    restoring the outer values afterwards. Also records the block's trailing
    whitespace on `node` as 'block_suffix_<children_attr>'.
    """
    children = getattr(node, children_attr)
    # NOTE(review): reads the token generator's private _loc -- presumably the
    # current (row, col) position; confirm against TokenGenerator.
    cur_loc = self.tokens._loc
    next_loc = self.tokens.peek_non_whitespace().start
    # Special case: if the children are on the same line, then there is no
    # indentation level to track.
    if cur_loc[0] == next_loc[0]:
      indent_diff = self._indent_diff
      self._indent_diff = None
      for child in children:
        yield child
      # Restore the outer indent delta once the same-line children are done.
      self._indent_diff = indent_diff
      return

    prev_indent = self._indent
    prev_indent_diff = self._indent_diff

    # Find the indent level of the first child
    indent_token = self.tokens.peek_conditional(
        lambda t: t.type == token_generator.TOKENS.INDENT)
    new_indent = indent_token.src
    new_diff = _get_indent_diff(prev_indent, new_indent)
    if not new_diff:
      # Inner block was not indented further than the outer one; fall back
      # rather than failing the whole annotation.
      new_diff = ' ' * 4  # Sensible default
      print('Indent detection failed (line %d); inner indentation level is not '
            'more than the outer indentation.' % cur_loc[0], file=sys.stderr)

    # Set the indent level to the child's indent and iterate over the children
    self._indent = new_indent
    self._indent_diff = new_diff
    for child in children:
      yield child
    # Store the suffix at this indentation level, which could be many lines
    fmt.set(node, 'block_suffix_%s' % children_attr,
            self.tokens.block_whitespace(self._indent))

    # Dedent back to the previous level
    self._indent = prev_indent
    self._indent_diff = prev_indent_diff
|
| 1298 |
+
|
| 1299 |
+
@expression
|
| 1300 |
+
def visit_Num(self, node):
|
| 1301 |
+
"""Annotate a Num node with the exact number format."""
|
| 1302 |
+
token_number_type = token_generator.TOKENS.NUMBER
|
| 1303 |
+
contentargs = [lambda: self.tokens.next_of_type(token_number_type).src]
|
| 1304 |
+
if self.tokens.peek().src == '-':
|
| 1305 |
+
contentargs.insert(0, '-')
|
| 1306 |
+
self.attr(node, 'content', contentargs, deps=('n',), default=str(node.n))
|
| 1307 |
+
|
| 1308 |
+
@expression
|
| 1309 |
+
def visit_Str(self, node):
|
| 1310 |
+
"""Annotate a Str node with the exact string format."""
|
| 1311 |
+
self.attr(node, 'content', [self.tokens.str], deps=('s',), default=node.s)
|
| 1312 |
+
|
| 1313 |
+
@expression
|
| 1314 |
+
def visit_JoinedStr(self, node):
|
| 1315 |
+
"""Annotate a JoinedStr node with the fstr formatting metadata."""
|
| 1316 |
+
fstr_iter = self.tokens.fstr()()
|
| 1317 |
+
res = ''
|
| 1318 |
+
values = (v for v in node.values if isinstance(v, ast.FormattedValue))
|
| 1319 |
+
while True:
|
| 1320 |
+
res_part, tg = next(fstr_iter)
|
| 1321 |
+
res += res_part
|
| 1322 |
+
if tg is None:
|
| 1323 |
+
break
|
| 1324 |
+
prev_tokens = self.tokens
|
| 1325 |
+
self.tokens = tg
|
| 1326 |
+
self.visit(next(values))
|
| 1327 |
+
self.tokens = prev_tokens
|
| 1328 |
+
|
| 1329 |
+
self.attr(node, 'content', [lambda: res], default=res)
|
| 1330 |
+
|
| 1331 |
+
@expression
|
| 1332 |
+
def visit_Bytes(self, node):
|
| 1333 |
+
"""Annotate a Bytes node with the exact string format."""
|
| 1334 |
+
self.attr(node, 'content', [self.tokens.str], deps=('s',), default=node.s)
|
| 1335 |
+
|
| 1336 |
+
@space_around
|
| 1337 |
+
def visit_Ellipsis(self, node):
|
| 1338 |
+
# Ellipsis is sometimes split into 3 tokens and other times a single token
|
| 1339 |
+
# Account for both forms when parsing the input.
|
| 1340 |
+
if self.tokens.peek().src == '...':
|
| 1341 |
+
self.token('...')
|
| 1342 |
+
else:
|
| 1343 |
+
for i in range(3):
|
| 1344 |
+
self.token('.')
|
| 1345 |
+
|
| 1346 |
+
def check_is_elif(self, node):
|
| 1347 |
+
"""Return True iff the If node is an `elif` in the source."""
|
| 1348 |
+
next_tok = self.tokens.next_name()
|
| 1349 |
+
return isinstance(node, ast.If) and next_tok.src == 'elif'
|
| 1350 |
+
|
| 1351 |
+
def check_is_continued_try(self, node):
|
| 1352 |
+
"""Return True iff the TryExcept node is a continued `try` in the source."""
|
| 1353 |
+
return (isinstance(node, ast.TryExcept) and
|
| 1354 |
+
self.tokens.peek_non_whitespace().src != 'try')
|
| 1355 |
+
|
| 1356 |
+
def check_is_continued_with(self, node):
|
| 1357 |
+
"""Return True iff the With node is a continued `with` in the source."""
|
| 1358 |
+
return isinstance(node, ast.With) and self.tokens.peek().src == ','
|
| 1359 |
+
|
| 1360 |
+
def check_slice_includes_step(self, node):
|
| 1361 |
+
"""Helper function for Slice node to determine whether to visit its step."""
|
| 1362 |
+
# This is needed because of a bug in the 2.7 parser which treats
|
| 1363 |
+
# a[::] as Slice(lower=None, upper=None, step=Name(id='None'))
|
| 1364 |
+
# but also treats a[::None] exactly the same.
|
| 1365 |
+
return self.tokens.peek_non_whitespace().src not in '],'
|
| 1366 |
+
|
| 1367 |
+
def ws(self, max_lines=None, semicolon=False, comment=True):
|
| 1368 |
+
"""Parse some whitespace from the source tokens and return it."""
|
| 1369 |
+
next_token = self.tokens.peek()
|
| 1370 |
+
if semicolon and next_token and next_token.src == ';':
|
| 1371 |
+
result = self.tokens.whitespace() + self.token(';')
|
| 1372 |
+
next_token = self.tokens.peek()
|
| 1373 |
+
if next_token.type in (token_generator.TOKENS.NL,
|
| 1374 |
+
token_generator.TOKENS.NEWLINE):
|
| 1375 |
+
result += self.tokens.whitespace(max_lines=1)
|
| 1376 |
+
return result
|
| 1377 |
+
return self.tokens.whitespace(max_lines=max_lines, comment=comment)
|
| 1378 |
+
|
| 1379 |
+
def dots(self, num_dots):
|
| 1380 |
+
"""Parse a number of dots."""
|
| 1381 |
+
def _parse_dots():
|
| 1382 |
+
return self.tokens.dots(num_dots)
|
| 1383 |
+
return _parse_dots
|
| 1384 |
+
|
| 1385 |
+
def block_suffix(self, node, indent_level):
|
| 1386 |
+
fmt.set(node, 'suffix', self.tokens.block_whitespace(indent_level))
|
| 1387 |
+
|
| 1388 |
+
def token(self, token_val):
|
| 1389 |
+
"""Parse a single token with exactly the given value."""
|
| 1390 |
+
token = self.tokens.next()
|
| 1391 |
+
if token.src != token_val:
|
| 1392 |
+
raise AnnotationError("Expected %r but found %r\nline %d: %s" % (
|
| 1393 |
+
token_val, token.src, token.start[0], token.line))
|
| 1394 |
+
|
| 1395 |
+
# If the token opens or closes a parentheses scope, keep track of it
|
| 1396 |
+
if token.src in '({[':
|
| 1397 |
+
self.tokens.hint_open()
|
| 1398 |
+
elif token.src in ')}]':
|
| 1399 |
+
self.tokens.hint_closed()
|
| 1400 |
+
|
| 1401 |
+
return token.src
|
| 1402 |
+
|
| 1403 |
+
def optional_token(self, node, attr_name, token_val,
|
| 1404 |
+
allow_whitespace_prefix=False, default=False):
|
| 1405 |
+
"""Try to parse a token and attach it to the node."""
|
| 1406 |
+
del default
|
| 1407 |
+
fmt.append(node, attr_name, '')
|
| 1408 |
+
token = (self.tokens.peek_non_whitespace()
|
| 1409 |
+
if allow_whitespace_prefix else self.tokens.peek())
|
| 1410 |
+
if token and token.src == token_val:
|
| 1411 |
+
parsed = ''
|
| 1412 |
+
if allow_whitespace_prefix:
|
| 1413 |
+
parsed += self.ws()
|
| 1414 |
+
fmt.append(node, attr_name,
|
| 1415 |
+
parsed + self.tokens.next().src + self.ws())
|
| 1416 |
+
|
| 1417 |
+
def one_of_symbols(self, *symbols):
|
| 1418 |
+
"""Account for one of the given symbols."""
|
| 1419 |
+
def _one_of_symbols():
|
| 1420 |
+
next_token = self.tokens.next()
|
| 1421 |
+
found = next((s for s in symbols if s == next_token.src), None)
|
| 1422 |
+
if found is None:
|
| 1423 |
+
raise AnnotationError(
|
| 1424 |
+
'Expected one of: %r, but found: %r' % (symbols, next_token.src))
|
| 1425 |
+
return found
|
| 1426 |
+
return _one_of_symbols
|
| 1427 |
+
|
| 1428 |
+
def attr(self, node, attr_name, attr_vals, deps=None, default=None):
|
| 1429 |
+
"""Parses some source and sets an attribute on the given node.
|
| 1430 |
+
|
| 1431 |
+
Stores some arbitrary formatting information on the node. This takes a list
|
| 1432 |
+
attr_vals which tell what parts of the source to parse. The result of each
|
| 1433 |
+
function is concatenated onto the formatting data, and strings in this list
|
| 1434 |
+
are a shorthand to look for an exactly matching token.
|
| 1435 |
+
|
| 1436 |
+
For example:
|
| 1437 |
+
self.attr(node, 'foo', ['(', self.ws, 'Hello, world!', self.ws, ')'],
|
| 1438 |
+
deps=('s',), default=node.s)
|
| 1439 |
+
|
| 1440 |
+
is a rudimentary way to parse a parenthesized string. After running this,
|
| 1441 |
+
the matching source code for this node will be stored in its formatting
|
| 1442 |
+
dict under the key 'foo'. The result might be `(\n 'Hello, world!'\n)`.
|
| 1443 |
+
|
| 1444 |
+
This also keeps track of the current value of each of the dependencies.
|
| 1445 |
+
In the above example, we would have looked for the string 'Hello, world!'
|
| 1446 |
+
because that's the value of node.s, however, when we print this back, we
|
| 1447 |
+
want to know if the value of node.s has changed since this time. If any of
|
| 1448 |
+
the dependent values has changed, the default would be used instead.
|
| 1449 |
+
|
| 1450 |
+
Arguments:
|
| 1451 |
+
node: (ast.AST) An AST node to attach formatting information to.
|
| 1452 |
+
attr_name: (string) Name to store the formatting information under.
|
| 1453 |
+
attr_vals: (list of functions/strings) Each item is either a function
|
| 1454 |
+
that parses some source and return a string OR a string to match
|
| 1455 |
+
exactly (as a token).
|
| 1456 |
+
deps: (optional, set of strings) Attributes of the node which attr_vals
|
| 1457 |
+
depends on.
|
| 1458 |
+
default: (string) Unused here.
|
| 1459 |
+
"""
|
| 1460 |
+
del default # unused
|
| 1461 |
+
if deps:
|
| 1462 |
+
for dep in deps:
|
| 1463 |
+
fmt.set(node, dep + '__src', getattr(node, dep, None))
|
| 1464 |
+
attr_parts = []
|
| 1465 |
+
for attr_val in attr_vals:
|
| 1466 |
+
if isinstance(attr_val, six.string_types):
|
| 1467 |
+
attr_parts.append(self.token(attr_val))
|
| 1468 |
+
else:
|
| 1469 |
+
attr_parts.append(attr_val())
|
| 1470 |
+
fmt.set(node, attr_name, ''.join(attr_parts))
|
| 1471 |
+
|
| 1472 |
+
def scope(self, node, attr=None, trailing_comma=False, default_parens=False):
|
| 1473 |
+
"""Return a context manager to handle a parenthesized scope.
|
| 1474 |
+
|
| 1475 |
+
Arguments:
|
| 1476 |
+
node: (ast.AST) Node to store the scope prefix and suffix on.
|
| 1477 |
+
attr: (string, optional) Attribute of the node contained in the scope, if
|
| 1478 |
+
any. For example, as `None`, the scope would wrap the entire node, but
|
| 1479 |
+
as 'bases', the scope might wrap only the bases of a class.
|
| 1480 |
+
trailing_comma: (boolean) If True, allow a trailing comma at the end.
|
| 1481 |
+
default_parens: (boolean) If True and no formatting information is
|
| 1482 |
+
present, the scope would be assumed to be parenthesized.
|
| 1483 |
+
"""
|
| 1484 |
+
del default_parens
|
| 1485 |
+
return self.tokens.scope(node, attr=attr, trailing_comma=trailing_comma)
|
| 1486 |
+
|
| 1487 |
+
def _optional_token(self, token_type, token_val):
|
| 1488 |
+
token = self.tokens.peek()
|
| 1489 |
+
if not token or token.type != token_type or token.src != token_val:
|
| 1490 |
+
return ''
|
| 1491 |
+
else:
|
| 1492 |
+
self.tokens.next()
|
| 1493 |
+
return token.src + self.ws()
|
| 1494 |
+
|
| 1495 |
+
|
| 1496 |
+
def _get_indent_width(indent):
|
| 1497 |
+
width = 0
|
| 1498 |
+
for c in indent:
|
| 1499 |
+
if c == ' ':
|
| 1500 |
+
width += 1
|
| 1501 |
+
elif c == '\t':
|
| 1502 |
+
width += 8 - (width % 8)
|
| 1503 |
+
return width
|
| 1504 |
+
|
| 1505 |
+
|
| 1506 |
+
def _ltrim_indent(indent, remove_width):
|
| 1507 |
+
width = 0
|
| 1508 |
+
for i, c in enumerate(indent):
|
| 1509 |
+
if width == remove_width:
|
| 1510 |
+
break
|
| 1511 |
+
if c == ' ':
|
| 1512 |
+
width += 1
|
| 1513 |
+
elif c == '\t':
|
| 1514 |
+
if width + 8 - (width % 8) <= remove_width:
|
| 1515 |
+
width += 8 - (width % 8)
|
| 1516 |
+
else:
|
| 1517 |
+
return ' ' * (width + 8 - remove_width) + indent[i + 1:]
|
| 1518 |
+
return indent[i:]
|
| 1519 |
+
|
| 1520 |
+
|
| 1521 |
+
def _get_indent_diff(outer, inner):
  """Computes the whitespace added to an indented block.

  Finds the portion of an indent prefix that is added onto the outer indent.
  In most cases, the inner indent starts with the outer indent, but this is
  not necessarily true. For example, the outer block could be indented to
  four spaces and its body indented with one tab (effectively 8 spaces).

  Arguments:
    outer: (string) Indentation of the outer block.
    inner: (string) Indentation of the inner block.
  Returns:
    The string whitespace which is added to the indentation level when moving
    from outer to inner, or None if inner is not wider than outer.
  """
  outer_width = _get_indent_width(outer)
  inner_width = _get_indent_width(inner)
  if inner_width <= outer_width:
    # The inner block is not actually indented further than the outer one.
    return None
  # Removing the outer block's width from the inner indent leaves exactly the
  # added portion.
  return _ltrim_indent(inner, outer_width)
|
lib/python3.10/site-packages/pasta/base/annotate_test.py
ADDED
|
@@ -0,0 +1,477 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
"""Tests for annotate."""
|
| 3 |
+
# Copyright 2017 Google LLC
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# https://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
from __future__ import absolute_import
|
| 18 |
+
from __future__ import division
|
| 19 |
+
from __future__ import print_function
|
| 20 |
+
|
| 21 |
+
import ast
|
| 22 |
+
import difflib
|
| 23 |
+
import itertools
|
| 24 |
+
import os.path
|
| 25 |
+
from six import with_metaclass
|
| 26 |
+
import sys
|
| 27 |
+
import textwrap
|
| 28 |
+
import unittest
|
| 29 |
+
|
| 30 |
+
import pasta
|
| 31 |
+
from pasta.base import annotate
|
| 32 |
+
from pasta.base import ast_utils
|
| 33 |
+
from pasta.base import codegen
|
| 34 |
+
from pasta.base import formatting as fmt
|
| 35 |
+
from pasta.base import test_utils
|
| 36 |
+
|
| 37 |
+
TESTDATA_DIR = os.path.realpath(
|
| 38 |
+
os.path.join(os.path.dirname(pasta.__file__), '../testdata'))
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class PrefixSuffixTest(test_utils.TestCase):
  """Tests for the prefix/suffix/block-suffix formatting stored on nodes."""

  def test_block_suffix(self):
    """Trailing comments of an indented block are stored as its suffix."""
    src_tpl = textwrap.dedent('''\
        {open_block}
          pass #a
          #b
          #c

          #d
        #e
        a
        ''')
    test_cases = (
        # first: attribute of the node with the last block
        # second: code snippet to open a block
        ('body', 'def x():'),
        ('body', 'class X:'),
        ('body', 'if x:'),
        ('orelse', 'if x:\n  y\nelse:'),
        ('body', 'if x:\n  y\nelif y:'),
        ('body', 'while x:'),
        ('orelse', 'while x:\n  y\nelse:'),
        ('finalbody', 'try:\n  x\nfinally:'),
        ('body', 'try:\n  x\nexcept:'),
        ('orelse', 'try:\n  x\nexcept:\n  y\nelse:'),
        ('body', 'with x:'),
        ('body', 'with x, y:'),
        ('body', 'with x:\n  with y:'),
        ('body', 'for x in y:'),
    )

    def is_node_for_suffix(node, children_attr):
      # Return True if this node contains the 'pass' statement.
      val = getattr(node, children_attr, None)
      # Use isinstance rather than comparing types directly, and guard
      # against empty lists (e.g. an If node's empty orelse) before
      # indexing into val.
      return (isinstance(val, list) and len(val) > 0
              and isinstance(val[0], ast.Pass))

    for children_attr, open_block in test_cases:
      src = src_tpl.format(open_block=open_block)
      t = pasta.parse(src)
      node_finder = ast_utils.FindNodeVisitor(
          lambda node: is_node_for_suffix(node, children_attr))
      node_finder.visit(t)
      node = node_finder.results[0]
      expected = '  #b\n  #c\n\n  #d\n'
      actual = str(fmt.get(node, 'block_suffix_%s' % children_attr))
      self.assertMultiLineEqual(
          expected, actual,
          'Incorrect suffix for code:\n%s\nNode: %s (line %d)\nDiff:\n%s' % (
              src, node, node.lineno, '\n'.join(_get_diff(actual, expected))))
      # Round-tripping the tree must reproduce the input exactly.
      self.assertMultiLineEqual(src, pasta.dump(t))

  def test_module_suffix(self):
    """Comments after the last statement become the Module node's suffix."""
    src = 'foo\n#bar\n\n#baz\n'
    t = pasta.parse(src)
    self.assertEqual(src[src.index('#bar'):], fmt.get(t, 'suffix'))

  def test_no_block_suffix_for_single_line_statement(self):
    """A one-line block (if x: return y) records no block suffix."""
    src = 'if x: return y\n  #a\n#b\n'
    t = pasta.parse(src)
    self.assertIsNone(fmt.get(t.body[0], 'block_suffix_body'))

  def test_expression_prefix_suffix(self):
    """Blank lines around an expression split between prefix and suffix."""
    src = 'a\n\nfoo\n\n\nb\n'
    t = pasta.parse(src)
    self.assertEqual('\n', fmt.get(t.body[1], 'prefix'))
    self.assertEqual('\n', fmt.get(t.body[1], 'suffix'))

  def test_statement_prefix_suffix(self):
    """A block statement absorbs no trailing blank lines into its suffix."""
    src = 'a\n\ndef foo():\n  return bar\n\n\nb\n'
    t = pasta.parse(src)
    self.assertEqual('\n', fmt.get(t.body[1], 'prefix'))
    self.assertEqual('', fmt.get(t.body[1], 'suffix'))
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class IndentationTest(test_utils.TestCase):
  """Tests for the indent/indent_diff formatting recorded on AST nodes."""

  def test_indent_levels(self):
    # NOTE(review): leading whitespace in this literal was mangled by the
    # rendering; reconstructed from the asserts below — confirm against VCS.
    src = textwrap.dedent('''\
        foo('begin')
        if a:
          foo('a1')
          if b:
            foo('b1')
            if c:
              foo('c1')
            foo('b2')
          foo('a2')
        foo('end')
        ''')
    t = pasta.parse(src)
    call_nodes = ast_utils.find_nodes_by_type(t, (ast.Call,))
    call_nodes.sort(key=lambda node: node.lineno)
    begin, a1, b1, c1, b2, a2, end = call_nodes

    # Each call's stored indent is the full leading whitespace of its line.
    self.assertEqual('', fmt.get(begin, 'indent'))
    self.assertEqual('  ', fmt.get(a1, 'indent'))
    self.assertEqual('    ', fmt.get(b1, 'indent'))
    self.assertEqual('      ', fmt.get(c1, 'indent'))
    self.assertEqual('    ', fmt.get(b2, 'indent'))
    self.assertEqual('  ', fmt.get(a2, 'indent'))
    self.assertEqual('', fmt.get(end, 'indent'))

  def test_indent_levels_same_line(self):
    src = 'if a: b; c\n'
    t = pasta.parse(src)
    if_node = t.body[0]
    b, c = if_node.body
    # Statements sharing a line with their block opener have no indent_diff.
    self.assertIsNone(fmt.get(b, 'indent_diff'))
    self.assertIsNone(fmt.get(c, 'indent_diff'))

  def test_indent_depths(self):
    # indent_diff is the indentation added relative to the enclosing block,
    # exercised for every combination of space- and tab-based indents.
    template = 'if a:\n{first}if b:\n{first}{second}foo()\n'
    indents = (' ', ' ' * 2, ' ' * 4, ' ' * 8, '\t', '\t' * 2)

    for first, second in itertools.product(indents, indents):
      src = template.format(first=first, second=second)
      t = pasta.parse(src)
      outer_if_node = t.body[0]
      inner_if_node = outer_if_node.body[0]
      call_node = inner_if_node.body[0]

      self.assertEqual('', fmt.get(outer_if_node, 'indent'))
      self.assertEqual('', fmt.get(outer_if_node, 'indent_diff'))
      self.assertEqual(first, fmt.get(inner_if_node, 'indent'))
      self.assertEqual(first, fmt.get(inner_if_node, 'indent_diff'))
      self.assertEqual(first + second, fmt.get(call_node, 'indent'))
      self.assertEqual(second, fmt.get(call_node, 'indent_diff'))

  def test_indent_multiline_string(self):
    src = textwrap.dedent('''\
        class A:
          """Doc
          string."""
          pass
        ''')
    t = pasta.parse(src)
    docstring, pass_stmt = t.body[0].body
    # A multiline string does not disturb indent detection for its statement
    # or the statements following it.
    self.assertEqual('  ', fmt.get(docstring, 'indent'))
    self.assertEqual('  ', fmt.get(pass_stmt, 'indent'))

  def test_indent_multiline_string_with_newline(self):
    # Same as above, but the string contains an explicit escaped newline.
    src = textwrap.dedent('''\
        class A:
          """Doc\n
          string."""
          pass
        ''')
    t = pasta.parse(src)
    docstring, pass_stmt = t.body[0].body
    self.assertEqual('  ', fmt.get(docstring, 'indent'))
    self.assertEqual('  ', fmt.get(pass_stmt, 'indent'))

  def test_scope_trailing_comma(self):
    # A trailing comma remains part of the scope's recorded suffix, up to
    # and including the closing paren.
    template = 'def foo(a, b{trailing_comma}): pass'
    for trailing_comma in ('', ',', ' , '):
      tree = pasta.parse(template.format(trailing_comma=trailing_comma))
      self.assertEqual(trailing_comma.lstrip(' ') + ')',
                       fmt.get(tree.body[0], 'args_suffix'))

    template = 'class Foo(a, b{trailing_comma}): pass'
    for trailing_comma in ('', ',', ' , '):
      tree = pasta.parse(template.format(trailing_comma=trailing_comma))
      self.assertEqual(trailing_comma.lstrip(' ') + ')',
                       fmt.get(tree.body[0], 'bases_suffix'))

    template = 'from mod import (a, b{trailing_comma})'
    for trailing_comma in ('', ',', ' , '):
      tree = pasta.parse(template.format(trailing_comma=trailing_comma))
      # For imports the leading space of ' , ' is preserved (no lstrip).
      self.assertEqual(trailing_comma + ')',
                       fmt.get(tree.body[0], 'names_suffix'))

  def test_indent_extra_newlines(self):
    src = textwrap.dedent('''\
        if a:

          b
        ''')
    t = pasta.parse(src)
    if_node = t.body[0]
    b = if_node.body[0]
    # A blank line before the first statement does not affect indent_diff.
    self.assertEqual('  ', fmt.get(b, 'indent_diff'))

  def test_indent_extra_newlines_with_comment(self):
    src = textwrap.dedent('''\
        if a:
            #not here

          b
        ''')
    t = pasta.parse(src)
    if_node = t.body[0]
    b = if_node.body[0]
    # The comment's (deeper) indentation is ignored; only the first real
    # statement defines the block's indent_diff.
    self.assertEqual('  ', fmt.get(b, 'indent_diff'))

  def test_autoindent(self):
    src = textwrap.dedent('''\
        def a():
          b
          c
        ''')
    expected = textwrap.dedent('''\
        def a():
          b
          new_node
        ''')
    t = pasta.parse(src)
    # Replace the second node and make sure the indent level is corrected
    # when regenerating source for the new (position-less) node.
    t.body[0].body[1] = ast.Expr(ast.Name(id='new_node'))
    self.assertMultiLineEqual(expected, codegen.to_str(t))

  @test_utils.requires_features('mixed_tabs_spaces')
  def test_mixed_tabs_spaces_indentation(self):
    # Parsing must tolerate a tab-indented line inside a space-indented block.
    pasta.parse(textwrap.dedent('''\
        if a:
          b
        {ONETAB}c
        ''').format(ONETAB='\t'))

  @test_utils.requires_features('mixed_tabs_spaces')
  def test_tab_below_spaces(self):
    for num_spaces in range(1, 8):
      t = pasta.parse(textwrap.dedent('''\
          if a:
          {WS}if b:
          {ONETAB}c
          ''').format(ONETAB='\t', WS=' ' * num_spaces))
      node_c = t.body[0].body[0].body[0]
      # The extra indent contributed by the tab is recorded as the
      # equivalent number of spaces (8 - num_spaces).
      self.assertEqual(fmt.get(node_c, 'indent_diff'), ' ' * (8 - num_spaces))

  @test_utils.requires_features('mixed_tabs_spaces')
  def test_tabs_below_spaces_and_tab(self):
    for num_spaces in range(1, 8):
      t = pasta.parse(textwrap.dedent('''\
          if a:
          {WS}{ONETAB}if b:
          {ONETAB}{ONETAB}c
          ''').format(ONETAB='\t', WS=' ' * num_spaces))
      node_c = t.body[0].body[0].body[0]
      # Relative to the spaces+tab parent indent, the diff is one tab.
      self.assertEqual(fmt.get(node_c, 'indent_diff'), '\t')
| 281 |
+
|
| 282 |
+
def _is_syntax_valid(filepath):
|
| 283 |
+
with open(filepath, 'r') as f:
|
| 284 |
+
try:
|
| 285 |
+
ast.parse(f.read())
|
| 286 |
+
except SyntaxError:
|
| 287 |
+
return False
|
| 288 |
+
return True
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
class SymmetricTestMeta(type):
  """Metaclass that generates one symmetry test per testdata input file.

  For every ``*.in`` file under testdata/ast, a ``test_symmetric_<name>``
  method is added to the class under construction. Inputs whose syntax is
  invalid on the running interpreter are wrapped in unittest.skipIf.
  """

  def __new__(mcs, name, bases, inst_dict):
    # Helper function to generate a test method
    def symmetric_test_generator(filepath):
      def test(self):
        with open(filepath, 'r') as handle:
          src = handle.read()
        # Parsing + annotating, then regenerating source, must reproduce
        # the input exactly (the "symmetry" property).
        t = ast_utils.parse(src)
        annotator = annotate.AstAnnotator(src)
        annotator.visit(t)
        self.assertMultiLineEqual(codegen.to_str(t), src)
        # Every open paren must have been matched during annotation.
        self.assertEqual([], annotator.tokens._parens, 'Unmatched parens')
      return test

    # Add a test method for each input file
    test_method_prefix = 'test_symmetric_'
    data_dir = os.path.join(TESTDATA_DIR, 'ast')
    for dirpath, dirs, files in os.walk(data_dir):
      for filename in files:
        if filename.endswith('.in'):
          full_path = os.path.join(dirpath, filename)
          # filename[:-3] strips the '.in' extension for the method name.
          inst_dict[test_method_prefix + filename[:-3]] = unittest.skipIf(
              not _is_syntax_valid(full_path),
              'Test contains syntax not supported by this version.',
          )(symmetric_test_generator(full_path))
    return type.__new__(mcs, name, bases, inst_dict)
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
class SymmetricTest(with_metaclass(SymmetricTestMeta, test_utils.TestCase)):
  """Validates the symmetry property.

  After parsing + annotating a module, regenerating the source code for it
  should yield the same result.

  The test methods themselves are generated by SymmetricTestMeta, one per
  input file under testdata/ast.
  """
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def _get_node_identifier(node):
|
| 329 |
+
for attr in ('id', 'name', 'attr', 'arg', 'module'):
|
| 330 |
+
if isinstance(getattr(node, attr, None), str):
|
| 331 |
+
return getattr(node, attr, '')
|
| 332 |
+
return ''
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
class PrefixSuffixGoldenTestMeta(type):
  """Metaclass that generates one golden-file test per testdata input file.

  Each generated test renders the prefix/suffix/indent of every AST node to
  a text report and compares it against a per-python-version golden file
  (or rewrites the golden file when generate_goldens is set on the class).
  """

  def __new__(mcs, name, bases, inst_dict):
    # Helper function to generate a test method
    def golden_test_generator(input_file, golden_file):
      def test(self):
        with open(input_file, 'r') as handle:
          src = handle.read()
        t = ast_utils.parse(src)
        annotator = annotate.AstAnnotator(src)
        annotator.visit(t)

        def escape(s):
          # Render missing values as '' and keep the report one line per
          # node by escaping embedded newlines.
          return '' if s is None else s.replace('\n', '\\n')

        # One report line per AST node: position, type + identifier, and
        # the three formatting attributes wrapped in |...| delimiters.
        result = '\n'.join(
            "{0:12} {1:20} \tprefix=|{2}|\tsuffix=|{3}|\tindent=|{4}|".format(
                str((getattr(n, 'lineno', -1), getattr(n, 'col_offset', -1))),
                type(n).__name__ + ' ' + _get_node_identifier(n),
                escape(fmt.get(n, 'prefix')),
                escape(fmt.get(n, 'suffix')),
                escape(fmt.get(n, 'indent')))
            for n in ast.walk(t)) + '\n'

        # If specified, write the golden data instead of checking it
        if getattr(self, 'generate_goldens', False):
          if not os.path.isdir(os.path.dirname(golden_file)):
            os.makedirs(os.path.dirname(golden_file))
          with open(golden_file, 'w') as f:
            f.write(result)
          print('Wrote: ' + golden_file)
          return

        try:
          with open(golden_file, 'r') as f:
            golden = f.read()
        except IOError:
          self.fail('Missing golden data.')

        self.assertMultiLineEqual(golden, result)
      return test

    # Add a test method for each input file
    test_method_prefix = 'test_golden_prefix_suffix_'
    data_dir = os.path.join(TESTDATA_DIR, 'ast')
    # Goldens differ across python versions, so they are partitioned by
    # a 'major.minor' subdirectory.
    python_version = '%d.%d' % sys.version_info[:2]
    for dirpath, dirs, files in os.walk(data_dir):
      for filename in files:
        if filename.endswith('.in'):
          full_path = os.path.join(dirpath, filename)
          golden_path = os.path.join(dirpath, 'golden', python_version,
                                     filename[:-3] + '.out')
          inst_dict[test_method_prefix + filename[:-3]] = unittest.skipIf(
              not _is_syntax_valid(full_path),
              'Test contains syntax not supported by this version.',
          )(golden_test_generator(full_path, golden_path))
    return type.__new__(mcs, name, bases, inst_dict)
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
class PrefixSuffixGoldenTest(with_metaclass(PrefixSuffixGoldenTestMeta,
                                            test_utils.TestCase)):
  """Checks the prefix and suffix on each node in the AST.

  This uses golden files in testdata/ast/golden. To regenerate these files, run
  python setup.py test -s pasta.base.annotate_test.generate_goldens
  """

  # Golden comparisons can be large; never truncate assertion diffs.
  maxDiff = None
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
class ManualEditsTest(test_utils.TestCase):
  """Tests that we can handle ASTs that have been modified.

  Such ASTs may lack position information (lineno/col_offset) on some nodes.
  """

  def test_call_no_pos(self):
    """Tests that Call node traversal works without position information."""
    src = 'f(a)'
    t = pasta.parse(src)
    node = ast_utils.find_nodes_by_type(t, (ast.Call,))[0]
    # The appended keyword carries no lineno/col_offset at all.
    node.keywords.append(ast.keyword(arg='b', value=ast.Num(n=0)))
    self.assertEqual('f(a, b=0)', pasta.dump(t))

  def test_call_illegal_pos(self):
    """Tests that Call node traversal works even with illegal positions."""
    src = 'f(a)'
    t = pasta.parse(src)
    node = ast_utils.find_nodes_by_type(t, (ast.Call,))[0]
    node.keywords.append(ast.keyword(arg='b', value=ast.Num(n=0)))

    # This position would put b=0 before a, so it should be ignored.
    node.keywords[-1].value.lineno = 0
    node.keywords[-1].value.col_offset = 0

    self.assertEqual('f(a, b=0)', pasta.dump(t))
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
class FstringTest(test_utils.TestCase):
  """Tests fstring support more in-depth."""

  @test_utils.requires_features('fstring')
  def test_fstring(self):
    src = 'f"a {b} c d {e}"'
    t = pasta.parse(src)
    node = t.body[0].value
    # Each interpolated value is represented by a numbered placeholder in
    # the stored 'content' formatting attribute.
    self.assertEqual(
        fmt.get(node, 'content'),
        'f"a {__pasta_fstring_val_0__} c d {__pasta_fstring_val_1__}"')

  @test_utils.requires_features('fstring')
  def test_fstring_escaping(self):
    src = 'f"a {{{b} {{c}}"'
    t = pasta.parse(src)
    node = t.body[0].value
    # Doubled braces are literal braces and must not become placeholders.
    self.assertEqual(
        fmt.get(node, 'content'),
        'f"a {{{__pasta_fstring_val_0__} {{c}}"')
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
def _get_diff(before, after):
|
| 456 |
+
return difflib.ndiff(after.splitlines(), before.splitlines())
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
def suite():
  """Build the full test suite (run via setup.py test)."""
  # unittest.makeSuite was deprecated in Python 3.11 and removed in 3.13;
  # TestLoader.loadTestsFromTestCase is the supported equivalent.
  loader = unittest.TestLoader()
  result = unittest.TestSuite()
  for test_case in (ManualEditsTest, SymmetricTest, PrefixSuffixTest,
                    PrefixSuffixGoldenTest, FstringTest):
    result.addTests(loader.loadTestsFromTestCase(test_case))
  return result
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
def generate_goldens():
  """Build a suite that rewrites golden files instead of checking them."""
  # Flip the class flag before the tests run; golden_test_generator reads
  # it via getattr(self, 'generate_goldens', False).
  setattr(PrefixSuffixGoldenTest, 'generate_goldens', True)
  result = unittest.TestSuite()
  # makeSuite is removed in Python 3.13; use TestLoader instead.
  result.addTests(
      unittest.TestLoader().loadTestsFromTestCase(PrefixSuffixGoldenTest))
  return result
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
if __name__ == '__main__':
  # Discover and run all tests in this module, including the metaclass-
  # generated symmetric and golden tests.
  unittest.main()
|
lib/python3.10/site-packages/pasta/base/ast_constants.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Constants relevant to ast code."""

import ast

# Maps an AST operator/keyword node type to the source token(s) that spell
# it. Two-token operators (IsNot, NotIn) list their tokens in source order.
NODE_TYPE_TO_TOKENS = {
    ast.Add: ('+',),
    ast.And: ('and',),
    ast.BitAnd: ('&',),
    ast.BitOr: ('|',),
    ast.BitXor: ('^',),
    ast.Div: ('/',),
    ast.Eq: ('==',),
    ast.FloorDiv: ('//',),
    ast.Gt: ('>',),
    ast.GtE: ('>=',),
    ast.In: ('in',),
    ast.Invert: ('~',),
    ast.Is: ('is',),
    ast.IsNot: ('is', 'not',),
    ast.LShift: ('<<',),
    ast.Lt: ('<',),
    ast.LtE: ('<=',),
    ast.Mod: ('%',),
    ast.Mult: ('*',),
    ast.Not: ('not',),
    ast.NotEq: ('!=',),
    ast.NotIn: ('not', 'in',),
    ast.Or: ('or',),
    ast.Pow: ('**',),
    ast.RShift: ('>>',),
    ast.Sub: ('-',),
    ast.UAdd: ('+',),
    ast.USub: ('-',),
}


# MatMult is only present on python versions whose ast supports the matrix
# multiplication operator; add it conditionally so importing this module
# never fails on older interpreters.
if hasattr(ast, 'MatMult'):
  NODE_TYPE_TO_TOKENS[ast.MatMult] = ('@',)
|